From 57b6abf85acb1f697913a44e5e105e333d894b78 Mon Sep 17 00:00:00 2001 From: Han Yin Date: Wed, 5 Mar 2025 22:22:49 -0800 Subject: [PATCH 001/398] android : fix KV cache log message condition (#12212) --- examples/llama.android/llama/src/main/cpp/llama-android.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/llama.android/llama/src/main/cpp/llama-android.cpp b/examples/llama.android/llama/src/main/cpp/llama-android.cpp index 2a73983a9832f..0de61ce77c4fa 100644 --- a/examples/llama.android/llama/src/main/cpp/llama-android.cpp +++ b/examples/llama.android/llama/src/main/cpp/llama-android.cpp @@ -361,7 +361,7 @@ Java_android_llama_cpp_LLamaAndroid_completion_1init( const auto tokens_list = common_tokenize(context, text, true, parse_special); auto n_ctx = llama_n_ctx(context); - auto n_kv_req = tokens_list.size() + (n_len - tokens_list.size()); + auto n_kv_req = tokens_list.size() + n_len; LOGi("n_len = %d, n_ctx = %d, n_kv_req = %d", n_len, n_ctx, n_kv_req); From e721c05c9336a72fbb59d5c75967360bc67036c6 Mon Sep 17 00:00:00 2001 From: uvos Date: Thu, 6 Mar 2025 08:20:52 +0100 Subject: [PATCH 002/398] HIP/CUDA: set the paramerter value in maintain_cuda_graph instead of replaceing it. (#12209) This avoids conflict with internal cuda/hip runtimes memory managment behavior. --- ggml/src/ggml-cuda/ggml-cuda.cu | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ggml/src/ggml-cuda/ggml-cuda.cu b/ggml/src/ggml-cuda/ggml-cuda.cu index b5d2c84111e60..497de37be8210 100644 --- a/ggml/src/ggml-cuda/ggml-cuda.cu +++ b/ggml/src/ggml-cuda/ggml-cuda.cu @@ -2571,7 +2571,7 @@ static void maintain_cuda_graph(ggml_backend_cuda_context * cuda_ctx, std::vecto for (size_t i = 0; i < cuda_ctx->cuda_graph->num_nodes; i++) { if(count(ggml_cuda_cpy_fn_ptrs.begin(), ggml_cuda_cpy_fn_ptrs.end(), cuda_ctx->cuda_graph->params[i].func) > 0) { char ** updated_kernel_arg_ptr = cuda_ctx->cuda_graph->updated_kernel_arg.at(k++); - cuda_ctx->cuda_graph->params[i].kernelParams[1] = updated_kernel_arg_ptr; + *(void**)cuda_ctx->cuda_graph->params[i].kernelParams[1] = *(void**)updated_kernel_arg_ptr; CUDA_CHECK(cudaGraphKernelNodeSetParams(cuda_ctx->cuda_graph->nodes[i], &cuda_ctx->cuda_graph->params[i])); } } From e9b2f84f145fbd458dcb98f227bd09370918be6e Mon Sep 17 00:00:00 2001 From: Aaron Teo <57927438+taronaeo@users.noreply.github.com> Date: Thu, 6 Mar 2025 16:33:21 +0800 Subject: [PATCH 003/398] llava: add big-endian conversion for image encoder (#12218) Signed-off-by: Aaron Teo --- examples/llava/convert_image_encoder_to_gguf.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/examples/llava/convert_image_encoder_to_gguf.py b/examples/llava/convert_image_encoder_to_gguf.py index de29687ec9236..2949faec421be 100644 --- a/examples/llava/convert_image_encoder_to_gguf.py +++ b/examples/llava/convert_image_encoder_to_gguf.py @@ -89,6 +89,7 @@ def bytes_to_unicode(): ap = argparse.ArgumentParser() ap.add_argument("-m", "--model-dir", help="Path to model directory cloned from HF Hub", required=True) ap.add_argument("--use-f32", action="store_true", default=False, help="Use f32 instead of f16") +ap.add_argument('--bigendian', action="store_true", default=False, help="Model is executed on big-endian machine") ap.add_argument("--text-only", action="store_true", required=False, help="Save a text-only model. 
It can't be used to encode images") ap.add_argument("--vision-only", action="store_true", required=False, @@ -191,7 +192,7 @@ def bytes_to_unicode(): os.makedirs(output_dir, exist_ok=True) output_prefix = os.path.basename(output_dir).replace("ggml_", "") fname_out = os.path.join(output_dir, f"{fname_middle}model-{ftype_str[ftype]}.gguf") -fout = GGUFWriter(path=fname_out, arch="clip") +fout = GGUFWriter(path=fname_out, arch="clip", endianess=GGUFEndian.LITTLE if not args.bigendian else GGUFEndian.BIG) fout.add_bool("clip.has_text_encoder", has_text_encoder) fout.add_bool("clip.has_vision_encoder", has_vision_encoder) From 42994048a34b0bddd72b57c26f8ae2c7d417946d Mon Sep 17 00:00:00 2001 From: Olivier Chafik Date: Thu, 6 Mar 2025 09:03:31 +0000 Subject: [PATCH 004/398] update function-calling.md w/ template override for functionary-small-v3.2 (#12214) --- docs/function-calling.md | 20 ++++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) diff --git a/docs/function-calling.md b/docs/function-calling.md index 92cb6531ab34a..c3873c3fa63d1 100644 --- a/docs/function-calling.md +++ b/docs/function-calling.md @@ -287,30 +287,32 @@ Here are some models known to work (w/ chat template override when needed): llama-server --jinja -fa -hf bartowski/Qwen2.5-7B-Instruct-GGUF:Q4_K_M llama-server --jinja -fa -hf bartowski/Mistral-Nemo-Instruct-2407-GGUF:Q6_K_L -llama-server --jinja -fa -hf bartowski/functionary-small-v3.2-GGUF:Q4_K_M llama-server --jinja -fa -hf bartowski/Llama-3.3-70B-Instruct-GGUF:Q4_K_M -# Native support for DeepSeek R1 works best w/ our own template (official template buggy) +# Native support for DeepSeek R1 works best w/ our template override (official template is buggy, although we do work around it) llama-server --jinja -fa -hf bartowski/DeepSeek-R1-Distill-Qwen-7B-GGUF:Q6_K_L \ ---chat-template-file models/templates/llama-cpp-deepseek-r1.jinja + --chat-template-file models/templates/llama-cpp-deepseek-r1.jinja llama-server --jinja -fa -hf bartowski/DeepSeek-R1-Distill-Qwen-32B-GGUF:Q4_K_M \ ---chat-template-file models/templates/llama-cpp-deepseek-r1.jinja + --chat-template-file models/templates/llama-cpp-deepseek-r1.jinja # Native support requires the right template for these GGUFs: +llama-server --jinja -fa -hf bartowski/functionary-small-v3.2-GGUF:Q4_K_M + --chat-template-file models/templates/meetkai-functionary-medium-v3.2.jinja + llama-server --jinja -fa -hf bartowski/Hermes-2-Pro-Llama-3-8B-GGUF:Q4_K_M \ ---chat-template-file <( python scripts/get_chat_template.py NousResearch/Hermes-2-Pro-Llama-3-8B tool_use ) + --chat-template-file models/templates/NousResearch-Hermes-2-Pro-Llama-3-8B-tool_use.jinja llama-server --jinja -fa -hf bartowski/Hermes-3-Llama-3.1-8B-GGUF:Q4_K_M \ ---chat-template-file <( python scripts/get_chat_template.py NousResearch/Hermes-3-Llama-3.1-8B tool_use ) + --chat-template-file models/templates/NousResearch-Hermes-3-Llama-3.1-8B-tool_use.jinja llama-server --jinja -fa -hf bartowski/firefunction-v2-GGUF -hff firefunction-v2-IQ1_M.gguf \ ---chat-template-file <( python scripts/get_chat_template.py fireworks-ai/llama-3-firefunction-v2 tool_use ) + --chat-template-file models/templates/fireworks-ai-llama-3-firefunction-v2.jinja llama-server --jinja -fa -hf bartowski/c4ai-command-r7b-12-2024-GGUF:Q6_K_L \ ---chat-template-file <( python scripts/get_chat_template.py CohereForAI/c4ai-command-r7b-12-2024 tool_use ) + --chat-template-file models/templates/CohereForAI-c4ai-command-r7b-12-2024-tool_use.jinja # Generic format support 
llama-server --jinja -fa -hf bartowski/phi-4-GGUF:Q4_0 @@ -318,6 +320,8 @@ llama-server --jinja -fa -hf bartowski/gemma-2-2b-it-GGUF:Q8_0 llama-server --jinja -fa -hf bartowski/c4ai-command-r-v01-GGUF:Q2_K ``` +To get the official template from original HuggingFace repos, you can use [scripts/get_chat_template.py](../scripts/get_chat_template.py) (see examples invocations in [models/templates/README.md](../models/templates/README.md)) + > [!TIP] > If there is no official `tool_use` Jinja template, you may want to set `--chat-template chatml` to use a default that works with many models (YMMV!), or write your own (e.g. we provide a custom [llama-cpp-deepseek-r1.jinja](../models/templates/llama-cpp-deepseek-r1.jinja) for DeepSeek R1 distills) From 3ffbbd5ce130859be91909e9b77d4c1962a6be2c Mon Sep 17 00:00:00 2001 From: David Huang <1969802+hjc4869@users.noreply.github.com> Date: Thu, 6 Mar 2025 21:14:11 +0800 Subject: [PATCH 005/398] HIP: rocWMMA documentation and enabling in workflow builds (#12179) * Enable rocWMMA for Windows CI build * Enable for Ubuntu * GGML_HIP_ROCWMMA_FATTN documentation work --- .github/workflows/build.yml | 16 ++++++++++++++++ docs/build.md | 6 ++++++ 2 files changed, 22 insertions(+) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index b653b1f823278..7e4596ab2de67 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -467,6 +467,7 @@ jobs: run: | cmake -B build -S . \ -DCMAKE_HIP_COMPILER="$(hipconfig -l)/clang" \ + -DGGML_HIP_ROCWMMA_FATTN=ON \ -DGGML_HIP=ON cmake --build build --config Release -j $(nproc) @@ -476,6 +477,7 @@ jobs: cmake -B build2 -S . \ -DCMAKE_C_COMPILER=hipcc \ -DCMAKE_CXX_COMPILER=hipcc \ + -DGGML_HIP_ROCWMMA_FATTN=ON \ -DGGML_HIP=ON cmake --build build2 --config Release -j $(nproc) @@ -1202,6 +1204,11 @@ jobs: id: checkout uses: actions/checkout@v4 + - name: Clone rocWMMA repository + id: clone_rocwmma + run: | + git clone https://github.com/rocm/rocwmma --branch rocm-6.2.4 --depth 1 + - name: Install id: depends run: | @@ -1231,8 +1238,10 @@ jobs: cmake -G "Unix Makefiles" -B build -S . ` -DCMAKE_C_COMPILER="${env:HIP_PATH}\bin\clang.exe" ` -DCMAKE_CXX_COMPILER="${env:HIP_PATH}\bin\clang++.exe" ` + -DCMAKE_CXX_FLAGS="-Irocwmma/library/include/" ` -DCMAKE_BUILD_TYPE=Release ` -DGGML_HIP=ON ` + -DGGML_HIP_ROCWMMA_FATTN=ON ` -DGGML_RPC=ON cmake --build build -j ${env:NUMBER_OF_PROCESSORS} @@ -1251,6 +1260,11 @@ jobs: with: fetch-depth: 0 + - name: Clone rocWMMA repository + id: clone_rocwmma + run: | + git clone https://github.com/rocm/rocwmma --branch rocm-6.2.4 --depth 1 + - name: ccache uses: hendrikmuhs/ccache-action@v1.2.16 with: @@ -1280,8 +1294,10 @@ jobs: cmake -G "Unix Makefiles" -B build -S . ` -DCMAKE_C_COMPILER="${env:HIP_PATH}\bin\clang.exe" ` -DCMAKE_CXX_COMPILER="${env:HIP_PATH}\bin\clang++.exe" ` + -DCMAKE_CXX_FLAGS="-Irocwmma/library/include/" ` -DCMAKE_BUILD_TYPE=Release ` -DAMDGPU_TARGETS=${{ matrix.gpu_target }} ` + -DGGML_HIP_ROCWMMA_FATTN=ON ` -DGGML_HIP=ON ` -DGGML_RPC=ON cmake --build build -j ${env:NUMBER_OF_PROCESSORS} diff --git a/docs/build.md b/docs/build.md index b3ecf043d7e48..3d8333328fce0 100644 --- a/docs/build.md +++ b/docs/build.md @@ -235,6 +235,12 @@ You can download it from your Linux distro's package manager or from here: [ROCm On Linux it is also possible to use unified memory architecture (UMA) to share main memory between the CPU and integrated GPU by setting `-DGGML_HIP_UMA=ON`. 
However, this hurts performance for non-integrated GPUs (but enables working with integrated GPUs). + To enhance flash attention performance on RDNA3+ or CDNA architectures, you can utilize the rocWMMA library by enabling the `-DGGML_HIP_ROCWMMA_FATTN=ON` option. This requires rocWMMA headers to be installed on the build system. + + The rocWMMA library is included by default when installing the ROCm SDK using the `rocm` meta package provided by AMD. Alternatively, if you are not using the meta package, you can install the library using the `rocwmma-dev` or `rocwmma-devel` package, depending on your system's package manager. + + As an alternative, you can manually install the library by cloning it from the official [GitHub repository](https://github.com/ROCm/rocWMMA), checkout the corresponding version tag (e.g. `rocm-6.2.4`) and set `-DCMAKE_CXX_FLAGS="-I/library/include/"` in CMake. This also works under Windows despite not officially supported by AMD. + Note that if you get the following error: ``` clang: error: cannot find ROCm device library; provide its path via '--rocm-path' or '--rocm-device-lib-path', or pass '-nogpulib' to build without ROCm device library From 5220a16d18563d3ffc509002f0514415fdda4036 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Johannes=20G=C3=A4=C3=9Fler?= Date: Thu, 6 Mar 2025 18:45:09 +0100 Subject: [PATCH 006/398] CUDA: fix FA logic for PTX 7.0 and CC >= 7.5 (#12222) --- ggml/src/ggml-cuda/fattn.cu | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ggml/src/ggml-cuda/fattn.cu b/ggml/src/ggml-cuda/fattn.cu index 24f973056aa9a..2e72fc8fd380b 100644 --- a/ggml/src/ggml-cuda/fattn.cu +++ b/ggml/src/ggml-cuda/fattn.cu @@ -310,7 +310,7 @@ void ggml_cuda_flash_attn_ext(ggml_backend_cuda_context & ctx, ggml_tensor * dst } // The MMA implementation needs Turing or newer, use the old WMMA code for Volta: - if (cc == GGML_CUDA_CC_VOLTA) { + if (fp16_mma_available(cc) && !new_mma_available(cc)) { ggml_cuda_flash_attn_ext_wmma_f16(ctx, dst); return; } From 3d652bfddfba09022525067e672c3c145c074649 Mon Sep 17 00:00:00 2001 From: Lucas Moura Belo Date: Thu, 6 Mar 2025 16:15:13 -0300 Subject: [PATCH 007/398] readme : update bindings (#12229) --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index d73b0495d8e36..e371c44ed1b6b 100644 --- a/README.md +++ b/README.md @@ -157,6 +157,7 @@ Instructions for adding support for new models: [HOWTO-add-model.md](docs/develo - Guile Scheme: [guile_llama_cpp](https://savannah.nongnu.org/projects/guile-llama-cpp) - Swift [srgtuszy/llama-cpp-swift](https://github.com/srgtuszy/llama-cpp-swift) - Swift [ShenghaiWang/SwiftLlama](https://github.com/ShenghaiWang/SwiftLlama) +- Delphi [Embarcadero/llama-cpp-delphi](https://github.com/Embarcadero/llama-cpp-delphi) From 776f9e59cc8a85e840d1d4af8540d199c77190ac Mon Sep 17 00:00:00 2001 From: xiaofei Date: Fri, 7 Mar 2025 06:58:25 +0800 Subject: [PATCH 008/398] cmake : fix undefined reference errors for std::filesystem in ggml (#12092) (#12094) Signed-off-by: Ray Lee Co-authored-by: Ray Lee --- ggml/src/CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ggml/src/CMakeLists.txt b/ggml/src/CMakeLists.txt index cfd4ac54cacb2..52817510f6e75 100644 --- a/ggml/src/CMakeLists.txt +++ b/ggml/src/CMakeLists.txt @@ -236,7 +236,7 @@ add_library(ggml target_link_libraries(ggml PUBLIC ggml-base) if (CMAKE_SYSTEM_NAME MATCHES "Linux") - target_link_libraries(ggml PRIVATE dl) + target_link_libraries(ggml PRIVATE dl stdc++fs) endif() 
function(ggml_add_backend_library backend) From d76a86d967ef491d530400b08bc8ef8a14807936 Mon Sep 17 00:00:00 2001 From: lhez Date: Thu, 6 Mar 2025 16:20:35 -0800 Subject: [PATCH 009/398] opencl: Noncontiguous `norm`, `rms_norm`, disable `fp16` for some ops (#12217) * opencl: support noncontiguous `norm` * opencl: support noncontiguous `rms_norm` * opencl: disable fp16 for `ADD`, `MUL`, `SCALE`, `RELU`, `GELU`, `SILU`, `CLAMP` --- ggml/src/ggml-opencl/ggml-opencl.cpp | 70 +++++++++++++-------- ggml/src/ggml-opencl/kernels/ggml-opencl.cl | 26 ++++++-- 2 files changed, 65 insertions(+), 31 deletions(-) diff --git a/ggml/src/ggml-opencl/ggml-opencl.cpp b/ggml/src/ggml-opencl/ggml-opencl.cpp index bc2ea06b59ed4..b85a895c45c43 100644 --- a/ggml/src/ggml-opencl/ggml-opencl.cpp +++ b/ggml/src/ggml-opencl/ggml-opencl.cpp @@ -1007,17 +1007,18 @@ static bool ggml_opencl_supports_op(ggml_backend_dev_t dev, const struct ggml_te case GGML_OP_ADD: case GGML_OP_SCALE: case GGML_OP_MUL: - return true; + return op->src[0]->type == GGML_TYPE_F32; case GGML_OP_UNARY: switch (ggml_get_unary_op(op)) { case GGML_UNARY_OP_GELU: case GGML_UNARY_OP_SILU: case GGML_UNARY_OP_RELU: - return ggml_is_contiguous(op->src[0]); + return ggml_is_contiguous(op->src[0]) && op->src[0]->type == GGML_TYPE_F32; default: return false; } case GGML_OP_CLAMP: + return op->src[0]->type == GGML_TYPE_F32; case GGML_OP_SOFT_MAX: case GGML_OP_NORM: case GGML_OP_RMS_NORM: @@ -2573,26 +2574,33 @@ static void ggml_cl_norm(ggml_backend_t backend, const ggml_tensor * src0, const memcpy(&eps, dst->op_params, sizeof(float)); const int ne00 = src0 ? src0->ne[0] : 0; - const cl_ulong nb01 = src0 ? src0->nb[1] : 0; + const int ne01 = src0 ? src0->ne[1] : 0; + const int ne02 = src0 ? src0->ne[2] : 0; + const int ne03 = src0 ? src0->ne[3] : 0; - GGML_ASSERT(ggml_is_contiguous_1(src0)); + const cl_ulong nb01 = src0 ? src0->nb[1] : 0; + const cl_ulong nb02 = src0 ? src0->nb[2] : 0; + const cl_ulong nb03 = src0 ? 
src0->nb[3] : 0; const int nth = MIN(64, ne00); cl_kernel kernel = backend_ctx->kernel_norm; - CL_CHECK(clSetKernelArg(kernel, 0, sizeof(cl_mem), &extra0->data_device)); - CL_CHECK(clSetKernelArg(kernel, 1, sizeof(cl_ulong), &offset0)); - CL_CHECK(clSetKernelArg(kernel, 2, sizeof(cl_mem), &extrad->data_device)); - CL_CHECK(clSetKernelArg(kernel, 3, sizeof(cl_ulong), &offsetd)); - CL_CHECK(clSetKernelArg(kernel, 4, sizeof(int), &ne00)); - CL_CHECK(clSetKernelArg(kernel, 5, sizeof(cl_ulong), &nb01)); - CL_CHECK(clSetKernelArg(kernel, 6, sizeof(float), &eps)); - CL_CHECK(clSetKernelArg(kernel, 7, sizeof(float)*nth, NULL)); - - const int64_t nrows = ggml_nrows(src0); + CL_CHECK(clSetKernelArg(kernel, 0, sizeof(cl_mem), &extra0->data_device)); + CL_CHECK(clSetKernelArg(kernel, 1, sizeof(cl_ulong), &offset0)); + CL_CHECK(clSetKernelArg(kernel, 2, sizeof(cl_mem), &extrad->data_device)); + CL_CHECK(clSetKernelArg(kernel, 3, sizeof(cl_ulong), &offsetd)); + CL_CHECK(clSetKernelArg(kernel, 4, sizeof(int), &ne00)); + CL_CHECK(clSetKernelArg(kernel, 5, sizeof(int), &ne01)); + CL_CHECK(clSetKernelArg(kernel, 6, sizeof(int), &ne02)); + CL_CHECK(clSetKernelArg(kernel, 7, sizeof(int), &ne03)); + CL_CHECK(clSetKernelArg(kernel, 8, sizeof(cl_ulong), &nb01)); + CL_CHECK(clSetKernelArg(kernel, 9, sizeof(cl_ulong), &nb02)); + CL_CHECK(clSetKernelArg(kernel, 10, sizeof(cl_ulong), &nb03)); + CL_CHECK(clSetKernelArg(kernel, 11, sizeof(float), &eps)); + CL_CHECK(clSetKernelArg(kernel, 12, sizeof(float)*nth, NULL)); - size_t global_work_size[] = {(size_t)nrows*nth, 1, 1}; + size_t global_work_size[] = {(size_t)ne01*nth, (size_t)ne02, (size_t)ne03}; size_t local_work_size[] = {(size_t)nth, 1, 1}; #ifdef GGML_OPENCL_PROFILING @@ -2630,16 +2638,19 @@ static void ggml_cl_rms_norm(ggml_backend_t backend, const ggml_tensor * src0, c memcpy(&eps, dst->op_params, sizeof(float)); const int ne00 = src0 ? src0->ne[0] : 0; + const int ne01 = src0 ? src0->ne[1] : 0; + const int ne02 = src0 ? src0->ne[2] : 0; + const int ne03 = src0 ? src0->ne[3] : 0; + const cl_ulong nb01 = src0 ? src0->nb[1] : 0; + const cl_ulong nb02 = src0 ? src0->nb[2] : 0; + const cl_ulong nb03 = src0 ? 
src0->nb[3] : 0; GGML_ASSERT(ne00 % 4 == 0); - GGML_ASSERT(ggml_is_contiguous_1(src0)); const int nth = MIN(64, ne00); - const int64_t nrows = ggml_nrows(src0); - - size_t global_work_size[] = {(size_t)nrows*nth, 1, 1}; + size_t global_work_size[] = {(size_t)ne01*nth, (size_t)ne02, (size_t)ne03}; size_t local_work_size[] = {(size_t)nth, 1, 1}; cl_kernel kernel = backend_ctx->kernel_rms_norm; @@ -2654,15 +2665,20 @@ static void ggml_cl_rms_norm(ggml_backend_t backend, const ggml_tensor * src0, c sizeof(local_work_size), local_work_size, sizeof(size_t), &sgs, NULL)); - CL_CHECK(clSetKernelArg(kernel, 0, sizeof(cl_mem), &extra0->data_device)); - CL_CHECK(clSetKernelArg(kernel, 1, sizeof(cl_ulong), &offset0)); - CL_CHECK(clSetKernelArg(kernel, 2, sizeof(cl_mem), &extrad->data_device)); - CL_CHECK(clSetKernelArg(kernel, 3, sizeof(cl_ulong), &offsetd)); - CL_CHECK(clSetKernelArg(kernel, 4, sizeof(int), &ne00)); - CL_CHECK(clSetKernelArg(kernel, 5, sizeof(cl_ulong), &nb01)); - CL_CHECK(clSetKernelArg(kernel, 6, sizeof(float), &eps)); + CL_CHECK(clSetKernelArg(kernel, 0, sizeof(cl_mem), &extra0->data_device)); + CL_CHECK(clSetKernelArg(kernel, 1, sizeof(cl_ulong), &offset0)); + CL_CHECK(clSetKernelArg(kernel, 2, sizeof(cl_mem), &extrad->data_device)); + CL_CHECK(clSetKernelArg(kernel, 3, sizeof(cl_ulong), &offsetd)); + CL_CHECK(clSetKernelArg(kernel, 4, sizeof(int), &ne00)); + CL_CHECK(clSetKernelArg(kernel, 5, sizeof(int), &ne01)); + CL_CHECK(clSetKernelArg(kernel, 6, sizeof(int), &ne02)); + CL_CHECK(clSetKernelArg(kernel, 7, sizeof(int), &ne03)); + CL_CHECK(clSetKernelArg(kernel, 8, sizeof(cl_ulong), &nb01)); + CL_CHECK(clSetKernelArg(kernel, 9, sizeof(cl_ulong), &nb02)); + CL_CHECK(clSetKernelArg(kernel, 10, sizeof(cl_ulong), &nb03)); + CL_CHECK(clSetKernelArg(kernel, 11, sizeof(float), &eps)); // This is local memory - the size depends on subgroup size. 
- CL_CHECK(clSetKernelArg(kernel, 7, sizeof(float)*nth/sgs, NULL)); + CL_CHECK(clSetKernelArg(kernel, 12, sizeof(float)*nth/sgs, NULL)); #ifdef GGML_OPENCL_PROFILING cl_event evt; diff --git a/ggml/src/ggml-opencl/kernels/ggml-opencl.cl b/ggml/src/ggml-opencl/kernels/ggml-opencl.cl index 8882a8c9c6225..1d43642a983be 100644 --- a/ggml/src/ggml-opencl/kernels/ggml-opencl.cl +++ b/ggml/src/ggml-opencl/kernels/ggml-opencl.cl @@ -506,14 +506,23 @@ kernel void kernel_norm( global float * dst, ulong offsetd, int ne00, + int ne01, + int ne02, + int ne03, ulong nb01, + ulong nb02, + ulong nb03, float eps, local float * sum ) { src0 = (global void*)((global char*)src0 + offset0); dst = (global void*)((global char*)dst + offsetd); - global float * x = (global float *) ((global char *) src0 + get_group_id(0)*nb01); + int i03 = get_group_id(2); + int i02 = get_group_id(1); + int i01 = get_group_id(0); + + global float * x = (global float *) ((global char *) src0 + i03*nb03 + i02*nb02 + i01*nb01); // MEAN // parallel sum @@ -533,7 +542,7 @@ kernel void kernel_norm( // recenter and VARIANCE barrier(CLK_LOCAL_MEM_FENCE); - global float * y = dst + get_group_id(0)*ne00; + global float * y = dst + i03*ne02*ne01*ne00 + i02*ne01*ne00 + i01*ne00; sum[get_local_id(0)] = 0.0f; for (int i00 = get_local_id(0); i00 < ne00; i00 += get_local_size(0)) { y[i00] = x[i00] - mean; @@ -566,14 +575,23 @@ kernel void kernel_rms_norm( global float * dst, ulong offsetd, int ne00, + int ne01, + int ne02, + int ne03, ulong nb01, + ulong nb02, + ulong nb03, float eps, local float * sum // Note, the size depends on number of subgroups ) { src0 = (global void*)((global char*)src0 + offset0); dst = (global float*)((global char*)dst + offsetd); - global float4 * x = (global float4 *) ((global char *) src0 + get_group_id(0)*nb01); + int i03 = get_group_id(2); + int i02 = get_group_id(1); + int i01 = get_group_id(0); + + global float4 * x = (global float4 *) ((global char *) src0 + i03*nb03 + i02*nb02 + i01*nb01); global float * x_scalar = (global float *) x; float4 sumf = 0; float all_sum = 0; @@ -607,7 +625,7 @@ kernel void kernel_rms_norm( const float mean = sum[0]; const float scale = 1.0f/sqrt(mean + eps); - global float4 * y = (global float4 *) (dst + get_group_id(0)*ne00); + global float4 * y = (global float4 *) (dst + i03*ne02*ne01*ne00 + i02*ne01*ne00 + i01*ne00); global float * y_scalar = (global float *) y; for (int i00 = get_local_id(0); i00 < ne00/4; i00 += get_local_size(0)) { y[i00] = x[i00] * scale; From d6c95b0740510231b3797b80d6d3440d8fe188b6 Mon Sep 17 00:00:00 2001 From: Daniel Bevenius Date: Fri, 7 Mar 2025 06:23:16 +0100 Subject: [PATCH 010/398] metal : fix default.metallib build (#12224) This commit updates the custom command to build the default.metallib file to use the correct path to ../ggml-common.h by using the variable METALLIB_COMMON. The motivation for this change is that currently when building and specifying GGML_METAL_EMBED_LIBRARY=OFF the following error is generated: ```console [ 11%] Linking CXX shared library ../../bin/libggml.dylib [ 11%] Built target ggml make[2]: *** No rule to make target `ggml/src/ggml-metal/ggml-common.h', needed by `bin/default.metallib'. Stop. 
make[1]: *** [ggml/src/ggml-metal/CMakeFiles/ggml-metal-lib.dir/all] Error 2 ``` With the above change the build could progress but there was a follow on error about not being able to find the ggml-common.h file in ggml-metal.metal where is was included as a relative path: ```console [ 11%] Compiling Metal kernels /Users/danbev/work/llama.cpp/build/bin/ggml-metal.metal:6:10: error: '../ggml-common.h' file not found, did you mean 'ggml-common.h'? ^~~~~~~~~~~~~~~~~~ "ggml-common.h" 1 error generated. ``` Removing the relative path then allowed the build to complete successfully. --- ggml/src/ggml-metal/CMakeLists.txt | 4 ++-- ggml/src/ggml-metal/ggml-metal.metal | 3 +-- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/ggml/src/ggml-metal/CMakeLists.txt b/ggml/src/ggml-metal/CMakeLists.txt index 89fcde2faa4c0..be3fb3fa95183 100644 --- a/ggml/src/ggml-metal/CMakeLists.txt +++ b/ggml/src/ggml-metal/CMakeLists.txt @@ -27,12 +27,12 @@ configure_file(../ggml-common.h ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/ggml-common.h configure_file(ggml-metal.metal ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/ggml-metal.metal COPYONLY) configure_file(ggml-metal-impl.h ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/ggml-metal-impl.h COPYONLY) +set(METALLIB_COMMON "${CMAKE_CURRENT_SOURCE_DIR}/../ggml-common.h") if (GGML_METAL_EMBED_LIBRARY) enable_language(ASM) add_compile_definitions(GGML_METAL_EMBED_LIBRARY) - set(METALLIB_COMMON "${CMAKE_CURRENT_SOURCE_DIR}/../ggml-common.h") set(METALLIB_SOURCE "${CMAKE_CURRENT_SOURCE_DIR}/ggml-metal.metal") set(METALLIB_IMPL "${CMAKE_CURRENT_SOURCE_DIR}/ggml-metal-impl.h") @@ -93,7 +93,7 @@ else() COMMAND rm -f ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/ggml-metal.air COMMAND rm -f ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/ggml-common.h COMMAND rm -f ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/ggml-metal.metal - DEPENDS ggml-metal.metal ggml-common.h + DEPENDS ggml-metal.metal ${METALLIB_COMMON} COMMENT "Compiling Metal kernels" ) diff --git a/ggml/src/ggml-metal/ggml-metal.metal b/ggml/src/ggml-metal/ggml-metal.metal index d092a16906155..c46a13050891f 100644 --- a/ggml/src/ggml-metal/ggml-metal.metal +++ b/ggml/src/ggml-metal/ggml-metal.metal @@ -3,8 +3,7 @@ #if defined(GGML_METAL_EMBED_LIBRARY) __embed_ggml-common.h__ #else -// TODO: this should not be a relative path, but can't figure out how to set Metal include paths in Package.swift -#include "../ggml-common.h" +#include "ggml-common.h" #endif #include "ggml-metal-impl.h" From f1648e91cf6c52e9593810aa70857e412d474c09 Mon Sep 17 00:00:00 2001 From: David Huang <1969802+hjc4869@users.noreply.github.com> Date: Fri, 7 Mar 2025 15:06:08 +0800 Subject: [PATCH 011/398] HIP: fix rocWMMA build flags under Windows (#12230) --- .github/workflows/build.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 7e4596ab2de67..f2c81c0c26120 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -1238,7 +1238,7 @@ jobs: cmake -G "Unix Makefiles" -B build -S . ` -DCMAKE_C_COMPILER="${env:HIP_PATH}\bin\clang.exe" ` -DCMAKE_CXX_COMPILER="${env:HIP_PATH}\bin\clang++.exe" ` - -DCMAKE_CXX_FLAGS="-Irocwmma/library/include/" ` + -DCMAKE_CXX_FLAGS="-I$($PWD.Path.Replace('\', '/'))/rocwmma/library/include/" ` -DCMAKE_BUILD_TYPE=Release ` -DGGML_HIP=ON ` -DGGML_HIP_ROCWMMA_FATTN=ON ` @@ -1294,7 +1294,7 @@ jobs: cmake -G "Unix Makefiles" -B build -S . 
` -DCMAKE_C_COMPILER="${env:HIP_PATH}\bin\clang.exe" ` -DCMAKE_CXX_COMPILER="${env:HIP_PATH}\bin\clang++.exe" ` - -DCMAKE_CXX_FLAGS="-Irocwmma/library/include/" ` + -DCMAKE_CXX_FLAGS="-I$($PWD.Path.Replace('\', '/'))/rocwmma/library/include/" ` -DCMAKE_BUILD_TYPE=Release ` -DAMDGPU_TARGETS=${{ matrix.gpu_target }} ` -DGGML_HIP_ROCWMMA_FATTN=ON ` From 5e2d57b2b2e43eadbe6d66ba3e873a824b95e725 Mon Sep 17 00:00:00 2001 From: BB-fat <45072480+BB-fat@users.noreply.github.com> Date: Fri, 7 Mar 2025 15:35:57 +0800 Subject: [PATCH 012/398] metal : simplify kernel arguments using a struct (#3229) (#12194) * metal : refactor im2col parameters into a struct * metal: Change im2col offset types from int32_t to uint64_t to support larger memory offsets * metal : refactor sum_rows parameters into a struct * metal : refactor soft_max parameters into a struct * metal : refactor diag_mask_inf parameters into a struct * metal : refactor ssm_conv parameters into a struct * metal : refactor ssm_scan parameters into a struct * metal : refactor get_rows parameters into a struct * metal : refactor group_norm parameters into a struct * metal : refactor conv_transpose_1d parameters into a struct * metal : refactor upscale parameters into a struct * metal : refactor pad parameters into a struct * metal : refactor pad_reflect_1d parameters into a struct * metal : refactor arange parameters into a struct * metal : refactor timestep_embedding parameters into a struct * metal : refactor argsort parameters into a struct * metal : refactor leaky_relu parameters into a struct * metal : refactor pool_2d parameters into a struct * metal : fix trailing whitespace --------- Co-authored-by: alexju --- ggml/src/ggml-metal/ggml-metal-impl.h | 235 ++++++++++ ggml/src/ggml-metal/ggml-metal.m | 466 ++++++++++--------- ggml/src/ggml-metal/ggml-metal.metal | 627 ++++++++------------------ 3 files changed, 685 insertions(+), 643 deletions(-) diff --git a/ggml/src/ggml-metal/ggml-metal-impl.h b/ggml/src/ggml-metal/ggml-metal-impl.h index e3dc25f1686fb..a58c474eb007e 100644 --- a/ggml/src/ggml-metal/ggml-metal-impl.h +++ b/ggml/src/ggml-metal/ggml-metal-impl.h @@ -285,4 +285,239 @@ typedef struct { float eps; } ggml_metal_kargs_rms_norm; +typedef struct { + int64_t ne00; + int64_t ne01; + int64_t ne02; + uint64_t nb00; + uint64_t nb01; + uint64_t nb02; + int32_t n_groups; + float eps; +} ggml_metal_kargs_group_norm; + +typedef struct { + int32_t IC; + int32_t IL; + int32_t K; + int32_t s0; + uint64_t nb0; + uint64_t nb1; +} ggml_metal_kargs_conv_transpose_1d; + +typedef struct { + uint64_t ofs0; + uint64_t ofs1; + int32_t IW; + int32_t IH; + int32_t CHW; + int32_t s0; + int32_t s1; + int32_t p0; + int32_t p1; + int32_t d0; + int32_t d1; + int32_t N; + int32_t KH; + int32_t KW; + int32_t KHW; // KH * KW, pre-computed on CPU to save GPU resources +} ggml_metal_kargs_im2col; + +typedef struct { + int64_t ne00; + int64_t ne01; + int64_t ne02; + int64_t ne03; + uint64_t nb00; + uint64_t nb01; + uint64_t nb02; + uint64_t nb03; + int64_t ne10; + int64_t ne11; + int64_t ne12; + int64_t ne13; + uint64_t nb10; + uint64_t nb11; + uint64_t nb12; + uint64_t nb13; + int64_t ne0; + int64_t ne1; + int64_t ne2; + int64_t ne3; + uint64_t nb0; + uint64_t nb1; + uint64_t nb2; + uint64_t nb3; +} ggml_metal_kargs_sum_rows; + +typedef struct { + int64_t ne00; + int64_t ne01; + int64_t ne02; + float scale; + float max_bias; + float m0; + float m1; + uint32_t n_head_log2; +} ggml_metal_kargs_soft_max; + +typedef struct { + int64_t ne00; + int64_t ne01; + int 
n_past; +} ggml_metal_kargs_diag_mask_inf; + +typedef struct { + int64_t ne00; + int64_t ne01; + int64_t ne02; + uint64_t nb00; + uint64_t nb01; + uint64_t nb02; + int64_t ne10; + int64_t ne11; + uint64_t nb10; + uint64_t nb11; + int64_t ne0; + int64_t ne1; + int64_t ne2; + uint64_t nb0; + uint64_t nb1; + uint64_t nb2; +} ggml_metal_kargs_ssm_conv; + +typedef struct { + int64_t d_state; + int64_t d_inner; + int64_t n_seq_tokens; + int64_t n_seqs; + uint64_t nb00; + uint64_t nb01; + uint64_t nb02; + uint64_t nb10; + uint64_t nb11; + uint64_t nb12; + uint64_t nb13; + uint64_t nb20; + uint64_t nb21; + uint64_t nb22; + uint64_t nb30; + uint64_t nb31; + uint64_t nb40; + uint64_t nb41; + uint64_t nb42; + uint64_t nb50; + uint64_t nb51; + uint64_t nb52; +} ggml_metal_kargs_ssm_scan; + +typedef struct { + int64_t ne00; + uint64_t nb01; + uint64_t nb02; + int64_t ne10; + uint64_t nb10; + uint64_t nb11; + uint64_t nb1; + uint64_t nb2; +} ggml_metal_kargs_get_rows; + +typedef struct { + int64_t ne00; + int64_t ne01; + int64_t ne02; + int64_t ne03; + uint64_t nb00; + uint64_t nb01; + uint64_t nb02; + uint64_t nb03; + int64_t ne0; + int64_t ne1; + int64_t ne2; + int64_t ne3; + uint64_t nb0; + uint64_t nb1; + uint64_t nb2; + uint64_t nb3; + float sf0; + float sf1; + float sf2; + float sf3; +} ggml_metal_kargs_upscale; + +typedef struct { + int64_t ne00; + int64_t ne01; + int64_t ne02; + int64_t ne03; + uint64_t nb00; + uint64_t nb01; + uint64_t nb02; + uint64_t nb03; + int64_t ne0; + int64_t ne1; + int64_t ne2; + int64_t ne3; + uint64_t nb0; + uint64_t nb1; + uint64_t nb2; + uint64_t nb3; +} ggml_metal_kargs_pad; + +typedef struct { + int64_t ne00; + int64_t ne01; + int64_t ne02; + int64_t ne03; + uint64_t nb00; + uint64_t nb01; + uint64_t nb02; + uint64_t nb03; + int64_t ne0; + int64_t ne1; + int64_t ne2; + int64_t ne3; + uint64_t nb0; + uint64_t nb1; + uint64_t nb2; + uint64_t nb3; + int32_t p0; + int32_t p1; +} ggml_metal_kargs_pad_reflect_1d; + +typedef struct { + uint64_t nb1; + int dim; + int max_period; +} ggml_metal_kargs_timestep_embedding; + +typedef struct { + float slope; +} ggml_metal_kargs_leaky_relu; + +typedef struct { + int64_t ncols; + int64_t ncols_pad; +} ggml_metal_kargs_argsort; + +typedef struct { + int64_t ne0; + float start; + float step; +} ggml_metal_kargs_arange; + +typedef struct { + int32_t k0; + int32_t k1; + int32_t s0; + int32_t s1; + int32_t p0; + int32_t p1; + int64_t IH; + int64_t IW; + int64_t OH; + int64_t OW; + int64_t parallel_elements; +} ggml_metal_kargs_pool_2d; + #endif // GGML_METAL_IMPL diff --git a/ggml/src/ggml-metal/ggml-metal.m b/ggml/src/ggml-metal/ggml-metal.m index 1f45ebad146df..1158b285c19bc 100644 --- a/ggml/src/ggml-metal/ggml-metal.m +++ b/ggml/src/ggml-metal/ggml-metal.m @@ -1945,34 +1945,38 @@ static void ggml_metal_encode_node( id pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_SUM_ROWS].pipeline; - // TODO: add ggml_metal_kargs struct + + ggml_metal_kargs_sum_rows args = { + /*.ne00 =*/ ne00, + /*.ne01 =*/ ne01, + /*.ne02 =*/ ne02, + /*.ne03 =*/ ne03, + /*.nb00 =*/ nb00, + /*.nb01 =*/ nb01, + /*.nb02 =*/ nb02, + /*.nb03 =*/ nb03, + /*.ne10 =*/ ne10, + /*.ne11 =*/ ne11, + /*.ne12 =*/ ne12, + /*.ne13 =*/ ne13, + /*.nb10 =*/ nb10, + /*.nb11 =*/ nb11, + /*.nb12 =*/ nb12, + /*.nb13 =*/ nb13, + /*.ne0 =*/ ne0, + /*.ne1 =*/ ne1, + /*.ne2 =*/ ne2, + /*.ne3 =*/ ne3, + /*.nb0 =*/ nb0, + /*.nb1 =*/ nb1, + /*.nb2 =*/ nb2, + /*.nb3 =*/ nb3, + }; + [encoder setComputePipelineState:pipeline]; [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0]; [encoder 
setBuffer:id_dst offset:offs_dst atIndex:1]; - [encoder setBytes:&ne00 length:sizeof(ne00) atIndex:2]; - [encoder setBytes:&ne01 length:sizeof(ne01) atIndex:3]; - [encoder setBytes:&ne02 length:sizeof(ne02) atIndex:4]; - [encoder setBytes:&ne03 length:sizeof(ne03) atIndex:5]; - [encoder setBytes:&nb00 length:sizeof(nb00) atIndex:6]; - [encoder setBytes:&nb01 length:sizeof(nb01) atIndex:7]; - [encoder setBytes:&nb02 length:sizeof(nb02) atIndex:8]; - [encoder setBytes:&nb03 length:sizeof(nb03) atIndex:9]; - [encoder setBytes:&ne10 length:sizeof(ne10) atIndex:10]; - [encoder setBytes:&ne11 length:sizeof(ne11) atIndex:11]; - [encoder setBytes:&ne12 length:sizeof(ne12) atIndex:12]; - [encoder setBytes:&ne13 length:sizeof(ne13) atIndex:13]; - [encoder setBytes:&nb10 length:sizeof(nb10) atIndex:14]; - [encoder setBytes:&nb11 length:sizeof(nb11) atIndex:15]; - [encoder setBytes:&nb12 length:sizeof(nb12) atIndex:16]; - [encoder setBytes:&nb13 length:sizeof(nb13) atIndex:17]; - [encoder setBytes:&ne0 length:sizeof(ne0) atIndex:18]; - [encoder setBytes:&ne1 length:sizeof(ne1) atIndex:19]; - [encoder setBytes:&ne2 length:sizeof(ne2) atIndex:20]; - [encoder setBytes:&ne3 length:sizeof(ne3) atIndex:21]; - [encoder setBytes:&nb0 length:sizeof(nb0) atIndex:22]; - [encoder setBytes:&nb1 length:sizeof(nb1) atIndex:23]; - [encoder setBytes:&nb2 length:sizeof(nb2) atIndex:24]; - [encoder setBytes:&nb3 length:sizeof(nb3) atIndex:25]; + [encoder setBytes:&args length:sizeof(args) atIndex:2]; [encoder dispatchThreadgroups:MTLSizeMake(ne01, ne02, ne03) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)]; } break; @@ -2021,8 +2025,17 @@ static void ggml_metal_encode_node( const float m0 = powf(2.0f, -(max_bias ) / n_head_log2); const float m1 = powf(2.0f, -(max_bias / 2.0f) / n_head_log2); - // TODO: add ggml_metal_kargs struct - // TODO: optimize (see https://github.com/ggml-org/llama.cpp/pull/10238/commits/7941b6b9ec29a2866fec6fa6c51612515ca509f6) + ggml_metal_kargs_soft_max args = { + /*.ne00 =*/ ne00, + /*.ne01 =*/ ne01, + /*.ne02 =*/ ne02, + /*.scale =*/ scale, + /*.max_bias =*/ max_bias, + /*.m0 =*/ m0, + /*.m1 =*/ m1, + /*.n_head_log2 =*/ n_head_log2, + }; + [encoder setComputePipelineState:pipeline]; [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0]; if (id_src1) { @@ -2031,14 +2044,7 @@ static void ggml_metal_encode_node( [encoder setBuffer:id_src0 offset:offs_src0 atIndex:1]; } [encoder setBuffer:id_dst offset:offs_dst atIndex:2]; - [encoder setBytes:&ne00 length:sizeof(ne00) atIndex:3]; - [encoder setBytes:&ne01 length:sizeof(ne01) atIndex:4]; - [encoder setBytes:&ne02 length:sizeof(ne02) atIndex:5]; - [encoder setBytes:&scale length:sizeof(scale) atIndex:6]; - [encoder setBytes:&max_bias length:sizeof(max_bias) atIndex:7]; - [encoder setBytes:&m0 length:sizeof(m0) atIndex:8]; - [encoder setBytes:&m1 length:sizeof(m1) atIndex:9]; - [encoder setBytes:&n_head_log2 length:sizeof(n_head_log2) atIndex:10]; + [encoder setBytes:&args length:sizeof(args) atIndex:3]; [encoder setThreadgroupMemoryLength:32*sizeof(float) atIndex:0]; @@ -2056,13 +2062,16 @@ static void ggml_metal_encode_node( pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_DIAG_MASK_INF].pipeline; } - // TODO: add ggml_metal_kargs struct + ggml_metal_kargs_diag_mask_inf args = { + /*.ne00 =*/ ne00, + /*.ne01 =*/ ne01, + /*.n_past =*/ n_past, + }; + [encoder setComputePipelineState:pipeline]; [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0]; [encoder setBuffer:id_dst offset:offs_dst atIndex:1]; - [encoder setBytes:&ne00 length:sizeof(ne00) 
atIndex:2]; - [encoder setBytes:&ne01 length:sizeof(ne01) atIndex:3]; - [encoder setBytes:&n_past length:sizeof(int) atIndex:4]; + [encoder setBytes:&args length:sizeof(args) atIndex:2]; if (ne00%8 == 0) { [encoder dispatchThreadgroups:MTLSizeMake(ne00*ne01*ne02/8, 1, 1) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)]; @@ -2081,27 +2090,30 @@ static void ggml_metal_encode_node( id pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_SSM_CONV_F32].pipeline; - // TODO: add ggml_metal_kargs struct + ggml_metal_kargs_ssm_conv args = { + /*.ne00 =*/ ne00, + /*.ne01 =*/ ne01, + /*.ne02 =*/ ne02, + /*.nb00 =*/ nb00, + /*.nb01 =*/ nb01, + /*.nb02 =*/ nb02, + /*.ne10 =*/ ne10, + /*.ne11 =*/ ne11, + /*.nb10 =*/ nb10, + /*.nb11 =*/ nb11, + /*.ne0 =*/ ne0, + /*.ne1 =*/ ne1, + /*.ne2 =*/ ne2, + /*.nb0 =*/ nb0, + /*.nb1 =*/ nb1, + /*.nb2 =*/ nb2, + }; + [encoder setComputePipelineState:pipeline]; [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0]; [encoder setBuffer:id_src1 offset:offs_src1 atIndex:1]; [encoder setBuffer:id_dst offset:offs_dst atIndex:2]; - [encoder setBytes:&ne00 length:sizeof(ne00) atIndex:3]; - [encoder setBytes:&ne01 length:sizeof(ne01) atIndex:4]; - [encoder setBytes:&ne02 length:sizeof(ne02) atIndex:5]; - [encoder setBytes:&nb00 length:sizeof(nb00) atIndex:6]; - [encoder setBytes:&nb01 length:sizeof(nb01) atIndex:7]; - [encoder setBytes:&nb02 length:sizeof(nb02) atIndex:8]; - [encoder setBytes:&ne10 length:sizeof(ne10) atIndex:9]; - [encoder setBytes:&ne11 length:sizeof(ne11) atIndex:10]; - [encoder setBytes:&nb10 length:sizeof(nb10) atIndex:11]; - [encoder setBytes:&nb11 length:sizeof(nb11) atIndex:12]; - [encoder setBytes:&ne0 length:sizeof(ne0) atIndex:13]; - [encoder setBytes:&ne1 length:sizeof(ne1) atIndex:14]; - [encoder setBytes:&ne2 length:sizeof(ne2) atIndex:15]; - [encoder setBytes:&nb0 length:sizeof(nb0) atIndex:16]; - [encoder setBytes:&nb1 length:sizeof(nb1) atIndex:17]; - [encoder setBytes:&nb2 length:sizeof(nb2) atIndex:18]; + [encoder setBytes:&args length:sizeof(args) atIndex:3]; [encoder dispatchThreadgroups:MTLSizeMake(ne01, ne1, ne02) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)]; } break; @@ -2152,7 +2164,31 @@ static void ggml_metal_encode_node( id pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_SSM_SCAN_F32].pipeline; - // TODO: add ggml_metal_kargs struct + ggml_metal_kargs_ssm_scan args = { + /*.d_state =*/ d_state, + /*.d_inner =*/ d_inner, + /*.n_seq_tokens =*/ n_seq_tokens, + /*.n_seqs =*/ n_seqs, + /*.nb00 =*/ nb00, + /*.nb01 =*/ nb01, + /*.nb02 =*/ nb02, + /*.nb10 =*/ nb10, + /*.nb11 =*/ nb11, + /*.nb12 =*/ nb12, + /*.nb13 =*/ nb13, + /*.nb20 =*/ nb20, + /*.nb21 =*/ nb21, + /*.nb22 =*/ nb22, + /*.nb30 =*/ nb30, + /*.nb31 =*/ nb31, + /*.nb40 =*/ nb40, + /*.nb41 =*/ nb41, + /*.nb42 =*/ nb42, + /*.nb50 =*/ nb50, + /*.nb51 =*/ nb51, + /*.nb52 =*/ nb52, + }; + [encoder setComputePipelineState:pipeline]; [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0]; [encoder setBuffer:id_src1 offset:offs_src1 atIndex:1]; @@ -2161,30 +2197,7 @@ static void ggml_metal_encode_node( [encoder setBuffer:id_src4 offset:offs_src4 atIndex:4]; [encoder setBuffer:id_src5 offset:offs_src5 atIndex:5]; [encoder setBuffer:id_dst offset:offs_dst atIndex:6]; - - [encoder setBytes:&d_state length:sizeof(d_state) atIndex:7]; - [encoder setBytes:&d_inner length:sizeof(d_inner) atIndex:8]; - [encoder setBytes:&n_seq_tokens length:sizeof(n_seq_tokens) atIndex:9]; - [encoder setBytes:&n_seqs length:sizeof(n_seqs) atIndex:10]; - - [encoder setBytes:&nb00 length:sizeof(nb00) atIndex:11]; - 
[encoder setBytes:&nb01 length:sizeof(nb01) atIndex:12]; - [encoder setBytes:&nb02 length:sizeof(nb02) atIndex:13]; - [encoder setBytes:&nb10 length:sizeof(nb10) atIndex:14]; - [encoder setBytes:&nb11 length:sizeof(nb11) atIndex:15]; - [encoder setBytes:&nb12 length:sizeof(nb12) atIndex:16]; - [encoder setBytes:&nb13 length:sizeof(nb13) atIndex:17]; - [encoder setBytes:&nb20 length:sizeof(nb20) atIndex:18]; - [encoder setBytes:&nb21 length:sizeof(nb21) atIndex:19]; - [encoder setBytes:&nb22 length:sizeof(nb22) atIndex:20]; - [encoder setBytes:&nb30 length:sizeof(nb30) atIndex:21]; - [encoder setBytes:&nb31 length:sizeof(nb31) atIndex:22]; - [encoder setBytes:&nb40 length:sizeof(nb40) atIndex:23]; - [encoder setBytes:&nb41 length:sizeof(nb41) atIndex:24]; - [encoder setBytes:&nb42 length:sizeof(nb42) atIndex:25]; - [encoder setBytes:&nb50 length:sizeof(nb50) atIndex:26]; - [encoder setBytes:&nb51 length:sizeof(nb51) atIndex:27]; - [encoder setBytes:&nb52 length:sizeof(nb52) atIndex:28]; + [encoder setBytes:&args length:sizeof(args) atIndex:7]; [encoder dispatchThreadgroups:MTLSizeMake(d_inner, n_seqs, 1) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)]; } break; @@ -3041,19 +3054,22 @@ static void ggml_metal_encode_node( default: GGML_ABORT("not implemented"); } - // TODO: add ggml_metal_kargs struct + ggml_metal_kargs_get_rows args = { + /*.ne00 =*/ ne00, + /*.nb01 =*/ nb01, + /*.nb02 =*/ nb02, + /*.ne10 =*/ ne10, + /*.nb10 =*/ nb10, + /*.nb11 =*/ nb11, + /*.nb1 =*/ nb1, + /*.nb2 =*/ nb2, + }; + [encoder setComputePipelineState:pipeline]; [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0]; [encoder setBuffer:id_src1 offset:offs_src1 atIndex:1]; [encoder setBuffer:id_dst offset:offs_dst atIndex:2]; - [encoder setBytes:&ne00 length:sizeof( int64_t) atIndex:3]; - [encoder setBytes:&nb01 length:sizeof(uint64_t) atIndex:4]; - [encoder setBytes:&nb02 length:sizeof(uint64_t) atIndex:5]; - [encoder setBytes:&ne10 length:sizeof( int64_t) atIndex:6]; - [encoder setBytes:&nb10 length:sizeof( int64_t) atIndex:7]; - [encoder setBytes:&nb11 length:sizeof( int64_t) atIndex:8]; - [encoder setBytes:&nb1 length:sizeof(uint64_t) atIndex:9]; - [encoder setBytes:&nb2 length:sizeof(uint64_t) atIndex:10]; + [encoder setBytes:&args length:sizeof(args) atIndex:3]; [encoder dispatchThreadgroups:MTLSizeMake(ne10, ne11, 1) threadsPerThreadgroup:MTLSizeMake(32, 1, 1)]; } break; @@ -3110,18 +3126,21 @@ static void ggml_metal_encode_node( id pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_GROUP_NORM].pipeline; - // TODO: add ggml_metal_kargs struct + ggml_metal_kargs_group_norm args = { + /*.ne00 =*/ ne00, + /*.ne01 =*/ ne01, + /*.ne02 =*/ ne02, + /*.nb00 =*/ nb00, + /*.nb01 =*/ nb01, + /*.nb02 =*/ nb02, + /*.n_groups =*/ n_groups, + /*.eps =*/ eps, + }; + [encoder setComputePipelineState:pipeline]; [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0]; [encoder setBuffer:id_dst offset:offs_dst atIndex:1]; - [encoder setBytes:&ne00 length:sizeof( int64_t) atIndex:2]; - [encoder setBytes:&ne01 length:sizeof( int64_t) atIndex:3]; - [encoder setBytes:&ne02 length:sizeof( int64_t) atIndex:4]; - [encoder setBytes:&nb00 length:sizeof(uint64_t) atIndex:5]; - [encoder setBytes:&nb01 length:sizeof(uint64_t) atIndex:6]; - [encoder setBytes:&nb02 length:sizeof(uint64_t) atIndex:7]; - [encoder setBytes:&n_groups length:sizeof( int32_t) atIndex:8]; - [encoder setBytes:&eps length:sizeof( float) atIndex:9]; + [encoder setBytes:&args length:sizeof(args) atIndex:2]; [encoder setThreadgroupMemoryLength:32*sizeof(float) atIndex:0]; 
[encoder dispatchThreadgroups:MTLSizeMake(n_groups, 1, 1) threadsPerThreadgroup:MTLSizeMake(nth, 1, 1)]; @@ -3279,8 +3298,8 @@ static void ggml_metal_encode_node( const int32_t CHW = IC * KH * KW; - const int32_t ofs0 = src1->nb[is_2D ? 3 : 2] / 4; - const int32_t ofs1 = src1->nb[is_2D ? 2 : 1] / 4; + const uint64_t ofs0 = src1->nb[is_2D ? 3 : 2] / 4; + const uint64_t ofs1 = src1->nb[is_2D ? 2 : 1] / 4; id pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_IM2COL_F32].pipeline; @@ -3302,27 +3321,30 @@ static void ggml_metal_encode_node( default: GGML_ABORT("fatal error"); }; - // TODO: add ggml_metal_kargs struct + ggml_metal_kargs_im2col args = { + /*.ofs0 =*/ ofs0, + /*.ofs1 =*/ ofs1, + /*.IW =*/ IW, + /*.IH =*/ IH, + /*.CHW =*/ CHW, + /*.s0 =*/ s0, + /*.s1 =*/ s1, + /*.p0 =*/ p0, + /*.p1 =*/ p1, + /*.d0 =*/ d0, + /*.d1 =*/ d1, + /*.N =*/ N, + /*.KH =*/ KH, + /*.KW =*/ KW, + /*.KHW =*/ KH * KW, + }; + [encoder setComputePipelineState:pipeline]; [encoder setBuffer:id_src1 offset:offs_src1 atIndex:0]; [encoder setBuffer:id_dst offset:offs_dst atIndex:1]; - [encoder setBytes:&ofs0 length:sizeof(int32_t) atIndex:2]; - [encoder setBytes:&ofs1 length:sizeof(int32_t) atIndex:3]; - [encoder setBytes:&IW length:sizeof(int32_t) atIndex:4]; - [encoder setBytes:&IH length:sizeof(int32_t) atIndex:5]; - [encoder setBytes:&CHW length:sizeof(int32_t) atIndex:6]; - [encoder setBytes:&s0 length:sizeof(int32_t) atIndex:7]; - [encoder setBytes:&s1 length:sizeof(int32_t) atIndex:8]; - [encoder setBytes:&p0 length:sizeof(int32_t) atIndex:9]; - [encoder setBytes:&p1 length:sizeof(int32_t) atIndex:10]; - [encoder setBytes:&d0 length:sizeof(int32_t) atIndex:11]; - [encoder setBytes:&d1 length:sizeof(int32_t) atIndex:12]; + [encoder setBytes:&args length:sizeof(args) atIndex:2]; if (is_gt_mttpt) { - [encoder setBytes:&N length:sizeof(int32_t) atIndex:13]; - [encoder setBytes:&KH length:sizeof(int32_t) atIndex:14]; - [encoder setBytes:&KW length:sizeof(int32_t) atIndex:15]; - const uint64_t n_threads = MIN(pipeline.maxTotalThreadsPerThreadgroup, (uint64_t)N); const int64_t quotient = N / n_threads + (N % n_threads > 0 ? 
1 : 0); @@ -3362,16 +3384,20 @@ static void ggml_metal_encode_node( default: GGML_ABORT("fatal error"); }; + ggml_metal_kargs_conv_transpose_1d args = { + /*.IC =*/ IC, + /*.IL =*/ IL, + /*.K =*/ K, + /*.s0 =*/ s0, + /*.nb0 =*/ nb0, + /*.nb1 =*/ nb1, + }; + [encoder setComputePipelineState:pipeline]; [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0]; [encoder setBuffer:id_src1 offset:offs_src1 atIndex:1]; [encoder setBuffer:id_dst offset:offs_dst atIndex:2]; - [encoder setBytes:&IC length:sizeof( int32_t) atIndex:3]; - [encoder setBytes:&IL length:sizeof( int32_t) atIndex:4]; - [encoder setBytes:&K length:sizeof( int32_t) atIndex:5]; - [encoder setBytes:&s0 length:sizeof( int32_t) atIndex:6]; - [encoder setBytes:&nb0 length:sizeof(uint64_t) atIndex:7]; - [encoder setBytes:&nb1 length:sizeof(uint64_t) atIndex:8]; + [encoder setBytes:&args length:sizeof(args) atIndex:3]; [encoder dispatchThreadgroups:MTLSizeMake(OL, OC, 1) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)]; } break; @@ -3386,30 +3412,33 @@ static void ggml_metal_encode_node( const id pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_UPSCALE_F32].pipeline; - // TODO: add ggml_metal_kargs struct + ggml_metal_kargs_upscale args = { + /*.ne00 =*/ ne00, + /*.ne01 =*/ ne01, + /*.ne02 =*/ ne02, + /*.ne03 =*/ ne03, + /*.nb00 =*/ nb00, + /*.nb01 =*/ nb01, + /*.nb02 =*/ nb02, + /*.nb03 =*/ nb03, + /*.ne0 =*/ ne0, + /*.ne1 =*/ ne1, + /*.ne2 =*/ ne2, + /*.ne3 =*/ ne3, + /*.nb0 =*/ nb0, + /*.nb1 =*/ nb1, + /*.nb2 =*/ nb2, + /*.nb3 =*/ nb3, + /*.sf0 =*/ sf0, + /*.sf1 =*/ sf1, + /*.sf2 =*/ sf2, + /*.sf3 =*/ sf3 + }; + [encoder setComputePipelineState:pipeline]; [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0]; [encoder setBuffer:id_dst offset:offs_dst atIndex:1]; - [encoder setBytes:&ne00 length:sizeof(ne00) atIndex:2]; - [encoder setBytes:&ne01 length:sizeof(ne01) atIndex:3]; - [encoder setBytes:&ne02 length:sizeof(ne02) atIndex:4]; - [encoder setBytes:&ne03 length:sizeof(ne03) atIndex:5]; - [encoder setBytes:&nb00 length:sizeof(nb00) atIndex:6]; - [encoder setBytes:&nb01 length:sizeof(nb01) atIndex:7]; - [encoder setBytes:&nb02 length:sizeof(nb02) atIndex:8]; - [encoder setBytes:&nb03 length:sizeof(nb03) atIndex:9]; - [encoder setBytes:&ne0 length:sizeof(ne0) atIndex:10]; - [encoder setBytes:&ne1 length:sizeof(ne1) atIndex:11]; - [encoder setBytes:&ne2 length:sizeof(ne2) atIndex:12]; - [encoder setBytes:&ne3 length:sizeof(ne3) atIndex:13]; - [encoder setBytes:&nb0 length:sizeof(nb0) atIndex:14]; - [encoder setBytes:&nb1 length:sizeof(nb1) atIndex:15]; - [encoder setBytes:&nb2 length:sizeof(nb2) atIndex:16]; - [encoder setBytes:&nb3 length:sizeof(nb3) atIndex:17]; - [encoder setBytes:&sf0 length:sizeof(sf0) atIndex:18]; - [encoder setBytes:&sf1 length:sizeof(sf1) atIndex:19]; - [encoder setBytes:&sf2 length:sizeof(sf2) atIndex:20]; - [encoder setBytes:&sf3 length:sizeof(sf3) atIndex:21]; + [encoder setBytes:&args length:sizeof(args) atIndex:2]; const int nth = MIN((int) pipeline.maxTotalThreadsPerThreadgroup, ne0); @@ -3421,26 +3450,29 @@ static void ggml_metal_encode_node( id pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_PAD_F32].pipeline; - // TODO: add ggml_metal_kargs struct + ggml_metal_kargs_pad args = { + /*.ne00 =*/ ne00, + /*.ne01 =*/ ne01, + /*.ne02 =*/ ne02, + /*.ne03 =*/ ne03, + /*.nb00 =*/ nb00, + /*.nb01 =*/ nb01, + /*.nb02 =*/ nb02, + /*.nb03 =*/ nb03, + /*.ne0 =*/ ne0, + /*.ne1 =*/ ne1, + /*.ne2 =*/ ne2, + /*.ne3 =*/ ne3, + /*.nb0 =*/ nb0, + /*.nb1 =*/ nb1, + /*.nb2 =*/ nb2, + /*.nb3 =*/ nb3 + }; + [encoder 
setComputePipelineState:pipeline]; [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0]; [encoder setBuffer:id_dst offset:offs_dst atIndex:1]; - [encoder setBytes:&ne00 length:sizeof(ne00) atIndex:2]; - [encoder setBytes:&ne01 length:sizeof(ne01) atIndex:3]; - [encoder setBytes:&ne02 length:sizeof(ne02) atIndex:4]; - [encoder setBytes:&ne03 length:sizeof(ne03) atIndex:5]; - [encoder setBytes:&nb00 length:sizeof(nb00) atIndex:6]; - [encoder setBytes:&nb01 length:sizeof(nb01) atIndex:7]; - [encoder setBytes:&nb02 length:sizeof(nb02) atIndex:8]; - [encoder setBytes:&nb03 length:sizeof(nb03) atIndex:9]; - [encoder setBytes:&ne0 length:sizeof(ne0) atIndex:10]; - [encoder setBytes:&ne1 length:sizeof(ne1) atIndex:11]; - [encoder setBytes:&ne2 length:sizeof(ne2) atIndex:12]; - [encoder setBytes:&ne3 length:sizeof(ne3) atIndex:13]; - [encoder setBytes:&nb0 length:sizeof(nb0) atIndex:14]; - [encoder setBytes:&nb1 length:sizeof(nb1) atIndex:15]; - [encoder setBytes:&nb2 length:sizeof(nb2) atIndex:16]; - [encoder setBytes:&nb3 length:sizeof(nb3) atIndex:17]; + [encoder setBytes:&args length:sizeof(args) atIndex:2]; const int nth = MIN(1024, ne0); @@ -3455,24 +3487,31 @@ static void ggml_metal_encode_node( id pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_PAD_REFLECT_1D_F32].pipeline; + ggml_metal_kargs_pad_reflect_1d args = { + /*.ne00 =*/ ne00, + /*.ne01 =*/ ne01, + /*.ne02 =*/ ne02, + /*.ne03 =*/ ne03, + /*.nb00 =*/ nb00, + /*.nb01 =*/ nb01, + /*.nb02 =*/ nb02, + /*.nb03 =*/ nb03, + /*.ne0 =*/ ne0, + /*.ne1 =*/ ne1, + /*.ne2 =*/ ne2, + /*.ne3 =*/ ne3, + /*.nb0 =*/ nb0, + /*.nb1 =*/ nb1, + /*.nb2 =*/ nb2, + /*.nb3 =*/ nb3, + /*.p0 =*/ p0, + /*.p1 =*/ p1 + }; + [encoder setComputePipelineState:pipeline]; [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0]; [encoder setBuffer:id_dst offset:offs_dst atIndex:1]; - [encoder setBytes:&ne00 length:sizeof(ne00) atIndex:2]; - [encoder setBytes:&ne01 length:sizeof(ne01) atIndex:3]; - [encoder setBytes:&ne02 length:sizeof(ne02) atIndex:4]; - [encoder setBytes:&ne03 length:sizeof(ne03) atIndex:5]; - [encoder setBytes:&ne0 length:sizeof(ne0) atIndex:6]; - [encoder setBytes:&nb00 length:sizeof(nb00) atIndex:7]; - [encoder setBytes:&nb01 length:sizeof(nb01) atIndex:8]; - [encoder setBytes:&nb02 length:sizeof(nb02) atIndex:9]; - [encoder setBytes:&nb03 length:sizeof(nb03) atIndex:10]; - [encoder setBytes:&nb0 length:sizeof(nb0) atIndex:11]; - [encoder setBytes:&nb1 length:sizeof(nb1) atIndex:12]; - [encoder setBytes:&nb2 length:sizeof(nb2) atIndex:13]; - [encoder setBytes:&nb3 length:sizeof(nb3) atIndex:14]; - [encoder setBytes:&p0 length:sizeof(p0) atIndex:15]; - [encoder setBytes:&p1 length:sizeof(p1) atIndex:16]; + [encoder setBytes:&args length:sizeof(args) atIndex:2]; const int nth = MIN(1024, ne0); @@ -3490,12 +3529,15 @@ static void ggml_metal_encode_node( id pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_ARANGE_F32].pipeline; - // TODO: add ggml_metal_kargs struct + ggml_metal_kargs_arange args = { + /*.ne0 =*/ ne0, + /*.start =*/ start, + /*.step =*/ step + }; + [encoder setComputePipelineState:pipeline]; - [encoder setBuffer:id_dst offset:offs_dst atIndex:0]; - [encoder setBytes:&ne0 length:sizeof(ne0) atIndex:1]; - [encoder setBytes:&start length:sizeof(start) atIndex:2]; - [encoder setBytes:&step length:sizeof(step) atIndex:3]; + [encoder setBuffer:id_dst offset:offs_dst atIndex:0]; + [encoder setBytes:&args length:sizeof(args) atIndex:1]; const int nth = MIN(1024, ne0); @@ -3512,13 +3554,16 @@ static void ggml_metal_encode_node( id pipeline = 
ctx->kernels[GGML_METAL_KERNEL_TYPE_TIMESTEP_EMBEDDING_F32].pipeline; - // TODO: add ggml_metal_kargs struct + ggml_metal_kargs_timestep_embedding args = { + /*.nb1 =*/ nb1, + /*.dim =*/ dim, + /*.max_period =*/ max_period + }; + [encoder setComputePipelineState:pipeline]; [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0]; [encoder setBuffer:id_dst offset:offs_dst atIndex:1]; - [encoder setBytes:&nb1 length:sizeof(nb1) atIndex:2]; - [encoder setBytes:&dim length:sizeof(dim) atIndex:3]; - [encoder setBytes:&max_period length:sizeof(max_period) atIndex:4]; + [encoder setBytes:&args length:sizeof(args) atIndex:2]; const int nth = MIN(1024, half); @@ -3551,12 +3596,15 @@ static void ggml_metal_encode_node( default: GGML_ABORT("fatal error"); }; - // TODO: add ggml_metal_kargs struct + ggml_metal_kargs_argsort args = { + /*.ncols =*/ ne00, + /*.ncols_pad =*/ ne00_padded + }; + [encoder setComputePipelineState:pipeline]; - [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0]; - [encoder setBuffer:id_dst offset:offs_dst atIndex:1]; - [encoder setBytes:&ne00 length:sizeof( int64_t) atIndex:2]; - [encoder setBytes:&ne00_padded length:sizeof( int64_t) atIndex:3]; + [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0]; + [encoder setBuffer:id_dst offset:offs_dst atIndex:1]; + [encoder setBytes:&args length:sizeof(args) atIndex:2]; [encoder setThreadgroupMemoryLength:mem_size atIndex:0]; [encoder dispatchThreadgroups:MTLSizeMake(1, nrows, 1) threadsPerThreadgroup:MTLSizeMake(ne00_padded, 1, 1)]; @@ -3570,11 +3618,14 @@ static void ggml_metal_encode_node( id pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_LEAKY_RELU_F32].pipeline; - // TODO: add ggml_metal_kargs struct + ggml_metal_kargs_leaky_relu args = { + /*.slope =*/ slope + }; + [encoder setComputePipelineState:pipeline]; [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0]; [encoder setBuffer:id_dst offset:offs_dst atIndex:1]; - [encoder setBytes:&slope length:sizeof(slope) atIndex:2]; + [encoder setBytes:&args length:sizeof(args) atIndex:2]; const int64_t n = ggml_nelements(dst); @@ -4150,21 +4201,24 @@ static void ggml_metal_encode_node( const int64_t n_threads = MIN((int64_t)[pipeline maxTotalThreadsPerThreadgroup], parallel_elements); const int64_t n_tg = (parallel_elements + n_threads - 1) / n_threads; - // TODO: add ggml_metal_kargs struct + ggml_metal_kargs_pool_2d args_pool_2d = { + /* .k0 = */ k0, + /* .k1 = */ k1, + /* .s0 = */ s0, + /* .s1 = */ s1, + /* .p0 = */ p0, + /* .p1 = */ p1, + /* .IH = */ IH, + /* .IW = */ IW, + /* .OH = */ OH, + /* .OW = */ OW, + /* .parallel_elements = */ parallel_elements + }; + [encoder setComputePipelineState:pipeline]; - [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0]; - [encoder setBuffer:id_dst offset:offs_dst atIndex:1]; - [encoder setBytes:&k0 length:sizeof(int32_t) atIndex:2]; - [encoder setBytes:&k1 length:sizeof(int32_t) atIndex:3]; - [encoder setBytes:&s0 length:sizeof(int32_t) atIndex:4]; - [encoder setBytes:&s1 length:sizeof(int32_t) atIndex:5]; - [encoder setBytes:&p0 length:sizeof(int32_t) atIndex:6]; - [encoder setBytes:&p1 length:sizeof(int32_t) atIndex:7]; - [encoder setBytes:&IH length:sizeof(int64_t) atIndex:8]; - [encoder setBytes:&IW length:sizeof(int64_t) atIndex:9]; - [encoder setBytes:&OH length:sizeof(int64_t) atIndex:10]; - [encoder setBytes:&OW length:sizeof(int64_t) atIndex:11]; - [encoder setBytes:¶llel_elements length:sizeof(int64_t) atIndex:12]; + [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0]; + [encoder setBuffer:id_dst offset:offs_dst atIndex:1]; 
+ [encoder setBytes:&args_pool_2d length:sizeof(args_pool_2d) atIndex:2]; [encoder dispatchThreadgroups:MTLSizeMake(n_tg, 1, 1) threadsPerThreadgroup:MTLSizeMake(n_threads, 1, 1)]; } break; diff --git a/ggml/src/ggml-metal/ggml-metal.metal b/ggml/src/ggml-metal/ggml-metal.metal index c46a13050891f..ad9d42a3eaa9e 100644 --- a/ggml/src/ggml-metal/ggml-metal.metal +++ b/ggml/src/ggml-metal/ggml-metal.metal @@ -947,45 +947,22 @@ kernel void kernel_cos( kernel void kernel_sum_rows( device const float * src0, device float * dst, - constant int64_t & ne00, - constant int64_t & ne01, - constant int64_t & ne02, - constant int64_t & ne03, - constant uint64_t & nb00, - constant uint64_t & nb01, - constant uint64_t & nb02, - constant uint64_t & nb03, - constant int64_t & ne10, - constant int64_t & ne11, - constant int64_t & ne12, - constant int64_t & ne13, - constant uint64_t & nb10, - constant uint64_t & nb11, - constant uint64_t & nb12, - constant uint64_t & nb13, - constant int64_t & ne0, - constant int64_t & ne1, - constant int64_t & ne2, - constant int64_t & ne3, - constant uint64_t & nb0, - constant uint64_t & nb1, - constant uint64_t & nb2, - constant uint64_t & nb3, + constant ggml_metal_kargs_sum_rows & args, uint3 tpig[[thread_position_in_grid]]) { int64_t i3 = tpig.z; int64_t i2 = tpig.y; int64_t i1 = tpig.x; - if (i3 >= ne03 || i2 >= ne02 || i1 >= ne01) { + if (i3 >= args.ne03 || i2 >= args.ne02 || i1 >= args.ne01) { return; } - device const float * src_row = (device const float *) ((device const char *) src0 + i1*nb01 + i2*nb02 + i3*nb03); - device float * dst_row = (device float *) ((device char *) dst + i1*nb1 + i2*nb2 + i3*nb3); + device const float * src_row = (device const float *) ((device const char *) src0 + i1*args.nb01 + i2*args.nb02 + i3*args.nb03); + device float * dst_row = (device float *) ((device char *) dst + i1*args.nb1 + i2*args.nb2 + i3*args.nb3); float row_sum = 0; - for (int64_t i0 = 0; i0 < ne00; i0++) { + for (int64_t i0 = 0; i0 < args.ne00; i0++) { row_sum += src_row[i0]; } @@ -997,36 +974,29 @@ kernel void kernel_soft_max( device const char * src0, device const char * src1, device char * dst, - constant int64_t & ne00, - constant int64_t & ne01, - constant int64_t & ne02, - constant float & scale, - constant float & max_bias, - constant float & m0, - constant float & m1, - constant uint32_t & n_head_log2, + constant ggml_metal_kargs_soft_max & args, threadgroup float * buf [[threadgroup(0)]], uint tgpig[[threadgroup_position_in_grid]], uint tpitg[[thread_position_in_threadgroup]], uint sgitg[[simdgroup_index_in_threadgroup]], uint tiisg[[thread_index_in_simdgroup]], uint ntg[[threads_per_threadgroup]]) { - const int64_t i03 = (tgpig) / (ne02*ne01); - const int64_t i02 = (tgpig - i03*ne02*ne01) / ne01; - const int64_t i01 = (tgpig - i03*ne02*ne01 - i02*ne01); + const int64_t i03 = (tgpig) / (args.ne02*args.ne01); + const int64_t i02 = (tgpig - i03*args.ne02*args.ne01) / args.ne01; + const int64_t i01 = (tgpig - i03*args.ne02*args.ne01 - i02*args.ne01); - device const float * psrc0 = (device const float *) src0 + (i03*ne02*ne01*ne00 + i02*ne01*ne00 + i01*ne00); - device const T * pmask = src1 != src0 ? (device const T *) src1 + i01*ne00 : nullptr; - device float * pdst = (device float *) dst + (i03*ne02*ne01*ne00 + i02*ne01*ne00 + i01*ne00); + device const float * psrc0 = (device const float *) src0 + (i03*args.ne02*args.ne01*args.ne00 + i02*args.ne01*args.ne00 + i01*args.ne00); + device const T * pmask = src1 != src0 ? 
(device const T *) src1 + i01*args.ne00 : nullptr; + device float * pdst = (device float *) dst + (i03*args.ne02*args.ne01*args.ne00 + i02*args.ne01*args.ne00 + i01*args.ne00); float slope = 1.0f; // ALiBi - if (max_bias > 0.0f) { + if (args.max_bias > 0.0f) { const int64_t h = i02; - const float base = h < n_head_log2 ? m0 : m1; - const int exp = h < n_head_log2 ? h + 1 : 2*(h - n_head_log2) + 1; + const float base = h < args.n_head_log2 ? args.m0 : args.m1; + const int exp = h < args.n_head_log2 ? h + 1 : 2*(h - args.n_head_log2) + 1; slope = pow(base, exp); } @@ -1034,8 +1004,8 @@ kernel void kernel_soft_max( // parallel max float lmax = -INFINITY; - for (int i00 = tpitg; i00 < ne00; i00 += ntg) { - lmax = MAX(lmax, psrc0[i00]*scale + (pmask ? slope*pmask[i00] : 0.0f)); + for (int i00 = tpitg; i00 < args.ne00; i00 += ntg) { + lmax = MAX(lmax, psrc0[i00]*args.scale + (pmask ? slope*pmask[i00] : 0.0f)); } // find the max value in the block @@ -1059,8 +1029,8 @@ kernel void kernel_soft_max( // parallel sum float lsum = 0.0f; - for (int i00 = tpitg; i00 < ne00; i00 += ntg) { - const float exp_psrc0 = exp((psrc0[i00]*scale + (pmask ? slope*pmask[i00] : 0.0f)) - max_val); + for (int i00 = tpitg; i00 < args.ne00; i00 += ntg) { + const float exp_psrc0 = exp((psrc0[i00]*args.scale + (pmask ? slope*pmask[i00] : 0.0f)) - max_val); lsum += exp_psrc0; pdst[i00] = exp_psrc0; } @@ -1090,7 +1060,7 @@ kernel void kernel_soft_max( const float inv_sum = 1.0f/sum; - for (int i00 = tpitg; i00 < ne00; i00 += ntg) { + for (int i00 = tpitg; i00 < args.ne00; i00 += ntg) { pdst[i00] *= inv_sum; } } @@ -1100,35 +1070,28 @@ kernel void kernel_soft_max_4( device const char * src0, device const char * src1, device char * dst, - constant int64_t & ne00, - constant int64_t & ne01, - constant int64_t & ne02, - constant float & scale, - constant float & max_bias, - constant float & m0, - constant float & m1, - constant uint32_t & n_head_log2, + constant ggml_metal_kargs_soft_max & args, threadgroup float * buf [[threadgroup(0)]], uint tgpig[[threadgroup_position_in_grid]], uint tpitg[[thread_position_in_threadgroup]], uint sgitg[[simdgroup_index_in_threadgroup]], uint tiisg[[thread_index_in_simdgroup]], uint ntg[[threads_per_threadgroup]]) { - const int64_t i03 = (tgpig) / (ne02*ne01); - const int64_t i02 = (tgpig - i03*ne02*ne01) / ne01; - const int64_t i01 = (tgpig - i03*ne02*ne01 - i02*ne01); + const int64_t i03 = (tgpig) / (args.ne02*args.ne01); + const int64_t i02 = (tgpig - i03*args.ne02*args.ne01) / args.ne01; + const int64_t i01 = (tgpig - i03*args.ne02*args.ne01 - i02*args.ne01); - device const float4 * psrc4 = (device const float4 *) src0 + (i03*ne02*ne01*ne00 + i02*ne01*ne00 + i01*ne00)/4; - device const T * pmask = src1 != src0 ? (device const T *) src1 + i01*ne00/4 : nullptr; - device float4 * pdst4 = (device float4 *) dst + (i03*ne02*ne01*ne00 + i02*ne01*ne00 + i01*ne00)/4; + device const float4 * psrc4 = (device const float4 *) src0 + (i03*args.ne02*args.ne01*args.ne00 + i02*args.ne01*args.ne00 + i01*args.ne00)/4; + device const T * pmask = src1 != src0 ? (device const T *) src1 + i01*args.ne00/4 : nullptr; + device float4 * pdst4 = (device float4 *) dst + (i03*args.ne02*args.ne01*args.ne00 + i02*args.ne01*args.ne00 + i01*args.ne00)/4; float slope = 1.0f; - if (max_bias > 0.0f) { + if (args.max_bias > 0.0f) { const int64_t h = i02; - const float base = h < n_head_log2 ? m0 : m1; - const int exp = h < n_head_log2 ? h + 1 : 2*(h - n_head_log2) + 1; + const float base = h < args.n_head_log2 ? 
args.m0 : args.m1; + const int exp = h < args.n_head_log2 ? h + 1 : 2*(h - args.n_head_log2) + 1; slope = pow(base, exp); } @@ -1136,8 +1099,8 @@ kernel void kernel_soft_max_4( // parallel max float4 lmax4 = -INFINITY; - for (int i00 = tpitg; i00 < ne00/4; i00 += ntg) { - lmax4 = fmax(lmax4, psrc4[i00]*scale + (float4)((pmask ? slope*pmask[i00] : 0.0f))); + for (int i00 = tpitg; i00 < args.ne00/4; i00 += ntg) { + lmax4 = fmax(lmax4, psrc4[i00]*args.scale + (float4)((pmask ? slope*pmask[i00] : 0.0f))); } const float lmax = MAX(MAX(lmax4[0], lmax4[1]), MAX(lmax4[2], lmax4[3])); @@ -1162,8 +1125,8 @@ kernel void kernel_soft_max_4( // parallel sum float4 lsum4 = 0.0f; - for (int i00 = tpitg; i00 < ne00/4; i00 += ntg) { - const float4 exp_psrc4 = exp((psrc4[i00]*scale + (float4)((pmask ? slope*pmask[i00] : 0.0f))) - max_val); + for (int i00 = tpitg; i00 < args.ne00/4; i00 += ntg) { + const float4 exp_psrc4 = exp((psrc4[i00]*args.scale + (float4)((pmask ? slope*pmask[i00] : 0.0f))) - max_val); lsum4 += exp_psrc4; pdst4[i00] = exp_psrc4; } @@ -1195,7 +1158,7 @@ kernel void kernel_soft_max_4( const float inv_sum = 1.0f/sum; - for (int i00 = tpitg; i00 < ne00/4; i00 += ntg) { + for (int i00 = tpitg; i00 < args.ne00/4; i00 += ntg) { pdst4[i00] *= inv_sum; } } @@ -1211,27 +1174,23 @@ template [[host_name("kernel_soft_max_f32_4")]] kernel kernel_soft_max_4_t kerne kernel void kernel_diag_mask_inf( device const float * src0, device float * dst, - constant int64_t & ne00, - constant int64_t & ne01, - constant int & n_past, + constant ggml_metal_kargs_diag_mask_inf & args, uint3 tpig[[thread_position_in_grid]]) { const int64_t i02 = tpig[2]; const int64_t i01 = tpig[1]; const int64_t i00 = tpig[0]; - if (i00 > n_past + i01) { - dst[i02*ne01*ne00 + i01*ne00 + i00] = -INFINITY; + if (i00 > args.n_past + i01) { + dst[i02*args.ne01*args.ne00 + i01*args.ne00 + i00] = -INFINITY; } else { - dst[i02*ne01*ne00 + i01*ne00 + i00] = src0[i02*ne01*ne00 + i01*ne00 + i00]; + dst[i02*args.ne01*args.ne00 + i01*args.ne00 + i00] = src0[i02*args.ne01*args.ne00 + i01*args.ne00 + i00]; } } kernel void kernel_diag_mask_inf_8( device const float4 * src0, device float4 * dst, - constant int64_t & ne00, - constant int64_t & ne01, - constant int & n_past, + constant ggml_metal_kargs_diag_mask_inf & args, uint3 tpig[[thread_position_in_grid]]) { const int64_t i = 2*tpig[0]; @@ -1239,42 +1198,26 @@ kernel void kernel_diag_mask_inf_8( dst[i+0] = src0[i+0]; dst[i+1] = src0[i+1]; int64_t i4 = 4*i; - const int64_t i02 = i4/(ne00*ne01); i4 -= i02*ne00*ne01; - const int64_t i01 = i4/(ne00); i4 -= i01*ne00; + const int64_t i02 = i4/(args.ne00*args.ne01); i4 -= i02*args.ne00*args.ne01; + const int64_t i01 = i4/(args.ne00); i4 -= i01*args.ne00; const int64_t i00 = i4; for (int k = 3; k >= 0; --k) { - if (i00 + 4 + k <= n_past + i01) { + if (i00 + 4 + k <= args.n_past + i01) { break; } dst[i+1][k] = -INFINITY; - if (i00 + k > n_past + i01) { + if (i00 + k > args.n_past + i01) { dst[i][k] = -INFINITY; } } } // ref: ggml.c:ggml_compute_forward_ssm_conv_f32 -// TODO: optimize kernel void kernel_ssm_conv_f32( device const void * src0, device const void * src1, device float * dst, - constant int64_t & ne00, - constant int64_t & ne01, - constant int64_t & ne02, - constant uint64_t & nb00, - constant uint64_t & nb01, - constant uint64_t & nb02, - constant int64_t & ne10, - constant int64_t & ne11, - constant uint64_t & nb10, - constant uint64_t & nb11, - constant int64_t & ne0, - constant int64_t & ne1, - constant int64_t & ne2, - constant uint64_t 
& nb0, - constant uint64_t & nb1, - constant uint64_t & nb2, + constant ggml_metal_kargs_ssm_conv & args, uint3 tgpig[[threadgroup_position_in_grid]], uint3 tpitg[[thread_position_in_threadgroup]], uint3 ntg[[threads_per_threadgroup]]) { @@ -1282,15 +1225,15 @@ kernel void kernel_ssm_conv_f32( const int64_t i2 = tgpig.y; const int64_t i3 = tgpig.z; - const int64_t nc = ne10; - //const int64_t ncs = ne00; - //const int64_t nr = ne01; - //const int64_t n_t = ne1; - //const int64_t n_s = ne2; + const int64_t nc = args.ne10; + //const int64_t ncs = args.ne00; + //const int64_t nr = args.ne01; + //const int64_t n_t = args.ne1; + //const int64_t n_s = args.ne2; - device const float * s = (device const float *) ((device const char *) src0 + ir*nb01 + i2*nb00 + i3*nb02); - device const float * c = (device const float *) ((device const char *) src1 + ir*nb11); - device float * x = (device float *) ((device char *) dst + ir*nb0 + i2*nb1 + i3*nb2); + device const float * s = (device const float *) ((device const char *) src0 + ir*args.nb01 + i2*args.nb00 + i3*args.nb02); + device const float * c = (device const float *) ((device const char *) src1 + ir*args.nb11); + device float * x = (device float *) ((device char *) dst + ir*args.nb0 + i2*args.nb1 + i3*args.nb2); float sumf = 0.0f; @@ -1302,7 +1245,6 @@ kernel void kernel_ssm_conv_f32( } // ref: ggml.c:ggml_compute_forward_ssm_scan_f32 -// TODO: optimize kernel void kernel_ssm_scan_f32( device const void * src0, device const void * src1, @@ -1311,48 +1253,27 @@ kernel void kernel_ssm_scan_f32( device const void * src4, device const void * src5, device float * dst, - constant int64_t & d_state, - constant int64_t & d_inner, - constant int64_t & n_seq_tokens, - constant int64_t & n_seqs, - constant uint64_t & nb00, - constant uint64_t & nb01, - constant uint64_t & nb02, - constant uint64_t & nb10, - constant uint64_t & nb11, - constant uint64_t & nb12, - constant uint64_t & nb13, - constant uint64_t & nb20, - constant uint64_t & nb21, - constant uint64_t & nb22, - constant uint64_t & nb30, - constant uint64_t & nb31, - constant uint64_t & nb40, - constant uint64_t & nb41, - constant uint64_t & nb42, - constant uint64_t & nb50, - constant uint64_t & nb51, - constant uint64_t & nb52, + constant ggml_metal_kargs_ssm_scan & args, uint3 tgpig[[threadgroup_position_in_grid]], uint3 tpitg[[thread_position_in_threadgroup]], uint3 ntg[[threads_per_threadgroup]]) { const int64_t ir = tgpig.x; const int64_t i3 = tgpig.y; - const int64_t nc = d_state; - //const int64_t nr = d_inner; - const int64_t n_t = n_seq_tokens; - //const int64_t n_s = n_seqs; + const int64_t nc = args.d_state; + // const int64_t nr = args.d_inner; + const int64_t n_t = args.n_seq_tokens; + // const int64_t n_s = args.n_seqs; for (int64_t i2 = 0; i2 < n_t; ++i2) { - device const float * s0 = (device const float *) ((device const char *) src0 + ir*nb01 + i3*nb02); - device const float * x = (device const float *) ((device const char *) src1 + ir*nb10 + i2*nb11 + i3*nb12); - device const float * dt = (device const float *) ((device const char *) src2 + ir*nb20 + i2*nb21 + i3*nb22); - device const float * A = (device const float *) ((device const char *) src3 + ir*nb31); - device const float * B = (device const float *) ((device const char *) src4 + i2*nb41 + i3*nb42); - device const float * C = (device const float *) ((device const char *) src5 + i2*nb51 + i3*nb52); - device float * y = (device float *) ((device char *) dst + ir*nb10 + i2*nb11 + i3*nb12); // TODO: do not use src1 strides - 
device float * s = (device float *) ((device char *) dst + ir*nb01 + i3*nb02 + nb13); + device const float * s0 = (device const float *) ((device const char *) src0 + ir*args.nb01 + i3*args.nb02); + device const float * x = (device const float *) ((device const char *) src1 + ir*args.nb10 + i2*args.nb11 + i3*args.nb12); + device const float * dt = (device const float *) ((device const char *) src2 + ir*args.nb20 + i2*args.nb21 + i3*args.nb22); + device const float * A = (device const float *) ((device const char *) src3 + ir*args.nb31); + device const float * B = (device const float *) ((device const char *) src4 + i2*args.nb41 + i3*args.nb42); + device const float * C = (device const float *) ((device const char *) src5 + i2*args.nb51 + i3*args.nb52); + device float * y = (device float *) ((device char *) dst + ir*args.nb10 + i2*args.nb11 + i3*args.nb12); // TODO: do not use src1 strides + device float * s = (device float *) ((device char *) dst + ir*args.nb01 + i3*args.nb02 + args.nb13); if (i2 > 0) { s0 = s; @@ -1545,22 +1466,15 @@ kernel void kernel_rms_norm( kernel void kernel_group_norm( device const float * src0, device float * dst, - constant int64_t & ne00, - constant int64_t & ne01, - constant int64_t & ne02, - constant uint64_t & nb00, - constant uint64_t & nb01, - constant uint64_t & nb02, - constant int32_t & n_groups, - constant float & eps, + constant ggml_metal_kargs_group_norm & args, threadgroup float * buf [[threadgroup(0)]], uint tgpig[[threadgroup_position_in_grid]], uint tpitg[[thread_position_in_threadgroup]], uint sgitg[[simdgroup_index_in_threadgroup]], uint tiisg[[thread_index_in_simdgroup]], uint ntg[[threads_per_threadgroup]]) { - const int64_t ne = ne00*ne01*ne02; - const int64_t gs = ne00*ne01*((ne02 + n_groups - 1) / n_groups); + const int64_t ne = args.ne00*args.ne01*args.ne02; + const int64_t gs = args.ne00*args.ne01*((args.ne02 + args.n_groups - 1) / args.n_groups); int start = tgpig * gs; int end = start + gs; @@ -1624,7 +1538,7 @@ kernel void kernel_group_norm( } const float variance = tmp / gs; - const float scale = 1.0f/sqrt(variance + eps); + const float scale = 1.0f/sqrt(variance + args.eps); for (int j = start; j < end; j += ntg) { dst[j] *= scale; } @@ -2588,17 +2502,7 @@ template [[host_name("kernel_rope_neox_f16")]] kernel kernel_rope_neox_t kernel_ typedef void (im2col_t)( device const float * x, device char * dst, - constant int32_t & ofs0, - constant int32_t & ofs1, - constant int32_t & IW, - constant int32_t & IH, - constant int32_t & CHW, - constant int32_t & s0, - constant int32_t & s1, - constant int32_t & p0, - constant int32_t & p1, - constant int32_t & d0, - constant int32_t & d1, + constant ggml_metal_kargs_im2col & args, uint3 tgpig[[threadgroup_position_in_grid]], uint3 tgpg[[threadgroups_per_grid]], uint3 tpitg[[thread_position_in_threadgroup]], @@ -2608,17 +2512,7 @@ template kernel void kernel_im2col( device const float * x, device char * dst, - constant int32_t & ofs0, - constant int32_t & ofs1, - constant int32_t & IW, - constant int32_t & IH, - constant int32_t & CHW, - constant int32_t & s0, - constant int32_t & s1, - constant int32_t & p0, - constant int32_t & p1, - constant int32_t & d0, - constant int32_t & d1, + constant ggml_metal_kargs_im2col & args, uint3 tgpig[[threadgroup_position_in_grid]], uint3 tgpg[[threadgroups_per_grid]], uint3 tpitg[[thread_position_in_threadgroup]], @@ -2639,17 +2533,17 @@ kernel void kernel_im2col( const int64_t ioh = tgpig[1]; const int64_t iow = tgpig[2]; - const int64_t iiw = iow*s0 + 
ikw*d0 - p0; - const int64_t iih = ioh*s1 + ikh*d1 - p1; + const int64_t iiw = iow*args.s0 + ikw*args.d0 - args.p0; + const int64_t iih = ioh*args.s1 + ikh*args.d1 - args.p1; - const int64_t offset_dst = (in*OH*OW + ioh*OW + iow)*CHW + (iic*(KH*KW) + ikh*KW + ikw); + const int64_t offset_dst = (in*OH*OW + ioh*OW + iow)*args.CHW + (iic*(KH*KW) + ikh*KW + ikw); device T * pdst = (device T *) (dst); - if (iih < 0 || iih >= IH || iiw < 0 || iiw >= IW) { + if (iih < 0 || iih >= args.IH || iiw < 0 || iiw >= args.IW) { pdst[offset_dst] = 0.0f; } else { - const int64_t offset_src = in*ofs0 + iic*ofs1 + iih*IW + iiw; + const int64_t offset_src = in*args.ofs0 + iic*args.ofs1 + iih*args.IW + iiw; pdst[offset_dst] = x[offset_src]; } } @@ -2660,20 +2554,7 @@ template [[host_name("kernel_im2col_f16")]] kernel im2col_t kernel_im2col; typedef void (im2col_ext_t)( device const float * x, device char * dst, - constant int32_t & ofs0, - constant int32_t & ofs1, - constant int32_t & IW, - constant int32_t & IH, - constant int32_t & CHW, - constant int32_t & s0, - constant int32_t & s1, - constant int32_t & p0, - constant int32_t & p1, - constant int32_t & d0, - constant int32_t & d1, - constant int32_t & N, - constant int32_t & KH, - constant int32_t & KW, + constant ggml_metal_kargs_im2col & args, uint3 tgpig[[threadgroup_position_in_grid]], uint3 tgpg[[threadgroups_per_grid]], uint3 tpitg[[thread_position_in_threadgroup]], @@ -2683,53 +2564,40 @@ template kernel void kernel_im2col_ext( device const float * x, device char * dst, - constant int32_t & ofs0, - constant int32_t & ofs1, - constant int32_t & IW, - constant int32_t & IH, - constant int32_t & CHW, - constant int32_t & s0, - constant int32_t & s1, - constant int32_t & p0, - constant int32_t & p1, - constant int32_t & d0, - constant int32_t & d1, - constant int32_t & N, - constant int32_t & KH, - constant int32_t & KW, + constant ggml_metal_kargs_im2col & args, uint3 tgpig[[threadgroup_position_in_grid]], uint3 tgpg[[threadgroups_per_grid]], // tgpg[0] = D x IC x KH x KW, CHW = IC x KH x KW uint3 tpitg[[thread_position_in_threadgroup]], uint3 ntg[[threads_per_threadgroup]]) { // [M, 1, 1] - const int64_t KHW = KH * KW; // KHW == ntg[1] * ntg[2], KW == ntg[2] + const int64_t KHW = (int64_t)args.KHW; - const int64_t d = tgpig[0] / CHW; - const int64_t chw = tgpig[0] % CHW; + const int64_t d = tgpig[0] / args.CHW; + const int64_t chw = tgpig[0] % args.CHW; const int64_t tgpig_0 = chw / KHW; // 0 ~ (IC - 1) const int64_t HW = tgpig[0] % KHW; const int64_t tpitg_0 = (d * ntg[0]) + tpitg[0]; - if (tpitg_0 >= N) { + if (tpitg_0 >= args.N) { return; } - const int64_t tpitg_1 = HW / KW; - const int64_t tpitg_2 = HW % KW; + const int64_t tpitg_1 = HW / args.KW; + const int64_t tpitg_2 = HW % args.KW; - const int64_t iiw = tgpig[2] * s0 + tpitg_2 * d0 - p0; - const int64_t iih = tgpig[1] * s1 + tpitg_1 * d1 - p1; + const int64_t iiw = tgpig[2] * args.s0 + tpitg_2 * args.d0 - args.p0; + const int64_t iih = tgpig[1] * args.s1 + tpitg_1 * args.d1 - args.p1; const int64_t offset_dst = - (tpitg_0 * tgpg[1] * tgpg[2] + tgpig[1] * tgpg[2] + tgpig[2]) * CHW + - (tgpig_0 * KHW + tpitg_1 * KW + tpitg_2); + (tpitg_0 * tgpg[1] * tgpg[2] + tgpig[1] * tgpg[2] + tgpig[2]) * args.CHW + + (tgpig_0 * KHW + tpitg_1 * args.KW + tpitg_2); device T * pdst = (device T *) (dst); - if (iih < 0 || iih >= IH || iiw < 0 || iiw >= IW) { + if (iih < 0 || iih >= args.IH || iiw < 0 || iiw >= args.IW) { pdst[offset_dst] = 0.0f; } else { - const int64_t offset_src = tpitg_0 * ofs0 + tgpig_0 * 
ofs1; - pdst[offset_dst] = x[offset_src + iih * IW + iiw]; + const int64_t offset_src = tpitg_0 * args.ofs0 + tgpig_0 * args.ofs1; + pdst[offset_dst] = x[offset_src + iih * args.IW + iiw]; } } @@ -2740,12 +2608,7 @@ typedef void (conv_transpose_1d_t)( device const float * src0, device const float * src1, device char * dst, - constant int32_t & IC, - constant int32_t & IL, - constant int32_t & K, - constant int32_t & s0, - constant uint64_t & nb0, - constant uint64_t & nb1, + constant ggml_metal_kargs_conv_transpose_1d & args, uint3 tgpig[[threadgroup_position_in_grid]], uint3 tgpg[[threadgroups_per_grid]]); @@ -2754,29 +2617,24 @@ kernel void kernel_conv_transpose_1d( device const T * src0, device const float * src1, device char * dst, - constant int32_t & IC, - constant int32_t & IL, - constant int32_t & K, - constant int32_t & s0, - constant uint64_t & nb0, - constant uint64_t & nb1, + constant ggml_metal_kargs_conv_transpose_1d & args, uint3 tgpig[[threadgroup_position_in_grid]], uint3 tgpg[[threadgroups_per_grid]]) { float v = 0.0f; - for (int64_t c = 0; c < IC; c++) { - const int32_t kernel_offset = c * tgpg[1] * K + K * tgpig[1]; - const int32_t input_offset = c * IL; + for (int64_t c = 0; c < args.IC; c++) { + const int32_t kernel_offset = c * tgpg[1] * args.K + args.K * tgpig[1]; + const int32_t input_offset = c * args.IL; - for (int64_t i = 0; i < IL; i++) { - if (tgpig[0] >= i * s0 && tgpig[0] < i * s0 + K) { - v += src0[kernel_offset + tgpig[0] - i * s0] * src1[input_offset + i]; + for (int64_t i = 0; i < args.IL; i++) { + if (tgpig[0] >= i * args.s0 && tgpig[0] < i * args.s0 + args.K) { + v += src0[kernel_offset + tgpig[0] - i * args.s0] * src1[input_offset + i]; } } } - device float * dst_ptr = (device float *) (dst + tgpig[0] * nb0 + tgpig[1] * nb1); + device float * dst_ptr = (device float *) (dst + tgpig[0] * args.nb0 + tgpig[1] * args.nb1); dst_ptr[0] = v; } @@ -2786,12 +2644,7 @@ kernel void kernel_conv_transpose_1d( device const float * src0, device const float * src1, device char * dst, - constant int32_t & IC, - constant int32_t & IL, - constant int32_t & K, - constant int32_t & s0, - constant uint64_t & nb0, - constant uint64_t & nb1, + constant ggml_metal_kargs_conv_transpose_1d & args, uint3 tgpig[[threadgroup_position_in_grid]], uint3 tgpg[[threadgroups_per_grid]]); @@ -2800,38 +2653,14 @@ kernel void kernel_conv_transpose_1d( device const half * src0, device const float * src1, device char * dst, - constant int32_t & IC, - constant int32_t & IL, - constant int32_t & K, - constant int32_t & s0, - constant uint64_t & nb0, - constant uint64_t & nb1, + constant ggml_metal_kargs_conv_transpose_1d & args, uint3 tgpig[[threadgroup_position_in_grid]], uint3 tgpg[[threadgroups_per_grid]]); kernel void kernel_upscale_f32( device const char * src0, device char * dst, - constant int64_t & ne00, - constant int64_t & ne01, - constant int64_t & ne02, - constant int64_t & ne03, - constant uint64_t & nb00, - constant uint64_t & nb01, - constant uint64_t & nb02, - constant uint64_t & nb03, - constant int64_t & ne0, - constant int64_t & ne1, - constant int64_t & ne2, - constant int64_t & ne3, - constant uint64_t & nb0, - constant uint64_t & nb1, - constant uint64_t & nb2, - constant uint64_t & nb3, - constant float & sf0, - constant float & sf1, - constant float & sf2, - constant float & sf3, + constant ggml_metal_kargs_upscale & args, uint3 tgpig[[threadgroup_position_in_grid]], uint3 tpitg[[thread_position_in_threadgroup]], uint3 ntg[[threads_per_threadgroup]]) { @@ -2840,15 
+2669,15 @@ kernel void kernel_upscale_f32( const int64_t i2 = tgpig.y; const int64_t i1 = tgpig.x; - const int64_t i03 = i3/sf3; - const int64_t i02 = i2/sf2; - const int64_t i01 = i1/sf1; + const int64_t i03 = i3/args.sf3; + const int64_t i02 = i2/args.sf2; + const int64_t i01 = i1/args.sf1; - for (int i0 = tpitg.x; i0 < ne0; i0 += ntg.x) { - const int64_t i00 = i0/sf0; + for (int i0 = tpitg.x; i0 < args.ne0; i0 += ntg.x) { + const int64_t i00 = i0/args.sf0; - device const float * src0_ptr = (device const float *) (src0 + i03*nb03 + i02*nb02 + i01*nb01 + i00*nb00); - device float * dst_ptr = (device float *) (dst + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0); + device const float * src0_ptr = (device const float *) (src0 + i03*args.nb03 + i02*args.nb02 + i01*args.nb01 + i00*args.nb00); + device float * dst_ptr = (device float *) (dst + i3*args.nb3 + i2*args.nb2 + i1*args.nb1 + i0*args.nb0); dst_ptr[0] = src0_ptr[0]; } @@ -2857,22 +2686,7 @@ kernel void kernel_upscale_f32( kernel void kernel_pad_f32( device const char * src0, device char * dst, - constant int64_t & ne00, - constant int64_t & ne01, - constant int64_t & ne02, - constant int64_t & ne03, - constant uint64_t & nb00, - constant uint64_t & nb01, - constant uint64_t & nb02, - constant uint64_t & nb03, - constant int64_t & ne0, - constant int64_t & ne1, - constant int64_t & ne2, - constant int64_t & ne3, - constant uint64_t & nb0, - constant uint64_t & nb1, - constant uint64_t & nb2, - constant uint64_t & nb3, + constant ggml_metal_kargs_pad & args, uint3 tgpig[[threadgroup_position_in_grid]], uint3 tpitg[[thread_position_in_threadgroup]], uint3 ntg[[threads_per_threadgroup]]) { @@ -2885,12 +2699,12 @@ kernel void kernel_pad_f32( const int64_t i02 = i2; const int64_t i01 = i1; - device const float * src0_ptr = (device const float *) (src0 + i03*nb03 + i02*nb02 + i01*nb01); - device float * dst_ptr = (device float *) (dst + i3*nb3 + i2*nb2 + i1*nb1); + device const float * src0_ptr = (device const float *) (src0 + i03*args.nb03 + i02*args.nb02 + i01*args.nb01); + device float * dst_ptr = (device float *) (dst + i3*args.nb3 + i2*args.nb2 + i1*args.nb1); - if (i1 < ne01 && i2 < ne02 && i3 < ne03) { - for (int i0 = tpitg.x; i0 < ne0; i0 += ntg.x) { - if (i0 < ne00) { + if (i1 < args.ne01 && i2 < args.ne02 && i3 < args.ne03) { + for (int i0 = tpitg.x; i0 < args.ne0; i0 += ntg.x) { + if (i0 < args.ne00) { dst_ptr[i0] = src0_ptr[i0]; } else { dst_ptr[i0] = 0.0f; @@ -2900,7 +2714,7 @@ kernel void kernel_pad_f32( return; } - for (int i0 = tpitg.x; i0 < ne0; i0 += ntg.x) { + for (int i0 = tpitg.x; i0 < args.ne0; i0 += ntg.x) { dst_ptr[i0] = 0.0f; } } @@ -2908,21 +2722,7 @@ kernel void kernel_pad_f32( kernel void kernel_pad_reflect_1d_f32( device const char * src0, device char * dst, - constant int64_t & ne00, - constant int64_t & ne01, - constant int64_t & ne02, - constant int64_t & ne03, - constant int64_t & ne0, - constant uint64_t & nb00, - constant uint64_t & nb01, - constant uint64_t & nb02, - constant uint64_t & nb03, - constant uint64_t & nb0, - constant uint64_t & nb1, - constant uint64_t & nb2, - constant uint64_t & nb3, - constant int32_t & p0, - constant int32_t & p1, + constant ggml_metal_kargs_pad_reflect_1d & args, uint3 tgpig[[threadgroup_position_in_grid]], uint3 tgpg[[threadgroups_per_grid]], uint3 tpitg[[thread_position_in_threadgroup]], @@ -2936,17 +2736,17 @@ kernel void kernel_pad_reflect_1d_f32( const int64_t i02 = i2; const int64_t i01 = i1; - device const float * src0_ptr = (device const float *) (src0 + i03*nb03 + i02*nb02 + 
i01*nb01); - device float * dst_ptr = (device float *) (dst + i3*nb3 + i2*nb2 + i1*nb1); + device const float * src0_ptr = (device const float *) (src0 + i03*args.nb03 + i02*args.nb02 + i01*args.nb01); + device float * dst_ptr = (device float *) (dst + i3*args.nb3 + i2*args.nb2 + i1*args.nb1); - if (i1 < ne01 && i2 < ne02 && i3 < ne03) { - for (int i0 = tpitg.x; i0 < ne0; i0 += ntg.x) { - if (i0 < p0) { - dst_ptr[i0] = src0_ptr[p0 - i0]; - } else if (i0 < ne0 - p1) { - dst_ptr[i0] = src0_ptr[i0 - p0]; + if (i1 < args.ne01 && i2 < args.ne02 && i3 < args.ne03) { + for (int i0 = tpitg.x; i0 < args.ne0; i0 += ntg.x) { + if (i0 < args.p0) { + dst_ptr[i0] = src0_ptr[args.p0 - i0]; + } else if (i0 < args.ne0 - args.p1) { + dst_ptr[i0] = src0_ptr[i0 - args.p0]; } else { - dst_ptr[i0] = src0_ptr[(ne0 - p1 - p0) - (p1 + 1 - (ne0 - i0)) - 1]; + dst_ptr[i0] = src0_ptr[(args.ne0 - args.p1 - args.p0) - (args.p1 + 1 - (args.ne0 - i0)) - 1]; } } } @@ -2954,44 +2754,40 @@ kernel void kernel_pad_reflect_1d_f32( kernel void kernel_arange_f32( device char * dst, - constant int64_t & ne0, - constant float & start, - constant float & step, + constant ggml_metal_kargs_arange & args, uint3 tgpig[[threadgroup_position_in_grid]], uint3 tpitg[[thread_position_in_threadgroup]], uint3 ntg[[threads_per_threadgroup]]) { device float * dst_ptr = (device float *) dst; - for (int i0 = tpitg.x; i0 < ne0; i0 += ntg.x) { - dst_ptr[i0] = start + step * i0; + for (int i0 = tpitg.x; i0 < args.ne0; i0 += ntg.x) { + dst_ptr[i0] = args.start + args.step * i0; } } kernel void kernel_timestep_embedding_f32( device const char * src0, device char * dst, - constant uint64_t & nb1, - constant int & dim, - constant int & max_period, + constant ggml_metal_kargs_timestep_embedding & args, uint3 tgpig[[threadgroup_position_in_grid]], uint3 tpitg[[thread_position_in_threadgroup]], uint3 ntg[[threads_per_threadgroup]]) { int i = tgpig.x; - device float * embed_data = (device float *)(dst + i*nb1); + device float * embed_data = (device float *)(dst + i*args.nb1); - int half_ = dim / 2; + int half_ = args.dim / 2; for (int j = tpitg.x; j < half_; j += ntg.x) { float timestep = ((device float *)src0)[i]; - float freq = (float)exp(-log((float)max_period) * j / half_); + float freq = (float)exp(-log((float)args.max_period) * j / half_); float arg = timestep * freq; embed_data[j ] = cos(arg); embed_data[j + half_] = sin(arg); } - if (dim % 2 != 0 && tpitg.x == 0) { - embed_data[dim] = 0.f; + if (args.dim % 2 != 0 && tpitg.x == 0) { + embed_data[args.dim] = 0.f; } } @@ -2999,8 +2795,7 @@ kernel void kernel_timestep_embedding_f32( typedef void (argsort_t)( device const float * x, device int32_t * dst, - constant int64_t & ncols, - constant int64_t & ncols_pad, + constant ggml_metal_kargs_argsort & args, threadgroup int32_t * shared_values [[threadgroup(0)]], uint3 tgpig[[threadgroup_position_in_grid]], uint3 tpitg[[thread_position_in_threadgroup]]); @@ -3009,8 +2804,7 @@ template kernel void kernel_argsort_f32_i32( device const float * x, device int32_t * dst, - constant int64_t & ncols, - constant int64_t & ncols_pad, + constant ggml_metal_kargs_argsort & args, threadgroup int32_t * shared_values [[threadgroup(0)]], uint3 tgpig[[threadgroup_position_in_grid]], uint3 tpitg[[thread_position_in_threadgroup]]) { @@ -3018,9 +2812,9 @@ kernel void kernel_argsort_f32_i32( int col = tpitg[0]; int row = tgpig[1]; - if (col >= ncols_pad) return; + if (col >= args.ncols_pad) return; - device const float * x_row = x + row * ncols; + device const float * x_row = 
x + row * args.ncols; threadgroup int32_t * dst_row = shared_values; // initialize indices @@ -3028,21 +2822,21 @@ kernel void kernel_argsort_f32_i32( threadgroup_barrier(mem_flags::mem_threadgroup); - for (int k = 2; k <= ncols_pad; k *= 2) { + for (int k = 2; k <= args.ncols_pad; k *= 2) { for (int j = k / 2; j > 0; j /= 2) { int ixj = col ^ j; if (ixj > col) { if ((col & k) == 0) { - if (dst_row[col] >= ncols || - (dst_row[ixj] < ncols && (order == GGML_SORT_ORDER_ASC ? + if (dst_row[col] >= args.ncols || + (dst_row[ixj] < args.ncols && (order == GGML_SORT_ORDER_ASC ? x_row[dst_row[col]] > x_row[dst_row[ixj]] : x_row[dst_row[col]] < x_row[dst_row[ixj]])) ) { SWAP(dst_row[col], dst_row[ixj]); } } else { - if (dst_row[ixj] >= ncols || - (dst_row[col] < ncols && (order == GGML_SORT_ORDER_ASC ? + if (dst_row[ixj] >= args.ncols || + (dst_row[col] < args.ncols && (order == GGML_SORT_ORDER_ASC ? x_row[dst_row[col]] < x_row[dst_row[ixj]] : x_row[dst_row[col]] > x_row[dst_row[ixj]])) ) { @@ -3055,8 +2849,8 @@ kernel void kernel_argsort_f32_i32( } // copy the result to dst without the padding - if (col < ncols) { - dst[row * ncols + col] = dst_row[col]; + if (col < args.ncols) { + dst[row * args.ncols + col] = dst_row[col]; } } @@ -3066,9 +2860,9 @@ template [[host_name("kernel_argsort_f32_i32_desc")]] kernel argsort_t kernel_ar kernel void kernel_leaky_relu_f32( device const float * src0, device float * dst, - constant float & slope, + constant ggml_metal_kargs_leaky_relu & args, uint tpig[[thread_position_in_grid]]) { - dst[tpig] = src0[tpig] > 0.0f ? src0[tpig] : src0[tpig] * slope; + dst[tpig] = src0[tpig] > 0.0f ? src0[tpig] : src0[tpig] * args.slope; } // ref: https://arxiv.org/pdf/2307.08691.pdf @@ -6009,28 +5803,21 @@ kernel void kernel_get_rows_q( device const void * src0, device const void * src1, device float * dst, - constant int64_t & ne00, - constant uint64_t & nb01, - constant uint64_t & nb02, - constant int64_t & ne10, - constant uint64_t & nb10, - constant uint64_t & nb11, - constant uint64_t & nb1, - constant uint64_t & nb2, + constant ggml_metal_kargs_get_rows & args, uint3 tgpig[[threadgroup_position_in_grid]], uint tiitg[[thread_index_in_threadgroup]], uint3 tptg [[threads_per_threadgroup]]) { const int64_t i10 = tgpig.x; const int64_t i11 = tgpig.y; - const int64_t r = ((const device int32_t *) ((const device char *) src1 + i11*nb11 + i10*nb10))[0]; + const int64_t r = ((const device int32_t *) ((const device char *) src1 + i11*args.nb11 + i10*args.nb10))[0]; const int64_t i02 = i11; - for (int64_t ind = tiitg; ind < ne00/16; ind += tptg.x) { + for (int64_t ind = tiitg; ind < args.ne00/16; ind += tptg.x) { float4x4 temp; - dequantize_func(((device const block_q *) ((const device char *) src0 + r*nb01 + i02*nb02)) + ind/nl, ind%nl, temp); - *(((device float4x4 *) ((device char *) dst + i11*nb2 + i10*nb1)) + ind) = temp; + dequantize_func(((device const block_q *) ((const device char *) src0 + r*args.nb01 + i02*args.nb02)) + ind/nl, ind%nl, temp); + *(((device float4x4 *) ((device char *) dst + i11*args.nb2 + i10*args.nb1)) + ind) = temp; } } @@ -6039,27 +5826,20 @@ kernel void kernel_get_rows_f( device const void * src0, device const void * src1, device float * dst, - constant int64_t & ne00, - constant uint64_t & nb01, - constant uint64_t & nb02, - constant int64_t & ne10, - constant uint64_t & nb10, - constant uint64_t & nb11, - constant uint64_t & nb1, - constant uint64_t & nb2, + constant ggml_metal_kargs_get_rows & args, uint3 tgpig[[threadgroup_position_in_grid]], uint 
tiitg[[thread_index_in_threadgroup]], uint3 tptg [[threads_per_threadgroup]]) { const int64_t i10 = tgpig.x; const int64_t i11 = tgpig.y; - const int64_t r = ((const device int32_t *) ((const device char *) src1 + i11*nb11 + i10*nb10))[0]; + const int64_t r = ((const device int32_t *) ((const device char *) src1 + i11*args.nb11 + i10*args.nb10))[0]; const int64_t i02 = i11; - for (int ind = tiitg; ind < ne00; ind += tptg.x) { - (( device float *) (( device char *) dst + i11*nb2 + i10*nb1))[ind] = - ((const device T *) ((const device char *) src0 + i02*nb02 + r*nb01))[ind]; + for (int ind = tiitg; ind < args.ne00; ind += tptg.x) { + (( device float *) (( device char *) dst + i11*args.nb2 + i10*args.nb1))[ind] = + ((const device T *) ((const device char *) src0 + i02*args.nb02 + r*args.nb01))[ind]; } } @@ -6067,27 +5847,20 @@ kernel void kernel_get_rows_i32( device const void * src0, device const void * src1, device int32_t * dst, - constant int64_t & ne00, - constant uint64_t & nb01, - constant uint64_t & nb02, - constant int64_t & ne10, - constant uint64_t & nb10, - constant uint64_t & nb11, - constant uint64_t & nb1, - constant uint64_t & nb2, + constant ggml_metal_kargs_get_rows & args, uint3 tgpig[[threadgroup_position_in_grid]], uint tiitg[[thread_index_in_threadgroup]], uint3 tptg [[threads_per_threadgroup]]) { const int64_t i10 = tgpig.x; const int64_t i11 = tgpig.y; - const int64_t r = ((const device int32_t *) ((const device char *) src1 + i11*nb11 + i10*nb10))[0]; + const int64_t r = ((const device int32_t *) ((const device char *) src1 + i11*args.nb11 + i10*args.nb10))[0]; const int64_t i02 = i11; - for (int ind = tiitg; ind < ne00; ind += tptg.x) { - (( device int32_t *) (( device char *) dst + i11*nb2 + i10*nb1))[ind] = - ((const device int32_t *) ((const device char *) src0 + i02*nb02 + r*nb01))[ind]; + for (int ind = tiitg; ind < args.ne00; ind += tptg.x) { + (( device int32_t *) (( device char *) dst + i11*args.nb2 + i10*args.nb1))[ind] = + ((const device int32_t *) ((const device char *) src0 + i02*args.nb02 + r*args.nb01))[ind]; } } @@ -6689,98 +6462,78 @@ template [[host_name("kernel_mul_mv_id_iq4_xs_f32")]] kernel kernel_mul_mv_id_t kernel void kernel_pool_2d_max_f32( device const float * src0, device float * dst, - constant int32_t & k0, - constant int32_t & k1, - constant int32_t & s0, - constant int32_t & s1, - constant int32_t & p0, - constant int32_t & p1, - constant int64_t & IH, - constant int64_t & IW, - constant int64_t & OH, - constant int64_t & OW, - constant int64_t & parallel_elements, + constant ggml_metal_kargs_pool_2d & args, uint gid[[thread_position_in_grid]]) { - if (gid >= parallel_elements) { + if (gid >= args.parallel_elements) { return; } const int idx = gid; - const int I_HW = IH * IW; - const int O_HW = OH * OW; + const int I_HW = args.IH * args.IW; + const int O_HW = args.OH * args.OW; const int nc = idx / O_HW; - const int cur_oh = idx % O_HW / OW; - const int cur_ow = idx % O_HW % OW; + const int cur_oh = idx % O_HW / args.OW; + const int cur_ow = idx % O_HW % args.OW; device const float * i_ptr = src0 + nc * I_HW; device float * o_ptr = dst + nc * O_HW; - const int start_h = cur_oh * s1 - p1; + const int start_h = cur_oh * args.s1 - args.p1; const int bh = MAX(0, start_h); - const int eh = MIN(IH, start_h + k1); - const int start_w = cur_ow * s0 - p0; + const int eh = MIN(args.IH, start_h + args.k1); + const int start_w = cur_ow * args.s0 - args.p0; const int bw = MAX(0, start_w); - const int ew = MIN(IW, start_w + k0); + const int ew = 
MIN(args.IW, start_w + args.k0); float res = -INFINITY; for (int i = bh; i < eh; i += 1) { for (int j = bw; j < ew; j += 1) { - res = MAX(res, i_ptr[i * IW + j]); + res = MAX(res, i_ptr[i * args.IW + j]); } } - o_ptr[cur_oh * OW + cur_ow] = res; + o_ptr[cur_oh * args.OW + cur_ow] = res; } kernel void kernel_pool_2d_avg_f32( device const float * src0, device float * dst, - constant int32_t & k0, - constant int32_t & k1, - constant int32_t & s0, - constant int32_t & s1, - constant int32_t & p0, - constant int32_t & p1, - constant int64_t & IH, - constant int64_t & IW, - constant int64_t & OH, - constant int64_t & OW, - constant int64_t & parallel_elements, + constant ggml_metal_kargs_pool_2d & args, uint gid[[thread_position_in_grid]]) { - if (gid >= parallel_elements) { + if (gid >= args.parallel_elements) { return; } const int idx = gid; - const int I_HW = IH * IW; - const int O_HW = OH * OW; + const int I_HW = args.IH * args.IW; + const int O_HW = args.OH * args.OW; const int nc = idx / O_HW; - const int cur_oh = idx % O_HW / OW; - const int cur_ow = idx % O_HW % OW; + const int cur_oh = idx % O_HW / args.OW; + const int cur_ow = idx % O_HW % args.OW; device const float * i_ptr = src0 + nc * I_HW; device float * o_ptr = dst + nc * O_HW; - const int start_h = cur_oh * s1 - p1; + const int start_h = cur_oh * args.s1 - args.p1; const int bh = MAX(0, start_h); - const int eh = MIN(IH, start_h + k1); - const int start_w = cur_ow * s0 - p0; + const int eh = MIN(args.IH, start_h + args.k1); + const int start_w = cur_ow * args.s0 - args.p0; const int bw = MAX(0, start_w); - const int ew = MIN(IW, start_w + k0); + const int ew = MIN(args.IW, start_w + args.k0); // const float scale = 1. / ((eh - bh) * (ew - bw)); - const float scale = 1. / (k0 * k1); + const float scale = 1. / (args.k0 * args.k1); float res = 0; for (int i = bh; i < eh; i += 1) { for (int j = bw; j < ew; j += 1) { - float cur = i_ptr[i * IW + j]; + float cur = i_ptr[i * args.IW + j]; res += cur * scale; } } - o_ptr[cur_oh * OW + cur_ow] = res; + o_ptr[cur_oh * args.OW + cur_ow] = res; } From 7cf64f6beecf54c6ac71503181f154667fd4228a Mon Sep 17 00:00:00 2001 From: Olivier Chafik Date: Fri, 7 Mar 2025 09:33:37 +0000 Subject: [PATCH 013/398] sync: minja - support QwQ-32B (#12235) https://github.com/google/minja/commit/8a76f7815e8a3ae00bd233c2b5a8b7d4e86564ec --- common/minja/minja.hpp | 42 +++++++++++++++++++++++++++++++++++++----- 1 file changed, 37 insertions(+), 5 deletions(-) diff --git a/common/minja/minja.hpp b/common/minja/minja.hpp index c58dd66e067b1..fa4c34d6e414a 100644 --- a/common/minja/minja.hpp +++ b/common/minja/minja.hpp @@ -1378,13 +1378,27 @@ struct ArgumentsExpression { } }; -static std::string strip(const std::string & s) { - auto start = s.find_first_not_of(" \t\n\r"); +static std::string strip(const std::string & s, const std::string & chars = "", bool left = true, bool right = true) { + auto charset = chars.empty() ? " \t\n\r" : chars; + auto start = left ? s.find_first_not_of(charset) : 0; if (start == std::string::npos) return ""; - auto end = s.find_last_not_of(" \t\n\r"); + auto end = right ? 
s.find_last_not_of(charset) : s.size() - 1; return s.substr(start, end - start + 1); } +static std::vector split(const std::string & s, const std::string & sep) { + std::vector result; + size_t start = 0; + size_t end = s.find(sep); + while (end != std::string::npos) { + result.push_back(s.substr(start, end - start)); + start = end + sep.length(); + end = s.find(sep, start); + } + result.push_back(s.substr(start)); + return result; +} + static std::string capitalize(const std::string & s) { if (s.empty()) return s; auto result = s; @@ -1467,8 +1481,26 @@ class MethodCallExpr : public Expression { } else if (obj.is_string()) { auto str = obj.get(); if (method->get_name() == "strip") { - vargs.expectArgs("strip method", {0, 0}, {0, 0}); - return Value(strip(str)); + vargs.expectArgs("strip method", {0, 1}, {0, 0}); + auto chars = vargs.args.empty() ? "" : vargs.args[0].get(); + return Value(strip(str, chars)); + } else if (method->get_name() == "lstrip") { + vargs.expectArgs("lstrip method", {0, 1}, {0, 0}); + auto chars = vargs.args.empty() ? "" : vargs.args[0].get(); + return Value(strip(str, chars, /* left= */ true, /* right= */ false)); + } else if (method->get_name() == "rstrip") { + vargs.expectArgs("rstrip method", {0, 1}, {0, 0}); + auto chars = vargs.args.empty() ? "" : vargs.args[0].get(); + return Value(strip(str, chars, /* left= */ false, /* right= */ true)); + } else if (method->get_name() == "split") { + vargs.expectArgs("split method", {1, 1}, {0, 0}); + auto sep = vargs.args[0].get(); + auto parts = split(str, sep); + Value result = Value::array(); + for (const auto& part : parts) { + result.push_back(Value(part)); + } + return result; } else if (method->get_name() == "capitalize") { vargs.expectArgs("capitalize method", {0, 0}, {0, 0}); return Value(capitalize(str)); From 8fad3c7a7c54a25a1ca38dfb08244df55288e675 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sigbj=C3=B8rn=20Skj=C3=A6ret?= Date: Fri, 7 Mar 2025 11:15:33 +0100 Subject: [PATCH 014/398] server : Log original chat template parsing error (#12233) --- examples/server/server.cpp | 1 + 1 file changed, 1 insertion(+) diff --git a/examples/server/server.cpp b/examples/server/server.cpp index 2a526b0e7acdd..e1371dbf8cbb0 100644 --- a/examples/server/server.cpp +++ b/examples/server/server.cpp @@ -1900,6 +1900,7 @@ struct server_context { try { common_chat_format_example(chat_templates.get(), params.use_jinja); } catch (const std::exception & e) { + SRV_WRN("%s: Chat template parsing error: %s\n", __func__, e.what()); SRV_WRN("%s: The chat template that comes with this model is not yet supported, falling back to chatml. 
This may cause the model to output suboptimal responses\n", __func__); chat_templates = common_chat_templates_init(model, "chatml"); } From ea002810a209246d034d1b6ddac387f778751588 Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Fri, 7 Mar 2025 12:19:31 +0200 Subject: [PATCH 015/398] ci : fix save-load test invocations (#12245) --- ci/run.sh | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/ci/run.sh b/ci/run.sh index 77c32ce0050aa..9fc19c89d80d2 100755 --- a/ci/run.sh +++ b/ci/run.sh @@ -352,10 +352,10 @@ function gg_run_open_llama_7b_v2 { (time ./bin/llama-imatrix --model ${model_f16} -f ${wiki_test} -t 1 -ngl 99 -c 2048 -b 512 --chunks 4 ) 2>&1 | tee -a $OUT/${ci}-imatrix.log - (time ./bin/llama-save-load-state--model ${model_q4_0} -ngl 10 -c 0 ) 2>&1 | tee -a $OUT/${ci}-save-load-state.log - (time ./bin/llama-save-load-state--model ${model_q4_0} -ngl 10 -c 0 -fa ) 2>&1 | tee -a $OUT/${ci}-save-load-state.log - (time ./bin/llama-save-load-state--model ${model_q4_0} -ngl 99 -c 0 ) 2>&1 | tee -a $OUT/${ci}-save-load-state.log - (time ./bin/llama-save-load-state--model ${model_q4_0} -ngl 99 -c 0 -fa ) 2>&1 | tee -a $OUT/${ci}-save-load-state.log + (time ./bin/llama-save-load-state --model ${model_q4_0} -ngl 10 -c 0 ) 2>&1 | tee -a $OUT/${ci}-save-load-state.log + (time ./bin/llama-save-load-state --model ${model_q4_0} -ngl 10 -c 0 -fa ) 2>&1 | tee -a $OUT/${ci}-save-load-state.log + (time ./bin/llama-save-load-state --model ${model_q4_0} -ngl 99 -c 0 ) 2>&1 | tee -a $OUT/${ci}-save-load-state.log + (time ./bin/llama-save-load-state --model ${model_q4_0} -ngl 99 -c 0 -fa ) 2>&1 | tee -a $OUT/${ci}-save-load-state.log function check_ppl { qnt="$1" From 68d0027f3d19eb579c1863814c91e37ffa699014 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9my=20O?= Date: Fri, 7 Mar 2025 12:54:22 +0100 Subject: [PATCH 016/398] ggml-cpu: faster AVX2 variant for IQ1_M (#12216) --- ggml/src/ggml-cpu/ggml-cpu-quants.c | 22 +++++++++++++++++----- 1 file changed, 17 insertions(+), 5 deletions(-) diff --git a/ggml/src/ggml-cpu/ggml-cpu-quants.c b/ggml/src/ggml-cpu/ggml-cpu-quants.c index 2ae66591d2175..8c7dbd1ccb5fe 100644 --- a/ggml/src/ggml-cpu/ggml-cpu-quants.c +++ b/ggml/src/ggml-cpu/ggml-cpu-quants.c @@ -11718,9 +11718,12 @@ void ggml_vec_dot_iq1_m_q8_K (int n, float * GGML_RESTRICT s, size_t bs, const #elif defined __AVX2__ - const __m256i mask = _mm256_set1_epi16(2 * 0x7); + const __m256i mask = _mm256_set1_epi16(0x7); const __m256i mone = _mm256_set1_epi16(1); const __m256i mone8 = _mm256_set1_epi8(1); + const __m256i mtwo8 = _mm256_set1_epi8(2); + // VPSHUFB cannot cross 128-bit lanes so odd shifts go to upper half. + const __m256i scales_shift = _mm256_set_epi64x(9, 3, 6, 0); __m256 accum1 = _mm256_setzero_ps(); __m256 accum2 = _mm256_setzero_ps(); @@ -11732,6 +11735,14 @@ void ggml_vec_dot_iq1_m_q8_K (int n, float * GGML_RESTRICT s, size_t bs, const const uint16_t * sc = (const uint16_t *)x[i].scales; scale.u16 = (sc[0] >> 12) | ((sc[1] >> 8) & 0x00f0) | ((sc[2] >> 4) & 0x0f00) | (sc[3] & 0xf000); + // Extract 3-bit scales (16 values) + __m256i scales = _mm256_set1_epi64x(*(const uint64_t*)sc); + scales = _mm256_srlv_epi64(scales, scales_shift); + scales = _mm256_add_epi16(_mm256_slli_epi16(_mm256_and_si256(scales, mask), 1), mone); + + // Indices to repeat each scale 8 times. 
+ __m256i scales_idx1 = _mm256_set1_epi16(0x0100); + __m256i scales_idx2 = _mm256_add_epi8(scales_idx1, _mm256_set1_epi8(8)); __m256i sumi1 = _mm256_setzero_si256(); __m256i sumi2 = _mm256_setzero_si256(); @@ -11777,11 +11788,12 @@ void ggml_vec_dot_iq1_m_q8_K (int n, float * GGML_RESTRICT s, size_t bs, const const __m256i dot3 = _mm256_maddubs_epi16(mone8, _mm256_sign_epi8(q8b_1, delta1)); const __m256i dot4 = _mm256_maddubs_epi16(mone8, _mm256_sign_epi8(q8b_2, delta2)); - __m256i scale1 = MM256_SET_M128I(_mm_set1_epi16(sc[ib/2] >> 2), _mm_set1_epi16(sc[ib/2] << 1)); - __m256i scale2 = MM256_SET_M128I(_mm_set1_epi16(sc[ib/2] >> 8), _mm_set1_epi16(sc[ib/2] >> 5)); + __m256i scale1 = _mm256_shuffle_epi8(scales, scales_idx1); + __m256i scale2 = _mm256_shuffle_epi8(scales, scales_idx2); + + scales_idx1 = _mm256_add_epi8(scales_idx1, mtwo8); + scales_idx2 = _mm256_add_epi8(scales_idx2, mtwo8); - scale1 = _mm256_add_epi16(_mm256_and_si256(scale1, mask), mone); - scale2 = _mm256_add_epi16(_mm256_and_si256(scale2, mask), mone); const __m256i p1 = _mm256_madd_epi16(dot1, scale1); const __m256i p2 = _mm256_madd_epi16(dot2, scale2); const __m256i p3 = _mm256_madd_epi16(dot3, scale1); From d6ae2fa06139e496880cbf65197c84341e9d98e7 Mon Sep 17 00:00:00 2001 From: vmobilis <75476228+vmobilis@users.noreply.github.com> Date: Fri, 7 Mar 2025 11:11:40 +0300 Subject: [PATCH 017/398] ggml : ggml_compute_forward_concat() for arbitrary tensor type (ggml/1118) * ggml_compute_forward_concat() for arbitrary tensor type * Check that tensors' type match * ggml-cpu.c: check type of source tensors * ggml-cpu.c: move tensor type check to ggml_compute_forward_concat() * ggml.c: check concatenated tensor type * Remove tensor type check from ggml_compute_forward_concat() in ggml-cpu.c ..., as it was moved to ggml.c. 
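To illustrate the effect of this change, a minimal sketch (not part of the patch) of concatenating two F16 tensors on the CPU backend follows; the shapes, the thread count and the ggml-cpu.h include are illustrative assumptions, not taken from the commit:

#include "ggml.h"
#include "ggml-cpu.h"

int main(void) {
    struct ggml_init_params ip = {
        /*.mem_size   =*/ 16*1024*1024,
        /*.mem_buffer =*/ NULL,
        /*.no_alloc   =*/ false,
    };
    struct ggml_context * ctx = ggml_init(ip);

    // two F16 tensors that differ only in the concatenation dimension (dim 2);
    // tensor data is left uninitialized for brevity
    struct ggml_tensor * a = ggml_new_tensor_3d(ctx, GGML_TYPE_F16, 8, 4, 2);
    struct ggml_tensor * b = ggml_new_tensor_3d(ctx, GGML_TYPE_F16, 8, 4, 3);

    // result is an [8, 4, 5] F16 tensor; ggml_concat now asserts a->type == b->type
    struct ggml_tensor * c = ggml_concat(ctx, a, b, 2);

    struct ggml_cgraph * gf = ggml_new_graph(ctx);
    ggml_build_forward_expand(gf, c);
    ggml_graph_compute_with_ctx(ctx, gf, /*n_threads=*/ 4);

    ggml_free(ctx);
    return 0;
}

With mismatched input types, the GGML_ASSERT added to ggml_concat() in ggml.c now fails early instead of deferring the problem to the backend.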
--- ggml/src/ggml-cpu/ggml-cpu.c | 143 ++++++++++++++++++++++++++++++++++- ggml/src/ggml.c | 1 + 2 files changed, 142 insertions(+), 2 deletions(-) diff --git a/ggml/src/ggml-cpu/ggml-cpu.c b/ggml/src/ggml-cpu/ggml-cpu.c index c67fdd0456bf3..f2ab4c5d69582 100644 --- a/ggml/src/ggml-cpu/ggml-cpu.c +++ b/ggml/src/ggml-cpu/ggml-cpu.c @@ -6648,6 +6648,135 @@ static void ggml_compute_forward_repeat_back( // ggml_compute_forward_concat +static void ggml_compute_forward_concat_any( + const struct ggml_compute_params * params, + struct ggml_tensor * dst) { + + const struct ggml_tensor * src0 = dst->src[0]; + const struct ggml_tensor * src1 = dst->src[1]; + + const size_t len = ggml_type_size(src0->type); + + const int ith = params->ith; + const int nth = params->nth; + + GGML_TENSOR_BINARY_OP_LOCALS + + const int32_t dim = ggml_get_op_params_i32(dst, 0); + + GGML_ASSERT(dim >= 0 && dim < 4); + + int64_t o[4] = {0, 0, 0, 0}; + o[dim] = src0->ne[dim]; + + const char * x; + + // TODO: smarter multi-theading + for (int i3 = 0; i3 < ne3; i3++) { + for (int i2 = ith; i2 < ne2; i2 += nth) { + for (int i1 = 0; i1 < ne1; i1++) { + for (int i0 = 0; i0 < ne0; i0++) { + if (i0 < ne00 && i1 < ne01 && i2 < ne02 && i3 < ne03) { + x = (const char *)src0->data + (i0 )*nb00 + (i1 )*nb01 + (i2 )*nb02 + (i3 )*nb03; + } else { + x = (const char *)src1->data + (i0 - o[0])*nb10 + (i1 - o[1])*nb11 + (i2 - o[2])*nb12 + (i3 - o[3])*nb13; + } + + char * y = (char *)dst->data + i0*nb0 + i1*nb1 + i2*nb2 + i3*nb3; + + memcpy(y, x, len); + } + } + } + } +} + +static void ggml_compute_forward_concat_i8( + const struct ggml_compute_params * params, + struct ggml_tensor * dst) { + + const struct ggml_tensor * src0 = dst->src[0]; + const struct ggml_tensor * src1 = dst->src[1]; + + GGML_ASSERT(ggml_type_size(src0->type) == sizeof(int8_t)); + + const int ith = params->ith; + const int nth = params->nth; + + GGML_TENSOR_BINARY_OP_LOCALS + + const int32_t dim = ggml_get_op_params_i32(dst, 0); + + GGML_ASSERT(dim >= 0 && dim < 4); + + int64_t o[4] = {0, 0, 0, 0}; + o[dim] = src0->ne[dim]; + + const int8_t * x; + + // TODO: smarter multi-theading + for (int i3 = 0; i3 < ne3; i3++) { + for (int i2 = ith; i2 < ne2; i2 += nth) { + for (int i1 = 0; i1 < ne1; i1++) { + for (int i0 = 0; i0 < ne0; i0++) { + if (i0 < ne00 && i1 < ne01 && i2 < ne02 && i3 < ne03) { + x = (const int8_t *) ((const char *)src0->data + (i0 )*nb00 + (i1 )*nb01 + (i2 )*nb02 + (i3 )*nb03); + } else { + x = (const int8_t *) ((const char *)src1->data + (i0 - o[0])*nb10 + (i1 - o[1])*nb11 + (i2 - o[2])*nb12 + (i3 - o[3])*nb13); + } + + int8_t * y = (int8_t *)((char *)dst->data + i0*nb0 + i1*nb1 + i2*nb2 + i3*nb3); + + *y = *x; + } + } + } + } +} + +static void ggml_compute_forward_concat_f16( + const struct ggml_compute_params * params, + struct ggml_tensor * dst) { + + const struct ggml_tensor * src0 = dst->src[0]; + const struct ggml_tensor * src1 = dst->src[1]; + + GGML_ASSERT(ggml_type_size(src0->type) == sizeof(ggml_fp16_t)); + + const int ith = params->ith; + const int nth = params->nth; + + GGML_TENSOR_BINARY_OP_LOCALS + + const int32_t dim = ggml_get_op_params_i32(dst, 0); + + GGML_ASSERT(dim >= 0 && dim < 4); + + int64_t o[4] = {0, 0, 0, 0}; + o[dim] = src0->ne[dim]; + + const ggml_fp16_t * x; + + // TODO: smarter multi-theading + for (int i3 = 0; i3 < ne3; i3++) { + for (int i2 = ith; i2 < ne2; i2 += nth) { + for (int i1 = 0; i1 < ne1; i1++) { + for (int i0 = 0; i0 < ne0; i0++) { + if (i0 < ne00 && i1 < ne01 && i2 < ne02 && i3 < ne03) { + x = (const ggml_fp16_t 
*) ((const char *)src0->data + (i0 )*nb00 + (i1 )*nb01 + (i2 )*nb02 + (i3 )*nb03); + } else { + x = (const ggml_fp16_t *) ((const char *)src1->data + (i0 - o[0])*nb10 + (i1 - o[1])*nb11 + (i2 - o[2])*nb12 + (i3 - o[3])*nb13); + } + + ggml_fp16_t * y = (ggml_fp16_t *)((char *)dst->data + i0*nb0 + i1*nb1 + i2*nb2 + i3*nb3); + + *y = *x; + } + } + } + } +} + static void ggml_compute_forward_concat_f32( const struct ggml_compute_params * params, struct ggml_tensor * dst) { @@ -6655,7 +6784,7 @@ static void ggml_compute_forward_concat_f32( const struct ggml_tensor * src0 = dst->src[0]; const struct ggml_tensor * src1 = dst->src[1]; - GGML_ASSERT(src0->nb[0] == sizeof(float)); + GGML_ASSERT(ggml_type_size(src0->type) == sizeof(float)); const int ith = params->ith; const int nth = params->nth; @@ -6698,6 +6827,16 @@ static void ggml_compute_forward_concat( const struct ggml_tensor * src0 = dst->src[0]; switch (src0->type) { + case GGML_TYPE_F16: + case GGML_TYPE_BF16: + case GGML_TYPE_I16: + { + ggml_compute_forward_concat_f16(params, dst); + } break; + case GGML_TYPE_I8: + { + ggml_compute_forward_concat_i8(params, dst); + } break; case GGML_TYPE_F32: case GGML_TYPE_I32: { @@ -6705,7 +6844,7 @@ static void ggml_compute_forward_concat( } break; default: { - GGML_ABORT("fatal error"); + ggml_compute_forward_concat_any(params, dst); } } } diff --git a/ggml/src/ggml.c b/ggml/src/ggml.c index 084240331ef93..89409bb0e42a5 100644 --- a/ggml/src/ggml.c +++ b/ggml/src/ggml.c @@ -2332,6 +2332,7 @@ struct ggml_tensor * ggml_concat( struct ggml_tensor * b, int dim) { GGML_ASSERT(dim >= 0 && dim < GGML_MAX_DIMS); + GGML_ASSERT(a->type == b->type); int64_t ne[GGML_MAX_DIMS]; for (int d = 0; d < GGML_MAX_DIMS; ++d) { From 102ac1891db32c346a7b6b96145a2a23c1e4c352 Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Fri, 7 Mar 2025 14:00:27 +0200 Subject: [PATCH 018/398] sync : ggml ggml-ci --- scripts/sync-ggml.last | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/sync-ggml.last b/scripts/sync-ggml.last index 040b53ca3922d..c7944d1d429c6 100644 --- a/scripts/sync-ggml.last +++ b/scripts/sync-ggml.last @@ -1 +1 @@ -58ecf6b96d887e408b6869915863fa1126483d51 +c7dfe3d174f98b14801f9ed12f129179d3e7b638 From 7c7f3b7f435f41f2508e0e3010f0013cd8335156 Mon Sep 17 00:00:00 2001 From: Daniel Bevenius Date: Fri, 7 Mar 2025 14:15:27 +0100 Subject: [PATCH 019/398] ggml : skip intermediate .air file when compiling .metallib (#12247) This commit updates the compilation of default.metallib to skip the intermediate .air (Apple Intermediate Representation) file. The motivation for this change is to simplify the custom command a little and avoid generating and then removing the .air file. 
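For reference, the updated rule amounts to the following standalone pipeline (file names are placeholders and XC_FLAGS stands in for whatever extra compiler flags the build selects):

xcrun -sdk macosx metal $XC_FLAGS -c ggml-metal.metal -o - | \
    xcrun -sdk macosx metallib - -o default.metallib

The compiled AIR is streamed through the pipe, so no ggml-metal.air file is written to disk and no cleanup step is needed.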
--- ggml/src/ggml-metal/CMakeLists.txt | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/ggml/src/ggml-metal/CMakeLists.txt b/ggml/src/ggml-metal/CMakeLists.txt index be3fb3fa95183..e222327809c31 100644 --- a/ggml/src/ggml-metal/CMakeLists.txt +++ b/ggml/src/ggml-metal/CMakeLists.txt @@ -88,9 +88,8 @@ else() add_custom_command( OUTPUT ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/default.metallib - COMMAND xcrun -sdk macosx metal ${XC_FLAGS} -c ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/ggml-metal.metal -o ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/ggml-metal.air - COMMAND xcrun -sdk macosx metallib ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/ggml-metal.air -o ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/default.metallib - COMMAND rm -f ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/ggml-metal.air + COMMAND xcrun -sdk macosx metal ${XC_FLAGS} -c ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/ggml-metal.metal -o - | + xcrun -sdk macosx metallib - -o ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/default.metallib COMMAND rm -f ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/ggml-common.h COMMAND rm -f ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/ggml-metal.metal DEPENDS ggml-metal.metal ${METALLIB_COMMON} From 7ab364390f92b0b8d83f69821a536b424838f3f8 Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Fri, 7 Mar 2025 20:54:30 +0200 Subject: [PATCH 020/398] server : infill gen ends on new line (#12254) --- examples/server/server.cpp | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/examples/server/server.cpp b/examples/server/server.cpp index e1371dbf8cbb0..8386f4eebba48 100644 --- a/examples/server/server.cpp +++ b/examples/server/server.cpp @@ -1312,7 +1312,7 @@ struct server_slot { return task_type == SERVER_TASK_TYPE_EMBEDDING || task_type == SERVER_TASK_TYPE_RERANK; } - bool can_batch_with(server_slot & other_slot) { + bool can_batch_with(server_slot & other_slot) const { return is_non_causal() == other_slot.is_non_causal() && are_lora_equal(lora, other_slot.lora); } @@ -2157,14 +2157,6 @@ struct server_context { } if (slot.has_new_line) { - // if we have already seen a new line, we stop after a certain time limit - if (slot.params.t_max_predict_ms > 0 && (ggml_time_us() - slot.t_start_generation > 1000.0f*slot.params.t_max_predict_ms)) { - slot.stop = STOP_TYPE_LIMIT; - slot.has_next_token = false; - - SLT_DBG(slot, "stopped by time limit, n_decoded = %d, t_max_predict_ms = %d ms\n", slot.n_decoded, (int) slot.params.t_max_predict_ms); - } - // require that each new line has a whitespace prefix (i.e. 
indentation) of at least slot.params.n_indent if (slot.params.n_indent > 0) { // check the current indentation @@ -2203,6 +2195,14 @@ struct server_context { // check if there is a new line in the generated text if (result.text_to_send.find('\n') != std::string::npos) { slot.has_new_line = true; + + // if we have seen a new line, we stop after a certain time limit, but only upon another new line + if (slot.params.t_max_predict_ms > 0 && (ggml_time_us() - slot.t_start_generation > 1000.0f*slot.params.t_max_predict_ms)) { + slot.stop = STOP_TYPE_LIMIT; + slot.has_next_token = false; + + SLT_DBG(slot, "stopped by time limit, n_decoded = %d, t_max_predict_ms = %d ms\n", slot.n_decoded, (int) slot.params.t_max_predict_ms); + } } // if context shift is disabled, we stop when it reaches the context limit From 6fefc05a7a4e676780ae10b0a4d0728e5281f367 Mon Sep 17 00:00:00 2001 From: "Jason C.H" Date: Sun, 9 Mar 2025 00:02:39 +0800 Subject: [PATCH 021/398] ggml-backend : make path_str compatible with C++20 (#12269) --- AUTHORS | 1 + ggml/src/ggml-backend-reg.cpp | 7 +++++++ 2 files changed, 8 insertions(+) diff --git a/AUTHORS b/AUTHORS index 6796b29413595..ddcb156388bf0 100644 --- a/AUTHORS +++ b/AUTHORS @@ -1045,3 +1045,4 @@ zrm 蕭澧邦 <45505768+shou692199@users.noreply.github.com> 谢乃闻 Нияз Гарифзянов <112617865+garrnizon@users.noreply.github.com> +Jason C.H diff --git a/ggml/src/ggml-backend-reg.cpp b/ggml/src/ggml-backend-reg.cpp index d0d68becd89db..9bedeae78affb 100644 --- a/ggml/src/ggml-backend-reg.cpp +++ b/ggml/src/ggml-backend-reg.cpp @@ -76,7 +76,14 @@ namespace fs = std::filesystem; static std::string path_str(const fs::path & path) { std::string u8path; try { +#if defined(__cpp_lib_char8_t) + // C++20 and later: u8string() returns std::u8string + std::u8string u8str = path.u8string(); + u8path = std::string(reinterpret_cast(u8str.c_str())); +#else + // C++17: u8string() returns std::string u8path = path.u8string(); +#endif } catch (...) 
{ } return u8path; From 0fd7ca7a210bd4abc995cd728491043491dbdef7 Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Sat, 8 Mar 2025 18:26:00 +0200 Subject: [PATCH 022/398] authors : update (#12271) --- AUTHORS | 62 +++++++++++++++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 60 insertions(+), 2 deletions(-) diff --git a/AUTHORS b/AUTHORS index ddcb156388bf0..0af9f44ad4a16 100644 --- a/AUTHORS +++ b/AUTHORS @@ -1,4 +1,4 @@ -# date: Tue Feb 4 13:04:05 EET 2025 +# date: Sat Mar 8 18:23:52 EET 2025 # this file is auto-generated by scripts/gen-authors.sh 0cc4m @@ -8,10 +8,12 @@ 3ooabkhxtn <31479382+3ooabkhxtn@users.noreply.github.com> 44670 <44670@users.noreply.github.com> 65a <10104049+65a@users.noreply.github.com> +708-145 <40387547+708-145@users.noreply.github.com> AN Long AT Aarni Koskela Aaron Miller +Aaron Teo <57927438+taronaeo@users.noreply.github.com> Aaryaman Vasishta Abheek Gulati Abhilash Majumder <30946547+abhilash1910@users.noreply.github.com> @@ -20,6 +22,7 @@ Adithya Balaji AdithyanI Adrian Adrian Hesketh +Adrian Kretz Adrien Gallouët Adrien Gallouët Ahmad Tameem <113388789+Tameem-10xE@users.noreply.github.com> @@ -28,15 +31,18 @@ AidanBeltonS <87009434+AidanBeltonS@users.noreply.github.com> AidanBeltonS Aisuko Akarshan Biswas +Akarshan Biswas Akarshan Biswas Al Mochkin <14274697+amochkin@users.noreply.github.com> Albert Jin Alberto <57916483+albbus-stack@users.noreply.github.com> Alberto Cabrera Pérez Alberto Cabrera Pérez +Aleksei Nikiforov <103434461+AlekseiNikiforovIBM@users.noreply.github.com> Alex Alex Azarov Alex Azarov +Alex Brooks Alex Klinkhamer Alex Klinkhamer Alex Nguyen @@ -67,6 +73,7 @@ Andrew Minh Nguyen <40281306+amqdn@users.noreply.github.com> Andy Salerno Andy Tai Anthony Van de Gejuchte +Antoine Viallon Antonis Makropoulos Arik Poznanski Armen Kaleshian @@ -83,6 +90,7 @@ Atsushi Tatsuma Austin <77757836+teleprint-me@users.noreply.github.com> AustinMroz BADR +BB-fat <45072480+BB-fat@users.noreply.github.com> Bach Le Bailey Chittle <39804642+bachittle@users.noreply.github.com> BarfingLemurs <128182951+BarfingLemurs@users.noreply.github.com> @@ -101,6 +109,7 @@ Bert Wagner Billel Mokeddem Bingan <70050083+binganao@users.noreply.github.com> Bjarke Viksøe <164612031+bviksoe@users.noreply.github.com> +Bodhi <3882561+BodhiHu@users.noreply.github.com> Bodo Graumann Bono Lv Borislav Stanimirov @@ -128,6 +137,7 @@ CentricStorm Chad Brewbaker Changyeon Kim Chao Jiang +Charles Duffy Charles Xu <63788048+chaxu01@users.noreply.github.com> Charles Xu Chen Xi @@ -139,12 +149,14 @@ Chris Kuehl Christian Demsar Christian Demsar Christian Falch <875252+chrfalch@users.noreply.github.com> +Christian Fillion Christian Kastner Christian Kögler Christian Köhnenkamp Christian Zhou-Zheng <59622928+christianazinn@users.noreply.github.com> Christopher Nielsen <62156882+mascguy@users.noreply.github.com> Clark Saben <76020733+csaben@users.noreply.github.com> +Clauszy Clint Herron Conrad Kramer Corentin REGAL @@ -163,6 +175,7 @@ Daniel Hiltgen Daniel Illescas Romero Daniel Kleine <53251018+d-kleine@users.noreply.github.com> Daniele <57776841+daniandtheweb@users.noreply.github.com> +Danny Milosavljevic DannyDaemonic Dat Quoc Nguyen <2412555+datquocnguyen@users.noreply.github.com> Dave @@ -170,6 +183,7 @@ Dave Airlie Dave Airlie Dave Della Costa David Friehs +David Huang <1969802+hjc4869@users.noreply.github.com> David Kennedy David Pflug David Renshaw @@ -236,6 +250,7 @@ Felix Finn Voorhees Firat FirstTimeEZ <179362031+FirstTimeEZ@users.noreply.github.com> +Florent BENOIT 
Folko-Ven <71110216+Folko-Ven@users.noreply.github.com> Foul-Tarnished <107711110+Foul-Tarnished@users.noreply.github.com> Francisco Melo <43780565+francis2tm@users.noreply.github.com> @@ -254,6 +269,7 @@ Gary Mulder Gavin Zhao Genkagaku.GPT Georgi Gerganov +Gian-Carlo Pascutto Gilad S Gilad S. <7817232+giladgd@users.noreply.github.com> Giuseppe Scrivano @@ -267,7 +283,9 @@ Guspan Tanadi <36249910+guspan-tanadi@users.noreply.github.com> Gustavo Rocha Dias <91472747+gustrd@users.noreply.github.com> Haggai Nuchi Halalaluyafail3 <55773281+Halalaluyafail3@users.noreply.github.com> +Hale Chan Hamdoud Hakem <90524568+hamdoudhakem@users.noreply.github.com> +Han Yin HanishKVC Haohui Mai Haoxiang Fei @@ -278,6 +296,7 @@ Haus1 Henk Poley Henri Vasserman Henrik Forstén +Henry Linjamäki Herman Semenov Hesen Peng HimariO @@ -307,6 +326,7 @@ Ivan Ivan Filipov <159561759+vanaka11@users.noreply.github.com> Ivan Komarov Ivan Stepanov +JC <43374599+MrSMlT@users.noreply.github.com> JFLFY2255 JH23X <165871467+JH23X@users.noreply.github.com> Jack Mousseau @@ -325,6 +345,7 @@ Jan Ploski Jannis Schönleber Jared Van Bortel Jared Van Bortel +Jason C.H Jason McCartney Jason Stillerman Jean-Christophe Hoelt @@ -342,6 +363,7 @@ Jiahao Li Jian Liao JidongZhang-THU <1119708529@qq.com> Jinwoo Jeong <33892306+williamjeong2@users.noreply.github.com> +Jinyang He Jiří Podivín <66251151+jpodivin@users.noreply.github.com> Jiří Sejkora Joan Fontanals @@ -379,6 +401,7 @@ Justine Tunney Juuso Alasuutari KASR Kamil Tomšík +Kante Yin Karol Kontny <82021046+kkontny@users.noreply.github.com> Karsten Weiss Karthick @@ -419,6 +442,7 @@ LoganDark Loïc Carrère LostRuins <39025047+LostRuins@users.noreply.github.com> LostRuins Concedo <39025047+LostRuins@users.noreply.github.com> +Lucas Moura Belo Luciano Luo Tian Lyle Dean @@ -463,6 +487,7 @@ Matthew Tejo Matvey Soloviev Max Krasnyansky Max Krasnyansky +Maxim Evtush <154841002+maximevtush@users.noreply.github.com> Maxime <672982+maximegmd@users.noreply.github.com> Maximilian Winter Meng Zhang @@ -494,6 +519,7 @@ Miwa / Ensan <63481257+ensan-hcl@users.noreply.github.com> Mohammadreza Hendiani Mohammadreza Hendiani Molly Sophia +MoonRide303 <130458190+MoonRide303@users.noreply.github.com> MorganRO8 <47795945+MorganRO8@users.noreply.github.com> Murilo Santana Musab Gultekin @@ -524,6 +550,7 @@ Nikolas <127742645+nneubacher@users.noreply.github.com> Nindaleth Nuno OSecret <135510162+OLSecret@users.noreply.github.com> +Oleksandr Kuvshynov <661042+okuvshynov@users.noreply.github.com> Oleksandr Nikitin Oleksii Maryshchenko Olivier Chafik @@ -533,6 +560,7 @@ PAB Pablo Duboue Pascal Patry Patrice Ferlet +Patrick Peng Paul Tsochantaris Pavel Zloi Pavol Rusnak @@ -549,6 +577,7 @@ Pieter Ouwerkerk Plamen Minev Prashant Vithule <119530321+Vithulep@users.noreply.github.com> Przemysław Pawełczyk +PureJourney Qin Yue Chen <71813199+chenqiny@users.noreply.github.com> Qingyou Meng Qu Zongfu <43257352+yancaoweidaode@users.noreply.github.com> @@ -564,14 +593,17 @@ Rand Xie Randall Fitzgerald Random Fly Reinforce-II +Rémy O Rémy Oudompheng Ren Xuancheng Rene Leonhardt <65483435+reneleonhardt@users.noreply.github.com> Reza Kakhki +Reza Rahemtola <49811529+RezaRahemtola@users.noreply.github.com> RhinoDevel Riccardo Orlando Riceball LEE Rich Dougherty +Richard Richard Kiss Richard Roberson Rick G <26732651+TheFlipbook@users.noreply.github.com> @@ -588,6 +620,7 @@ Robert Sung-wook Shin Robey Holderith Robyn Roger Meier +Rohanjames1997 Roland <14355895+rbur0425@users.noreply.github.com> Romain Biessy Romain D 
<90720+Artefact2@users.noreply.github.com> @@ -610,6 +643,7 @@ Ryan Landay Ryder Wishart Ryuei Rőczey Barnabás <31726601+An0nie@users.noreply.github.com> +SAMI SRHMorris <69468379+SRHMorris@users.noreply.github.com> SXX SakuraUmi @@ -634,6 +668,8 @@ Shane A Shangning Xu <32517059+xushangning@users.noreply.github.com> Shankar Shanshan Shen <467638484@qq.com> +Shelby Jenkins <47464908+ShelbyJenkins@users.noreply.github.com> +Sheldon Robinson Shijie <821898965@qq.com> Shintarou Okada Shouzheng Liu <61452103+lshzh-ww@users.noreply.github.com> @@ -713,18 +749,24 @@ Victor Nogueira Victor Z. Peng Viet-Anh NGUYEN (Andrew) Vinesh Janarthanan <36610342+VJHack@users.noreply.github.com> +Vitali Lovich +Vivian Vlad Vladimir Vladimir Malyutin +Vladimir Vuksanovic <109677816+vvuksanovic@users.noreply.github.com> Vladimir Zorin VoidIsVoid <343750470@qq.com> Volodymyr Vitvitskyi <72226+signalpillar@users.noreply.github.com> +Wagner Bruna Wang Qin <37098874+wangqin0@users.noreply.github.com> Wang Ran (汪然) WangHaoranRobin <56047610+WangHaoranRobin@users.noreply.github.com> Weird Constructor +Weizhao Ouyang Welby Seely Wentai Zhang +Wilken Gottwalt <12194808+wgottwalt@users.noreply.github.com> WillCorticesAI <150854901+WillCorticesAI@users.noreply.github.com> William Tambellini William Tambellini @@ -816,6 +858,8 @@ chaihahaha chiranko <96988916+chiranko@users.noreply.github.com> clibdev <52199778+clibdev@users.noreply.github.com> clyang +cmdr2 +cmdr2 cocktailpeanut <121128867+cocktailpeanut@users.noreply.github.com> codezjx coezbek @@ -835,6 +879,7 @@ deepdiffuser <112834445+deepdiffuser@users.noreply.github.com> devojony <61173062+devojony@users.noreply.github.com> ditsuke divinity76 +dm4 dm4 dotpy314 <33351922+dotpy314@users.noreply.github.com> drbh @@ -849,6 +894,7 @@ fairydreaming <166155368+fairydreaming@users.noreply.github.com> fengerhu1 <2748250768@qq.com> fj-y-saito <85871716+fj-y-saito@users.noreply.github.com> fraxy-v <65565042+fraxy-v@users.noreply.github.com> +fxzjshm <11426482+fxzjshm@users.noreply.github.com> github-actions[bot] gliptic gn64 @@ -873,6 +919,7 @@ hydai iSma iacore <74560659+iacore@users.noreply.github.com> icppWorld <124377669+icppWorld@users.noreply.github.com> +igardev <49397134+igardev@users.noreply.github.com> igarnier intelmatt <61025942+intelmatt@users.noreply.github.com> iohub @@ -880,6 +927,7 @@ issixx <46835150+issixx@users.noreply.github.com> jacobi petrucciani <8117202+jpetrucciani@users.noreply.github.com> jaime-m-p <167997752+jaime-m-p@users.noreply.github.com> jameswu2014 <545426914@qq.com> +jason_w jdomke <28772296+jdomke@users.noreply.github.com> jiahao su jiez <373447296@qq.com> @@ -891,6 +939,7 @@ jon-chuang <9093549+jon-chuang@users.noreply.github.com> jp-x-g jukofyork <69222624+jukofyork@users.noreply.github.com> junchao-loongson <68935141+junchao-loongson@users.noreply.github.com> +junchao-zhao <68935141+junchao-loongson@users.noreply.github.com> jwj7140 <32943891+jwj7140@users.noreply.github.com> k.h.lai kaizau @@ -925,6 +974,7 @@ ltoniazzi <61414566+ltoniazzi@users.noreply.github.com> luoyu-intel m3ndax maddes8cht <55592906+maddes8cht@users.noreply.github.com> +magicse mahorozte <41834471+mahorozte@users.noreply.github.com> makomk manikbhandari @@ -935,6 +985,7 @@ matt23654 matteo mdrokz mgroeber9110 <45620825+mgroeber9110@users.noreply.github.com> +midnight minarchist mj-shifu <77107165+mj-shifu@users.noreply.github.com> mmyjona @@ -958,10 +1009,12 @@ omahs <73983677+omahs@users.noreply.github.com> oobabooga <112222186+oobabooga@users.noreply.github.com> 
opparco ostix360 <55257054+ostix360@users.noreply.github.com> +pascal-lc <49066376+pascal-lc@users.noreply.github.com> pculliton peidaqi pengxin99 perserk +petterreinholdtsen piDack <104877312+piDack@users.noreply.github.com> pmysl postmasters @@ -983,6 +1036,7 @@ semidark serhii-nakon <57632032+serhii-nakon@users.noreply.github.com> sharpHL <132747147+sharpHL@users.noreply.github.com> shibe2 +simon886212 <37953122+simon886212@users.noreply.github.com> singularity <12184989+singularity-s0@users.noreply.github.com> sjinzh sjxx <63994076+ylsdamxssjxxdd@users.noreply.github.com> @@ -1000,10 +1054,12 @@ tarcey tc-mb <157115220+tc-mb@users.noreply.github.com> texmex76 <40733439+texmex76@users.noreply.github.com> thement <40525767+thement@users.noreply.github.com> +theraininsky <76763719+theraininsky@users.noreply.github.com> thewh1teagle <61390950+thewh1teagle@users.noreply.github.com> tjohnman toyer <2042519524@qq.com> tslmy +tv1wnd <55383215+tv1wnd@users.noreply.github.com> ubik2 uint256_t uint256_t @@ -1014,6 +1070,7 @@ valiray <133289098+valiray@users.noreply.github.com> vb vik viric +vmobilis <75476228+vmobilis@users.noreply.github.com> vodkaslime <646329483@qq.com> vvhg1 <94630311+vvhg1@users.noreply.github.com> vxiiduu <73044267+vxiiduu@users.noreply.github.com> @@ -1028,6 +1085,8 @@ wzy <32936898+Freed-Wu@users.noreply.github.com> xaedes xaedes xctan +xiaobing318 <71554036+xiaobing318@users.noreply.github.com> +xiaofei xloem <0xloem@gmail.com> yangli2 ymcki <84055651+ymcki@users.noreply.github.com> @@ -1045,4 +1104,3 @@ zrm 蕭澧邦 <45505768+shou692199@users.noreply.github.com> 谢乃闻 Нияз Гарифзянов <112617865+garrnizon@users.noreply.github.com> -Jason C.H From 1e2f78a00450593e2dfa458796fcdd9987300dfc Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Sun, 9 Mar 2025 19:08:20 +0200 Subject: [PATCH 023/398] server : add speculative decoding presets for FIM (#12287) --- common/arg.cpp | 38 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 38 insertions(+) diff --git a/common/arg.cpp b/common/arg.cpp index 3e549ede0e296..b96a5678f7346 100644 --- a/common/arg.cpp +++ b/common/arg.cpp @@ -2571,5 +2571,43 @@ common_params_context common_params_parser_init(common_params & params, llama_ex } ).set_examples({LLAMA_EXAMPLE_SERVER})); + add_opt(common_arg( + {"--fim-qwen-7b-spec"}, + string_format("use Qwen 2.5 Coder 7B + 0.5B draft for speculative decoding (note: can download weights from the internet)"), + [](common_params & params) { + params.hf_repo = "ggml-org/Qwen2.5-Coder-7B-Q8_0-GGUF"; + params.hf_file = "qwen2.5-coder-7b-q8_0.gguf"; + params.speculative.hf_repo = "ggml-org/Qwen2.5-Coder-0.5B-Q8_0-GGUF"; + params.speculative.hf_file = "qwen2.5-coder-0.5b-q8_0.gguf"; + params.speculative.n_gpu_layers = 99; + params.port = 8012; + params.n_gpu_layers = 99; + params.flash_attn = true; + params.n_ubatch = 1024; + params.n_batch = 1024; + params.n_ctx = 0; + params.n_cache_reuse = 256; + } + ).set_examples({LLAMA_EXAMPLE_SERVER})); + + add_opt(common_arg( + {"--fim-qwen-14b-spec"}, + string_format("use Qwen 2.5 Coder 14B + 0.5B draft for speculative decoding (note: can download weights from the internet)"), + [](common_params & params) { + params.hf_repo = "ggml-org/Qwen2.5-Coder-14B-Q8_0-GGUF"; + params.hf_file = "qwen2.5-coder-14b-q8_0.gguf"; + params.speculative.hf_repo = "ggml-org/Qwen2.5-Coder-0.5B-Q8_0-GGUF"; + params.speculative.hf_file = "qwen2.5-coder-0.5b-q8_0.gguf"; + params.speculative.n_gpu_layers = 99; + params.port = 8012; + params.n_gpu_layers = 99; + params.flash_attn = true; 
+ params.n_ubatch = 1024; + params.n_batch = 1024; + params.n_ctx = 0; + params.n_cache_reuse = 256; + } + ).set_examples({LLAMA_EXAMPLE_SERVER})); + return ctx_arg; } From 8352cdc87b207a735d34c431a36c425728cb4586 Mon Sep 17 00:00:00 2001 From: tc-mb <157115220+tc-mb@users.noreply.github.com> Date: Mon, 10 Mar 2025 16:33:24 +0800 Subject: [PATCH 024/398] llava : fix bug in minicpm-v code (#11513) * fix bug in minicpm-v code * update readme of minicpm-v --- examples/llava/README-minicpmo2.6.md | 34 +++---- examples/llava/README-minicpmv2.5.md | 88 ++++------------- examples/llava/README-minicpmv2.6.md | 96 ++++--------------- examples/llava/clip.cpp | 1 + examples/llava/minicpmv-cli.cpp | 35 +++++-- .../minicpmv-convert-image-encoder-to-gguf.py | 1 - 6 files changed, 80 insertions(+), 175 deletions(-) diff --git a/examples/llava/README-minicpmo2.6.md b/examples/llava/README-minicpmo2.6.md index 8f591506dbbb0..48c423238395b 100644 --- a/examples/llava/README-minicpmo2.6.md +++ b/examples/llava/README-minicpmo2.6.md @@ -5,13 +5,25 @@ Currently, this readme only supports minicpm-omni's image capabilities, and we w Download [MiniCPM-o-2_6](https://huggingface.co/openbmb/MiniCPM-o-2_6) PyTorch model from huggingface to "MiniCPM-o-2_6" folder. + +### Build llama.cpp +Readme modification time: 20250206 + +If there are differences in usage, please refer to the official build [documentation](https://github.com/ggerganov/llama.cpp/blob/master/docs/build.md) + Clone llama.cpp: ```bash -git clone git@github.com:OpenBMB/llama.cpp.git +git clone https://github.com/ggerganov/llama.cpp cd llama.cpp -git checkout minicpm-omni ``` +Build llama.cpp using `CMake`: +```bash +cmake -B build +cmake --build build --config Release +``` + + ### Usage of MiniCPM-o 2.6 Convert PyTorch model to gguf files (You can also download the converted [gguf](https://huggingface.co/openbmb/MiniCPM-o-2_6-gguf) by us) @@ -22,25 +34,15 @@ python ./examples/llava/minicpmv-convert-image-encoder-to-gguf.py -m ../MiniCPM- python ./convert_hf_to_gguf.py ../MiniCPM-o-2_6/model # quantize int4 version -./llama-quantize ../MiniCPM-o-2_6/model/ggml-model-f16.gguf ../MiniCPM-o-2_6/model/ggml-model-Q4_K_M.gguf Q4_K_M +./build/bin/llama-quantize ../MiniCPM-o-2_6/model/ggml-model-f16.gguf ../MiniCPM-o-2_6/model/ggml-model-Q4_K_M.gguf Q4_K_M ``` -Build llama.cpp using `CMake`: -https://github.com/ggml-org/llama.cpp/blob/master/docs/build.md - -```bash -cmake -B build -cmake --build build --config Release -``` Inference on Linux or Mac -``` +```bash # run f16 version -./llama-minicpmv-cli -m ../MiniCPM-o-2_6/model/ggml-model-f16.gguf --mmproj ../MiniCPM-o-2_6/mmproj-model-f16.gguf -c 4096 --temp 0.7 --top-p 0.8 --top-k 100 --repeat-penalty 1.05 --image xx.jpg -p "What is in the image?" +./build/bin/llama-minicpmv-cli -m ../MiniCPM-o-2_6/model/ggml-model-f16.gguf --mmproj ../MiniCPM-o-2_6/mmproj-model-f16.gguf -c 4096 --temp 0.7 --top-p 0.8 --top-k 100 --repeat-penalty 1.05 --image xx.jpg -p "What is in the image?" # run quantized int4 version -./llama-minicpmv-cli -m ../MiniCPM-o-2_6/model/ggml-model-Q4_K_M.gguf --mmproj ../MiniCPM-o-2_6/mmproj-model-f16.gguf -c 4096 --temp 0.7 --top-p 0.8 --top-k 100 --repeat-penalty 1.05 --image xx.jpg -p "What is in the image?" 
- -# or run in interactive mode -./llama-minicpmv-cli -m ../MiniCPM-o-2_6/model/ggml-model-Q4_K_M.gguf --mmproj ../MiniCPM-o-2_6/mmproj-model-f16.gguf -c 4096 --temp 0.7 --top-p 0.8 --top-k 100 --repeat-penalty 1.05 --image xx.jpg -i +./build/bin/llama-minicpmv-cli -m ../MiniCPM-o-2_6/model/ggml-model-Q4_K_M.gguf --mmproj ../MiniCPM-o-2_6/mmproj-model-f16.gguf -c 4096 --temp 0.7 --top-p 0.8 --top-k 100 --repeat-penalty 1.05 --image xx.jpg -p "What is in the image?" ``` diff --git a/examples/llava/README-minicpmv2.5.md b/examples/llava/README-minicpmv2.5.md index b0e72a0fa7a78..6bfe7abd16487 100644 --- a/examples/llava/README-minicpmv2.5.md +++ b/examples/llava/README-minicpmv2.5.md @@ -4,13 +4,26 @@ Download [MiniCPM-Llama3-V-2_5](https://huggingface.co/openbmb/MiniCPM-Llama3-V-2_5) PyTorch model from huggingface to "MiniCPM-Llama3-V-2_5" folder. + +### Build llama.cpp +Readme modification time: 20250206 + +If there are differences in usage, please refer to the official build [documentation](https://github.com/ggerganov/llama.cpp/blob/master/docs/build.md) + Clone llama.cpp: ```bash git clone https://github.com/ggml-org/llama.cpp cd llama.cpp ``` -### Usage +Build llama.cpp using `CMake`: +```bash +cmake -B build +cmake --build build --config Release +``` + + +### Usage of MiniCPM-Llama3-V 2.5 Convert PyTorch model to gguf files (You can also download the converted [gguf](https://huggingface.co/openbmb/MiniCPM-Llama3-V-2_5-gguf) by us) @@ -20,80 +33,15 @@ python ./examples/llava/minicpmv-convert-image-encoder-to-gguf.py -m ../MiniCPM- python ./convert_hf_to_gguf.py ../MiniCPM-Llama3-V-2_5/model # quantize int4 version -./llama-quantize ../MiniCPM-Llama3-V-2_5/model/model-8B-F16.gguf ../MiniCPM-Llama3-V-2_5/model/ggml-model-Q4_K_M.gguf Q4_K_M +./build/bin/llama-quantize ../MiniCPM-Llama3-V-2_5/model/model-8B-F16.gguf ../MiniCPM-Llama3-V-2_5/model/ggml-model-Q4_K_M.gguf Q4_K_M ``` -Build for Linux or Mac - -```bash -make -make llama-minicpmv-cli -``` Inference on Linux or Mac -``` +```bash # run f16 version -./llama-minicpmv-cli -m ../MiniCPM-Llama3-V-2_5/model/model-8B-F16.gguf --mmproj ../MiniCPM-Llama3-V-2_5/mmproj-model-f16.gguf -c 4096 --temp 0.7 --top-p 0.8 --top-k 100 --repeat-penalty 1.05 --image xx.jpg -p "What is in the image?" +./build/bin/llama-minicpmv-cli -m ../MiniCPM-Llama3-V-2_5/model/model-8B-F16.gguf --mmproj ../MiniCPM-Llama3-V-2_5/mmproj-model-f16.gguf -c 4096 --temp 0.7 --top-p 0.8 --top-k 100 --repeat-penalty 1.05 --image xx.jpg -p "What is in the image?" # run quantized int4 version -./llama-minicpmv-cli -m ../MiniCPM-Llama3-V-2_5/model/ggml-model-Q4_K_M.gguf --mmproj ../MiniCPM-Llama3-V-2_5/mmproj-model-f16.gguf -c 4096 --temp 0.7 --top-p 0.8 --top-k 100 --repeat-penalty 1.05 --image xx.jpg -p "What is in the image?" - -# or run in interactive mode -./llama-minicpmv-cli -m ../MiniCPM-Llama3-V-2_5/model/ggml-model-Q4_K_M.gguf --mmproj ../MiniCPM-Llama3-V-2_5/mmproj-model-f16.gguf -c 4096 --temp 0.7 --top-p 0.8 --top-k 100 --repeat-penalty 1.05 --image xx.jpg -i -``` - -### Android - -#### Build on Android device using Termux -We found that build on Android device would bring better runtime performance, so we recommend to build on device. - -[Termux](https://github.com/termux/termux-app#installation) is a terminal app on Android device (no root required). 
- -Install tools in Termux: -``` -apt update && apt upgrade -y -apt install git make cmake -``` - -It's recommended to move your model inside the `~/` directory for best performance: -``` -cd storage/downloads -mv model.gguf ~/ -``` - -#### Building the Project using Android NDK -Obtain the [Android NDK](https://developer.android.com/ndk) and then build with CMake. - -Execute the following commands on your computer to avoid downloading the NDK to your mobile. Alternatively, you can also do this in Termux: - -```bash -mkdir build-android -cd build-android -export NDK=/your_ndk_path -cmake -DCMAKE_TOOLCHAIN_FILE=$NDK/build/cmake/android.toolchain.cmake -DANDROID_ABI=arm64-v8a -DANDROID_PLATFORM=android-23 -DCMAKE_C_FLAGS=-march=armv8.4a+dotprod .. -make -``` - -Install [termux](https://github.com/termux/termux-app#installation) on your device and run `termux-setup-storage` to get access to your SD card (if Android 11+ then run the command twice). - -Finally, copy these built `llama` binaries and the model file to your device storage. Because the file permissions in the Android sdcard cannot be changed, you can copy the executable files to the `/data/data/com.termux/files/home/bin` path, and then execute the following commands in Termux to add executable permission: - -(Assumed that you have pushed the built executable files to the /sdcard/llama.cpp/bin path using `adb push`) -``` -$cp -r /sdcard/llama.cpp/bin /data/data/com.termux/files/home/ -$cd /data/data/com.termux/files/home/bin -$chmod +x ./* -``` - -Download models and push them to `/sdcard/llama.cpp/`, then move it to `/data/data/com.termux/files/home/model/` - -``` -$mv /sdcard/llama.cpp/ggml-model-Q4_K_M.gguf /data/data/com.termux/files/home/model/ -$mv /sdcard/llama.cpp/mmproj-model-f16.gguf /data/data/com.termux/files/home/model/ -``` - -Now, you can start chatting: -``` -$cd /data/data/com.termux/files/home/bin -$./llama-minicpmv-cli -m ../model/ggml-model-Q4_K_M.gguf --mmproj ../model/mmproj-model-f16.gguf -c 4096 --temp 0.7 --top-p 0.8 --top-k 100 --repeat-penalty 1.05 --image xx.jpg -p "What is in the image?" +./build/bin/llama-minicpmv-cli -m ../MiniCPM-Llama3-V-2_5/model/ggml-model-Q4_K_M.gguf --mmproj ../MiniCPM-Llama3-V-2_5/mmproj-model-f16.gguf -c 4096 --temp 0.7 --top-p 0.8 --top-k 100 --repeat-penalty 1.05 --image xx.jpg -p "What is in the image?" ``` diff --git a/examples/llava/README-minicpmv2.6.md b/examples/llava/README-minicpmv2.6.md index c4be5e5dd6484..2df39cdbac78a 100644 --- a/examples/llava/README-minicpmv2.6.md +++ b/examples/llava/README-minicpmv2.6.md @@ -4,13 +4,25 @@ Download [MiniCPM-V-2_6](https://huggingface.co/openbmb/MiniCPM-V-2_6) PyTorch model from huggingface to "MiniCPM-V-2_6" folder. 
+ +### Build llama.cpp +Readme modification time: 20250206 + +If there are differences in usage, please refer to the official build [documentation](https://github.com/ggerganov/llama.cpp/blob/master/docs/build.md) + Clone llama.cpp: ```bash -git clone git@github.com:OpenBMB/llama.cpp.git +git clone https://github.com/ggerganov/llama.cpp cd llama.cpp -git checkout minicpmv-main ``` +Build llama.cpp using `CMake`: +```bash +cmake -B build +cmake --build build --config Release +``` + + ### Usage of MiniCPM-V 2.6 Convert PyTorch model to gguf files (You can also download the converted [gguf](https://huggingface.co/openbmb/MiniCPM-V-2_6-gguf) by us) @@ -21,87 +33,15 @@ python ./examples/llava/minicpmv-convert-image-encoder-to-gguf.py -m ../MiniCPM- python ./convert_hf_to_gguf.py ../MiniCPM-V-2_6/model # quantize int4 version -./llama-quantize ../MiniCPM-V-2_6/model/ggml-model-f16.gguf ../MiniCPM-V-2_6/model/ggml-model-Q4_K_M.gguf Q4_K_M +./build/bin/llama-quantize ../MiniCPM-V-2_6/model/ggml-model-f16.gguf ../MiniCPM-V-2_6/model/ggml-model-Q4_K_M.gguf Q4_K_M ``` -Build for Linux or Mac - -```bash -make -make llama-minicpmv-cli -``` Inference on Linux or Mac -``` +```bash # run f16 version -./llama-minicpmv-cli -m ../MiniCPM-V-2_6/model/ggml-model-f16.gguf --mmproj ../MiniCPM-V-2_6/mmproj-model-f16.gguf -c 4096 --temp 0.7 --top-p 0.8 --top-k 100 --repeat-penalty 1.05 --image xx.jpg -p "What is in the image?" +./build/bin/llama-minicpmv-cli -m ../MiniCPM-V-2_6/model/ggml-model-f16.gguf --mmproj ../MiniCPM-V-2_6/mmproj-model-f16.gguf -c 4096 --temp 0.7 --top-p 0.8 --top-k 100 --repeat-penalty 1.05 --image xx.jpg -p "What is in the image?" # run quantized int4 version -./llama-minicpmv-cli -m ../MiniCPM-V-2_6/model/ggml-model-Q4_K_M.gguf --mmproj ../MiniCPM-V-2_6/mmproj-model-f16.gguf -c 4096 --temp 0.7 --top-p 0.8 --top-k 100 --repeat-penalty 1.05 --image xx.jpg -p "What is in the image?" - -# or run in interactive mode -./llama-minicpmv-cli -m ../MiniCPM-V-2_6/model/ggml-model-Q4_K_M.gguf --mmproj ../MiniCPM-V-2_6/mmproj-model-f16.gguf -c 4096 --temp 0.7 --top-p 0.8 --top-k 100 --repeat-penalty 1.05 --image xx.jpg -i -``` - -### Video -Install FFmpeg -``` -brew install ffmpeg -brew install pkg-config -``` - -### Android - -#### Build on Android device using Termux -We found that build on Android device would bring better runtime performance, so we recommend to build on device. - -[Termux](https://github.com/termux/termux-app#installation) is a terminal app on Android device (no root required). - -Install tools in Termux: -``` -apt update && apt upgrade -y -apt install git make cmake -``` - -It's recommended to move your model inside the `~/` directory for best performance: -``` -cd storage/downloads -mv model.gguf ~/ -``` - -#### Building the Project using Android NDK -Obtain the [Android NDK](https://developer.android.com/ndk) and then build with CMake. - -Execute the following commands on your computer to avoid downloading the NDK to your mobile. Alternatively, you can also do this in Termux: - -```bash -mkdir build-android -cd build-android -export NDK=/your_ndk_path -cmake -DCMAKE_TOOLCHAIN_FILE=$NDK/build/cmake/android.toolchain.cmake -DANDROID_ABI=arm64-v8a -DANDROID_PLATFORM=android-23 -DCMAKE_C_FLAGS=-march=armv8.4a+dotprod .. -make -``` - -Install [termux](https://github.com/termux/termux-app#installation) on your device and run `termux-setup-storage` to get access to your SD card (if Android 11+ then run the command twice). 
- -Finally, copy these built `llama` binaries and the model file to your device storage. Because the file permissions in the Android sdcard cannot be changed, you can copy the executable files to the `/data/data/com.termux/files/home/bin` path, and then execute the following commands in Termux to add executable permission: - -(Assumed that you have pushed the built executable files to the /sdcard/llama.cpp/bin path using `adb push`) -``` -$cp -r /sdcard/llama.cpp/bin /data/data/com.termux/files/home/ -$cd /data/data/com.termux/files/home/bin -$chmod +x ./* -``` - -Download models and push them to `/sdcard/llama.cpp/`, then move it to `/data/data/com.termux/files/home/model/` - -``` -$mv /sdcard/llama.cpp/ggml-model-Q4_K_M.gguf /data/data/com.termux/files/home/model/ -$mv /sdcard/llama.cpp/mmproj-model-f16.gguf /data/data/com.termux/files/home/model/ -``` - -Now, you can start chatting: -``` -$cd /data/data/com.termux/files/home/bin -$./llama-minicpmv-cli -m ../model/ggml-model-Q4_K_M.gguf --mmproj ../model/mmproj-model-f16.gguf -c 4096 --temp 0.7 --top-p 0.8 --top-k 100 --repeat-penalty 1.05 --image xx.jpg -p "What is in the image?" +./build/bin/llama-minicpmv-cli -m ../MiniCPM-V-2_6/model/ggml-model-Q4_K_M.gguf --mmproj ../MiniCPM-V-2_6/mmproj-model-f16.gguf -c 4096 --temp 0.7 --top-p 0.8 --top-k 100 --repeat-penalty 1.05 --image xx.jpg -p "What is in the image?" ``` diff --git a/examples/llava/clip.cpp b/examples/llava/clip.cpp index 76d4a78520575..3f558b7bdbdf3 100644 --- a/examples/llava/clip.cpp +++ b/examples/llava/clip.cpp @@ -1378,6 +1378,7 @@ struct clip_ctx * clip_model_load(const char * fname, const int verbosity = 1) { LOG_INF("%s: vision_encoder: %d\n", __func__, new_clip->has_vision_encoder); LOG_INF("%s: llava_projector: %d\n", __func__, new_clip->has_llava_projector); LOG_INF("%s: minicpmv_projector: %d\n", __func__, new_clip->has_minicpmv_projector); + LOG_INF("%s: minicpmv_version: %d\n", __func__, new_clip->minicpmv_version); LOG_INF("%s: glm_projector: %d\n", __func__, new_clip->has_glm_projector); LOG_INF("%s: model size: %.2f MB\n", __func__, model_size / 1024.0 / 1024.0); LOG_INF("%s: metadata size: %.2f MB\n", __func__, ggml_get_mem_size(meta) / 1024.0 / 1024.0); diff --git a/examples/llava/minicpmv-cli.cpp b/examples/llava/minicpmv-cli.cpp index 53d902d616e85..23b3de4db273a 100644 --- a/examples/llava/minicpmv-cli.cpp +++ b/examples/llava/minicpmv-cli.cpp @@ -148,19 +148,34 @@ static void process_image(struct llava_context * ctx_llava, struct llava_image_e process_eval_image_embed(ctx_llava, embeds, params->n_batch, &n_past, idx++); eval_string(ctx_llava->ctx_llama, std::string("").c_str(), params->n_batch, &n_past, false); if (num_image_embeds > 1) { - size_t num_image_embeds_col = clip_uhd_num_image_embeds_col(ctx_llava->ctx_clip); - eval_string(ctx_llava->ctx_llama, std::string("").c_str(), params->n_batch, &n_past, false); - for (size_t i = 0; i < (num_image_embeds-1)/num_image_embeds_col; ++i) { - for (size_t j = 0; j < num_image_embeds_col; ++j) { - eval_string(ctx_llava->ctx_llama, std::string("").c_str(), params->n_batch, &n_past, false); - process_eval_image_embed(ctx_llava, embeds, params->n_batch, &n_past, idx++); - eval_string(ctx_llava->ctx_llama, std::string("").c_str(), params->n_batch, &n_past, false); - if (j == num_image_embeds_col - 1) { - eval_string(ctx_llava->ctx_llama, std::string("\n").c_str(), params->n_batch, &n_past, false); + if (has_minicpmv_projector == 2) { + size_t num_image_embeds_col = 
clip_uhd_num_image_embeds_col(ctx_llava->ctx_clip); + eval_string(ctx_llava->ctx_llama, std::string("").c_str(), params->n_batch, &n_past, false); + for (size_t i = 0; i < (num_image_embeds-1)/num_image_embeds_col; ++i) { + for (size_t j = 0; j < num_image_embeds_col; ++j) { + eval_string(ctx_llava->ctx_llama, std::string("").c_str(), params->n_batch, &n_past, false); + process_eval_image_embed(ctx_llava, embeds, params->n_batch, &n_past, idx++); + eval_string(ctx_llava->ctx_llama, std::string("").c_str(), params->n_batch, &n_past, false); + if (j == num_image_embeds_col - 1) { + eval_string(ctx_llava->ctx_llama, std::string("\n").c_str(), params->n_batch, &n_past, false); + } + } + } + eval_string(ctx_llava->ctx_llama, std::string("").c_str(), params->n_batch, &n_past, false); + } + else if (has_minicpmv_projector == 3 || has_minicpmv_projector == 4) { + size_t num_image_embeds_col = clip_uhd_num_image_embeds_col(ctx_llava->ctx_clip); + for (size_t i = 0; i < (num_image_embeds-1)/num_image_embeds_col; ++i) { + for (size_t j = 0; j < num_image_embeds_col; ++j) { + eval_string(ctx_llava->ctx_llama, std::string("").c_str(), params->n_batch, &n_past, false); + process_eval_image_embed(ctx_llava, embeds, params->n_batch, &n_past, idx++); + eval_string(ctx_llava->ctx_llama, std::string("").c_str(), params->n_batch, &n_past, false); + if (j == num_image_embeds_col - 1) { + eval_string(ctx_llava->ctx_llama, std::string("\n").c_str(), params->n_batch, &n_past, false); + } } } } - eval_string(ctx_llava->ctx_llama, std::string("").c_str(), params->n_batch, &n_past, false); } LOG_INF("%s: image token past: %d\n", __func__, n_past); } diff --git a/examples/llava/minicpmv-convert-image-encoder-to-gguf.py b/examples/llava/minicpmv-convert-image-encoder-to-gguf.py index 9b196757f07c9..cfe0961f9891a 100644 --- a/examples/llava/minicpmv-convert-image-encoder-to-gguf.py +++ b/examples/llava/minicpmv-convert-image-encoder-to-gguf.py @@ -597,7 +597,6 @@ def bytes_to_unicode(): fname_middle = "mmproj-" has_text_encoder = False has_minicpmv_projector = True - minicpmv_version = 4 elif args.vision_only: fname_middle = "vision-" has_text_encoder = False From 2b3a25c212f8c4d8a49cec23e03343a7719d51c9 Mon Sep 17 00:00:00 2001 From: Olivier Chafik Date: Mon, 10 Mar 2025 09:44:42 +0000 Subject: [PATCH 025/398] `sampler`: fixes trigger tokens + lazy grammars (fix typo cast from token to string) (#12291) * Fix typo in lazy grammar handling (fixes trigger tokens) Co-authored-by: Georgi Gerganov --------- Co-authored-by: Georgi Gerganov --- examples/server/server.cpp | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/examples/server/server.cpp b/examples/server/server.cpp index 8386f4eebba48..aec8b9eed05c0 100644 --- a/examples/server/server.cpp +++ b/examples/server/server.cpp @@ -384,8 +384,9 @@ struct server_task { SRV_DBG("Grammar trigger token: %d (`%s`)\n", token, word.c_str()); common_grammar_trigger trigger; trigger.type = COMMON_GRAMMAR_TRIGGER_TYPE_TOKEN; - trigger.value = (llama_token) token; - params.sampling.grammar_triggers.push_back(trigger); + trigger.value = word; + trigger.token = token; + params.sampling.grammar_triggers.push_back(std::move(trigger)); } else { SRV_DBG("Grammar trigger word: `%s`\n", word.c_str()); params.sampling.grammar_triggers.push_back({COMMON_GRAMMAR_TRIGGER_TYPE_WORD, word}); From 87c2630546cd8ccc836ae4c7d87d03fa597d2267 Mon Sep 17 00:00:00 2001 From: Olivier Chafik Date: Mon, 10 Mar 2025 09:45:07 +0000 Subject: [PATCH 026/398] allow missing content in message 
if tool_calls provided (#12293) --- common/chat.cpp | 29 ++++++++++++++++------------- tests/test-chat.cpp | 15 +++++++++++++++ 2 files changed, 31 insertions(+), 13 deletions(-) diff --git a/common/chat.cpp b/common/chat.cpp index 1b10219ccab04..1b3f286afc9e1 100644 --- a/common/chat.cpp +++ b/common/chat.cpp @@ -60,7 +60,9 @@ std::vector common_chat_msgs_parse_oaicompat(const json & messa } msg.role = message.at("role"); - if (message.contains("content")) { + auto has_content = message.contains("content"); + auto has_tool_calls = message.contains("tool_calls"); + if (has_content) { const auto & content = message.at("content"); if (content.is_string()) { msg.content = content; @@ -81,19 +83,8 @@ std::vector common_chat_msgs_parse_oaicompat(const json & messa } else if (!content.is_null()) { throw std::runtime_error("Invalid 'content' type: expected string or array, got " + content.dump() + " (ref: https://github.com/ggml-org/llama.cpp/issues/8367)"); } - } else { - throw std::runtime_error("Expected 'content' (ref: https://github.com/ggml-org/llama.cpp/issues/8367)"); - } - if (message.contains("reasoning_content")) { - msg.reasoning_content = message.at("reasoning_content"); - } - if (message.contains("name")) { - msg.tool_name = message.at("name"); - } - if (message.contains("tool_call_id")) { - msg.tool_call_id = message.at("tool_call_id"); } - if (message.contains("tool_calls")) { + if (has_tool_calls) { for (const auto & tool_call : message.at("tool_calls")) { common_chat_tool_call tc; if (!tool_call.contains("type")) { @@ -118,6 +109,18 @@ std::vector common_chat_msgs_parse_oaicompat(const json & messa msg.tool_calls.push_back(tc); } } + if (!has_content && !has_tool_calls) { + throw std::runtime_error("Expected 'content' or 'tool_calls' (ref: https://github.com/ggml-org/llama.cpp/issues/8367 & https://github.com/ggml-org/llama.cpp/issues/12279)"); + } + if (message.contains("reasoning_content")) { + msg.reasoning_content = message.at("reasoning_content"); + } + if (message.contains("name")) { + msg.tool_name = message.at("name"); + } + if (message.contains("tool_call_id")) { + msg.tool_call_id = message.at("tool_call_id"); + } msgs.push_back(msg); } diff --git a/tests/test-chat.cpp b/tests/test-chat.cpp index 35a307c632e68..35c7ee34e3234 100644 --- a/tests/test-chat.cpp +++ b/tests/test-chat.cpp @@ -480,6 +480,21 @@ static void test_msgs_oaicompat_json_conversion() { "]" ), common_chat_msgs_to_json_oaicompat({message_assist_call_python}).dump(2)); + + auto res = common_chat_msgs_parse_oaicompat(json::parse("[{\"role\": \"assistant\", \"tool_calls\": []}]")); + assert_equals(1, res.size()); + assert_equals(res[0].role, "assistant"); + assert_equals(true, res[0].content.empty()); + assert_equals(true, res[0].tool_calls.empty()); + + try { + common_chat_msgs_parse_oaicompat(json::parse("[{\"role\": \"assistant\"}]")); + throw std::runtime_error("Expected exception"); + } catch (const std::exception & e) { + if (std::string(e.what()).find("'content'") == std::string::npos) { + throw std::runtime_error("Expected exception about missing 'content'"); + } + } } static void test_tools_oaicompat_json_conversion() { From be421fc429795d135786f5a0e489709220a9c43a Mon Sep 17 00:00:00 2001 From: Olivier Chafik Date: Mon, 10 Mar 2025 09:45:29 +0000 Subject: [PATCH 027/398] `tool-call`: ensure there's always a non-empty tool call id (#12292) --- examples/server/server.cpp | 5 ++++- examples/server/tests/unit/test_tool_call.py | 3 +++ examples/server/utils.hpp | 4 ++++ 3 files changed, 11 
insertions(+), 1 deletion(-) diff --git a/examples/server/server.cpp b/examples/server/server.cpp index aec8b9eed05c0..8cb8d0033f7d9 100644 --- a/examples/server/server.cpp +++ b/examples/server/server.cpp @@ -751,7 +751,10 @@ struct server_task_result_cmpl_final : server_task_result { {"name", tc.name}, {"arguments", tc.arguments}, }}, - {"id", tc.id}, + // Some templates generate and require an id (sometimes in a very specific format, e.g. Mistral Nemo). + // We only generate a random id for the ones that don't generate one by themselves + // (they also won't get to see it as their template likely doesn't use it, so it's all for the client) + {"id", tc.id.empty() ? gen_tool_call_id() : tc.id}, }); } message["tool_calls"] = tool_calls; diff --git a/examples/server/tests/unit/test_tool_call.py b/examples/server/tests/unit/test_tool_call.py index 25bddbaee7d56..569c2a1f8ea31 100755 --- a/examples/server/tests/unit/test_tool_call.py +++ b/examples/server/tests/unit/test_tool_call.py @@ -92,6 +92,7 @@ def do_test_completion_with_required_tool_tiny(server: ServerProcess, tool: dict assert tool_calls and len(tool_calls) == 1, f'Expected 1 tool call in {choice["message"]}' tool_call = tool_calls[0] assert choice["message"].get("content") in (None, ""), f'Expected no content in {choice["message"]}' + assert len(tool_call.get("id", "")) > 0, f'Expected non empty tool call id in {tool_call}' expected_function_name = "python" if tool["type"] == "code_interpreter" else tool["function"]["name"] assert expected_function_name == tool_call["function"]["name"] actual_arguments = tool_call["function"]["arguments"] @@ -373,6 +374,7 @@ def do_test_weather(server: ServerProcess, **kwargs): tool_call = tool_calls[0] # assert choice["message"].get("content") in (None, ""), f'Expected no content in {choice["message"]}' assert tool_call["function"]["name"] == WEATHER_TOOL["function"]["name"], f'Expected weather tool call, got {tool_call["function"]["name"]}' + assert len(tool_call.get("id", "")) > 0, f'Expected non empty tool call id in {tool_call}' actual_arguments = json.loads(tool_call["function"]["arguments"]) assert 'location' in actual_arguments, f"location not found in {json.dumps(actual_arguments)}" location = actual_arguments["location"] @@ -596,6 +598,7 @@ def do_test_hello_world(server: ServerProcess, **kwargs): tool_call = tool_calls[0] # assert choice["message"].get("content") in (None, ""), f'Expected no content in {choice["message"]}' assert tool_call["function"]["name"] == PYTHON_TOOL["function"]["name"] + assert len(tool_call.get("id", "")) > 0, f'Expected non empty tool call id in {tool_call}' actual_arguments = json.loads(tool_call["function"]["arguments"]) assert 'code' in actual_arguments, f"code not found in {json.dumps(actual_arguments)}" code = actual_arguments["code"] diff --git a/examples/server/utils.hpp b/examples/server/utils.hpp index 393e3927c7fd6..36ad276fd3ce0 100644 --- a/examples/server/utils.hpp +++ b/examples/server/utils.hpp @@ -435,6 +435,10 @@ static std::string gen_chatcmplid() { return "chatcmpl-" + random_string(); } +static std::string gen_tool_call_id() { + return random_string(); +} + // // other common utils // From 4e39a3c332f84b890e91353ec448502cb8373a6f Mon Sep 17 00:00:00 2001 From: Olivier Chafik Date: Mon, 10 Mar 2025 10:59:03 +0000 Subject: [PATCH 028/398] `server`: extract tags from qwq outputs (#12297) * extract tags from qwq outputs * const for all static regexes in chat.cpp --- common/chat.cpp | 280 +++++++++++++++++++++++--------------------- 
common/chat.h | 1 + tests/test-chat.cpp | 13 ++ 3 files changed, 161 insertions(+), 133 deletions(-) diff --git a/common/chat.cpp b/common/chat.cpp index 1b3f286afc9e1..62ca26ad7609c 100644 --- a/common/chat.cpp +++ b/common/chat.cpp @@ -445,6 +445,7 @@ std::string common_chat_format_name(common_chat_format format) { case COMMON_CHAT_FORMAT_FUNCTIONARY_V3_2: return "Functionary v3.2"; case COMMON_CHAT_FORMAT_FUNCTIONARY_V3_1_LLAMA_3_1: return "Functionary v3.1 Llama 3.1"; case COMMON_CHAT_FORMAT_HERMES_2_PRO: return "Hermes 2 Pro"; + case COMMON_CHAT_FORMAT_HERMES_2_PRO_EXTRACT_REASONING: return "Hermes 2 Pro (extract reasoning)"; case COMMON_CHAT_FORMAT_COMMAND_R7B: return "Command R7B"; case COMMON_CHAT_FORMAT_COMMAND_R7B_EXTRACT_REASONING: return "Command R7B (extract reasoning)"; default: @@ -878,9 +879,9 @@ static common_chat_params common_chat_params_init_command_r7b(const common_chat_ return data; } static common_chat_msg common_chat_parse_command_r7b(const std::string & input, bool extract_reasoning) { - static std::regex thought_regex("(<\\|START_THINKING\\|>([\\s\\S]*?)<\\|END_THINKING\\|>)([\\s\\S]*)"); - static std::regex action_regex("<\\|START_ACTION\\|>([\\s\\S]*?)<\\|END_ACTION\\|>"); - static std::regex response_regex("(?:<\\|START_RESPONSE\\|>)?([\\s\\S]*?)<\\|END_RESPONSE\\|>"); + static const std::regex thought_regex("(<\\|START_THINKING\\|>([\\s\\S]*?)<\\|END_THINKING\\|>)([\\s\\S]*)"); + static const std::regex action_regex("<\\|START_ACTION\\|>([\\s\\S]*?)<\\|END_ACTION\\|>"); + static const std::regex response_regex("(?:<\\|START_RESPONSE\\|>)?([\\s\\S]*?)<\\|END_RESPONSE\\|>"); std::smatch match; @@ -1012,10 +1013,10 @@ static common_chat_params common_chat_params_init_llama_3_1_tool_calls(const com } static common_chat_msg common_chat_parse_llama_3_1(const std::string & input, bool with_builtin_tools = false) { // TODO: tighten & simplify the parser, don't accept leading text context. - static std::regex function_regex( + static const std::regex function_regex( "\\s*\\{\\s*(?:\"type\"\\s*:\\s*\"function\"\\s*,\\s*)?\"name\"\\s*:\\s*\"([^\"]+)\"\\s*,\\s*\"parameters\"\\s*: "); - static std::regex close_regex("\\}\\s*"); - static std::regex builtin_call_regex("<\\|python_tag\\|>\\s*([^.(]+)\\s*\\.\\s*call\\s*\\(\\s*([\\w]+)\\s*=\\s*([\\s\\S]*?)\\)"); + static const std::regex close_regex("\\}\\s*"); + static const std::regex builtin_call_regex("<\\|python_tag\\|>\\s*([^.(]+)\\s*\\.\\s*call\\s*\\(\\s*([\\w]+)\\s*=\\s*([\\s\\S]*?)\\)"); if (with_builtin_tools) { std::smatch match; @@ -1105,34 +1106,42 @@ static common_chat_params common_chat_params_init_deepseek_r1(const common_chat_ data.format = inputs.extract_reasoning ? 
COMMON_CHAT_FORMAT_DEEPSEEK_R1_EXTRACT_REASONING : COMMON_CHAT_FORMAT_DEEPSEEK_R1; return data; } -static common_chat_msg common_chat_parse_deepseek_r1(const std::string & input, bool extract_reasoning) { - static std::regex function_regex("<|tool▁call▁begin|>function<|tool▁sep|>([^\n]+)\n```json\n"); - static std::regex close_regex("```[\\s\\r\\n]*<|tool▁call▁end|>"); - static std::regex reasoning_content_regex("((?:)?([\\s\\S\\r\\n]*?))?([\\s\\S\\r\\n]*)"); - static std::regex tool_calls_regex("[\\s\\r\\n]*(?:<|tool▁calls▁begin|>|<|tool_calls_begin|>|<|tool calls begin|>|<|tool\\\\_calls\\\\_begin|>)([\\s\\S\\r\\n]*?)<|tool▁calls▁end|>"); - common_chat_msg msg; - msg.role = "assistant"; +static common_chat_msg handle_think_tag_prelude(const std::string & input, bool extract_reasoning, const std::function & rest_parser) { std::smatch match; + static const std::regex reasoning_content_regex("((?:)?([\\s\\S\\r\\n]*?))?([\\s\\S\\r\\n]*)"); if (std::regex_match(input, match, reasoning_content_regex)) { - std::string rest; + auto rest = match[3].str(); + auto msg = rest_parser(rest); + auto reasoning_content = string_strip(match[2].str()); if (extract_reasoning) { - msg.reasoning_content = string_strip(match[2].str()); - } else { - msg.content = match[1].str(); + msg.reasoning_content = reasoning_content; + } else if (!reasoning_content.empty()) { + std::ostringstream content; + content << "" << reasoning_content << "" << msg.content; + msg.content = content.str(); } - rest = match[3].str(); + return msg; + } + return rest_parser(input); +} +static common_chat_msg common_chat_parse_deepseek_r1(const std::string & input, bool extract_reasoning) { + return handle_think_tag_prelude(input, extract_reasoning, [](const std::string & input) { + static const std::regex function_regex("<|tool▁call▁begin|>function<|tool▁sep|>([^\n]+)\n```json\n"); + static const std::regex close_regex("```[\\s\\r\\n]*<|tool▁call▁end|>"); + static const std::regex tool_calls_regex("[\\s\\r\\n]*(?:<|tool▁calls▁begin|>|<|tool_calls_begin|>|<|tool calls begin|>|<|tool\\\\_calls\\\\_begin|>)([\\s\\S\\r\\n]*?)<|tool▁calls▁end|>"); - if (std::regex_search(rest, match, tool_calls_regex)) { + common_chat_msg msg; + msg.role = "assistant"; + std::smatch match; + if (std::regex_search(input, match, tool_calls_regex)) { auto tool_calls = match[1].str(); auto msg2 = parse_json_tool_calls(tool_calls, std::nullopt, function_regex, close_regex); msg.tool_calls = std::move(msg2.tool_calls); } else { - msg.content += std::string(rest.begin() + rest.find_first_not_of(" \r\n"), rest.end()); + msg.content = input; } - } else { - msg.content = input; - } - return msg; + return msg; + }); } static common_chat_params common_chat_params_init_firefunction_v2(const common_chat_template & tmpl, const struct templates_params & inputs) { @@ -1237,8 +1246,8 @@ static common_chat_params common_chat_params_init_functionary_v3_2(const common_ } static common_chat_msg common_chat_parse_functionary_v3_2(const std::string & input) { - static std::regex function_regex(R"((?:>>>)?(?:assistant<|end_header_id|>\n)?(\w+)\n)"); - static std::regex close_regex(R"($|(?=>>>))"); + static const std::regex function_regex(R"((?:>>>)?(?:assistant<|end_header_id|>\n)?(\w+)\n)"); + static const std::regex close_regex(R"($|(?=>>>))"); std::string content; auto it = input.begin(); @@ -1327,7 +1336,7 @@ static common_chat_params common_chat_params_init_functionary_v3_1_llama_3_1(con } static common_chat_msg common_chat_parse_functionary_v3_1_llama_3_1(const std::string & 
input) { // This version of Functionary still supports the llama 3.1 tool call format for the python tool. - static std::regex python_tag_regex(R"(<\|python_tag\|>([\s\S\n]*)$)"); + static const std::regex python_tag_regex(R"(<\|python_tag\|>([\s\S\n]*)$)"); std::smatch match; if (std::regex_search(input, match, python_tag_regex)) { auto code = match[1].str(); @@ -1341,8 +1350,8 @@ static common_chat_msg common_chat_parse_functionary_v3_1_llama_3_1(const std::s }); return msg; } - static std::regex function_regex(R"()"); - static std::regex close_regex(R"()"); + static const std::regex function_regex(R"()"); + static const std::regex close_regex(R"()"); // TODO: tighten & simplify. return parse_json_tool_calls(input, std::nullopt, function_regex, close_regex); } @@ -1409,6 +1418,8 @@ static common_chat_params common_chat_params_init_hermes_2_pro(const common_chat "(?:```(?:json|xml)?\n\\s*)?(?:|||)?\\s*\\{\\s*\"", //name\"\\s*:\\s*\"" + escaped_name + "\"", }); data.preserved_tokens = { + "", + "", "", "", "" // match 2 (open_tag) - "|" - "|" - "|" - "|" - "|" - "|" - "|" - ")?" - "(\\s*\\{\\s*\"name\"\\s*:[\\s\\S]*)" // match 3 (named tool call + rest) - ")" - "|" - "(?:]+)>" // match 4 (function name) - "|)" // match 5 (function name again) - "([\\s\\S]*)" // match 6 (function arguments + rest)})" - ); +static common_chat_msg common_chat_parse_hermes_2_pro(const std::string& input, bool extract_reasoning) { + return handle_think_tag_prelude(input, extract_reasoning, [](const std::string & input) { + static const std::regex open_regex( + "(?:" + "(```(?:xml|json)?\\n\\s*)?" // match 1 (block_start) + "(" // match 2 (open_tag) + "|" + "|" + "|" + "|" + "|" + "|" + "|" + ")?" + "(\\s*\\{\\s*\"name\"\\s*:[\\s\\S]*)" // match 3 (named tool call + rest) + ")" + "|" + "(?:]+)>" // match 4 (function name) + "|)" // match 5 (function name again) + "([\\s\\S]*)" // match 6 (function arguments + rest)})" + ); - try { - - common_chat_msg msg; - msg.role = "assistant"; + try { + common_chat_msg msg; + msg.role = "assistant"; - std::string::const_iterator it = input.begin(); - const std::string::const_iterator end = input.end(); - std::smatch match; + std::string::const_iterator it = input.begin(); + const std::string::const_iterator end = input.end(); + std::smatch match; - while (it != end) { - if (std::regex_search(it, end, match, open_regex)) { - // Add content before the match - msg.content += std::string(it, match[0].first); + while (it != end) { + if (std::regex_search(it, end, match, open_regex)) { + // Add content before the match + msg.content += std::string(it, match[0].first); - auto block_start = match[1].str(); - std::string block_end = block_start.empty() ? "" : "```"; + auto block_start = match[1].str(); + std::string block_end = block_start.empty() ? "" : "```"; - auto open_tag = match[2].str(); - std::string close_tag; + auto open_tag = match[2].str(); + std::string close_tag; - if (match[3].matched) { - close_tag = open_tag.empty() ? 
"" : ""; - // Start parsing from after the opening tags - auto json_it = match[6].first; - json arguments; - if (parse_json(json_it, end, arguments)) { - msg.tool_calls.emplace_back(process_tool_call({ - {"name", function_name}, - {"arguments", arguments}, - })); - it = json_it; // Move iterator past parsed JSON - - // Handle close tags - consume_spaces(it, end); - if (!close_tag.empty() && !parse_literal(it, end, close_tag)) { - throw std::runtime_error("Failed to parse closing tag"); + auto function_name = match[4].str(); + if (function_name.empty()) { + function_name = match[5].str(); } - consume_spaces(it, end); - if (!block_end.empty() && !parse_literal(it, end, block_end)) { - throw std::runtime_error("Failed to parse block end"); + GGML_ASSERT(!function_name.empty()); + + close_tag = ""; + // Start parsing from after the opening tags + auto json_it = match[6].first; + json arguments; + if (parse_json(json_it, end, arguments)) { + msg.tool_calls.emplace_back(process_tool_call({ + {"name", function_name}, + {"arguments", arguments}, + })); + it = json_it; // Move iterator past parsed JSON + + // Handle close tags + consume_spaces(it, end); + if (!close_tag.empty() && !parse_literal(it, end, close_tag)) { + throw std::runtime_error("Failed to parse closing tag"); + } + consume_spaces(it, end); + if (!block_end.empty() && !parse_literal(it, end, block_end)) { + throw std::runtime_error("Failed to parse block end"); + } + consume_spaces(it, end); + } else { + // Not a valid tool call, treat as content + msg.content += std::string(match[0].first, match[0].second); + it = match[0].second; } - consume_spaces(it, end); - } else { - // Not a valid tool call, treat as content - msg.content += std::string(match[0].first, match[0].second); - it = match[0].second; } + } else { + // Add remaining content + msg.content += std::string(it, end); + break; } - } else { - // Add remaining content - msg.content += std::string(it, end); - break; } + return msg; + } catch (const std::exception & e) { + LOG_ERR("Failed to parse hermes 2 pro input: %s\n", e.what()); + common_chat_msg msg; + msg.role = "assistant"; + msg.content = input; + return msg; } - return msg; - } catch (const std::exception & e) { - LOG_ERR("Failed to parse hermes 2 pro input: %s\n", e.what()); - common_chat_msg msg; - msg.role = "assistant"; - msg.content = input; - return msg; - } + }); } static common_chat_params common_chat_params_init_without_tools(const common_chat_template & tmpl, const struct templates_params & inputs) { @@ -1609,6 +1621,11 @@ static common_chat_params common_chat_templates_apply_jinja( return common_chat_params_init_command_r7b(tmpl, params); } + // Hermes 2/3 Pro, Qwen 2.5 Instruct (w/ tools) + if (src.find("") != std::string::npos && params.json_schema.is_null()) { + return common_chat_params_init_hermes_2_pro(tmpl, params); + } + // Use generic handler when mixing tools + JSON schema. // TODO: support that mix in handlers below. 
if ((params.tools.is_array() && params.json_schema.is_object())) { @@ -1630,11 +1647,6 @@ static common_chat_params common_chat_templates_apply_jinja( return common_chat_params_init_without_tools(tmpl, params); } - // Hermes 2/3 Pro, Qwen 2.5 Instruct (w/ tools) - if (src.find("") != std::string::npos) { - return common_chat_params_init_hermes_2_pro(tmpl, params); - } - // Functionary v3.1 (w/ tools) if (src.find("<|start_header_id|>") != std::string::npos && src.find("I'm thinkingHello, world!\nWhat's up?", + COMMON_CHAT_FORMAT_HERMES_2_PRO)); + assert_msg_equals(message_assist_thoughts_unparsed_think, + common_chat_parse("I'm thinkingHello, world!\nWhat's up?", + COMMON_CHAT_FORMAT_HERMES_2_PRO)); + assert_msg_equals(message_assist_thoughts, + common_chat_parse("I'm thinkingHello, world!\nWhat's up?", + COMMON_CHAT_FORMAT_HERMES_2_PRO_EXTRACT_REASONING)); + assert_msg_equals(message_assist_thoughts, + common_chat_parse("I'm thinkingHello, world!\nWhat's up?", + COMMON_CHAT_FORMAT_HERMES_2_PRO_EXTRACT_REASONING)); + test_templates(tmpls.get(), end_tokens, message_assist, tools, "Hello, world!\nWhat's up?", /* expect_grammar_triggered= */ false); test_templates(tmpls.get(), end_tokens, message_assist_call, tools, "\n" From 6ef79a67caf1159b0150b44ee80888c7ec98b83f Mon Sep 17 00:00:00 2001 From: marcoStocchi Date: Mon, 10 Mar 2025 12:34:13 +0100 Subject: [PATCH 029/398] common : refactor '-o' option (#12278) As discussed in PR 'llama-tts : add -o option' (#12042): * common_params : 'out_file' string is the only output file name parameter left in common_params. It's intended to be used in all example programs implementing an '-o' option. * cvector-generator, export-lora, imatrix : default output filenames moved from 'common_params' to the 'main()' of each example program. --- common/arg.cpp | 9 +-------- common/common.h | 8 +++----- examples/cvector-generator/cvector-generator.cpp | 4 +++- examples/export-lora/export-lora.cpp | 6 ++++-- examples/imatrix/imatrix.cpp | 5 ++--- 5 files changed, 13 insertions(+), 19 deletions(-) diff --git a/common/arg.cpp b/common/arg.cpp index b96a5678f7346..8531f0871d44a 100644 --- a/common/arg.cpp +++ b/common/arg.cpp @@ -1867,16 +1867,9 @@ common_params_context common_params_parser_init(common_params & params, llama_ex ).set_examples({LLAMA_EXAMPLE_PASSKEY})); add_opt(common_arg( {"-o", "--output", "--output-file"}, "FNAME", - string_format("output file (default: '%s')", - ex == LLAMA_EXAMPLE_EXPORT_LORA - ? params.lora_outfile.c_str() - : ex == LLAMA_EXAMPLE_CVECTOR_GENERATOR - ? 
params.cvector_outfile.c_str() - : params.out_file.c_str()), + string_format("output file (default: '%s')", params.out_file.c_str()), [](common_params & params, const std::string & value) { params.out_file = value; - params.cvector_outfile = value; - params.lora_outfile = value; } ).set_examples({LLAMA_EXAMPLE_IMATRIX, LLAMA_EXAMPLE_CVECTOR_GENERATOR, LLAMA_EXAMPLE_EXPORT_LORA})); add_opt(common_arg( diff --git a/common/common.h b/common/common.h index 733f7f1c8d662..1c0f199774976 100644 --- a/common/common.h +++ b/common/common.h @@ -407,8 +407,6 @@ struct common_params { int32_t i_pos = -1; // position of the passkey in the junk text // imatrix params - std::string out_file = "imatrix.dat"; // save the resulting imatrix to this file - int32_t n_out_freq = 10; // output the imatrix every n_out_freq iterations int32_t n_save_freq = 0; // save the imatrix every n_save_freq iterations int32_t i_chunk = 0; // start processing from this chunk @@ -420,16 +418,16 @@ struct common_params { int n_pca_batch = 100; int n_pca_iterations = 1000; dimre_method cvector_dimre_method = DIMRE_METHOD_PCA; - std::string cvector_outfile = "control_vector.gguf"; std::string cvector_positive_file = "examples/cvector-generator/positive.txt"; std::string cvector_negative_file = "examples/cvector-generator/negative.txt"; bool spm_infill = false; // suffix/prefix/middle pattern for infill - std::string lora_outfile = "ggml-lora-merged-f16.gguf"; - // batched-bench params bool batched_bench_output_jsonl = false; + + // common params + std::string out_file; // output filename for all example programs }; // call once at the start of a program if it uses libcommon diff --git a/examples/cvector-generator/cvector-generator.cpp b/examples/cvector-generator/cvector-generator.cpp index 413b71d34c52b..c72528dac3ff0 100644 --- a/examples/cvector-generator/cvector-generator.cpp +++ b/examples/cvector-generator/cvector-generator.cpp @@ -394,6 +394,8 @@ static int prepare_entries(common_params & params, train_context & ctx_train) { int main(int argc, char ** argv) { common_params params; + params.out_file = "control_vector.gguf"; + if (!common_params_parse(argc, argv, params, LLAMA_EXAMPLE_CVECTOR_GENERATOR, print_usage)) { return 1; } @@ -498,7 +500,7 @@ int main(int argc, char ** argv) { } // write output vectors to gguf - export_gguf(ctx_train.v_final, params.cvector_outfile, model_hint); + export_gguf(ctx_train.v_final, params.out_file, model_hint); llama_backend_free(); diff --git a/examples/export-lora/export-lora.cpp b/examples/export-lora/export-lora.cpp index 91238e4beb26c..e7d0fbfffedb0 100644 --- a/examples/export-lora/export-lora.cpp +++ b/examples/export-lora/export-lora.cpp @@ -413,20 +413,22 @@ static void print_usage(int, char ** argv) { int main(int argc, char ** argv) { common_params params; + params.out_file = "ggml-lora-merged-f16.gguf"; + if (!common_params_parse(argc, argv, params, LLAMA_EXAMPLE_EXPORT_LORA, print_usage)) { return 1; } g_verbose = (params.verbosity > 1); try { - lora_merge_ctx ctx(params.model, params.lora_adapters, params.lora_outfile, params.cpuparams.n_threads); + lora_merge_ctx ctx(params.model, params.lora_adapters, params.out_file, params.cpuparams.n_threads); ctx.run_merge(); } catch (const std::exception & err) { fprintf(stderr, "%s\n", err.what()); exit(EXIT_FAILURE); } - printf("done, output file is %s\n", params.lora_outfile.c_str()); + printf("done, output file is %s\n", params.out_file.c_str()); return 0; } diff --git a/examples/imatrix/imatrix.cpp b/examples/imatrix/imatrix.cpp 
index 4edc0bfacf125..91649c45065f4 100644 --- a/examples/imatrix/imatrix.cpp +++ b/examples/imatrix/imatrix.cpp @@ -206,9 +206,6 @@ bool IMatrixCollector::collect_imatrix(struct ggml_tensor * t, bool ask, void * void IMatrixCollector::save_imatrix(int ncall) const { auto fname = m_params.out_file; - if (fname.empty()) { - fname = "imatrix.dat"; - } if (ncall > 0) { fname += ".at_"; @@ -583,6 +580,8 @@ static bool compute_imatrix(llama_context * ctx, const common_params & params) { int main(int argc, char ** argv) { common_params params; + params.out_file = "imatrix.dat" ; + params.n_ctx = 512; params.logits_all = true; params.escape = false; From e128a1bf5b65741b485dd094a4201264d0580d68 Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Mon, 10 Mar 2025 14:07:15 +0200 Subject: [PATCH 030/398] tests : fix test-quantize-fns to init the CPU backend (#12306) ggml-ci --- tests/test-quantize-fns.cpp | 10 +--------- 1 file changed, 1 insertion(+), 9 deletions(-) diff --git a/tests/test-quantize-fns.cpp b/tests/test-quantize-fns.cpp index c77c8ed1388d7..037c0582bbbf8 100644 --- a/tests/test-quantize-fns.cpp +++ b/tests/test-quantize-fns.cpp @@ -120,13 +120,7 @@ int main(int argc, char * argv[]) { generate_data(0.0, test_data.size(), test_data.data()); generate_data(1.0, test_data2.size(), test_data2.data()); - // Initialize GGML, ensures float conversion tables are initialized - struct ggml_init_params ggml_params = { - /* .mem_size = */ 1*1024, - /* .mem_buffer = */ NULL, - /* .no_alloc = */ true, - }; - struct ggml_context * ctx = ggml_init(ggml_params); + ggml_cpu_init(); int num_failed = 0; bool failed = false; @@ -188,7 +182,5 @@ int main(int argc, char * argv[]) { printf("%d tests failed\n", num_failed); } - ggml_free(ctx); - return num_failed > 0; } From 89b2b56e8658800375a8314200870b1ad4208a0b Mon Sep 17 00:00:00 2001 From: John Bean <113509988+johnbean393@users.noreply.github.com> Date: Mon, 10 Mar 2025 22:13:09 +0800 Subject: [PATCH 031/398] readme: added Sidekick to available UIs (#12311) --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index e371c44ed1b6b..1eec944f273a8 100644 --- a/README.md +++ b/README.md @@ -172,6 +172,7 @@ Instructions for adding support for new models: [HOWTO-add-model.md](docs/develo - [eva](https://github.com/ylsdamxssjxxdd/eva) (MIT) - [iohub/collama](https://github.com/iohub/coLLaMA) (Apache-2.0) - [janhq/jan](https://github.com/janhq/jan) (AGPL) +- [johnbean393/Sidekick](https://github.com/johnbean393/Sidekick) (MIT) - [KanTV](https://github.com/zhouwg/kantv?tab=readme-ov-file) (Apache-2.0) - [KodiBot](https://github.com/firatkiral/kodibot) (GPL) - [llama.vim](https://github.com/ggml-org/llama.vim) (MIT) From 8acdacb3ea00697477eb019efbb6fc183371055c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Henry=20Linjam=C3=A4ki?= Date: Mon, 10 Mar 2025 18:57:00 +0200 Subject: [PATCH 032/398] opencl: use OpenCL C standard supported by the device (#12221) This patch nudges the llama.cpp a bit to be supported on PoCL which doesn't support OpenCL C CL2.0. The issue is solved by querying the device for the supported OpenCL C versions and using the highest one available. 
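The gist of the fix is a plain clGetDeviceInfo query in place of the hard-coded
-cl-std=CL2.0. Roughly, as a sketch only (pick_cl_std is a made-up helper name here;
error handling and the OpenCL 3.0 CL_DEVICE_OPENCL_C_ALL_VERSIONS path used in the
actual change below are omitted):

    #include <CL/cl.h>
    #include <cstdio>
    #include <string>
    #include <vector>

    // Build e.g. "-cl-std=CL1.2" or "-cl-std=CL3.0" from what the device reports.
    static std::string pick_cl_std(cl_device_id device) {
        size_t sz = 0;
        clGetDeviceInfo(device, CL_DEVICE_OPENCL_C_VERSION, 0, nullptr, &sz);
        std::vector<char> buf(sz);
        clGetDeviceInfo(device, CL_DEVICE_OPENCL_C_VERSION, sz, buf.data(), nullptr);
        // The returned string has the form "OpenCL C <major>.<minor> <vendor specific>".
        unsigned major = 0, minor = 0;
        sscanf(buf.data(), "OpenCL C %u.%u", &major, &minor);
        return "-cl-std=CL" + std::to_string(major) + "." + std::to_string(minor);
    }

The compile options previously pinned to CL2.0 are then assembled from that string,
which is what lets PoCL (OpenCL C 1.2) build the kernels.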
--- ggml/CMakeLists.txt | 2 + ggml/src/ggml-opencl/CMakeLists.txt | 1 + ggml/src/ggml-opencl/ggml-opencl.cpp | 176 ++++++++++++++++++++------- 3 files changed, 136 insertions(+), 43 deletions(-) diff --git a/ggml/CMakeLists.txt b/ggml/CMakeLists.txt index 412d294dca42b..9a4ee4992d0c7 100644 --- a/ggml/CMakeLists.txt +++ b/ggml/CMakeLists.txt @@ -195,6 +195,8 @@ option(GGML_OPENCL "ggml: use OpenCL" option(GGML_OPENCL_PROFILING "ggml: use OpenCL profiling (increases overhead)" OFF) option(GGML_OPENCL_EMBED_KERNELS "ggml: embed kernels" ON) option(GGML_OPENCL_USE_ADRENO_KERNELS "ggml: use optimized kernels for Adreno" ON) +set (GGML_OPENCL_TARGET_VERSION "300" CACHE STRING + "gmml: OpenCL API version to target") # toolchain for vulkan-shaders-gen set (GGML_VULKAN_SHADERS_GEN_TOOLCHAIN "" CACHE FILEPATH "ggml: toolchain file for vulkan-shaders-gen") diff --git a/ggml/src/ggml-opencl/CMakeLists.txt b/ggml/src/ggml-opencl/CMakeLists.txt index 45328a6579320..59a208fe9c7e4 100644 --- a/ggml/src/ggml-opencl/CMakeLists.txt +++ b/ggml/src/ggml-opencl/CMakeLists.txt @@ -15,6 +15,7 @@ if (GGML_OPENCL_PROFILING) endif () add_compile_definitions(GGML_OPENCL_SOA_Q) +add_compile_definitions(GGML_OPENCL_TARGET_VERSION=${GGML_OPENCL_TARGET_VERSION}) if (GGML_OPENCL_USE_ADRENO_KERNELS) message(STATUS "OpenCL will use matmul kernels optimized for Adreno") diff --git a/ggml/src/ggml-opencl/ggml-opencl.cpp b/ggml/src/ggml-opencl/ggml-opencl.cpp index b85a895c45c43..14d9934fb1b73 100644 --- a/ggml/src/ggml-opencl/ggml-opencl.cpp +++ b/ggml/src/ggml-opencl/ggml-opencl.cpp @@ -1,4 +1,4 @@ -#define CL_TARGET_OPENCL_VERSION 220 +#define CL_TARGET_OPENCL_VERSION GGML_OPENCL_TARGET_VERSION #define CL_USE_DEPRECATED_OPENCL_1_2_APIS // suppress warnings in CL headers for GCC and Clang @@ -25,6 +25,8 @@ #include #include #include +#include +#include #undef MIN #undef MAX @@ -62,6 +64,97 @@ enum ADRENO_GPU_GEN { X1E, }; +struct ggml_cl_version { + cl_uint major = 0; + cl_uint minor = 0; +}; + +// Parses a version string of form "XX.YY ". On an error returns ggml_cl_version with all zeroes. +static ggml_cl_version parse_cl_version(std::string_view str) { + size_t major_str_begin = 0; + size_t major_str_end = str.find(".", major_str_begin); + if (major_str_end == std::string::npos) { + return {}; + } + + size_t minor_str_begin = major_str_end + 1; + size_t minor_str_end = str.find(" ", minor_str_begin); + if (minor_str_end == std::string::npos) { + return {}; + } + + cl_uint version_major; + if (std::from_chars(str.data() + major_str_begin, str.data() + major_str_end, version_major).ec != std::errc{}) { + return {}; + } + + cl_uint version_minor; + if (std::from_chars(str.data() + minor_str_begin, str.data() + minor_str_end, version_minor).ec != std::errc{}) { + return {}; + } + return { version_major, version_minor }; +} + +// Returns OpenCL platform's version. On an error returns ggml_cl_version with all zeroes. 
+static ggml_cl_version get_opencl_platform_version(cl_platform_id platform) { + size_t param_size; + CL_CHECK(clGetPlatformInfo(platform, CL_PLATFORM_VERSION, 0, nullptr, ¶m_size)); + std::unique_ptr param_storage(new char[param_size]); + CL_CHECK(clGetPlatformInfo(platform, CL_PLATFORM_VERSION, param_size, param_storage.get(), nullptr)); + + auto param_value = std::string_view(param_storage.get(), param_size); + const std::string version_prefix = "OpenCL "; // Suffix: "XX.YY " + if (param_value.find(version_prefix) != 0) { + return {}; + } + param_value.remove_prefix(version_prefix.length()); + return parse_cl_version(param_value); +} + +// Return a version to use in OpenCL C compilation. On an error returns ggml_cl_version with all zeroes. +static ggml_cl_version get_opencl_c_version(ggml_cl_version platform_version, cl_device_id device) { + size_t param_size; + +#if CL_TARGET_OPENCL_VERSION >= 300 + if (platform_version.major >= 3) { + CL_CHECK(clGetDeviceInfo(device, CL_DEVICE_OPENCL_C_ALL_VERSIONS, 0, nullptr, ¶m_size)); + if (!param_size) { + return {}; + } + + std::unique_ptr versions(new cl_name_version[param_size]); + CL_CHECK(clGetDeviceInfo(device, CL_DEVICE_OPENCL_C_ALL_VERSIONS, param_size, versions.get(), nullptr)); + unsigned versions_count = param_size / sizeof(cl_name_version); + + cl_version version_max = 0; + for (unsigned i = 0; i < versions_count; i++) { + version_max = std::max(versions[i].version, version_max); + } + + return { CL_VERSION_MAJOR(version_max), CL_VERSION_MINOR(version_max) }; + } +#else + GGML_UNUSED(platform_version); +#endif // CL_TARGET_OPENCL_VERSION >= 300 + + CL_CHECK(clGetDeviceInfo(device, CL_DEVICE_OPENCL_C_VERSION, 0, nullptr, ¶m_size)); + if (!param_size) { + return {}; + } + + std::unique_ptr param_storage(new char[param_size]); + CL_CHECK(clGetDeviceInfo(device, CL_DEVICE_OPENCL_C_VERSION, param_size, param_storage.get(), nullptr)); + auto param_value = std::string_view(param_storage.get(), param_size); + + const std::string version_prefix = "OpenCL C "; // Suffix: "XX.YY " + if (param_value.find(version_prefix) != 0) { + return {}; + } + param_value.remove_prefix(version_prefix.length()); + + return parse_cl_version(param_value); +} + static ADRENO_GPU_GEN get_adreno_gpu_gen(const char *device_name) { if (strstr(device_name, "730") || strstr(device_name, "740") || @@ -470,16 +563,11 @@ static ggml_backend_opencl_context * ggml_cl2_init(ggml_backend_dev_t dev) { // A local ref of cl_device_id for convenience cl_device_id device = backend_ctx->device; + ggml_cl_version platform_version = get_opencl_platform_version(default_device->platform->id); + // Check device OpenCL version, OpenCL 2.0 or above is required - size_t device_ver_str_size; - clGetDeviceInfo(device, CL_DEVICE_VERSION, 0, NULL, &device_ver_str_size); - char *device_ver_buffer = (char *)alloca(device_ver_str_size + 1); - clGetDeviceInfo(device, CL_DEVICE_VERSION, device_ver_str_size, device_ver_buffer, NULL); - device_ver_buffer[device_ver_str_size] = '\0'; - GGML_LOG_INFO("ggml_opencl: device OpenCL version: %s\n", device_ver_buffer); - - if (strstr(device_ver_buffer, "OpenCL 2") == NULL && - strstr(device_ver_buffer, "OpenCL 3") == NULL) { + ggml_cl_version opencl_c_version = get_opencl_c_version(platform_version, device); + if (opencl_c_version.major < 2) { GGML_LOG_ERROR("ggml_opencl: OpenCL 2.0 or above is required\n"); return backend_ctx; } @@ -516,8 +604,7 @@ static ggml_backend_opencl_context * ggml_cl2_init(ggml_backend_dev_t dev) { // If OpenCL 3.0 is supported, 
then check for cl_khr_subgroups, which becomes // optional in OpenCL 3.0 (cl_khr_subgroup is mandatory in OpenCL 2.x) - if (strstr(device_ver_buffer, "OpenCL 3") && - strstr(ext_buffer, "cl_khr_subgroups") == NULL && + if (opencl_c_version.major == 3 && strstr(ext_buffer, "cl_khr_subgroups") == NULL && strstr(ext_buffer, "cl_intel_subgroups") == NULL) { GGML_LOG_ERROR("ggml_opencl: device does not support subgroups (cl_khr_subgroups or cl_intel_subgroups) " "(note that subgroups is an optional feature in OpenCL 3.0)\n"); @@ -581,9 +668,12 @@ static ggml_backend_opencl_context * ggml_cl2_init(ggml_backend_dev_t dev) { const std::string kernel_src = read_file("ggml-opencl.cl"); #endif - std::string compile_opts = - "-cl-std=CL2.0 -cl-mad-enable -cl-unsafe-math-optimizations " - "-cl-finite-math-only -cl-fast-relaxed-math "; + auto opencl_c_std = + std::string("CL") + std::to_string(opencl_c_version.major) + "." + std::to_string(opencl_c_version.minor); + + std::string compile_opts = std::string("-cl-std=") + opencl_c_std + + " -cl-mad-enable -cl-unsafe-math-optimizations" + " -cl-finite-math-only -cl-fast-relaxed-math"; backend_ctx->program = build_program_from_source(context, device, kernel_src.c_str(), compile_opts); // Non matmul kernels. @@ -693,10 +783,10 @@ static ggml_backend_opencl_context * ggml_cl2_init(ggml_backend_dev_t dev) { CL_CHECK((backend_ctx->kernel_transpose_16 = clCreateKernel(backend_ctx->program_transpose_16, "kernel_transpose_16", &err), err)); // Gemv general - std::string CL_gemv_compile_opts = - " -cl-std=CL2.0 " - " -cl-mad-enable " - " -DSIMDGROUP_WIDTH=" + std::to_string(backend_ctx->adreno_wave_size); + std::string CL_gemv_compile_opts = std::string("-cl-std=") + opencl_c_std + + " -cl-mad-enable " + " -DSIMDGROUP_WIDTH=" + + std::to_string(backend_ctx->adreno_wave_size); if (has_vector_subgroup_broadcast) { CL_gemv_compile_opts += " -DVECTOR_SUB_GROUP_BROADCAT "; } @@ -713,12 +803,12 @@ static ggml_backend_opencl_context * ggml_cl2_init(ggml_backend_dev_t dev) { CL_CHECK((backend_ctx->CL_mul_mat_vec_q4_0_f32_1d_4x_flat_general = clCreateKernel(backend_ctx->program_CL_gemv_general, "kernel_gemv_noshuffle", &err), err)); // Gemv 2048, 16384 - CL_gemv_compile_opts = - " -cl-std=CL2.0 " - " -cl-mad-enable " - " -DLINE_STRIDE_A=2048 " - " -DBLOCK_STRIDE_A=16384 " - " -DSIMDGROUP_WIDTH=" + std::to_string(backend_ctx->adreno_wave_size); + CL_gemv_compile_opts = std::string("-cl-std=") + opencl_c_std + + " -cl-mad-enable " + " -DLINE_STRIDE_A=2048 " + " -DBLOCK_STRIDE_A=16384 " + " -DSIMDGROUP_WIDTH=" + + std::to_string(backend_ctx->adreno_wave_size); if (has_vector_subgroup_broadcast) { CL_gemv_compile_opts += " -DVECTOR_SUB_GROUP_BROADCAT "; } @@ -735,12 +825,12 @@ static ggml_backend_opencl_context * ggml_cl2_init(ggml_backend_dev_t dev) { CL_CHECK((backend_ctx->CL_mul_mat_vec_q4_0_f32_1d_4x_flat_4096_1_4096 = clCreateKernel(backend_ctx->program_CL_gemv_4096_1_4096, "kernel_gemv_noshuffle", &err), err)); // Gemv 2048, 16384 - CL_gemv_compile_opts = - " -cl-std=CL2.0 " - " -cl-mad-enable " - " -DLINE_STRIDE_A=2048 " - " -DBLOCK_STRIDE_A=16384 " - " -DSIMDGROUP_WIDTH=" + std::to_string(backend_ctx->adreno_wave_size); + CL_gemv_compile_opts = std::string("-cl-std=") + opencl_c_std + + " -cl-mad-enable " + " -DLINE_STRIDE_A=2048 " + " -DBLOCK_STRIDE_A=16384 " + " -DSIMDGROUP_WIDTH=" + + std::to_string(backend_ctx->adreno_wave_size); if (has_vector_subgroup_broadcast) { CL_gemv_compile_opts += " -DVECTOR_SUB_GROUP_BROADCAT "; } @@ -750,12 +840,12 @@ static 
ggml_backend_opencl_context * ggml_cl2_init(ggml_backend_dev_t dev) { CL_CHECK((backend_ctx->CL_mul_mat_vec_q4_0_f32_1d_4x_flat_4096_1_11008 = clCreateKernel(backend_ctx->program_CL_gemv_4096_1_11008, "kernel_gemv_noshuffle", &err), err)); // Gemv 5504, 44032 - CL_gemv_compile_opts = - " -cl-std=CL2.0 " - " -cl-mad-enable " - " -DLINE_STRIDE_A=5504 " - " -DBLOCK_STRIDE_A=44032 " - " -DSIMDGROUP_WIDTH=" + std::to_string(backend_ctx->adreno_wave_size); + CL_gemv_compile_opts = std::string("-cl-std=") + opencl_c_std + + " -cl-mad-enable " + " -DLINE_STRIDE_A=5504 " + " -DBLOCK_STRIDE_A=44032 " + " -DSIMDGROUP_WIDTH=" + + std::to_string(backend_ctx->adreno_wave_size); if (has_vector_subgroup_broadcast) { CL_gemv_compile_opts += " -DVECTOR_SUB_GROUP_BROADCAT "; } @@ -765,12 +855,12 @@ static ggml_backend_opencl_context * ggml_cl2_init(ggml_backend_dev_t dev) { CL_CHECK((backend_ctx->CL_mul_mat_vec_q4_0_f32_1d_4x_flat_11008_1_4096 = clCreateKernel(backend_ctx->program_CL_gemv_11008_1_4096, "kernel_gemv_noshuffle", &err), err)); // Gemv 16000, 128000 - CL_gemv_compile_opts = - " -cl-std=CL2.0 " - " -cl-mad-enable " - " -DLINE_STRIDE_A=16000 " - " -DBLOCK_STRIDE_A=128000 " - " -DSIMDGROUP_WIDTH=" + std::to_string(backend_ctx->adreno_wave_size); + CL_gemv_compile_opts = std::string("-cl-std=") + opencl_c_std + + " -cl-mad-enable " + " -DLINE_STRIDE_A=16000 " + " -DBLOCK_STRIDE_A=128000 " + " -DSIMDGROUP_WIDTH=" + + std::to_string(backend_ctx->adreno_wave_size); if (has_vector_subgroup_broadcast) { CL_gemv_compile_opts += " -DVECTOR_SUB_GROUP_BROADCAT "; } From 251364549fe4a78d4e2e66f1adfaf2bd53041d2c Mon Sep 17 00:00:00 2001 From: R0CKSTAR Date: Tue, 11 Mar 2025 01:18:25 +0800 Subject: [PATCH 033/398] musa: support new arch mp_31 and update doc (#12296) Signed-off-by: Xiaodong Ye --- Makefile | 2 +- docs/build.md | 46 +++++++++++++++++++++++-------- ggml/src/ggml-musa/CMakeLists.txt | 2 +- 3 files changed, 37 insertions(+), 13 deletions(-) diff --git a/Makefile b/Makefile index 5339d490b4e68..1f9455eff0aec 100644 --- a/Makefile +++ b/Makefile @@ -836,7 +836,7 @@ ifdef GGML_MUSA else MUSA_PATH ?= /opt/musa endif - MUSA_ARCHITECTURES ?= 21;22 + MUSA_ARCHITECTURES ?= 21;22;31 MK_CPPFLAGS += -DGGML_USE_MUSA -DGGML_USE_CUDA MK_LDFLAGS += -L$(MUSA_PATH)/lib -Wl,-rpath=$(MUSA_PATH)/lib diff --git a/docs/build.md b/docs/build.md index 3d8333328fce0..2e3975c145360 100644 --- a/docs/build.md +++ b/docs/build.md @@ -197,28 +197,52 @@ The following compilation options are also available to tweak performance: ## MUSA -This provides GPU acceleration using the MUSA cores of your Moore Threads MTT GPU. Make sure to have the MUSA SDK installed. You can download it from here: [MUSA SDK](https://developer.mthreads.com/sdk/download/musa). +This provides GPU acceleration using a Moore Threads GPU. Make sure to have the [MUSA SDK](https://developer.mthreads.com/musa/musa-sdk) installed. -- Using `CMake`: +#### Download directly from Moore Threads - ```bash - cmake -B build -DGGML_MUSA=ON - cmake --build build --config Release - ``` +You may find the official downloads here: [Moore Threads developer site](https://developer.mthreads.com/sdk/download/musa). - For static build: +### Compilation - ```bash +```bash +cmake -B build -DGGML_MUSA=ON +cmake --build build --config Release +``` + +#### Override Compute Capability Specifications + +By default, all supported compute capabilities are enabled. 
To customize this behavior, you can specify the `MUSA_ARCHITECTURES` option in the CMake command: + +```bash +cmake -B build -DGGML_MUSA=ON -DMUSA_ARCHITECTURES="21" +``` + +This configuration enables only compute capability `2.1` (MTT S80) during compilation, which can help reduce compilation time. + +#### Compilation options + +Most of the compilation options available for CUDA should also be available for MUSA, though they haven't been thoroughly tested yet. + +- For static builds, add `-DBUILD_SHARED_LIBS=OFF` and `-DCMAKE_POSITION_INDEPENDENT_CODE=ON`: + ``` cmake -B build -DGGML_MUSA=ON \ -DBUILD_SHARED_LIBS=OFF -DCMAKE_POSITION_INDEPENDENT_CODE=ON cmake --build build --config Release ``` -The environment variable [`MUSA_VISIBLE_DEVICES`](https://docs.mthreads.com/musa-sdk/musa-sdk-doc-online/programming_guide/Z%E9%99%84%E5%BD%95/) can be used to specify which GPU(s) will be used. +### Runtime MUSA environmental variables -The environment variable `GGML_CUDA_ENABLE_UNIFIED_MEMORY=1` can be used to enable unified memory in Linux. This allows swapping to system RAM instead of crashing when the GPU VRAM is exhausted. +You may set the [musa environmental variables](https://docs.mthreads.com/musa-sdk/musa-sdk-doc-online/programming_guide/Z%E9%99%84%E5%BD%95/) at runtime. -Most of the compilation options available for CUDA should also be available for MUSA, though they haven't been thoroughly tested yet. +```bash +# Use `MUSA_VISIBLE_DEVICES` to hide the first compute device. +MUSA_VISIBLE_DEVICES="-0" ./build/bin/llama-server --model /srv/models/llama.gguf +``` + +### Unified Memory + +The environment variable `GGML_CUDA_ENABLE_UNIFIED_MEMORY=1` can be used to enable unified memory in Linux. This allows swapping to system RAM instead of crashing when the GPU VRAM is exhausted. 
## HIP diff --git a/ggml/src/ggml-musa/CMakeLists.txt b/ggml/src/ggml-musa/CMakeLists.txt index 2c75abf61d672..166970ca6bfb8 100644 --- a/ggml/src/ggml-musa/CMakeLists.txt +++ b/ggml/src/ggml-musa/CMakeLists.txt @@ -21,7 +21,7 @@ if (MUSAToolkit_FOUND) message(STATUS "MUSA Toolkit found") if (NOT DEFINED MUSA_ARCHITECTURES) - set(MUSA_ARCHITECTURES "21;22") + set(MUSA_ARCHITECTURES "21;22;31") endif() message(STATUS "Using MUSA architectures: ${MUSA_ARCHITECTURES}") From 2c9f833d17bb5b8ea89dec663b072b5420fc5438 Mon Sep 17 00:00:00 2001 From: Eve <139727413+netrunnereve@users.noreply.github.com> Date: Mon, 10 Mar 2025 19:28:11 +0000 Subject: [PATCH 034/398] mat vec double buffer (#12188) --- .../vulkan-shaders/mul_mat_vec_q2_k.comp | 47 ++++++++++--------- .../vulkan-shaders/mul_mat_vec_q3_k.comp | 26 +++++----- .../vulkan-shaders/mul_mat_vec_q6_k.comp | 12 ++--- 3 files changed, 43 insertions(+), 42 deletions(-) diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q2_k.comp b/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q2_k.comp index 8cdc640e80e31..423ceb8a3df46 100644 --- a/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q2_k.comp +++ b/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q2_k.comp @@ -5,23 +5,24 @@ layout(local_size_x_id = 0, local_size_y = 1, local_size_z = 1) in; -shared FLOAT_TYPE sccache1[BLOCK_SIZE/16][16]; -shared FLOAT_TYPE sccache2[BLOCK_SIZE/16][16]; +shared FLOAT_TYPE sccache1[2][BLOCK_SIZE/16][16]; +shared FLOAT_TYPE sccache2[2][BLOCK_SIZE/16][16]; FLOAT_TYPE temp[NUM_COLS][NUM_ROWS]; +uint csel = 0; void calc_superblock(const uint a_offset, const uint b_offset, const uint itid, const uint v_im, const uint ix, const uint q_offset, const uint y_offset, const uint i, const uint num_blocks_per_row, const uint first_row, const uint num_rows, const bool all_threads) { const uint y_idx = i * QUANT_K + y_offset; [[unroll]] for (uint n = 0; n < num_rows; ++n) { const uint ib0 = a_offset / QUANT_K + (first_row+n)*num_blocks_per_row; + csel ^= 1; - barrier(); if (!all_threads) { // when we don't have enough blocks to use all threads if (i < num_blocks_per_row) { const uint32_t scale = uint32_t(data_a[ib0 + i].scales[itid]); - sccache1[ix][itid] = FLOAT_TYPE(scale & 0xF); - sccache2[ix][itid] = FLOAT_TYPE((scale >> 4) & 0xF); + sccache1[csel][ix][itid] = FLOAT_TYPE(scale & 0xF); + sccache2[csel][ix][itid] = FLOAT_TYPE((scale >> 4) & 0xF); } barrier(); @@ -29,8 +30,8 @@ void calc_superblock(const uint a_offset, const uint b_offset, const uint itid, continue; } else { const uint32_t scale = uint32_t(data_a[ib0 + i].scales[itid]); - sccache1[ix][itid] = FLOAT_TYPE(scale & 0xF); - sccache2[ix][itid] = FLOAT_TYPE((scale >> 4) & 0xF); + sccache1[csel][ix][itid] = FLOAT_TYPE(scale & 0xF); + sccache2[csel][ix][itid] = FLOAT_TYPE((scale >> 4) & 0xF); barrier(); } @@ -57,22 +58,22 @@ void calc_superblock(const uint a_offset, const uint b_offset, const uint itid, FLOAT_TYPE sum1 = FLOAT_TYPE(0.0); FLOAT_TYPE sum2 = FLOAT_TYPE(0.0); [[unroll]] for (int l = 0; l < 2; ++l) { - sum1 = fma(FLOAT_TYPE(b0[l]), sccache1[ix][ 8*v_im] * qs_u32_0[l ], - fma(FLOAT_TYPE(b16[l]), sccache1[ix][1 + 8*v_im] * qs_u32_0[l+2], - fma(FLOAT_TYPE(b32[l]), sccache1[ix][2 + 8*v_im] * qs_u32_2[l ], - fma(FLOAT_TYPE(b48[l]), sccache1[ix][3 + 8*v_im] * qs_u32_2[l+2], - fma(FLOAT_TYPE(b64[l]), sccache1[ix][4 + 8*v_im] * qs_u32_4[l ], - fma(FLOAT_TYPE(b80[l]), sccache1[ix][5 + 8*v_im] * qs_u32_4[l+2], - fma(FLOAT_TYPE(b96[l]), sccache1[ix][6 + 8*v_im] * qs_u32_6[l ], - fma(FLOAT_TYPE(b112[l]), 
sccache1[ix][7 + 8*v_im] * qs_u32_6[l+2], sum1)))))))); - sum2 = fma(FLOAT_TYPE(b0[l]), sccache2[ix][ 8*v_im], - fma(FLOAT_TYPE(b16[l]), sccache2[ix][1 + 8*v_im], - fma(FLOAT_TYPE(b32[l]), sccache2[ix][2 + 8*v_im], - fma(FLOAT_TYPE(b48[l]), sccache2[ix][3 + 8*v_im], - fma(FLOAT_TYPE(b64[l]), sccache2[ix][4 + 8*v_im], - fma(FLOAT_TYPE(b80[l]), sccache2[ix][5 + 8*v_im], - fma(FLOAT_TYPE(b96[l]), sccache2[ix][6 + 8*v_im], - fma(FLOAT_TYPE(b112[l]), sccache2[ix][7 + 8*v_im], sum2)))))))); + sum1 = fma(FLOAT_TYPE(b0[l]), sccache1[csel][ix][ 8*v_im] * qs_u32_0[l ], + fma(FLOAT_TYPE(b16[l]), sccache1[csel][ix][1 + 8*v_im] * qs_u32_0[l+2], + fma(FLOAT_TYPE(b32[l]), sccache1[csel][ix][2 + 8*v_im] * qs_u32_2[l ], + fma(FLOAT_TYPE(b48[l]), sccache1[csel][ix][3 + 8*v_im] * qs_u32_2[l+2], + fma(FLOAT_TYPE(b64[l]), sccache1[csel][ix][4 + 8*v_im] * qs_u32_4[l ], + fma(FLOAT_TYPE(b80[l]), sccache1[csel][ix][5 + 8*v_im] * qs_u32_4[l+2], + fma(FLOAT_TYPE(b96[l]), sccache1[csel][ix][6 + 8*v_im] * qs_u32_6[l ], + fma(FLOAT_TYPE(b112[l]), sccache1[csel][ix][7 + 8*v_im] * qs_u32_6[l+2], sum1)))))))); + sum2 = fma(FLOAT_TYPE(b0[l]), sccache2[csel][ix][ 8*v_im], + fma(FLOAT_TYPE(b16[l]), sccache2[csel][ix][1 + 8*v_im], + fma(FLOAT_TYPE(b32[l]), sccache2[csel][ix][2 + 8*v_im], + fma(FLOAT_TYPE(b48[l]), sccache2[csel][ix][3 + 8*v_im], + fma(FLOAT_TYPE(b64[l]), sccache2[csel][ix][4 + 8*v_im], + fma(FLOAT_TYPE(b80[l]), sccache2[csel][ix][5 + 8*v_im], + fma(FLOAT_TYPE(b96[l]), sccache2[csel][ix][6 + 8*v_im], + fma(FLOAT_TYPE(b112[l]), sccache2[csel][ix][7 + 8*v_im], sum2)))))))); } temp[j][n] = fma(dall, sum1, fma(-dmin, sum2, temp[j][n])); } diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q3_k.comp b/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q3_k.comp index 3116fad165be0..e91724a28db22 100644 --- a/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q3_k.comp +++ b/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q3_k.comp @@ -5,20 +5,21 @@ layout(local_size_x_id = 0, local_size_y = 1, local_size_z = 1) in; -shared FLOAT_TYPE sccache[BLOCK_SIZE/16][2][8]; +shared FLOAT_TYPE sccache[2][BLOCK_SIZE/16][2][8]; FLOAT_TYPE temp[NUM_COLS][NUM_ROWS]; +uint csel = 0; void calc_superblock(const uint a_offset, const uint b_offset, const uint ix, const uint itid8, const uint v_im, const uint v_im4, const uint v_in, const uint32_t hm_m[4], const uint q_offset, const uint y_offset, const uint s_shift, const uint i, const uint num_blocks_per_row, const uint first_row, const uint num_rows, const bool all_threads) { const uint y_idx = i * QUANT_K + y_offset; [[unroll]] for (uint n = 0; n < num_rows; ++n) { const uint ib0 = a_offset / QUANT_K + (first_row+n)*num_blocks_per_row; + csel ^= 1; if (!all_threads) { // when we don't have enough blocks to use all threads - barrier(); if (i < num_blocks_per_row) - sccache[ix][v_im][itid8] = FLOAT_TYPE(int8_t(((data_a[ib0+i].scales[itid8] >> v_im4) & 0xF) | (((data_a[ib0+i].scales[itid8%4+8] >> s_shift) & 3) << 4)) - 32); + sccache[csel][ix][v_im][itid8] = FLOAT_TYPE(int8_t(((data_a[ib0+i].scales[itid8] >> v_im4) & 0xF) | (((data_a[ib0+i].scales[itid8%4+8] >> s_shift) & 3) << 4)) - 32); barrier(); if (i >= num_blocks_per_row) @@ -40,8 +41,7 @@ void calc_superblock(const uint a_offset, const uint b_offset, const uint ix, co const vec4 qs_u32_6 = vec4(unpack8((qs_u32 >> 6) & 0x03030303)); if (all_threads) { - barrier(); - sccache[ix][v_im][itid8] = FLOAT_TYPE(int8_t(((data_a[ib0+i].scales[itid8] >> v_im4) & 0xF) | (((data_a[ib0+i].scales[itid8%4+8] >> s_shift) & 3) << 4)) - 32); + 
sccache[csel][ix][v_im][itid8] = FLOAT_TYPE(int8_t(((data_a[ib0+i].scales[itid8] >> v_im4) & 0xF) | (((data_a[ib0+i].scales[itid8%4+8] >> s_shift) & 3) << 4)) - 32); barrier(); } @@ -59,14 +59,14 @@ void calc_superblock(const uint a_offset, const uint b_offset, const uint ix, co FLOAT_TYPE sum = FLOAT_TYPE(0.0); [[unroll]] for (int l = 0; l < 2; ++l) { - sum = fma(FLOAT_TYPE( b0[l]) * sccache[ix][v_im][0], qs_u32_0[l ] - hmk_0[l ], - fma(FLOAT_TYPE( b16[l]) * sccache[ix][v_im][1], qs_u32_0[l+2] - hmk_0[l+2], - fma(FLOAT_TYPE( b32[l]) * sccache[ix][v_im][2], qs_u32_2[l ] - hmk_1[l ], - fma(FLOAT_TYPE( b48[l]) * sccache[ix][v_im][3], qs_u32_2[l+2] - hmk_1[l+2], - fma(FLOAT_TYPE( b64[l]) * sccache[ix][v_im][4], qs_u32_4[l ] - hmk_2[l ], - fma(FLOAT_TYPE( b80[l]) * sccache[ix][v_im][5], qs_u32_4[l+2] - hmk_2[l+2], - fma(FLOAT_TYPE( b96[l]) * sccache[ix][v_im][6], qs_u32_6[l ] - hmk_3[l ], - fma(FLOAT_TYPE(b112[l]) * sccache[ix][v_im][7], qs_u32_6[l+2] - hmk_3[l+2], sum)))))))); + sum = fma(FLOAT_TYPE( b0[l]) * sccache[csel][ix][v_im][0], qs_u32_0[l ] - hmk_0[l ], + fma(FLOAT_TYPE( b16[l]) * sccache[csel][ix][v_im][1], qs_u32_0[l+2] - hmk_0[l+2], + fma(FLOAT_TYPE( b32[l]) * sccache[csel][ix][v_im][2], qs_u32_2[l ] - hmk_1[l ], + fma(FLOAT_TYPE( b48[l]) * sccache[csel][ix][v_im][3], qs_u32_2[l+2] - hmk_1[l+2], + fma(FLOAT_TYPE( b64[l]) * sccache[csel][ix][v_im][4], qs_u32_4[l ] - hmk_2[l ], + fma(FLOAT_TYPE( b80[l]) * sccache[csel][ix][v_im][5], qs_u32_4[l+2] - hmk_2[l+2], + fma(FLOAT_TYPE( b96[l]) * sccache[csel][ix][v_im][6], qs_u32_6[l ] - hmk_3[l ], + fma(FLOAT_TYPE(b112[l]) * sccache[csel][ix][v_im][7], qs_u32_6[l+2] - hmk_3[l+2], sum)))))))); } temp[j][n] = fma(d, sum, temp[j][n]); } diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q6_k.comp b/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q6_k.comp index f05f96b5efb9d..d53d9ee0a2723 100644 --- a/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q6_k.comp +++ b/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q6_k.comp @@ -6,20 +6,21 @@ layout(local_size_x_id = 0, local_size_y = 1, local_size_z = 1) in; -shared FLOAT_TYPE sccache[BLOCK_SIZE/16][16]; +shared FLOAT_TYPE sccache[2][BLOCK_SIZE/16][16]; FLOAT_TYPE temp[NUM_COLS][NUM_ROWS]; +uint csel = 0; void calc_superblock(const uint a_offset, const uint b_offset, const uint itid, const uint ix, const uint ql_offset, const uint qh_offset, const uint s_offset, const uint y_offset, const uint i, const uint num_blocks_per_row, const uint first_row, const uint num_rows, const bool all_threads) { const uint y_idx = i * QUANT_K + y_offset; [[unroll]] for (uint n = 0; n < num_rows; ++n) { const uint ib0 = a_offset / QUANT_K + (first_row+n)*num_blocks_per_row; + csel ^= 1; if (!all_threads) { // when we don't have enough blocks to use all threads - barrier(); if (i < num_blocks_per_row) - sccache[ix][itid] = FLOAT_TYPE(data_a[ib0 + i].scales[itid]); + sccache[csel][ix][itid] = FLOAT_TYPE(data_a[ib0 + i].scales[itid]); barrier(); if (i >= num_blocks_per_row) @@ -51,8 +52,7 @@ void calc_superblock(const uint a_offset, const uint b_offset, const uint itid, const vec4 q3 = vec4(unpack8(q3_u32)) - 32; if (all_threads) { - barrier(); - sccache[ix][itid] = FLOAT_TYPE(data_a[ib0 + i].scales[itid]); + sccache[csel][ix][itid] = FLOAT_TYPE(data_a[ib0 + i].scales[itid]); barrier(); } @@ -71,7 +71,7 @@ void calc_superblock(const uint a_offset, const uint b_offset, const uint itid, sum[2] = fma(FLOAT_TYPE(by64[l]), q2[l], sum[2]); sum[3] = fma(FLOAT_TYPE(by96[l]), q3[l], sum[3]); } - temp[j][n] = 
fma(fma(sum[0], sccache[ix][s_offset], fma(sum[1], sccache[ix][s_offset + 2], fma(sum[2], sccache[ix][s_offset + 4], sum[3] * sccache[ix][s_offset + 6]))), d, temp[j][n]); + temp[j][n] = fma(fma(sum[0], sccache[csel][ix][s_offset], fma(sum[1], sccache[csel][ix][s_offset + 2], fma(sum[2], sccache[csel][ix][s_offset + 4], sum[3] * sccache[csel][ix][s_offset + 6]))), d, temp[j][n]); } } } From 96e1280839561aaabb73851f94972a2cd37b2d96 Mon Sep 17 00:00:00 2001 From: Xuan-Son Nguyen Date: Tue, 11 Mar 2025 09:20:16 +0100 Subject: [PATCH 035/398] clip : bring back GPU support (#12322) * clip : bring back GPU support * use n_gpu_layers param * fix double free * ggml_backend_init_by_type * clean up --- examples/llava/clip.cpp | 149 ++++++++++++++++---------------- examples/llava/clip.h | 11 ++- examples/llava/minicpmv-cli.cpp | 6 +- 3 files changed, 89 insertions(+), 77 deletions(-) diff --git a/examples/llava/clip.cpp b/examples/llava/clip.cpp index 3f558b7bdbdf3..7f892beb6edb1 100644 --- a/examples/llava/clip.cpp +++ b/examples/llava/clip.cpp @@ -4,31 +4,12 @@ // Note: Even when using identical normalized image inputs (see normalize_image_u8_to_f32()) we have a significant difference in resulting embeddings compared to pytorch #include "clip.h" #include "ggml.h" +#include "ggml-cpp.h" #include "ggml-cpu.h" #include "ggml-alloc.h" #include "ggml-backend.h" #include "gguf.h" -//#ifdef GGML_USE_CUDA -//#include "ggml-cuda.h" -//#endif -// -//#ifdef GGML_USE_SYCL -//#include "ggml-sycl.h" -//#endif -// -//#ifdef GGML_USE_METAL -//#include "ggml-metal.h" -//#endif -// -//#ifdef GGML_USE_CANN -//#include "ggml-cann.h" -//#endif -// -//#ifdef GGML_USE_VULKAN -//#include "ggml-vulkan.h" -//#endif - #define STB_IMAGE_IMPLEMENTATION #include "stb_image.h" @@ -600,18 +581,54 @@ struct clip_ctx { bool has_post_norm = false; bool has_patch_bias = false; - struct gguf_context * ctx_gguf; - struct ggml_context * ctx_data; + struct gguf_context * ctx_gguf = nullptr; + struct ggml_context * ctx_data = nullptr; std::vector buf_compute_meta; - // memory buffers to evaluate the model - ggml_backend_buffer_t params_buffer = NULL; + std::vector backend_ptrs; + std::vector backend_buft; + + ggml_backend_t backend = nullptr; + ggml_backend_t backend_cpu = nullptr; + ggml_backend_buffer_t buf = nullptr; - ggml_backend_t backend = NULL; - ggml_gallocr_t compute_alloc = NULL; + ggml_backend_sched_ptr sched; struct clip_image_size * load_image_size; + + clip_ctx(clip_context_params & ctx_params) { + backend_cpu = ggml_backend_init_by_type(GGML_BACKEND_DEVICE_TYPE_CPU, nullptr); + backend = ctx_params.use_gpu + ? 
ggml_backend_init_by_type(GGML_BACKEND_DEVICE_TYPE_GPU, nullptr) + : nullptr; + + if (backend) { + LOG_INF("%s: CLIP using %s backend\n", __func__, ggml_backend_name(backend)); + backend_ptrs.push_back(backend); + backend_buft.push_back(ggml_backend_get_default_buffer_type(backend)); + } else { + backend = backend_cpu; + LOG_INF("%s: CLIP using CPU backend\n", __func__); + } + + backend_ptrs.push_back(backend_cpu); + backend_buft.push_back(ggml_backend_get_default_buffer_type(backend_cpu)); + + sched.reset( + ggml_backend_sched_new(backend_ptrs.data(), backend_buft.data(), backend_ptrs.size(), 8192, false) + ); + } + + ~clip_ctx() { + ggml_free(ctx_data); + gguf_free(ctx_gguf); + ggml_backend_buffer_free(buf); + ggml_backend_free(backend); + if (backend_cpu != backend) { + ggml_backend_free(backend_cpu); + } + } }; static ggml_cgraph * clip_image_build_graph(clip_ctx * ctx, const clip_image_f32_batch * imgs, struct clip_image_size * load_image_size, bool is_inf = false) { @@ -1184,6 +1201,14 @@ static ggml_cgraph * clip_image_build_graph(clip_ctx * ctx, const clip_image_f32 // read and create ggml_context containing the tensors and their data struct clip_ctx * clip_model_load(const char * fname, const int verbosity = 1) { + return clip_init(fname, clip_context_params{ + /* use_gpu */ true, + /* verbosity */ verbosity, + }); +} + +struct clip_ctx * clip_init(const char * fname, struct clip_context_params ctx_params) { + int verbosity = ctx_params.verbosity; struct ggml_context * meta = NULL; struct gguf_init_params params = { @@ -1277,7 +1302,7 @@ struct clip_ctx * clip_model_load(const char * fname, const int verbosity = 1) { } } - clip_ctx * new_clip = new clip_ctx{}; + clip_ctx * new_clip = new clip_ctx(ctx_params); // update projector type { @@ -1296,36 +1321,6 @@ struct clip_ctx * clip_model_load(const char * fname, const int verbosity = 1) { } } -//#ifdef GGML_USE_CUDA -// new_clip->backend = ggml_backend_cuda_init(0); -// LOG_INF("%s: CLIP using CUDA backend\n", __func__); -//#endif -// -//#ifdef GGML_USE_METAL -// new_clip->backend = ggml_backend_metal_init(); -// LOG_INF("%s: CLIP using Metal backend\n", __func__); -//#endif -// -//#ifdef GGML_USE_CANN -// new_clip->backend = ggml_backend_cann_init(0); -// LOG_INF("%s: CLIP using CANN backend\n", __func__); -//#endif -// -//#ifdef GGML_USE_VULKAN -// new_clip->backend = ggml_backend_vk_init(0); -// LOG_INF("%s: CLIP using Vulkan backend\n", __func__); -//#endif -// -//#ifdef GGML_USE_SYCL -// new_clip->backend = ggml_backend_sycl_init(0); -// LOG_INF("%s: CLIP using SYCL backend\n", __func__); -//#endif - - if (!new_clip->backend) { - new_clip->backend = ggml_backend_cpu_init(); - LOG_INF("%s: CLIP using CPU backend\n", __func__); - } - // model size and capabilities { int idx = get_key_idx(ctx, KEY_HAS_TEXT_ENC); @@ -1421,7 +1416,9 @@ struct clip_ctx * clip_model_load(const char * fname, const int verbosity = 1) { } // alloc memory and offload data - new_clip->params_buffer = ggml_backend_alloc_ctx_tensors(new_clip->ctx_data, new_clip->backend); + ggml_backend_buffer_type_t buft = ggml_backend_get_default_buffer_type(new_clip->backend); + new_clip->buf = ggml_backend_alloc_ctx_tensors_from_buft(new_clip->ctx_data, buft); + ggml_backend_buffer_set_usage(new_clip->buf, GGML_BACKEND_BUFFER_USAGE_WEIGHTS); for (int i = 0; i < n_tensors; ++i) { const char * name = gguf_get_tensor_name(ctx, i); struct ggml_tensor * cur = ggml_get_tensor(new_clip->ctx_data, name); @@ -1434,7 +1431,7 @@ struct clip_ctx * clip_model_load(const char * 
fname, const int verbosity = 1) { return nullptr; } int num_bytes = ggml_nbytes(cur); - if (ggml_backend_buffer_is_host(new_clip->params_buffer)) { + if (ggml_backend_buft_is_host(buft)) { // for the CPU and Metal backend, we can read directly into the tensor fin.read(reinterpret_cast(cur->data), num_bytes); } else { @@ -1720,14 +1717,21 @@ struct clip_ctx * clip_model_load(const char * fname, const int verbosity = 1) { // measure mem requirement and allocate { new_clip->buf_compute_meta.resize(GGML_DEFAULT_GRAPH_SIZE * ggml_tensor_overhead() + ggml_graph_overhead()); - new_clip->compute_alloc = ggml_gallocr_new(ggml_backend_get_default_buffer_type(new_clip->backend)); clip_image_f32_batch batch; batch.size = 1; batch.data = nullptr; ggml_cgraph * gf = clip_image_build_graph(new_clip, &batch, nullptr, false); - ggml_gallocr_reserve(new_clip->compute_alloc, gf); - size_t compute_memory_buffer_size = ggml_gallocr_get_buffer_size(new_clip->compute_alloc, 0); - LOG_INF("%s: compute allocated memory: %.2f MB\n", __func__, compute_memory_buffer_size /1024.0/1024.0); + ggml_backend_sched_reserve(new_clip->sched.get(), gf); + for (size_t i = 0; i < new_clip->backend_ptrs.size(); ++i) { + ggml_backend_t backend = new_clip->backend_ptrs[i]; + ggml_backend_buffer_type_t buft = new_clip->backend_buft[i]; + size_t size = ggml_backend_sched_get_buffer_size(new_clip->sched.get(), backend); + if (size > 1) { + LOG_INF("%s: %10s compute buffer size = %8.2f MiB\n", __func__, + ggml_backend_buft_name(buft), + size / 1024.0 / 1024.0); + } + } } return new_clip; @@ -2408,12 +2412,6 @@ ggml_tensor * clip_get_newline_tensor(const struct clip_ctx * ctx) { } void clip_free(clip_ctx * ctx) { - ggml_free(ctx->ctx_data); - gguf_free(ctx->ctx_gguf); - - ggml_backend_buffer_free(ctx->params_buffer); - ggml_backend_free(ctx->backend); - ggml_gallocr_free(ctx->compute_alloc); delete ctx; } @@ -2609,8 +2607,9 @@ bool clip_image_batch_encode(clip_ctx * ctx, const int n_threads, const clip_ima } // build the inference graph + ggml_backend_sched_reset(ctx->sched.get()); ggml_cgraph * gf = clip_image_build_graph(ctx, imgs, ctx->load_image_size, true); - ggml_gallocr_alloc_graph(ctx->compute_alloc, gf); + ggml_backend_sched_alloc_graph(ctx->sched.get(), gf); // set inputs const auto & model = ctx->vision_model; @@ -2775,11 +2774,13 @@ bool clip_image_batch_encode(clip_ctx * ctx, const int n_threads, const clip_ima } } - if (ggml_backend_is_cpu(ctx->backend)) { - ggml_backend_cpu_set_n_threads(ctx->backend, n_threads); - } + ggml_backend_cpu_set_n_threads(ctx->backend_cpu, n_threads); - ggml_backend_graph_compute(ctx->backend, gf); + auto status = ggml_backend_sched_graph_compute(ctx->sched.get(), gf); + if (status != GGML_STATUS_SUCCESS) { + LOG_ERR("%s: ggml_backend_sched_graph_compute failed with error %d\n", __func__, status); + return false; + } // the last node is the embedding tensor struct ggml_tensor * embeddings = ggml_graph_node(gf, -1); diff --git a/examples/llava/clip.h b/examples/llava/clip.h index 002c419653a01..47059ca1b9f78 100644 --- a/examples/llava/clip.h +++ b/examples/llava/clip.h @@ -39,8 +39,15 @@ struct clip_image_f32_batch { size_t size; }; -CLIP_API struct clip_ctx * clip_model_load (const char * fname, int verbosity); -CLIP_API struct clip_ctx * clip_model_load_cpu(const char * fname, int verbosity); +struct clip_context_params { + bool use_gpu; + int verbosity; +}; + +// deprecated, use clip_init +CLIP_API struct clip_ctx * clip_model_load(const char * fname, int verbosity); + +CLIP_API struct 
clip_ctx * clip_init(const char * fname, struct clip_context_params ctx_params); CLIP_API void clip_free(struct clip_ctx * ctx); diff --git a/examples/llava/minicpmv-cli.cpp b/examples/llava/minicpmv-cli.cpp index 23b3de4db273a..12f536cf5cfff 100644 --- a/examples/llava/minicpmv-cli.cpp +++ b/examples/llava/minicpmv-cli.cpp @@ -86,7 +86,11 @@ static struct clip_ctx * clip_init_context(common_params * params) { if (prompt.empty()) { prompt = "describe the image in detail."; } - auto * ctx_clip = clip_model_load(clip_path, /*verbosity=*/ 1); + struct clip_context_params clip_params = { + /* use_gpu */ params->n_gpu_layers != 0, + /* verbosity */ params->verbosity, + }; + auto * ctx_clip = clip_init(clip_path, clip_params); return ctx_clip; } From 6ab2e4765a673abcd162258a2671560a76106d69 Mon Sep 17 00:00:00 2001 From: BB-fat <45072480+BB-fat@users.noreply.github.com> Date: Tue, 11 Mar 2025 19:45:02 +0800 Subject: [PATCH 036/398] metal : Cache the Metal library at the device context level (#12265) --- ggml/src/ggml-metal/ggml-metal.m | 255 ++++++++++++++++--------------- 1 file changed, 135 insertions(+), 120 deletions(-) diff --git a/ggml/src/ggml-metal/ggml-metal.m b/ggml/src/ggml-metal/ggml-metal.m index 1158b285c19bc..e51a4169a23bf 100644 --- a/ggml/src/ggml-metal/ggml-metal.m +++ b/ggml/src/ggml-metal/ggml-metal.m @@ -46,6 +46,7 @@ static struct ggml_backend_metal_device_context { id mtl_device; int mtl_device_ref_count; + id mtl_library; bool has_simdgroup_reduction; bool has_simdgroup_mm; @@ -57,6 +58,7 @@ } g_ggml_ctx_dev_main = { /*.mtl_device =*/ nil, /*.mtl_device_ref_count =*/ 0, + /*.mtl_library =*/ nil, /*.has_simdgroup_reduction =*/ false, /*.has_simdgroup_mm =*/ false, /*.has_residency_sets =*/ false, @@ -108,6 +110,11 @@ static void ggml_backend_metal_device_rel(struct ggml_backend_metal_device_conte ctx->mtl_device_ref_count--; if (ctx->mtl_device_ref_count == 0) { + if (ctx->mtl_library) { + [ctx->mtl_library release]; + ctx->mtl_library = nil; + } + if (ctx->mtl_device) { [ctx->mtl_device release]; ctx->mtl_device = nil; @@ -495,163 +502,174 @@ @implementation GGMLMetalClass return data; } -static struct ggml_backend_metal_context * ggml_metal_init(ggml_backend_dev_t dev) { - GGML_LOG_INFO("%s: allocating\n", __func__); - -#if TARGET_OS_OSX && !GGML_METAL_NDEBUG - // Show all the Metal device instances in the system - NSArray * devices = MTLCopyAllDevices(); - for (id device in devices) { - GGML_LOG_INFO("%s: found device: %s\n", __func__, [[device name] UTF8String]); - } - [devices release]; // since it was created by a *Copy* C method -#endif - - // init context - struct ggml_backend_metal_context * ctx = calloc(1, sizeof(struct ggml_backend_metal_context)); - struct ggml_backend_metal_device_context * ctx_dev = dev->context; - - id device = ggml_backend_metal_device_acq(ctx_dev); - GGML_LOG_INFO("%s: picking default device: %s\n", __func__, [[device name] UTF8String]); - - ctx->queue = [device newCommandQueue]; - if (ctx->queue == nil) { - GGML_LOG_ERROR("%s: error: failed to create command queue\n", __func__); - return NULL; - } - - ctx->d_queue = dispatch_queue_create("ggml-metal", DISPATCH_QUEUE_CONCURRENT); - +// load library +// +// - first check if the library is embedded +// - then check if the library is in the bundle +// - if not found, load the source and compile it +// - if that fails, return NULL +static id ggml_metal_load_library(id device, bool use_bfloat) { id metal_library = nil; - - // load library - // - // - first check if the library is embedded - // 
- then check if the library is in the bundle - // - if not found, load the source and compile it - // - if that fails, return NULL - { - NSError * error = nil; - NSString * src = nil; + NSError * error = nil; + NSString * src = nil; #if GGML_METAL_EMBED_LIBRARY - GGML_LOG_INFO("%s: using embedded metal library\n", __func__); + GGML_LOG_INFO("%s: using embedded metal library\n", __func__); - extern const char ggml_metallib_start[]; - extern const char ggml_metallib_end[]; + extern const char ggml_metallib_start[]; + extern const char ggml_metallib_end[]; - src = [[NSString alloc] initWithBytes:ggml_metallib_start length:(ggml_metallib_end-ggml_metallib_start) encoding:NSUTF8StringEncoding]; + src = [[NSString alloc] initWithBytes:ggml_metallib_start length:(ggml_metallib_end-ggml_metallib_start) encoding:NSUTF8StringEncoding]; #else #ifdef SWIFT_PACKAGE - NSBundle * bundle = SWIFTPM_MODULE_BUNDLE; + NSBundle * bundle = SWIFTPM_MODULE_BUNDLE; #else - NSBundle * bundle = [NSBundle bundleForClass:[GGMLMetalClass class]]; + NSBundle * bundle = [NSBundle bundleForClass:[GGMLMetalClass class]]; #endif - NSString * path_lib = [bundle pathForResource:@"default" ofType:@"metallib"]; - if (path_lib == nil) { - // Try to find the resource in the directory where the current binary located. - NSString * current_binary = [[NSProcessInfo processInfo] arguments][0]; - NSString * bin_dir = [current_binary stringByDeletingLastPathComponent]; - NSString * default_metallib_path = [NSString pathWithComponents:@[bin_dir, @"default.metallib"]]; - if ([[NSFileManager defaultManager] isReadableFileAtPath:default_metallib_path]) { - GGML_LOG_INFO("%s: found '%s'\n", __func__, [default_metallib_path UTF8String]); - NSDictionary * atts = [[NSFileManager defaultManager] attributesOfItemAtPath:default_metallib_path error:&error]; - if (atts && atts[NSFileType] == NSFileTypeSymbolicLink) { - // Optionally, if this is a symlink, try to resolve it. - default_metallib_path = [[NSFileManager defaultManager] destinationOfSymbolicLinkAtPath:default_metallib_path error:&error]; - if (default_metallib_path && [default_metallib_path length] > 0 && ![[default_metallib_path substringToIndex:1] isEqualToString:@"/"]) { - // It is a relative path, adding the binary directory as directory prefix. - default_metallib_path = [NSString pathWithComponents:@[bin_dir, default_metallib_path]]; - } - if (!default_metallib_path || ![[NSFileManager defaultManager] isReadableFileAtPath:default_metallib_path]) { - // Link to the resource could not be resolved. - default_metallib_path = nil; - } else { - GGML_LOG_INFO("%s: symlink resolved '%s'\n", __func__, [default_metallib_path UTF8String]); - } + NSString * path_lib = [bundle pathForResource:@"default" ofType:@"metallib"]; + if (path_lib == nil) { + // Try to find the resource in the directory where the current binary located. + NSString * current_binary = [[NSProcessInfo processInfo] arguments][0]; + NSString * bin_dir = [current_binary stringByDeletingLastPathComponent]; + NSString * default_metallib_path = [NSString pathWithComponents:@[bin_dir, @"default.metallib"]]; + if ([[NSFileManager defaultManager] isReadableFileAtPath:default_metallib_path]) { + GGML_LOG_INFO("%s: found '%s'\n", __func__, [default_metallib_path UTF8String]); + NSDictionary * atts = [[NSFileManager defaultManager] attributesOfItemAtPath:default_metallib_path error:&error]; + if (atts && atts[NSFileType] == NSFileTypeSymbolicLink) { + // Optionally, if this is a symlink, try to resolve it. 
+ default_metallib_path = [[NSFileManager defaultManager] destinationOfSymbolicLinkAtPath:default_metallib_path error:&error]; + if (default_metallib_path && [default_metallib_path length] > 0 && ![[default_metallib_path substringToIndex:1] isEqualToString:@"/"]) { + // It is a relative path, adding the binary directory as directory prefix. + default_metallib_path = [NSString pathWithComponents:@[bin_dir, default_metallib_path]]; + } + if (!default_metallib_path || ![[NSFileManager defaultManager] isReadableFileAtPath:default_metallib_path]) { + // Link to the resource could not be resolved. + default_metallib_path = nil; + } else { + GGML_LOG_INFO("%s: symlink resolved '%s'\n", __func__, [default_metallib_path UTF8String]); } - } else { - // The resource couldn't be found in the binary's directory. - default_metallib_path = nil; } - path_lib = default_metallib_path; + } else { + // The resource couldn't be found in the binary's directory. + default_metallib_path = nil; } + path_lib = default_metallib_path; + } - if (path_lib != nil) { - // pre-compiled library found - NSURL * libURL = [NSURL fileURLWithPath:path_lib]; - GGML_LOG_INFO("%s: loading '%s'\n", __func__, [path_lib UTF8String]); + if (path_lib != nil) { + // pre-compiled library found + NSURL * libURL = [NSURL fileURLWithPath:path_lib]; + GGML_LOG_INFO("%s: loading '%s'\n", __func__, [path_lib UTF8String]); - metal_library = [device newLibraryWithURL:libURL error:&error]; - if (error) { - GGML_LOG_ERROR("%s: error: %s\n", __func__, [[error description] UTF8String]); - return NULL; - } - } else { - GGML_LOG_INFO("%s: default.metallib not found, loading from source\n", __func__); + metal_library = [device newLibraryWithURL:libURL error:&error]; + if (error) { + GGML_LOG_ERROR("%s: error: %s\n", __func__, [[error description] UTF8String]); + return NULL; + } + } else { + GGML_LOG_INFO("%s: default.metallib not found, loading from source\n", __func__); - NSString * path_source; - NSString * path_resource = [[NSProcessInfo processInfo].environment objectForKey:@"GGML_METAL_PATH_RESOURCES"]; + NSString * path_source; + NSString * path_resource = [[NSProcessInfo processInfo].environment objectForKey:@"GGML_METAL_PATH_RESOURCES"]; - GGML_LOG_INFO("%s: GGML_METAL_PATH_RESOURCES = %s\n", __func__, path_resource ? [path_resource UTF8String] : "nil"); + GGML_LOG_INFO("%s: GGML_METAL_PATH_RESOURCES = %s\n", __func__, path_resource ? 
[path_resource UTF8String] : "nil"); - if (path_resource) { - path_source = [path_resource stringByAppendingPathComponent:@"ggml-metal.metal"]; - } else { - path_source = [bundle pathForResource:@"ggml-metal" ofType:@"metal"]; - } + if (path_resource) { + path_source = [path_resource stringByAppendingPathComponent:@"ggml-metal.metal"]; + } else { + path_source = [bundle pathForResource:@"ggml-metal" ofType:@"metal"]; + } - if (path_source == nil) { - GGML_LOG_WARN("%s: error: could not use bundle path to find ggml-metal.metal, falling back to trying cwd\n", __func__); - path_source = @"ggml-metal.metal"; - } + if (path_source == nil) { + GGML_LOG_WARN("%s: error: could not use bundle path to find ggml-metal.metal, falling back to trying cwd\n", __func__); + path_source = @"ggml-metal.metal"; + } - GGML_LOG_INFO("%s: loading '%s'\n", __func__, [path_source UTF8String]); + GGML_LOG_INFO("%s: loading '%s'\n", __func__, [path_source UTF8String]); - src = [NSString stringWithContentsOfFile:path_source encoding:NSUTF8StringEncoding error:&error]; - if (error) { - GGML_LOG_ERROR("%s: error: %s\n", __func__, [[error description] UTF8String]); - return NULL; - } + src = [NSString stringWithContentsOfFile:path_source encoding:NSUTF8StringEncoding error:&error]; + if (error) { + GGML_LOG_ERROR("%s: error: %s\n", __func__, [[error description] UTF8String]); + return NULL; } + } #endif - if (!metal_library) { - @autoreleasepool { - // dictionary of preprocessor macros - NSMutableDictionary * prep = [NSMutableDictionary dictionary]; + if (!metal_library) { + @autoreleasepool { + // dictionary of preprocessor macros + NSMutableDictionary * prep = [NSMutableDictionary dictionary]; - if (ctx_dev->use_bfloat) { - [prep setObject:@"1" forKey:@"GGML_METAL_USE_BF16"]; - } + if (use_bfloat) { + [prep setObject:@"1" forKey:@"GGML_METAL_USE_BF16"]; + } #if GGML_METAL_EMBED_LIBRARY - [prep setObject:@"1" forKey:@"GGML_METAL_EMBED_LIBRARY"]; + [prep setObject:@"1" forKey:@"GGML_METAL_EMBED_LIBRARY"]; #endif - MTLCompileOptions * options = [MTLCompileOptions new]; - options.preprocessorMacros = prep; + MTLCompileOptions * options = [MTLCompileOptions new]; + options.preprocessorMacros = prep; - //[options setFastMathEnabled:false]; + //[options setFastMathEnabled:false]; - metal_library = [device newLibraryWithSource:src options:options error:&error]; - if (error) { - GGML_LOG_ERROR("%s: error: %s\n", __func__, [[error description] UTF8String]); - return NULL; - } + metal_library = [device newLibraryWithSource:src options:options error:&error]; + if (error) { + GGML_LOG_ERROR("%s: error: %s\n", __func__, [[error description] UTF8String]); + return NULL; + } #if !__has_feature(objc_arc) - [options release]; + [options release]; #endif - } } + } #if GGML_METAL_EMBED_LIBRARY - [src release]; + [src release]; #endif // GGML_METAL_EMBED_LIBRARY + + return metal_library; +} + +static struct ggml_backend_metal_context * ggml_metal_init(ggml_backend_dev_t dev) { + GGML_LOG_INFO("%s: allocating\n", __func__); + +#if TARGET_OS_OSX && !GGML_METAL_NDEBUG + // Show all the Metal device instances in the system + NSArray * devices = MTLCopyAllDevices(); + for (id device in devices) { + GGML_LOG_INFO("%s: found device: %s\n", __func__, [[device name] UTF8String]); + } + [devices release]; // since it was created by a *Copy* C method +#endif + + // init context + struct ggml_backend_metal_context * ctx = calloc(1, sizeof(struct ggml_backend_metal_context)); + struct ggml_backend_metal_device_context * ctx_dev = dev->context; + + id 
device = ggml_backend_metal_device_acq(ctx_dev); + GGML_LOG_INFO("%s: picking default device: %s\n", __func__, [[device name] UTF8String]); + + ctx->queue = [device newCommandQueue]; + if (ctx->queue == nil) { + GGML_LOG_ERROR("%s: error: failed to create command queue\n", __func__); + return NULL; + } + + ctx->d_queue = dispatch_queue_create("ggml-metal", DISPATCH_QUEUE_CONCURRENT); + + // load library + if (ctx_dev->mtl_library == nil) { + ctx_dev->mtl_library = ggml_metal_load_library(device, ctx_dev->use_bfloat); + } + id metal_library = ctx_dev->mtl_library; + if (metal_library == nil) { + GGML_LOG_ERROR("%s: error: metal library is nil\n", __func__); + return NULL; } // print MTL GPU family: @@ -725,7 +743,6 @@ @implementation GGMLMetalClass [metal_function release]; \ if (error) { \ GGML_LOG_ERROR("%s: error: load pipeline error: %s\n", __func__, [[error description] UTF8String]); \ - [metal_library release]; \ return NULL; \ } \ } else { \ @@ -1044,8 +1061,6 @@ @implementation GGMLMetalClass GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_POOL_2D_MAX_F32, pool_2d_max_f32, true); } - [metal_library release]; - return ctx; } From ba7654380a3c7c1b5ae154bea19134a3a9417a1e Mon Sep 17 00:00:00 2001 From: jklincn <985765408@qq.com> Date: Tue, 11 Mar 2025 21:25:17 +0800 Subject: [PATCH 037/398] ggml-backend : fix backend search path (#12330) * Fix backend search path * replace .native() with '/' * reverted .native() --- ggml/src/ggml-backend-reg.cpp | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/ggml/src/ggml-backend-reg.cpp b/ggml/src/ggml-backend-reg.cpp index 9bedeae78affb..405d8e31514b5 100644 --- a/ggml/src/ggml-backend-reg.cpp +++ b/ggml/src/ggml-backend-reg.cpp @@ -497,7 +497,7 @@ static ggml_backend_reg_t ggml_backend_load_best(const char * name, bool silent, search_paths.push_back(get_executable_path()); search_paths.push_back(fs::current_path()); } else { - search_paths.push_back(user_search_path); + search_paths.push_back(fs::u8path(user_search_path)); } int best_score = 0; @@ -511,9 +511,9 @@ static ggml_backend_reg_t ggml_backend_load_best(const char * name, bool silent, fs::directory_iterator dir_it(search_path, fs::directory_options::skip_permission_denied); for (const auto & entry : dir_it) { if (entry.is_regular_file()) { - auto filename = entry.path().filename().native(); - auto ext = entry.path().extension().native(); - if (filename.find(file_prefix) == 0 && ext == file_extension) { + auto filename = entry.path().filename(); + auto ext = entry.path().extension(); + if (filename.native().find(file_prefix) == 0 && ext == file_extension) { dl_handle_ptr handle { dl_load_library(entry) }; if (!handle && !silent) { GGML_LOG_ERROR("%s: failed to load %s\n", __func__, path_str(entry.path()).c_str()); @@ -544,7 +544,7 @@ static ggml_backend_reg_t ggml_backend_load_best(const char * name, bool silent, // try to load the base backend for (const auto & search_path : search_paths) { fs::path filename = backend_filename_prefix().native() + name_path.native() + backend_filename_extension().native(); - fs::path path = search_path.native() + filename.native(); + fs::path path = search_path / filename; if (fs::exists(path)) { return get_reg().load_backend(path, silent); } From 10f2e81809bbb69ecfe64fc8b4686285f84b0c07 Mon Sep 17 00:00:00 2001 From: uvos Date: Tue, 11 Mar 2025 20:16:03 +0100 Subject: [PATCH 038/398] CUDA/HIP: refractor mmqv to unify the calculation of nwarps and rows per block between host and device code. 
(#12177) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit refactor mmqv to unify the calculation of nwarps and rows per block between host and device code. --------- Co-authored-by: Johannes Gäßler --- ggml/src/ggml-cuda/common.cuh | 4 +- ggml/src/ggml-cuda/mmvq.cu | 197 ++++++++++++++++++++++++---------- 2 files changed, 142 insertions(+), 59 deletions(-) diff --git a/ggml/src/ggml-cuda/common.cuh b/ggml/src/ggml-cuda/common.cuh index 1832314ec133b..4d4ac47c034e1 100644 --- a/ggml/src/ggml-cuda/common.cuh +++ b/ggml/src/ggml-cuda/common.cuh @@ -395,11 +395,11 @@ static __device__ __forceinline__ uint32_t __hgt2_mask(const half2 a, const half static __device__ __forceinline__ int ggml_cuda_dp4a(const int a, const int b, int c) { #if defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__) -#if defined(__gfx906__) || defined(__gfx908__) || defined(__gfx90a__) || defined(RDNA2) +#if defined(CDNA) || defined(RDNA2) || defined(__gfx906__) c = __builtin_amdgcn_sdot4(a, b, c, false); #elif defined(RDNA3) c = __builtin_amdgcn_sudot4( true, a, true, b, c, false); -#elif defined(__gfx1010__) || defined(__gfx900__) +#elif defined(RDNA1) || defined(__gfx900__) int tmp1; int tmp2; asm("\n \ diff --git a/ggml/src/ggml-cuda/mmvq.cu b/ggml/src/ggml-cuda/mmvq.cu index 4fb466ca00627..a7d518a574ddc 100644 --- a/ggml/src/ggml-cuda/mmvq.cu +++ b/ggml/src/ggml-cuda/mmvq.cu @@ -47,11 +47,89 @@ static constexpr __device__ int get_vdr_mmvq(ggml_type type) { 1; } +enum mmvq_parameter_table_id { + MMVQ_PARAMETERS_GENERIC = 0, + MMVQ_PARAMETERS_GCN, + MMVQ_PARAMETERS_RDNA2 +}; + +static constexpr __device__ mmvq_parameter_table_id get_device_table_id() { +#if defined(RDNA2) || defined(RDNA3) + return MMVQ_PARAMETERS_RDNA2; +#elif defined(GCN) || defined(CDNA) + return MMVQ_PARAMETERS_GCN; +#else + return MMVQ_PARAMETERS_GENERIC; +#endif +} + +static __host__ mmvq_parameter_table_id get_device_table_id(int cc) { + if (GGML_CUDA_CC_IS_RDNA2(cc) || GGML_CUDA_CC_IS_RDNA3(cc)) { + return MMVQ_PARAMETERS_RDNA2; + } + if (GGML_CUDA_CC_IS_GCN(cc) || GGML_CUDA_CC_IS_CDNA(cc)) { + return MMVQ_PARAMETERS_GCN; + } + return MMVQ_PARAMETERS_GENERIC; +} + +static constexpr __host__ __device__ int calc_nwarps(int ncols_y, mmvq_parameter_table_id table_id) { + if (table_id == MMVQ_PARAMETERS_GENERIC) { + switch (ncols_y) { + case 1: + case 2: + case 3: + case 4: + return 4; + case 5: + case 6: + case 7: + case 8: + return 2; + default: + return 1; + } + } else if (table_id == MMVQ_PARAMETERS_GCN) { + switch (ncols_y) { + case 1: + case 2: + case 3: + case 4: + return 2; + case 5: + case 6: + case 7: + case 8: + default: + return 1; + } + } + return 1; +} + +static constexpr __host__ __device__ int calc_rows_per_block(int ncols_y, int table_id) { + if (table_id == MMVQ_PARAMETERS_GENERIC || table_id == MMVQ_PARAMETERS_GCN) { + switch (ncols_y) { + case 1: + return 1; + case 2: + case 3: + case 4: + case 5: + case 6: + case 7: + case 8: + return 2; + default: + return 1; + } + } + return 1; +} + template -#if !(defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__)) // tell the compiler to use as many registers as it wants, see nwarps definition below -__launch_bounds__((ncols_y <= 4 ? 
4 : 2)*WARP_SIZE, 1) -#endif // !(defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__)) +__launch_bounds__(calc_nwarps(ncols_y, get_device_table_id())*ggml_cuda_get_physical_warp_size(), 1) static __global__ void mul_mat_vec_q( const void * __restrict__ vx, const void * __restrict__ vy, float * __restrict__ dst, const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) { @@ -59,24 +137,20 @@ static __global__ void mul_mat_vec_q( constexpr int qk = ggml_cuda_type_traits::qk; constexpr int qi = ggml_cuda_type_traits::qi; constexpr int vdr = get_vdr_mmvq(type); + constexpr mmvq_parameter_table_id table_id = get_device_table_id(); + constexpr int nwarps = calc_nwarps(ncols_y, table_id); + constexpr int rows_per_cuda_block = calc_rows_per_block(ncols_y, table_id); + constexpr int warp_size = ggml_cuda_get_physical_warp_size(); constexpr vec_dot_q_cuda_t vec_dot_q_cuda = get_vec_dot_q_cuda(type); -#if defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__) && (defined(RDNA2) || defined(RDNA3)) - constexpr int nwarps = 1; - constexpr int rows_per_cuda_block = 1; -#else - constexpr int nwarps = ncols_y <= 4 ? 4 : 2; - constexpr int rows_per_cuda_block = ncols_y == 1 ? 1 : 2; -#endif // defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__) && !defined(RDNA2) && !defined(RDNA3) - - const int tid = WARP_SIZE*threadIdx.y + threadIdx.x; + const int tid = warp_size*threadIdx.y + threadIdx.x; const int row0 = rows_per_cuda_block*blockIdx.x; const int blocks_per_row_x = ncols_x / qk; const int blocks_per_col_y = nrows_y / QK8_1; - constexpr int blocks_per_iter = vdr * nwarps*WARP_SIZE / qi; + constexpr int blocks_per_iter = vdr * nwarps*warp_size / qi; -// partial sum for each thread + // partial sum for each thread float tmp[ncols_y][rows_per_cuda_block] = {0.0f}; const block_q8_1 * y = (const block_q8_1 *) vy; @@ -96,7 +170,7 @@ static __global__ void mul_mat_vec_q( } } - __shared__ float tmp_shared[nwarps-1 > 0 ? nwarps-1 : 1][ncols_y][rows_per_cuda_block][WARP_SIZE]; + __shared__ float tmp_shared[nwarps-1 > 0 ? 
nwarps-1 : 1][ncols_y][rows_per_cuda_block][warp_size]; if (threadIdx.y > 0) { #pragma unroll for (int j = 0; j < ncols_y; ++j) { @@ -120,7 +194,7 @@ static __global__ void mul_mat_vec_q( for (int l = 0; l < nwarps-1; ++l) { tmp[j][i] += tmp_shared[l][j][i][threadIdx.x]; } - tmp[j][i] = warp_reduce_sum(tmp[j][i]); + tmp[j][i] = warp_reduce_sum(tmp[j][i]); } if (threadIdx.x < rows_per_cuda_block && (rows_per_cuda_block == 1 || row0 + threadIdx.x < nrows_dst)) { @@ -129,6 +203,13 @@ static __global__ void mul_mat_vec_q( } } +static std::pair calc_launch_params(const int ncols_y, const int nrows_x, const int warp_size, const mmvq_parameter_table_id table_id) { + const int64_t nblocks = (nrows_x + calc_rows_per_block(ncols_y, table_id) - 1) / calc_rows_per_block(ncols_y, table_id); + const dim3 block_nums(nblocks, 1, 1); + const dim3 block_dims(warp_size, calc_nwarps(ncols_y, table_id), 1); + return {block_nums, block_dims}; +} + template static void mul_mat_vec_q_cuda( const void * vx, const void * vy, float * dst, @@ -137,65 +218,67 @@ static void mul_mat_vec_q_cuda( GGML_ASSERT(ncols_x % ggml_blck_size(type) == 0); GGML_ASSERT(ncols_y <= MMVQ_MAX_BATCH_SIZE); - int id = ggml_cuda_get_device(); - - int64_t nwarps = 1; - int64_t rows_per_cuda_block = 1; - - if (ggml_cuda_info().devices[id].cc < GGML_CUDA_CC_RDNA2) { // NVIDIA and AMD older than RDNA2 - switch(ncols_y) { - case 1: - nwarps = 4; - rows_per_cuda_block = 1; - break; - case 2: - case 3: - case 4: - nwarps = 4; - rows_per_cuda_block = 2; - break; - case 5: - case 6: - case 7: - case 8: - nwarps = 2; - rows_per_cuda_block = 2; - break; - default: - GGML_ABORT("fatal error"); - break; - } - } - - const int64_t nblocks = (nrows_x + rows_per_cuda_block - 1) / rows_per_cuda_block; - const dim3 block_nums(nblocks, 1, 1); - const dim3 block_dims(WARP_SIZE, nwarps, 1); + const int device = ggml_cuda_get_device(); + const int warp_size = ggml_cuda_info().devices[device].warp_size; + const mmvq_parameter_table_id table_id = get_device_table_id(ggml_cuda_info().devices[device].cc); switch (ncols_y) { case 1: - mul_mat_vec_q<<>>(vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); + { + constexpr int c_ncols_y = 1; + std::pair dims = calc_launch_params(c_ncols_y, nrows_x, warp_size, table_id); + mul_mat_vec_q<<>>(vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); break; + } case 2: - mul_mat_vec_q<<>>(vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); + { + constexpr int c_ncols_y = 2; + std::pair dims = calc_launch_params(c_ncols_y, nrows_x, warp_size, table_id); + mul_mat_vec_q<<>>(vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); break; + } case 3: - mul_mat_vec_q<<>>(vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); + { + constexpr int c_ncols_y = 3; + std::pair dims = calc_launch_params(c_ncols_y, nrows_x, warp_size, table_id); + mul_mat_vec_q<<>>(vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); break; + } case 4: - mul_mat_vec_q<<>>(vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); + { + constexpr int c_ncols_y = 4; + std::pair dims = calc_launch_params(c_ncols_y, nrows_x, warp_size, table_id); + mul_mat_vec_q<<>>(vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); break; + } case 5: - mul_mat_vec_q<<>>(vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); + { + constexpr int c_ncols_y = 5; + std::pair dims = calc_launch_params(c_ncols_y, nrows_x, warp_size, table_id); + mul_mat_vec_q<<>>(vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); break; + } case 6: - mul_mat_vec_q<<>>(vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); 
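// The core idea of this refactor, as a minimal standalone sketch (the names
// pick_nwarps/kernel below are illustrative, not part of the patch): a
// constexpr function marked __host__ __device__ can feed both the kernel's
// __launch_bounds__ and the host-side launch dimensions, so the
// nwarps/rows-per-block table lives in exactly one place instead of being
// duplicated between device preprocessor branches and a host-side switch.
//
//     static constexpr __host__ __device__ int pick_nwarps(int ncols_y) {
//         return ncols_y <= 4 ? 4 : (ncols_y <= 8 ? 2 : 1);   // generic table
//     }
//
//     template <int ncols_y>
//     __launch_bounds__(pick_nwarps(ncols_y) * 32, 1)   // 32 stands in for the physical warp size
//     __global__ void kernel(/* ... */);
//
//     // host side reuses the same helper:
//     //     const dim3 block_dims(32, pick_nwarps(ncols_y), 1);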
+ { + constexpr int c_ncols_y = 6; + std::pair dims = calc_launch_params(c_ncols_y, nrows_x, warp_size, table_id); + mul_mat_vec_q<<>>(vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); break; + } case 7: - mul_mat_vec_q<<>>(vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); + { + constexpr int c_ncols_y = 7; + std::pair dims = calc_launch_params(c_ncols_y, nrows_x, warp_size, table_id); + mul_mat_vec_q<<>>(vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); break; + } case 8: - mul_mat_vec_q<<>>(vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); + { + constexpr int c_ncols_y = 8; + std::pair dims = calc_launch_params(c_ncols_y, nrows_x, warp_size, table_id); + mul_mat_vec_q<<>>(vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); break; + } default: GGML_ABORT("fatal error"); break; From bf69cfe62f9ccc01112c0232a55820b95a8c1fda Mon Sep 17 00:00:00 2001 From: Jeff Bolz Date: Wed, 12 Mar 2025 00:59:19 -0500 Subject: [PATCH 039/398] vulkan: fix bug in coopmat1 mul_mat_id (#12316) * tests: run mul_mat_id with a larger N * vulkan: fix bug in coopmat1 mul_mat_id --- ggml/src/ggml-vulkan/vulkan-shaders/mul_mm.comp | 2 +- tests/test-backend-ops.cpp | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/mul_mm.comp b/ggml/src/ggml-vulkan/vulkan-shaders/mul_mm.comp index a8fd93fdeadee..0d03411f24ca4 100644 --- a/ggml/src/ggml-vulkan/vulkan-shaders/mul_mm.comp +++ b/ggml/src/ggml-vulkan/vulkan-shaders/mul_mm.comp @@ -777,7 +777,7 @@ void main() { [[unroll]] for (uint cm_col = 0; cm_col < cms_per_col; cm_col++) { coopMatStore(sums[cm_col * cms_per_row + cm_row], coopmat_stage, warp_i * TM * TN, TM, gl_CooperativeMatrixLayoutColumnMajor); - [[unroll]] for (uint col = 0; col < BN; col += storestride) { + [[unroll]] for (uint col = 0; col < TN; col += storestride) { const uint row_i = dc + cm_col * TN + col + store_c; if (row_i >= _ne1) break; diff --git a/tests/test-backend-ops.cpp b/tests/test-backend-ops.cpp index b4e3631ed081a..c86ffb64e9e89 100644 --- a/tests/test-backend-ops.cpp +++ b/tests/test-backend-ops.cpp @@ -4113,7 +4113,7 @@ static std::vector> make_test_cases_eval() { for (int n_mats : {4, 8}) { for (int n_used : {1, 2, 4}) { for (bool b : {false, true}) { - for (int n : {1, 32}) { + for (int n : {1, 32, 129}) { int m = 512; int k = 256; test_cases.emplace_back(new test_mul_mat_id(type_a, type_b, n_mats, n_used, b, m, n, k)); From 7841fc723e059d1fd9640e5c0ef19050fcc7c698 Mon Sep 17 00:00:00 2001 From: Xuan-Son Nguyen Date: Wed, 12 Mar 2025 09:30:24 +0100 Subject: [PATCH 040/398] llama : Add Gemma 3 support (+ experimental vision capability) (#12343) * llama : Add Gemma 3 text-only support * fix python coding style * fix compile on ubuntu * python: fix style * fix ubuntu compile * fix build on ubuntu (again) * fix ubuntu build, finally * clip : Experimental support for Gemma 3 vision (#12344) * clip : Experimental support for Gemma 3 vision * fix build * PRId64 --- convert_hf_to_gguf.py | 80 ++++ examples/llava/CMakeLists.txt | 7 + examples/llava/README-gemma3.md | 30 ++ examples/llava/clip.cpp | 210 ++++++++++- examples/llava/gemma3-cli.cpp | 341 ++++++++++++++++++ .../llava/gemma3_convert_encoder_to_gguf.py | 307 ++++++++++++++++ gguf-py/gguf/constants.py | 19 + src/llama-arch.cpp | 21 ++ src/llama-arch.h | 1 + src/llama-model.cpp | 49 +++ src/llama.cpp | 147 ++++++++ 11 files changed, 1202 insertions(+), 10 deletions(-) create mode 100644 examples/llava/README-gemma3.md create mode 100644 examples/llava/gemma3-cli.cpp create mode 
100644 examples/llava/gemma3_convert_encoder_to_gguf.py diff --git a/convert_hf_to_gguf.py b/convert_hf_to_gguf.py index 6358a94e9b55f..b5d95bd5639f3 100755 --- a/convert_hf_to_gguf.py +++ b/convert_hf_to_gguf.py @@ -861,6 +861,9 @@ def _create_vocab_sentencepiece(self): for token_id, token_data in added_tokens_decoder.items(): token_id = int(token_id) token: str = token_data["content"] + if token_id >= vocab_size: + logger.warning(f'ignore token {token_id}: id is out of range, max={vocab_size - 1}') + continue if toktypes[token_id] != SentencePieceTokenTypes.UNUSED: if tokens[token_id] != token.encode("utf-8"): logger.warning(f'replacing token {token_id}: {tokens[token_id].decode("utf-8")!r} -> {token!r}') @@ -3322,6 +3325,83 @@ def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iter return [(self.map_tensor_name(name), data_torch)] +@Model.register("Gemma3ForCausalLM", "Gemma3ForConditionalGeneration") +class Gemma3Model(Model): + model_arch = gguf.MODEL_ARCH.GEMMA3 + has_vision: bool = False + + # we need to merge the text_config into the root level of hparams + def __init__(self, *args, **kwargs): + hparams = Model.load_hparams(kwargs["dir_model"]) + if "text_config" in hparams: + hparams = {**hparams, **hparams["text_config"]} + kwargs["hparams"] = hparams + super().__init__(*args, **kwargs) + if "vision_config" in hparams: + logger.info("Has vision encoder, but it will be ignored") + self.has_vision = True + + def write(self): + super().write() + if self.has_vision: + logger.info("NOTE: this script only convert the language model to GGUF") + logger.info(" for the vision model, please use gemma3_convert_encoder_to_gguf.py") + + def set_vocab(self): + self._set_vocab_sentencepiece() + + self.gguf_writer.add_add_space_prefix(False) + + def set_gguf_parameters(self): + hparams = self.hparams + block_count = hparams["num_hidden_layers"] + + # some default values are not specified in the hparams + self.gguf_writer.add_context_length(hparams.get("max_position_embeddings", 131072)) + self.gguf_writer.add_embedding_length(hparams["hidden_size"]) + self.gguf_writer.add_block_count(block_count) + self.gguf_writer.add_feed_forward_length(hparams["intermediate_size"]) + self.gguf_writer.add_head_count(hparams.get("num_attention_heads", 8)) + self.gguf_writer.add_layer_norm_rms_eps(self.hparams.get("rms_norm_eps", 1e-6)) + self.gguf_writer.add_key_length(hparams.get("head_dim", 256)) + self.gguf_writer.add_value_length(hparams.get("head_dim", 256)) + self.gguf_writer.add_file_type(self.ftype) + self.gguf_writer.add_rope_freq_base(hparams.get("rope_theta", 1_000_000.0)) # for global layers + # both attn_logit_softcapping and final_logit_softcapping are removed in Gemma3 + assert hparams.get("attn_logit_softcapping") is None + assert hparams.get("final_logit_softcapping") is None + self.gguf_writer.add_sliding_window(hparams["sliding_window"]) + self.gguf_writer.add_head_count_kv(hparams.get("num_key_value_heads", 4)) + if hparams.get("rope_scaling") is not None: + assert hparams["rope_scaling"]["rope_type"] == "linear" + # important: this rope_scaling is only applied for global layers, and not used by 1B model + self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.LINEAR) + self.gguf_writer.add_rope_scaling_factor(hparams["rope_scaling"]["factor"]) + + def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: + del bid # unused + + if name.startswith("language_model."): + name = name.replace("language_model.", "") + 
elif name.startswith("multi_modal_projector.") or name.startswith("vision_tower.") \ + or name.startswith("multimodal_projector.") or name.startswith("vision_model."): # this is for old HF model, should be removed later + # ignore vision tensors + return [] + + # remove OOV (out-of-vocabulary) rows in token_embd + if "embed_tokens.weight" in name: + vocab = self._create_vocab_sentencepiece() + tokens = vocab[0] + data_torch = data_torch[:len(tokens)] + + # ref code in Gemma3RMSNorm + # output = output * (1.0 + self.weight.float()) + if name.endswith("norm.weight"): + data_torch = data_torch + 1 + + return [(self.map_tensor_name(name), data_torch)] + + @Model.register("Starcoder2ForCausalLM") class StarCoder2Model(Model): model_arch = gguf.MODEL_ARCH.STARCODER2 diff --git a/examples/llava/CMakeLists.txt b/examples/llava/CMakeLists.txt index 319effd199aa4..f275ce1ccd003 100644 --- a/examples/llava/CMakeLists.txt +++ b/examples/llava/CMakeLists.txt @@ -51,6 +51,13 @@ install(TARGETS ${TARGET} RUNTIME) target_link_libraries(${TARGET} PRIVATE common llava ${CMAKE_THREAD_LIBS_INIT}) target_compile_features(${TARGET} PRIVATE cxx_std_17) +set(TARGET llama-gemma3-cli) +add_executable(${TARGET} gemma3-cli.cpp) +set_target_properties(${TARGET} PROPERTIES OUTPUT_NAME llama-gemma3-cli) +install(TARGETS ${TARGET} RUNTIME) +target_link_libraries(${TARGET} PRIVATE common llava ${CMAKE_THREAD_LIBS_INIT}) +target_compile_features(${TARGET} PRIVATE cxx_std_17) + set(TARGET llama-llava-clip-quantize-cli) add_executable(${TARGET} clip-quantize-cli.cpp) set_target_properties(${TARGET} PROPERTIES OUTPUT_NAME llama-llava-clip-quantize-cli) diff --git a/examples/llava/README-gemma3.md b/examples/llava/README-gemma3.md new file mode 100644 index 0000000000000..20bf73fb5c043 --- /dev/null +++ b/examples/llava/README-gemma3.md @@ -0,0 +1,30 @@ +# Gemma 3 vision + +> [!IMPORTANT] +> +> This is very experimental, only used for demo purpose. + +## How to get mmproj.gguf? + +```bash +cd gemma-3-4b-it +python ../llama.cpp/examples/llava/gemma3_convert_encoder_to_gguf.py . + +# output file is mmproj.gguf +``` + +## How to run it? + +What you need: +- The text model GGUF, can be converted using `convert_hf_to_gguf.py` +- The mmproj file from step above +- An image file + +```bash +# build +cmake -B build +cmake --build build --target llama-gemma3-cli + +# run it +./build/bin/llama-gemma3-cli -m {text_model}.gguf --mmproj mmproj.gguf --image your_image.jpg +``` diff --git a/examples/llava/clip.cpp b/examples/llava/clip.cpp index 7f892beb6edb1..a1f050e39a094 100644 --- a/examples/llava/clip.cpp +++ b/examples/llava/clip.cpp @@ -136,6 +136,8 @@ static std::string format(const char * fmt, ...) 
{ #define TN_MVLM_PROJ_BLOCK "mm.model.mb_block.%d.block.%d.%s" #define TN_MVLM_PROJ_PEG "mm.model.peg.%d.%s" #define TN_IMAGE_NEWLINE "model.image_newline" +#define TN_MM_INP_PROJ "mm.input_projection.weight" // gemma3 +#define TN_MM_SOFT_EMB_N "mm.soft_emb_norm.weight" // gemma3 #define TN_MINICPMV_POS_EMBD_K "resampler.pos_embed_k" #define TN_MINICPMV_QUERY "resampler.query" @@ -162,6 +164,7 @@ enum projector_type { PROJECTOR_TYPE_RESAMPLER, PROJECTOR_TYPE_GLM_EDGE, PROJECTOR_TYPE_MERGER, + PROJECTOR_TYPE_GEMMA3, PROJECTOR_TYPE_UNKNOWN, }; @@ -172,6 +175,7 @@ static std::map PROJECTOR_TYPE_NAMES = { { PROJECTOR_TYPE_RESAMPLER, "resampler"}, { PROJECTOR_TYPE_GLM_EDGE, "adapter"}, { PROJECTOR_TYPE_MERGER, "qwen2vl_merger"}, + { PROJECTOR_TYPE_GEMMA3, "gemma3"}, }; @@ -298,7 +302,7 @@ static projector_type clip_projector_type_from_string(const std::string & name) return kv.first; } } - return PROJECTOR_TYPE_UNKNOWN; + throw std::runtime_error(format("Unknown projector type: %s", name.c_str())); } #ifdef CLIP_DEBUG_FUNCTIONS @@ -555,6 +559,10 @@ struct clip_vision_model { struct ggml_tensor * mm_model_ln_kv_b; struct ggml_tensor * mm_model_ln_post_w; struct ggml_tensor * mm_model_ln_post_b; + + // gemma3 + struct ggml_tensor * mm_input_proj_w; + struct ggml_tensor * mm_soft_emb_norm_w; }; struct clip_ctx { @@ -569,7 +577,7 @@ struct clip_ctx { struct clip_vision_model vision_model; projector_type proj_type = PROJECTOR_TYPE_MLP; - int32_t max_feature_layer; + int32_t max_feature_layer; // unused in newer models like gemma3 float image_mean[3]; float image_std[3]; bool use_gelu = false; @@ -595,7 +603,7 @@ struct clip_ctx { ggml_backend_sched_ptr sched; - struct clip_image_size * load_image_size; + struct clip_image_size * load_image_size = nullptr; clip_ctx(clip_context_params & ctx_params) { backend_cpu = ggml_backend_init_by_type(GGML_BACKEND_DEVICE_TYPE_CPU, nullptr); @@ -631,7 +639,159 @@ struct clip_ctx { } }; -static ggml_cgraph * clip_image_build_graph(clip_ctx * ctx, const clip_image_f32_batch * imgs, struct clip_image_size * load_image_size, bool is_inf = false) { +static ggml_cgraph * clip_image_build_graph_siglip(clip_ctx * ctx, const clip_image_f32_batch * imgs) { + const auto & model = ctx->vision_model; + const auto & hparams = model.hparams; + + const int image_size = hparams.image_size; + int image_size_width = image_size; + int image_size_height = image_size; + + const int patch_size = hparams.patch_size; + const int num_patches = ((image_size_width / patch_size) * (image_size_height / patch_size)); + const int hidden_size = hparams.hidden_size; + const int n_head = hparams.n_head; + const int d_head = hidden_size / n_head; + const int n_layer = hparams.n_layer; + const float eps = hparams.eps; + + GGML_ASSERT(imgs->size == 1); // batch_size == 1 + + struct ggml_init_params params = { + /*.mem_size =*/ ctx->buf_compute_meta.size(), + /*.mem_buffer =*/ ctx->buf_compute_meta.data(), + /*.no_alloc =*/ true, + }; + + struct ggml_context * ctx0 = ggml_init(params); + struct ggml_cgraph * gf = ggml_new_graph(ctx0); + + // input raw + struct ggml_tensor * inp_raw = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, image_size_width, image_size_height, 3); + ggml_set_name(inp_raw, "inp_raw"); + ggml_set_input(inp_raw); + + struct ggml_tensor * inp = ggml_conv_2d(ctx0, model.patch_embeddings_0, inp_raw, patch_size, patch_size, 0, 0, 1, 1); + inp = ggml_reshape_2d(ctx0, inp, num_patches, hidden_size); + inp = ggml_cont(ctx0, ggml_transpose(ctx0, inp)); + inp = ggml_add(ctx0, inp, 
model.patch_bias); + + // position embeddings + struct ggml_tensor * embeddings = ggml_add(ctx0, inp, model.position_embeddings); + + // loop over layers + for (int il = 0; il < n_layer; il++) { + struct ggml_tensor * cur = embeddings; // embeddings = residual, cur = hidden_states + + // layernorm1 + { + cur = ggml_norm(ctx0, cur, eps); + cur = ggml_add(ctx0, ggml_mul(ctx0, cur, model.layers[il].ln_1_w), model.layers[il].ln_1_b); + } + + // self-attention + { + + struct ggml_tensor * Q = + ggml_add(ctx0, ggml_mul_mat(ctx0, model.layers[il].q_w, cur), model.layers[il].q_b); + + Q = ggml_reshape_3d(ctx0, Q, d_head, n_head, num_patches); + Q = ggml_cont(ctx0, ggml_permute(ctx0, Q, 0, 2, 1, 3)); + + struct ggml_tensor * K = + ggml_add(ctx0, ggml_mul_mat(ctx0, model.layers[il].k_w, cur), model.layers[il].k_b); + + K = ggml_reshape_3d(ctx0, K, d_head, n_head, num_patches); + K = ggml_cont(ctx0, ggml_permute(ctx0, K, 0, 2, 1, 3)); + + struct ggml_tensor * V = + ggml_add(ctx0, ggml_mul_mat(ctx0, model.layers[il].v_w, cur), model.layers[il].v_b); + + V = ggml_reshape_3d(ctx0, V, d_head, n_head, num_patches); + V = ggml_cont(ctx0, ggml_permute(ctx0, V, 1, 2, 0, 3)); + + struct ggml_tensor * KQ = ggml_mul_mat(ctx0, K, Q); + KQ = ggml_scale_inplace(ctx0, KQ, 1.0f / sqrtf((float)d_head)); + KQ = ggml_soft_max_inplace(ctx0, KQ); + + struct ggml_tensor * KQV = ggml_mul_mat(ctx0, V, KQ); + KQV = ggml_reshape_3d(ctx0, KQV, d_head, num_patches, n_head); + KQV = ggml_permute(ctx0, KQV, 0, 2, 1, 3); + + cur = ggml_cont_2d(ctx0, KQV, hidden_size, num_patches); + } + + // attention output + cur = ggml_add(ctx0, ggml_mul_mat(ctx0, model.layers[il].o_w, cur), model.layers[il].o_b); + + // re-add the layer input, e.g., residual + cur = ggml_add(ctx0, cur, embeddings); + + embeddings = cur; // embeddings = residual, cur = hidden_states + + // layernorm2 + { + cur = ggml_norm(ctx0, cur, eps); + cur = ggml_add(ctx0, ggml_mul(ctx0, cur, model.layers[il].ln_2_w), model.layers[il].ln_2_b); + } + + cur = ggml_mul_mat(ctx0, model.layers[il].ff_i_w, cur); + cur = ggml_add(ctx0, cur, model.layers[il].ff_i_b); + + // siglip uses gelu + cur = ggml_gelu(ctx0, cur); + + cur = ggml_mul_mat(ctx0, model.layers[il].ff_o_w, cur); + cur = ggml_add(ctx0, cur, model.layers[il].ff_o_b); + + // residual 2 + cur = ggml_add(ctx0, embeddings, cur); + + embeddings = cur; + } + + // post-layernorm + if (ctx->has_post_norm) { + embeddings = ggml_norm(ctx0, embeddings, eps); + ggml_set_name(embeddings, "post_ln"); + + embeddings = ggml_add(ctx0, ggml_mul(ctx0, embeddings, model.post_ln_w), model.post_ln_b); + } + + if (ctx->proj_type == PROJECTOR_TYPE_GEMMA3) { + const int batch_size = 1; + const int mm_tokens_per_image = 256; // default value for gemma3 + const int tokens_per_side = sqrt(mm_tokens_per_image); + const int patches_per_image = sqrt(num_patches); + const int kernel_size = patches_per_image / tokens_per_side; + + embeddings = ggml_cont(ctx0, ggml_transpose(ctx0, embeddings)); + embeddings = ggml_reshape_4d(ctx0, embeddings, patches_per_image, patches_per_image, hidden_size, batch_size); + + // doing a pool2d to reduce the number of output tokens to 256 + embeddings = ggml_pool_2d(ctx0, embeddings, GGML_OP_POOL_AVG, kernel_size, kernel_size, kernel_size, kernel_size, 0, 0); + embeddings = ggml_reshape_3d(ctx0, embeddings, embeddings->ne[0] * embeddings->ne[0], hidden_size, batch_size); + embeddings = ggml_cont(ctx0, ggml_transpose(ctx0, embeddings)); + + // apply norm before projection + embeddings = ggml_rms_norm(ctx0, embeddings, 
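// Worked numbers for the pool_2d above, assuming the released Gemma 3 vision
// tower (896x896 input with 14x14 patches, so patches_per_image = 896/14 = 64):
//     tokens_per_side = sqrt(256) = 16
//     kernel_size     = 64 / 16   = 4
// i.e. each 4x4 block of patch embeddings is average-pooled into one of the
// 256 image tokens handed to the language model, before the RMS norm and the
// input projection that follow.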
eps); + embeddings = ggml_mul(ctx0, embeddings, model.mm_soft_emb_norm_w); + + // apply projection + embeddings = ggml_mul_mat(ctx0, + ggml_cont(ctx0, ggml_transpose(ctx0, model.mm_input_proj_w)), + embeddings); + } + + // build the graph + ggml_build_forward_expand(gf, embeddings); + + ggml_free(ctx0); + + return gf; +} + +static ggml_cgraph * clip_image_build_graph_legacy(clip_ctx * ctx, const clip_image_f32_batch * imgs, struct clip_image_size * load_image_size, bool is_inf = false) { if (!ctx->has_vision_encoder) { LOG_ERR("This gguf file seems to have no vision encoder\n"); return nullptr; @@ -1177,7 +1337,8 @@ static ggml_cgraph * clip_image_build_graph(clip_ctx * ctx, const clip_image_f32 } else { GGML_ABORT("fatel error"); } - } else if (ctx->proj_type == PROJECTOR_TYPE_MERGER) { + } + else if (ctx->proj_type == PROJECTOR_TYPE_MERGER) { embeddings = ggml_reshape_3d(ctx0, embeddings, hidden_size * 4, num_positions / 4, batch_size); embeddings = ggml_mul_mat(ctx0, model.mm_0_w, embeddings); @@ -1199,6 +1360,15 @@ static ggml_cgraph * clip_image_build_graph(clip_ctx * ctx, const clip_image_f32 return gf; } +static ggml_cgraph * clip_image_build_graph(clip_ctx * ctx, const clip_image_f32_batch * imgs, struct clip_image_size * load_image_size, bool is_inf = false) { + if (ctx->proj_type == PROJECTOR_TYPE_GEMMA3) { + return clip_image_build_graph_siglip(ctx, imgs); + } else { + // TODO: we should have one build_* function per model + return clip_image_build_graph_legacy(ctx, imgs, load_image_size, is_inf); + } +} + // read and create ggml_context containing the tensors and their data struct clip_ctx * clip_model_load(const char * fname, const int verbosity = 1) { return clip_init(fname, clip_context_params{ @@ -1358,8 +1528,12 @@ struct clip_ctx * clip_init(const char * fname, struct clip_context_params ctx_p GGML_ASSERT(new_clip->has_vision_encoder); GGML_ASSERT(!new_clip->has_text_encoder); - idx = get_key_idx(ctx, KEY_USE_GELU); - new_clip->use_gelu = gguf_get_val_bool(ctx, idx); + try { + idx = get_key_idx(ctx, KEY_USE_GELU); + new_clip->use_gelu = gguf_get_val_bool(ctx, idx); + } catch (std::runtime_error & /*e*/) { + new_clip->use_gelu = false; + } try { idx = get_key_idx(ctx, KEY_USE_SILU); @@ -1567,11 +1741,17 @@ struct clip_ctx * clip_init(const char * fname, struct clip_context_params ctx_p } try { - vision_model.patch_embeddings_0 = get_tensor(new_clip->ctx_data, TN_PATCH_EMBD); + vision_model.patch_embeddings_0 = get_tensor(new_clip->ctx_data, TN_PATCH_EMBD); + } catch(const std::exception& /*e*/) { + vision_model.patch_embeddings_0 = nullptr; + } + + try { vision_model.position_embeddings = get_tensor(new_clip->ctx_data, format(TN_POS_EMBD, "v")); } catch(const std::exception& /*e*/) { - LOG_ERR("%s: failed to load vision model tensors\n", __func__); + vision_model.position_embeddings = nullptr; } + try { vision_model.patch_embeddings_1 = get_tensor(new_clip->ctx_data, TN_PATCH_EMBD_1); } catch(const std::exception& /*e*/) { @@ -1682,6 +1862,10 @@ struct clip_ctx * clip_init(const char * fname, struct clip_context_params ctx_p vision_model.mm_1_w = get_tensor(new_clip->ctx_data, format(TN_LLAVA_PROJ, 2, "weight")); vision_model.mm_1_b = get_tensor(new_clip->ctx_data, format(TN_LLAVA_PROJ, 2, "bias")); } + else if (new_clip->proj_type == PROJECTOR_TYPE_GEMMA3) { + vision_model.mm_input_proj_w = get_tensor(new_clip->ctx_data, TN_MM_INP_PROJ); + vision_model.mm_soft_emb_norm_w = get_tensor(new_clip->ctx_data, TN_MM_SOFT_EMB_N); + } else { std::string proj_type = 
PROJECTOR_TYPE_NAMES[new_clip->proj_type]; throw std::runtime_error(format("%s: don't support projector with: %s currently\n", __func__, proj_type.c_str())); @@ -2223,7 +2407,7 @@ bool clip_image_preprocess(struct clip_ctx * ctx, const clip_image_u8 * img, cli return true; } - if (ctx->has_glm_projector) { + if (ctx->has_glm_projector || ctx->proj_type == PROJECTOR_TYPE_GEMMA3) { res_imgs->size = 1; res_imgs->data = new clip_image_f32[res_imgs->size]; clip_image_u8 resized_image; @@ -2748,6 +2932,9 @@ bool clip_image_batch_encode(clip_ctx * ctx, const int n_threads, const clip_ima ggml_backend_tensor_set(positions, positions_data, 0, ggml_nbytes(positions)); free(positions_data); } + else if (ctx->proj_type == PROJECTOR_TYPE_GEMMA3) { + // do nothing + } else { struct ggml_tensor * positions = ggml_graph_get_tensor(gf, "positions"); @@ -2960,6 +3147,9 @@ int clip_n_mmproj_embd(const struct clip_ctx * ctx) { if (ctx->proj_type == PROJECTOR_TYPE_MERGER) { return ctx->vision_model.mm_1_b->ne[0]; } + if (ctx->proj_type == PROJECTOR_TYPE_GEMMA3) { + return ctx->vision_model.mm_input_proj_w->ne[0]; + } std::string proj_type = PROJECTOR_TYPE_NAMES[ctx->proj_type]; throw std::runtime_error(format("%s: don't support projector with: %s currently\n", __func__, proj_type.c_str())); diff --git a/examples/llava/gemma3-cli.cpp b/examples/llava/gemma3-cli.cpp new file mode 100644 index 0000000000000..a07864d4e59f6 --- /dev/null +++ b/examples/llava/gemma3-cli.cpp @@ -0,0 +1,341 @@ +#include "arg.h" +#include "log.h" +#include "common.h" +#include "sampling.h" +#include "clip.h" +#include "stb_image.h" +#include "llama.h" +#include "ggml.h" +#include "console.h" + +#include +#include +#include + +#if defined (__unix__) || (defined (__APPLE__) && defined (__MACH__)) +#include +#include +#elif defined (_WIN32) +#define WIN32_LEAN_AND_MEAN +#ifndef NOMINMAX +#define NOMINMAX +#endif +#include +#include +#endif + +static bool g_is_generating = false; + +/** + * Please note that this is NOT a production-ready stuff. + * It is a playground for trying Gemma 3 vision capabilities. + * For contributors: please keep this code simple and easy to understand. 
+ */ + +static void show_additional_info(int /*argc*/, char ** argv) { + LOG( + "Experimental CLI for using Gemma 3 vision model\n\n" + "Usage: %s [options] -m --mmproj --image -p \n\n" + " -m and --mmproj are required\n" + " --image and -p are optional, if NOT provided, the CLI will run in chat mode\n", + argv[0] + ); +} + +#if defined (__unix__) || (defined (__APPLE__) && defined (__MACH__)) || defined (_WIN32) +static void sigint_handler(int signo) { + if (signo == SIGINT) { + if (g_is_generating) { + g_is_generating = false; + } else { + console::cleanup(); + LOG("\nInterrupted by user\n"); + _exit(130); + } + } +} +#endif + +struct gemma3_context { + struct clip_ctx * ctx_clip = NULL; + common_init_result llama_init; + + llama_model * model; + llama_context * lctx; + const llama_vocab * vocab; + llama_batch batch; + + int n_threads = 1; + llama_pos n_past = 0; + + gemma3_context(common_params & params) : llama_init(common_init_from_params(params)) { + model = llama_init.model.get(); + lctx = llama_init.context.get(); + vocab = llama_model_get_vocab(model); + n_threads = params.cpuparams.n_threads; + batch = llama_batch_init(params.n_batch, 0, 1); + init_clip_model(params); + } + + void init_clip_model(common_params & params) { + const char * clip_path = params.mmproj.c_str(); + ctx_clip = clip_model_load(clip_path, params.verbosity > 1); + } + + ~gemma3_context() { + clip_free(ctx_clip); + } +}; + +struct decode_embd_batch { + std::vector pos; + std::vector n_seq_id; + std::vector seq_id_0; + std::vector seq_ids; + std::vector logits; + llama_batch batch; + decode_embd_batch(float * embd, int32_t n_tokens, llama_pos pos_0, llama_seq_id seq_id) { + pos .resize(n_tokens); + n_seq_id.resize(n_tokens); + seq_ids .resize(n_tokens + 1); + logits .resize(n_tokens); + seq_id_0.resize(1); + seq_id_0[0] = seq_id; + seq_ids [n_tokens] = nullptr; + batch = { + /*n_tokens =*/ n_tokens, + /*tokens =*/ nullptr, + /*embd =*/ embd, + /*pos =*/ pos.data(), + /*n_seq_id =*/ n_seq_id.data(), + /*seq_id =*/ seq_ids.data(), + /*logits =*/ logits.data(), + }; + for (int i = 0; i < n_tokens; i++) { + batch.pos [i] = pos_0 + i; + batch.n_seq_id[i] = 1; + batch.seq_id [i] = seq_id_0.data(); + batch.logits [i] = false; + } + } +}; + +static int eval_text(gemma3_context & ctx, std::string input, bool logits_last = false) { + llama_tokens tokens = common_tokenize(ctx.lctx, input, false, true); + common_batch_clear(ctx.batch); + for (llama_token & t : tokens) { + common_batch_add(ctx.batch, t, ctx.n_past++, {0}, false); + } + if (logits_last) { + ctx.batch.logits[ctx.batch.n_tokens - 1] = true; + } + // LOG("eval_text (n_tokens = %d): %s\n", (int)tokens.size(), input.c_str()); + if (llama_decode(ctx.lctx, ctx.batch)) { + LOG_ERR("Failed to decode text\n"); + return 1; + } + return 0; +} + +static int eval_image(gemma3_context & ctx, std::string & fname) { + std::vector image_embd_v; + int n_embd = llama_model_n_embd(ctx.model); + int n_tokens = 256; + image_embd_v.resize(n_tokens * n_embd); + + bool ok; + struct clip_image_u8 * img_u8 = clip_image_u8_init(); + ok = clip_image_load_from_file(fname.c_str(), img_u8); + if (!ok) { + LOG_ERR("Unable to load image %s\n", fname.c_str()); + clip_image_u8_free(img_u8); + return 2; // non-fatal error + } + + clip_image_f32_batch batch_f32; + ok = clip_image_preprocess(ctx.ctx_clip, img_u8, &batch_f32); + if (!ok) { + LOG_ERR("Unable to preprocess image\n"); + clip_image_f32_batch_free(&batch_f32); + clip_image_u8_free(img_u8); + return 1; + } + + int64_t t0 = 
ggml_time_ms(); + LOG("Encoding image %s\n", fname.c_str()); + ok = clip_image_batch_encode(ctx.ctx_clip, ctx.n_threads, &batch_f32, image_embd_v.data()); + if (!ok) { + LOG_ERR("Unable to encode image\n"); + clip_image_f32_batch_free(&batch_f32); + clip_image_u8_free(img_u8); + return 1; + } + LOG("Image encoded in %" PRId64 " ms\n", ggml_time_ms() - t0); + + clip_image_f32_batch_free(&batch_f32); + clip_image_u8_free(img_u8); + + // decode image embeddings + int64_t t1 = ggml_time_ms(); + eval_text(ctx, ""); + llama_set_causal_attn(ctx.lctx, false); + decode_embd_batch batch_img(image_embd_v.data(), n_tokens, ctx.n_past, 0); + if (llama_decode(ctx.lctx, batch_img.batch)) { + LOG_ERR("failed to decode image\n"); + return 1; + } + ctx.n_past += n_tokens; + llama_set_causal_attn(ctx.lctx, true); + eval_text(ctx, ""); + LOG("Image decoded in %" PRId64 " ms\n", ggml_time_ms() - t1); + return 0; +} + +static int generate_response(gemma3_context & ctx, common_sampler * smpl, int n_predict) { + for (int i = 0; i < n_predict; i++) { + if (i > n_predict || !g_is_generating) { + printf("\n"); + break; + } + + llama_token token_id = common_sampler_sample(smpl, ctx.lctx, -1); + common_sampler_accept(smpl, token_id, true); + + if (llama_vocab_is_eog(ctx.vocab, token_id)) { + printf("\n"); + break; // end of generation + } + + printf("%s", common_token_to_piece(ctx.lctx, token_id).c_str()); + fflush(stdout); + + // eval the token + common_batch_clear(ctx.batch); + common_batch_add(ctx.batch, token_id, ctx.n_past++, {0}, true); + if (llama_decode(ctx.lctx, ctx.batch)) { + LOG_ERR("failed to decode token\n"); + return 1; + } + } + return 0; +} + +int main(int argc, char ** argv) { + ggml_time_init(); + + common_params params; + params.sampling.temp = 0.2; // lower temp by default for better quality + + if (!common_params_parse(argc, argv, params, LLAMA_EXAMPLE_LLAVA, show_additional_info)) { + return 1; + } + + common_init(); + + if (params.mmproj.empty()) { + show_additional_info(argc, argv); + return 1; + } + + gemma3_context ctx(params); + printf("%s: %s\n", __func__, params.model.c_str()); + + bool is_single_turn = !params.prompt.empty() && !params.image.empty(); + + struct common_sampler * smpl = common_sampler_init(ctx.model, params.sampling); + int n_predict = params.n_predict < 0 ? INT_MAX : params.n_predict; + + // ctrl+C handling + { +#if defined (__unix__) || (defined (__APPLE__) && defined (__MACH__)) + struct sigaction sigint_action; + sigint_action.sa_handler = sigint_handler; + sigemptyset (&sigint_action.sa_mask); + sigint_action.sa_flags = 0; + sigaction(SIGINT, &sigint_action, NULL); +#elif defined (_WIN32) + auto console_ctrl_handler = +[](DWORD ctrl_type) -> BOOL { + return (ctrl_type == CTRL_C_EVENT) ? 
(sigint_handler(SIGINT), true) : false; + }; + SetConsoleCtrlHandler(reinterpret_cast(console_ctrl_handler), true); +#endif + } + + if (eval_text(ctx, "")) { + return 1; + } + + if (is_single_turn) { + g_is_generating = true; + if (eval_text(ctx, "user\n")) { + return 1; + } + for (auto & fname : params.image) { + if (eval_image(ctx, fname)) { + return 1; + } + } + if (eval_text(ctx, params.prompt + "model\n", true)) { + return 1; + } + if (generate_response(ctx, smpl, n_predict)) { + return 1; + } + + } else { + LOG("\n Running in chat mode, available commands:"); + LOG("\n /image load an image"); + LOG("\n /clear clear the chat history"); + LOG("\n /quit or /exit exit the program"); + LOG("\n"); + + if (eval_text(ctx, "user\n")) { + return 1; + } + + while (true) { + g_is_generating = false; + LOG("\n> "); + console::set_display(console::user_input); + std::string line; + console::readline(line, false); + console::set_display(console::reset); + line = string_strip(line); + if (line.empty()) { + continue; + } + if (line == "/quit" || line == "/exit") { + break; + } + if (line == "/clear") { + ctx.n_past = 0; + llama_kv_cache_seq_rm(ctx.lctx, 0, 1, -1); // keep BOS + LOG("Chat history cleared\n\n"); + continue; + } + g_is_generating = true; + if (line.find("/image") == 0) { + std::string image = line.substr(7); + int res = eval_image(ctx, image); + if (res == 2) { + continue; // image not found + } + if (res) { + return 1; + } + continue; + } + if (eval_text(ctx, line + "model\n", true)) { + return 1; + } + if (generate_response(ctx, smpl, n_predict)) { + return 1; + } + if (eval_text(ctx, "user\n")) { + return 1; + } + } + } + + return 0; +} diff --git a/examples/llava/gemma3_convert_encoder_to_gguf.py b/examples/llava/gemma3_convert_encoder_to_gguf.py new file mode 100644 index 0000000000000..241b526b9ede7 --- /dev/null +++ b/examples/llava/gemma3_convert_encoder_to_gguf.py @@ -0,0 +1,307 @@ +import gguf +import argparse +import logging +import sys +import torch +import json +import os +import numpy as np +from typing import cast, ContextManager, Any, Iterator +from pathlib import Path +from torch import Tensor + +logger = logging.getLogger("gemma3-mmproj") + + +# (copied from convert_hf_to_gguf.py) +# tree of lazy tensors +class LazyTorchTensor(gguf.LazyBase): + _tensor_type = torch.Tensor + # to keep the type-checker happy + dtype: torch.dtype + shape: torch.Size + + # only used when converting a torch.Tensor to a np.ndarray + _dtype_map: dict[torch.dtype, type] = { + torch.float16: np.float16, + torch.float32: np.float32, + } + + # used for safetensors slices + # ref: https://github.com/huggingface/safetensors/blob/079781fd0dc455ba0fe851e2b4507c33d0c0d407/bindings/python/src/lib.rs#L1046 + # TODO: uncomment U64, U32, and U16, ref: https://github.com/pytorch/pytorch/issues/58734 + _dtype_str_map: dict[str, torch.dtype] = { + "F64": torch.float64, + "F32": torch.float32, + "BF16": torch.bfloat16, + "F16": torch.float16, + # "U64": torch.uint64, + "I64": torch.int64, + # "U32": torch.uint32, + "I32": torch.int32, + # "U16": torch.uint16, + "I16": torch.int16, + "U8": torch.uint8, + "I8": torch.int8, + "BOOL": torch.bool, + "F8_E4M3": torch.float8_e4m3fn, + "F8_E5M2": torch.float8_e5m2, + } + + def numpy(self) -> gguf.LazyNumpyTensor: + dtype = self._dtype_map[self.dtype] + return gguf.LazyNumpyTensor( + meta=gguf.LazyNumpyTensor.meta_with_dtype_and_shape(dtype, self.shape), + args=(self,), + func=(lambda s: s.numpy()) + ) + + @classmethod + def meta_with_dtype_and_shape(cls, dtype: 
torch.dtype, shape: tuple[int, ...]) -> Tensor: + return torch.empty(size=shape, dtype=dtype, device="meta") + + @classmethod + def from_safetensors_slice(cls, st_slice: Any) -> Tensor: + dtype = cls._dtype_str_map[st_slice.get_dtype()] + shape: tuple[int, ...] = tuple(st_slice.get_shape()) + lazy = cls(meta=cls.meta_with_dtype_and_shape(dtype, shape), args=(st_slice,), func=lambda s: s[:]) + return cast(torch.Tensor, lazy) + + @classmethod + def __torch_function__(cls, func, types, args=(), kwargs=None): + del types # unused + + if kwargs is None: + kwargs = {} + + if func is torch.Tensor.numpy: + return args[0].numpy() + + return cls._wrap_fn(func)(*args, **kwargs) + + +class Gemma3VisionTower: + hparams: dict + gguf_writer: gguf.GGUFWriter + fname_out: Path + ftype: gguf.LlamaFileType + + @staticmethod + def load_hparams(dir_model: Path): + with open(dir_model / "config.json", "r", encoding="utf-8") as f: + return json.load(f) + + @staticmethod + def get_model_part_names(dir_model: Path, prefix: str, suffix: str) -> list[str]: + part_names: list[str] = [] + for filename in os.listdir(dir_model): + if filename.startswith(prefix) and filename.endswith(suffix): + part_names.append(filename) + part_names.sort() + return part_names + + def __init__(self, + dir_model: Path, + fname_out: Path, + ftype: gguf.LlamaFileType, + is_big_endian: bool,): + hparams = Gemma3VisionTower.load_hparams(dir_model) + self.hparams = hparams + self.fname_out = fname_out + self.ftype = ftype + endianess = gguf.GGUFEndian.BIG if is_big_endian else gguf.GGUFEndian.LITTLE + self.gguf_writer = gguf.GGUFWriter(path=None, arch="clip", endianess=endianess) + + text_config = hparams["text_config"] + vision_config = hparams["vision_config"] + + assert hparams["architectures"][0] == "Gemma3ForConditionalGeneration" + assert text_config is not None + assert vision_config is not None + + self.gguf_writer.add_string ("clip.projector_type", "gemma3") + self.gguf_writer.add_bool ("clip.has_text_encoder", False) + self.gguf_writer.add_bool ("clip.has_vision_encoder", True) + self.gguf_writer.add_bool ("clip.has_llava_projector", False) # legacy + self.gguf_writer.add_uint32 ("clip.vision.image_size", vision_config["image_size"]) + self.gguf_writer.add_uint32 ("clip.vision.patch_size", vision_config["patch_size"]) + self.gguf_writer.add_uint32 ("clip.vision.embedding_length", vision_config["hidden_size"]) + self.gguf_writer.add_uint32 ("clip.vision.feed_forward_length", vision_config["intermediate_size"]) + self.gguf_writer.add_uint32 ("clip.vision.projection_dim", text_config["hidden_size"]) + self.gguf_writer.add_uint32 ("clip.vision.block_count", vision_config["num_hidden_layers"]) + self.gguf_writer.add_uint32 ("clip.vision.attention.head_count", vision_config["num_attention_heads"]) + self.gguf_writer.add_float32("clip.vision.attention.layer_norm_epsilon", vision_config.get("layer_norm_eps", 1e-6)) + # default values taken from HF tranformers code + self.gguf_writer.add_array ("clip.vision.image_mean", [0.5, 0.5, 0.5]) + self.gguf_writer.add_array ("clip.vision.image_std", [0.5, 0.5, 0.5]) + self.gguf_writer.add_bool ("clip.use_gelu", True) + + # load tensors + for name, data_torch in self.get_tensors(dir_model): + # convert any unsupported data types to float32 + if data_torch.dtype not in (torch.float16, torch.float32): + data_torch = data_torch.to(torch.float32) + self.add_tensor(name, data_torch) + + def get_tensors(self, dir_model: Path) -> Iterator[tuple[str, Tensor]]: + part_names = 
Gemma3VisionTower.get_model_part_names(dir_model, "model", ".safetensors") + tensor_names_from_parts: set[str] = set() + for part_name in part_names: + logger.info(f"gguf: loading model part '{part_name}'") + from safetensors import safe_open + ctx = cast(ContextManager[Any], safe_open(dir_model / part_name, framework="pt", device="cpu")) + with ctx as model_part: + tensor_names_from_parts.update(model_part.keys()) + + for name in model_part.keys(): + data = model_part.get_slice(name) + data = LazyTorchTensor.from_safetensors_slice(data) + yield name, data + + def add_tensor(self, name: str, data_torch: Tensor): + is_1d = len(data_torch.shape) == 1 + is_embd = ".embeddings." in name + old_dtype = data_torch.dtype + can_quantize = not is_1d and not is_embd + data_qtype = gguf.GGMLQuantizationType.F32 + + # this is to support old checkpoint + # TODO: remove this when we have the final model + name = name.replace("vision_model.vision_model.", "vision_tower.vision_model.") + name = name.replace("multimodal_projector.", "multi_modal_projector.") + + # filter only vision tensors + if not name.startswith("vision_tower.vision_model.") and not name.startswith("multi_modal_projector."): + return + # prefix + name = name.replace("vision_tower.vision_model.encoder.layers.", "v.blk.") + name = name.replace("vision_tower.vision_model.", "v.") + # projector and input embd + name = name.replace(".embeddings.patch_embedding.", ".patch_embd.") + name = name.replace(".embeddings.position_embedding.", ".position_embd.") + name = name.replace( + "multi_modal_projector.mm_input_projection_weight", + "mm.input_projection.weight" + ) + name = name.replace( + "multi_modal_projector.mm_soft_emb_norm.weight", + "mm.soft_emb_norm.weight" + ) + name = name.replace("post_layernorm.", "post_ln.") + # each block + name = name.replace(".self_attn.k_proj.", ".attn_k.") + name = name.replace(".self_attn.v_proj.", ".attn_v.") + name = name.replace(".self_attn.q_proj.", ".attn_q.") + name = name.replace(".self_attn.out_proj.", ".attn_out.") + name = name.replace(".layer_norm1.", ".ln1.") + name = name.replace(".layer_norm2.", ".ln2.") + name = name.replace(".mlp.fc1.", ".ffn_down.") + name = name.replace(".mlp.fc2.", ".ffn_up.") + + if can_quantize: + if self.ftype == gguf.LlamaFileType.ALL_F32: + data_qtype = gguf.GGMLQuantizationType.F32 + elif self.ftype == gguf.LlamaFileType.MOSTLY_F16: + data_qtype = gguf.GGMLQuantizationType.F16 + elif self.ftype == gguf.LlamaFileType.MOSTLY_BF16: + data_qtype = gguf.GGMLQuantizationType.BF16 + elif self.ftype == gguf.LlamaFileType.MOSTLY_Q8_0: + data_qtype = gguf.GGMLQuantizationType.Q8_0 + else: + raise ValueError(f"Unsupported file type: {self.ftype}") + + # corrent norm value ; only this "soft_emb_norm" need to be corrected as it's part of Gemma projector + # the other norm values are part of SigLIP model, and they are already correct + # ref code: Gemma3RMSNorm + if "soft_emb_norm.weight" in name: + logger.info(f"Correcting norm value for '{name}'") + data_torch = data_torch + 1 + + data = data_torch.numpy() + + try: + data = gguf.quants.quantize(data, data_qtype) + except Exception as e: + logger.error(f"Error quantizing tensor '{name}': {e}, fallback to F16") + data_qtype = gguf.GGMLQuantizationType.F16 + data = gguf.quants.quantize(data, data_qtype) + + # reverse shape to make it similar to the internal ggml dimension order + shape_str = f"{{{', '.join(str(n) for n in reversed(data_torch.shape))}}}" + logger.info(f"{f'%-32s' % f'{name},'} {old_dtype} --> {data_qtype.name}, shape 
= {shape_str}") + + self.gguf_writer.add_tensor(name, data, raw_dtype=data_qtype) + + def write(self): + self.gguf_writer.write_header_to_file(path=self.fname_out) + self.gguf_writer.write_kv_data_to_file() + self.gguf_writer.write_tensors_to_file(progress=True) + self.gguf_writer.close() + +def parse_args() -> argparse.Namespace: + parser = argparse.ArgumentParser( + description="Convert Gemma 3 vision tower safetensors to GGUF format",) + parser.add_argument( + "--outfile", type=Path, default="mmproj.gguf", + help="path to write to", + ) + parser.add_argument( + "--outtype", type=str, choices=["f32", "f16", "bf16", "q8_0"], default="f16", + help="output format", + ) + parser.add_argument( + "--bigendian", action="store_true", + help="model is executed on big endian machine", + ) + parser.add_argument( + "model", type=Path, + help="directory containing model file", + nargs="?", + ) + parser.add_argument( + "--verbose", action="store_true", + help="increase output verbosity", + ) + + args = parser.parse_args() + if args.model is None: + parser.error("the following arguments are required: model") + return args + + +def main() -> None: + args = parse_args() + + if args.verbose: + logging.basicConfig(level=logging.DEBUG) + else: + logging.basicConfig(level=logging.INFO) + + dir_model = args.model + + if not dir_model.is_dir(): + logger.error(f'Error: {args.model} is not a directory') + sys.exit(1) + + ftype_map: dict[str, gguf.LlamaFileType] = { + "f32": gguf.LlamaFileType.ALL_F32, + "f16": gguf.LlamaFileType.MOSTLY_F16, + "bf16": gguf.LlamaFileType.MOSTLY_BF16, + "q8_0": gguf.LlamaFileType.MOSTLY_Q8_0, + } + + logger.info(f"Loading model: {dir_model.name}") + + with torch.inference_mode(): + gemma3_vision_tower = Gemma3VisionTower( + dir_model=dir_model, + fname_out=args.outfile, + ftype=ftype_map[args.outtype], + is_big_endian=args.bigendian, + ) + gemma3_vision_tower.write() + + +if __name__ == '__main__': + main() + diff --git a/gguf-py/gguf/constants.py b/gguf-py/gguf/constants.py index ecac5b4bb7f59..19624eae04ece 100644 --- a/gguf-py/gguf/constants.py +++ b/gguf-py/gguf/constants.py @@ -253,6 +253,7 @@ class MODEL_ARCH(IntEnum): MINICPM3 = auto() GEMMA = auto() GEMMA2 = auto() + GEMMA3 = auto() STARCODER2 = auto() RWKV6 = auto() RWKV6QWEN2 = auto() @@ -440,6 +441,7 @@ class MODEL_TENSOR(IntEnum): MODEL_ARCH.MINICPM3: "minicpm3", MODEL_ARCH.GEMMA: "gemma", MODEL_ARCH.GEMMA2: "gemma2", + MODEL_ARCH.GEMMA3: "gemma3", MODEL_ARCH.STARCODER2: "starcoder2", MODEL_ARCH.RWKV6: "rwkv6", MODEL_ARCH.RWKV6QWEN2: "rwkv6qwen2", @@ -1077,6 +1079,23 @@ class MODEL_TENSOR(IntEnum): MODEL_TENSOR.FFN_PRE_NORM, MODEL_TENSOR.FFN_POST_NORM, ], + MODEL_ARCH.GEMMA3: [ + MODEL_TENSOR.TOKEN_EMBD, + MODEL_TENSOR.OUTPUT_NORM, + MODEL_TENSOR.ATTN_Q, + MODEL_TENSOR.ATTN_Q_NORM, + MODEL_TENSOR.ATTN_K, + MODEL_TENSOR.ATTN_K_NORM, + MODEL_TENSOR.ATTN_V, + MODEL_TENSOR.ATTN_OUT, + MODEL_TENSOR.FFN_GATE, + MODEL_TENSOR.FFN_DOWN, + MODEL_TENSOR.FFN_UP, + MODEL_TENSOR.ATTN_NORM, + MODEL_TENSOR.ATTN_POST_NORM, + MODEL_TENSOR.FFN_PRE_NORM, + MODEL_TENSOR.FFN_POST_NORM, + ], MODEL_ARCH.STARCODER2: [ MODEL_TENSOR.TOKEN_EMBD, MODEL_TENSOR.OUTPUT_NORM, diff --git a/src/llama-arch.cpp b/src/llama-arch.cpp index 97a1e7e5e01ef..28f2bbc8f72bf 100644 --- a/src/llama-arch.cpp +++ b/src/llama-arch.cpp @@ -36,6 +36,7 @@ static const std::map LLM_ARCH_NAMES = { { LLM_ARCH_MINICPM3, "minicpm3" }, { LLM_ARCH_GEMMA, "gemma" }, { LLM_ARCH_GEMMA2, "gemma2" }, + { LLM_ARCH_GEMMA3, "gemma3" }, { LLM_ARCH_STARCODER2, "starcoder2" }, { 
LLM_ARCH_MAMBA, "mamba" }, { LLM_ARCH_XVERSE, "xverse" }, @@ -766,6 +767,26 @@ static const std::map> LLM_TENSOR_N { LLM_TENSOR_FFN_POST_NORM, "blk.%d.post_ffw_norm" }, }, }, + { + LLM_ARCH_GEMMA3, + { + { LLM_TENSOR_TOKEN_EMBD, "token_embd" }, + { LLM_TENSOR_OUTPUT_NORM, "output_norm" }, + { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" }, + { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" }, + { LLM_TENSOR_ATTN_Q_NORM, "blk.%d.attn_q_norm" }, + { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" }, + { LLM_TENSOR_ATTN_K_NORM, "blk.%d.attn_k_norm" }, + { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" }, + { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" }, + { LLM_TENSOR_ATTN_POST_NORM, "blk.%d.post_attention_norm" }, + { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" }, + { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" }, + { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" }, + { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" }, + { LLM_TENSOR_FFN_POST_NORM, "blk.%d.post_ffw_norm" }, + }, + }, { LLM_ARCH_STARCODER2, { diff --git a/src/llama-arch.h b/src/llama-arch.h index 122fdcebe0af6..2ec2e2362eba1 100644 --- a/src/llama-arch.h +++ b/src/llama-arch.h @@ -40,6 +40,7 @@ enum llm_arch { LLM_ARCH_MINICPM3, LLM_ARCH_GEMMA, LLM_ARCH_GEMMA2, + LLM_ARCH_GEMMA3, LLM_ARCH_STARCODER2, LLM_ARCH_MAMBA, LLM_ARCH_XVERSE, diff --git a/src/llama-model.cpp b/src/llama-model.cpp index 1da4eae7e63e2..9f75589d805a9 100644 --- a/src/llama-model.cpp +++ b/src/llama-model.cpp @@ -9,6 +9,7 @@ #include #include #include +#include #include #include #include @@ -864,6 +865,23 @@ void llama_model::load_hparams(llama_model_loader & ml) { default: type = LLM_TYPE_UNKNOWN; } } break; + case LLM_ARCH_GEMMA3: + { + ml.get_key(LLM_KV_ATTENTION_SLIDING_WINDOW, hparams.n_swa); + ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps); + + switch (hparams.n_layer) { + case 26: type = LLM_TYPE_1B; break; + case 34: type = LLM_TYPE_4B; break; + case 48: type = LLM_TYPE_12B; break; + case 62: type = LLM_TYPE_27B; break; + default: type = LLM_TYPE_UNKNOWN; + } + + hparams.f_attention_scale = type == LLM_TYPE_27B + ? 
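// Reading of the two branches that follow (no new behaviour): the 27B model
// scales attention by 1/sqrt(n_embd / n_head), which corresponds to the
// query_pre_attn_scalar value in the reference config, the same convention
// Gemma 2 27B uses, while the 1B/4B/12B variants use the usual
// 1/sqrt(head_dim), i.e. 1/sqrt(n_embd_head_k).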
1.0f / std::sqrt(float(hparams.n_embd / hparams.n_head(0))) + : 1.0f / std::sqrt(float(hparams.n_embd_head_k)); + } break; case LLM_ARCH_STARCODER2: { ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps); @@ -2454,6 +2472,35 @@ bool llama_model::load_tensors(llama_model_loader & ml) { layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd_head_k * n_head, n_embd}, 0); layer.attn_post_norm = create_tensor(tn(LLM_TENSOR_ATTN_POST_NORM, "weight", i), {n_embd}, 0); + layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0); + layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, 0); + layer.ffn_up = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, 0); + layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd}, 0); + layer.ffn_post_norm = create_tensor(tn(LLM_TENSOR_FFN_POST_NORM, "weight", i), {n_embd}, 0); + } + } break; + case LLM_ARCH_GEMMA3: + { + tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0); + + // output + output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0); + output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED); // same as tok_embd, duplicated to allow offloading + + for (int i = 0; i < n_layer; ++i) { + auto & layer = layers[i]; + + layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0); + + layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd_head_k * n_head}, 0); + layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_k_gqa}, 0); + layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_v_gqa}, 0); + layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd_head_k * n_head, n_embd}, 0); + + layer.attn_post_norm = create_tensor(tn(LLM_TENSOR_ATTN_POST_NORM, "weight", i), {n_embd}, 0); + layer.attn_k_norm = create_tensor(tn(LLM_TENSOR_ATTN_K_NORM, "weight", i), {n_embd_head_k}, 0); + layer.attn_q_norm = create_tensor(tn(LLM_TENSOR_ATTN_Q_NORM, "weight", i), {n_embd_head_k}, 0); + layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0); layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, 0); layer.ffn_up = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, 0); @@ -3650,6 +3697,7 @@ void llama_model::print_info() const { LLAMA_LOG_INFO("%s: f_clamp_kqv = %.1e\n", __func__, hparams.f_clamp_kqv); LLAMA_LOG_INFO("%s: f_max_alibi_bias = %.1e\n", __func__, hparams.f_max_alibi_bias); LLAMA_LOG_INFO("%s: f_logit_scale = %.1e\n", __func__, hparams.f_logit_scale); + LLAMA_LOG_INFO("%s: f_attn_scale = %.1e\n", __func__, hparams.f_attention_scale); LLAMA_LOG_INFO("%s: n_ff = %s\n", __func__, print_f([&](uint32_t il) { return hparams.n_ff(il); }, hparams.n_layer).c_str()); LLAMA_LOG_INFO("%s: n_expert = %u\n", __func__, hparams.n_expert); LLAMA_LOG_INFO("%s: n_expert_used = %u\n", __func__, hparams.n_expert_used); @@ -3923,6 +3971,7 @@ enum llama_rope_type llama_model_rope_type(const struct llama_model * model) { case LLM_ARCH_PHIMOE: case LLM_ARCH_GEMMA: case LLM_ARCH_GEMMA2: + case LLM_ARCH_GEMMA3: case LLM_ARCH_STARCODER2: case LLM_ARCH_OPENELM: case LLM_ARCH_GPTNEOX: diff --git a/src/llama.cpp b/src/llama.cpp index 607f278615969..4a4e91490107c 100644 --- a/src/llama.cpp +++ b/src/llama.cpp @@ -4978,6 +4978,149 @@ struct llm_build_context { return gf; } + struct ggml_cgraph * 
build_gemma3() { + struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, model.max_nodes(), false); + + const int64_t n_embd_head_k = hparams.n_embd_head_k; + + struct ggml_tensor * cur; + struct ggml_tensor * inpL; + + inpL = llm_build_inp_embd(ctx0, lctx, hparams, ubatch, model.tok_embd, cb); + + // important: do not normalize weights for raw embeddings input (i.e. encoded image emdeddings) + if (ubatch.token) { + inpL = ggml_scale(ctx0, inpL, sqrtf(n_embd)); + cb(inpL, "inp_scaled", -1); + } + + // inp_pos - contains the positions + struct ggml_tensor * inp_pos = build_inp_pos(); + + // KQ_mask (mask for 1 head, it will be broadcasted to all heads) + // gemma3 requires different mask for layers using sliding window (SWA) + struct ggml_tensor * KQ_mask = build_inp_KQ_mask(true); + struct ggml_tensor * KQ_mask_swa = build_inp_KQ_mask_swa(true); + + // "5-to-1 interleaved attention" + // 5 layers of local attention followed by 1 layer of global attention + static const int sliding_window_pattern = 6; + + for (int il = 0; il < n_layer; ++il) { + const bool is_sliding = (il + 1) % sliding_window_pattern; + const float freq_base_l = is_sliding ? 10000.0f : freq_base; + const float freq_scale_l = is_sliding ? 1.0f : freq_scale; + struct ggml_tensor * KQ_mask_l = is_sliding ? KQ_mask_swa : KQ_mask; + + // norm + cur = llm_build_norm(ctx0, inpL, hparams, + model.layers[il].attn_norm, NULL, + LLM_NORM_RMS, cb, il); + cb(cur, "attn_norm", il); + + // self-attention + { + // compute Q and K and RoPE them + struct ggml_tensor * Qcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wq, cur); + cb(Qcur, "Qcur", il); + + struct ggml_tensor * Kcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wk, cur); + cb(Kcur, "Kcur", il); + + struct ggml_tensor * Vcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wv, cur); + cb(Vcur, "Vcur", il); + + Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head_k, n_head, n_tokens); + Qcur = llm_build_norm(ctx0, Qcur, hparams, + model.layers[il].attn_q_norm, + NULL, + LLM_NORM_RMS, cb, il); + cb(Qcur, "Qcur_normed", il); + + Qcur = ggml_rope_ext( + ctx0, Qcur, inp_pos, nullptr, + n_rot, rope_type, n_ctx_orig, freq_base_l, freq_scale_l, + ext_factor, attn_factor, beta_fast, beta_slow); + cb(Qcur, "Qcur", il); + + Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head_k, n_head_kv, n_tokens); + Kcur = llm_build_norm(ctx0, Kcur, hparams, + model.layers[il].attn_k_norm, + NULL, + LLM_NORM_RMS, cb, il); + cb(Kcur, "Kcur_normed", il); + + Kcur = ggml_rope_ext( + ctx0, Kcur, inp_pos, nullptr, + n_rot, rope_type, n_ctx_orig, freq_base_l, freq_scale_l, + ext_factor, attn_factor, beta_fast, beta_slow); + cb(Kcur, "Kcur", il); + + cur = llm_build_kv(ctx0, lctx, kv_self, gf, + model.layers[il].wo, NULL, + Kcur, Vcur, Qcur, KQ_mask_l, n_tokens, kv_head, n_kv, hparams.f_attention_scale, cb, il); + } + + cur = llm_build_norm(ctx0, cur, hparams, + model.layers[il].attn_post_norm, NULL, + LLM_NORM_RMS, cb, il); + cb(cur, "attn_post_norm", il); + + if (il == n_layer - 1) { + // skip computing output for unused tokens + struct ggml_tensor * inp_out_ids = build_inp_out_ids(); + cur = ggml_get_rows(ctx0, cur, inp_out_ids); + inpL = ggml_get_rows(ctx0, inpL, inp_out_ids); + } + + struct ggml_tensor * sa_out = ggml_add(ctx0, cur, inpL); + cb(sa_out, "sa_out", il); + + cur = llm_build_norm(ctx0, sa_out, hparams, + model.layers[il].ffn_norm, NULL, + LLM_NORM_RMS, cb, il); + cb(cur, "ffn_norm", il); + + // feed-forward network + { + cur = llm_build_ffn(ctx0, lctx, cur, + model.layers[il].ffn_up, NULL, NULL, 
+ model.layers[il].ffn_gate, NULL, NULL, + model.layers[il].ffn_down, NULL, NULL, + NULL, + LLM_FFN_GELU, LLM_FFN_PAR, cb, il); + cb(cur, "ffn_out", il); + } + + cur = llm_build_norm(ctx0, cur, hparams, + model.layers[il].ffn_post_norm, NULL, + LLM_NORM_RMS, cb, -1); + cb(cur, "ffn_post_norm", -1); + + cur = ggml_add(ctx0, cur, sa_out); + cur = lctx.cvec.apply_to(ctx0, cur, il); + cb(cur, "l_out", il); + + // input for next layer + inpL = cur; + } + + cur = inpL; + + cur = llm_build_norm(ctx0, cur, hparams, + model.output_norm, NULL, + LLM_NORM_RMS, cb, -1); + cb(cur, "result_norm", -1); + + // lm_head + cur = llm_build_lora_mm(lctx, ctx0, model.output, cur); + + cb(cur, "result_output", -1); + + ggml_build_forward_expand(gf, cur); + + return gf; + } struct ggml_cgraph * build_starcoder2() { struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, model.max_nodes(), false); @@ -8298,6 +8441,10 @@ static struct ggml_cgraph * llama_build_graph( { result = llm.build_gemma2(); } break; + case LLM_ARCH_GEMMA3: + { + result = llm.build_gemma3(); + } break; case LLM_ARCH_STARCODER2: { result = llm.build_starcoder2(); From 34c961b181836a4f06ab4c56d5ce61ce03fc478b Mon Sep 17 00:00:00 2001 From: uvos Date: Wed, 12 Mar 2025 10:14:11 +0100 Subject: [PATCH 041/398] CUDA/HIP: Fix fattn-vec-* when device warp size is not 32 (#12315) When fattn-wmma was ported over to warp64 various bits that also touch fattn-vec where converted to selectable warp size, however the fattn-vec kernels dont work with 64 wide warps for now, so we need to avoid launching them with parameters for warp64 --- ggml/src/ggml-cuda/fattn-common.cuh | 52 ++++++++++++---------------- ggml/src/ggml-cuda/fattn-wmma-f16.cu | 7 ++-- 2 files changed, 26 insertions(+), 33 deletions(-) diff --git a/ggml/src/ggml-cuda/fattn-common.cuh b/ggml/src/ggml-cuda/fattn-common.cuh index 46de14093545c..4067fd41bc247 100644 --- a/ggml/src/ggml-cuda/fattn-common.cuh +++ b/ggml/src/ggml-cuda/fattn-common.cuh @@ -52,12 +52,11 @@ typedef half (*vec_dot_KQ_f16_t)( typedef float (*vec_dot_KQ_f32_t)( const char * __restrict__ K_c, const void * __restrict__ Q_v, const int * __restrict__ Q_q8 , const void * __restrict__ Q_ds); -template +template static __device__ __forceinline__ T vec_dot_fattn_vec_KQ_q4_0( const char * __restrict__ K_c, const void * __restrict__ Q_v, const int * __restrict__ Q_q8, const void * __restrict__ Q_ds_v) { const block_q4_0 * K_q4_0 = (const block_q4_0 *) K_c; - constexpr int warp_size = ggml_cuda_get_physical_warp_size(); GGML_UNUSED(Q_v); T sum = 0.0f; @@ -93,12 +92,11 @@ static __device__ __forceinline__ T vec_dot_fattn_vec_KQ_q4_0( return sum; } -template +template static __device__ __forceinline__ T vec_dot_fattn_vec_KQ_q4_1( const char * __restrict__ K_c, const void * __restrict__ Q_v, const int * __restrict__ Q_q8, const void * __restrict__ Q_ds_v) { const block_q4_1 * K_q4_1 = (const block_q4_1 *) K_c; - constexpr int warp_size = ggml_cuda_get_physical_warp_size(); GGML_UNUSED(Q_v); T sum = 0.0f; @@ -138,12 +136,11 @@ static __device__ __forceinline__ T vec_dot_fattn_vec_KQ_q4_1( return sum; } -template +template static __device__ __forceinline__ T vec_dot_fattn_vec_KQ_q5_0( const char * __restrict__ K_c, const void * __restrict__ Q_v, const int * __restrict__ Q_q8, const void * __restrict__ Q_ds_v) { const block_q5_0 * K_q5_0 = (const block_q5_0 *) K_c; - constexpr int warp_size = ggml_cuda_get_physical_warp_size(); GGML_UNUSED(Q_v); T sum = 0.0f; @@ -186,12 +183,11 @@ static __device__ __forceinline__ T 
vec_dot_fattn_vec_KQ_q5_0( return sum; } -template +template static __device__ __forceinline__ T vec_dot_fattn_vec_KQ_q5_1( const char * __restrict__ K_c, const void * __restrict__ Q_v, const int * __restrict__ Q_q8, const void * __restrict__ Q_ds_v) { const block_q5_1 * K_q5_1 = (const block_q5_1 *) K_c; - constexpr int warp_size = ggml_cuda_get_physical_warp_size(); GGML_UNUSED(Q_v); T sum = 0.0f; @@ -238,12 +234,11 @@ static __device__ __forceinline__ T vec_dot_fattn_vec_KQ_q5_1( return sum; } -template +template static __device__ __forceinline__ T vec_dot_fattn_vec_KQ_q8_0( const char * __restrict__ K_c, const void * __restrict__ Q_v, const int * __restrict__ Q_q8, const void * __restrict__ Q_ds_v) { const block_q8_0 * K_q8_0 = (const block_q8_0 *) K_c; - constexpr int warp_size = ggml_cuda_get_physical_warp_size(); GGML_UNUSED(Q_v); T sum = 0.0f; @@ -272,12 +267,11 @@ static __device__ __forceinline__ T vec_dot_fattn_vec_KQ_q8_0( return sum; } -template +template static __device__ __forceinline__ T vec_dot_fattn_vec_KQ_f16( const char * __restrict__ K_c, const void * __restrict__ Q_v, const int * __restrict__ Q_q8 , const void * __restrict__ Q_ds_v) { const half2 * K_h2 = (const half2 *) K_c; - constexpr int warp_size = ggml_cuda_get_physical_warp_size(); GGML_UNUSED(Q_q8); GGML_UNUSED(Q_ds_v); @@ -480,25 +474,25 @@ static __device__ __forceinline__ T dequantize_1_f16(const void * __restrict__ v return x[i]; } -template +template constexpr __device__ vec_dot_KQ_f16_t get_vec_dot_KQ_f16(ggml_type type_K) { - return type_K == GGML_TYPE_Q4_0 ? vec_dot_fattn_vec_KQ_q4_0 : - type_K == GGML_TYPE_Q4_1 ? vec_dot_fattn_vec_KQ_q4_1 : - type_K == GGML_TYPE_Q5_0 ? vec_dot_fattn_vec_KQ_q5_0 : - type_K == GGML_TYPE_Q5_1 ? vec_dot_fattn_vec_KQ_q5_1 : - type_K == GGML_TYPE_Q8_0 ? vec_dot_fattn_vec_KQ_q8_0 : - type_K == GGML_TYPE_F16 ? vec_dot_fattn_vec_KQ_f16 : + return type_K == GGML_TYPE_Q4_0 ? vec_dot_fattn_vec_KQ_q4_0 : + type_K == GGML_TYPE_Q4_1 ? vec_dot_fattn_vec_KQ_q4_1 : + type_K == GGML_TYPE_Q5_0 ? vec_dot_fattn_vec_KQ_q5_0 : + type_K == GGML_TYPE_Q5_1 ? vec_dot_fattn_vec_KQ_q5_1 : + type_K == GGML_TYPE_Q8_0 ? vec_dot_fattn_vec_KQ_q8_0 : + type_K == GGML_TYPE_F16 ? vec_dot_fattn_vec_KQ_f16 : nullptr; } -template +template constexpr __device__ vec_dot_KQ_f32_t get_vec_dot_KQ_f32(ggml_type type_K) { - return type_K == GGML_TYPE_Q4_0 ? vec_dot_fattn_vec_KQ_q4_0 : - type_K == GGML_TYPE_Q4_1 ? vec_dot_fattn_vec_KQ_q4_1 : - type_K == GGML_TYPE_Q5_0 ? vec_dot_fattn_vec_KQ_q5_0 : - type_K == GGML_TYPE_Q5_1 ? vec_dot_fattn_vec_KQ_q5_1 : - type_K == GGML_TYPE_Q8_0 ? vec_dot_fattn_vec_KQ_q8_0 : - type_K == GGML_TYPE_F16 ? vec_dot_fattn_vec_KQ_f16 : + return type_K == GGML_TYPE_Q4_0 ? vec_dot_fattn_vec_KQ_q4_0 : + type_K == GGML_TYPE_Q4_1 ? vec_dot_fattn_vec_KQ_q4_1 : + type_K == GGML_TYPE_Q5_0 ? vec_dot_fattn_vec_KQ_q5_0 : + type_K == GGML_TYPE_Q5_1 ? vec_dot_fattn_vec_KQ_q5_1 : + type_K == GGML_TYPE_Q8_0 ? vec_dot_fattn_vec_KQ_q8_0 : + type_K == GGML_TYPE_F16 ? 
vec_dot_fattn_vec_KQ_f16 : nullptr; } @@ -681,7 +675,8 @@ static void on_no_fattn_vec_case(const int D) { template void launch_fattn( ggml_backend_cuda_context & ctx, ggml_tensor * dst, fattn_kernel_t fattn_kernel, - const int nwarps, const size_t nbytes_shared, const bool need_f16_K, const bool need_f16_V + const int nwarps, const size_t nbytes_shared, const bool need_f16_K, const bool need_f16_V, + const int warp_size = WARP_SIZE ) { constexpr int ncols = ncols1 * ncols2; @@ -704,8 +699,6 @@ void launch_fattn( GGML_ASSERT(Q->ne[3] == 1); - const int warp_size = ggml_cuda_info().devices[ctx.device].warp_size; - ggml_cuda_pool & pool = ctx.pool(); cudaStream_t main_stream = ctx.stream(); const int id = ggml_cuda_get_device(); @@ -805,7 +798,6 @@ void launch_fattn( const float m1 = powf(2.0f, -(max_bias / 2.0f) / n_head_log2); GGML_ASSERT(block_dim.x % warp_size == 0); - GGML_ASSERT(!GGML_CUDA_CC_IS_AMD(cc) || block_dim.x * block_dim.y <= 4 * (unsigned int)warp_size); fattn_kernel<<>>( (const char *) Q->data, K_data, diff --git a/ggml/src/ggml-cuda/fattn-wmma-f16.cu b/ggml/src/ggml-cuda/fattn-wmma-f16.cu index 622cf28576d29..dab1d5cbcace4 100644 --- a/ggml/src/ggml-cuda/fattn-wmma-f16.cu +++ b/ggml/src/ggml-cuda/fattn-wmma-f16.cu @@ -469,6 +469,7 @@ void ggml_cuda_flash_attn_ext_wmma_f16_case(ggml_backend_cuda_context & ctx, ggm constexpr int frag_m = cols_per_block == 8 && D % 32 == 0 ? 32 : 16; const int blocks_num_pb1 = ((Q->ne[1] + cols_per_block - 1) / cols_per_block)*Q->ne[2]*Q->ne[3]; const int nsm = ggml_cuda_info().devices[ggml_cuda_get_device()].nsm; + const int warp_size = ggml_cuda_info().devices[ggml_cuda_get_device()].warp_size; float logit_softcap; memcpy(&logit_softcap, (const float *) KQV->op_params + 2, sizeof(float)); @@ -485,7 +486,7 @@ void ggml_cuda_flash_attn_ext_wmma_f16_case(ggml_backend_cuda_context & ctx, ggm fattn_kernel = flash_attn_ext_f16< D, cols_per_block, nwarps, get_VKQ_stride(D, nwarps, frag_m), parallel_blocks, KQ_acc_t, use_logit_softcap>; } - launch_fattn(ctx, dst, fattn_kernel, nwarps, 0, true, true); + launch_fattn(ctx, dst, fattn_kernel, nwarps, 0, true, true, warp_size); return; } if (2*blocks_num_pb1 < 2*nsm) { @@ -500,7 +501,7 @@ void ggml_cuda_flash_attn_ext_wmma_f16_case(ggml_backend_cuda_context & ctx, ggm fattn_kernel = flash_attn_ext_f16< D, cols_per_block, nwarps, get_VKQ_stride(D, nwarps, frag_m), parallel_blocks, KQ_acc_t, use_logit_softcap>; } - launch_fattn(ctx, dst, fattn_kernel, nwarps, 0, true, true); + launch_fattn(ctx, dst, fattn_kernel, nwarps, 0, true, true, warp_size); return; } constexpr int parallel_blocks = 1; @@ -514,7 +515,7 @@ void ggml_cuda_flash_attn_ext_wmma_f16_case(ggml_backend_cuda_context & ctx, ggm fattn_kernel = flash_attn_ext_f16< D, cols_per_block, nwarps, get_VKQ_stride(D, nwarps, frag_m), parallel_blocks, KQ_acc_t, use_logit_softcap>; } - launch_fattn(ctx, dst, fattn_kernel, nwarps, 0, true, true); + launch_fattn(ctx, dst, fattn_kernel, nwarps, 0, true, true, warp_size); } void ggml_cuda_flash_attn_ext_wmma_f16(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { From 363f8c5d67dcf80e00c39580dfa86dc2774d74c2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alberto=20Cabrera=20P=C3=A9rez?= Date: Wed, 12 Mar 2025 09:57:32 +0000 Subject: [PATCH 042/398] sycl : variable sg_size support for mmvq kernels (#12336) --- ggml/src/ggml-sycl/mmvq.cpp | 152 ++++++++++++++++++------------------ 1 file changed, 75 insertions(+), 77 deletions(-) diff --git a/ggml/src/ggml-sycl/mmvq.cpp b/ggml/src/ggml-sycl/mmvq.cpp index 
221f65c21ea36..a96286d710153 100644 --- a/ggml/src/ggml-sycl/mmvq.cpp +++ b/ggml/src/ggml-sycl/mmvq.cpp @@ -3,44 +3,42 @@ #include template -static void mul_mat_vec_q(const void * __restrict__ vx, const void * __restrict__ vy, float * __restrict__ dst, const int ncols, const int nrows, - const sycl::nd_item<3> &item_ct1) { - const int row = item_ct1.get_group(2) * item_ct1.get_local_range(1) + - item_ct1.get_local_id(1); +static void mul_mat_vec_q(const void * __restrict__ vx, const void * __restrict__ vy, float * __restrict__ dst, + const int ncols, const int nrows, const sycl::nd_item<3> & item_ct1) { + const int row = item_ct1.get_group(2) * item_ct1.get_local_range(1) + item_ct1.get_local_id(1); if (row >= nrows) { return; } - const int blocks_per_row = ncols / qk; - const int blocks_per_warp = vdr * QK_WARP_SIZE / qi; - assert(blocks_per_warp>0); + const int blocks_per_row = ncols / qk; + constexpr int blocks_per_warp = (vdr * WARP_SIZE + qi - 1) / qi; // Ensuring blocks_per_warp > 0 -// partial sum for each thread + assert(blocks_per_warp > 0); + + // partial sum for each thread float tmp = 0.0f; - const block_q_t * x = (const block_q_t *) vx; + const block_q_t * x = (const block_q_t *) vx; const block_q8_1 * y = (const block_q8_1 *) vy; - for (int i = item_ct1.get_local_id(2) / (qi / vdr); i < blocks_per_row; - i += blocks_per_warp) { - const int ibx = row*blocks_per_row + i; // x block index + for (int i = item_ct1.get_local_id(2) / (qi / vdr); i < blocks_per_row; i += blocks_per_warp) { + const int ibx = row * blocks_per_row + i; // x block index - const int iby = i * (qk/QK8_1); // y block index that aligns with ibx + const int iby = i * (qk / QK8_1); // y block index that aligns with ibx - const int iqs = - vdr * - (item_ct1.get_local_id(2) % - (qi / vdr)); // x block quant index when casting the quants to int + for (size_t elem = 0; elem < qi / vdr; elem += WARP_SIZE) { + const int iqs = elem + vdr * (item_ct1.get_local_id(2) % + (qi / vdr)); // x block quant index when casting the quants to int - tmp += vec_dot_q_sycl(&x[ibx], &y[iby], iqs); + tmp += vec_dot_q_sycl(&x[ibx], &y[iby], iqs); + } } // sum up partial sums and write back result #pragma unroll - for (int mask = QK_WARP_SIZE / 2; mask > 0; mask >>= 1) { - tmp += - dpct::permute_sub_group_by_xor(item_ct1.get_sub_group(), tmp, mask); + for (int mask = WARP_SIZE / 2; mask > 0; mask >>= 1) { + tmp += dpct::permute_sub_group_by_xor(item_ct1.get_sub_group(), tmp, mask); } if (item_ct1.get_local_id(2) == 0) { @@ -62,7 +60,7 @@ static void mul_mat_vec_q_iq2_xxs_q8_1(const void *__restrict__ vx, } const int blocks_per_row = ncols / qk; - const int blocks_per_warp = vdr * QK_WARP_SIZE / qi; + const int blocks_per_warp = vdr * WARP_SIZE / qi; assert(blocks_per_warp>0); // partial sum for each thread @@ -87,7 +85,7 @@ static void mul_mat_vec_q_iq2_xxs_q8_1(const void *__restrict__ vx, // sum up partial sums and write back result #pragma unroll - for (int mask = QK_WARP_SIZE / 2; mask > 0; mask >>= 1) { + for (int mask = WARP_SIZE / 2; mask > 0; mask >>= 1) { tmp += dpct::permute_sub_group_by_xor(item_ct1.get_sub_group(), tmp, mask); } @@ -111,7 +109,7 @@ static void mul_mat_vec_q_iq2_xs_q8_1(const void *__restrict__ vx, } const int blocks_per_row = ncols / qk; - const int blocks_per_warp = vdr * QK_WARP_SIZE / qi; + const int blocks_per_warp = vdr * WARP_SIZE / qi; assert(blocks_per_warp>0); // partial sum for each thread float tmp = 0.0f; @@ -135,7 +133,7 @@ static void mul_mat_vec_q_iq2_xs_q8_1(const void *__restrict__ vx, // 
sum up partial sums and write back result #pragma unroll - for (int mask = QK_WARP_SIZE / 2; mask > 0; mask >>= 1) { + for (int mask = WARP_SIZE / 2; mask > 0; mask >>= 1) { tmp += dpct::permute_sub_group_by_xor(item_ct1.get_sub_group(), tmp, mask); } @@ -159,7 +157,7 @@ static void mul_mat_vec_q_iq2_s_q8_1(const void *__restrict__ vx, } const int blocks_per_row = ncols / qk; - const int blocks_per_warp = vdr * QK_WARP_SIZE / qi; + const int blocks_per_warp = vdr * WARP_SIZE / qi; assert(blocks_per_warp>0); // partial sum for each thread float tmp = 0.0f; @@ -183,7 +181,7 @@ static void mul_mat_vec_q_iq2_s_q8_1(const void *__restrict__ vx, // sum up partial sums and write back result #pragma unroll - for (int mask = QK_WARP_SIZE / 2; mask > 0; mask >>= 1) { + for (int mask = WARP_SIZE / 2; mask > 0; mask >>= 1) { tmp += dpct::permute_sub_group_by_xor(item_ct1.get_sub_group(), tmp, mask); } @@ -207,7 +205,7 @@ static void mul_mat_vec_q_iq3_xxs_q8_1(const void *__restrict__ vx, } const int blocks_per_row = ncols / qk; - const int blocks_per_warp = vdr * QK_WARP_SIZE / qi; + const int blocks_per_warp = vdr * WARP_SIZE / qi; assert(blocks_per_warp>0); // partial sum for each thread float tmp = 0.0f; @@ -231,7 +229,7 @@ static void mul_mat_vec_q_iq3_xxs_q8_1(const void *__restrict__ vx, // sum up partial sums and write back result #pragma unroll - for (int mask = QK_WARP_SIZE / 2; mask > 0; mask >>= 1) { + for (int mask = WARP_SIZE / 2; mask > 0; mask >>= 1) { tmp += dpct::permute_sub_group_by_xor(item_ct1.get_sub_group(), tmp, mask); } @@ -255,7 +253,7 @@ static void mul_mat_vec_q_iq3_s_q8_1(const void *__restrict__ vx, } const int blocks_per_row = ncols / qk; - const int blocks_per_warp = vdr * QK_WARP_SIZE / qi; + const int blocks_per_warp = vdr * WARP_SIZE / qi; assert(blocks_per_warp>0); // partial sum for each thread float tmp = 0.0f; @@ -279,7 +277,7 @@ static void mul_mat_vec_q_iq3_s_q8_1(const void *__restrict__ vx, // sum up partial sums and write back result #pragma unroll - for (int mask = QK_WARP_SIZE / 2; mask > 0; mask >>= 1) { + for (int mask = WARP_SIZE / 2; mask > 0; mask >>= 1) { tmp += dpct::permute_sub_group_by_xor(item_ct1.get_sub_group(), tmp, mask); } @@ -303,7 +301,7 @@ static void mul_mat_vec_q_iq1_s_q8_1(const void *__restrict__ vx, } const int blocks_per_row = ncols / qk; - const int blocks_per_warp = vdr * QK_WARP_SIZE / qi; + const int blocks_per_warp = vdr * WARP_SIZE / qi; assert(blocks_per_warp>0); // partial sum for each thread float tmp = 0.0f; @@ -327,7 +325,7 @@ static void mul_mat_vec_q_iq1_s_q8_1(const void *__restrict__ vx, // sum up partial sums and write back result #pragma unroll - for (int mask = QK_WARP_SIZE / 2; mask > 0; mask >>= 1) { + for (int mask = WARP_SIZE / 2; mask > 0; mask >>= 1) { tmp += dpct::permute_sub_group_by_xor(item_ct1.get_sub_group(), tmp, mask); } @@ -351,7 +349,7 @@ static void mul_mat_vec_q_iq1_m_q8_1(const void *__restrict__ vx, } const int blocks_per_row = ncols / qk; - const int blocks_per_warp = vdr * QK_WARP_SIZE / qi; + const int blocks_per_warp = vdr * WARP_SIZE / qi; assert(blocks_per_warp>0); // partial sum for each thread float tmp = 0.0f; @@ -375,7 +373,7 @@ static void mul_mat_vec_q_iq1_m_q8_1(const void *__restrict__ vx, // sum up partial sums and write back result #pragma unroll - for (int mask = QK_WARP_SIZE / 2; mask > 0; mask >>= 1) { + for (int mask = WARP_SIZE / 2; mask > 0; mask >>= 1) { tmp += dpct::permute_sub_group_by_xor(item_ct1.get_sub_group(), tmp, mask); } @@ -399,7 +397,7 @@ static void 
mul_mat_vec_q_iq4_nl_q8_1(const void *__restrict__ vx, } const int blocks_per_row = ncols / qk; - const int blocks_per_warp = vdr * QK_WARP_SIZE / qi; + const int blocks_per_warp = vdr * WARP_SIZE / qi; assert(blocks_per_warp>0); // partial sum for each thread float tmp = 0.0f; @@ -423,7 +421,7 @@ static void mul_mat_vec_q_iq4_nl_q8_1(const void *__restrict__ vx, // sum up partial sums and write back result #pragma unroll - for (int mask = QK_WARP_SIZE / 2; mask > 0; mask >>= 1) { + for (int mask = WARP_SIZE / 2; mask > 0; mask >>= 1) { tmp += dpct::permute_sub_group_by_xor(item_ct1.get_sub_group(), tmp, mask); } @@ -448,7 +446,7 @@ static void mul_mat_vec_q_iq4_xs_q8_1(const void *__restrict__ vx, } const int blocks_per_row = ncols / qk; - const int blocks_per_warp = vdr * QK_WARP_SIZE / qi; + const int blocks_per_warp = vdr * WARP_SIZE / qi; assert(blocks_per_warp>0); // partial sum for each thread float tmp = 0.0f; @@ -472,7 +470,7 @@ static void mul_mat_vec_q_iq4_xs_q8_1(const void *__restrict__ vx, // sum up partial sums and write back result #pragma unroll - for (int mask = QK_WARP_SIZE / 2; mask > 0; mask >>= 1) { + for (int mask = WARP_SIZE / 2; mask > 0; mask >>= 1) { tmp += dpct::permute_sub_group_by_xor(item_ct1.get_sub_group(), tmp, mask); } @@ -489,7 +487,7 @@ static void mul_mat_vec_q4_0_q8_1_sycl(const void *vx, const void *vy, GGML_ASSERT(ncols % QK4_0 == 0); const int block_num_y = (nrows + GGML_SYCL_MMV_Y - 1) / GGML_SYCL_MMV_Y; const sycl::range<3> block_nums(1, 1, block_num_y); - const sycl::range<3> block_dims(1, GGML_SYCL_MMV_Y, QK_WARP_SIZE); + const sycl::range<3> block_dims(1, GGML_SYCL_MMV_Y, WARP_SIZE); { stream->submit([&](sycl::handler &cgh) { @@ -497,7 +495,7 @@ static void mul_mat_vec_q4_0_q8_1_sycl(const void *vx, const void *vy, cgh.parallel_for( sycl::nd_range<3>(block_nums * block_dims, block_dims), [=](sycl::nd_item<3> item_ct1) - [[intel::reqd_sub_group_size(QK_WARP_SIZE)]] { + [[intel::reqd_sub_group_size(WARP_SIZE)]] { mul_mat_vec_q( vx, vy, dst, ncols, nrows, item_ct1); @@ -513,7 +511,7 @@ static void mul_mat_vec_q4_1_q8_1_sycl(const void *vx, const void *vy, GGML_ASSERT(ncols % QK4_1 == 0); const int block_num_y = (nrows + GGML_SYCL_MMV_Y - 1) / GGML_SYCL_MMV_Y; const sycl::range<3> block_nums(1, 1, block_num_y); - const sycl::range<3> block_dims(1, GGML_SYCL_MMV_Y, QK_WARP_SIZE); + const sycl::range<3> block_dims(1, GGML_SYCL_MMV_Y, WARP_SIZE); { stream->submit([&](sycl::handler &cgh) { @@ -521,7 +519,7 @@ static void mul_mat_vec_q4_1_q8_1_sycl(const void *vx, const void *vy, cgh.parallel_for( sycl::nd_range<3>(block_nums * block_dims, block_dims), [=](sycl::nd_item<3> item_ct1) - [[intel::reqd_sub_group_size(QK_WARP_SIZE)]] { + [[intel::reqd_sub_group_size(WARP_SIZE)]] { mul_mat_vec_q( vx, vy, dst, ncols, nrows, item_ct1); @@ -537,7 +535,7 @@ static void mul_mat_vec_q5_0_q8_1_sycl(const void *vx, const void *vy, GGML_ASSERT(ncols % QK5_0 == 0); const int block_num_y = (nrows + GGML_SYCL_MMV_Y - 1) / GGML_SYCL_MMV_Y; const sycl::range<3> block_nums(1, 1, block_num_y); - const sycl::range<3> block_dims(1, GGML_SYCL_MMV_Y, QK_WARP_SIZE); + const sycl::range<3> block_dims(1, GGML_SYCL_MMV_Y, WARP_SIZE); { stream->submit([&](sycl::handler &cgh) { @@ -545,7 +543,7 @@ static void mul_mat_vec_q5_0_q8_1_sycl(const void *vx, const void *vy, cgh.parallel_for( sycl::nd_range<3>(block_nums * block_dims, block_dims), [=](sycl::nd_item<3> item_ct1) - [[intel::reqd_sub_group_size(QK_WARP_SIZE)]] { + [[intel::reqd_sub_group_size(WARP_SIZE)]] { mul_mat_vec_q( vx, 
vy, dst, ncols, nrows, item_ct1); @@ -561,7 +559,7 @@ static void mul_mat_vec_q5_1_q8_1_sycl(const void *vx, const void *vy, GGML_ASSERT(ncols % QK5_1 == 0); const int block_num_y = (nrows + GGML_SYCL_MMV_Y - 1) / GGML_SYCL_MMV_Y; const sycl::range<3> block_nums(1, 1, block_num_y); - const sycl::range<3> block_dims(1, GGML_SYCL_MMV_Y, QK_WARP_SIZE); + const sycl::range<3> block_dims(1, GGML_SYCL_MMV_Y, WARP_SIZE); { stream->submit([&](sycl::handler &cgh) { @@ -569,7 +567,7 @@ static void mul_mat_vec_q5_1_q8_1_sycl(const void *vx, const void *vy, cgh.parallel_for( sycl::nd_range<3>(block_nums * block_dims, block_dims), [=](sycl::nd_item<3> item_ct1) - [[intel::reqd_sub_group_size(QK_WARP_SIZE)]] { + [[intel::reqd_sub_group_size(WARP_SIZE)]] { mul_mat_vec_q( vx, vy, dst, ncols, nrows, item_ct1); @@ -585,7 +583,7 @@ static void mul_mat_vec_q8_0_q8_1_sycl(const void *vx, const void *vy, GGML_ASSERT(ncols % QK8_0 == 0); const int block_num_y = (nrows + GGML_SYCL_MMV_Y - 1) / GGML_SYCL_MMV_Y; const sycl::range<3> block_nums(1, 1, block_num_y); - const sycl::range<3> block_dims(1, GGML_SYCL_MMV_Y, QK_WARP_SIZE); + const sycl::range<3> block_dims(1, GGML_SYCL_MMV_Y, WARP_SIZE); { stream->submit([&](sycl::handler &cgh) { @@ -593,7 +591,7 @@ static void mul_mat_vec_q8_0_q8_1_sycl(const void *vx, const void *vy, cgh.parallel_for( sycl::nd_range<3>(block_nums * block_dims, block_dims), [=](sycl::nd_item<3> item_ct1) - [[intel::reqd_sub_group_size(QK_WARP_SIZE)]] { + [[intel::reqd_sub_group_size(WARP_SIZE)]] { mul_mat_vec_q( vx, vy, dst, ncols, nrows, item_ct1); @@ -609,7 +607,7 @@ static void mul_mat_vec_q2_K_q8_1_sycl(const void *vx, const void *vy, GGML_ASSERT(ncols % QK_K == 0); const int block_num_y = (nrows + GGML_SYCL_MMV_Y - 1) / GGML_SYCL_MMV_Y; const sycl::range<3> block_nums(1, 1, block_num_y); - const sycl::range<3> block_dims(1, GGML_SYCL_MMV_Y, QK_WARP_SIZE); + const sycl::range<3> block_dims(1, GGML_SYCL_MMV_Y, WARP_SIZE); { stream->submit([&](sycl::handler &cgh) { @@ -617,7 +615,7 @@ static void mul_mat_vec_q2_K_q8_1_sycl(const void *vx, const void *vy, cgh.parallel_for( sycl::nd_range<3>(block_nums * block_dims, block_dims), [=](sycl::nd_item<3> item_ct1) - [[intel::reqd_sub_group_size(QK_WARP_SIZE)]] { + [[intel::reqd_sub_group_size(WARP_SIZE)]] { mul_mat_vec_q( vx, vy, dst, ncols, nrows, item_ct1); @@ -633,7 +631,7 @@ static void mul_mat_vec_q3_K_q8_1_sycl(const void *vx, const void *vy, GGML_ASSERT(ncols % QK_K == 0); const int block_num_y = (nrows + GGML_SYCL_MMV_Y - 1) / GGML_SYCL_MMV_Y; const sycl::range<3> block_nums(1, 1, block_num_y); - const sycl::range<3> block_dims(1, GGML_SYCL_MMV_Y, QK_WARP_SIZE); + const sycl::range<3> block_dims(1, GGML_SYCL_MMV_Y, WARP_SIZE); { stream->submit([&](sycl::handler &cgh) { @@ -641,7 +639,7 @@ static void mul_mat_vec_q3_K_q8_1_sycl(const void *vx, const void *vy, cgh.parallel_for( sycl::nd_range<3>(block_nums * block_dims, block_dims), [=](sycl::nd_item<3> item_ct1) - [[intel::reqd_sub_group_size(QK_WARP_SIZE)]] { + [[intel::reqd_sub_group_size(WARP_SIZE)]] { mul_mat_vec_q( vx, vy, dst, ncols, nrows, item_ct1); @@ -657,7 +655,7 @@ static void mul_mat_vec_q4_K_q8_1_sycl(const void *vx, const void *vy, GGML_ASSERT(ncols % QK_K == 0); const int block_num_y = (nrows + GGML_SYCL_MMV_Y - 1) / GGML_SYCL_MMV_Y; const sycl::range<3> block_nums(1, 1, block_num_y); - const sycl::range<3> block_dims(1, GGML_SYCL_MMV_Y, QK_WARP_SIZE); + const sycl::range<3> block_dims(1, GGML_SYCL_MMV_Y, WARP_SIZE); { stream->submit([&](sycl::handler &cgh) { @@ -665,7 
+663,7 @@ static void mul_mat_vec_q4_K_q8_1_sycl(const void *vx, const void *vy, cgh.parallel_for( sycl::nd_range<3>(block_nums * block_dims, block_dims), [=](sycl::nd_item<3> item_ct1) - [[intel::reqd_sub_group_size(QK_WARP_SIZE)]] { + [[intel::reqd_sub_group_size(WARP_SIZE)]] { mul_mat_vec_q( vx, vy, dst, ncols, nrows, item_ct1); @@ -681,7 +679,7 @@ static void mul_mat_vec_q5_K_q8_1_sycl(const void *vx, const void *vy, GGML_ASSERT(ncols % QK_K == 0); const int block_num_y = (nrows + GGML_SYCL_MMV_Y - 1) / GGML_SYCL_MMV_Y; const sycl::range<3> block_nums(1, 1, block_num_y); - const sycl::range<3> block_dims(1, GGML_SYCL_MMV_Y, QK_WARP_SIZE); + const sycl::range<3> block_dims(1, GGML_SYCL_MMV_Y, WARP_SIZE); { stream->submit([&](sycl::handler &cgh) { @@ -689,7 +687,7 @@ static void mul_mat_vec_q5_K_q8_1_sycl(const void *vx, const void *vy, cgh.parallel_for( sycl::nd_range<3>(block_nums * block_dims, block_dims), [=](sycl::nd_item<3> item_ct1) - [[intel::reqd_sub_group_size(QK_WARP_SIZE)]] { + [[intel::reqd_sub_group_size(WARP_SIZE)]] { mul_mat_vec_q( vx, vy, dst, ncols, nrows, item_ct1); @@ -705,7 +703,7 @@ static void mul_mat_vec_q6_K_q8_1_sycl(const void *vx, const void *vy, GGML_ASSERT(ncols % QK_K == 0); const int block_num_y = (nrows + GGML_SYCL_MMV_Y - 1) / GGML_SYCL_MMV_Y; const sycl::range<3> block_nums(1, 1, block_num_y); - const sycl::range<3> block_dims(1, GGML_SYCL_MMV_Y, QK_WARP_SIZE); + const sycl::range<3> block_dims(1, GGML_SYCL_MMV_Y, WARP_SIZE); { stream->submit([&](sycl::handler &cgh) { @@ -713,7 +711,7 @@ static void mul_mat_vec_q6_K_q8_1_sycl(const void *vx, const void *vy, cgh.parallel_for( sycl::nd_range<3>(block_nums * block_dims, block_dims), [=](sycl::nd_item<3> item_ct1) - [[intel::reqd_sub_group_size(QK_WARP_SIZE)]] { + [[intel::reqd_sub_group_size(WARP_SIZE)]] { mul_mat_vec_q( vx, vy, dst, ncols, nrows, item_ct1); @@ -730,13 +728,13 @@ static void mul_mat_vec_iq2_xxs_q8_1_sycl(const void *vx, const void *vy, GGML_ASSERT(ncols % QK_K == 0); const int block_num_y = (nrows + GGML_SYCL_MMV_Y - 1) / GGML_SYCL_MMV_Y; const sycl::range<3> block_nums(1, 1, block_num_y); - const sycl::range<3> block_dims(1, GGML_SYCL_MMV_Y, QK_WARP_SIZE); + const sycl::range<3> block_dims(1, GGML_SYCL_MMV_Y, WARP_SIZE); { stream->submit([&](sycl::handler &cgh) { cgh.parallel_for( sycl::nd_range<3>(block_nums * block_dims, block_dims), [=](sycl::nd_item<3> item_ct1) - [[intel::reqd_sub_group_size(QK_WARP_SIZE)]] { + [[intel::reqd_sub_group_size(WARP_SIZE)]] { mul_mat_vec_q_iq2_xxs_q8_1( vx, vy, dst, ncols, nrows, item_ct1); }); @@ -751,13 +749,13 @@ static void mul_mat_vec_iq2_xs_q8_1_sycl(const void *vx, const void *vy, GGML_ASSERT(ncols % QK_K == 0); const int block_num_y = (nrows + GGML_SYCL_MMV_Y - 1) / GGML_SYCL_MMV_Y; const sycl::range<3> block_nums(1, 1, block_num_y); - const sycl::range<3> block_dims(1, GGML_SYCL_MMV_Y, QK_WARP_SIZE); + const sycl::range<3> block_dims(1, GGML_SYCL_MMV_Y, WARP_SIZE); { stream->submit([&](sycl::handler & cgh) { cgh.parallel_for( sycl::nd_range<3>(block_nums * block_dims, block_dims), [=](sycl::nd_item<3> item_ct1) - [[intel::reqd_sub_group_size(QK_WARP_SIZE)]] { + [[intel::reqd_sub_group_size(WARP_SIZE)]] { mul_mat_vec_q_iq2_xs_q8_1( vx, vy, dst, ncols, nrows, item_ct1); }); @@ -772,14 +770,14 @@ static void mul_mat_vec_iq2_s_q8_1_sycl(const void *vx, const void *vy, GGML_ASSERT(ncols % QK_K == 0); const int block_num_y = (nrows + GGML_SYCL_MMV_Y - 1) / GGML_SYCL_MMV_Y; const sycl::range<3> block_nums(1, 1, block_num_y); - const sycl::range<3> 
block_dims(1, GGML_SYCL_MMV_Y, QK_WARP_SIZE); + const sycl::range<3> block_dims(1, GGML_SYCL_MMV_Y, WARP_SIZE); { stream->submit([&](sycl::handler &cgh) { cgh.parallel_for( sycl::nd_range<3>(block_nums * block_dims, block_dims), [=](sycl::nd_item<3> item_ct1) - [[intel::reqd_sub_group_size(QK_WARP_SIZE)]] { + [[intel::reqd_sub_group_size(WARP_SIZE)]] { mul_mat_vec_q_iq2_s_q8_1( vx, vy, dst, ncols, nrows, item_ct1); }); @@ -794,14 +792,14 @@ static void mul_mat_vec_iq3_xxs_q8_1_sycl(const void *vx, const void *vy, GGML_ASSERT(ncols % QK_K == 0); const int block_num_y = (nrows + GGML_SYCL_MMV_Y - 1) / GGML_SYCL_MMV_Y; const sycl::range<3> block_nums(1, 1, block_num_y); - const sycl::range<3> block_dims(1, GGML_SYCL_MMV_Y, QK_WARP_SIZE); + const sycl::range<3> block_dims(1, GGML_SYCL_MMV_Y, WARP_SIZE); { stream->submit([&](sycl::handler &cgh) { cgh.parallel_for( sycl::nd_range<3>(block_nums * block_dims, block_dims), [=](sycl::nd_item<3> item_ct1) - [[intel::reqd_sub_group_size(QK_WARP_SIZE)]] { + [[intel::reqd_sub_group_size(WARP_SIZE)]] { mul_mat_vec_q_iq3_xxs_q8_1( vx, vy, dst, ncols, nrows, item_ct1); }); @@ -816,14 +814,14 @@ static void mul_mat_vec_iq3_s_q8_1_sycl(const void *vx, const void *vy, GGML_ASSERT(ncols % QK_K == 0); const int block_num_y = (nrows + GGML_SYCL_MMV_Y - 1) / GGML_SYCL_MMV_Y; const sycl::range<3> block_nums(1, 1, block_num_y); - const sycl::range<3> block_dims(1, GGML_SYCL_MMV_Y, QK_WARP_SIZE); + const sycl::range<3> block_dims(1, GGML_SYCL_MMV_Y, WARP_SIZE); { stream->submit([&](sycl::handler &cgh) { cgh.parallel_for( sycl::nd_range<3>(block_nums * block_dims, block_dims), [=](sycl::nd_item<3> item_ct1) - [[intel::reqd_sub_group_size(QK_WARP_SIZE)]] { + [[intel::reqd_sub_group_size(WARP_SIZE)]] { mul_mat_vec_q_iq3_s_q8_1( vx, vy, dst, ncols, nrows, item_ct1); }); @@ -838,14 +836,14 @@ static void mul_mat_vec_iq1_s_q8_1_sycl(const void *vx, const void *vy, GGML_ASSERT(ncols % QK_K == 0); const int block_num_y = (nrows + GGML_SYCL_MMV_Y - 1) / GGML_SYCL_MMV_Y; const sycl::range<3> block_nums(1, 1, block_num_y); - const sycl::range<3> block_dims(1, GGML_SYCL_MMV_Y, QK_WARP_SIZE); + const sycl::range<3> block_dims(1, GGML_SYCL_MMV_Y, WARP_SIZE); { stream->submit([&](sycl::handler &cgh) { cgh.parallel_for( sycl::nd_range<3>(block_nums * block_dims, block_dims), [=](sycl::nd_item<3> item_ct1) - [[intel::reqd_sub_group_size(QK_WARP_SIZE)]] { + [[intel::reqd_sub_group_size(WARP_SIZE)]] { mul_mat_vec_q_iq1_s_q8_1( vx, vy, dst, ncols, nrows, item_ct1); }); @@ -860,13 +858,13 @@ static void mul_mat_vec_iq1_m_q8_1_sycl(const void *vx, const void *vy, GGML_ASSERT(ncols % QK_K == 0); const int block_num_y = (nrows + GGML_SYCL_MMV_Y - 1) / GGML_SYCL_MMV_Y; const sycl::range<3> block_nums(1, 1, block_num_y); - const sycl::range<3> block_dims(1, GGML_SYCL_MMV_Y, QK_WARP_SIZE); + const sycl::range<3> block_dims(1, GGML_SYCL_MMV_Y, WARP_SIZE); { stream->submit([&](sycl::handler &cgh) { cgh.parallel_for( sycl::nd_range<3>(block_nums * block_dims, block_dims), [=](sycl::nd_item<3> item_ct1) - [[intel::reqd_sub_group_size(QK_WARP_SIZE)]] { + [[intel::reqd_sub_group_size(WARP_SIZE)]] { mul_mat_vec_q_iq1_m_q8_1( vx, vy, dst, ncols, nrows, item_ct1); }); @@ -881,14 +879,14 @@ static void mul_mat_vec_iq4_nl_q8_1_sycl(const void *vx, const void *vy, GGML_ASSERT(ncols % QK4_NL == 0); const int block_num_y = (nrows + GGML_SYCL_MMV_Y - 1) / GGML_SYCL_MMV_Y; const sycl::range<3> block_nums(1, 1, block_num_y); - const sycl::range<3> block_dims(1, GGML_SYCL_MMV_Y, QK_WARP_SIZE); + const 
sycl::range<3> block_dims(1, GGML_SYCL_MMV_Y, WARP_SIZE); { stream->submit([&](sycl::handler &cgh) { cgh.parallel_for( sycl::nd_range<3>(block_nums * block_dims, block_dims), [=](sycl::nd_item<3> item_ct1) - [[intel::reqd_sub_group_size(QK_WARP_SIZE)]] { + [[intel::reqd_sub_group_size(WARP_SIZE)]] { mul_mat_vec_q_iq4_nl_q8_1( vx, vy, dst, ncols, nrows, item_ct1); }); @@ -903,14 +901,14 @@ static void mul_mat_vec_iq4_xs_q8_1_sycl(const void *vx, const void *vy, GGML_ASSERT(ncols % QK_K == 0); const int block_num_y = (nrows + GGML_SYCL_MMV_Y - 1) / GGML_SYCL_MMV_Y; const sycl::range<3> block_nums(1, 1, block_num_y); - const sycl::range<3> block_dims(1, GGML_SYCL_MMV_Y, QK_WARP_SIZE); + const sycl::range<3> block_dims(1, GGML_SYCL_MMV_Y, WARP_SIZE); { stream->submit([&](sycl::handler &cgh) { cgh.parallel_for( sycl::nd_range<3>(block_nums * block_dims, block_dims), [=](sycl::nd_item<3> item_ct1) - [[intel::reqd_sub_group_size(QK_WARP_SIZE)]] { + [[intel::reqd_sub_group_size(WARP_SIZE)]] { mul_mat_vec_q_iq4_xs_q8_1( vx, vy, dst, ncols, nrows, item_ct1); }); From 80a02aa8588ef167d616f76f1781b104c245ace0 Mon Sep 17 00:00:00 2001 From: Daniel Bevenius Date: Wed, 12 Mar 2025 13:45:32 +0100 Subject: [PATCH 043/398] llama.swiftui : fix xcframework dir in README [no ci] (#12353) This commit fixes the path to the xcframework in the README file which I had forgotten to change after renaming the build directory. --- examples/llama.swiftui/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/llama.swiftui/README.md b/examples/llama.swiftui/README.md index 5b0ee947208db..bd7ce37747375 100644 --- a/examples/llama.swiftui/README.md +++ b/examples/llama.swiftui/README.md @@ -16,7 +16,7 @@ Open `llama.swiftui.xcodeproj` project in Xcode and you should be able to build a simulator or a real device. To use the framework with a different project, the XCFramework can be added to the project by -adding `build-ios/llama.xcframework` by dragging and dropping it into the project navigator, or +adding `build-apple/llama.xcframework` by dragging and dropping it into the project navigator, or by manually selecting the framework in the "Frameworks, Libraries, and Embedded Content" section of the project settings. 
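For reference, once `build-apple/llama.xcframework` is linked, Swift code can call the llama.cpp C API directly through the `llama` module. The snippet below is a minimal sketch of loading a model and creating a context from a consuming app; the entry-point names (`llama_model_load_from_file`, `llama_init_from_model`) and the parameter values are assumptions based on the current C API and may need adjusting to the framework version actually being linked.

```swift
import llama

// Minimal smoke test for a project that links build-apple/llama.xcframework.
// The model path and parameter values are placeholders; adjust them for the target app.
func llamaSmokeTest(modelPath: String) {
    llama_backend_init()
    defer { llama_backend_free() }

    var modelParams = llama_model_default_params()
    modelParams.n_gpu_layers = 99                 // offload layers to Metal where available

    guard let model = llama_model_load_from_file(modelPath, modelParams) else {
        print("failed to load model at \(modelPath)")
        return
    }
    defer { llama_model_free(model) }

    var ctxParams = llama_context_default_params()
    ctxParams.n_ctx = 2048                        // context size for this smoke test

    guard let ctx = llama_init_from_model(model, ctxParams) else {
        print("failed to create llama_context")
        return
    }
    defer { llama_free(ctx) }

    print("llama.xcframework loaded: model and context initialized")
}
```

The llama.swiftui example wraps these same calls in its `LlamaContext` actor, so it also serves as a working reference when wiring the framework into a new app.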
From f08f4b3187b691bb08a8884ed39ebaa94e956707 Mon Sep 17 00:00:00 2001 From: Oscar Barenys Date: Wed, 12 Mar 2025 20:06:58 +0100 Subject: [PATCH 044/398] Update build.yml for Windows Vulkan builder to use Vulkan 1.4.304 SDK for VK_NV_cooperative_matrix2 support (#12301) --- .github/workflows/build.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index f2c81c0c26120..1e2429364513a 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -774,7 +774,7 @@ jobs: env: OPENBLAS_VERSION: 0.3.23 SDE_VERSION: 9.33.0-2024-01-07 - VULKAN_VERSION: 1.3.261.1 + VULKAN_VERSION: 1.4.304.1 strategy: matrix: From 2048b5913d51beab82dfe29955f9008130b936c0 Mon Sep 17 00:00:00 2001 From: Ishaan Gandhi Date: Thu, 13 Mar 2025 06:10:05 -0400 Subject: [PATCH 045/398] server : fix crash when using verbose output with input tokens that are not in printable range (#12178) (#12338) * Fix DOS index bug * Remove new APIs * remove extra line * Remove from API * Add extra newline * Update examples/server/server.cpp --------- Co-authored-by: Xuan-Son Nguyen --- examples/server/server.cpp | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/examples/server/server.cpp b/examples/server/server.cpp index 8cb8d0033f7d9..ce0195475d658 100644 --- a/examples/server/server.cpp +++ b/examples/server/server.cpp @@ -2040,6 +2040,18 @@ struct server_context { return ret; } + bool can_be_detokenized(const struct llama_context * ctx, const std::vector & tokens) { + const llama_model * model = llama_get_model(ctx); + const llama_vocab * vocab = llama_model_get_vocab(model); + const int32_t n_vocab = llama_vocab_n_tokens(vocab); + for (const auto & token : tokens) { + if (token < 0 || token >= n_vocab) { + return false; + } + } + return true; + } + bool launch_slot_with_task(server_slot & slot, const server_task & task) { slot.reset(); slot.id_task = task.id; @@ -2054,6 +2066,11 @@ struct server_context { slot.lora = task.params.lora; } + bool can_detokenize = can_be_detokenized(ctx, slot.prompt_tokens); + if (!can_detokenize) { + send_error(task, "Prompt contains invalid tokens", ERROR_TYPE_INVALID_REQUEST); + return false; + } SLT_DBG(slot, "launching slot : %s\n", safe_json_to_str(slot.to_json()).c_str()); if (slot.n_predict > 0 && slot.params.n_predict > slot.n_predict) { From e0dbec0bc6cd4b6230cda7a6ed1e9dac08d1600b Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Thu, 13 Mar 2025 12:35:44 +0200 Subject: [PATCH 046/398] llama : refactor llama_context, llama_kv_cache, llm_build_context (#12181) * llama : refactor llama_context, llama_kv_cache, llm_build_context ggml-ci * graph : don't mutate the KV cache during defrag ggml-ci * context : reduce virtuals + remove test function ggml-ci * context : move interface implementation to source file + factory ggml-ci * graph : move KV cache build functions to llama_context impl ggml-ci * graph : remove model reference from build_pooling ggml-ci * graph : remove llama_model reference ggml-ci * kv_cache : provide rope factors ggml-ci * graph : rework inputs to use only unique_ptr, remove attn input abstraction ggml-ci * context : remove llama_context_i abstraction ggml-ci * context : clean-up ggml-ci * graph : clean-up ggml-ci * llama : remove redundant keywords (struct, enum) ggml-ci * model : adapt gemma3 ggml-ci * graph : restore same attention ops as on master ggml-ci * llama : remove TODO + fix indent ggml-ci --- common/common.cpp | 6 +- common/speculative.cpp | 8 +- 
examples/batched-bench/batched-bench.cpp | 4 +- examples/batched.swift/Sources/main.swift | 2 +- .../cvector-generator/cvector-generator.cpp | 2 +- examples/embedding/embedding.cpp | 2 +- examples/gritlm/gritlm.cpp | 4 +- examples/imatrix/imatrix.cpp | 2 +- examples/infill/infill.cpp | 4 +- examples/llama-bench/llama-bench.cpp | 4 +- .../llama/src/main/cpp/llama-android.cpp | 8 +- .../llama.cpp.swift/LibLlama.swift | 8 +- examples/llava/gemma3-cli.cpp | 2 +- examples/lookahead/lookahead.cpp | 12 +- examples/lookup/lookup.cpp | 2 +- examples/main/main.cpp | 12 +- examples/parallel/parallel.cpp | 10 +- examples/passkey/passkey.cpp | 28 +- examples/perplexity/perplexity.cpp | 12 +- examples/quantize-stats/quantize-stats.cpp | 4 +- examples/retrieval/retrieval.cpp | 2 +- examples/run/run.cpp | 4 +- examples/save-load-state/save-load-state.cpp | 4 +- examples/server/server.cpp | 22 +- examples/server/tests/utils.py | 2 +- examples/simple-chat/simple-chat.cpp | 4 +- .../speculative-simple/speculative-simple.cpp | 2 +- examples/speculative/speculative.cpp | 26 +- include/llama.h | 104 +- src/CMakeLists.txt | 7 +- src/llama-adapter.cpp | 39 +- src/llama-adapter.h | 20 +- src/llama-batch.h | 4 +- src/llama-context.cpp | 3463 ++++-- src/llama-context.h | 288 +- src/llama-graph.cpp | 1695 +++ src/llama-graph.h | 576 + src/llama-io.cpp | 15 + src/llama-io.h | 35 + src/llama-kv-cache.cpp | 1297 +- src/llama-kv-cache.h | 288 +- src/llama-memory.cpp | 1 + src/llama-memory.h | 21 + src/llama-model.cpp | 7748 +++++++++++- src/llama-model.h | 19 +- src/llama.cpp | 10035 +--------------- 46 files changed, 13785 insertions(+), 12072 deletions(-) create mode 100644 src/llama-graph.cpp create mode 100644 src/llama-graph.h create mode 100644 src/llama-io.cpp create mode 100644 src/llama-io.h create mode 100644 src/llama-memory.cpp create mode 100644 src/llama-memory.h diff --git a/common/common.cpp b/common/common.cpp index 6448b7b03d6d2..8487e3834bccb 100644 --- a/common/common.cpp +++ b/common/common.cpp @@ -955,8 +955,8 @@ struct common_init_result common_init_from_params(common_params & params) { return iparams; } - if (params.ctx_shift && !llama_kv_cache_can_shift(lctx)) { - LOG_WRN("%s: KV cache shifting is not supported for this model, disabling KV cache shifting\n", __func__); + if (params.ctx_shift && !llama_kv_self_can_shift(lctx)) { + LOG_WRN("%s: KV cache shifting is not supported for this context, disabling KV cache shifting\n", __func__); params.ctx_shift = false; } @@ -1060,7 +1060,7 @@ struct common_init_result common_init_from_params(common_params & params) { if (llama_model_has_decoder(model)) { llama_decode(lctx, llama_batch_get_one(tmp.data(), std::min(tmp.size(), (size_t) params.n_batch))); } - llama_kv_cache_clear(lctx); + llama_kv_self_clear(lctx); llama_synchronize(lctx); llama_perf_context_reset(lctx); } diff --git a/common/speculative.cpp b/common/speculative.cpp index 1bac3a1ce101e..ccad70fa9ed85 100644 --- a/common/speculative.cpp +++ b/common/speculative.cpp @@ -173,7 +173,7 @@ llama_tokens common_speculative_gen_draft( result.reserve(params.n_draft); if (reuse_n == 0) { - llama_kv_cache_clear(ctx); + llama_kv_self_clear(ctx); prompt.clear(); } else { @@ -192,14 +192,14 @@ llama_tokens common_speculative_gen_draft( } if (reuse_i > 0) { - llama_kv_cache_seq_rm (ctx, 0, 0, reuse_i); - llama_kv_cache_seq_add(ctx, 0, reuse_i, -1, -reuse_i); + llama_kv_self_seq_rm (ctx, 0, 0, reuse_i); + llama_kv_self_seq_add(ctx, 0, reuse_i, -1, -reuse_i); prompt.erase(prompt.begin(), prompt.begin() + 
reuse_i); } if (reuse_n < (int) prompt.size()) { - llama_kv_cache_seq_rm (ctx, 0, reuse_n, -1); + llama_kv_self_seq_rm (ctx, 0, reuse_n, -1); prompt.erase(prompt.begin() + reuse_n, prompt.end()); } diff --git a/examples/batched-bench/batched-bench.cpp b/examples/batched-bench/batched-bench.cpp index 0659ab6f119a7..430e8be512653 100644 --- a/examples/batched-bench/batched-bench.cpp +++ b/examples/batched-bench/batched-bench.cpp @@ -132,7 +132,7 @@ int main(int argc, char ** argv) { const auto t_pp_start = ggml_time_us(); - llama_kv_cache_clear(ctx); + llama_kv_self_clear(ctx); if (!decode_helper(ctx, batch, ctx_params.n_batch)) { LOG_ERR("%s: llama_decode() failed\n", __func__); @@ -141,7 +141,7 @@ int main(int argc, char ** argv) { if (is_pp_shared) { for (int32_t i = 1; i < pl; ++i) { - llama_kv_cache_seq_cp(ctx, 0, i, -1, -1); + llama_kv_self_seq_cp(ctx, 0, i, -1, -1); } } diff --git a/examples/batched.swift/Sources/main.swift b/examples/batched.swift/Sources/main.swift index 55c31166ca278..514989e340e2c 100644 --- a/examples/batched.swift/Sources/main.swift +++ b/examples/batched.swift/Sources/main.swift @@ -116,7 +116,7 @@ if llama_decode(context, batch) != 0 { } for i in 1 ..< n_parallel { - llama_kv_cache_seq_cp(context, 0, Int32(i), 0, batch.n_tokens) + llama_kv_self_seq_cp(context, 0, Int32(i), 0, batch.n_tokens) } if n_parallel > 1 { diff --git a/examples/cvector-generator/cvector-generator.cpp b/examples/cvector-generator/cvector-generator.cpp index c72528dac3ff0..2a907155010cb 100644 --- a/examples/cvector-generator/cvector-generator.cpp +++ b/examples/cvector-generator/cvector-generator.cpp @@ -342,7 +342,7 @@ static bool cb_eval(struct ggml_tensor * t, bool ask, void * user_data) { } static bool get_hidden_layers(llama_context * ctx, std::vector & tokens) { - llama_kv_cache_clear(ctx); + llama_kv_self_clear(ctx); if (llama_decode(ctx, llama_batch_get_one(tokens.data(), tokens.size()))) { fprintf(stderr, "%s : failed to eval\n", __func__); return false; diff --git a/examples/embedding/embedding.cpp b/examples/embedding/embedding.cpp index 3dd9f2b07d177..6f08904159fd5 100644 --- a/examples/embedding/embedding.cpp +++ b/examples/embedding/embedding.cpp @@ -38,7 +38,7 @@ static void batch_decode(llama_context * ctx, llama_batch & batch, float * outpu const struct llama_model * model = llama_get_model(ctx); // clear previous kv_cache values (irrelevant for embeddings) - llama_kv_cache_clear(ctx); + llama_kv_self_clear(ctx); // run model LOG_INF("%s: n_tokens = %d, n_seq = %d\n", __func__, batch.n_tokens, n_seq); diff --git a/examples/gritlm/gritlm.cpp b/examples/gritlm/gritlm.cpp index 72eb46257429e..f7db7861c1ad5 100644 --- a/examples/gritlm/gritlm.cpp +++ b/examples/gritlm/gritlm.cpp @@ -45,7 +45,7 @@ static std::vector> encode(llama_context * ctx, const std::ve } // clear previous kv_cache values (irrelevant for embeddings) - llama_kv_cache_clear(ctx); + llama_kv_self_clear(ctx); llama_set_embeddings(ctx, true); llama_set_causal_attn(ctx, false); @@ -102,7 +102,7 @@ static std::string generate(llama_context * ctx, llama_sampler * smpl, const std llama_token eos_token = llama_vocab_eos(vocab); - llama_kv_cache_clear(ctx); + llama_kv_self_clear(ctx); llama_set_embeddings(ctx, false); llama_set_causal_attn(ctx, true); diff --git a/examples/imatrix/imatrix.cpp b/examples/imatrix/imatrix.cpp index 91649c45065f4..31b675e8f90b9 100644 --- a/examples/imatrix/imatrix.cpp +++ b/examples/imatrix/imatrix.cpp @@ -495,7 +495,7 @@ static bool compute_imatrix(llama_context * ctx, const 
common_params & params) { const auto t_start = std::chrono::high_resolution_clock::now(); // clear the KV cache - llama_kv_cache_clear(ctx); + llama_kv_self_clear(ctx); llama_batch batch = llama_batch_init(n_batch, 0, 1); diff --git a/examples/infill/infill.cpp b/examples/infill/infill.cpp index 489a208b66b34..4e2f7b7270003 100644 --- a/examples/infill/infill.cpp +++ b/examples/infill/infill.cpp @@ -332,8 +332,8 @@ int main(int argc, char ** argv) { LOG_DBG("context full, swapping: n_past = %d, n_left = %d, n_ctx = %d, n_keep = %d, n_discard = %d\n", n_past, n_left, n_ctx, params.n_keep, n_discard); - llama_kv_cache_seq_rm (ctx, 0, params.n_keep + 1 , params.n_keep + n_discard + 1); - llama_kv_cache_seq_add(ctx, 0, params.n_keep + 1 + n_discard, n_past, -n_discard); + llama_kv_self_seq_rm (ctx, 0, params.n_keep + 1 , params.n_keep + n_discard + 1); + llama_kv_self_seq_add(ctx, 0, params.n_keep + 1 + n_discard, n_past, -n_discard); n_past -= n_discard; diff --git a/examples/llama-bench/llama-bench.cpp b/examples/llama-bench/llama-bench.cpp index f518d02d38689..cbcbfcee861ee 100644 --- a/examples/llama-bench/llama-bench.cpp +++ b/examples/llama-bench/llama-bench.cpp @@ -1578,7 +1578,7 @@ int main(int argc, char ** argv) { test t(inst, lmodel, ctx); - llama_kv_cache_clear(ctx); + llama_kv_self_clear(ctx); // cool off before the test if (params.delay) { @@ -1618,7 +1618,7 @@ int main(int argc, char ** argv) { } for (int i = 0; i < params.reps; i++) { - llama_kv_cache_clear(ctx); + llama_kv_self_clear(ctx); uint64_t t_start = get_time_ns(); diff --git a/examples/llama.android/llama/src/main/cpp/llama-android.cpp b/examples/llama.android/llama/src/main/cpp/llama-android.cpp index 0de61ce77c4fa..9654cd53cf8d5 100644 --- a/examples/llama.android/llama/src/main/cpp/llama-android.cpp +++ b/examples/llama.android/llama/src/main/cpp/llama-android.cpp @@ -194,7 +194,7 @@ Java_android_llama_cpp_LLamaAndroid_bench_1model( } batch->logits[batch->n_tokens - 1] = true; - llama_kv_cache_clear(context); + llama_kv_self_clear(context); const auto t_pp_start = ggml_time_us(); if (llama_decode(context, *batch) != 0) { @@ -206,7 +206,7 @@ Java_android_llama_cpp_LLamaAndroid_bench_1model( LOGi("Benchmark text generation (tg)"); - llama_kv_cache_clear(context); + llama_kv_self_clear(context); const auto t_tg_start = ggml_time_us(); for (i = 0; i < tg; i++) { @@ -223,7 +223,7 @@ Java_android_llama_cpp_LLamaAndroid_bench_1model( const auto t_tg_end = ggml_time_us(); - llama_kv_cache_clear(context); + llama_kv_self_clear(context); const auto t_pp = double(t_pp_end - t_pp_start) / 1000000.0; const auto t_tg = double(t_tg_end - t_tg_start) / 1000000.0; @@ -448,5 +448,5 @@ Java_android_llama_cpp_LLamaAndroid_completion_1loop( extern "C" JNIEXPORT void JNICALL Java_android_llama_cpp_LLamaAndroid_kv_1cache_1clear(JNIEnv *, jobject, jlong context) { - llama_kv_cache_clear(reinterpret_cast(context)); + llama_kv_self_clear(reinterpret_cast(context)); } diff --git a/examples/llama.swiftui/llama.cpp.swift/LibLlama.swift b/examples/llama.swiftui/llama.cpp.swift/LibLlama.swift index ee7141a663224..f6e31abc93c09 100644 --- a/examples/llama.swiftui/llama.cpp.swift/LibLlama.swift +++ b/examples/llama.swiftui/llama.cpp.swift/LibLlama.swift @@ -210,7 +210,7 @@ actor LlamaContext { } batch.logits[Int(batch.n_tokens) - 1] = 1 // true - llama_kv_cache_clear(context) + llama_kv_self_clear(context) let t_pp_start = DispatchTime.now().uptimeNanoseconds / 1000; @@ -223,7 +223,7 @@ actor LlamaContext { // bench text generation - 
llama_kv_cache_clear(context) + llama_kv_self_clear(context) let t_tg_start = DispatchTime.now().uptimeNanoseconds / 1000; @@ -242,7 +242,7 @@ actor LlamaContext { let t_tg_end = DispatchTime.now().uptimeNanoseconds / 1000; - llama_kv_cache_clear(context) + llama_kv_self_clear(context) let t_pp = Double(t_pp_end - t_pp_start) / 1000000.0 let t_tg = Double(t_tg_end - t_tg_start) / 1000000.0 @@ -292,7 +292,7 @@ actor LlamaContext { func clear() { tokens_list.removeAll() temporary_invalid_cchars.removeAll() - llama_kv_cache_clear(context) + llama_kv_self_clear(context) } private func tokenize(text: String, add_bos: Bool) -> [llama_token] { diff --git a/examples/llava/gemma3-cli.cpp b/examples/llava/gemma3-cli.cpp index a07864d4e59f6..c36bb2eda0c70 100644 --- a/examples/llava/gemma3-cli.cpp +++ b/examples/llava/gemma3-cli.cpp @@ -309,7 +309,7 @@ int main(int argc, char ** argv) { } if (line == "/clear") { ctx.n_past = 0; - llama_kv_cache_seq_rm(ctx.lctx, 0, 1, -1); // keep BOS + llama_kv_self_seq_rm(ctx.lctx, 0, 1, -1); // keep BOS LOG("Chat history cleared\n\n"); continue; } diff --git a/examples/lookahead/lookahead.cpp b/examples/lookahead/lookahead.cpp index b9e8de694c0e8..7df20aee17046 100644 --- a/examples/lookahead/lookahead.cpp +++ b/examples/lookahead/lookahead.cpp @@ -96,7 +96,7 @@ int main(int argc, char ** argv) { llama_decode(ctx, llama_batch_get_one(&inp.back(), 1)); for (int s = 1; s < W + G + 1; ++s) { - llama_kv_cache_seq_cp(ctx, 0, s, -1, -1); + llama_kv_self_seq_cp(ctx, 0, s, -1, -1); } const auto t_enc_end = ggml_time_us(); @@ -438,17 +438,17 @@ int main(int argc, char ** argv) { // KV cache management // if no verification token matched, we simply remove all cells from this batch -> no fragmentation - llama_kv_cache_seq_rm(ctx, -1, n_past, -1); + llama_kv_self_seq_rm(ctx, -1, n_past, -1); if (seq_id_best != 0) { // if a verification token matched, we keep the best sequence and remove the rest // this leads to some KV cache fragmentation - llama_kv_cache_seq_keep(ctx, seq_id_best); - llama_kv_cache_seq_cp (ctx, seq_id_best, 0, -1, -1); - llama_kv_cache_seq_rm (ctx, seq_id_best, -1, -1); + llama_kv_self_seq_keep(ctx, seq_id_best); + llama_kv_self_seq_cp (ctx, seq_id_best, 0, -1, -1); + llama_kv_self_seq_rm (ctx, seq_id_best, -1, -1); for (int s = 1; s < W + G + 1; ++s) { - llama_kv_cache_seq_cp(ctx, 0, s, -1, -1); + llama_kv_self_seq_cp(ctx, 0, s, -1, -1); } } } diff --git a/examples/lookup/lookup.cpp b/examples/lookup/lookup.cpp index dbd0444ec8742..4ae93b2a5ed15 100644 --- a/examples/lookup/lookup.cpp +++ b/examples/lookup/lookup.cpp @@ -192,7 +192,7 @@ int main(int argc, char ** argv){ // KV cache management // clean the cache of draft tokens that weren't accepted - llama_kv_cache_seq_rm(ctx, 0, n_past, -1); + llama_kv_self_seq_rm(ctx, 0, n_past, -1); common_batch_clear(batch_tgt); common_batch_add(batch_tgt, draft[0], n_past, { 0 }, true); diff --git a/examples/main/main.cpp b/examples/main/main.cpp index 4e0c69473badb..fd7410a646c69 100644 --- a/examples/main/main.cpp +++ b/examples/main/main.cpp @@ -354,7 +354,7 @@ int main(int argc, char ** argv) { } // remove any "future" tokens that we might have inherited from the previous session - llama_kv_cache_seq_rm(ctx, -1, n_matching_session_tokens, -1); + llama_kv_self_seq_rm(ctx, -1, n_matching_session_tokens, -1); } LOG_DBG("recalculate the cached logits (check): embd_inp.size() %zu, n_matching_session_tokens %zu, embd_inp.size() %zu, session_tokens.size() %zu\n", @@ -602,8 +602,8 @@ int main(int argc, char ** argv) { 
LOG_DBG("context full, swapping: n_past = %d, n_left = %d, n_ctx = %d, n_keep = %d, n_discard = %d\n", n_past, n_left, n_ctx, params.n_keep, n_discard); - llama_kv_cache_seq_rm (ctx, 0, params.n_keep , params.n_keep + n_discard); - llama_kv_cache_seq_add(ctx, 0, params.n_keep + n_discard, n_past, -n_discard); + llama_kv_self_seq_rm (ctx, 0, params.n_keep , params.n_keep + n_discard); + llama_kv_self_seq_add(ctx, 0, params.n_keep + n_discard, n_past, -n_discard); n_past -= n_discard; @@ -626,9 +626,9 @@ int main(int argc, char ** argv) { LOG_DBG("div: [%6d, %6d] / %6d -> [%6d, %6d]\n", ga_i + ib*bd, ga_i + ib*bd + ga_w, ga_n, (ga_i + ib*bd)/ga_n, (ga_i + ib*bd + ga_w)/ga_n); LOG_DBG("shift: [%6d, %6d] + %6d -> [%6d, %6d]\n", ga_i + ib*bd + ga_w, n_past + ib*bd, dd, ga_i + ib*bd + ga_w + dd, n_past + ib*bd + dd); - llama_kv_cache_seq_add(ctx, 0, ga_i, n_past, ib*bd); - llama_kv_cache_seq_div(ctx, 0, ga_i + ib*bd, ga_i + ib*bd + ga_w, ga_n); - llama_kv_cache_seq_add(ctx, 0, ga_i + ib*bd + ga_w, n_past + ib*bd, dd); + llama_kv_self_seq_add(ctx, 0, ga_i, n_past, ib*bd); + llama_kv_self_seq_div(ctx, 0, ga_i + ib*bd, ga_i + ib*bd + ga_w, ga_n); + llama_kv_self_seq_add(ctx, 0, ga_i + ib*bd + ga_w, n_past + ib*bd, dd); n_past -= bd; diff --git a/examples/parallel/parallel.cpp b/examples/parallel/parallel.cpp index be18909ed4f97..588632f0432b2 100644 --- a/examples/parallel/parallel.cpp +++ b/examples/parallel/parallel.cpp @@ -202,7 +202,7 @@ int main(int argc, char ** argv) { // assign the system KV cache to all parallel sequences for (int32_t i = 1; i <= n_clients; ++i) { - llama_kv_cache_seq_cp(ctx, 0, i, -1, -1); + llama_kv_self_seq_cp(ctx, 0, i, -1, -1); } LOG_INF("\n"); @@ -234,9 +234,9 @@ int main(int argc, char ** argv) { if (batch.n_tokens == 0) { // all sequences have ended - clear the entire KV cache for (int i = 1; i <= n_clients; ++i) { - llama_kv_cache_seq_rm(ctx, i, -1, -1); + llama_kv_self_seq_rm(ctx, i, -1, -1); // but keep the system prompt - llama_kv_cache_seq_cp(ctx, 0, i, -1, -1); + llama_kv_self_seq_cp(ctx, 0, i, -1, -1); } LOG_INF("%s: clearing the KV cache\n", __func__); @@ -372,8 +372,8 @@ int main(int argc, char ** argv) { } // delete only the generated part of the sequence, i.e. 
keep the system prompt in the cache - llama_kv_cache_seq_rm(ctx, client.id + 1, -1, -1); - llama_kv_cache_seq_cp(ctx, 0, client.id + 1, -1, -1); + llama_kv_self_seq_rm(ctx, client.id + 1, -1, -1); + llama_kv_self_seq_cp(ctx, 0, client.id + 1, -1, -1); const auto t_main_end = ggml_time_us(); diff --git a/examples/passkey/passkey.cpp b/examples/passkey/passkey.cpp index fa85190518ef5..ea3a6c1fca3ee 100644 --- a/examples/passkey/passkey.cpp +++ b/examples/passkey/passkey.cpp @@ -133,11 +133,11 @@ int main(int argc, char ** argv) { const int ib = i/n_batch - 1; const int bd = n_batch_grp*(n_grp - 1); - llama_kv_cache_seq_add (ctx, 0, n_past - n_batch, n_past, ib*bd); - llama_kv_cache_seq_div (ctx, 0, n_past - n_batch + ib*bd, n_past + ib*bd, n_grp); - llama_kv_cache_update (ctx); + llama_kv_self_seq_add (ctx, 0, n_past - n_batch, n_past, ib*bd); + llama_kv_self_seq_div (ctx, 0, n_past - n_batch + ib*bd, n_past + ib*bd, n_grp); + llama_kv_self_update (ctx); - n_past = llama_kv_cache_seq_pos_max(ctx, 0) + 1; + n_past = llama_kv_self_seq_pos_max(ctx, 0) + 1; } common_batch_clear(batch); @@ -167,12 +167,12 @@ int main(int argc, char ** argv) { LOG_INF("%s: shifting KV cache with %d\n", __func__, n_discard); - llama_kv_cache_seq_rm (ctx, 0, n_keep , n_keep + n_discard); - llama_kv_cache_seq_add(ctx, 0, n_keep + n_discard, n_ctx, -n_discard); - //llama_kv_cache_defrag (ctx); - llama_kv_cache_update (ctx); + llama_kv_self_seq_rm (ctx, 0, n_keep , n_keep + n_discard); + llama_kv_self_seq_add(ctx, 0, n_keep + n_discard, n_ctx, -n_discard); + //llama_kv_self_defrag (ctx); + llama_kv_self_update (ctx); - n_past = llama_kv_cache_seq_pos_max(ctx, 0) + 1; + n_past = llama_kv_self_seq_pos_max(ctx, 0) + 1; common_batch_clear(batch); @@ -198,12 +198,12 @@ int main(int argc, char ** argv) { if (n_discard > 0) { LOG_INF("%s: shifting KV cache with %d to free space for the answer\n", __func__, n_discard); - llama_kv_cache_seq_rm (ctx, 0, n_keep , n_keep + n_discard); - llama_kv_cache_seq_add(ctx, 0, n_keep + n_discard, n_ctx, -n_discard); - //llama_kv_cache_defrag (ctx); - llama_kv_cache_update (ctx); + llama_kv_self_seq_rm (ctx, 0, n_keep , n_keep + n_discard); + llama_kv_self_seq_add(ctx, 0, n_keep + n_discard, n_ctx, -n_discard); + //llama_kv_self_defrag (ctx); + llama_kv_self_update (ctx); - n_past = llama_kv_cache_seq_pos_max(ctx, 0) + 1; + n_past = llama_kv_self_seq_pos_max(ctx, 0) + 1; } } diff --git a/examples/perplexity/perplexity.cpp b/examples/perplexity/perplexity.cpp index 5d07421e827d1..8c413f7d66e6d 100644 --- a/examples/perplexity/perplexity.cpp +++ b/examples/perplexity/perplexity.cpp @@ -361,7 +361,7 @@ static results_perplexity perplexity_v2(llama_context * ctx, const common_params const auto t_start = std::chrono::high_resolution_clock::now(); // clear the KV cache - llama_kv_cache_clear(ctx); + llama_kv_self_clear(ctx); llama_batch batch = llama_batch_init(n_batch, 0, 1); @@ -547,7 +547,7 @@ static results_perplexity perplexity(llama_context * ctx, const common_params & const auto t_start = std::chrono::high_resolution_clock::now(); // clear the KV cache - llama_kv_cache_clear(ctx); + llama_kv_self_clear(ctx); for (int j = 0; j < num_batches; ++j) { const int batch_start = start + j * n_batch; @@ -924,7 +924,7 @@ static void hellaswag_score(llama_context * ctx, const common_params & params) { return; } - llama_kv_cache_clear(ctx); + llama_kv_self_clear(ctx); // decode all tasks [i0, i1) if (!decode_helper(ctx, batch, batch_logits, n_batch, n_vocab)) { @@ -1203,7 +1203,7 @@ static void 
winogrande_score(llama_context * ctx, const common_params & params) return; } - llama_kv_cache_clear(ctx); + llama_kv_self_clear(ctx); // decode all tasks [i0, i1) if (!decode_helper(ctx, batch, batch_logits, n_batch, n_vocab)) { @@ -1575,7 +1575,7 @@ static void multiple_choice_score(llama_context * ctx, const common_params & par return; } - llama_kv_cache_clear(ctx); + llama_kv_self_clear(ctx); // decode all tasks [i0, i1) if (!decode_helper(ctx, batch, batch_logits, n_batch, n_vocab)) { @@ -1765,7 +1765,7 @@ static void kl_divergence(llama_context * ctx, const common_params & params) { } // clear the KV cache - llama_kv_cache_clear(ctx); + llama_kv_self_clear(ctx); llama_batch batch = llama_batch_init(n_batch, 0, 1); diff --git a/examples/quantize-stats/quantize-stats.cpp b/examples/quantize-stats/quantize-stats.cpp index bd2f734670de8..dd07ab9b37456 100644 --- a/examples/quantize-stats/quantize-stats.cpp +++ b/examples/quantize-stats/quantize-stats.cpp @@ -1,6 +1,6 @@ #include "ggml.h" #include "llama.h" -#include "llama-context.h" +#include "llama-model.h" #include "common.h" #include @@ -328,7 +328,7 @@ int main(int argc, char ** argv) { } } - const auto & tensors = llama_internal_get_tensor_map(ctx); + const auto & tensors = llama_internal_get_tensor_map(model); // check layer tensors int included_layers = 0; diff --git a/examples/retrieval/retrieval.cpp b/examples/retrieval/retrieval.cpp index 2439022a229b7..0efe20d4b3f5d 100644 --- a/examples/retrieval/retrieval.cpp +++ b/examples/retrieval/retrieval.cpp @@ -83,7 +83,7 @@ static void batch_add_seq(llama_batch & batch, const std::vector & toke static void batch_decode(llama_context * ctx, llama_batch & batch, float * output, int n_seq, int n_embd) { // clear previous kv_cache values (irrelevant for embeddings) - llama_kv_cache_clear(ctx); + llama_kv_self_clear(ctx); // run model LOG_INF("%s: n_tokens = %d, n_seq = %d\n", __func__, batch.n_tokens, n_seq); diff --git a/examples/run/run.cpp b/examples/run/run.cpp index 38407d5190923..437f2533e5777 100644 --- a/examples/run/run.cpp +++ b/examples/run/run.cpp @@ -891,7 +891,7 @@ static int apply_chat_template(const struct common_chat_templates * tmpls, Llama // Function to tokenize the prompt static int tokenize_prompt(const llama_vocab * vocab, const std::string & prompt, std::vector & prompt_tokens, const LlamaData & llama_data) { - const bool is_first = llama_get_kv_cache_used_cells(llama_data.context.get()) == 0; + const bool is_first = llama_kv_self_used_cells(llama_data.context.get()) == 0; const int n_prompt_tokens = -llama_tokenize(vocab, prompt.c_str(), prompt.size(), NULL, 0, is_first, true); prompt_tokens.resize(n_prompt_tokens); @@ -907,7 +907,7 @@ static int tokenize_prompt(const llama_vocab * vocab, const std::string & prompt // Check if we have enough space in the context to evaluate this batch static int check_context_size(const llama_context_ptr & ctx, const llama_batch & batch) { const int n_ctx = llama_n_ctx(ctx.get()); - const int n_ctx_used = llama_get_kv_cache_used_cells(ctx.get()); + const int n_ctx_used = llama_kv_self_used_cells(ctx.get()); if (n_ctx_used + batch.n_tokens > n_ctx) { printf(LOG_COL_DEFAULT "\n"); printe("context size exceeded\n"); diff --git a/examples/save-load-state/save-load-state.cpp b/examples/save-load-state/save-load-state.cpp index cf7cbd8159cf8..760ebbbf08788 100644 --- a/examples/save-load-state/save-load-state.cpp +++ b/examples/save-load-state/save-load-state.cpp @@ -15,7 +15,7 @@ int main(int argc, char ** argv) { return 1; } - 
print_build_info(); + common_init(); if (params.n_predict < 0) { params.n_predict = 16; @@ -196,7 +196,7 @@ int main(int argc, char ** argv) { fprintf(stderr, "%s : seq 0 copied, %zd bytes\n", __func__, ncopy); // erase whole kv - llama_kv_cache_clear(ctx3); + llama_kv_self_clear(ctx3); fprintf(stderr, "%s : kv cache cleared\n", __func__); // restore kv into seq 1 diff --git a/examples/server/server.cpp b/examples/server/server.cpp index ce0195475d658..71e053b202cd2 100644 --- a/examples/server/server.cpp +++ b/examples/server/server.cpp @@ -2113,7 +2113,7 @@ struct server_context { SRV_DBG("%s", "clearing KV cache\n"); // clear the entire KV cache - llama_kv_cache_clear(ctx); + llama_kv_self_clear(ctx); clean_kv_cache = false; } @@ -2655,8 +2655,8 @@ struct server_context { res->n_tasks_deferred = queue_tasks.queue_tasks_deferred.size(); res->t_start = metrics.t_start; - res->kv_cache_tokens_count = llama_get_kv_cache_token_count(ctx); - res->kv_cache_used_cells = llama_get_kv_cache_used_cells(ctx); + res->kv_cache_tokens_count = llama_kv_self_n_tokens(ctx); + res->kv_cache_used_cells = llama_kv_self_used_cells(ctx); res->n_prompt_tokens_processed_total = metrics.n_prompt_tokens_processed_total; res->t_prompt_processing_total = metrics.t_prompt_processing_total; @@ -2772,7 +2772,7 @@ struct server_context { // Erase token cache const size_t n_erased = slot->cache_tokens.size(); - llama_kv_cache_seq_rm(ctx, slot->id, -1, -1); + llama_kv_self_seq_rm(ctx, slot->id, -1, -1); slot->cache_tokens.clear(); auto res = std::make_unique(); @@ -2840,8 +2840,8 @@ struct server_context { SLT_WRN(slot, "slot context shift, n_keep = %d, n_left = %d, n_discard = %d\n", n_keep, n_left, n_discard); - llama_kv_cache_seq_rm (ctx, slot.id, n_keep , n_keep + n_discard); - llama_kv_cache_seq_add(ctx, slot.id, n_keep + n_discard, slot.n_past, -n_discard); + llama_kv_self_seq_rm (ctx, slot.id, n_keep , n_keep + n_discard); + llama_kv_self_seq_add(ctx, slot.id, n_keep + n_discard, slot.n_past, -n_discard); if (slot.params.cache_prompt) { for (size_t i = n_keep + n_discard; i < slot.cache_tokens.size(); i++) { @@ -3032,8 +3032,8 @@ struct server_context { const int64_t kv_shift = (int64_t) head_p - (int64_t) head_c; - llama_kv_cache_seq_rm (ctx, slot.id, head_p, head_c); - llama_kv_cache_seq_add(ctx, slot.id, head_c, head_c + n_match, kv_shift); + llama_kv_self_seq_rm (ctx, slot.id, head_p, head_c); + llama_kv_self_seq_add(ctx, slot.id, head_c, head_c + n_match, kv_shift); for (size_t i = 0; i < n_match; i++) { slot.cache_tokens[head_p + i] = slot.cache_tokens[head_c + i]; @@ -3071,9 +3071,9 @@ struct server_context { } // keep only the common part - if (!llama_kv_cache_seq_rm(ctx, slot.id, slot.n_past, -1)) { + if (!llama_kv_self_seq_rm(ctx, slot.id, slot.n_past, -1)) { // could not partially delete (likely using a non-Transformer model) - llama_kv_cache_seq_rm(ctx, slot.id, -1, -1); + llama_kv_self_seq_rm(ctx, slot.id, -1, -1); // there is no common part left slot.n_past = 0; @@ -3313,7 +3313,7 @@ struct server_context { slot.cache_tokens.push_back(id); slot.cache_tokens.insert(slot.cache_tokens.end(), ids.begin(), ids.end() - 1); - llama_kv_cache_seq_rm(ctx, slot.id, slot.n_past, -1); + llama_kv_self_seq_rm(ctx, slot.id, slot.n_past, -1); for (size_t i = 0; i < ids.size(); ++i) { completion_token_output result; diff --git a/examples/server/tests/utils.py b/examples/server/tests/utils.py index ec2d8ec55853c..30aa8660950a1 100644 --- a/examples/server/tests/utils.py +++ b/examples/server/tests/utils.py @@ -302,7 
+302,7 @@ def tinyllama2() -> ServerProcess: server.model_hf_repo = "ggml-org/models" server.model_hf_file = "tinyllamas/stories260K.gguf" server.model_alias = "tinyllama-2" - server.n_ctx = 256 + server.n_ctx = 512 server.n_batch = 32 server.n_slots = 2 server.n_predict = 64 diff --git a/examples/simple-chat/simple-chat.cpp b/examples/simple-chat/simple-chat.cpp index c5534cc13e4b4..84f4159737260 100644 --- a/examples/simple-chat/simple-chat.cpp +++ b/examples/simple-chat/simple-chat.cpp @@ -98,7 +98,7 @@ int main(int argc, char ** argv) { auto generate = [&](const std::string & prompt) { std::string response; - const bool is_first = llama_get_kv_cache_used_cells(ctx) == 0; + const bool is_first = llama_kv_self_used_cells(ctx) == 0; // tokenize the prompt const int n_prompt_tokens = -llama_tokenize(vocab, prompt.c_str(), prompt.size(), NULL, 0, is_first, true); @@ -113,7 +113,7 @@ int main(int argc, char ** argv) { while (true) { // check if we have enough space in the context to evaluate this batch int n_ctx = llama_n_ctx(ctx); - int n_ctx_used = llama_get_kv_cache_used_cells(ctx); + int n_ctx_used = llama_kv_self_used_cells(ctx); if (n_ctx_used + batch.n_tokens > n_ctx) { printf("\033[0m\n"); fprintf(stderr, "context size exceeded\n"); diff --git a/examples/speculative-simple/speculative-simple.cpp b/examples/speculative-simple/speculative-simple.cpp index 403ba2dd21914..a5d2bc9d09de7 100644 --- a/examples/speculative-simple/speculative-simple.cpp +++ b/examples/speculative-simple/speculative-simple.cpp @@ -217,7 +217,7 @@ int main(int argc, char ** argv) { { LOG_DBG("clear kv cache from any extra tokens, n_past = %d\n", n_past); - llama_kv_cache_seq_rm(ctx_tgt, 0, n_past, -1); + llama_kv_self_seq_rm(ctx_tgt, 0, n_past, -1); } if ((params.n_predict >= 0 && n_predict > params.n_predict) || has_eos) { diff --git a/examples/speculative/speculative.cpp b/examples/speculative/speculative.cpp index c7ccea50dbbd4..bfddc67e034fb 100644 --- a/examples/speculative/speculative.cpp +++ b/examples/speculative/speculative.cpp @@ -420,14 +420,14 @@ int main(int argc, char ** argv) { { LOG_DBG("keeping sequence %d, n_past_tgt = %d, n_past_dft = %d\n", s_keep, n_past_tgt, n_past_dft); - llama_kv_cache_seq_keep(ctx_dft, s_keep); - llama_kv_cache_seq_cp (ctx_dft, s_keep, 0, -1, -1); - llama_kv_cache_seq_keep(ctx_dft, 0); - - llama_kv_cache_seq_rm (ctx_tgt, s_keep, n_past_tgt, -1); - llama_kv_cache_seq_keep(ctx_tgt, s_keep); - llama_kv_cache_seq_cp (ctx_tgt, s_keep, 0, -1, -1); - llama_kv_cache_seq_keep(ctx_tgt, 0); + llama_kv_self_seq_keep(ctx_dft, s_keep); + llama_kv_self_seq_cp (ctx_dft, s_keep, 0, -1, -1); + llama_kv_self_seq_keep(ctx_dft, 0); + + llama_kv_self_seq_rm (ctx_tgt, s_keep, n_past_tgt, -1); + llama_kv_self_seq_keep(ctx_tgt, s_keep); + llama_kv_self_seq_cp (ctx_tgt, s_keep, 0, -1, -1); + llama_kv_self_seq_keep(ctx_tgt, 0); } for (int s = 0; s < n_seq_dft; ++s) { @@ -444,7 +444,7 @@ int main(int argc, char ** argv) { common_batch_clear(batch_dft); common_batch_add (batch_dft, token_id, n_past_dft, { 0 }, true); - llama_kv_cache_seq_rm(ctx_dft, 0, n_past_dft, -1); + llama_kv_self_seq_rm(ctx_dft, 0, n_past_dft, -1); // LOG_DBG("dft batch: %s\n", LOG_BATCH_TOSTR_PRETTY(ctx_dft, batch_dft).c_str()); llama_decode(ctx_dft, batch_dft); @@ -503,8 +503,8 @@ int main(int argc, char ** argv) { if (n_seq_cur < n_seq_dft && cur_p->data[f].p > p_draft_split) { LOG_DBG("splitting seq %3d into %3d\n", s, n_seq_cur); - llama_kv_cache_seq_rm(ctx_dft, n_seq_cur, -1, -1); - llama_kv_cache_seq_cp(ctx_dft, s, 
n_seq_cur, -1, -1); + llama_kv_self_seq_rm(ctx_dft, n_seq_cur, -1, -1); + llama_kv_self_seq_cp(ctx_dft, s, n_seq_cur, -1, -1); // all previous tokens from this branch are now also part of the new branch for (int t = 0; t < batch_tgt.n_tokens; ++t) { @@ -585,9 +585,9 @@ int main(int argc, char ** argv) { // evaluate the target model on the drafted tokens { - llama_kv_cache_seq_keep(ctx_tgt, 0); + llama_kv_self_seq_keep(ctx_tgt, 0); for (int s = 1; s < n_seq_dft; ++s) { - llama_kv_cache_seq_cp(ctx_tgt, 0, s, -1, -1); + llama_kv_self_seq_cp(ctx_tgt, 0, s, -1, -1); } // LOG_DBG("target batch: %s\n", LOG_BATCH_TOSTR_PRETTY(ctx_tgt, batch_tgt).c_str()); diff --git a/include/llama.h b/include/llama.h index d62792c0a6760..e5286f06162ab 100644 --- a/include/llama.h +++ b/include/llama.h @@ -60,6 +60,7 @@ extern "C" { struct llama_model; struct llama_context; struct llama_sampler; + struct llama_kv_cache; typedef int32_t llama_pos; typedef int32_t llama_token; @@ -469,7 +470,8 @@ extern "C" { DEPRECATED(LLAMA_API int32_t llama_n_vocab (const struct llama_vocab * vocab), "use llama_vocab_n_tokens instead"); LLAMA_API const struct llama_model * llama_get_model (const struct llama_context * ctx); - LLAMA_API enum llama_pooling_type llama_pooling_type(const struct llama_context * ctx); + LLAMA_API struct llama_kv_cache * llama_get_kv_self ( struct llama_context * ctx); + LLAMA_API enum llama_pooling_type llama_pooling_type(const struct llama_context * ctx); // TODO: rename to llama_get_pooling_type LLAMA_API const struct llama_vocab * llama_model_get_vocab(const struct llama_model * model); LLAMA_API enum llama_rope_type llama_model_rope_type(const struct llama_model * model); @@ -586,7 +588,7 @@ extern "C" { // KV cache // - // TODO: remove llama_kv_cache_view_* API + // TODO: start using struct llama_kv_cache // Information associated with an individual cell in the KV cache view. struct llama_kv_cache_view_cell { @@ -641,13 +643,19 @@ extern "C" { // Returns the number of tokens in the KV cache (slow, use only for debug) // If a KV cell has multiple sequences assigned to it, it will be counted multiple times - LLAMA_API int32_t llama_get_kv_cache_token_count(const struct llama_context * ctx); + LLAMA_API int32_t llama_kv_self_n_tokens(const struct llama_context * ctx); + + DEPRECATED(LLAMA_API int32_t llama_get_kv_cache_token_count(const struct llama_context * ctx), + "use llama_kv_self_n_tokens instead"); // Returns the number of used KV cells (i.e. 
have at least one sequence assigned to them) - LLAMA_API int32_t llama_get_kv_cache_used_cells(const struct llama_context * ctx); + LLAMA_API int32_t llama_kv_self_used_cells(const struct llama_context * ctx); + + DEPRECATED(LLAMA_API int32_t llama_get_kv_cache_used_cells(const struct llama_context * ctx), + "use llama_kv_self_used_cells instead"); // Clear the KV cache - both cell info is erased and KV data is zeroed - LLAMA_API void llama_kv_cache_clear( + LLAMA_API void llama_kv_self_clear( struct llama_context * ctx); // Removes all tokens that belong to the specified sequence and have positions in [p0, p1) @@ -655,7 +663,7 @@ extern "C" { // seq_id < 0 : match any sequence // p0 < 0 : [0, p1] // p1 < 0 : [p0, inf) - LLAMA_API bool llama_kv_cache_seq_rm( + LLAMA_API bool llama_kv_self_seq_rm( struct llama_context * ctx, llama_seq_id seq_id, llama_pos p0, @@ -665,7 +673,7 @@ extern "C" { // Note that this does not allocate extra KV cache memory - it simply assigns the tokens to the new sequence // p0 < 0 : [0, p1] // p1 < 0 : [p0, inf) - LLAMA_API void llama_kv_cache_seq_cp( + LLAMA_API void llama_kv_self_seq_cp( struct llama_context * ctx, llama_seq_id seq_id_src, llama_seq_id seq_id_dst, @@ -673,17 +681,17 @@ extern "C" { llama_pos p1); // Removes all tokens that do not belong to the specified sequence - LLAMA_API void llama_kv_cache_seq_keep( + LLAMA_API void llama_kv_self_seq_keep( struct llama_context * ctx, llama_seq_id seq_id); // Adds relative position "delta" to all tokens that belong to the specified sequence and have positions in [p0, p1) // If the KV cache is RoPEd, the KV data is updated accordingly: // - lazily on next llama_decode() - // - explicitly with llama_kv_cache_update() + // - explicitly with llama_kv_self_update() // p0 < 0 : [0, p1] // p1 < 0 : [p0, inf) - LLAMA_API void llama_kv_cache_seq_add( + LLAMA_API void llama_kv_self_seq_add( struct llama_context * ctx, llama_seq_id seq_id, llama_pos p0, @@ -693,10 +701,10 @@ extern "C" { // Integer division of the positions by factor of `d > 1` // If the KV cache is RoPEd, the KV data is updated accordingly: // - lazily on next llama_decode() - // - explicitly with llama_kv_cache_update() + // - explicitly with llama_kv_self_update() // p0 < 0 : [0, p1] // p1 < 0 : [p0, inf) - LLAMA_API void llama_kv_cache_seq_div( + LLAMA_API void llama_kv_self_seq_div( struct llama_context * ctx, llama_seq_id seq_id, llama_pos p0, @@ -704,24 +712,76 @@ extern "C" { int d); // Returns the largest position present in the KV cache for the specified sequence - LLAMA_API llama_pos llama_kv_cache_seq_pos_max( + LLAMA_API llama_pos llama_kv_self_seq_pos_max( struct llama_context * ctx, - llama_seq_id seq_id); - - // TODO: the llama_kv_cache_defrag and llama_kv_cache_update API tightly couples llama_context with llama_kv_cache - // how to avoid this? + llama_seq_id seq_id); // Defragment the KV cache // This will be applied: // - lazily on next llama_decode() - // - explicitly with llama_kv_cache_update() - LLAMA_API void llama_kv_cache_defrag(struct llama_context * ctx); + // - explicitly with llama_kv_self_update() + LLAMA_API void llama_kv_self_defrag(struct llama_context * ctx); + + // Check if the context supports KV cache shifting + LLAMA_API bool llama_kv_self_can_shift(const struct llama_context * ctx); // Apply the KV cache updates (such as K-shifts, defragmentation, etc.) 
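For context on how the entry points above fit together: llama_kv_self_seq_add() and llama_kv_self_seq_div() only queue a K-shift, which is applied either lazily on the next llama_decode() or explicitly through the update call declared just below. A usage sketch mirroring the passkey example earlier in this patch (n_keep, n_discard and n_past are caller-side values, so this is illustrative only):

    #include "llama.h"

    // free space in sequence 0 by discarding its oldest tokens and shifting the rest back
    static void shift_context(llama_context * ctx, int n_keep, int n_discard, int & n_past) {
        llama_kv_self_seq_rm (ctx, 0, n_keep,             n_keep + n_discard);
        llama_kv_self_seq_add(ctx, 0, n_keep + n_discard, n_past, -n_discard);
        llama_kv_self_update (ctx);  // or let the next llama_decode() apply the pending shift
        n_past = llama_kv_self_seq_pos_max(ctx, 0) + 1;
    }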
- LLAMA_API void llama_kv_cache_update(struct llama_context * ctx); + LLAMA_API void llama_kv_self_update(struct llama_context * ctx); + + DEPRECATED(LLAMA_API void llama_kv_cache_clear( + struct llama_context * ctx), + "use llama_kv_self_clear instead"); + + DEPRECATED(LLAMA_API bool llama_kv_cache_seq_rm( + struct llama_context * ctx, + llama_seq_id seq_id, + llama_pos p0, + llama_pos p1), + "use llama_kv_self_seq_rm instead"); + + DEPRECATED(LLAMA_API void llama_kv_cache_seq_cp( + struct llama_context * ctx, + llama_seq_id seq_id_src, + llama_seq_id seq_id_dst, + llama_pos p0, + llama_pos p1), + "use llama_kv_self_seq_cp instead"); + + DEPRECATED(LLAMA_API void llama_kv_cache_seq_keep( + struct llama_context * ctx, + llama_seq_id seq_id), + "use llama_kv_self_seq_keep instead"); + + DEPRECATED(LLAMA_API void llama_kv_cache_seq_add( + struct llama_context * ctx, + llama_seq_id seq_id, + llama_pos p0, + llama_pos p1, + llama_pos delta), + "use llama_kv_self_seq_add instead"); + + DEPRECATED(LLAMA_API void llama_kv_cache_seq_div( + struct llama_context * ctx, + llama_seq_id seq_id, + llama_pos p0, + llama_pos p1, + int d), + "use llama_kv_self_seq_div instead"); + + DEPRECATED(LLAMA_API llama_pos llama_kv_cache_seq_pos_max( + struct llama_context * ctx, + llama_seq_id seq_id), + "use llama_kv_self_seq_pos_max instead"); + + DEPRECATED(LLAMA_API void llama_kv_cache_defrag(struct llama_context * ctx), + "use llama_kv_self_defrag instead"); + + DEPRECATED(LLAMA_API bool llama_kv_cache_can_shift(const struct llama_context * ctx), + "use llama_kv_self_can_shift instead"); + + DEPRECATED(LLAMA_API void llama_kv_cache_update(struct llama_context * ctx), + "use llama_kv_self_update instead"); - // Check if the context supports KV cache shifting - LLAMA_API bool llama_kv_cache_can_shift(struct llama_context * ctx); // // State / sessions diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index e1b02e4c08f07..b340dae5b28cd 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -15,18 +15,21 @@ add_library(llama llama-chat.cpp llama-context.cpp llama-grammar.cpp + llama-graph.cpp llama-hparams.cpp llama-impl.cpp + llama-io.cpp llama-kv-cache.cpp + llama-memory.cpp llama-mmap.cpp llama-model-loader.cpp llama-model.cpp llama-quant.cpp llama-sampling.cpp llama-vocab.cpp - unicode.h - unicode.cpp unicode-data.cpp + unicode.cpp + unicode.h ) target_include_directories(llama PUBLIC . 
../include ../common) diff --git a/src/llama-adapter.cpp b/src/llama-adapter.cpp index 8a0800463137e..b448614e471d6 100644 --- a/src/llama-adapter.cpp +++ b/src/llama-adapter.cpp @@ -4,14 +4,13 @@ #include "llama-mmap.h" #include "llama-model.h" -#include #include #include #include // vec -struct ggml_tensor * llama_adapter_cvec::tensor_for(int il) const { +ggml_tensor * llama_adapter_cvec::tensor_for(int il) const { if (il < 0 || il < layer_start || il > layer_end || (size_t) il >= tensors.size()) { return nullptr; } @@ -19,7 +18,7 @@ struct ggml_tensor * llama_adapter_cvec::tensor_for(int il) const { return tensors[il]; } -struct ggml_tensor * llama_adapter_cvec::apply_to(struct ggml_context * ctx, struct ggml_tensor * cur, int il) const { +ggml_tensor * llama_adapter_cvec::apply_to(ggml_context * ctx, ggml_tensor * cur, int il) const { ggml_tensor * layer_dir = tensor_for(il); if (layer_dir != nullptr) { cur = ggml_add(ctx, cur, layer_dir); @@ -40,7 +39,7 @@ bool llama_adapter_cvec::init(const llama_model & model) { auto ctx_for_buft = [&](ggml_backend_buffer_type_t buft) -> ggml_context * { auto it = ctx_map.find(buft); if (it == ctx_map.end()) { - struct ggml_init_params params = { + ggml_init_params params = { /*.mem_size =*/ hparams.n_layer*ggml_tensor_overhead(), /*.mem_buffer =*/ NULL, /*.no_alloc =*/ true, @@ -91,7 +90,7 @@ bool llama_adapter_cvec::init(const llama_model & model) { return true; } -int32_t llama_adapter_cvec::apply( +bool llama_adapter_cvec::apply( const llama_model & model, const float * data, size_t len, @@ -104,17 +103,17 @@ int32_t llama_adapter_cvec::apply( // disable the current control vector (but leave allocated for later) layer_start = -1; layer_end = -1; - return 0; + return true; } if (n_embd != (int) hparams.n_embd) { LLAMA_LOG_ERROR("%s: control vector n_embd does not match model\n", __func__); - return 1; + return false; } if (tensors.empty()) { if (!init(model)) { - return 1; + return false; } } @@ -130,12 +129,12 @@ int32_t llama_adapter_cvec::apply( } } - return 0; + return true; } // lora -llama_adapter_lora_weight * llama_adapter_lora::get_weight(struct ggml_tensor * w) { +llama_adapter_lora_weight * llama_adapter_lora::get_weight(ggml_tensor * w) { const std::string name(w->name); const auto pos = ab_map.find(name); @@ -146,11 +145,11 @@ llama_adapter_lora_weight * llama_adapter_lora::get_weight(struct ggml_tensor * return nullptr; } -static void llama_adapter_lora_init_impl(struct llama_model & model, const char * path_lora, struct llama_adapter_lora & adapter) { +static void llama_adapter_lora_init_impl(llama_model & model, const char * path_lora, llama_adapter_lora & adapter) { LLAMA_LOG_INFO("%s: loading lora adapter from '%s' ...\n", __func__, path_lora); ggml_context * ctx_init; - struct gguf_init_params meta_gguf_params = { + gguf_init_params meta_gguf_params = { /* .no_alloc = */ true, /* .ctx = */ &ctx_init, }; @@ -201,7 +200,7 @@ static void llama_adapter_lora_init_impl(struct llama_model & model, const char auto it = ctx_map.find(buft); if (it == ctx_map.end()) { // add a new context - struct ggml_init_params params = { + ggml_init_params params = { /*.mem_size =*/ n_tensors*ggml_tensor_overhead(), /*.mem_buffer =*/ NULL, /*.no_alloc =*/ true, @@ -264,7 +263,7 @@ static void llama_adapter_lora_init_impl(struct llama_model & model, const char throw std::runtime_error("LoRA tensor '" + name + "' does not exist in base model (hint: maybe wrong base model?)"); } - struct ggml_context * dev_ctx = 
ctx_for_buft(ggml_backend_buffer_get_type(model_tensor->buffer)); + ggml_context * dev_ctx = ctx_for_buft(ggml_backend_buffer_get_type(model_tensor->buffer)); // validate tensor shape if (is_token_embd) { // expect B to be non-transposed, A and B are flipped; see llm_build_inp_embd() @@ -281,8 +280,8 @@ static void llama_adapter_lora_init_impl(struct llama_model & model, const char } // save tensor to adapter - struct ggml_tensor * tensor_a = ggml_dup_tensor(dev_ctx, w.a); - struct ggml_tensor * tensor_b = ggml_dup_tensor(dev_ctx, w.b); + ggml_tensor * tensor_a = ggml_dup_tensor(dev_ctx, w.a); + ggml_tensor * tensor_b = ggml_dup_tensor(dev_ctx, w.b); ggml_set_name(tensor_a, w.a->name); ggml_set_name(tensor_b, w.b->name); adapter.ab_map[name] = llama_adapter_lora_weight(tensor_a, tensor_b); @@ -308,7 +307,7 @@ static void llama_adapter_lora_init_impl(struct llama_model & model, const char { llama_file gguf_file(path_lora, "rb"); std::vector read_buf; - auto set_tensor = [&](struct ggml_tensor * orig, struct ggml_tensor * dev) { + auto set_tensor = [&](ggml_tensor * orig, ggml_tensor * dev) { size_t offs = gguf_get_data_offset(ctx_gguf.get()) + gguf_get_tensor_offset(ctx_gguf.get(), gguf_find_tensor(ctx_gguf.get(), orig->name)); size_t size = ggml_nbytes(orig); read_buf.resize(size); @@ -327,8 +326,8 @@ static void llama_adapter_lora_init_impl(struct llama_model & model, const char LLAMA_LOG_INFO("%s: loaded %zu tensors from lora file\n", __func__, adapter.ab_map.size()*2); } -struct llama_adapter_lora * llama_adapter_lora_init(struct llama_model * model, const char * path_lora) { - struct llama_adapter_lora * adapter = new llama_adapter_lora(); +llama_adapter_lora * llama_adapter_lora_init(llama_model * model, const char * path_lora) { + llama_adapter_lora * adapter = new llama_adapter_lora(); try { llama_adapter_lora_init_impl(*model, path_lora, *adapter); @@ -342,6 +341,6 @@ struct llama_adapter_lora * llama_adapter_lora_init(struct llama_model * model, return nullptr; } -void llama_adapter_lora_free(struct llama_adapter_lora * adapter) { +void llama_adapter_lora_free(llama_adapter_lora * adapter) { delete adapter; } diff --git a/src/llama-adapter.h b/src/llama-adapter.h index 603fa08f6d186..65824e972765b 100644 --- a/src/llama-adapter.h +++ b/src/llama-adapter.h @@ -15,11 +15,11 @@ // struct llama_adapter_cvec { - struct ggml_tensor * tensor_for(int il) const; + ggml_tensor * tensor_for(int il) const; - struct ggml_tensor * apply_to(struct ggml_context * ctx, struct ggml_tensor * cur, int il) const; + ggml_tensor * apply_to(ggml_context * ctx, ggml_tensor * cur, int il) const; - int32_t apply( + bool apply( const llama_model & model, const float * data, size_t len, @@ -36,7 +36,7 @@ struct llama_adapter_cvec { std::vector ctxs; std::vector bufs; - std::vector tensors; // per layer + std::vector tensors; // per layer }; // @@ -44,8 +44,8 @@ struct llama_adapter_cvec { // struct llama_adapter_lora_weight { - struct ggml_tensor * a = nullptr; - struct ggml_tensor * b = nullptr; + ggml_tensor * a = nullptr; + ggml_tensor * b = nullptr; // get actual scale based on rank and alpha float get_scale(float alpha, float adapter_scale) const { @@ -55,12 +55,12 @@ struct llama_adapter_lora_weight { } llama_adapter_lora_weight() = default; - llama_adapter_lora_weight(struct ggml_tensor * a, struct ggml_tensor * b) : a(a), b(b) {} + llama_adapter_lora_weight(ggml_tensor * a, ggml_tensor * b) : a(a), b(b) {} }; struct llama_adapter_lora { // map tensor name to lora_a_b - std::unordered_map ab_map; + 
std::unordered_map ab_map; std::vector ctxs; std::vector bufs; @@ -70,5 +70,7 @@ struct llama_adapter_lora { llama_adapter_lora() = default; ~llama_adapter_lora() = default; - llama_adapter_lora_weight * get_weight(struct ggml_tensor * w); + llama_adapter_lora_weight * get_weight(ggml_tensor * w); }; + +using llama_adapter_loras = std::unordered_map; diff --git a/src/llama-batch.h b/src/llama-batch.h index 773c3808b770f..f1df40d27086e 100644 --- a/src/llama-batch.h +++ b/src/llama-batch.h @@ -42,9 +42,9 @@ struct llama_sbatch { bool logits_all; // TODO: remove once lctx.logits_all is removed too // sorted indices into the batch - std::vector ids; + std::vector ids; // batch indices of the output - std::vector out_ids; + std::vector out_ids; std::vector seq; const llama_batch * batch = nullptr; diff --git a/src/llama-context.cpp b/src/llama-context.cpp index 671d2a81adabf..0a43a3af8e003 100644 --- a/src/llama-context.cpp +++ b/src/llama-context.cpp @@ -1,732 +1,832 @@ #include "llama-context.h" #include "llama-impl.h" +#include "llama-io.h" #include "llama-mmap.h" +#include "llama-model.h" +#include "llama-kv-cache.h" #include -#include #include #include +#include -void llama_set_k_shift(struct llama_context & lctx) { - const int64_t kv_size = lctx.kv_self.size; +// +// llama_context +// - assert(ggml_backend_buffer_is_host(lctx.inp_K_shift->buffer)); +llama_context::llama_context( + const llama_model & model, + llama_context_params params) : + model(model) { + LLAMA_LOG_INFO("%s: constructing llama_context\n", __func__); - int32_t * data = (int32_t *) lctx.inp_K_shift->data; + t_start_us = model.t_start_us; + t_load_us = model.t_load_us; - for (int i = 0; i < kv_size; ++i) { - data[i] = lctx.kv_self.cells[i].delta; - } -} + const auto & hparams = model.hparams; -void llama_set_s_copy(struct llama_context & lctx) { - const int64_t kv_size = lctx.kv_self.size; + cparams.n_seq_max = std::max(1u, params.n_seq_max); + cparams.n_threads = params.n_threads; + cparams.n_threads_batch = params.n_threads_batch; + cparams.yarn_ext_factor = params.yarn_ext_factor; + cparams.yarn_attn_factor = params.yarn_attn_factor; + cparams.yarn_beta_fast = params.yarn_beta_fast; + cparams.yarn_beta_slow = params.yarn_beta_slow; + cparams.defrag_thold = params.defrag_thold; + cparams.embeddings = params.embeddings; + cparams.offload_kqv = params.offload_kqv; + cparams.flash_attn = params.flash_attn; + cparams.no_perf = params.no_perf; + cparams.pooling_type = params.pooling_type; - assert(ggml_backend_buffer_is_host(lctx.inp_s_copy->buffer)); + cparams.n_ctx = params.n_ctx == 0 ? hparams.n_ctx_train : params.n_ctx; + cparams.rope_freq_base = params.rope_freq_base == 0.0f ? hparams.rope_freq_base_train : params.rope_freq_base; + cparams.rope_freq_scale = params.rope_freq_scale == 0.0f ? hparams.rope_freq_scale_train : params.rope_freq_scale; - int32_t * data = (int32_t *) lctx.inp_s_copy->data; + cparams.n_ctx_orig_yarn = params.yarn_orig_ctx != 0 ? params.yarn_orig_ctx : + hparams.n_ctx_orig_yarn != 0 ? 
hparams.n_ctx_orig_yarn : + hparams.n_ctx_train; - for (int i = 0; i < kv_size; ++i) { - data[i] = lctx.kv_self.cells[i].src; - } -} + cparams.cb_eval = params.cb_eval; + cparams.cb_eval_user_data = params.cb_eval_user_data; -// llama input + auto rope_scaling_type = params.rope_scaling_type; + if (rope_scaling_type == LLAMA_ROPE_SCALING_TYPE_UNSPECIFIED) { + rope_scaling_type = hparams.rope_scaling_type_train; + } -static int32_t llama_relative_position_bucket(llama_pos x, llama_pos y, uint64_t n_buckets, bool bidirectional) { - // TODO move to hparams if a T5 variant appears that uses a different value - const int64_t max_distance = 128; + if (rope_scaling_type == LLAMA_ROPE_SCALING_TYPE_NONE) { + cparams.rope_freq_scale = 1.0f; // never scale if scaling type is none + } - if (bidirectional) { - n_buckets >>= 1; + if (cparams.yarn_ext_factor < 0.0f) { // negative indicates 'not set' + cparams.yarn_ext_factor = rope_scaling_type == LLAMA_ROPE_SCALING_TYPE_YARN ? 1.0f : 0.0f; } - const int64_t max_exact = n_buckets >> 1; + cparams.yarn_attn_factor *= hparams.rope_attn_factor; - int32_t relative_position = x - y; - int32_t relative_bucket = 0; - if (bidirectional) { - relative_bucket += (relative_position > 0) * n_buckets; - relative_position = abs(relative_position); + if (cparams.pooling_type == LLAMA_POOLING_TYPE_UNSPECIFIED) { + if (hparams.pooling_type == LLAMA_POOLING_TYPE_UNSPECIFIED) { + cparams.pooling_type = LLAMA_POOLING_TYPE_NONE; + } else { + cparams.pooling_type = hparams.pooling_type; + } + } + + if (params.attention_type == LLAMA_ATTENTION_TYPE_UNSPECIFIED) { + cparams.causal_attn = hparams.causal_attn; } else { - relative_position = -std::min(relative_position, 0); + cparams.causal_attn = params.attention_type == LLAMA_ATTENTION_TYPE_CAUSAL; } - int32_t relative_position_if_large = floorf(max_exact + logf(1.0 * relative_position / max_exact) * (n_buckets - max_exact) / log(1.0 * max_distance / max_exact)); - relative_position_if_large = std::min(relative_position_if_large, n_buckets - 1); - relative_bucket += (relative_position < max_exact ? relative_position : relative_position_if_large); - return relative_bucket; -} -void llama_set_inputs(llama_context & lctx, const llama_ubatch & ubatch) { - // - // set input data - // + // with causal attention, the batch size is limited by the context size + cparams.n_batch = cparams.causal_attn ? std::min(cparams.n_ctx, params.n_batch) : params.n_batch; - const auto & hparams = lctx.model.hparams; - const auto & cparams = lctx.cparams; - const auto & kv_self = lctx.kv_self; + // the batch has to be at least GGML_KQ_MASK_PAD because we will be padding the KQ_mask + // this is required by GPU kernels in order to avoid out-of-bounds accesses (e.g. ggml_flash_attn_ext) + // ref: https://github.com/ggerganov/llama.cpp/pull/5021 + // TODO: this padding is not needed for the cache-less context so we should probably move it to llama_context_kv_self + if (cparams.n_batch < GGML_KQ_MASK_PAD) { + LLAMA_LOG_WARN("%s: n_batch is less than GGML_KQ_MASK_PAD - increasing to %d\n", __func__, GGML_KQ_MASK_PAD); + cparams.n_batch = GGML_KQ_MASK_PAD; + } - if (ubatch.token) { - const int64_t n_tokens = ubatch.n_tokens; + cparams.n_ubatch = std::min(cparams.n_batch, params.n_ubatch == 0 ? 
params.n_batch : params.n_ubatch); - ggml_backend_tensor_set(lctx.inp_tokens, ubatch.token, 0, n_tokens*ggml_element_size(lctx.inp_tokens)); - } + const uint32_t n_ctx_per_seq = cparams.n_ctx / cparams.n_seq_max; - if (ubatch.embd) { - const int64_t n_embd = hparams.n_embd; - const int64_t n_tokens = ubatch.n_tokens; + LLAMA_LOG_INFO("%s: n_seq_max = %u\n", __func__, cparams.n_seq_max); + LLAMA_LOG_INFO("%s: n_ctx = %u\n", __func__, cparams.n_ctx); + LLAMA_LOG_INFO("%s: n_ctx_per_seq = %u\n", __func__, n_ctx_per_seq); + LLAMA_LOG_INFO("%s: n_batch = %u\n", __func__, cparams.n_batch); + LLAMA_LOG_INFO("%s: n_ubatch = %u\n", __func__, cparams.n_ubatch); + LLAMA_LOG_INFO("%s: causal_attn = %d\n", __func__, cparams.causal_attn); + LLAMA_LOG_INFO("%s: flash_attn = %d\n", __func__, cparams.flash_attn); + LLAMA_LOG_INFO("%s: freq_base = %.1f\n", __func__, cparams.rope_freq_base); + LLAMA_LOG_INFO("%s: freq_scale = %g\n", __func__, cparams.rope_freq_scale); - ggml_backend_tensor_set(lctx.inp_embd, ubatch.embd, 0, n_tokens*n_embd*ggml_element_size(lctx.inp_embd)); + if (n_ctx_per_seq < hparams.n_ctx_train) { + LLAMA_LOG_WARN("%s: n_ctx_per_seq (%u) < n_ctx_train (%u) -- the full capacity of the model will not be utilized\n", + __func__, n_ctx_per_seq, hparams.n_ctx_train); } - if (ubatch.pos && lctx.inp_pos) { - const int64_t n_tokens = ubatch.n_tokens; - auto n_pos = lctx.n_pos_per_token; - ggml_backend_tensor_set(lctx.inp_pos, ubatch.pos, 0, n_tokens*n_pos*ggml_element_size(lctx.inp_pos)); + if (n_ctx_per_seq > hparams.n_ctx_train) { + LLAMA_LOG_WARN("%s: n_ctx_pre_seq (%u) > n_ctx_train (%u) -- possible training context overflow\n", + __func__, n_ctx_per_seq, hparams.n_ctx_train); } - if (hparams.causal_attn || cparams.pooling_type == LLAMA_POOLING_TYPE_NONE) { - //GGML_ASSERT(lctx.inp_out_ids && "every model that can must skip unused outputs"); - - if (!lctx.inp_out_ids) { - LLAMA_LOG_WARN("%s: 'lctx.inp_out_ids' is not created\n", __func__); - } else { - const int64_t n_tokens = ubatch.n_tokens; + logits_all = params.logits_all; - GGML_ASSERT(ggml_backend_buffer_is_host(lctx.inp_out_ids->buffer)); - int32_t * data = (int32_t *) lctx.inp_out_ids->data; + if (!hparams.vocab_only) { + // GPU backends + for (auto * dev : model.devices) { + ggml_backend_t backend = ggml_backend_dev_init(dev, nullptr); + if (backend == nullptr) { + throw std::runtime_error(format("failed to initialize %s backend", ggml_backend_dev_name(dev))); + } + backends.emplace_back(backend); + } - if (lctx.n_outputs == n_tokens) { - for (int i = 0; i < n_tokens; ++i) { - data[i] = i; + // add ACCEL backends (such as BLAS) + for (size_t i = 0; i < ggml_backend_dev_count(); ++i) { + ggml_backend_dev_t dev = ggml_backend_dev_get(i); + if (ggml_backend_dev_type(dev) == GGML_BACKEND_DEVICE_TYPE_ACCEL) { + ggml_backend_t backend = ggml_backend_dev_init(dev, nullptr); + if (backend == nullptr) { + throw std::runtime_error(format("failed to initialize %s backend", ggml_backend_dev_name(dev))); } - } else if (ubatch.output) { - int32_t n_outputs = 0; - for (int i = 0; i < n_tokens; ++i) { - if (ubatch.output[i]) { - data[n_outputs++] = i; - } + backends.emplace_back(backend); + } + } + + // add CPU backend + backend_cpu = ggml_backend_init_by_type(GGML_BACKEND_DEVICE_TYPE_CPU, nullptr); + if (backend_cpu == nullptr) { + throw std::runtime_error("failed to initialize CPU backend"); + } + backends.emplace_back(backend_cpu); + + // create a list of the set_n_threads functions in the backends + for (auto & backend : backends) { + 
ggml_backend_dev_t dev = ggml_backend_get_device(backend.get()); + ggml_backend_reg_t reg = dev ? ggml_backend_dev_backend_reg(dev) : nullptr; + if (reg) { + auto ggml_backend_set_n_threads_fn = (ggml_backend_set_n_threads_t) ggml_backend_reg_get_proc_address(reg, "ggml_backend_set_n_threads"); + if (ggml_backend_set_n_threads_fn) { + set_n_threads_fns.emplace_back(backend.get(), ggml_backend_set_n_threads_fn); } - // the graph needs to have been passed the correct number of outputs - GGML_ASSERT(lctx.n_outputs == n_outputs); - } else if (lctx.n_outputs == 1) { - // only keep last output - data[0] = n_tokens - 1; - } else { - GGML_ASSERT(lctx.n_outputs == 0); } } - } - GGML_ASSERT( - // (!a || b) is a logical implication (a -> b) - // !hparams.causal_attn -> !cparams.causal_attn - (hparams.causal_attn || !cparams.causal_attn) && - "causal attention is not supported by this model" - ); + llama_set_abort_callback(this, params.abort_callback, params.abort_callback_data); - if (lctx.inp_KQ_mask || lctx.inp_KQ_mask_swa) { - // NOTE: hparams.causal_attn indicates the model is capable of generation and uses the kv cache. - if (cparams.causal_attn && !lctx.is_encoding) { - const int64_t n_kv = kv_self.n; - const int64_t n_tokens = ubatch.n_tokens; - const int64_t n_seq_tokens = ubatch.n_seq_tokens; - const int64_t n_seqs = ubatch.n_seqs; + // graph outputs buffer + { + // resized during inference when a batch uses more outputs + if ((uint32_t) output_reserve(params.n_seq_max) < params.n_seq_max) { + throw std::runtime_error("failed to reserve initial output buffer"); + } + LLAMA_LOG_INFO("%s: %10s output buffer size = %8.2f MiB\n", __func__, + ggml_backend_buffer_name (buf_output.get()), + ggml_backend_buffer_get_size(buf_output.get()) / 1024.0 / 1024.0); + } + } - float * data = nullptr; - float * data_swa = nullptr; + // init the memory module + // TODO: for now, always create a unified KV cache + if (!hparams.vocab_only) { + kv_self.reset(static_cast(model.create_memory())); - if (lctx.inp_KQ_mask) { - GGML_ASSERT(ggml_backend_buffer_is_host(lctx.inp_KQ_mask->buffer)); - data = (float *) lctx.inp_KQ_mask->data; - } + LLAMA_LOG_DEBUG("%s: n_ctx = %u\n", __func__, cparams.n_ctx); - if (lctx.inp_KQ_mask_swa) { - GGML_ASSERT(ggml_backend_buffer_is_host(lctx.inp_KQ_mask_swa->buffer)); - data_swa = (float *) lctx.inp_KQ_mask_swa->data; - } + cparams.n_ctx = GGML_PAD(cparams.n_ctx, kv_self->get_padding(cparams)); - // For causal attention, use only the previous KV cells - // of the correct sequence for each token of the ubatch. - // It's assumed that if a token in the batch has multiple sequences, they are equivalent. 
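The rule these removed comments describe, and the loop just below implemented, boils down to a per-cell visibility check; a sketch (not part of the patch, using the cell fields the old code reads — the same logic presumably moves into the new llm_graph_input helpers):

    #include <cmath>    // INFINITY
    #include <cstdlib>  // std::abs

    // sketch: a KV cell contributes to a token's attention only if it belongs to the
    // same sequence and does not lie in the token's future; with ALiBi the mask value
    // is the negative distance instead of 0.
    static float kq_mask_value(const llama_kv_cell & cell, llama_seq_id seq_id, llama_pos pos, bool use_alibi) {
        if (!cell.has_seq_id(seq_id) || cell.pos > pos) {
            return -INFINITY;
        }
        return use_alibi ? -float(std::abs(cell.pos - pos)) : 0.0f;
    }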
- for (int h = 0; h < 1; ++h) { - for (int s = 0; s < n_seqs; ++s) { - const llama_seq_id seq_id = ubatch.seq_id[s][0]; - - for (int j = 0; j < n_seq_tokens; ++j) { - const llama_pos pos = ubatch.pos[s*n_seq_tokens + j]; - - for (int i = 0; i < n_kv; ++i) { - float f; - if (!kv_self.cells[i].has_seq_id(seq_id) || kv_self.cells[i].pos > pos) { - f = -INFINITY; - } else { - if (hparams.use_alibi) { - f = -std::abs(kv_self.cells[i].pos - pos); - } else { - f = 0.0f; - } - } + LLAMA_LOG_DEBUG("%s: n_ctx = %u (padded)\n", __func__, cparams.n_ctx); - if (data) { - data[h*(n_kv*n_tokens) + s*(n_kv*n_seq_tokens) + j*n_kv + i] = f; - } + uint32_t kv_size = cparams.n_ctx; + ggml_type type_k = params.type_k; + ggml_type type_v = params.type_v; - // may need to cut off old tokens for sliding window - if (data_swa) { - if (pos - kv_self.cells[i].pos >= (int32_t)hparams.n_swa) { - f = -INFINITY; - } - data_swa[h*(n_kv*n_tokens) + s*(n_kv*n_seq_tokens) + j*n_kv + i] = f; - } - } - } - } + if (llama_model_is_recurrent(&model)) { + // Mamba needs at least as many KV cells as there are sequences kept at any time + kv_size = std::max((uint32_t) 1, params.n_seq_max); + // it's probably best to keep as much precision as possible for the states + type_k = GGML_TYPE_F32; // required by ggml_ssm_conv for Mamba's conv_states + type_v = GGML_TYPE_F32; // required by ggml_ssm_scan for Mamba's ssm_states + } - if (data) { - for (int i = n_tokens; i < GGML_PAD(n_tokens, GGML_KQ_MASK_PAD); ++i) { - for (int j = 0; j < n_kv; ++j) { - data[h*(n_kv*n_tokens) + i*n_kv + j] = -INFINITY; - } - } - } + GGML_ASSERT(hparams.n_embd_head_k % ggml_blck_size(type_k) == 0); + GGML_ASSERT(hparams.n_embd_head_v % ggml_blck_size(type_v) == 0); - if (data_swa) { - for (int i = n_tokens; i < GGML_PAD(n_tokens, GGML_KQ_MASK_PAD); ++i) { - for (int j = 0; j < n_kv; ++j) { - data_swa[h*(n_kv*n_tokens) + i*n_kv + j] = -INFINITY; - } - } - } - } - } else { - const int64_t n_tokens = ubatch.n_tokens; - const int64_t n_seq_tokens = ubatch.n_seq_tokens; - const int64_t n_seqs = ubatch.n_seqs; - // when using kv cache, the mask needs to match the kv cache size - const int64_t n_stride = hparams.causal_attn && !lctx.is_encoding ? 
kv_self.n : n_tokens; - - GGML_ASSERT(ggml_backend_buffer_is_host(lctx.inp_KQ_mask->buffer)); - - float * data = (float *) lctx.inp_KQ_mask->data; - - for (int h = 0; h < 1; ++h) { - for (int s1 = 0; s1 < n_seqs; ++s1) { - const llama_seq_id seq_id = ubatch.seq_id[s1][0]; - - for (int j = 0; j < n_seq_tokens; ++j) { - const int32_t tj = s1*n_seq_tokens + j; - - for (int s0 = 0; s0 < n_seqs; ++s0) { - for (int i = 0; i < n_seq_tokens; ++i) { - const int32_t ti = s0*n_seq_tokens + i; - float f = -INFINITY; - - for (int s = 0; s < ubatch.n_seq_id[s0]; ++s) { - if (ubatch.seq_id[s0][s] == seq_id) { - if (hparams.use_alibi) { - f = -std::abs(ubatch.pos[ti] - ubatch.pos[tj]); - } else { - f = 0.0f; - } - break; - } - } - - data[h*(n_tokens*n_tokens) + tj*n_stride + ti] = f; - } - } + if (!kv_self->init(model, cparams, type_k, type_v, kv_size, cparams.offload_kqv)) { + throw std::runtime_error("failed to initialize self-attention cache"); + } - for (int i = n_tokens; i < n_stride; ++i) { - data[h*(n_tokens*n_tokens) + tj*n_stride + i] = -INFINITY; - } - } - } - } + { + const size_t memory_size_k = kv_self->size_k_bytes(); + const size_t memory_size_v = kv_self->size_v_bytes(); + + LLAMA_LOG_INFO("%s: KV self size = %7.2f MiB, K (%s): %7.2f MiB, V (%s): %7.2f MiB\n", __func__, + (float)(memory_size_k + memory_size_v) / (1024.0f * 1024.0f), + ggml_type_name(type_k), (float)memory_size_k / (1024.0f * 1024.0f), + ggml_type_name(type_v), (float)memory_size_v / (1024.0f * 1024.0f)); } } - if (cparams.embeddings && cparams.pooling_type == LLAMA_POOLING_TYPE_MEAN) { - const int64_t n_tokens = ubatch.n_tokens; - const int64_t n_seq_tokens = ubatch.n_seq_tokens; - const int64_t n_seqs = ubatch.n_seqs; + // init backends + if (!hparams.vocab_only) { + LLAMA_LOG_DEBUG("%s: enumerating backends\n", __func__); - GGML_ASSERT(lctx.inp_mean); - GGML_ASSERT(ggml_backend_buffer_is_host(lctx.inp_mean->buffer)); + backend_buft.clear(); + backend_ptrs.clear(); - float * data = (float *) lctx.inp_mean->data; - memset(lctx.inp_mean->data, 0, n_tokens * n_tokens * ggml_element_size(lctx.inp_mean)); + for (auto & backend : backends) { + auto * buft = ggml_backend_get_default_buffer_type(backend.get()); + auto backend_type = ggml_backend_dev_type(ggml_backend_get_device(backend.get())); - std::vector sum(n_tokens, 0); + if (backend_type == GGML_BACKEND_DEVICE_TYPE_CPU && !model.devices.empty()) { + // use the host buffer of the first device CPU for faster transfer of the intermediate state + auto * dev = model.devices[0]; + auto * host_buft = ggml_backend_dev_host_buffer_type(dev); + if (host_buft) { + buft = host_buft; + } + } - for (int s = 0; s < n_seqs; ++s) { - const llama_seq_id seq_id = ubatch.seq_id[s][0]; + backend_buft.push_back(buft); + backend_ptrs.push_back(backend.get()); + } - // TODO: adapt limits to n_seqs when ubatch.equal_seqs is true - GGML_ASSERT(seq_id < n_tokens && "seq_id cannot be larger than n_tokens with pooling_type == MEAN"); + LLAMA_LOG_DEBUG("%s: backend_ptrs.size() = %zu\n", __func__, backend_ptrs.size()); - sum[seq_id] += ubatch.n_seq_tokens; - } + const size_t max_nodes = this->graph_max_nodes(); - std::vector div(n_tokens, 0.0f); - for (int i = 0; i < n_tokens; ++i) { - const uint64_t s = sum[i]; - if (s > 0) { - div[i] = 1.0f/float(s); + LLAMA_LOG_DEBUG("%s: max_nodes = %zu\n", __func__, max_nodes); + + // buffer used to store the computation graph and the tensor meta data + buf_compute_meta.resize(ggml_tensor_overhead()*max_nodes + ggml_graph_overhead_custom(max_nodes, false)); + + // 
TODO: move these checks to ggml_backend_sched + // enabling pipeline parallelism in the scheduler increases memory usage, so it is only done when necessary + bool pipeline_parallel = + model.n_devices() > 1 && + model.params.n_gpu_layers > (int) model.hparams.n_layer && + model.params.split_mode == LLAMA_SPLIT_MODE_LAYER && + cparams.offload_kqv; + + // pipeline parallelism requires support for async compute and events in all devices + if (pipeline_parallel) { + for (auto & backend : backends) { + auto dev_type = ggml_backend_dev_type(ggml_backend_get_device(backend.get())); + if (dev_type == GGML_BACKEND_DEVICE_TYPE_CPU) { + // ignore CPU backend + continue; + } + auto * dev = ggml_backend_get_device(backend.get()); + ggml_backend_dev_props props; + ggml_backend_dev_get_props(dev, &props); + if (!props.caps.async || !props.caps.events) { + // device does not support async compute or events + pipeline_parallel = false; + break; + } } } - for (int s = 0; s < n_seqs; ++s) { - const llama_seq_id seq_id = ubatch.seq_id[s][0]; + sched.reset(ggml_backend_sched_new(backend_ptrs.data(), backend_buft.data(), backend_ptrs.size(), max_nodes, pipeline_parallel)); - for (int i = 0; i < n_seq_tokens; ++i) { - data[seq_id*n_tokens + s*n_seq_tokens + i] = div[seq_id]; - } + if (pipeline_parallel) { + LLAMA_LOG_INFO("%s: pipeline parallelism enabled (n_copies=%d)\n", __func__, ggml_backend_sched_get_n_copies(sched.get())); } } - if (cparams.embeddings && ( - cparams.pooling_type == LLAMA_POOLING_TYPE_CLS || - cparams.pooling_type == LLAMA_POOLING_TYPE_RANK)) { - const int64_t n_tokens = ubatch.n_tokens; - const int64_t n_seq_tokens = ubatch.n_seq_tokens; - const int64_t n_seqs = ubatch.n_seqs; + // reserve worst-case graph + if (!hparams.vocab_only) { + uint32_t n_seqs = 1; // TODO: worst-case number of sequences + uint32_t n_tokens = std::min(cparams.n_ctx, cparams.n_ubatch); - GGML_ASSERT(lctx.inp_cls); - GGML_ASSERT(ggml_backend_buffer_is_host(lctx.inp_cls->buffer)); + llama_token token = model.vocab.token_bos(); // not actually used by llama_build_graph, but required to choose between token and embedding inputs graph - uint32_t * data = (uint32_t *) lctx.inp_cls->data; - memset(lctx.inp_cls->data, 0, n_tokens * ggml_element_size(lctx.inp_cls)); + // max number of outputs + n_outputs = n_tokens; - for (int s = 0; s < n_seqs; ++s) { - const llama_seq_id seq_id = ubatch.seq_id[s][0]; + LLAMA_LOG_DEBUG("%s: n_tokens = %d, n_seqs = %d, n_outputs = %d\n", __func__, n_tokens, n_seqs, n_outputs); - // TODO: adapt limits to n_seqs when ubatch.equal_seqs is true - GGML_ASSERT(seq_id < n_tokens && "seq_id cannot be larger than n_tokens with pooling_type == CLS or RANK"); + int n_splits_pp = -1; + int n_nodes_pp = -1; - for (int i = 0; i < n_seq_tokens; ++i) { - const llama_pos pos = ubatch.pos[s*n_seq_tokens + i]; + int n_splits_tg = -1; + int n_nodes_tg = -1; - if (pos == 0) { - data[seq_id] = s*n_seq_tokens + i; - } + // simulate full KV cache + kv_self->n = kv_self->size; + + cross.v_embd.clear(); + + // reserve pp graph first so that buffers are only allocated once + { + llama_ubatch ubatch_pp = { true, n_tokens, n_tokens / n_seqs, n_seqs, &token, nullptr, nullptr, nullptr, nullptr, nullptr}; + auto * gf = graph_init(); + graph_build(ctx_compute.get(), gf, ubatch_pp, LLM_GRAPH_TYPE_DEFAULT); + if (!ggml_backend_sched_reserve(sched.get(), gf)) { + throw std::runtime_error("failed to allocate compute pp buffers"); } + + n_splits_pp = ggml_backend_sched_get_n_splits(sched.get()); + n_nodes_pp = 
ggml_graph_n_nodes(gf); + } + + // reserve with tg graph to get the number of splits and nodes + { + llama_ubatch ubatch_tg = { true, 1, 1, n_seqs, &token, nullptr, nullptr, nullptr, nullptr, nullptr}; + auto * gf = graph_init(); + graph_build(ctx_compute.get(), gf, ubatch_tg, LLM_GRAPH_TYPE_DEFAULT); + if (!ggml_backend_sched_reserve(sched.get(), gf)) { + throw std::runtime_error("failed to allocate compute tg buffers"); + } + n_splits_tg = ggml_backend_sched_get_n_splits(sched.get()); + n_nodes_tg = ggml_graph_n_nodes(gf); } - } - if (cparams.embeddings && cparams.pooling_type == LLAMA_POOLING_TYPE_LAST) { - const int64_t n_tokens = ubatch.n_tokens; - const int64_t n_seq_tokens = ubatch.n_seq_tokens; - const int64_t n_seqs = ubatch.n_seqs; + // reserve again with pp graph to avoid ggml-alloc reallocations during inference + { + llama_ubatch ubatch_pp = { true, n_tokens, n_tokens / n_seqs, n_seqs, &token, nullptr, nullptr, nullptr, nullptr, nullptr}; + auto * gf = graph_init(); + graph_build(ctx_compute.get(), gf, ubatch_pp, LLM_GRAPH_TYPE_DEFAULT); + if (!ggml_backend_sched_reserve(sched.get(), gf)) { + throw std::runtime_error("failed to allocate compute pp buffers"); + } + } - GGML_ASSERT(lctx.inp_cls); - GGML_ASSERT(ggml_backend_buffer_is_host(lctx.inp_cls->buffer)); + for (size_t i = 0; i < backend_ptrs.size(); ++i) { + ggml_backend_t backend = backend_ptrs[i]; + ggml_backend_buffer_type_t buft = backend_buft[i]; + size_t size = ggml_backend_sched_get_buffer_size(sched.get(), backend); + if (size > 1) { + LLAMA_LOG_INFO("%s: %10s compute buffer size = %8.2f MiB\n", __func__, + ggml_backend_buft_name(buft), + size / 1024.0 / 1024.0); + } + } - uint32_t * data = (uint32_t *) lctx.inp_cls->data; - memset(lctx.inp_cls->data, 0, n_tokens * ggml_element_size(lctx.inp_cls)); + if (n_nodes_pp == n_nodes_tg) { + LLAMA_LOG_INFO("%s: graph nodes = %d\n", __func__, n_nodes_pp); + } else { + LLAMA_LOG_INFO("%s: graph nodes = %d (with bs=%d), %d (with bs=1)\n", __func__, n_nodes_pp, n_tokens, n_nodes_tg); + } - std::vector last_pos(n_tokens, -1); - std::vector last_row(n_tokens, -1); + if (n_splits_pp == n_splits_tg) { + LLAMA_LOG_INFO("%s: graph splits = %d\n", __func__, n_splits_pp); + } else { + LLAMA_LOG_INFO("%s: graph splits = %d (with bs=%d), %d (with bs=1)\n", __func__, n_splits_pp, n_tokens, n_splits_tg); + } + } +} - for (int s = 0; s < n_seqs; ++s) { - const llama_seq_id seq_id = ubatch.seq_id[s][0]; +llama_context::~llama_context() = default; - // TODO: adapt limits to n_seqs when ubatch.equal_seqs is true - GGML_ASSERT(seq_id < n_tokens && "seq_id cannot be larger than n_tokens with pooling_type == LAST"); +void llama_context::synchronize() { + ggml_backend_sched_synchronize(sched.get()); - for (int i = 0; i < n_seq_tokens; ++i) { - const llama_pos pos = ubatch.pos[s*n_seq_tokens + i]; + // FIXME: if multiple single tokens are evaluated without a synchronization, + // the stats will be added to the prompt evaluation stats + // this should only happen when using batch size 1 to evaluate a batch - if (pos >= last_pos[seq_id]) { - last_pos[seq_id] = pos; - last_row[seq_id] = s*n_seq_tokens + i; - } - } + // add the evaluation to the stats + if (n_queued_tokens == 1) { + if (!cparams.no_perf) { + t_eval_us += ggml_time_us() - t_compute_start_us; } - - for (int i = 0; i < n_tokens; ++i) { - if (last_row[i] >= 0) { - data[i] = last_row[i]; - } + n_eval++; + } else if (n_queued_tokens > 1) { + if (!cparams.no_perf) { + t_p_eval_us += ggml_time_us() - t_compute_start_us; } + n_p_eval += 
n_queued_tokens; } - if (kv_self.recurrent) { - const int64_t n_kv = kv_self.n; - - if (lctx.inp_s_mask) { - GGML_ASSERT(ggml_backend_buffer_is_host(lctx.inp_s_mask->buffer)); - float * data = (float *) lctx.inp_s_mask->data; + // get a more accurate load time, upon first eval + if (n_queued_tokens > 0 && !has_evaluated_once) { + t_load_us = ggml_time_us() - t_start_us; + has_evaluated_once = true; + } - // clear unused states - for (int i = 0; i < n_kv; ++i) { - const uint32_t cell_id = i + kv_self.head; - llama_kv_cell & kv_cell = lctx.kv_self.cells[cell_id]; + n_queued_tokens = 0; + t_compute_start_us = 0; +} - data[i] = (float) (kv_cell.src >= 0); +const llama_model & llama_context::get_model() const { + return model; +} - // only clear once - if (kv_cell.src < 0) { - kv_cell.src = cell_id; - } - } - } +uint32_t llama_context::n_ctx() const { + return cparams.n_ctx; +} - if (lctx.inp_s_copy) { - GGML_ASSERT(ggml_backend_buffer_is_host(lctx.inp_s_copy->buffer)); - int32_t * data = (int32_t *) lctx.inp_s_copy->data; +uint32_t llama_context::n_ctx_per_seq() const { + return cparams.n_ctx / cparams.n_seq_max; +} - // assuming copy destinations ALWAYS happen ONLY on the cells between head and head+n - for (uint32_t i = 0; i < n_kv; ++i) { - const uint32_t cell_id = i + kv_self.head; - llama_kv_cell & kv_cell = lctx.kv_self.cells[cell_id]; +uint32_t llama_context::n_batch() const { + return cparams.n_batch; +} - // prevent out-of-bound sources - if (kv_cell.src < 0 || (uint32_t) kv_cell.src >= kv_self.size) { - kv_cell.src = cell_id; - } +uint32_t llama_context::n_ubatch() const { + return cparams.n_ubatch; +} - data[i] = kv_cell.src; +uint32_t llama_context::n_seq_max() const { + return cparams.n_seq_max; +} - // ensure copy only happens once - if (kv_cell.src != (int32_t) cell_id) { - kv_cell.src = cell_id; - } - } - } - } +uint32_t llama_context::n_threads() const { + return cparams.n_threads; +} - if (lctx.inp_pos_bucket) { - const int64_t n_tokens = ubatch.n_tokens; +uint32_t llama_context::n_threads_batch() const { + return cparams.n_threads_batch; +} - GGML_ASSERT(ggml_backend_buffer_is_host(lctx.inp_pos_bucket->buffer)); - GGML_ASSERT(!ubatch.equal_seqs); // TODO: use ubatch.n_seqs instead of failing +llama_kv_cache * llama_context::get_kv_self() { + return kv_self.get(); +} - int32_t * data = (int32_t *) lctx.inp_pos_bucket->data; +const llama_kv_cache * llama_context::get_kv_self() const { + return kv_self.get(); +} - if (!lctx.is_encoding) { - const int64_t n_kv = kv_self.n; - for (int h = 0; h < 1; ++h) { - for (int j = 0; j < n_tokens; ++j) { - for (int i = 0; i < n_kv; ++i) { - data[h*(n_kv*n_tokens) + j*n_kv + i] = llama_relative_position_bucket(lctx.kv_self.cells[i].pos, ubatch.pos[j], hparams.n_rel_attn_bkts, lctx.is_encoding); - } - } - } - } else { - for (int h = 0; h < 1; ++h) { - for (int j = 0; j < n_tokens; ++j) { - for (int i = 0; i < n_tokens; ++i) { - data[h*(n_tokens*n_tokens) + j*n_tokens + i] = llama_relative_position_bucket(ubatch.pos[i], ubatch.pos[j], hparams.n_rel_attn_bkts, lctx.is_encoding); - } +ggml_tensor * llama_context::build_rope_shift( + ggml_context * ctx0, + ggml_tensor * cur, + ggml_tensor * shift, + ggml_tensor * factors, + ggml_backend_buffer * bbuf) const { + const auto & n_ctx_orig = cparams.n_ctx_orig_yarn; + const auto & freq_base = cparams.rope_freq_base; + const auto & freq_scale = cparams.rope_freq_scale; + + const auto & yarn_ext_factor = cparams.yarn_ext_factor; + const auto & yarn_attn_factor = cparams.yarn_attn_factor; + const auto & 
yarn_beta_fast = cparams.yarn_beta_fast; + const auto & yarn_beta_slow = cparams.yarn_beta_slow; + + const auto & hparams = model.hparams; + + const auto & n_rot = hparams.n_rot; + const auto & rope_type = hparams.rope_type; + + ggml_tensor * tmp; + + if (ggml_is_quantized(cur->type)) { + // dequantize to f32 -> RoPE -> quantize back + tmp = ggml_cast(ctx0, cur, GGML_TYPE_F32); + + if (bbuf) { + for (const auto & backend : backends) { + // Figure out which backend KV cache belongs to + if (ggml_backend_supports_buft(backend.get(), ggml_backend_buffer_get_type(bbuf))) { + ggml_backend_sched_set_tensor_backend(sched.get(), tmp, backend.get()); + break; } } } - } - if (!lctx.is_encoding && lctx.inp_embd_enc) { - assert(lctx.inp_embd_enc->type == GGML_TYPE_F32); - assert((size_t) ggml_nelements(lctx.inp_embd_enc) == lctx.embd_enc.size()); + tmp = ggml_rope_ext_inplace(ctx0, tmp, + shift, factors, n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, + yarn_ext_factor, yarn_attn_factor, yarn_beta_fast, yarn_beta_slow); - ggml_backend_tensor_set(lctx.inp_embd_enc, lctx.embd_enc.data(), 0, ggml_nbytes(lctx.inp_embd_enc)); + tmp = ggml_cpy(ctx0, tmp, cur); + } else { + // we rotate only the first n_rot dimensions + tmp = ggml_rope_ext_inplace(ctx0, cur, + shift, factors, n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, + yarn_ext_factor, yarn_attn_factor, yarn_beta_fast, yarn_beta_slow); } - if (!lctx.is_encoding && lctx.inp_KQ_mask_cross) { - const int64_t n_output_enc = lctx.embd_enc.size() / hparams.n_embd; - const int64_t n_tokens = ubatch.n_tokens; + return tmp; +} - GGML_ASSERT(ggml_backend_buffer_is_host(lctx.inp_KQ_mask_cross->buffer)); - GGML_ASSERT(!ubatch.equal_seqs); // TODO: use ubatch.n_seqs instead of failing +class llm_graph_input_k_shift : public llm_graph_input_i { +public: + llm_graph_input_k_shift(const llama_kv_cache_unified * kv_self) : kv_self(kv_self) {} + virtual ~llm_graph_input_k_shift() = default; - float * data = (float *) lctx.inp_KQ_mask_cross->data; + void set_input(const llama_ubatch * ubatch) override; - for (int h = 0; h < 1; ++h) { - for (int j = 0; j < n_tokens; ++j) { - for (int i = 0; i < n_output_enc; ++i) { - float f = -INFINITY; - for (int s = 0; s < ubatch.n_seq_id[j]; ++s) { - const llama_seq_id seq_id = ubatch.seq_id[j][s]; - if (lctx.seq_ids_enc[i].find(seq_id) != lctx.seq_ids_enc[i].end()) { - f = 0.0f; - } - } - data[h*(n_output_enc*n_tokens) + j*n_output_enc + i] = f; - } - } + ggml_tensor * k_shift; // I32 [kv_size] - for (int i = n_tokens; i < GGML_PAD(n_tokens, GGML_KQ_MASK_PAD); ++i) { - for (int j = 0; j < n_output_enc; ++j) { - data[h*(n_output_enc*n_tokens) + i*n_output_enc + j] = -INFINITY; - } - } + const llama_kv_cache_unified * kv_self; +}; + +void llm_graph_input_k_shift::set_input(const llama_ubatch * ubatch) { + GGML_UNUSED(ubatch); + + if (k_shift) { + assert(ggml_backend_buffer_is_host(k_shift->buffer)); + + int32_t * data = (int32_t *) k_shift->data; + + for (uint32_t i = 0; i < kv_self->size; ++i) { + data[i] = kv_self->cells[i].delta; } } } -// llama output +llm_graph_result_ptr llama_context::build_kv_self_shift( + ggml_context * ctx0, + ggml_cgraph * gf) const { + auto res = std::make_unique(); -size_t llama_output_reserve(struct llama_context & lctx, size_t n_outputs) { - const auto & cparams = lctx.cparams; - const auto & hparams = lctx.model.hparams; - const auto & vocab = lctx.model.vocab; + const auto & hparams = model.hparams; - const size_t n_outputs_max = std::max(n_outputs, (size_t) cparams.n_seq_max); + const auto & 
n_layer = hparams.n_layer; - const auto n_batch = cparams.n_batch; - const auto n_vocab = vocab.n_tokens(); - const auto n_embd = hparams.n_embd; + const auto & n_embd_head_k = hparams.n_embd_head_k; + //const auto & n_embd_head_v = hparams.n_embd_head_v; - // TODO: use a per-batch flag for logits presence instead - const bool has_logits = !cparams.embeddings; - const bool has_embd = cparams.embeddings && (cparams.pooling_type == LLAMA_POOLING_TYPE_NONE); + //GGML_ASSERT(kv_self->size == n_ctx); - const size_t logits_size = has_logits ? n_vocab*n_outputs_max : 0; - const size_t embd_size = has_embd ? n_embd*n_outputs_max : 0; + auto inp = std::make_unique(kv_self.get()); - if (lctx.output_ids.empty()) { - // init, never resized afterwards - lctx.output_ids.resize(n_batch); - } + inp->k_shift = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, cparams.n_ctx); + ggml_set_input(inp->k_shift); - const size_t prev_size = lctx.buf_output ? ggml_backend_buffer_get_size(lctx.buf_output.get()) : 0; - const size_t new_size = (logits_size + embd_size) * sizeof(float); + for (uint32_t il = 0; il < n_layer; ++il) { + const int64_t n_head_kv = hparams.n_head_kv(il); + const int64_t n_embd_k_gqa = hparams.n_embd_k_gqa(il); - // alloc only when more than the current capacity is required - // TODO: also consider shrinking the buffer - if (!lctx.buf_output || prev_size < new_size) { - if (lctx.buf_output) { -#ifndef NDEBUG - // This doesn't happen often, but may be annoying in some cases (like the HellaSwag benchmark) - LLAMA_LOG_INFO("%s: reallocating output buffer from size %.02f MiB to %.02f MiB\n", __func__, prev_size / 1024.0 / 1024.0, new_size / 1024.0 / 1024.0); -#endif - lctx.buf_output = nullptr; - lctx.logits = nullptr; - lctx.embd = nullptr; - } + ggml_tensor * rope_factors = kv_self->cbs.get_rope_factors(n_ctx_per_seq(), il); - auto * buft = ggml_backend_cpu_buffer_type(); - // try to use the host buffer of the device where the output tensor is allocated for faster transfer to system memory - auto * output_dev = lctx.model.dev_output(); - auto * output_dev_host_buft = output_dev ? ggml_backend_dev_host_buffer_type(output_dev) : nullptr; - if (output_dev_host_buft) { - buft = output_dev_host_buft; - } - lctx.buf_output.reset(ggml_backend_buft_alloc_buffer(buft, new_size)); - if (lctx.buf_output == nullptr) { - LLAMA_LOG_ERROR("%s: failed to allocate output buffer of size %.2f MiB\n", __func__, new_size / (1024.0 * 1024.0)); - return 0; - } + ggml_tensor * k = + ggml_view_3d(ctx0, kv_self->k_l[il], + n_embd_head_k, n_head_kv, kv_self->size, + ggml_row_size(kv_self->k_l[il]->type, n_embd_head_k), + ggml_row_size(kv_self->k_l[il]->type, n_embd_k_gqa), + 0); + + ggml_tensor * cur = build_rope_shift(ctx0, k, inp->k_shift, rope_factors, kv_self->k_l[il]->buffer); + + ggml_build_forward_expand(gf, cur); } - float * output_base = (float *) ggml_backend_buffer_get_base(lctx.buf_output.get()); + res->add_input(std::move(inp)); + + return res; +} + +llm_graph_result_ptr llama_context::build_kv_self_defrag( + ggml_context * ctx0, + ggml_cgraph * gf) const { + auto res = std::make_unique(); - lctx.logits = has_logits ? output_base : nullptr; - lctx.embd = has_embd ? 
output_base + logits_size : nullptr; + const auto & hparams = model.hparams; - lctx.output_size = n_outputs_max; - lctx.logits_size = logits_size; - lctx.embd_size = embd_size; + const auto & ids = kv_self->defrag_info.ids; - // set all ids as invalid (negative) - std::fill(lctx.output_ids.begin(), lctx.output_ids.end(), -1); +#if 0 + // CPU defrag + // + // TODO: optimizations are possible: + // - multiple threads + // - avoid copying to the host memory when already there + // + // likely not worth the effort, as we have ggml_graph based defrag + // - ggml_backend_buffer_clear(lctx.buf_output.get(), 0); + const uint32_t n_embd_k_gqa = hparams.n_embd_k_gqa(); + const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(); - lctx.n_outputs = 0; + const uint32_t kv_size = size; - return n_outputs_max; -} + std::vector buf_k; + std::vector buf_v; -void llama_output_reorder(struct llama_context & ctx) { - std::vector & out_ids = ctx.sbatch.out_ids; - if (!out_ids.empty()) { - const uint32_t n_vocab = ctx.model.vocab.n_tokens(); - const uint32_t n_embd = ctx.model.hparams.n_embd; + for (uint32_t il = 0; il < n_layer; ++il) { + const size_t k_size_row = ggml_row_size(k_l[il]->type, n_embd_k_gqa); + const size_t k_size = ggml_row_size(k_l[il]->type, n_embd_k_gqa*kv_size); - const int32_t n_outputs = ctx.n_outputs; - GGML_ASSERT((size_t) n_outputs == out_ids.size()); + const size_t v_size_el = ggml_type_size(v_l[il]->type); + const size_t v_size = ggml_row_size (v_l[il]->type, n_embd_v_gqa*kv_size); - // TODO: is there something more efficient which also minimizes swaps? - // selection sort, to minimize swaps (from https://en.wikipedia.org/wiki/Selection_sort) - for (int32_t i = 0; i < n_outputs - 1; ++i) { - int32_t j_min = i; - for (int32_t j = i + 1; j < n_outputs; ++j) { - if (out_ids[j] < out_ids[j_min]) { - j_min = j; - } + buf_k.resize(k_size); + buf_v.resize(v_size); + + ggml_backend_tensor_get(k_l[il], buf_k.data(), 0, buf_k.size()); + ggml_backend_tensor_get(v_l[il], buf_v.data(), 0, buf_v.size()); + + // batch move [i, i+nm) to [id, id+nm) + // note: cells can move only to a lower index + for (uint32_t i = 0; i < n_kv; ++i) { + const uint32_t id = ids[i]; + + if (i == id || id == n_kv) { + continue; } - if (j_min == i) { continue; } - std::swap(out_ids[i], out_ids[j_min]); - if (ctx.logits_size > 0) { - for (uint32_t k = 0; k < n_vocab; k++) { - std::swap(ctx.logits[i*n_vocab + k], ctx.logits[j_min*n_vocab + k]); - } + + uint32_t nm = 1; + + while (i + nm < n_kv && ids[i + nm] == id + nm) { + nm++; } - if (ctx.embd_size > 0) { - for (uint32_t k = 0; k < n_embd; k++) { - std::swap(ctx.embd[i*n_embd + k], ctx.embd[j_min*n_embd + k]); + + // move keys + { + const int64_t os = i*k_size_row; + const int64_t od = id*k_size_row; + + memcpy(buf_k.data() + od, buf_k.data() + os, nm*k_size_row); + } + + // move values (note: they are transposed) + { + const int64_t os = i; + const int64_t od = id; + + for (uint32_t j = 0; j < n_embd_v_gqa; ++j) { + memcpy(buf_v.data() + (od + j*kv_size)*v_size_el, buf_v.data() + (os + j*kv_size)*v_size_el, nm*v_size_el); } } + + i += nm - 1; } - std::fill(ctx.output_ids.begin(), ctx.output_ids.end(), -1); - for (int32_t i = 0; i < n_outputs; ++i) { - ctx.output_ids[out_ids[i]] = i; - } - out_ids.clear(); - } -} -// -// interface implementation -// + ggml_backend_tensor_set(k_l[il], buf_k.data(), 0, buf_k.size()); + ggml_backend_tensor_set(v_l[il], buf_v.data(), 0, buf_v.size()); + } +#else + for (uint32_t i = 0; i < ids.size(); ++i) { + const uint32_t id = ids[i]; -void 
llama_free(struct llama_context * ctx) { - delete ctx; -} + if (i == id || id == ids.size()) { + continue; + } -uint32_t llama_n_ctx(const struct llama_context * ctx) { - return ctx->cparams.n_ctx; -} + uint32_t nm = 1; -uint32_t llama_n_batch(const struct llama_context * ctx) { - return ctx->cparams.n_batch; -} + while (i + nm < ids.size() && ids[i + nm] == id + nm) { + nm++; + } -uint32_t llama_n_ubatch(const struct llama_context * ctx) { - return ctx->cparams.n_ubatch; -} + for (uint32_t il = 0; il < hparams.n_layer; ++il) { // NOLINT + const int64_t n_embd_k_gqa = hparams.n_embd_k_gqa(il); + const int64_t n_embd_v_gqa = hparams.n_embd_v_gqa(il); + + ggml_tensor * view_k_src = ggml_view_2d(ctx0, kv_self->k_l[il], + n_embd_k_gqa, nm, + ggml_row_size(kv_self->k_l[il]->type, n_embd_k_gqa), + ggml_row_size(kv_self->k_l[il]->type, n_embd_k_gqa*i)); + + ggml_tensor * view_k_dst = ggml_view_2d(ctx0, kv_self->k_l[il], + n_embd_k_gqa, nm, + ggml_row_size(kv_self->k_l[il]->type, n_embd_k_gqa), + ggml_row_size(kv_self->k_l[il]->type, n_embd_k_gqa*id)); + + ggml_tensor * view_v_src; + ggml_tensor * view_v_dst; + + if (cparams.flash_attn) { + // NOTE: the V cache is not transposed when using flash attention + view_v_src = ggml_view_2d(ctx0, kv_self->v_l[il], + n_embd_v_gqa, nm, + ggml_row_size(kv_self->v_l[il]->type, n_embd_v_gqa), + ggml_row_size(kv_self->v_l[il]->type, n_embd_v_gqa*i)); + + view_v_dst = ggml_view_2d(ctx0, kv_self->v_l[il], + n_embd_v_gqa, nm, + ggml_row_size(kv_self->v_l[il]->type, n_embd_v_gqa), + ggml_row_size(kv_self->v_l[il]->type, n_embd_v_gqa*id)); + } else { + view_v_src = ggml_view_2d(ctx0, kv_self->v_l[il], + nm, n_embd_v_gqa, + ggml_row_size(kv_self->v_l[il]->type, kv_self->size), + ggml_row_size(kv_self->v_l[il]->type, i)); + + view_v_dst = ggml_view_2d(ctx0, kv_self->v_l[il], + nm, n_embd_v_gqa, + ggml_row_size(kv_self->v_l[il]->type, kv_self->size), + ggml_row_size(kv_self->v_l[il]->type, id)); + } -uint32_t llama_n_seq_max(const struct llama_context * ctx) { - return ctx->kv_self.size; -} + ggml_build_forward_expand(gf, ggml_cpy(ctx0, view_k_src, view_k_dst)); + ggml_build_forward_expand(gf, ggml_cpy(ctx0, view_v_src, view_v_dst)); + } -const struct llama_model * llama_get_model(const struct llama_context * ctx) { - return &ctx->model; -} + i += nm - 1; + } -enum llama_pooling_type llama_pooling_type(const struct llama_context * ctx) { - return ctx->cparams.pooling_type; -} + //LLAMA_LOG_INFO("gf->n_nodes = %d\n", gf->n_nodes); +#endif -void llama_attach_threadpool( - struct llama_context * ctx, - ggml_threadpool_t threadpool, - ggml_threadpool_t threadpool_batch) { - ctx->threadpool = threadpool; - ctx->threadpool_batch = threadpool_batch ? 
threadpool_batch : threadpool; + return res; } -void llama_detach_threadpool(struct llama_context * ctx) { - ctx->threadpool = nullptr; - ctx->threadpool_batch = nullptr; -} +void llama_context::kv_self_update() { + auto & kv = kv_self; -void llama_set_n_threads(struct llama_context * ctx, int32_t n_threads, int32_t n_threads_batch) { - ctx->cparams.n_threads = n_threads; - ctx->cparams.n_threads_batch = n_threads_batch; -} + bool need_reserve = false; -int32_t llama_n_threads(struct llama_context * ctx) { - return ctx->cparams.n_threads; -} + if (kv->has_shift) { + if (!kv->get_can_shift()) { + GGML_ABORT("The current context does not support K-shift"); + } -int32_t llama_n_threads_batch(struct llama_context * ctx) { - return ctx->cparams.n_threads_batch; -} + LLAMA_LOG_DEBUG("%s: applying K-shift\n", __func__); -void llama_set_abort_callback(struct llama_context * ctx, bool (*abort_callback)(void * data), void * abort_callback_data) { - ctx->abort_callback = abort_callback; - ctx->abort_callback_data = abort_callback_data; + // apply K-shift if needed + if (model.hparams.rope_type != LLAMA_ROPE_TYPE_NONE) { + ggml_backend_sched_reset(sched.get()); - for (auto & backend : ctx->backends) { - auto * reg = ggml_backend_dev_backend_reg(ggml_backend_get_device(backend.get())); - auto * set_abort_callback_fn = (ggml_backend_set_abort_callback_t) ggml_backend_reg_get_proc_address(reg, "ggml_backend_set_abort_callback"); - if (set_abort_callback_fn) { - set_abort_callback_fn(backend.get(), ctx->abort_callback, ctx->abort_callback_data); - } - } -} + auto * gf = graph_init(); -void llama_set_embeddings(struct llama_context * ctx, bool embeddings) { - ctx->cparams.embeddings = embeddings; -} + auto res = build_kv_self_shift(ctx_compute.get(), gf); -void llama_set_causal_attn(struct llama_context * ctx, bool causal_attn) { - ctx->cparams.causal_attn = causal_attn; -} + ggml_backend_sched_alloc_graph(sched.get(), gf); -void llama_synchronize(struct llama_context * ctx) { - ggml_backend_sched_synchronize(ctx->sched.get()); + res->set_inputs(nullptr); - // FIXME: if multiple single tokens are evaluated without a synchronization, - // the stats will be added to the prompt evaluation stats - // this should only happen when using batch size 1 to evaluate a batch + graph_compute(gf, false); - // add the evaluation to the stats - if (ctx->n_queued_tokens == 1) { - if (!ctx->cparams.no_perf) { - ctx->t_eval_us += ggml_time_us() - ctx->t_compute_start_us; + need_reserve = true; } - ctx->n_eval++; - } else if (ctx->n_queued_tokens > 1) { - if (!ctx->cparams.no_perf) { - ctx->t_p_eval_us += ggml_time_us() - ctx->t_compute_start_us; + + { + kv->has_shift = false; + + for (uint32_t i = 0; i < kv->size; ++i) { + kv->cells[i].delta = 0; + } } - ctx->n_p_eval += ctx->n_queued_tokens; } - // get a more accurate load time, upon first eval - if (ctx->n_queued_tokens > 0 && !ctx->has_evaluated_once) { - ctx->t_load_us = ggml_time_us() - ctx->t_start_us; - ctx->has_evaluated_once = true; - } + // defragment the KV cache if needed + if (kv->do_defrag) { + LLAMA_LOG_DEBUG("%s: defragmenting KV cache\n", __func__); - ctx->n_queued_tokens = 0; - ctx->t_compute_start_us = 0; -} + if (kv->defrag_prepare(graph_max_nodes())) { + ggml_backend_sched_reset(sched.get()); -float * llama_get_logits(struct llama_context * ctx) { - llama_synchronize(ctx); + auto * gf = graph_init(); - // reorder logits for backward compatibility - // TODO: maybe deprecate this - llama_output_reorder(*ctx); + auto res = 
build_kv_self_defrag(ctx_compute.get(), gf); - return ctx->logits; -} + ggml_backend_sched_alloc_graph(sched.get(), gf); -float * llama_get_logits_ith(struct llama_context * ctx, int32_t i) { - int32_t j = -1; + res->set_inputs(nullptr); - llama_synchronize(ctx); + graph_compute(gf, false); - try { - if (ctx->logits == nullptr) { - throw std::runtime_error("no logits"); + need_reserve = true; } - if (i < 0) { - j = ctx->n_outputs + i; - if (j < 0) { - throw std::runtime_error(format("negative index out of range [0, %d)", ctx->n_outputs)); - } - } else if ((size_t) i >= ctx->output_ids.size()) { - throw std::runtime_error(format("out of range [0, %zu)", ctx->output_ids.size())); - } else { - j = ctx->output_ids[i]; + kv->do_defrag = false; + } + + // reserve a worst case graph if needed + if (need_reserve) { + LLAMA_LOG_DEBUG("%s: reserving a worst case graph\n", __func__); + + // build worst-case graph + uint32_t n_seqs = 1; // TODO: worst-case number of sequences + uint32_t n_tokens = std::min(cparams.n_ctx, cparams.n_ubatch); + + // simulate full KV cache + kv_self->n = kv_self->size; + + llama_token token = model.vocab.token_bos(); // not actually used by llama_build_graph, but required to choose between token and embedding inputs graph + llama_ubatch ubatch = { true, n_tokens, n_tokens / n_seqs, n_seqs, &token, nullptr, nullptr, nullptr, nullptr, nullptr}; + + auto * gf = graph_init(); + graph_build(ctx_compute.get(), gf, ubatch, LLM_GRAPH_TYPE_DEFAULT); + + // initialize scheduler with the worst-case graph + ggml_backend_sched_reset(sched.get()); + if (!ggml_backend_sched_reserve(sched.get(), gf)) { + LLAMA_LOG_ERROR("%s: failed to allocate compute buffers\n", __func__); + } + } +} + +enum llama_pooling_type llama_context::pooling_type() const { + return cparams.pooling_type; +} + +float * llama_context::get_logits() { + // reorder logits for backward compatibility + output_reorder(); + + return logits; +} + +float * llama_context::get_logits_ith(int32_t i) { + int32_t j = -1; + + try { + if (logits == nullptr) { + throw std::runtime_error("no logits"); + } + + if (i < 0) { + j = n_outputs + i; + if (j < 0) { + throw std::runtime_error(format("negative index out of range [0, %d)", n_outputs)); + } + } else if ((size_t) i >= output_ids.size()) { + throw std::runtime_error(format("out of range [0, %zu)", output_ids.size())); + } else { + j = output_ids[i]; } if (j < 0) { throw std::runtime_error(format("batch.logits[%d] != true", i)); } - if (j >= ctx->n_outputs) { + if (j >= n_outputs) { // This should not happen - throw std::runtime_error(format("corrupt output buffer (j=%d, n_outputs=%d)", j, ctx->n_outputs)); + throw std::runtime_error(format("corrupt output buffer (j=%d, n_outputs=%d)", j, n_outputs)); } - return ctx->logits + j*ctx->model.vocab.n_tokens(); + return logits + j*model.vocab.n_tokens(); } catch (const std::exception & err) { LLAMA_LOG_ERROR("%s: invalid logits id %d, reason: %s\n", __func__, i, err.what()); #ifndef NDEBUG @@ -737,46 +837,41 @@ float * llama_get_logits_ith(struct llama_context * ctx, int32_t i) { } } -float * llama_get_embeddings(struct llama_context * ctx) { - llama_synchronize(ctx); - +float * llama_context::get_embeddings() { // reorder embeddings for backward compatibility - // TODO: maybe deprecate this - llama_output_reorder(*ctx); + output_reorder(); - return ctx->embd; + return embd; } -float * llama_get_embeddings_ith(struct llama_context * ctx, int32_t i) { +float * llama_context::get_embeddings_ith(int32_t i) { int32_t j = -1; - 
llama_synchronize(ctx); - try { - if (ctx->embd == nullptr) { + if (embd == nullptr) { throw std::runtime_error("no embeddings"); } if (i < 0) { - j = ctx->n_outputs + i; + j = n_outputs + i; if (j < 0) { - throw std::runtime_error(format("negative index out of range [0, %d)", ctx->n_outputs)); + throw std::runtime_error(format("negative index out of range [0, %d)", n_outputs)); } - } else if ((size_t) i >= ctx->output_ids.size()) { - throw std::runtime_error(format("out of range [0, %zu)", ctx->output_ids.size())); + } else if ((size_t) i >= output_ids.size()) { + throw std::runtime_error(format("out of range [0, %zu)", output_ids.size())); } else { - j = ctx->output_ids[i]; + j = output_ids[i]; } if (j < 0) { throw std::runtime_error(format("batch.logits[%d] != true", i)); } - if (j >= ctx->n_outputs) { + if (j >= n_outputs) { // This should not happen - throw std::runtime_error(format("corrupt output buffer (j=%d, n_outputs=%d)", j, ctx->n_outputs)); + throw std::runtime_error(format("corrupt output buffer (j=%d, n_outputs=%d)", j, n_outputs)); } - return ctx->embd + j*ctx->model.hparams.n_embd; + return embd + j*model.hparams.n_embd; } catch (const std::exception & err) { LLAMA_LOG_ERROR("%s: invalid embeddings id %d, reason: %s\n", __func__, i, err.what()); #ifndef NDEBUG @@ -787,696 +882,925 @@ float * llama_get_embeddings_ith(struct llama_context * ctx, int32_t i) { } } -float * llama_get_embeddings_seq(struct llama_context * ctx, llama_seq_id seq_id) { - llama_synchronize(ctx); - - auto it = ctx->embd_seq.find(seq_id); - if (it == ctx->embd_seq.end()) { +float * llama_context::get_embeddings_seq(llama_seq_id seq_id) { + auto it = embd_seq.find(seq_id); + if (it == embd_seq.end()) { return nullptr; } return it->second.data(); } -// llama state API +void llama_context::attach_threadpool( + ggml_threadpool_t threadpool, + ggml_threadpool_t threadpool_batch) { + LLAMA_LOG_DEBUG("%s: call\n", __func__); -// deprecated -size_t llama_get_state_size(struct llama_context * ctx) { - return llama_state_get_size(ctx); + this->threadpool = threadpool; + this->threadpool_batch = threadpool_batch ? 
threadpool_batch : threadpool; } -// deprecated -size_t llama_copy_state_data(struct llama_context * ctx, uint8_t * dst) { - return llama_state_get_data(ctx, dst, -1); -} +void llama_context::detach_threadpool() { + LLAMA_LOG_DEBUG("%s: call\n", __func__); -// deprecated -size_t llama_set_state_data(struct llama_context * ctx, const uint8_t * src) { - return llama_state_set_data(ctx, src, -1); + this->threadpool = nullptr; + this->threadpool_batch = nullptr; } -// deprecated -bool llama_load_session_file(struct llama_context * ctx, const char * path_session, llama_token * tokens_out, size_t n_token_capacity, size_t * n_token_count_out) { - return llama_state_load_file(ctx, path_session, tokens_out, n_token_capacity, n_token_count_out); -} +void llama_context::set_n_threads(int32_t n_threads, int32_t n_threads_batch) { + LLAMA_LOG_DEBUG("%s: n_threads = %d, n_threads_batch = %d\n", __func__, n_threads, n_threads_batch); -// deprecated -bool llama_save_session_file(struct llama_context * ctx, const char * path_session, const llama_token * tokens, size_t n_token_count) { - return llama_state_save_file(ctx, path_session, tokens, n_token_count); + cparams.n_threads = n_threads; + cparams.n_threads_batch = n_threads_batch; } -// TODO: replace all non-fatal assertions with returned errors or exceptions -struct llama_data_write { - virtual void write(const void * src, size_t size) = 0; - virtual void write_tensor_data(const struct ggml_tensor * tensor, size_t offset, size_t size) = 0; - virtual size_t get_size_written() = 0; - virtual ~llama_data_write() = default; +void llama_context::set_abort_callback(bool (*abort_callback)(void * data), void * abort_callback_data) { + LLAMA_LOG_DEBUG("%s: call\n", __func__); - void write_string(const std::string & str) { - uint32_t str_size = str.size(); + this->abort_callback = abort_callback; + this->abort_callback_data = abort_callback_data; - write(&str_size, sizeof(str_size)); - write(str.data(), str_size); + for (auto & backend : backends) { + auto * reg = ggml_backend_dev_backend_reg(ggml_backend_get_device(backend.get())); + auto * set_abort_callback_fn = (ggml_backend_set_abort_callback_t) ggml_backend_reg_get_proc_address(reg, "ggml_backend_set_abort_callback"); + if (set_abort_callback_fn) { + set_abort_callback_fn(backend.get(), this->abort_callback, this->abort_callback_data); + } } +} - void write_model_info(const struct llama_context * ctx) { - const std::string arch_str = llm_arch_name(ctx->model.arch); - write_string(arch_str); - // TODO: add more model-specific info which should prevent loading the session file if not identical - } +void llama_context::set_embeddings(bool value) { + LLAMA_LOG_DEBUG("%s: value = %d\n", __func__, value); - //void write_rng(const std::mt19937 & rng) { - // std::ostringstream rng_ss; - // rng_ss << rng; + cparams.embeddings = value; +} - // const std::string & rng_str = rng_ss.str(); +void llama_context::set_causal_attn(bool value) { + LLAMA_LOG_DEBUG("%s: value = %d\n", __func__, value); - // write_string(rng_str); - //} + cparams.causal_attn = value; +} - void write_output_ids(struct llama_context * ctx) { - llama_output_reorder(*ctx); +void llama_context::set_adapter_lora( + llama_adapter_lora * adapter, + float scale) { + LLAMA_LOG_DEBUG("%s: adapter = %p, scale = %f\n", __func__, (void *) adapter, scale); - const uint32_t n_outputs = ctx->n_outputs; + loras[adapter] = scale; +} - std::vector output_pos; +bool llama_context::rm_adapter_lora( + llama_adapter_lora * adapter) { + LLAMA_LOG_DEBUG("%s: adapter = 
%p\n", __func__, (void *) adapter); - const size_t n_batch = ctx->cparams.n_batch; - const auto & output_ids = ctx->output_ids; + auto pos = loras.find(adapter); + if (pos != loras.end()) { + loras.erase(pos); + return true; + } - GGML_ASSERT(n_outputs <= ctx->output_size); + return false; +} - output_pos.resize(n_outputs); +void llama_context::clear_adapter_lora() { + LLAMA_LOG_DEBUG("%s: call\n", __func__); - // build a more compact representation of the output ids - for (size_t i = 0; i < n_batch; ++i) { - // map an output id to a position in the batch - int32_t pos = output_ids[i]; - if (pos >= 0) { - GGML_ASSERT((uint32_t) pos < n_outputs); - output_pos[pos] = i; - } - } + loras.clear(); +} - write(&n_outputs, sizeof(n_outputs)); +bool llama_context::apply_adapter_cvec( + const float * data, + size_t len, + int32_t n_embd, + int32_t il_start, + int32_t il_end) { + LLAMA_LOG_DEBUG("%s: il_start = %d, il_end = %d\n", __func__, il_start, il_end); - if (n_outputs) { - write(output_pos.data(), n_outputs * sizeof(int32_t)); - } + return cvec.apply(model, data, len, n_embd, il_start, il_end); +} + +int llama_context::encode(llama_batch & inp_batch) { + if (inp_batch.n_tokens == 0) { + LLAMA_LOG_ERROR("%s: n_tokens == 0\n", __func__); + return -1; } - void write_logits(const struct llama_context * ctx) { - const uint64_t logits_size = std::min((uint64_t) ctx->logits_size, (uint64_t) ctx->n_outputs * ctx->model.vocab.n_tokens()); + // temporary allocate memory for the input batch if needed + // TODO: this is incorrect for multiple sequences because pos_max() is the maximum across all sequences + llama_batch_allocr batch_allocr(inp_batch, inp_batch.pos ? -1 : kv_self->pos_max() + 1); - write(&logits_size, sizeof(logits_size)); + const llama_batch & batch = batch_allocr.batch; + const int32_t n_tokens = batch.n_tokens; - if (logits_size) { - write(ctx->logits, logits_size * sizeof(float)); + const auto & hparams = model.hparams; + + GGML_ASSERT((!batch.token && batch.embd) || (batch.token && !batch.embd)); // NOLINT + + if (batch.token) { + for (int32_t i = 0; i < n_tokens; ++i) { + if (batch.token[i] < 0 || (uint32_t) batch.token[i] >= model.vocab.n_tokens()) { + LLAMA_LOG_ERROR("%s: invalid token[%d] = %d\n", __func__, i, batch.token[i]); + return -1; + } } } - void write_embeddings(const struct llama_context * ctx) { - const uint64_t embeddings_size = std::min((uint64_t) ctx->embd_size, (uint64_t) ctx->n_outputs * ctx->model.hparams.n_embd); + // micro-batching is not possible for non-causal encoding, so we process the batch in a single shot + GGML_ASSERT(cparams.n_ubatch >= (uint32_t) n_tokens && "encoder requires n_ubatch >= n_tokens"); - write(&embeddings_size, sizeof(embeddings_size)); - - if (embeddings_size) { - write(ctx->embd, embeddings_size * sizeof(float)); - } + if (t_compute_start_us == 0) { + t_compute_start_us = ggml_time_us(); } - void write_kv_cache_meta(const llama_kv_cache & kv_self, const std::vector> & cell_ranges, llama_seq_id seq_id = -1) { - for (const auto & range : cell_ranges) { - for (uint32_t i = range.first; i < range.second; ++i) { - const auto & cell = kv_self.cells[i]; - const llama_pos pos = cell.pos; - const uint32_t n_seq_id = seq_id == -1 ? 
cell.seq_id.size() : 0; + n_queued_tokens += n_tokens; - write(&pos, sizeof(pos)); - write(&n_seq_id, sizeof(n_seq_id)); + const int64_t n_embd = hparams.n_embd; - if (n_seq_id) { - for (auto seq_id : cell.seq_id) { - write(&seq_id, sizeof(seq_id)); - } - } - } - } - } + sbatch.from_batch(batch, n_embd, /* simple_split */ true, /* logits_all */ true); - void write_kv_cache_data(const struct llama_context * ctx, const std::vector> & cell_ranges) { - const struct llama_kv_cache & kv_self = ctx->kv_self; - const struct llama_hparams & hparams = ctx->model.hparams; + const llama_ubatch ubatch = sbatch.split_simple(n_tokens); - const uint32_t v_trans = kv_self.v_trans ? 1 : 0; - const uint32_t n_layer = hparams.n_layer; + // reserve output buffer + if (output_reserve(n_tokens) < n_tokens) { + LLAMA_LOG_ERROR("%s: could not reserve space for batch with %u outputs\n", __func__, n_tokens); + return -2; + }; - write(&v_trans, sizeof(v_trans)); - write(&n_layer, sizeof(n_layer)); + for (int32_t i = 0; i < n_tokens; ++i) { + output_ids[i] = i; + } - std::vector tmp_buf; + n_outputs = n_tokens; - // Iterate and write all the keys first, each row is a cell - // Get whole range at a time - for (uint32_t il = 0; il < n_layer; ++il) { - const uint32_t n_embd_k_gqa = hparams.n_embd_k_gqa(il) + hparams.n_embd_k_s(); + //batch_manager->prepare(ubatch); - // Write key type - const int32_t k_type_i = (int32_t)kv_self.k_l[il]->type; - write(&k_type_i, sizeof(k_type_i)); + ggml_backend_sched_reset(sched.get()); + ggml_backend_sched_set_eval_callback(sched.get(), cparams.cb_eval, cparams.cb_eval_user_data); - // Write row size of key - const uint64_t k_size_row = ggml_row_size(kv_self.k_l[il]->type, n_embd_k_gqa); - write(&k_size_row, sizeof(k_size_row)); + auto * gf = graph_init(); + auto res = graph_build(ctx_compute.get(), gf, ubatch, LLM_GRAPH_TYPE_ENCODER); - // Read each range of cells of k_size length each into tmp_buf and write out - for (const auto & range : cell_ranges) { - const size_t range_size = range.second - range.first; - const size_t buf_size = range_size * k_size_row; - write_tensor_data(kv_self.k_l[il], range.first * k_size_row, buf_size); - } - } + ggml_backend_sched_alloc_graph(sched.get(), gf); - if (!kv_self.v_trans) { - for (uint32_t il = 0; il < n_layer; ++il) { - const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il) + hparams.n_embd_v_s(); + res->set_inputs(&ubatch); - // Write value type - const int32_t v_type_i = (int32_t)kv_self.v_l[il]->type; - write(&v_type_i, sizeof(v_type_i)); + const auto compute_status = graph_compute(gf, n_tokens > 1); + switch (compute_status) { + case GGML_STATUS_SUCCESS: + break; + case GGML_STATUS_ABORTED: + return 2; + case GGML_STATUS_ALLOC_FAILED: + return -2; + case GGML_STATUS_FAILED: + default: + return -3; + } - // Write row size of value - const uint64_t v_size_row = ggml_row_size(kv_self.v_l[il]->type, n_embd_v_gqa); - write(&v_size_row, sizeof(v_size_row)); + auto * t_embd = res->get_embd_pooled() ? 
res->get_embd_pooled() : res->get_embd(); - // Read each range of cells of v_size length each into tmp_buf and write out - for (const auto & range : cell_ranges) { - const size_t range_size = range.second - range.first; - const size_t buf_size = range_size * v_size_row; - write_tensor_data(kv_self.v_l[il], range.first * v_size_row, buf_size); - } - } - } else { - // When v is transposed, we also need the element size and get the element ranges from each row - const uint32_t kv_size = kv_self.size; - for (uint32_t il = 0; il < n_layer; ++il) { - const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il) + hparams.n_embd_v_s(); + // extract embeddings + if (t_embd) { + ggml_backend_t backend_embd = ggml_backend_sched_get_tensor_backend(sched.get(), t_embd); + GGML_ASSERT(backend_embd != nullptr); - // Write value type - const int32_t v_type_i = (int32_t)kv_self.v_l[il]->type; - write(&v_type_i, sizeof(v_type_i)); + GGML_ASSERT(embd != nullptr); - // Write element size - const uint32_t v_size_el = ggml_type_size(kv_self.v_l[il]->type); - write(&v_size_el, sizeof(v_size_el)); + switch (cparams.pooling_type) { + case LLAMA_POOLING_TYPE_NONE: + { + // extract token embeddings + GGML_ASSERT(n_tokens*n_embd <= (int64_t) embd_size); + ggml_backend_tensor_get_async(backend_embd, t_embd, embd, 0, n_tokens*n_embd*sizeof(float)); + } break; + case LLAMA_POOLING_TYPE_MEAN: + case LLAMA_POOLING_TYPE_CLS: + case LLAMA_POOLING_TYPE_LAST: + { + // extract sequence embeddings + auto & embd_seq_out = embd_seq; + embd_seq_out.clear(); - // Write GQA embedding size - write(&n_embd_v_gqa, sizeof(n_embd_v_gqa)); + GGML_ASSERT(!ubatch.equal_seqs); // TODO: handle equal splits - // For each row, we get the element values of each cell - for (uint32_t j = 0; j < n_embd_v_gqa; ++j) { - // Read each range of cells of v_size_el length each into tmp_buf and write out - for (const auto & range : cell_ranges) { - const size_t range_size = range.second - range.first; - const size_t src_offset = (range.first + j * kv_size) * v_size_el; - const size_t buf_size = range_size * v_size_el; - write_tensor_data(kv_self.v_l[il], src_offset, buf_size); + for (int32_t i = 0; i < n_tokens; i++) { + const llama_seq_id seq_id = ubatch.seq_id[i][0]; + if (embd_seq_out.find(seq_id) != embd_seq_out.end()) { + continue; + } + embd_seq_out[seq_id].resize(n_embd); + ggml_backend_tensor_get_async(backend_embd, t_embd, embd_seq_out[seq_id].data(), (n_embd*seq_id)*sizeof(float), n_embd*sizeof(float)); } + } break; + case LLAMA_POOLING_TYPE_RANK: + { + // TODO: this likely should be the same logic as in llama_decoder_internal, but better to + // wait for an encoder model that requires this pooling type in order to test it + // https://github.com/ggerganov/llama.cpp/pull/9510 + GGML_ABORT("RANK pooling not implemented yet"); + } + case LLAMA_POOLING_TYPE_UNSPECIFIED: + { + GGML_ABORT("unknown pooling type"); } - } } } - void write_kv_cache(const struct llama_context * ctx, llama_seq_id seq_id = -1) { - const struct llama_kv_cache & kv_self = ctx->kv_self; - std::vector> cell_ranges; // ranges, from inclusive, to exclusive - uint32_t cell_count = 0; + // Reset state for the next token before backend sync, to allow the CPU activities in the reset to + // overlap with device computation. 
+ ggml_backend_sched_reset(sched.get()); - // Count the number of cells with the specified seq_id - // Find all the ranges of cells with this seq id (or all, when -1) - uint32_t cell_range_begin = kv_self.size; - for (uint32_t i = 0; i < kv_self.size; ++i) { - const auto & cell = kv_self.cells[i]; - if ((seq_id == -1 && !cell.is_empty()) || cell.has_seq_id(seq_id)) { - ++cell_count; - if (cell_range_begin == kv_self.size) { - cell_range_begin = i; - } - } else { - if (cell_range_begin != kv_self.size) { - cell_ranges.emplace_back(cell_range_begin, i); - cell_range_begin = kv_self.size; - } - } - } - if (cell_range_begin != kv_self.size) { - cell_ranges.emplace_back(cell_range_begin, kv_self.size); - } + // TODO: hacky solution + if (model.arch == LLM_ARCH_T5 && t_embd) { + //cross.t_embd = t_embd; - // DEBUG CHECK: Sum of cell counts in ranges should equal the total cell count - uint32_t cell_count_check = 0; - for (const auto & range : cell_ranges) { - cell_count_check += range.second - range.first; + cross.n_embd = t_embd->ne[0]; + cross.n_enc = t_embd->ne[1]; + cross.v_embd.resize(cross.n_embd*cross.n_enc); + memcpy(cross.v_embd.data(), embd, ggml_nbytes(t_embd)); + + // remember the sequence ids used during the encoding - needed for cross attention later + cross.seq_ids_enc.resize(n_tokens); + for (int32_t i = 0; i < n_tokens; i++) { + for (int s = 0; s < ubatch.n_seq_id[i]; s++) { + llama_seq_id seq_id = ubatch.seq_id[i][s]; + cross.seq_ids_enc[i].insert(seq_id); + } } - GGML_ASSERT(cell_count == cell_count_check); + } - write(&cell_count, sizeof(cell_count)); + return 0; +} - write_kv_cache_meta(kv_self, cell_ranges, seq_id); - write_kv_cache_data(ctx, cell_ranges); +int llama_context::decode(llama_batch & inp_batch) { + if (inp_batch.n_tokens == 0) { + LLAMA_LOG_ERROR("%s: n_tokens == 0\n", __func__); + return -1; } -}; -struct llama_data_read { - virtual const uint8_t * read(size_t size) = 0; - virtual void read_to(void * dst, size_t size) = 0; - virtual size_t get_size_read() = 0; - virtual ~llama_data_read() = default; + // temporary allocate memory for the input batch if needed + // TODO: this is incorrect for multiple sequences because pos_max() is the maximum across all sequences + llama_batch_allocr batch_allocr(inp_batch, inp_batch.pos ? 
-1 : kv_self->pos_max() + 1); - void read_string(std::string & str) { - uint32_t str_size; - read_to(&str_size, sizeof(str_size)); + const llama_batch & batch = batch_allocr.batch; - str.assign((const char *) read(str_size), str_size); - } + const auto & vocab = model.vocab; + const auto & hparams = model.hparams; - // validate model information - void read_model_info(const struct llama_context * ctx) { - const std::string cur_arch_str = llm_arch_name(ctx->model.arch); + const int32_t n_vocab = vocab.n_tokens(); - std::string arch_str; - read_string(arch_str); - if (cur_arch_str != arch_str) { - throw std::runtime_error(format("wrong model arch: '%s' instead of '%s'", arch_str.c_str(), cur_arch_str.c_str())); + const int64_t n_tokens_all = batch.n_tokens; + const int64_t n_embd = hparams.n_embd; + + // TODO: remove this stuff + class batch_guard { + public: + batch_guard(llama_kv_cache_unified & kv_self) : kv_slot_restorer(kv_self) { } - // TODO: add more info which needs to be identical but which is not verified otherwise - } - //void read_rng(std::mt19937 & rng) { - // std::string rng_str; - // read_string(rng_str); + ~batch_guard() { + if (!is_done) { + kv_slot_restorer.restore(); + } + } - // std::istringstream rng_ss(rng_str); - // rng_ss >> rng; + void done() { + is_done = true; + } - // if (rng_ss.fail()) { - // throw std::runtime_error("failed to load RNG state"); - // } - //} + void save(const llama_kv_cache_slot_info & slot_info) { + kv_slot_restorer.save(slot_info); + } - void read_output_ids(struct llama_context * ctx) { - std::vector output_pos; + private: + bool is_done = false; - uint32_t n_outputs; - read_to(&n_outputs, sizeof(n_outputs)); + llama_kv_slot_restorer kv_slot_restorer; + }; - if (n_outputs > llama_output_reserve(*ctx, n_outputs)) { - throw std::runtime_error("could not reserve outputs"); - } + batch_guard bg(*kv_self); - if (n_outputs) { - output_pos.resize(n_outputs); - read_to(output_pos.data(), n_outputs * sizeof(int32_t)); + GGML_ASSERT((!batch.token && batch.embd) || (batch.token && !batch.embd)); // NOLINT - for (int32_t i = 0; i < (int32_t) output_pos.size(); ++i) { - int32_t id = output_pos[i]; - if ((uint32_t) id >= ctx->cparams.n_batch) { - throw std::runtime_error(format("invalid output id, %d does not fit in batch size of %u", id, ctx->cparams.n_batch)); - } - ctx->output_ids[id] = i; + if (batch.token) { + for (int64_t i = 0; i < n_tokens_all; ++i) { + if (batch.token[i] < 0 || (uint32_t) batch.token[i] >= model.vocab.n_tokens()) { + LLAMA_LOG_ERROR("%s: invalid token[%" PRId64 "] = %d\n", __func__, i, batch.token[i]); + throw std::runtime_error("invalid token"); } - - ctx->n_outputs = n_outputs; } } - void read_logits(struct llama_context * ctx) { - uint64_t logits_size; - read_to(&logits_size, sizeof(logits_size)); + GGML_ASSERT(n_tokens_all <= cparams.n_batch); - if (ctx->logits_size < logits_size) { - throw std::runtime_error("logits buffer too small"); - } + GGML_ASSERT((cparams.causal_attn || cparams.n_ubatch >= n_tokens_all) && "non-causal attention requires n_ubatch >= n_tokens"); - if (logits_size) { - read_to(ctx->logits, logits_size * sizeof(float)); - } + if (t_compute_start_us == 0) { + t_compute_start_us = ggml_time_us(); } + n_queued_tokens += n_tokens_all; - void read_embeddings(struct llama_context * ctx) { - uint64_t embeddings_size; - read_to(&embeddings_size, sizeof(embeddings_size)); + // this indicates we are doing pooled embedding, so we ignore batch.logits and output all tokens + const bool embd_pooled = cparams.embeddings 
&& cparams.pooling_type != LLAMA_POOLING_TYPE_NONE; - if (ctx->embd_size < embeddings_size) { - throw std::runtime_error("embeddings buffer too small"); - } + embd_seq.clear(); - if (embeddings_size) { - read_to(ctx->embd, embeddings_size * sizeof(float)); + int64_t n_outputs_all = 0; + + // count outputs + if (batch.logits && !embd_pooled) { + for (uint32_t i = 0; i < n_tokens_all; ++i) { + n_outputs_all += batch.logits[i] != 0; } + } else if (logits_all || embd_pooled) { + n_outputs_all = n_tokens_all; + } else { + // keep last output only + n_outputs_all = 1; } - bool read_kv_cache_meta(struct llama_context * ctx, uint32_t cell_count, llama_seq_id dest_seq_id = -1) { - struct llama_kv_cache & kv_self = ctx->kv_self; + const bool logits_all = n_outputs_all == n_tokens_all; - if (dest_seq_id != -1) { - // single sequence + sbatch.from_batch(batch, n_embd, + /* simple_split */ !kv_self->recurrent, + /* logits_all */ logits_all); - llama_kv_cache_seq_rm(kv_self, dest_seq_id, -1, -1); + // reserve output buffer + if (output_reserve(n_outputs_all) < n_outputs_all) { + LLAMA_LOG_ERROR("%s: could not reserve space for batch with %" PRId64 " outputs\n", __func__, n_outputs_all); + return -2; + }; - llama_ubatch batch = ctx->sbatch.reserve_ubatch(cell_count, /* has_embd */ false); - batch.n_tokens = cell_count; - batch.n_seq_tokens = cell_count; - batch.n_seqs = 1; + int64_t n_outputs_prev = 0; - for (uint32_t i = 0; i < cell_count; ++i) { - llama_pos pos; - uint32_t n_seq_id; + while (sbatch.n_tokens > 0) { + llama_ubatch ubatch = llama_ubatch(); - read_to(&pos, sizeof(pos)); - read_to(&n_seq_id, sizeof(n_seq_id)); - - if (n_seq_id != 0) { - LLAMA_LOG_ERROR("%s: invalid seq_id-agnostic kv cell\n", __func__); - return false; - } + const auto & n_ubatch = cparams.n_ubatch; - batch.pos[i] = pos; - } - batch.n_seq_id[0] = 1; - batch.seq_id[0] = &dest_seq_id; - if (!llama_kv_cache_find_slot(kv_self, batch)) { - LLAMA_LOG_ERROR("%s: failed to find available cells in kv cache\n", __func__); - return false; + if (kv_self->recurrent) { + if (embd_pooled) { + // Pooled embeddings cannot be split across ubatches (yet) + ubatch = sbatch.split_seq(cparams.n_ubatch); + } else { + // recurrent model architectures are easier to implement + // with equal-length sequences + ubatch = sbatch.split_equal(cparams.n_ubatch); } - - // DEBUG CHECK: kv_self.head should be our first cell, kv_self.head + cell_count - 1 should be our last cell (verify seq_id and pos values) - // Assume that this is one contiguous block of cells - GGML_ASSERT(kv_self.head + cell_count <= kv_self.size); - GGML_ASSERT(kv_self.cells[kv_self.head].pos == batch.pos[0]); - GGML_ASSERT(kv_self.cells[kv_self.head + cell_count - 1].pos == batch.pos[cell_count - 1]); - GGML_ASSERT(kv_self.cells[kv_self.head].has_seq_id(dest_seq_id)); - GGML_ASSERT(kv_self.cells[kv_self.head + cell_count - 1].has_seq_id(dest_seq_id)); } else { - // whole KV cache restore - - if (cell_count > kv_self.size) { - LLAMA_LOG_ERROR("%s: not enough cells in kv cache\n", __func__); - return false; - } - - llama_kv_cache_clear(kv_self); - - for (uint32_t i = 0; i < cell_count; ++i) { - llama_kv_cell & cell = kv_self.cells[i]; - - llama_pos pos; - uint32_t n_seq_id; + ubatch = sbatch.split_simple(n_ubatch); + } - read_to(&pos, sizeof(pos)); - read_to(&n_seq_id, sizeof(n_seq_id)); + // count the outputs in this u_batch + { + int32_t n_outputs_new = 0; - cell.pos = pos; + if (n_outputs_all == n_tokens_all) { + n_outputs_new = ubatch.n_tokens; + } else { + 
GGML_ASSERT(ubatch.output); + for (uint32_t i = 0; i < ubatch.n_tokens; i++) { + n_outputs_new += (int32_t) (ubatch.output[i] != 0); + } + } - for (uint32_t j = 0; j < n_seq_id; ++j) { - llama_seq_id seq_id; - read_to(&seq_id, sizeof(seq_id)); + // needs to happen before the graph is built + n_outputs = n_outputs_new; + } - if (seq_id < 0 || (uint32_t) seq_id >= llama_n_seq_max(ctx)) { - LLAMA_LOG_ERROR("%s: invalid seq_id, %d is out of range [0, %u)\n", __func__, seq_id, llama_n_seq_max(ctx)); - return false; - } + // non-causal masks do not use the KV cache + if (hparams.causal_attn) { + kv_self_update(); - cell.seq_id.insert(seq_id); + // if we have enough unused cells before the current head -> + // better to start searching from the beginning of the cache, hoping to fill it + if (kv_self->head > kv_self->used + 2*ubatch.n_tokens) { + kv_self->head = 0; + } - if (kv_self.recurrent) { - int32_t & tail = kv_self.cells[seq_id].tail; - if (tail != -1) { - LLAMA_LOG_ERROR("%s: duplicate tail for seq_id %d in cell %d and %d\n", __func__, seq_id, i, tail); - return false; - } - tail = i; - } - } + const auto slot_info = kv_self->find_slot(ubatch); + if (!slot_info) { + LLAMA_LOG_ERROR("%s: failed to prepare ubatch\n", __func__); + return -3; } - kv_self.head = 0; - kv_self.used = cell_count; - } + bg.save(slot_info); - if (kv_self.recurrent) { - for (uint32_t i = 0; i < cell_count; ++i) { - uint32_t cell_id = kv_self.head + i; - // make sure the recurrent states will keep their restored state - kv_self.cells[cell_id].src = cell_id; + if (!kv_self->recurrent) { + // a heuristic, to avoid attending the full cache if it is not yet utilized + // after enough generations, the benefit from this heuristic disappears + // if we start defragmenting the cache, the benefit from this will be more important + const uint32_t pad = kv_self->get_padding(cparams); + kv_self->n = std::min(kv_self->size, std::max(pad, GGML_PAD(kv_self->cell_max(), pad))); } } - return true; - } + //printf("kv_self.n = %5d, kv_self.used = %5d, kv_self.head = %5d\n", kv_self->n, kv_self->used, kv_self->head); - bool read_kv_cache_data(struct llama_context * ctx, uint32_t cell_count) { - const struct llama_hparams & hparams = ctx->model.hparams; - struct llama_kv_cache & kv_self = ctx->kv_self; - uint32_t v_trans; - uint32_t n_layer; - read_to(&v_trans, sizeof(v_trans)); - read_to(&n_layer, sizeof(n_layer)); + ggml_backend_sched_reset(sched.get()); + ggml_backend_sched_set_eval_callback(sched.get(), cparams.cb_eval, cparams.cb_eval_user_data); - if (n_layer != hparams.n_layer) { - LLAMA_LOG_ERROR("%s: mismatched layer count (%u instead of %u)\n", __func__, n_layer, hparams.n_layer); - return false; - } - if (cell_count > kv_self.size) { - LLAMA_LOG_ERROR("%s: not enough cells in kv cache to restore state (%u > %u)\n", __func__, cell_count, kv_self.size); - return false; - } - if (kv_self.v_trans != (bool) v_trans) { - LLAMA_LOG_ERROR("%s: incompatible V transposition\n", __func__); - return false; - } + auto * gf = graph_init(); + auto res = graph_build(ctx_compute.get(), gf, ubatch, LLM_GRAPH_TYPE_DECODER); - // For each layer, read the keys for each cell, one row is one cell, read as one contiguous block - for (uint32_t il = 0; il < n_layer; ++il) { - const uint32_t n_embd_k_gqa = hparams.n_embd_k_gqa(il) + hparams.n_embd_k_s(); + // LLAMA_LOG_INFO("graph build time: %.3f ms (%d nodes, %d leafs)\n", (ggml_time_us() - t_start_us)/1000.0, gf->n_nodes, gf->n_leafs); - // Read type of key - int32_t k_type_i_ref; - 
read_to(&k_type_i_ref, sizeof(k_type_i_ref)); - const int32_t k_type_i = (int32_t)kv_self.k_l[il]->type; - if (k_type_i != k_type_i_ref) { - LLAMA_LOG_ERROR("%s: mismatched key type (%d != %d, layer %d)\n", __func__, k_type_i, k_type_i_ref, il); - return false; - } + ggml_backend_sched_alloc_graph(sched.get(), gf); - // Read row size of key - uint64_t k_size_row_ref; - read_to(&k_size_row_ref, sizeof(k_size_row_ref)); - const size_t k_size_row = ggml_row_size(kv_self.k_l[il]->type, n_embd_k_gqa); - if (k_size_row != k_size_row_ref) { - LLAMA_LOG_ERROR("%s: mismatched key row size (%zu != %zu, layer %d)\n", __func__, k_size_row, (size_t) k_size_row_ref, il); - return false; + res->set_inputs(&ubatch); + + const auto compute_status = graph_compute(gf, ubatch.n_tokens > 1); + if (compute_status != GGML_STATUS_SUCCESS) { + switch (compute_status) { + case GGML_STATUS_ABORTED: + return 2; + case GGML_STATUS_ALLOC_FAILED: + return -2; + case GGML_STATUS_FAILED: + default: + return -3; } + } - if (cell_count) { - // Read and set the keys for the whole cell range - ggml_backend_tensor_set(kv_self.k_l[il], read(cell_count * k_size_row), kv_self.head * k_size_row, cell_count * k_size_row); + // update the kv ring buffer + { + kv_self->head += ubatch.n_tokens; + + // Ensure kv cache head points to a valid index. + if (kv_self->head >= kv_self->size) { + kv_self->head = 0; } } - if (!kv_self.v_trans) { - for (uint32_t il = 0; il < n_layer; ++il) { - const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il) + hparams.n_embd_v_s(); + // plot the computation graph in dot format (for debugging purposes) + //if (n_past%100 == 0) { + // ggml_graph_dump_dot(gf, NULL, "llama.dot"); + //} - // Read type of value - int32_t v_type_i_ref; - read_to(&v_type_i_ref, sizeof(v_type_i_ref)); - const int32_t v_type_i = (int32_t)kv_self.v_l[il]->type; - if (v_type_i != v_type_i_ref) { - LLAMA_LOG_ERROR("%s: mismatched value type (%d != %d, layer %d)\n", __func__, v_type_i, v_type_i_ref, il); - return false; - } + auto * t_logits = cparams.embeddings ? nullptr : res->get_logits(); + auto * t_embd = cparams.embeddings ? 
res->get_embd() : nullptr; - // Read row size of value - uint64_t v_size_row_ref; - read_to(&v_size_row_ref, sizeof(v_size_row_ref)); - const size_t v_size_row = ggml_row_size(kv_self.v_l[il]->type, n_embd_v_gqa); - if (v_size_row != v_size_row_ref) { - LLAMA_LOG_ERROR("%s: mismatched value row size (%zu != %zu, layer %d)\n", __func__, v_size_row, (size_t) v_size_row_ref, il); - return false; - } + if (t_embd && res->get_embd_pooled()) { + t_embd = res->get_embd_pooled(); + } - if (cell_count) { - // Read and set the values for the whole cell range - ggml_backend_tensor_set(kv_self.v_l[il], read(cell_count * v_size_row), kv_self.head * v_size_row, cell_count * v_size_row); - } - } - } else { - // For each layer, read the values for each cell (transposed) - for (uint32_t il = 0; il < n_layer; ++il) { - const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il) + hparams.n_embd_v_s(); - - // Read type of value - int32_t v_type_i_ref; - read_to(&v_type_i_ref, sizeof(v_type_i_ref)); - const int32_t v_type_i = (int32_t)kv_self.v_l[il]->type; - if (v_type_i != v_type_i_ref) { - LLAMA_LOG_ERROR("%s: mismatched value type (%d != %d, layer %d)\n", __func__, v_type_i, v_type_i_ref, il); - return false; - } + // extract logits + if (t_logits && n_outputs > 0) { + ggml_backend_t backend_res = ggml_backend_sched_get_tensor_backend(sched.get(), t_logits); + GGML_ASSERT(backend_res != nullptr); + GGML_ASSERT(logits != nullptr); - // Read element size of value - uint32_t v_size_el_ref; - read_to(&v_size_el_ref, sizeof(v_size_el_ref)); - const size_t v_size_el = ggml_type_size(kv_self.v_l[il]->type); - if (v_size_el != v_size_el_ref) { - LLAMA_LOG_ERROR("%s: mismatched value element size (%zu != %zu, layer %d)\n", __func__, v_size_el, (size_t) v_size_el_ref, il); - return false; - } + float * logits_out = logits + n_outputs_prev*n_vocab; - // Read GQA embedding size - uint32_t n_embd_v_gqa_ref; - read_to(&n_embd_v_gqa_ref, sizeof(n_embd_v_gqa_ref)); - if (n_embd_v_gqa != n_embd_v_gqa_ref) { - LLAMA_LOG_ERROR("%s: mismatched GQA embedding size (%u != %u, layer %d)\n", __func__, n_embd_v_gqa, n_embd_v_gqa_ref, il); - return false; - } + if (n_outputs) { + GGML_ASSERT( n_outputs_prev + n_outputs <= n_outputs_all); + GGML_ASSERT((n_outputs_prev + n_outputs)*n_vocab <= (int64_t) logits_size); + ggml_backend_tensor_get_async(backend_res, t_logits, logits_out, 0, n_outputs*n_vocab*sizeof(float)); + } + } - if (cell_count) { - // For each row in the transposed matrix, read the values for the whole cell range - for (uint32_t j = 0; j < n_embd_v_gqa; ++j) { - const size_t dst_offset = (kv_self.head + j * kv_self.size) * v_size_el; - ggml_backend_tensor_set(kv_self.v_l[il], read(cell_count * v_size_el), dst_offset, cell_count * v_size_el); + // extract embeddings + if (t_embd && n_outputs > 0) { + ggml_backend_t backend_embd = ggml_backend_sched_get_tensor_backend(sched.get(), t_embd); + GGML_ASSERT(backend_embd != nullptr); + + switch (cparams.pooling_type) { + case LLAMA_POOLING_TYPE_NONE: + { + // extract token embeddings + GGML_ASSERT(embd != nullptr); + float * embd_out = embd + n_outputs_prev*n_embd; + + if (n_outputs) { + GGML_ASSERT( n_outputs_prev + n_outputs <= n_outputs_all); + GGML_ASSERT((n_outputs_prev + n_outputs)*n_embd <= (int64_t) embd_size); + ggml_backend_tensor_get_async(backend_embd, t_embd, embd_out, 0, n_outputs*n_embd*sizeof(float)); + } + } break; + case LLAMA_POOLING_TYPE_MEAN: + case LLAMA_POOLING_TYPE_CLS: + case LLAMA_POOLING_TYPE_LAST: + { + // extract sequence embeddings (cleared before 
processing each batch) + auto & embd_seq_out = embd_seq; + + for (uint32_t s = 0; s < ubatch.n_seqs; ++s) { + const llama_seq_id seq_id = ubatch.seq_id[s][0]; + if (embd_seq_out.find(seq_id) != embd_seq_out.end()) { + continue; + } + embd_seq_out[seq_id].resize(n_embd); + ggml_backend_tensor_get_async(backend_embd, t_embd, embd_seq_out[seq_id].data(), (n_embd*seq_id)*sizeof(float), n_embd*sizeof(float)); + } + } break; + case LLAMA_POOLING_TYPE_RANK: + { + // extract the rerank score - a single float per sequence + auto & embd_seq_out = embd_seq; + + for (uint32_t s = 0; s < ubatch.n_seqs; ++s) { + const llama_seq_id seq_id = ubatch.seq_id[s][0]; + if (embd_seq_out.find(seq_id) != embd_seq_out.end()) { + continue; + } + embd_seq_out[seq_id].resize(1); + ggml_backend_tensor_get_async(backend_embd, t_embd, embd_seq_out[seq_id].data(), (seq_id)*sizeof(float), sizeof(float)); + } + } break; + case LLAMA_POOLING_TYPE_UNSPECIFIED: + { + GGML_ABORT("unknown pooling type"); } - } } } - return true; + + n_outputs_prev += n_outputs; } - void read_kv_cache(struct llama_context * ctx, llama_seq_id seq_id = -1) { - uint32_t cell_count; - read_to(&cell_count, sizeof(cell_count)); + // finalize the batch processing + bg.done(); - bool res = read_kv_cache_meta(ctx, cell_count, seq_id) && read_kv_cache_data(ctx, cell_count); + // set output mappings + { + bool sorted_output = true; - if (!res) { - if (seq_id == -1) { - llama_kv_cache_clear(ctx); - } else { - llama_kv_cache_seq_rm(ctx, seq_id, -1, -1); + GGML_ASSERT(sbatch.out_ids.size() == (size_t) n_outputs_all); + + for (int64_t i = 0; i < n_outputs_all; ++i) { + int64_t out_id = sbatch.out_ids[i]; + output_ids[out_id] = i; + if (out_id != i) { + sorted_output = false; } - throw std::runtime_error("failed to restore kv cache"); + } + + if (sorted_output) { + sbatch.out_ids.clear(); } } -}; -struct llama_data_write_dummy : llama_data_write { - size_t size_written = 0; + // set to total number of outputs in the batch, for use in llama_get_logits_ith + n_outputs = n_outputs_all; - llama_data_write_dummy() {} + // wait for the computation to finish (automatically done when obtaining the model output) + //synchronize(); - void write(const void * /* src */, size_t size) override { - size_written += size; - } + // decide if we need to defrag the kv cache + if (cparams.causal_attn && cparams.defrag_thold > 0.0f) { + // - do not defrag small contexts (i.e. < 2048 tokens) + // - count the padding towards the number of used tokens + const float fragmentation = kv_self->n >= 2048 ? std::max(0.0f, 1.0f - float(kv_self->used + kv_self->get_padding(cparams))/float(kv_self->n)) : 0.0f; - void write_tensor_data(const struct ggml_tensor * /* tensor */, size_t /* offset */, size_t size) override { - size_written += size; - } + // queue defragmentation for next llama_kv_cache_update + if (fragmentation > cparams.defrag_thold) { + LLAMA_LOG_DEBUG("%s: fragmentation: %.2f - requesting defrag\n", __func__, fragmentation); - size_t get_size_written() override { - return size_written; + kv_self->defrag(); + } } -}; -struct llama_data_write_buffer : llama_data_write { - uint8_t * ptr; - size_t buf_size = 0; - size_t size_written = 0; + // Reset state for the next token before backend sync, to allow the CPU activities in the reset to + // overlap with device computation. 
+ ggml_backend_sched_reset(sched.get()); - llama_data_write_buffer(uint8_t * p, size_t len) : ptr(p), buf_size(len) {} + return 0; +} - void write(const void * src, size_t size) override { - if (size > buf_size) { - throw std::runtime_error("unexpectedly reached end of buffer"); - } - memcpy(ptr, src, size); - ptr += size; - size_written += size; - buf_size -= size; - } +// +// output +// - void write_tensor_data(const struct ggml_tensor * tensor, size_t offset, size_t size) override { - if (size > buf_size) { - throw std::runtime_error("unexpectedly reached end of buffer"); - } - ggml_backend_tensor_get(tensor, ptr, offset, size); - ptr += size; - size_written += size; - buf_size -= size; - } +int32_t llama_context::output_reserve(int32_t n_outputs) { + const auto & hparams = model.hparams; + const auto & vocab = model.vocab; - size_t get_size_written() override { - return size_written; - } -}; + const int64_t n_outputs_max = std::max(n_outputs, n_seq_max()); -struct llama_data_read_buffer : llama_data_read { - const uint8_t * ptr; - size_t buf_size = 0; - size_t size_read = 0; + const auto n_batch = cparams.n_batch; + const auto n_vocab = vocab.n_tokens(); + const auto n_embd = hparams.n_embd; - llama_data_read_buffer(const uint8_t * p, size_t len) : ptr(p), buf_size(len) {} + // TODO: use a per-batch flag for logits presence instead + bool has_logits = !cparams.embeddings; + bool has_embd = cparams.embeddings && (cparams.pooling_type == LLAMA_POOLING_TYPE_NONE); - const uint8_t * read(size_t size) override { - const uint8_t * base_ptr = ptr; - if (size > buf_size) { - throw std::runtime_error("unexpectedly reached end of buffer"); - } - ptr += size; - size_read += size; - buf_size -= size; - return base_ptr; + // TODO: hacky enc-dec support + if (model.arch == LLM_ARCH_T5) { + has_logits = true; + has_embd = true; } - void read_to(void * dst, size_t size) override { - memcpy(dst, read(size), size); - } + logits_size = has_logits ? n_vocab*n_outputs_max : 0; + embd_size = has_embd ? n_embd*n_outputs_max : 0; - size_t get_size_read() override { - return size_read; + if (output_ids.empty()) { + // init, never resized afterwards + output_ids.resize(n_batch); } -}; -struct llama_data_write_file : llama_data_write { - llama_file * file; - size_t size_written = 0; - std::vector temp_buffer; + const size_t prev_size = buf_output ? ggml_backend_buffer_get_size(buf_output.get()) : 0; + const size_t new_size = (logits_size + embd_size) * sizeof(float); - llama_data_write_file(llama_file * f) : file(f) {} + // alloc only when more than the current capacity is required + // TODO: also consider shrinking the buffer + if (!buf_output || prev_size < new_size) { + if (buf_output) { +#ifndef NDEBUG + // This doesn't happen often, but may be annoying in some cases (like the HellaSwag benchmark) + LLAMA_LOG_INFO("%s: reallocating output buffer from size %.02f MiB to %.02f MiB\n", __func__, prev_size / 1024.0 / 1024.0, new_size / 1024.0 / 1024.0); +#endif + buf_output = nullptr; + logits = nullptr; + embd = nullptr; + } + + auto * buft = ggml_backend_cpu_buffer_type(); + // try to use the host buffer of the device where the output tensor is allocated for faster transfer to system memory + auto * output_dev = model.dev_output(); + auto * output_dev_host_buft = output_dev ? 
ggml_backend_dev_host_buffer_type(output_dev) : nullptr; + if (output_dev_host_buft) { + buft = output_dev_host_buft; + } + buf_output.reset(ggml_backend_buft_alloc_buffer(buft, new_size)); + if (buf_output == nullptr) { + LLAMA_LOG_ERROR("%s: failed to allocate output buffer of size %.2f MiB\n", __func__, new_size / (1024.0 * 1024.0)); + return 0; + } + } + + float * output_base = (float *) ggml_backend_buffer_get_base(buf_output.get()); + + logits = has_logits ? output_base : nullptr; + embd = has_embd ? output_base + logits_size : nullptr; + + // set all ids as invalid (negative) + std::fill(output_ids.begin(), output_ids.end(), -1); + + ggml_backend_buffer_clear(buf_output.get(), 0); + + this->n_outputs = 0; + this->n_outputs_max = n_outputs_max; + + return n_outputs_max; +} + +void llama_context::output_reorder() { + auto & out_ids = sbatch.out_ids; + if (!out_ids.empty()) { + const uint32_t n_vocab = model.vocab.n_tokens(); + const uint32_t n_embd = model.hparams.n_embd; + + GGML_ASSERT((size_t) n_outputs == out_ids.size()); + + // TODO: is there something more efficient which also minimizes swaps? + // selection sort, to minimize swaps (from https://en.wikipedia.org/wiki/Selection_sort) + for (int32_t i = 0; i < n_outputs - 1; ++i) { + int32_t j_min = i; + for (int32_t j = i + 1; j < n_outputs; ++j) { + if (out_ids[j] < out_ids[j_min]) { + j_min = j; + } + } + if (j_min == i) { continue; } + std::swap(out_ids[i], out_ids[j_min]); + if (logits_size > 0) { + for (uint32_t k = 0; k < n_vocab; k++) { + std::swap(logits[i*n_vocab + k], logits[j_min*n_vocab + k]); + } + } + if (embd_size > 0) { + for (uint32_t k = 0; k < n_embd; k++) { + std::swap(embd[i*n_embd + k], embd[j_min*n_embd + k]); + } + } + } + std::fill(output_ids.begin(), output_ids.end(), -1); + for (int32_t i = 0; i < n_outputs; ++i) { + output_ids[out_ids[i]] = i; + } + out_ids.clear(); + } +} + +// +// graph +// + +int32_t llama_context::graph_max_nodes() const { + return std::max(8192, 5*model.n_tensors()); +} + +ggml_cgraph * llama_context::graph_init() { + ggml_init_params params = { + /*.mem_size =*/ buf_compute_meta.size(), + /*.mem_buffer =*/ buf_compute_meta.data(), + /*.no_alloc =*/ true, + }; + + ctx_compute.reset(ggml_init(params)); + + return ggml_new_graph_custom(ctx_compute.get(), graph_max_nodes(), false); +} + +llm_graph_result_ptr llama_context::graph_build( + ggml_context * ctx, + ggml_cgraph * gf, + const llama_ubatch & ubatch, + llm_graph_type gtype) { + return model.build_graph( + { + /*.ctx =*/ ctx, + /*.arch =*/ model.arch, + /*.hparams =*/ model.hparams, + /*.cparams =*/ cparams, + /*.ubatch =*/ ubatch, + /*.sched =*/ sched.get(), + /*.backend_cpu =*/ backend_cpu, + /*.cvec =*/ &cvec, + /*.loras =*/ &loras, + /*.memory =*/ kv_self.get(), + /*.cross =*/ &cross, + /*.n_outputs =*/ n_outputs, + /*.cb =*/ graph_get_cb(), + }, gf, gtype); +} + +ggml_status llama_context::graph_compute( + ggml_cgraph * gf, + bool batched) { + int n_threads = batched ? cparams.n_threads_batch : cparams.n_threads; + ggml_threadpool_t tp = batched ? 
threadpool_batch : threadpool; + + if (backend_cpu != nullptr) { + auto * reg = ggml_backend_dev_backend_reg(ggml_backend_get_device(backend_cpu)); + auto * set_threadpool_fn = (decltype(ggml_backend_cpu_set_threadpool) *) ggml_backend_reg_get_proc_address(reg, "ggml_backend_cpu_set_threadpool"); + set_threadpool_fn(backend_cpu, tp); + } + + // set the number of threads for all the backends + for (const auto & set_n_threads_fn : set_n_threads_fns) { + set_n_threads_fn.second(set_n_threads_fn.first, n_threads); + } + + auto status = ggml_backend_sched_graph_compute_async(sched.get(), gf); + if (status != GGML_STATUS_SUCCESS) { + LLAMA_LOG_ERROR("%s: ggml_backend_sched_graph_compute_async failed with error %d\n", __func__, status); + } + + // fprintf(stderr, "splits: %d\n", ggml_backend_sched_get_n_splits(sched)); + + return status; +} + +llm_graph_cb llama_context::graph_get_cb() const { + return [&](const llama_ubatch & ubatch, ggml_tensor * cur, const char * name, int il) { + if (il >= 0) { + ggml_format_name(cur, "%s-%d", name, il); + } else { + ggml_set_name(cur, name); + } + + if (!cparams.offload_kqv) { + if (strcmp(name, "kqv_merged_cont") == 0) { + // all nodes between the KV store and the attention output are run on the CPU + ggml_backend_sched_set_tensor_backend(sched.get(), cur, backend_cpu); + } + } + + // norm may be automatically assigned to the backend of the previous layer, increasing data transfer between backends + // FIXME: fix in ggml_backend_sched + const bool full_offload = model.params.n_gpu_layers > (int) model.hparams.n_layer; + if (ubatch.n_tokens < 32 || full_offload) { + if (il != -1 && strcmp(name, "norm") == 0) { + const auto & dev_layer = model.dev_layer(il); + for (const auto & backend : backends) { + if (ggml_backend_get_device(backend.get()) == dev_layer) { + if (ggml_backend_supports_op(backend.get(), cur)) { + ggml_backend_sched_set_tensor_backend(sched.get(), cur, backend.get()); + } + } + } + } + } + }; +} + +// +// state save/load +// + +class llama_io_write_dummy : public llama_io_write_i { +public: + llama_io_write_dummy() = default; + + void write(const void * /* src */, size_t size) override { + size_written += size; + } + + void write_tensor(const ggml_tensor * /* tensor */, size_t /* offset */, size_t size) override { + size_written += size; + } + + size_t n_bytes() override { + return size_written; + } + +private: + size_t size_written = 0; +}; + +class llama_io_write_buffer : public llama_io_write_i { +public: + llama_io_write_buffer( + uint8_t * p, size_t len) : ptr(p), buf_size(len) {} + + void write(const void * src, size_t size) override { + if (size > buf_size) { + throw std::runtime_error("unexpectedly reached end of buffer"); + } + memcpy(ptr, src, size); + ptr += size; + size_written += size; + buf_size -= size; + } + + void write_tensor(const ggml_tensor * tensor, size_t offset, size_t size) override { + if (size > buf_size) { + throw std::runtime_error("unexpectedly reached end of buffer"); + } + ggml_backend_tensor_get(tensor, ptr, offset, size); + ptr += size; + size_written += size; + buf_size -= size; + } + + size_t n_bytes() override { + return size_written; + } + +private: + uint8_t * ptr; + size_t buf_size = 0; + size_t size_written = 0; +}; + +class llama_io_read_buffer : public llama_io_read_i { +public: + llama_io_read_buffer(const uint8_t * p, size_t len) : ptr(p), buf_size(len) {} + + const uint8_t * read(size_t size) override { + const uint8_t * base_ptr = ptr; + if (size > buf_size) { + throw 
std::runtime_error("unexpectedly reached end of buffer"); + } + ptr += size; + size_read += size; + buf_size -= size; + return base_ptr; + } + + void read_to(void * dst, size_t size) override { + memcpy(dst, read(size), size); + } + + size_t n_bytes() override { + return size_read; + } + +private: + const uint8_t * ptr; + size_t buf_size = 0; + size_t size_read = 0; +}; + +class llama_io_write_file : public llama_io_write_i { +public: + llama_io_write_file(llama_file * f) : file(f) {} void write(const void * src, size_t size) override { file->write_raw(src, size); size_written += size; } - void write_tensor_data(const struct ggml_tensor * tensor, size_t offset, size_t size) override { + void write_tensor(const ggml_tensor * tensor, size_t offset, size_t size) override { temp_buffer.resize(size); ggml_backend_tensor_get(tensor, temp_buffer.data(), offset, size); write(temp_buffer.data(), temp_buffer.size()); } - size_t get_size_written() override { + size_t n_bytes() override { return size_written; } -}; -struct llama_data_read_file : llama_data_read { +private: llama_file * file; - size_t size_read = 0; + size_t size_written = 0; std::vector temp_buffer; +}; - llama_data_read_file(llama_file * f) : file(f) {} +class llama_io_read_file : public llama_io_read_i { +public: + llama_io_read_file(llama_file * f) : file(f) {} void read_to(void * dst, size_t size) override { file->read_raw(dst, size); @@ -1489,89 +1813,78 @@ struct llama_data_read_file : llama_data_read { return temp_buffer.data(); } - size_t get_size_read() override { + size_t n_bytes() override { return size_read; } -}; - -/** copy state data into either a buffer or file depending on the passed in context - * - * file context: - * llama_file file("/path", "wb"); - * llama_data_write_file data_ctx(&file); - * llama_state_get_data_internal(ctx, data_ctx); - * - * buffer context: - * std::vector buf(max_size, 0); - * llama_data_write_buffer data_ctx(buf.data(), max_size); - * llama_state_get_data_internal(ctx, data_ctx); - * -*/ -static size_t llama_state_get_data_internal(struct llama_context * ctx, llama_data_write & data_ctx) { - llama_synchronize(ctx); - - data_ctx.write_model_info(ctx); - - // copy outputs - data_ctx.write_output_ids(ctx); - data_ctx.write_logits(ctx); - data_ctx.write_embeddings(ctx); - data_ctx.write_kv_cache(ctx); +private: + llama_file * file; + size_t size_read = 0; + std::vector temp_buffer; +}; - return data_ctx.get_size_written(); +size_t llama_context::state_get_size() { + llama_io_write_dummy io; + try { + return state_write_data(io); + } catch (const std::exception & err) { + LLAMA_LOG_ERROR("%s: error getting state size: %s\n", __func__, err.what()); + return 0; + } } -size_t llama_state_get_data(struct llama_context * ctx, uint8_t * dst, size_t size) { - llama_data_write_buffer data_ctx(dst, size); +size_t llama_context::state_get_data(uint8_t * dst, size_t size) { + llama_io_write_buffer io(dst, size); try { - return llama_state_get_data_internal(ctx, data_ctx); + return state_write_data(io); } catch (const std::exception & err) { LLAMA_LOG_ERROR("%s: error saving state: %s\n", __func__, err.what()); return 0; } } -// Returns the *actual* size of the state. -// Intended to be used when saving to state to a buffer. 
-size_t llama_state_get_size(struct llama_context * ctx) { - llama_data_write_dummy data_ctx; +size_t llama_context::state_set_data(const uint8_t * src, size_t size) { + llama_io_read_buffer io(src, size); try { - return llama_state_get_data_internal(ctx, data_ctx); + return state_read_data(io); } catch (const std::exception & err) { - LLAMA_LOG_ERROR("%s: error getting state size: %s\n", __func__, err.what()); + LLAMA_LOG_ERROR("%s: error loading state: %s\n", __func__, err.what()); return 0; } } -static size_t llama_state_set_data_internal(struct llama_context * ctx, llama_data_read & data_ctx) { - llama_synchronize(ctx); - - data_ctx.read_model_info(ctx); - - // set outputs - data_ctx.read_output_ids(ctx); - data_ctx.read_logits(ctx); - data_ctx.read_embeddings(ctx); - - data_ctx.read_kv_cache(ctx); +size_t llama_context::state_seq_get_size(llama_seq_id seq_id) { + llama_io_write_dummy io; + try { + return state_seq_write_data(io, seq_id); + } catch (const std::exception & err) { + LLAMA_LOG_ERROR("%s: error getting state size: %s\n", __func__, err.what()); + return 0; + } +} - return data_ctx.get_size_read(); +size_t llama_context::state_seq_get_data(llama_seq_id seq_id, uint8_t * dst, size_t size) { + llama_io_write_buffer io(dst, size); + try { + return state_seq_write_data(io, seq_id); + } catch (const std::exception & err) { + LLAMA_LOG_ERROR("%s: error saving state: %s\n", __func__, err.what()); + return 0; + } } -// Sets the state reading from the specified source address -size_t llama_state_set_data(struct llama_context * ctx, const uint8_t * src, size_t size) { - llama_data_read_buffer data_ctx(src, size); +size_t llama_context::state_seq_set_data(llama_seq_id seq_id, const uint8_t * src, size_t size) { + llama_io_read_buffer io(src, size); try { - return llama_state_set_data_internal(ctx, data_ctx); + return state_seq_read_data(io, seq_id); } catch (const std::exception & err) { LLAMA_LOG_ERROR("%s: error loading state: %s\n", __func__, err.what()); return 0; } } -static bool llama_state_load_file_internal(struct llama_context * ctx, const char * path_session, llama_token * tokens_out, size_t n_token_capacity, size_t * n_token_count_out) { - llama_file file(path_session, "rb"); +bool llama_context::state_load_file(const char * filepath, llama_token * tokens_out, size_t n_token_capacity, size_t * n_token_count_out) { + llama_file file(filepath, "rb"); // sanity checks { @@ -1601,28 +1914,20 @@ static bool llama_state_load_file_internal(struct llama_context * ctx, const cha { const size_t n_state_size_cur = file.size() - file.tell(); - llama_data_read_file data_ctx(&file); - const size_t n_read = llama_state_set_data_internal(ctx, data_ctx); + llama_io_read_file io( &file); + const size_t n_read = state_read_data(io); if (n_read != n_state_size_cur) { LLAMA_LOG_ERROR("%s: did not read all of the session file data! 
size %zu, got %zu\n", __func__, n_state_size_cur, n_read); return false; } } - return true; -} -bool llama_state_load_file(struct llama_context * ctx, const char * path_session, llama_token * tokens_out, size_t n_token_capacity, size_t * n_token_count_out) { - try { - return llama_state_load_file_internal(ctx, path_session, tokens_out, n_token_capacity, n_token_count_out); - } catch (const std::exception & err) { - LLAMA_LOG_ERROR("%s: error loading session file: %s\n", __func__, err.what()); - return false; - } + return true; } -static bool llama_state_save_file_internal(struct llama_context * ctx, const char * path_session, const llama_token * tokens, size_t n_token_count) { - llama_file file(path_session, "wb"); +bool llama_context::state_save_file(const char * filepath, const llama_token * tokens, size_t n_token_count) { + llama_file file(filepath, "wb"); file.write_u32(LLAMA_SESSION_MAGIC); file.write_u32(LLAMA_SESSION_VERSION); @@ -1632,82 +1937,13 @@ static bool llama_state_save_file_internal(struct llama_context * ctx, const cha file.write_raw(tokens, sizeof(llama_token) * n_token_count); // save the context state using stream saving - llama_data_write_file data_ctx(&file); - llama_state_get_data_internal(ctx, data_ctx); + llama_io_write_file io(&file); + state_write_data(io); return true; } -bool llama_state_save_file(struct llama_context * ctx, const char * path_session, const llama_token * tokens, size_t n_token_count) { - try { - return llama_state_save_file_internal(ctx, path_session, tokens, n_token_count); - } catch (const std::exception & err) { - LLAMA_LOG_ERROR("%s: error saving session file: %s\n", __func__, err.what()); - return false; - } -} - -static size_t llama_state_seq_get_data_internal(struct llama_context * ctx, llama_data_write & data_ctx, llama_seq_id seq_id) { - llama_synchronize(ctx); - - data_ctx.write_kv_cache(ctx, seq_id); - - return data_ctx.get_size_written(); -} - -size_t llama_state_seq_get_size(struct llama_context * ctx, llama_seq_id seq_id) { - llama_data_write_dummy data_ctx; - return llama_state_seq_get_data_internal(ctx, data_ctx, seq_id); -} - -size_t llama_state_seq_get_data(struct llama_context * ctx, uint8_t * dst, size_t size, llama_seq_id seq_id) { - llama_data_write_buffer data_ctx(dst, size); - try { - return llama_state_seq_get_data_internal(ctx, data_ctx, seq_id); - } catch (const std::exception & err) { - LLAMA_LOG_ERROR("%s: error saving sequence state: %s\n", __func__, err.what()); - return 0; - } -} - -static size_t llama_state_seq_set_data_internal(struct llama_context * ctx, llama_data_read & data_ctx, llama_seq_id dest_seq_id) { - llama_synchronize(ctx); - - data_ctx.read_kv_cache(ctx, dest_seq_id); - - return data_ctx.get_size_read(); -} - -size_t llama_state_seq_set_data(struct llama_context * ctx, const uint8_t * src, size_t size, llama_seq_id dest_seq_id) { - llama_data_read_buffer data_ctx(src, size); - try { - return llama_state_seq_set_data_internal(ctx, data_ctx, dest_seq_id); - } catch (const std::exception & err) { - LLAMA_LOG_ERROR("%s: error loading sequence state: %s\n", __func__, err.what()); - return 0; - } -} - -static size_t llama_state_seq_save_file_internal(struct llama_context * ctx, const char * filepath, llama_seq_id seq_id, const llama_token * tokens, size_t n_token_count) { - llama_file file(filepath, "wb"); - - file.write_u32(LLAMA_STATE_SEQ_MAGIC); - file.write_u32(LLAMA_STATE_SEQ_VERSION); - - // save the prompt - file.write_u32((uint32_t) n_token_count); - file.write_raw(tokens, sizeof(llama_token) * 
n_token_count); - - // save the context state using stream saving - llama_data_write_file data_ctx(&file); - llama_state_seq_get_data_internal(ctx, data_ctx, seq_id); - - const size_t res = file.tell(); - GGML_ASSERT(res == sizeof(uint32_t) * 3 + sizeof(llama_token) * n_token_count + data_ctx.get_size_written()); - return res; -} - -static size_t llama_state_seq_load_file_internal(struct llama_context * ctx, const char * filepath, llama_seq_id dest_seq_id, llama_token * tokens_out, size_t n_token_capacity, size_t * n_token_count_out) { +size_t llama_context::state_seq_load_file(llama_seq_id seq_id, const char * filepath, llama_token * tokens_out, size_t n_token_capacity, size_t * n_token_count_out) { llama_file file(filepath, "rb"); // version checks @@ -1737,8 +1973,8 @@ static size_t llama_state_seq_load_file_internal(struct llama_context * ctx, con // restore the context state { const size_t state_size = file.size() - file.tell(); - llama_data_read_file data_ctx(&file); - const size_t nread = llama_state_seq_set_data_internal(ctx, data_ctx, dest_seq_id); + llama_io_read_file io(&file); + const size_t nread = state_seq_read_data(io, seq_id); if (!nread) { LLAMA_LOG_ERROR("%s: failed to restore sequence state\n", __func__); return 0; @@ -1750,26 +1986,785 @@ static size_t llama_state_seq_load_file_internal(struct llama_context * ctx, con return file.tell(); } -size_t llama_state_seq_save_file(struct llama_context * ctx, const char * filepath, llama_seq_id seq_id, const llama_token * tokens, size_t n_token_count) { - try { - return llama_state_seq_save_file_internal(ctx, filepath, seq_id, tokens, n_token_count); - } catch (const std::exception & err) { - LLAMA_LOG_ERROR("%s: error saving sequence state file: %s\n", __func__, err.what()); - return 0; - } -} +size_t llama_context::state_seq_save_file(llama_seq_id seq_id, const char * filepath, const llama_token * tokens, size_t n_token_count) { + llama_file file(filepath, "wb"); -size_t llama_state_seq_load_file(struct llama_context * ctx, const char * filepath, llama_seq_id dest_seq_id, llama_token * tokens_out, size_t n_token_capacity, size_t * n_token_count_out) { - try { - return llama_state_seq_load_file_internal(ctx, filepath, dest_seq_id, tokens_out, n_token_capacity, n_token_count_out); - } catch (const std::exception & err) { - LLAMA_LOG_ERROR("%s: error loading sequence state file: %s\n", __func__, err.what()); - return 0; - } + file.write_u32(LLAMA_STATE_SEQ_MAGIC); + file.write_u32(LLAMA_STATE_SEQ_VERSION); + + // save the prompt + file.write_u32((uint32_t) n_token_count); + file.write_raw(tokens, sizeof(llama_token) * n_token_count); + + // save the context state using stream saving + llama_io_write_file io(&file); + state_seq_write_data(io, seq_id); + + const size_t res = file.tell(); + GGML_ASSERT(res == sizeof(uint32_t) * 3 + sizeof(llama_token) * n_token_count + io.n_bytes()); + + return res; +} + +size_t llama_context::state_write_data(llama_io_write_i & io) { + LLAMA_LOG_DEBUG("%s: writing state\n", __func__); + + // write model info + { + LLAMA_LOG_DEBUG("%s: - writing model info\n", __func__); + + const std::string arch_str = llm_arch_name(model.arch); + io.write_string(arch_str); + // TODO: add more model-specific info which should prevent loading the session file if not identical + } + + // write output ids + { + LLAMA_LOG_DEBUG("%s: - writing output ids\n", __func__); + + output_reorder(); + + const auto n_outputs = this->n_outputs; + const auto & output_ids = this->output_ids; + + std::vector w_output_pos; + + 
GGML_ASSERT(n_outputs <= n_outputs_max); + + w_output_pos.resize(n_outputs); + + // build a more compact representation of the output ids + for (size_t i = 0; i < n_batch(); ++i) { + // map an output id to a position in the batch + int32_t pos = output_ids[i]; + if (pos >= 0) { + GGML_ASSERT(pos < n_outputs); + w_output_pos[pos] = i; + } + } + + io.write(&n_outputs, sizeof(n_outputs)); + + if (n_outputs) { + io.write(w_output_pos.data(), n_outputs * sizeof(int32_t)); + } + } + + // write logits + { + LLAMA_LOG_DEBUG("%s: - writing logits\n", __func__); + + const uint64_t logits_size = std::min((uint64_t) this->logits_size, (uint64_t) n_outputs * model.vocab.n_tokens()); + + io.write(&logits_size, sizeof(logits_size)); + + if (logits_size) { + io.write(logits, logits_size * sizeof(float)); + } + } + + // write embeddings + { + LLAMA_LOG_DEBUG("%s: - writing embeddings\n", __func__); + + const uint64_t embd_size = std::min((uint64_t) this->embd_size, (uint64_t) n_outputs * model.hparams.n_embd); + + io.write(&embd_size, sizeof(embd_size)); + + if (embd_size) { + io.write(embd, embd_size * sizeof(float)); + } + } + + LLAMA_LOG_DEBUG("%s: - writing KV self\n", __func__); + kv_self->state_write(io); + + return io.n_bytes(); +} + +size_t llama_context::state_read_data(llama_io_read_i & io) { + LLAMA_LOG_DEBUG("%s: reading state\n", __func__); + + // read model info + { + LLAMA_LOG_DEBUG("%s: - reading model info\n", __func__); + + const std::string cur_arch_str = llm_arch_name(model.arch); + + std::string arch_str; + io.read_string(arch_str); + if (cur_arch_str != arch_str) { + throw std::runtime_error(format("wrong model arch: '%s' instead of '%s'", arch_str.c_str(), cur_arch_str.c_str())); + } + // TODO: add more info which needs to be identical but which is not verified otherwise + } + + // read output ids + { + LLAMA_LOG_DEBUG("%s: - reading output ids\n", __func__); + + auto n_outputs = this->n_outputs; + io.read_to(&n_outputs, sizeof(n_outputs)); + + if (n_outputs > output_reserve(n_outputs)) { + throw std::runtime_error("could not reserve outputs"); + } + + std::vector output_pos; + + if (n_outputs) { + output_pos.resize(n_outputs); + io.read_to(output_pos.data(), n_outputs * sizeof(int32_t)); + + for (int32_t i = 0; i < (int32_t) output_pos.size(); ++i) { + int32_t id = output_pos[i]; + if ((uint32_t) id >= n_batch()) { + throw std::runtime_error(format("invalid output id, %d does not fit in batch size of %u", id, n_batch())); + } + this->output_ids[id] = i; + } + + this->n_outputs = n_outputs; + } + } + + // read logits + { + LLAMA_LOG_DEBUG("%s: - reading logits\n", __func__); + + uint64_t logits_size; + io.read_to(&logits_size, sizeof(logits_size)); + + if (this->logits_size < logits_size) { + throw std::runtime_error("logits buffer too small"); + } + + if (logits_size) { + io.read_to(this->logits, logits_size * sizeof(float)); + } + } + + // read embeddings + { + LLAMA_LOG_DEBUG("%s: - reading embeddings\n", __func__); + + uint64_t embd_size; + io.read_to(&embd_size, sizeof(embd_size)); + + if (this->embd_size < embd_size) { + throw std::runtime_error("embeddings buffer too small"); + } + + if (embd_size) { + io.read_to(this->embd, embd_size * sizeof(float)); + } + } + + LLAMA_LOG_DEBUG("%s: - reading KV self\n", __func__); + kv_self->state_read(io); + + return io.n_bytes(); +} + +size_t llama_context::state_seq_write_data(llama_io_write_i & io, llama_seq_id seq_id) { + GGML_UNUSED(seq_id); + + kv_self->state_write(io, seq_id); + + return io.n_bytes(); +} + +size_t 
llama_context::state_seq_read_data(llama_io_read_i & io, llama_seq_id seq_id) { + GGML_UNUSED(seq_id); + + kv_self->state_read(io, seq_id); + + return io.n_bytes(); +} + +// +// perf +// + +llama_perf_context_data llama_context::perf_get_data() const { + llama_perf_context_data data = {}; + + data.t_start_ms = 1e-3 * t_start_us; + data.t_load_ms = 1e-3 * t_load_us; + data.t_p_eval_ms = 1e-3 * t_p_eval_us; + data.t_eval_ms = 1e-3 * t_eval_us; + data.n_p_eval = std::max(1, n_p_eval); + data.n_eval = std::max(1, n_eval); + + return data; +} + +void llama_context::perf_reset() { + t_start_us = ggml_time_us(); + t_eval_us = n_eval = 0; + t_p_eval_us = n_p_eval = 0; +} + +// +// interface implementation +// + +llama_context_params llama_context_default_params() { + llama_context_params result = { + /*.n_ctx =*/ 512, + /*.n_batch =*/ 2048, + /*.n_ubatch =*/ 512, + /*.n_seq_max =*/ 1, + /*.n_threads =*/ GGML_DEFAULT_N_THREADS, // TODO: better default + /*.n_threads_batch =*/ GGML_DEFAULT_N_THREADS, + /*.rope_scaling_type =*/ LLAMA_ROPE_SCALING_TYPE_UNSPECIFIED, + /*.pooling_type =*/ LLAMA_POOLING_TYPE_UNSPECIFIED, + /*.attention_type =*/ LLAMA_ATTENTION_TYPE_UNSPECIFIED, + /*.rope_freq_base =*/ 0.0f, + /*.rope_freq_scale =*/ 0.0f, + /*.yarn_ext_factor =*/ -1.0f, + /*.yarn_attn_factor =*/ 1.0f, + /*.yarn_beta_fast =*/ 32.0f, + /*.yarn_beta_slow =*/ 1.0f, + /*.yarn_orig_ctx =*/ 0, + /*.defrag_thold =*/ -1.0f, + /*.cb_eval =*/ nullptr, + /*.cb_eval_user_data =*/ nullptr, + /*.type_k =*/ GGML_TYPE_F16, + /*.type_v =*/ GGML_TYPE_F16, + /*.logits_all =*/ false, + /*.embeddings =*/ false, + /*.offload_kqv =*/ true, + /*.flash_attn =*/ false, + /*.no_perf =*/ true, + /*.abort_callback =*/ nullptr, + /*.abort_callback_data =*/ nullptr, + }; + + return result; +} + +llama_context * llama_init_from_model( + llama_model * model, + llama_context_params params) { + if (!model) { + LLAMA_LOG_ERROR("%s: model cannot be NULL\n", __func__); + return nullptr; + } + + if (params.n_batch == 0 && params.n_ubatch == 0) { + LLAMA_LOG_ERROR("%s: n_batch and n_ubatch cannot both be zero\n", __func__); + return nullptr; + } + + if (params.n_ctx == 0 && model->hparams.n_ctx_train == 0) { + LLAMA_LOG_ERROR("%s: n_ctx and model->hparams.n_ctx_train cannot both be zero\n", __func__); + return nullptr; + } + + if (params.flash_attn && model->arch == LLM_ARCH_GROK) { + LLAMA_LOG_WARN("%s: flash_attn is not compatible with Grok - forcing off\n", __func__); + params.flash_attn = false; + } + + if (params.flash_attn && model->hparams.n_embd_head_k != model->hparams.n_embd_head_v) { + LLAMA_LOG_WARN("%s: flash_attn requires n_embd_head_k == n_embd_head_v - forcing off\n", __func__); + params.flash_attn = false; + } + + if (ggml_is_quantized(params.type_v) && !params.flash_attn) { + LLAMA_LOG_ERROR("%s: V cache quantization requires flash_attn\n", __func__); + return nullptr; + } + + try { + auto * ctx = new llama_context(*model, params); + return ctx; + } catch (const std::exception & err) { + LLAMA_LOG_ERROR("%s: failed to initialize the context: %s\n", __func__, err.what()); + } + + return nullptr; +} + +// deprecated +llama_context * llama_new_context_with_model( + llama_model * model, + llama_context_params params) { + return llama_init_from_model(model, params); +} + +void llama_free(llama_context * ctx) { + delete ctx; +} + +uint32_t llama_n_ctx(const llama_context * ctx) { + return ctx->n_ctx(); +} + +uint32_t llama_n_batch(const llama_context * ctx) { + return ctx->n_batch(); +} + +uint32_t llama_n_ubatch(const llama_context 
* ctx) { + return ctx->n_ubatch(); +} + +uint32_t llama_n_seq_max(const llama_context * ctx) { + return ctx->n_seq_max(); +} + +const llama_model * llama_get_model(const llama_context * ctx) { + return &ctx->get_model(); +} + +llama_kv_cache * llama_get_kv_self(llama_context * ctx) { + return ctx->get_kv_self(); +} + +void llama_kv_self_update(llama_context * ctx) { + ctx->kv_self_update(); +} + +enum llama_pooling_type llama_pooling_type(const llama_context * ctx) { + return ctx->pooling_type(); +} + +void llama_attach_threadpool( + llama_context * ctx, + ggml_threadpool_t threadpool, + ggml_threadpool_t threadpool_batch) { + ctx->attach_threadpool(threadpool, threadpool_batch); +} + +void llama_detach_threadpool(llama_context * ctx) { + ctx->detach_threadpool(); +} + +void llama_set_n_threads(llama_context * ctx, int32_t n_threads, int32_t n_threads_batch) { + ctx->set_n_threads(n_threads, n_threads_batch); +} + +int32_t llama_n_threads(llama_context * ctx) { + return ctx->n_threads(); +} + +int32_t llama_n_threads_batch(llama_context * ctx) { + return ctx->n_threads_batch(); +} + +void llama_set_abort_callback(llama_context * ctx, bool (*abort_callback)(void * data), void * abort_callback_data) { + ctx->set_abort_callback(abort_callback, abort_callback_data); +} + +void llama_set_embeddings(llama_context * ctx, bool embeddings) { + ctx->set_embeddings(embeddings); +} + +void llama_set_causal_attn(llama_context * ctx, bool causal_attn) { + ctx->set_causal_attn(causal_attn); +} + +void llama_synchronize(llama_context * ctx) { + ctx->synchronize(); +} + +float * llama_get_logits(llama_context * ctx) { + ctx->synchronize(); + + return ctx->get_logits(); +} + +float * llama_get_logits_ith(llama_context * ctx, int32_t i) { + ctx->synchronize(); + + return ctx->get_logits_ith(i); +} + +float * llama_get_embeddings(llama_context * ctx) { + ctx->synchronize(); + + return ctx->get_embeddings(); +} + +float * llama_get_embeddings_ith(llama_context * ctx, int32_t i) { + ctx->synchronize(); + + return ctx->get_embeddings_ith(i); +} + +float * llama_get_embeddings_seq(llama_context * ctx, llama_seq_id seq_id) { + ctx->synchronize(); + + return ctx->get_embeddings_seq(seq_id); +} + +// llama adapter API + +int32_t llama_set_adapter_lora( + llama_context * ctx, + llama_adapter_lora * adapter, + float scale) { + ctx->set_adapter_lora(adapter, scale); + + return 0; +} + +int32_t llama_rm_adapter_lora( + llama_context * ctx, + llama_adapter_lora * adapter) { + bool res = ctx->rm_adapter_lora(adapter); + + return res ? 0 : -1; +} + +void llama_clear_adapter_lora(llama_context * ctx) { + ctx->clear_adapter_lora(); +} + +int32_t llama_apply_adapter_cvec( + llama_context * ctx, + const float * data, + size_t len, + int32_t n_embd, + int32_t il_start, + int32_t il_end) { + bool res = ctx->apply_adapter_cvec(data, len, n_embd, il_start, il_end); + + return res ? 
0 : -1; +} + +// +// kv cache view +// + +llama_kv_cache_view llama_kv_cache_view_init(const llama_context * ctx, int32_t n_seq_max) { + const auto * kv = ctx->get_kv_self(); + if (kv == nullptr) { + LLAMA_LOG_WARN("%s: the context does not have a KV cache\n", __func__); + return {}; + } + + return llama_kv_cache_view_init(*kv, n_seq_max); +} + +void llama_kv_cache_view_update(const llama_context * ctx, llama_kv_cache_view * view) { + const auto * kv = ctx->get_kv_self(); + if (kv == nullptr) { + LLAMA_LOG_WARN("%s: the context does not have a KV cache\n", __func__); + return; + } + + llama_kv_cache_view_update(view, kv); +} + +// +// kv cache +// + +// deprecated +int32_t llama_get_kv_cache_token_count(const llama_context * ctx) { + return llama_kv_self_n_tokens(ctx); +} + +int32_t llama_kv_self_n_tokens(const llama_context * ctx) { + return llama_kv_cache_n_tokens(ctx->get_kv_self()); +} + +// deprecated +int32_t llama_get_kv_cache_used_cells(const llama_context * ctx) { + return llama_kv_self_used_cells(ctx); +} + +int32_t llama_kv_self_used_cells(const llama_context * ctx) { + return llama_kv_cache_used_cells(ctx->get_kv_self()); +} + +// deprecated +void llama_kv_cache_clear(llama_context * ctx) { + llama_kv_self_clear(ctx); +} + +void llama_kv_self_clear(llama_context * ctx) { + llama_kv_cache_clear(ctx->get_kv_self()); +} + +// deprecated +bool llama_kv_cache_seq_rm( + llama_context * ctx, + llama_seq_id seq_id, + llama_pos p0, + llama_pos p1) { + return llama_kv_self_seq_rm(ctx, seq_id, p0, p1); +} + +bool llama_kv_self_seq_rm( + llama_context * ctx, + llama_seq_id seq_id, + llama_pos p0, + llama_pos p1) { + return llama_kv_cache_seq_rm(ctx->get_kv_self(), seq_id, p0, p1); +} + +// deprecated +void llama_kv_cache_seq_cp( + llama_context * ctx, + llama_seq_id seq_id_src, + llama_seq_id seq_id_dst, + llama_pos p0, + llama_pos p1) { + return llama_kv_self_seq_cp(ctx, seq_id_src, seq_id_dst, p0, p1); +} + +void llama_kv_self_seq_cp( + llama_context * ctx, + llama_seq_id seq_id_src, + llama_seq_id seq_id_dst, + llama_pos p0, + llama_pos p1) { + return llama_kv_cache_seq_cp(ctx->get_kv_self(), seq_id_src, seq_id_dst, p0, p1); +} + +// deprecated +void llama_kv_cache_seq_keep( + llama_context * ctx, + llama_seq_id seq_id) { + return llama_kv_self_seq_keep(ctx, seq_id); +} + +void llama_kv_self_seq_keep(llama_context * ctx, llama_seq_id seq_id) { + return llama_kv_cache_seq_keep(ctx->get_kv_self(), seq_id); +} + +// deprecated +void llama_kv_cache_seq_add( + llama_context * ctx, + llama_seq_id seq_id, + llama_pos p0, + llama_pos p1, + llama_pos delta) { + return llama_kv_self_seq_add(ctx, seq_id, p0, p1, delta); +} + +void llama_kv_self_seq_add( + llama_context * ctx, + llama_seq_id seq_id, + llama_pos p0, + llama_pos p1, + llama_pos delta) { + return llama_kv_cache_seq_add(ctx->get_kv_self(), seq_id, p0, p1, delta); +} + +// deprecated +void llama_kv_cache_seq_div( + llama_context * ctx, + llama_seq_id seq_id, + llama_pos p0, + llama_pos p1, + int d) { + return llama_kv_self_seq_div(ctx, seq_id, p0, p1, d); +} + +void llama_kv_self_seq_div( + llama_context * ctx, + llama_seq_id seq_id, + llama_pos p0, + llama_pos p1, + int d) { + return llama_kv_cache_seq_div(ctx->get_kv_self(), seq_id, p0, p1, d); +} + +// deprecated +llama_pos llama_kv_cache_seq_pos_max(llama_context * ctx, llama_seq_id seq_id) { + return llama_kv_self_seq_pos_max(ctx, seq_id); +} + +llama_pos llama_kv_self_seq_pos_max(llama_context * ctx, llama_seq_id seq_id) { + return llama_kv_cache_seq_pos_max(ctx->get_kv_self(), 
seq_id); +} + +// deprecated +void llama_kv_cache_defrag(llama_context * ctx) { + return llama_kv_self_defrag(ctx); +} + +void llama_kv_self_defrag(llama_context * ctx) { + llama_kv_cache_defrag(ctx->get_kv_self()); +} + +// deprecated +bool llama_kv_cache_can_shift(const llama_context * ctx) { + return llama_kv_self_can_shift(ctx); +} + +bool llama_kv_self_can_shift(const llama_context * ctx) { + return llama_kv_cache_can_shift(ctx->get_kv_self()); +} + +// deprecated +void llama_kv_cache_update(llama_context * ctx) { + llama_kv_self_update(ctx); +} + +// llama state API + +// deprecated +size_t llama_get_state_size(llama_context * ctx) { + return llama_state_get_size(ctx); +} + +// deprecated +size_t llama_copy_state_data(llama_context * ctx, uint8_t * dst) { + return llama_state_get_data(ctx, dst, -1); +} + +// deprecated +size_t llama_set_state_data(llama_context * ctx, const uint8_t * src) { + return llama_state_set_data(ctx, src, -1); +} + +// deprecated +bool llama_load_session_file(llama_context * ctx, const char * path_session, llama_token * tokens_out, size_t n_token_capacity, size_t * n_token_count_out) { + return llama_state_load_file(ctx, path_session, tokens_out, n_token_capacity, n_token_count_out); +} + +// deprecated +bool llama_save_session_file(llama_context * ctx, const char * path_session, const llama_token * tokens, size_t n_token_count) { + return llama_state_save_file(ctx, path_session, tokens, n_token_count); +} + +// Returns the *actual* size of the state. +// Intended to be used when saving to state to a buffer. +size_t llama_state_get_size(llama_context * ctx) { + return ctx->state_get_size(); +} + +size_t llama_state_get_data(llama_context * ctx, uint8_t * dst, size_t size) { + ctx->synchronize(); + + return ctx->state_get_data(dst, size); +} + +// Sets the state reading from the specified source address +size_t llama_state_set_data(llama_context * ctx, const uint8_t * src, size_t size) { + ctx->synchronize(); + + return ctx->state_set_data(src, size); +} + +bool llama_state_load_file(llama_context * ctx, const char * path_session, llama_token * tokens_out, size_t n_token_capacity, size_t * n_token_count_out) { + ctx->synchronize(); + + try { + return ctx->state_load_file(path_session, tokens_out, n_token_capacity, n_token_count_out); + } catch (const std::exception & err) { + LLAMA_LOG_ERROR("%s: error loading session file: %s\n", __func__, err.what()); + return false; + } +} + +bool llama_state_save_file(llama_context * ctx, const char * path_session, const llama_token * tokens, size_t n_token_count) { + ctx->synchronize(); + + try { + return ctx->state_save_file(path_session, tokens, n_token_count); + } catch (const std::exception & err) { + LLAMA_LOG_ERROR("%s: error saving session file: %s\n", __func__, err.what()); + return false; + } +} + +size_t llama_state_seq_get_size(llama_context * ctx, llama_seq_id seq_id) { + return ctx->state_seq_get_size(seq_id); +} + +size_t llama_state_seq_get_data(llama_context * ctx, uint8_t * dst, size_t size, llama_seq_id seq_id) { + ctx->synchronize(); + + return ctx->state_seq_get_data(seq_id, dst, size); +} + +size_t llama_state_seq_set_data(llama_context * ctx, const uint8_t * src, size_t size, llama_seq_id seq_id) { + ctx->synchronize(); + + return ctx->state_seq_set_data(seq_id, src, size); +} + +size_t llama_state_seq_save_file(llama_context * ctx, const char * filepath, llama_seq_id seq_id, const llama_token * tokens, size_t n_token_count) { + ctx->synchronize(); + + try { + return ctx->state_seq_save_file(seq_id, 
filepath, tokens, n_token_count); + } catch (const std::exception & err) { + LLAMA_LOG_ERROR("%s: error saving sequence state file: %s\n", __func__, err.what()); + return 0; + } +} + +size_t llama_state_seq_load_file(llama_context * ctx, const char * filepath, llama_seq_id dest_seq_id, llama_token * tokens_out, size_t n_token_capacity, size_t * n_token_count_out) { + ctx->synchronize(); + + try { + return ctx->state_seq_load_file(dest_seq_id, filepath, tokens_out, n_token_capacity, n_token_count_out); + } catch (const std::exception & err) { + LLAMA_LOG_ERROR("%s: error loading sequence state file: %s\n", __func__, err.what()); + return 0; + } +} + +/// + +int32_t llama_encode( + llama_context * ctx, + llama_batch batch) { + const int ret = ctx->encode(batch); + if (ret != 0) { + LLAMA_LOG_ERROR("%s: failed to encode, ret = %d\n", __func__, ret); + } + + return ret; +} + +int32_t llama_decode( + llama_context * ctx, + llama_batch batch) { + const int ret = ctx->decode(batch); + if (ret != 0) { + LLAMA_LOG_ERROR("%s: failed to decode, ret = %d\n", __func__, ret); + } + + return ret; +} + +// +// perf +// + +llama_perf_context_data llama_perf_context(const llama_context * ctx) { + llama_perf_context_data data = {}; + + if (ctx == nullptr) { + return data; + } + + data = ctx->perf_get_data(); + + return data; +} + +void llama_perf_context_print(const llama_context * ctx) { + const auto data = llama_perf_context(ctx); + + const double t_end_ms = 1e-3 * ggml_time_us(); + + LLAMA_LOG_INFO("%s: load time = %10.2f ms\n", __func__, data.t_load_ms); + LLAMA_LOG_INFO("%s: prompt eval time = %10.2f ms / %5d tokens (%8.2f ms per token, %8.2f tokens per second)\n", + __func__, data.t_p_eval_ms, data.n_p_eval, data.t_p_eval_ms / data.n_p_eval, 1e3 / data.t_p_eval_ms * data.n_p_eval); + LLAMA_LOG_INFO("%s: eval time = %10.2f ms / %5d runs (%8.2f ms per token, %8.2f tokens per second)\n", + __func__, data.t_eval_ms, data.n_eval, data.t_eval_ms / data.n_eval, 1e3 / data.t_eval_ms * data.n_eval); + LLAMA_LOG_INFO("%s: total time = %10.2f ms / %5d tokens\n", __func__, (t_end_ms - data.t_start_ms), (data.n_p_eval + data.n_eval)); } -const std::vector> & llama_internal_get_tensor_map( - struct llama_context * ctx -) { - return ctx->model.tensors_by_name; +void llama_perf_context_reset(llama_context * ctx) { + ctx->perf_reset(); } diff --git a/src/llama-context.h b/src/llama-context.h index a9268b2920908..71d702e8baeeb 100644 --- a/src/llama-context.h +++ b/src/llama-context.h @@ -3,66 +3,210 @@ #include "llama.h" #include "llama-batch.h" #include "llama-cparams.h" -#include "llama-model.h" -#include "llama-kv-cache.h" +#include "llama-graph.h" #include "llama-adapter.h" #include "ggml-cpp.h" #include -#include #include -#include + +struct llama_model; +struct llama_kv_cache; + +class llama_io_read_i; +class llama_io_write_i; struct llama_context { - llama_context(const llama_model & model) - : model(model) - , t_start_us(model.t_start_us) - , t_load_us(model.t_load_us) {} + // init scheduler and compute buffers, reserve worst-case graphs + llama_context( + const llama_model & model, + llama_context_params params); - const struct llama_model & model; + ~llama_context(); - struct llama_cparams cparams; - struct llama_sbatch sbatch; // TODO: revisit if needed - struct llama_kv_cache kv_self; - struct llama_adapter_cvec cvec; + void synchronize(); - std::unordered_map lora; + const llama_model & get_model() const; - std::vector backends; - std::vector> set_n_threads_fns; + uint32_t n_ctx() const; + uint32_t 
n_ctx_per_seq() const; + uint32_t n_batch() const; + uint32_t n_ubatch() const; + uint32_t n_seq_max() const; - ggml_backend_t backend_cpu = nullptr; + uint32_t n_threads() const; + uint32_t n_threads_batch() const; - ggml_threadpool_t threadpool = nullptr; - ggml_threadpool_t threadpool_batch = nullptr; + llama_kv_cache * get_kv_self(); + const llama_kv_cache * get_kv_self() const; - bool has_evaluated_once = false; + void kv_self_update(); - mutable int64_t t_start_us; - mutable int64_t t_load_us; - mutable int64_t t_p_eval_us = 0; - mutable int64_t t_eval_us = 0; + enum llama_pooling_type pooling_type() const; - mutable int64_t t_compute_start_us = 0; - mutable int64_t n_queued_tokens = 0; + float * get_logits(); + float * get_logits_ith(int32_t i); - mutable int32_t n_p_eval = 0; // number of tokens in eval calls for the prompt (with batch size > 1) - mutable int32_t n_eval = 0; // number of eval calls + float * get_embeddings(); + float * get_embeddings_ith(int32_t i); + float * get_embeddings_seq(llama_seq_id seq_id); - // host buffer for the model output (logits and embeddings) - ggml_backend_buffer_ptr buf_output; + void attach_threadpool( + ggml_threadpool_t threadpool, + ggml_threadpool_t threadpool_batch); - // decode output (2-dimensional array: [n_outputs][n_vocab]) - size_t logits_size = 0; // capacity (of floats) for logits - float * logits = nullptr; + void detach_threadpool(); - std::vector output_ids; // map batch token positions to ids of the logits and embd buffers - size_t output_size = 0; // capacity (of tokens positions) for the output buffers - int32_t n_outputs = 0; // number of actually-used outputs in the current ubatch or last logical batch + void set_n_threads(int32_t n_threads, int32_t n_threads_batch); + + void set_abort_callback(bool (*abort_callback)(void * data), void * abort_callback_data); + + void set_embeddings (bool value); + void set_causal_attn(bool value); + + void set_adapter_lora( + llama_adapter_lora * adapter, + float scale); + + bool rm_adapter_lora( + llama_adapter_lora * adapter); + + void clear_adapter_lora(); + + bool apply_adapter_cvec( + const float * data, + size_t len, + int32_t n_embd, + int32_t il_start, + int32_t il_end); + + int encode(llama_batch & inp_batch); + int decode(llama_batch & inp_batch); + + // + // state save/load + // + + size_t state_get_size(); + size_t state_get_data( uint8_t * dst, size_t size); + size_t state_set_data(const uint8_t * src, size_t size); + + size_t state_seq_get_size(llama_seq_id seq_id); + size_t state_seq_get_data(llama_seq_id seq_id, uint8_t * dst, size_t size); + size_t state_seq_set_data(llama_seq_id seq_id, const uint8_t * src, size_t size); + + bool state_load_file( + const char * filepath, + llama_token * tokens_out, + size_t n_token_capacity, + size_t * n_token_count_out); + + bool state_save_file( + const char * filepath, + const llama_token * tokens, + size_t n_token_count); + + size_t state_seq_load_file( + llama_seq_id seq_id, + const char * filepath, + llama_token * tokens_out, + size_t n_token_capacity, + size_t * n_token_count_out); + + size_t state_seq_save_file( + llama_seq_id seq_id, + const char * filepath, + const llama_token * tokens, + size_t n_token_count); + + // + // perf + // + + llama_perf_context_data perf_get_data() const; + void perf_reset(); + +private: + // + // output + // + + // Make sure enough space is available for outputs. + // Returns max number of outputs for which space was reserved. 
+ int32_t output_reserve(int32_t n_outputs); + + // make the outputs have the same order they had in the user-provided batch + // TODO: maybe remove this + void output_reorder(); + // + // graph + // + + int32_t graph_max_nodes() const; + + // zero-out inputs and create the ctx_compute for the compute graph + ggml_cgraph * graph_init(); + + llm_graph_result_ptr graph_build( + ggml_context * ctx, + ggml_cgraph * gf, + const llama_ubatch & ubatch, + llm_graph_type gtype); + + // returns the result of ggml_backend_sched_graph_compute_async execution + ggml_status graph_compute( + ggml_cgraph * gf, + bool batched); + + llm_graph_cb graph_get_cb() const; + + // used by kv_self_update() + ggml_tensor * build_rope_shift( + ggml_context * ctx0, + ggml_tensor * cur, + ggml_tensor * shift, + ggml_tensor * factors, + ggml_backend_buffer * bbuf) const; + + llm_graph_result_ptr build_kv_self_shift( + ggml_context * ctx0, + ggml_cgraph * gf) const; + + llm_graph_result_ptr build_kv_self_defrag( + ggml_context * ctx0, + ggml_cgraph * gf) const; + + // TODO: read/write lora adapters and cvec + size_t state_write_data(llama_io_write_i & io); + size_t state_read_data (llama_io_read_i & io); + + size_t state_seq_write_data(llama_io_write_i & io, llama_seq_id seq_id); + size_t state_seq_read_data (llama_io_read_i & io, llama_seq_id seq_id); + + // + // members + // + + const llama_model & model; + + llama_cparams cparams; + llama_adapter_cvec cvec; + llama_adapter_loras loras; + llama_sbatch sbatch; + + llama_cross cross; // TODO: tmp for handling cross-attention - need something better probably + + std::unique_ptr kv_self; + + // TODO: remove bool logits_all = false; + // decode output (2-dimensional array: [n_outputs][n_vocab]) + size_t logits_size = 0; // capacity (of floats) for logits + float * logits = nullptr; + // embeddings output (2-dimensional array: [n_outputs][n_embd]) // populated only when pooling_type == LLAMA_POOLING_TYPE_NONE size_t embd_size = 0; // capacity (of floats) for embeddings @@ -72,57 +216,47 @@ struct llama_context { // populated only when pooling_type != LLAMA_POOLING_TYPE_NONE std::map> embd_seq; - // whether we are computing encoder output or decoder output - bool is_encoding = false; + int32_t n_outputs = 0; // number of actually-used outputs in the current ubatch or last logical batch + int32_t n_outputs_max = 0; // capacity (of tokens positions) for the output buffers - // TODO: find a better way to accommodate mutli-dimension position encoding methods - // number of position id each token get, 1 for each token in most cases. - // when using m-rope, it will be 3 position ids per token to representing 3 dimension coordinate. 
- int n_pos_per_token = 1; - - // output of the encoder part of the encoder-decoder models - std::vector embd_enc; - std::vector> seq_ids_enc; + std::vector output_ids; // map batch token positions to ids of the logits and embd buffers - // memory buffers used to evaluate the model - std::vector buf_compute_meta; ggml_backend_sched_ptr sched; + ggml_backend_t backend_cpu = nullptr; + std::vector backends; + + ggml_context_ptr ctx_compute; + + ggml_threadpool_t threadpool = nullptr; + ggml_threadpool_t threadpool_batch = nullptr; + ggml_abort_callback abort_callback = nullptr; void * abort_callback_data = nullptr; - // input tensors - struct ggml_tensor * inp_tokens; // I32 [n_batch] - struct ggml_tensor * inp_embd; // F32 [n_embd, n_batch] - struct ggml_tensor * inp_pos; // I32 [n_batch] - struct ggml_tensor * inp_out_ids; // I32 [n_outputs] - struct ggml_tensor * inp_KQ_mask; // F32 [kv_size, n_batch] - struct ggml_tensor * inp_KQ_mask_swa; // F32 [kv_size, n_batch] - struct ggml_tensor * inp_K_shift; // I32 [kv_size] - struct ggml_tensor * inp_mean; // F32 [n_batch, n_batch] - struct ggml_tensor * inp_cls; // I32 [n_batch] - struct ggml_tensor * inp_s_copy; // I32 [kv_size] - struct ggml_tensor * inp_s_mask; // F32 [1, n_kv] - struct ggml_tensor * inp_s_seq; // I32 [n_kv, n_batch] - struct ggml_tensor * inp_pos_bucket; // I32 [n_batch|n_kv, n_batch] - struct ggml_tensor * inp_embd_enc; // F32 [n_embd, n_outputs_enc] - struct ggml_tensor * inp_KQ_mask_cross; // F32 [n_outputs_enc, n_batch] -}; + std::vector> set_n_threads_fns; -// TODO: make these methods of llama_context -void llama_set_k_shift(struct llama_context & lctx); + // buffer types used for the compute buffer of each backend + std::vector backend_ptrs; + std::vector backend_buft; -void llama_set_s_copy(struct llama_context & lctx); + // memory buffers used to evaluate the model + std::vector buf_compute_meta; -void llama_set_inputs(llama_context & lctx, const llama_ubatch & ubatch); + // host buffer for the model output (logits and embeddings) + ggml_backend_buffer_ptr buf_output; -// Make sure enough space is available for outputs. -// Returns max number of outputs for which space was reserved. 
-size_t llama_output_reserve(struct llama_context & lctx, size_t n_outputs); + bool has_evaluated_once = false; -// make the outputs have the same order they had in the user-provided batch -void llama_output_reorder(struct llama_context & ctx); + // perf + mutable int64_t t_start_us = 0; + mutable int64_t t_load_us = 0; + mutable int64_t t_p_eval_us = 0; + mutable int64_t t_eval_us = 0; -// For internal test use -// TODO: remove -const std::vector> & llama_internal_get_tensor_map(struct llama_context * ctx); + mutable int64_t t_compute_start_us = 0; + mutable int64_t n_queued_tokens = 0; + + mutable int32_t n_p_eval = 0; // number of tokens in eval calls for the prompt (with batch size > 1) + mutable int32_t n_eval = 0; // number of eval calls +}; diff --git a/src/llama-graph.cpp b/src/llama-graph.cpp new file mode 100644 index 0000000000000..1e3f2efc89d2c --- /dev/null +++ b/src/llama-graph.cpp @@ -0,0 +1,1695 @@ +#include "llama-graph.h" + +#include "llama-impl.h" +#include "llama-batch.h" +#include "llama-cparams.h" +#include "llama-kv-cache.h" + +#include +#include +#include + +static int32_t llama_relative_position_bucket(llama_pos x, llama_pos y, uint64_t n_buckets, bool bidirectional) { + // TODO move to hparams if a T5 variant appears that uses a different value + const int64_t max_distance = 128; + + if (bidirectional) { + n_buckets >>= 1; + } + + const int64_t max_exact = n_buckets >> 1; + + int32_t relative_position = x - y; + int32_t relative_bucket = 0; + + if (bidirectional) { + relative_bucket += (relative_position > 0) * n_buckets; + relative_position = abs(relative_position); + } else { + relative_position = -std::min(relative_position, 0); + } + + int32_t relative_position_if_large = floorf(max_exact + logf(1.0 * relative_position / max_exact) * (n_buckets - max_exact) / log(1.0 * max_distance / max_exact)); + relative_position_if_large = std::min(relative_position_if_large, n_buckets - 1); + relative_bucket += (relative_position < max_exact ? 
relative_position : relative_position_if_large); + + return relative_bucket; +} + +void llm_graph_input_embd::set_input(const llama_ubatch * ubatch) { + if (ubatch->token) { + const int64_t n_tokens = ubatch->n_tokens; + + ggml_backend_tensor_set(tokens, ubatch->token, 0, n_tokens*ggml_element_size(tokens)); + } + + if (ubatch->embd) { + const int64_t n_embd = embd->ne[0]; + const int64_t n_tokens = ubatch->n_tokens; + + ggml_backend_tensor_set(embd, ubatch->embd, 0, n_tokens*n_embd*ggml_element_size(embd)); + } +} + +void llm_graph_input_pos::set_input(const llama_ubatch * ubatch) { + if (ubatch->pos && pos) { + const int64_t n_tokens = ubatch->n_tokens; + + ggml_backend_tensor_set(pos, ubatch->pos, 0, n_tokens*n_pos_per_token*ggml_element_size(pos)); + } +} + +void llm_graph_input_pos_bucket::set_input(const llama_ubatch * ubatch) { + if (pos_bucket) { + const int64_t n_tokens = ubatch->n_tokens; + + GGML_ASSERT(ggml_backend_buffer_is_host(pos_bucket->buffer)); + GGML_ASSERT(!ubatch->equal_seqs); // TODO: use ubatch->n_seqs instead of failing + + int32_t * data = (int32_t *) pos_bucket->data; + + for (int h = 0; h < 1; ++h) { + for (int j = 0; j < n_tokens; ++j) { + for (int i = 0; i < n_tokens; ++i) { + data[h*(n_tokens*n_tokens) + j*n_tokens + i] = llama_relative_position_bucket(ubatch->pos[i], ubatch->pos[j], hparams.n_rel_attn_bkts, true); + } + } + } + } +} + +void llm_graph_input_pos_bucket_kv::set_input(const llama_ubatch * ubatch) { + if (pos_bucket) { + const int64_t n_tokens = ubatch->n_tokens; + + GGML_ASSERT(ggml_backend_buffer_is_host(pos_bucket->buffer)); + GGML_ASSERT(!ubatch->equal_seqs); // TODO: use ubatch->n_seqs instead of failing + + int32_t * data = (int32_t *) pos_bucket->data; + + const int64_t n_kv = kv_self->n; + + for (int h = 0; h < 1; ++h) { + for (int j = 0; j < n_tokens; ++j) { + for (int i = 0; i < n_kv; ++i) { + data[h*(n_kv*n_tokens) + j*n_kv + i] = llama_relative_position_bucket(kv_self->cells[i].pos, ubatch->pos[j], hparams.n_rel_attn_bkts, false); + } + } + } + } +} + +void llm_graph_input_out_ids::set_input(const llama_ubatch * ubatch) { + if (hparams.causal_attn || cparams.pooling_type == LLAMA_POOLING_TYPE_NONE) { + //GGML_ASSERT(out_ids && "every model that can must skip unused outputs"); + + if (!out_ids) { + LLAMA_LOG_WARN("%s: 'out_ids' is not created\n", __func__); + } else { + const int64_t n_tokens = ubatch->n_tokens; + + GGML_ASSERT(ggml_backend_buffer_is_host(out_ids->buffer)); + int32_t * data = (int32_t *) out_ids->data; + + if (n_outputs == n_tokens) { + for (int i = 0; i < n_tokens; ++i) { + data[i] = i; + } + } else if (ubatch->output) { + int32_t n_outputs = 0; + for (int i = 0; i < n_tokens; ++i) { + if (ubatch->output[i]) { + data[n_outputs++] = i; + } + } + // the graph needs to have been passed the correct number of outputs + GGML_ASSERT(n_outputs == n_outputs); + } else if (n_outputs == 1) { + // only keep last output + data[0] = n_tokens - 1; + } else { + GGML_ASSERT(n_outputs == 0); + } + } + } +} + +void llm_graph_input_mean::set_input(const llama_ubatch * ubatch) { + if (cparams.embeddings && cparams.pooling_type == LLAMA_POOLING_TYPE_MEAN) { + const int64_t n_tokens = ubatch->n_tokens; + const int64_t n_seq_tokens = ubatch->n_seq_tokens; + const int64_t n_seqs = ubatch->n_seqs; + + GGML_ASSERT(mean); + GGML_ASSERT(ggml_backend_buffer_is_host(mean->buffer)); + + float * data = (float *) mean->data; + memset(mean->data, 0, n_tokens * n_tokens * ggml_element_size(mean)); + + std::vector sum(n_tokens, 0); + + for (int s = 0; s 
< n_seqs; ++s) { + const llama_seq_id seq_id = ubatch->seq_id[s][0]; + + // TODO: adapt limits to n_seqs when ubatch->equal_seqs is true + GGML_ASSERT(seq_id < n_tokens && "seq_id cannot be larger than n_tokens with pooling_type == MEAN"); + + sum[seq_id] += ubatch->n_seq_tokens; + } + + std::vector div(n_tokens, 0.0f); + for (int i = 0; i < n_tokens; ++i) { + const uint64_t s = sum[i]; + if (s > 0) { + div[i] = 1.0f/float(s); + } + } + + for (int s = 0; s < n_seqs; ++s) { + const llama_seq_id seq_id = ubatch->seq_id[s][0]; + + for (int i = 0; i < n_seq_tokens; ++i) { + data[seq_id*n_tokens + s*n_seq_tokens + i] = div[seq_id]; + } + } + } +} + +void llm_graph_input_cls::set_input(const llama_ubatch * ubatch) { + if (cparams.embeddings && ( + cparams.pooling_type == LLAMA_POOLING_TYPE_CLS || + cparams.pooling_type == LLAMA_POOLING_TYPE_RANK)) { + const int64_t n_tokens = ubatch->n_tokens; + const int64_t n_seq_tokens = ubatch->n_seq_tokens; + const int64_t n_seqs = ubatch->n_seqs; + + GGML_ASSERT(cls); + GGML_ASSERT(ggml_backend_buffer_is_host(cls->buffer)); + + uint32_t * data = (uint32_t *) cls->data; + memset(cls->data, 0, n_tokens * ggml_element_size(cls)); + + for (int s = 0; s < n_seqs; ++s) { + const llama_seq_id seq_id = ubatch->seq_id[s][0]; + + // TODO: adapt limits to n_seqs when ubatch->equal_seqs is true + GGML_ASSERT(seq_id < n_tokens && "seq_id cannot be larger than n_tokens with pooling_type == CLS or RANK"); + + for (int i = 0; i < n_seq_tokens; ++i) { + const llama_pos pos = ubatch->pos[s*n_seq_tokens + i]; + + if (pos == 0) { + data[seq_id] = s*n_seq_tokens + i; + } + } + } + } + + if (cparams.embeddings && cparams.pooling_type == LLAMA_POOLING_TYPE_LAST) { + const int64_t n_tokens = ubatch->n_tokens; + const int64_t n_seq_tokens = ubatch->n_seq_tokens; + const int64_t n_seqs = ubatch->n_seqs; + + GGML_ASSERT(cls); + GGML_ASSERT(ggml_backend_buffer_is_host(cls->buffer)); + + uint32_t * data = (uint32_t *) cls->data; + memset(cls->data, 0, n_tokens * ggml_element_size(cls)); + + std::vector last_pos(n_tokens, -1); + std::vector last_row(n_tokens, -1); + + for (int s = 0; s < n_seqs; ++s) { + const llama_seq_id seq_id = ubatch->seq_id[s][0]; + + // TODO: adapt limits to n_seqs when ubatch->equal_seqs is true + GGML_ASSERT(seq_id < n_tokens && "seq_id cannot be larger than n_tokens with pooling_type == LAST"); + + for (int i = 0; i < n_seq_tokens; ++i) { + const llama_pos pos = ubatch->pos[s*n_seq_tokens + i]; + + if (pos >= last_pos[seq_id]) { + last_pos[seq_id] = pos; + last_row[seq_id] = s*n_seq_tokens + i; + } + } + } + + for (int i = 0; i < n_tokens; ++i) { + if (last_row[i] >= 0) { + data[i] = last_row[i]; + } + } + } +} + +void llm_graph_input_s_copy::set_input(const llama_ubatch * ubatch) { + GGML_UNUSED(ubatch); + + const int64_t n_kv = kv_self->n; + + if (s_copy) { + GGML_ASSERT(ggml_backend_buffer_is_host(s_copy->buffer)); + int32_t * data = (int32_t *) s_copy->data; + + // assuming copy destinations ALWAYS happen ONLY on the cells between head and head+n + for (uint32_t i = 0; i < n_kv; ++i) { + const uint32_t cell_id = i + kv_self->head; + + ////////////////////////////////////////////// + // TODO: this should not mutate the KV cache ! 
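+            // kv_cell.src is the index of the cell to gather this state from (via the
+            // ggml_get_rows in build_copy_mask_state); a cell with nothing to copy simply
+            // points at itself, e.g. with head = 4, n_kv = 3 and no pending copies the
+            // resulting s_copy is [4, 5, 6]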
+ llama_kv_cell & kv_cell = const_cast(kv_self)->cells[i]; + + // prevent out-of-bound sources + if (kv_cell.src < 0 || (uint32_t) kv_cell.src >= kv_self->size) { + kv_cell.src = cell_id; + } + + data[i] = kv_cell.src; + + // TODO: do not mutate the KV cache + // ensure copy only happens once + if (kv_cell.src != (int32_t) cell_id) { + kv_cell.src = cell_id; + } + } + } +} + +void llm_graph_input_s_mask::set_input(const llama_ubatch * ubatch) { + GGML_UNUSED(ubatch); + + const int64_t n_kv = kv_self->n; + + if (s_mask) { + GGML_ASSERT(ggml_backend_buffer_is_host(s_mask->buffer)); + float * data = (float *) s_mask->data; + + // clear unused states + for (int i = 0; i < n_kv; ++i) { + const uint32_t cell_id = i + kv_self->head; + + ////////////////////////////////////////////// + // TODO: this should not mutate the KV cache ! + llama_kv_cell & kv_cell = const_cast(kv_self)->cells[i]; + + data[i] = (float) (kv_cell.src >= 0); + + // only clear once + if (kv_cell.src < 0) { + kv_cell.src = cell_id; + } + } + } +} + +void llm_graph_input_cross_embd::set_input(const llama_ubatch * ubatch) { + GGML_UNUSED(ubatch); + + if (cross_embd && !cross->v_embd.empty()) { + assert(cross_embd->type == GGML_TYPE_F32); + + ggml_backend_tensor_set(cross_embd, cross->v_embd.data(), 0, ggml_nbytes(cross_embd)); + } +} + +void llm_graph_input_attn_no_cache::set_input(const llama_ubatch * ubatch) { + if (kq_mask) { + if (cparams.causal_attn) { + const int64_t n_kv = ubatch->n_tokens; + const int64_t n_tokens = ubatch->n_tokens; + const int64_t n_seq_tokens = ubatch->n_seq_tokens; + const int64_t n_seqs = ubatch->n_seqs; + + GGML_ASSERT(ggml_backend_buffer_is_host(kq_mask->buffer)); + float * data = (float *) kq_mask->data; + + for (int h = 0; h < 1; ++h) { + for (int s1 = 0; s1 < n_seqs; ++s1) { + const llama_seq_id seq_id = ubatch->seq_id[s1][0]; + + for (int j = 0; j < n_seq_tokens; ++j) { + const int32_t tj = s1*n_seq_tokens + j; + + for (int s0 = 0; s0 < n_seqs; ++s0) { + for (int i = 0; i < n_seq_tokens; ++i) { + const int32_t ti = s0*n_seq_tokens + i; + float f = -INFINITY; + + for (int s = 0; s < ubatch->n_seq_id[s0]; ++s) { + if (ubatch->seq_id[s0][s] == seq_id && ubatch->pos[ti] <= ubatch->pos[tj]) { + if (hparams.use_alibi) { + f = -std::abs(ubatch->pos[ti] - ubatch->pos[tj]); + } else { + f = 0.0f; + } + break; + } + } + + data[h*(n_kv*n_tokens) + tj*n_kv + ti] = f; + } + } + } + } + } + } else { + const int64_t n_tokens = ubatch->n_tokens; + const int64_t n_seq_tokens = ubatch->n_seq_tokens; + const int64_t n_seqs = ubatch->n_seqs; + const int64_t n_stride = ubatch->n_tokens; + + GGML_ASSERT(ggml_backend_buffer_is_host(kq_mask->buffer)); + + float * data = (float *) kq_mask->data; + + for (int h = 0; h < 1; ++h) { + for (int s1 = 0; s1 < n_seqs; ++s1) { + const llama_seq_id seq_id = ubatch->seq_id[s1][0]; + + for (int j = 0; j < n_seq_tokens; ++j) { + const int32_t tj = s1*n_seq_tokens + j; + + for (int s0 = 0; s0 < n_seqs; ++s0) { + for (int i = 0; i < n_seq_tokens; ++i) { + const int32_t ti = s0*n_seq_tokens + i; + float f = -INFINITY; + + for (int s = 0; s < ubatch->n_seq_id[s0]; ++s) { + if (ubatch->seq_id[s0][s] == seq_id) { + if (hparams.use_alibi) { + f = -std::abs(ubatch->pos[ti] - ubatch->pos[tj]); + } else { + f = 0.0f; + } + break; + } + } + + data[h*(n_tokens*n_tokens) + tj*n_stride + ti] = f; + } + } + + for (int i = n_tokens; i < n_stride; ++i) { + data[h*(n_tokens*n_tokens) + tj*n_stride + i] = -INFINITY; + } + } + } + } + } + } +} + +void llm_graph_input_attn_kv_unified::set_input(const 
llama_ubatch * ubatch) { + if (self_kq_mask || self_kq_mask_swa) { + // NOTE: hparams.causal_attn indicates the model is capable of generation and uses the kv cache. + if (cparams.causal_attn) { + const int64_t n_kv = kv_self->n; + const int64_t n_tokens = ubatch->n_tokens; + const int64_t n_seq_tokens = ubatch->n_seq_tokens; + const int64_t n_seqs = ubatch->n_seqs; + + float * data = nullptr; + float * data_swa = nullptr; + + if (self_kq_mask) { + GGML_ASSERT(ggml_backend_buffer_is_host(self_kq_mask->buffer)); + data = (float *) self_kq_mask->data; + } + + if (self_kq_mask_swa) { + GGML_ASSERT(ggml_backend_buffer_is_host(self_kq_mask_swa->buffer)); + data_swa = (float *) self_kq_mask_swa->data; + } + + // For causal attention, use only the previous KV cells + // of the correct sequence for each token of the ubatch. + // It's assumed that if a token in the batch has multiple sequences, they are equivalent. + for (int h = 0; h < 1; ++h) { + for (int s = 0; s < n_seqs; ++s) { + const llama_seq_id seq_id = ubatch->seq_id[s][0]; + + for (int j = 0; j < n_seq_tokens; ++j) { + const llama_pos pos = ubatch->pos[s*n_seq_tokens + j]; + + for (int i = 0; i < n_kv; ++i) { + float f; + if (!kv_self->cells[i].has_seq_id(seq_id) || kv_self->cells[i].pos > pos) { + f = -INFINITY; + } else { + if (hparams.use_alibi) { + f = -std::abs(kv_self->cells[i].pos - pos); + } else { + f = 0.0f; + } + } + + if (data) { + data[h*(n_kv*n_tokens) + s*(n_kv*n_seq_tokens) + j*n_kv + i] = f; + } + + // may need to cut off old tokens for sliding window + if (data_swa) { + if (pos - kv_self->cells[i].pos >= (int32_t)hparams.n_swa) { + f = -INFINITY; + } + data_swa[h*(n_kv*n_tokens) + s*(n_kv*n_seq_tokens) + j*n_kv + i] = f; + } + } + } + } + + if (data) { + for (int i = n_tokens; i < GGML_PAD(n_tokens, GGML_KQ_MASK_PAD); ++i) { + for (int j = 0; j < n_kv; ++j) { + data[h*(n_kv*n_tokens) + i*n_kv + j] = -INFINITY; + } + } + } + + if (data_swa) { + for (int i = n_tokens; i < GGML_PAD(n_tokens, GGML_KQ_MASK_PAD); ++i) { + for (int j = 0; j < n_kv; ++j) { + data_swa[h*(n_kv*n_tokens) + i*n_kv + j] = -INFINITY; + } + } + } + } + } else { + const int64_t n_tokens = ubatch->n_tokens; + const int64_t n_seq_tokens = ubatch->n_seq_tokens; + const int64_t n_seqs = ubatch->n_seqs; + // when using kv cache, the mask needs to match the kv cache size + const int64_t n_stride = n_tokens; + + GGML_ASSERT(ggml_backend_buffer_is_host(self_kq_mask->buffer)); + + float * data = (float *) self_kq_mask->data; + + for (int h = 0; h < 1; ++h) { + for (int s1 = 0; s1 < n_seqs; ++s1) { + const llama_seq_id seq_id = ubatch->seq_id[s1][0]; + + for (int j = 0; j < n_seq_tokens; ++j) { + const int32_t tj = s1*n_seq_tokens + j; + + for (int s0 = 0; s0 < n_seqs; ++s0) { + for (int i = 0; i < n_seq_tokens; ++i) { + const int32_t ti = s0*n_seq_tokens + i; + float f = -INFINITY; + + for (int s = 0; s < ubatch->n_seq_id[s0]; ++s) { + if (ubatch->seq_id[s0][s] == seq_id) { + if (hparams.use_alibi) { + f = -std::abs(ubatch->pos[ti] - ubatch->pos[tj]); + } else { + f = 0.0f; + } + break; + } + } + + data[h*(n_tokens*n_tokens) + tj*n_stride + ti] = f; + } + } + + for (int i = n_tokens; i < n_stride; ++i) { + data[h*(n_tokens*n_tokens) + tj*n_stride + i] = -INFINITY; + } + } + } + } + } + } +} + +void llm_graph_input_attn_cross::set_input(const llama_ubatch * ubatch) { + if (cross_kq_mask) { + const int64_t n_enc = cross_kq_mask->ne[0]; + const int64_t n_tokens = ubatch->n_tokens; + + GGML_ASSERT(ggml_backend_buffer_is_host(cross_kq_mask->buffer)); + 
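+        // the mask is [n_enc, GGML_PAD(n_tokens, GGML_KQ_MASK_PAD)]: entry (i, j) is 0.0f when
+        // decoder token j shares a sequence id with encoder position i and -INFINITY otherwise;
+        // the padding rows past n_tokens are masked out entirely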
GGML_ASSERT(!ubatch->equal_seqs); // TODO: use ubatch->n_seqs instead of failing + + float * data = (float *) cross_kq_mask->data; + + for (int h = 0; h < 1; ++h) { + for (int j = 0; j < n_tokens; ++j) { + for (int i = 0; i < n_enc; ++i) { + float f = -INFINITY; + for (int s = 0; s < ubatch->n_seq_id[j]; ++s) { + const llama_seq_id seq_id = ubatch->seq_id[j][s]; + if (cross->seq_ids_enc[i].find(seq_id) != cross->seq_ids_enc[i].end()) { + f = 0.0f; + } + } + data[h*(n_enc*n_tokens) + j*n_enc + i] = f; + } + } + + for (int i = n_tokens; i < GGML_PAD(n_tokens, GGML_KQ_MASK_PAD); ++i) { + for (int j = 0; j < n_enc; ++j) { + data[h*(n_enc*n_tokens) + i*n_enc + j] = -INFINITY; + } + } + } + } +} + +// +// llm_graph_context +// + +llm_graph_context::llm_graph_context(const llm_graph_params & params) : + arch (params.arch), + hparams (params.hparams), + cparams (params.cparams), + ubatch (params.ubatch), + n_embd (hparams.n_embd), + n_layer (hparams.n_layer), + n_rot (hparams.n_rot), + n_ctx (cparams.n_ctx), + n_ctx_per_seq (cparams.n_ctx / cparams.n_seq_max), + n_head (hparams.n_head()), + n_head_kv (hparams.n_head_kv()), + n_embd_head_k (hparams.n_embd_head_k), + n_embd_k_gqa (hparams.n_embd_k_gqa()), + n_embd_head_v (hparams.n_embd_head_v), + n_embd_v_gqa (hparams.n_embd_v_gqa()), + n_expert (hparams.n_expert), + n_expert_used (hparams.n_expert_used), + freq_base (cparams.rope_freq_base), + freq_scale (cparams.rope_freq_scale), + ext_factor (cparams.yarn_ext_factor), + attn_factor (cparams.yarn_attn_factor), + beta_fast (cparams.yarn_beta_fast), + beta_slow (cparams.yarn_beta_slow), + norm_eps (hparams.f_norm_eps), + norm_rms_eps (hparams.f_norm_rms_eps), + n_tokens (ubatch.n_tokens), + n_outputs (params.n_outputs), + n_ctx_orig (cparams.n_ctx_orig_yarn), + pooling_type (cparams.pooling_type), + rope_type (hparams.rope_type), + ctx0 (params.ctx), + sched (params.sched), + backend_cpu (params.backend_cpu), + cvec (params.cvec), + loras (params.loras), + memory (params.memory), + cross (params.cross), + cb_func (params.cb), + res (std::make_unique()) { + } + +int64_t llm_graph_context::n_pos_per_token() const { + return arch == LLM_ARCH_QWEN2VL ? 4 : 1; +} + +void llm_graph_context::cb(ggml_tensor * cur, const char * name, int il) const { + if (cb_func) { + cb_func(ubatch, cur, name, il); + } +} + +ggml_tensor * llm_graph_context::build_cvec( + ggml_tensor * cur, + int il) const { + return cvec->apply_to(ctx0, cur, il); +} + +ggml_tensor * llm_graph_context::build_lora_mm( + ggml_tensor * w, + ggml_tensor * cur) const { + ggml_tensor * res = ggml_mul_mat(ctx0, w, cur); + + for (const auto & lora : *loras) { + llama_adapter_lora_weight * lw = lora.first->get_weight(w); + if (lw == nullptr) { + continue; + } + + const float adapter_scale = lora.second; + const float scale = lw->get_scale(lora.first->alpha, adapter_scale); + + ggml_tensor * ab_cur = ggml_mul_mat( + ctx0, lw->b, + ggml_mul_mat(ctx0, lw->a, cur) + ); + + ab_cur = ggml_scale(ctx0, ab_cur, scale); + res = ggml_add(ctx0, res, ab_cur); + } + + return res; +} + +ggml_tensor * llm_graph_context::build_lora_mm_id( + ggml_tensor * w, // ggml_tensor * as + ggml_tensor * cur, // ggml_tensor * b + ggml_tensor * ids) const { + ggml_tensor * res = ggml_mul_mat_id(ctx0, w, cur, ids); + for (const auto & lora : *loras) { + llama_adapter_lora_weight * lw = lora.first->get_weight(w); + if (lw == nullptr) { + continue; + } + + const float alpha = lora.first->alpha; + const float rank = (float) lw->b->ne[0]; + const float scale = alpha ? 
lora.second * alpha / rank : lora.second; + + ggml_tensor * ab_cur = ggml_mul_mat_id( + ctx0, lw->b, + ggml_mul_mat_id(ctx0, lw->a, cur, ids), + ids + ); + + ab_cur = ggml_scale(ctx0, ab_cur, scale); + res = ggml_add(ctx0, res, ab_cur); + } + + return res; +} + +ggml_tensor * llm_graph_context::build_norm( + ggml_tensor * cur, + ggml_tensor * mw, + ggml_tensor * mb, + llm_norm_type type, + int il) const { + switch (type) { + case LLM_NORM: cur = ggml_norm (ctx0, cur, hparams.f_norm_eps); break; + case LLM_NORM_RMS: cur = ggml_rms_norm(ctx0, cur, hparams.f_norm_rms_eps); break; + case LLM_NORM_GROUP: + { + cur = ggml_reshape_3d(ctx0, cur, cur->ne[0], 1, cur->ne[1]); + cur = ggml_group_norm(ctx0, cur, hparams.n_norm_groups, hparams.f_norm_group_eps); + cur = ggml_reshape_2d(ctx0, cur, cur->ne[0], cur->ne[2]); + } break; + } + + if (mw || mb) { + cb(cur, "norm", il); + } + + if (mw) { + cur = ggml_mul(ctx0, cur, mw); + if (mb) { + cb(cur, "norm_w", il); + } + } + + if (mb) { + cur = ggml_add(ctx0, cur, mb); + } + + return cur; +} + +ggml_tensor * llm_graph_context::build_ffn( + ggml_tensor * cur, + ggml_tensor * up, + ggml_tensor * up_b, + ggml_tensor * up_s, + ggml_tensor * gate, + ggml_tensor * gate_b, + ggml_tensor * gate_s, + ggml_tensor * down, + ggml_tensor * down_b, + ggml_tensor * down_s, + ggml_tensor * act_scales, + llm_ffn_op_type type_op, + llm_ffn_gate_type type_gate, + int il) const { + ggml_tensor * tmp = up ? build_lora_mm(up, cur) : cur; + cb(tmp, "ffn_up", il); + + if (up_b) { + tmp = ggml_add(ctx0, tmp, up_b); + cb(tmp, "ffn_up_b", il); + } + + if (up_s) { + tmp = ggml_mul(ctx0, tmp, up_s); + cb(tmp, "ffn_up_s", il); + } + + if (gate) { + switch (type_gate) { + case LLM_FFN_SEQ: + { + cur = build_lora_mm(gate, tmp); + cb(cur, "ffn_gate", il); + } break; + case LLM_FFN_PAR: + { + cur = build_lora_mm(gate, cur); + cb(cur, "ffn_gate", il); + } break; + } + + if (gate_b) { + cur = ggml_add(ctx0, cur, gate_b); + cb(cur, "ffn_gate_b", il); + } + + if (gate_s) { + cur = ggml_mul(ctx0, cur, gate_s); + cb(cur, "ffn_gate_s", il); + } + + } else { + cur = tmp; + } + + switch (type_op) { + case LLM_FFN_SILU: + { + cur = ggml_silu(ctx0, cur); + cb(cur, "ffn_silu", il); + } break; + case LLM_FFN_GELU: + { + cur = ggml_gelu(ctx0, cur); + cb(cur, "ffn_gelu", il); + if (act_scales != NULL) { + cur = ggml_div(ctx0, cur, act_scales); + cb(cur, "ffn_act", il); + } + } break; + case LLM_FFN_RELU: + { + cur = ggml_relu(ctx0, cur); + cb(cur, "ffn_relu", il); + } break; + case LLM_FFN_RELU_SQR: + { + cur = ggml_relu(ctx0, cur); + cb(cur, "ffn_relu", il); + + cur = ggml_sqr(ctx0, cur); + cb(cur, "ffn_sqr(relu)", il); + } break; + case LLM_FFN_SWIGLU: + { + // Project to 4h. 
If using swiglu double the output width, see https://arxiv.org/pdf/2002.05202.pdf + int64_t split_point = cur->ne[0] / 2; + ggml_tensor * x0 = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, split_point, cur->ne[1], cur->nb[1], 0)); + ggml_tensor * x1 = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, split_point, cur->ne[1], cur->nb[1], split_point * ggml_element_size(cur))); + + x0 = ggml_silu(ctx0, x0); + cb(cur, "ffn_silu", il); + + cur = ggml_mul(ctx0, x0, x1); + cb(cur, "ffn_mul", il); + } break; + } + + if (type_gate == LLM_FFN_PAR) { + cur = ggml_mul(ctx0, cur, tmp); + cb(cur, "ffn_gate_par", il); + } + + if (down) { + cur = build_lora_mm(down, cur); + } + + if (down_b) { + cb(cur, "ffn_down", il); + } + + if (down_b) { + cur = ggml_add(ctx0, cur, down_b); + } + + if (down_s) { + cur = ggml_mul(ctx0, cur, down_s); + cb(cur, "ffn_down_s", il); + } + + return cur; +} + +ggml_tensor * llm_graph_context::build_moe_ffn( + ggml_tensor * cur, + ggml_tensor * gate_inp, + ggml_tensor * up_exps, + ggml_tensor * gate_exps, + ggml_tensor * down_exps, + ggml_tensor * exp_probs_b, + int64_t n_expert, + int64_t n_expert_used, + llm_ffn_op_type type_op, + bool norm_w, + bool scale_w, + float w_scale, + llama_expert_gating_func_type gating_op, + int il) const { + int64_t n_embd = cur->ne[0]; + int64_t n_tokens = cur->ne[1]; + + ggml_tensor * logits = build_lora_mm(gate_inp, cur); // [n_expert, n_tokens] + cb(logits, "ffn_moe_logits", il); + + ggml_tensor * probs = nullptr; + switch (gating_op) { + case LLAMA_EXPERT_GATING_FUNC_TYPE_SOFTMAX: + { + probs = ggml_soft_max(ctx0, logits); // [n_expert, n_tokens] + } break; + case LLAMA_EXPERT_GATING_FUNC_TYPE_SIGMOID: + { + probs = ggml_sigmoid(ctx0, logits); // [n_expert, n_tokens] + } break; + default: + GGML_ABORT("fatal error"); + } + cb(probs, "ffn_moe_probs", il); + + // add experts selection bias - introduced in DeepSeek V3 + // leave probs unbiased as it's later used to get expert weights + ggml_tensor * selection_probs = probs; + if (exp_probs_b != nullptr) { + selection_probs = ggml_add(ctx0, probs, exp_probs_b); + cb(selection_probs, "ffn_moe_probs_biased", il); + } + + // select experts + ggml_tensor * selected_experts = ggml_top_k(ctx0, selection_probs, n_expert_used); // [n_expert_used, n_tokens] + cb(selected_experts->src[0], "ffn_moe_argsort", il); + cb(selected_experts, "ffn_moe_topk", il); + + ggml_tensor * weights = ggml_get_rows(ctx0, + ggml_reshape_3d(ctx0, probs, 1, n_expert, n_tokens), selected_experts); // [1, n_expert_used, n_tokens] + cb(weights, "ffn_moe_weights", il); + + if (norm_w) { + weights = ggml_reshape_2d(ctx0, weights, n_expert_used, n_tokens); + + ggml_tensor * weights_sum = ggml_sum_rows(ctx0, weights); // [1, n_tokens] + cb(weights_sum, "ffn_moe_weights_sum", il); + + weights = ggml_div(ctx0, weights, weights_sum); // [n_expert_used, n_tokens] + cb(weights, "ffn_moe_weights_norm", il); + + weights = ggml_reshape_3d(ctx0, weights, 1, n_expert_used, n_tokens); + } + if (scale_w) { + weights = ggml_scale(ctx0, weights, w_scale); + cb(weights, "ffn_moe_weights_scaled", il); + } + + cur = ggml_reshape_3d(ctx0, cur, n_embd, 1, n_tokens); + ggml_tensor * up = build_lora_mm_id(up_exps, cur, selected_experts); // [n_ff, n_expert_used, n_tokens] + cb(up, "ffn_moe_up", il); + + ggml_tensor * gate = build_lora_mm_id(gate_exps, cur, selected_experts); // [n_ff, n_expert_used, n_tokens] + cb(gate, "ffn_moe_gate", il); + + switch (type_op) { + case LLM_FFN_SILU: + { + gate = ggml_silu(ctx0, gate); + cb(gate, "ffn_moe_silu", il); + } break; + case 
LLM_FFN_GELU: + { + gate = ggml_gelu(ctx0, gate); + cb(gate, "ffn_moe_gelu", il); + } break; + default: + GGML_ABORT("fatal error"); + } + + ggml_tensor * par = ggml_mul(ctx0, up, gate); // [n_ff, n_expert_used, n_tokens] + cb(par, "ffn_moe_gate_par", il); + + ggml_tensor * experts = build_lora_mm_id(down_exps, par, selected_experts); // [n_embd, n_expert_used, n_tokens] + cb(experts, "ffn_moe_down", il); + + experts = ggml_mul(ctx0, experts, weights); + + // aggregate experts + ggml_tensor * moe_out = nullptr; + for (int i = 0; i < n_expert_used; ++i) { + ggml_tensor * cur_expert = ggml_view_2d(ctx0, experts, n_embd, n_tokens, + experts->nb[2], i*experts->nb[1]); + + if (i == 0) { + moe_out = cur_expert; + } else { + moe_out = ggml_add(ctx0, moe_out, cur_expert); + } + } + + if (n_expert_used == 1) { + // avoid returning a non-contiguous tensor + moe_out = ggml_cont(ctx0, moe_out); + } + + return moe_out; +} + +// input embeddings with optional lora +ggml_tensor * llm_graph_context::build_inp_embd(ggml_tensor * tok_embd) const { + const int64_t n_embd = hparams.n_embd; + + auto inp = std::make_unique(); + + ggml_tensor * cur = nullptr; + + if (ubatch.token) { + inp->tokens = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, ubatch.n_tokens); + //cb(inp->tokens, "inp_tokens", -1); + ggml_set_input(inp->tokens); + + cur = ggml_get_rows(ctx0, tok_embd, inp->tokens); + + // apply lora for embedding tokens if needed + for (const auto & lora : *loras) { + llama_adapter_lora_weight * lw = lora.first->get_weight(tok_embd); + if (lw == nullptr) { + continue; + } + + const float adapter_scale = lora.second; + const float scale = lw->get_scale(lora.first->alpha, adapter_scale); + + ggml_tensor * inpL_delta = ggml_scale(ctx0, ggml_mul_mat( + ctx0, lw->b, // non-transposed lora_b + ggml_get_rows(ctx0, lw->a, inp->tokens) + ), scale); + + cur = ggml_add(ctx0, cur, inpL_delta); + } + } else { + inp->embd = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_embd, ubatch.n_tokens); + ggml_set_input(inp->embd); + + cur = inp->embd; + } + + // For Granite architecture + if (hparams.f_embedding_scale != 0.0f) { + cur = ggml_scale(ctx0, cur, hparams.f_embedding_scale); + } + + cb(cur, "inp_embd", -1); + + res->add_input(std::move(inp)); + + return cur; +} + +ggml_tensor * llm_graph_context::build_inp_pos() const { + auto inp = std::make_unique(n_pos_per_token()); + + auto & cur = inp->pos; + + cur = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_tokens*n_pos_per_token()); + ggml_set_input(cur); + + res->add_input(std::move(inp)); + + return cur; +} + +ggml_tensor * llm_graph_context::build_inp_out_ids() const { + auto inp = std::make_unique(hparams, cparams, n_outputs); + + auto & cur = inp->out_ids; + + cur = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_outputs); + ggml_set_input(cur); + + res->add_input(std::move(inp)); + + return cur; +} + +ggml_tensor * llm_graph_context::build_inp_mean() const { + auto inp = std::make_unique(cparams); + + auto & cur = inp->mean; + + cur = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_tokens, n_tokens); + ggml_set_input(cur); + + res->add_input(std::move(inp)); + + return cur; +} + +ggml_tensor * llm_graph_context::build_inp_cls() const { + auto inp = std::make_unique(cparams); + + auto & cur = inp->cls; + + cur = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_tokens); + ggml_set_input(cur); + + res->add_input(std::move(inp)); + + return cur; +} + +ggml_tensor * llm_graph_context::build_inp_s_copy() const { + const llama_kv_cache_unified * kv_self = static_cast(memory); + + auto inp = 
std::make_unique(kv_self); + + const auto n_kv = kv_self->n; + + auto & cur = inp->s_copy; + + cur = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_kv); + ggml_set_input(cur); + + res->add_input(std::move(inp)); + + return cur; +} + +ggml_tensor * llm_graph_context::build_inp_s_mask() const { + const llama_kv_cache_unified * kv_self = static_cast(memory); + + auto inp = std::make_unique(kv_self); + + const auto n_kv = kv_self->n; + + auto & cur = inp->s_mask; + + cur = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, 1, n_kv); + ggml_set_input(cur); + + res->add_input(std::move(inp)); + + return cur; +} + +ggml_tensor * llm_graph_context::build_inp_cross_embd() const { + auto inp = std::make_unique(cross); + + auto & cur = inp->cross_embd; + + // if we have the output embeddings from the encoder, use them directly + // TODO: needs more work to be correct, for now just use the tensor shape + //if (cross->t_embd) { + // cur = ggml_view_tensor(ctx0, cross->t_embd); + + // return cur; + //} + + const auto n_embd = !cross->v_embd.empty() ? cross->n_embd : hparams.n_embd; + const auto n_enc = !cross->v_embd.empty() ? cross->n_enc : hparams.n_ctx_train; + + cur = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_embd, n_enc); + ggml_set_input(cur); + + res->add_input(std::move(inp)); + + return cur; +} + +ggml_tensor * llm_graph_context::build_inp_pos_bucket_enc() const { + auto inp = std::make_unique(hparams); + + auto & cur = inp->pos_bucket; + + cur = ggml_new_tensor_2d(ctx0, GGML_TYPE_I32, n_tokens, n_tokens); + ggml_set_input(cur); + + res->add_input(std::move(inp)); + + return cur; +} + +ggml_tensor * llm_graph_context::build_inp_pos_bucket_dec() const { + const llama_kv_cache_unified * kv_self = static_cast(memory); + + auto inp = std::make_unique(hparams, kv_self); + + const auto n_kv = kv_self->n; + + auto & cur = inp->pos_bucket; + + cur = ggml_new_tensor_2d(ctx0, GGML_TYPE_I32, n_kv, n_tokens); + ggml_set_input(cur); + + res->add_input(std::move(inp)); + + return cur; +} + +ggml_tensor * llm_graph_context::build_pos_bias(ggml_tensor * pos_bucket, ggml_tensor * attn_rel_b) const { + ggml_tensor * pos_bucket_1d = ggml_reshape_1d(ctx0, pos_bucket, pos_bucket->ne[0] * pos_bucket->ne[1]); + cb(pos_bucket_1d, "pos_bucket_1d", -1); + + ggml_tensor * pos_bias = ggml_get_rows(ctx0, attn_rel_b, pos_bucket_1d); + + pos_bias = ggml_reshape_3d(ctx0, pos_bias, pos_bias->ne[0], pos_bucket->ne[0], pos_bucket->ne[1]); + pos_bias = ggml_permute (ctx0, pos_bias, 2, 0, 1, 3); + pos_bias = ggml_cont (ctx0, pos_bias); + + cb(pos_bias, "pos_bias", -1); + + return pos_bias; +} + +ggml_tensor * llm_graph_context::build_attn_mha( + ggml_cgraph * gf, + ggml_tensor * q, + ggml_tensor * k, + ggml_tensor * v, + ggml_tensor * kq_b, + ggml_tensor * kq_mask, + bool v_trans, + float kq_scale) const { + //const int64_t n_embd_k_gqa = hparams.n_embd_k_gqa(il); + //const int64_t n_embd_v_gqa = hparams.n_embd_v_gqa(il); + + //const int64_t n_head = hparams.n_head(il); + //const int64_t n_head_kv = hparams.n_head_kv(il); + + //const auto & n_embd_head_k = hparams.n_embd_head_k; + //const auto & n_embd_head_v = hparams.n_embd_head_v; + + const auto n_embd_head_v = v_trans ? 
v->ne[1] : v->ne[0]; + + const auto n_tokens = q->ne[1]; + const auto n_head = q->ne[2]; + const auto n_kv = k->ne[1]; + + ggml_tensor * cur; + + // TODO: replace hardcoded padding with ggml-provided padding + if (cparams.flash_attn && (n_kv % 256 == 0) && kq_b == nullptr) { + GGML_ASSERT(kq_b == nullptr && "Flash attention does not support KQ bias yet"); + + if (v_trans) { + v = ggml_transpose(ctx0, v); + } + + cur = ggml_flash_attn_ext(ctx0, q, k, v, kq_mask, kq_scale, hparams.f_max_alibi_bias, + hparams.attn_soft_cap ? hparams.f_attn_logit_softcapping : 0.0f); + + ggml_flash_attn_ext_set_prec(cur, GGML_PREC_F32); + + cur = ggml_reshape_2d(ctx0, cur, n_embd_head_v*n_head, n_tokens); + } else { + ggml_tensor * kq = ggml_mul_mat(ctx0, k, q); + + // note: this op tends to require high floating point range + // while for some models F16 is enough, for others it is not, so we default to F32 here + ggml_mul_mat_set_prec(kq, GGML_PREC_F32); + + if (arch == LLM_ARCH_GROK) { + // need to do the following: + // multiply by attn_output_multiplyer of 0.08838834764831845 + // and then : + // kq = 30 * tanh(kq / 30) + // before the softmax below + + kq = ggml_tanh(ctx0, ggml_scale(ctx0, kq, 0.08838834764831845f/30.0f)); + kq = ggml_scale(ctx0, kq, 30); + } + + if (hparams.attn_soft_cap) { + kq = ggml_scale(ctx0, kq, 1.0f / hparams.f_attn_logit_softcapping); + kq = ggml_tanh (ctx0, kq); + kq = ggml_scale(ctx0, kq, hparams.f_attn_logit_softcapping); + } + + if (kq_b) { + kq = ggml_add(ctx0, kq, kq_b); + } + + kq = ggml_soft_max_ext(ctx0, kq, kq_mask, kq_scale, hparams.f_max_alibi_bias); + + if (!v_trans) { + // note: avoid this branch + v = ggml_cont(ctx0, ggml_transpose(ctx0, v)); + } + + ggml_tensor * kqv = ggml_mul_mat(ctx0, v, kq); + + ggml_tensor * kqv_merged = ggml_permute(ctx0, kqv, 0, 2, 1, 3); + + cur = ggml_cont_2d(ctx0, kqv_merged, n_embd_head_v*n_head, n_tokens); + + if (!cparams.offload_kqv) { + // all nodes between the KV store and the attention output are run on the CPU + ggml_backend_sched_set_tensor_backend(sched, cur, backend_cpu); + } + } + + ggml_build_forward_expand(gf, cur); + + return cur; +} + +llm_graph_input_attn_no_cache * llm_graph_context::build_attn_inp_no_cache() const { + auto inp = std::make_unique(hparams, cparams); + + // note: there is no KV cache, so the number of KV values is equal to the number of tokens in the batch + inp->kq_mask = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_tokens, GGML_PAD(n_tokens, GGML_KQ_MASK_PAD)); + //cb(inp_kq_mask, "KQ_mask", -1); + ggml_set_input(inp->kq_mask); + + inp->kq_mask_cnv = cparams.flash_attn ? 
ggml_cast(ctx0, inp->kq_mask, GGML_TYPE_F16) : inp->kq_mask; + + return (llm_graph_input_attn_no_cache *) res->add_input(std::move(inp)); +} + +ggml_tensor * llm_graph_context::build_attn( + llm_graph_input_attn_no_cache * inp, + ggml_cgraph * gf, + ggml_tensor * wo, + ggml_tensor * wo_b, + ggml_tensor * q_cur, + ggml_tensor * k_cur, + ggml_tensor * v_cur, + ggml_tensor * kq_b, + float kq_scale, + int il) const { + GGML_UNUSED(n_tokens); + + // these nodes are added to the graph together so that they are not reordered + // by doing so, the number of splits in the graph is reduced + ggml_build_forward_expand(gf, q_cur); + ggml_build_forward_expand(gf, k_cur); + ggml_build_forward_expand(gf, v_cur); + + const auto & kq_mask = inp->get_kq_mask(); + + ggml_tensor * q = ggml_permute(ctx0, q_cur, 0, 2, 1, 3); + //cb(q, "q", il); + + ggml_tensor * k = ggml_permute(ctx0, k_cur, 0, 2, 1, 3); + //cb(k, "k", il); + + ggml_tensor * v = ggml_permute(ctx0, v_cur, 0, 2, 1, 3); + //cb(k, "v", il); + + ggml_tensor * cur = build_attn_mha(gf, q, k, v, kq_b, kq_mask, false, kq_scale); + + cb(cur, "kqv_out", il); + + if (wo) { + cur = build_lora_mm(wo, cur); + } + + if (wo_b) { + //cb(cur, "kqv_wo", il); + } + + if (wo_b) { + cur = ggml_add(ctx0, cur, wo_b); + } + + return cur; +} + +llm_graph_input_attn_kv_unified * llm_graph_context::build_attn_inp_kv_unified( + bool causal, + bool swa) const { + const llama_kv_cache_unified * kv_self = static_cast(memory); + + auto inp = std::make_unique(hparams, cparams, kv_self); + + const auto n_kv = kv_self->n; + + inp->self_kq_mask = causal + ? ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_kv, GGML_PAD(n_tokens, GGML_KQ_MASK_PAD)) + : ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_tokens, GGML_PAD(n_tokens, GGML_KQ_MASK_PAD)); + //cb(inp->self_kq_mask, "KQ_mask", -1); + ggml_set_input(inp->self_kq_mask); + + inp->self_kq_mask_cnv = cparams.flash_attn ? ggml_cast(ctx0, inp->self_kq_mask, GGML_TYPE_F16) : inp->self_kq_mask; + + if (swa) { + GGML_ASSERT(hparams.n_swa > 0); + + inp->self_kq_mask_swa = causal + ? ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_kv, GGML_PAD(n_tokens, GGML_KQ_MASK_PAD)) + : ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_tokens, GGML_PAD(n_tokens, GGML_KQ_MASK_PAD)); + //cb(inp->self_kq_mask_swa, "KQ_mask_swa", -1); + ggml_set_input(inp->self_kq_mask_swa); + + inp->self_kq_mask_swa_cnv = cparams.flash_attn ? 
ggml_cast(ctx0, inp->self_kq_mask_swa, GGML_TYPE_F16) : inp->self_kq_mask_swa; + } + + return (llm_graph_input_attn_kv_unified *) res->add_input(std::move(inp)); +} + +ggml_tensor * llm_graph_context::build_attn( + llm_graph_input_attn_kv_unified * inp, + ggml_cgraph * gf, + ggml_tensor * wo, + ggml_tensor * wo_b, + ggml_tensor * q_cur, + ggml_tensor * k_cur, + ggml_tensor * v_cur, + ggml_tensor * kq_b, + float kq_scale, + int il) const { + // these nodes are added to the graph together so that they are not reordered + // by doing so, the number of splits in the graph is reduced + ggml_build_forward_expand(gf, q_cur); + ggml_build_forward_expand(gf, k_cur); + ggml_build_forward_expand(gf, v_cur); + + const llama_kv_cache_unified * kv_self = static_cast(memory); + const auto & n_ctx = cparams.n_ctx; + + const int64_t n_embd_k_gqa = hparams.n_embd_k_gqa(il); + const int64_t n_embd_v_gqa = hparams.n_embd_v_gqa(il); + + const auto n_tokens = q_cur->ne[2]; + + const bool v_trans = !cparams.flash_attn; + + // store to KV cache + { + GGML_ASSERT(!kv_self->recurrent); + + const auto kv_head = kv_self->head; + + GGML_ASSERT(kv_self->size == n_ctx); + + ggml_tensor * k_cache_view = ggml_view_1d(ctx0, kv_self->k_l[il], n_tokens*n_embd_k_gqa, ggml_row_size(kv_self->k_l[il]->type, n_embd_k_gqa)*kv_head); + //cb(k_cache_view, "k_cache_view", il); + + // note: storing RoPE-ed version of K in the KV cache + ggml_build_forward_expand(gf, ggml_cpy(ctx0, k_cur, k_cache_view)); + + assert(v_cur->ne[0] == n_embd_v_gqa && v_cur->ne[1] == n_tokens); + + ggml_tensor * v_cache_view = nullptr; + + if (!v_trans) { + v_cache_view = ggml_view_1d(ctx0, kv_self->v_l[il], n_tokens*n_embd_v_gqa, ggml_row_size(kv_self->v_l[il]->type, n_embd_v_gqa)*kv_head); + } else { + // note: the V cache is transposed when not using flash attention + v_cache_view = ggml_view_2d(ctx0, kv_self->v_l[il], n_tokens, n_embd_v_gqa, + ( n_ctx)*ggml_element_size(kv_self->v_l[il]), + (kv_head)*ggml_element_size(kv_self->v_l[il])); + + v_cur = ggml_transpose(ctx0, v_cur); + } + //cb(v_cache_view, "v_cache_view", il); + + ggml_build_forward_expand(gf, ggml_cpy(ctx0, v_cur, v_cache_view)); + } + + // TODO: improve + bool is_sliding = false; + + switch (arch) { + case LLM_ARCH_COHERE2: + { + const int32_t sliding_window_pattern = 4; + is_sliding = il % sliding_window_pattern < (sliding_window_pattern - 1); + } break; + case LLM_ARCH_GEMMA2: + { + const int32_t sliding_window_pattern = 2; + is_sliding = il % sliding_window_pattern < (sliding_window_pattern - 1); + } break; + case LLM_ARCH_GEMMA3: + { + const int32_t sliding_window_pattern = 6; + is_sliding = il % sliding_window_pattern < (sliding_window_pattern - 1); + } break; + case LLM_ARCH_PHI3: + { + is_sliding = hparams.n_swa > 0; + } break; + default: + { + is_sliding = false; + } + }; + + const auto & kq_mask = is_sliding ? inp->get_kq_mask_swa() : inp->get_kq_mask(); + + const auto n_kv = kv_self->n; + + const int64_t n_head_kv = hparams.n_head_kv(il); + + const auto & n_embd_head_k = hparams.n_embd_head_k; + const auto & n_embd_head_v = hparams.n_embd_head_v; + + ggml_tensor * q = ggml_permute(ctx0, q_cur, 0, 2, 1, 3); + //cb(q, "q", il); + + ggml_tensor * k = + ggml_view_3d(ctx0, kv_self->k_l[il], + n_embd_head_k, n_kv, n_head_kv, + ggml_row_size(kv_self->k_l[il]->type, n_embd_k_gqa), + ggml_row_size(kv_self->k_l[il]->type, n_embd_head_k), + 0); + //cb(k, "k", il); + + ggml_tensor * v = !v_trans ? 
+ ggml_view_3d(ctx0, kv_self->v_l[il], + n_embd_head_v, n_kv, n_head_kv, + ggml_row_size(kv_self->v_l[il]->type, n_embd_v_gqa), + ggml_row_size(kv_self->v_l[il]->type, n_embd_head_v), + 0) : + ggml_view_3d(ctx0, kv_self->v_l[il], + n_kv, n_embd_head_v, n_head_kv, + ggml_element_size(kv_self->v_l[il])*n_ctx, + ggml_element_size(kv_self->v_l[il])*n_ctx*n_embd_head_v, + 0); + + ggml_tensor * cur = build_attn_mha(gf, q, k, v, kq_b, kq_mask, v_trans, kq_scale); + cb(cur, "kqv_out", il); + + if (wo) { + cur = build_lora_mm(wo, cur); + } + + if (wo_b) { + //cb(cur, "kqv_wo", il); + } + + if (wo_b) { + cur = ggml_add(ctx0, cur, wo_b); + } + + return cur; +} + +llm_graph_input_attn_cross * llm_graph_context::build_attn_inp_cross() const { + auto inp = std::make_unique(cross); + + const int32_t n_enc = !cross->v_embd.empty() ? cross->n_enc : hparams.n_ctx_train; + + inp->cross_kq_mask = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_enc, GGML_PAD(n_tokens, GGML_KQ_MASK_PAD)); + ggml_set_input(inp->cross_kq_mask); + + inp->cross_kq_mask_cnv = cparams.flash_attn ? ggml_cast(ctx0, inp->cross_kq_mask, GGML_TYPE_F16) : inp->cross_kq_mask; + + return (llm_graph_input_attn_cross *) res->add_input(std::move(inp)); +} + +ggml_tensor * llm_graph_context::build_attn( + llm_graph_input_attn_cross * inp, + ggml_cgraph * gf, + ggml_tensor * wo, + ggml_tensor * wo_b, + ggml_tensor * q_cur, + ggml_tensor * k_cur, + ggml_tensor * v_cur, + ggml_tensor * kq_b, + float kq_scale, + int il) const { + // these nodes are added to the graph together so that they are not reordered + // by doing so, the number of splits in the graph is reduced + ggml_build_forward_expand(gf, q_cur); + ggml_build_forward_expand(gf, k_cur); + ggml_build_forward_expand(gf, v_cur); + + const auto & kq_mask = inp->get_kq_mask_cross(); + + ggml_tensor * q = ggml_permute(ctx0, q_cur, 0, 2, 1, 3); + //cb(q, "q", il); + + ggml_tensor * k = ggml_permute(ctx0, k_cur, 0, 2, 1, 3); + //cb(k, "k", il); + + ggml_tensor * v = ggml_permute(ctx0, v_cur, 0, 2, 1, 3); + //cb(k, "v", il); + + ggml_tensor * cur = build_attn_mha(gf, q, k, v, kq_b, kq_mask, false, kq_scale); + + cb(cur, "kqv_out", il); + + if (wo) { + cur = build_lora_mm(wo, cur); + } + + if (wo_b) { + //cb(cur, "kqv_wo", il); + } + + if (wo_b) { + cur = ggml_add(ctx0, cur, wo_b); + } + + return cur; +} + +ggml_tensor * llm_graph_context::build_copy_mask_state( + ggml_cgraph * gf, + ggml_tensor * s, + ggml_tensor * state_copy, + ggml_tensor * state_mask, + int32_t n_state, + int32_t n_seqs) const { + const llama_kv_cache_unified * kv_self = static_cast(memory); + + const auto n_kv = kv_self->n; + const auto kv_head = kv_self->head; + + ggml_tensor * states = ggml_reshape_2d(ctx0, s, n_state, kv_self->size); + + // copy states + // NOTE: assuming the copy destinations are ALL contained between kv_head and kv_head + n_kv + // this shrinks the tensors's ne[1] to n_kv + states = ggml_get_rows(ctx0, states, state_copy); + + // clear states of sequences which are starting at the beginning of this batch + // FIXME: zero-out NANs? 
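+    // state_mask is the F32 [1, n_kv] tensor filled in llm_graph_input_s_mask::set_input:
+    // 1.0f for cells that already hold a valid state, 0.0f for cells being reset, so the
+    // multiplication below clears the latter (note that a NaN in a cleared state would
+    // survive the multiply, which is presumably what the FIXME above refers to)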
+ states = ggml_mul(ctx0, states, state_mask); + + // copy states which won't be changed further (between n_seqs and n_kv) + ggml_build_forward_expand(gf, + ggml_cpy(ctx0, + ggml_view_1d(ctx0, states, n_state*(n_kv - n_seqs), (n_seqs )*n_state*ggml_element_size(states)), + ggml_view_1d(ctx0, s, n_state*(n_kv - n_seqs), (kv_head + n_seqs)*n_state*ggml_element_size(s)))); + + // the part of the states that will be used and modified + return ggml_view_2d(ctx0, states, n_state, n_seqs, states->nb[1], 0); +} + +ggml_tensor * llm_graph_context::build_rwkv_token_shift_load( + ggml_cgraph * gf, + ggml_tensor * state_copy, + ggml_tensor * state_mask, + const llama_ubatch & ubatch, + int il) const { + const llama_kv_cache_unified * kv_self = static_cast(memory); + + const auto token_shift_count = hparams.token_shift_count; + + const int64_t n_seqs = ubatch.n_seqs; + + ggml_tensor * token_shift_all = kv_self->k_l[il]; + + ggml_tensor * token_shift = build_copy_mask_state( + gf, token_shift_all, state_copy, state_mask, + hparams.n_embd_k_s(), n_seqs); + + token_shift = ggml_reshape_3d(ctx0, token_shift, hparams.n_embd, token_shift_count, n_seqs); + + return token_shift; +} + +ggml_tensor * llm_graph_context::build_rwkv_token_shift_store( + ggml_tensor * token_shift, + const llama_ubatch & ubatch, + int il) const { + const llama_kv_cache_unified * kv_self = static_cast(memory); + + const auto token_shift_count = hparams.token_shift_count; + const auto n_embd = hparams.n_embd; + + const int64_t n_seqs = ubatch.n_seqs; + + const auto kv_head = kv_self->head; + + return ggml_cpy( + ctx0, + ggml_view_1d(ctx0, token_shift, n_embd * n_seqs * token_shift_count, 0), + ggml_view_1d(ctx0, kv_self->k_l[il], hparams.n_embd_k_s() * n_seqs, hparams.n_embd_k_s() * kv_head * ggml_element_size(kv_self->k_l[il])) + ); +} + +void llm_graph_context::build_pooling( + ggml_cgraph * gf, + ggml_tensor * cls, + ggml_tensor * cls_b, + ggml_tensor * cls_out, + ggml_tensor * cls_out_b) const { + if (!cparams.embeddings) { + return; + } + + ggml_tensor * inp = res->t_embd; + + //// find result_norm tensor for input + //for (int i = ggml_graph_n_nodes(gf) - 1; i >= 0; --i) { + // inp = ggml_graph_node(gf, i); + // if (strcmp(inp->name, "result_norm") == 0 || strcmp(inp->name, "result_embd") == 0) { + // break; + // } + + // inp = nullptr; + //} + + GGML_ASSERT(inp != nullptr && "missing result_norm/result_embd tensor"); + + ggml_tensor * cur; + + switch (pooling_type) { + case LLAMA_POOLING_TYPE_NONE: + { + cur = inp; + } break; + case LLAMA_POOLING_TYPE_MEAN: + { + ggml_tensor * inp_mean = build_inp_mean(); + cur = ggml_mul_mat(ctx0, ggml_cont(ctx0, ggml_transpose(ctx0, inp)), inp_mean); + } break; + case LLAMA_POOLING_TYPE_CLS: + case LLAMA_POOLING_TYPE_LAST: + { + ggml_tensor * inp_cls = build_inp_cls(); + cur = ggml_get_rows(ctx0, inp, inp_cls); + } break; + case LLAMA_POOLING_TYPE_RANK: + { + ggml_tensor * inp_cls = build_inp_cls(); + inp = ggml_get_rows(ctx0, inp, inp_cls); + + // classification head + // https://github.com/huggingface/transformers/blob/5af7d41e49bbfc8319f462eb45253dcb3863dfb7/src/transformers/models/roberta/modeling_roberta.py#L1566 + GGML_ASSERT(cls != nullptr); + GGML_ASSERT(cls_b != nullptr); + + cur = ggml_add (ctx0, ggml_mul_mat(ctx0, cls, inp), cls_b); + cur = ggml_tanh(ctx0, cur); + + // some models don't have `cls_out`, for example: https://huggingface.co/jinaai/jina-reranker-v1-tiny-en + // 
https://huggingface.co/jinaai/jina-reranker-v1-tiny-en/blob/cb5347e43979c3084a890e3f99491952603ae1b7/modeling_bert.py#L884-L896 + if (cls_out) { + GGML_ASSERT(cls_out_b != nullptr); + + cur = ggml_add (ctx0, ggml_mul_mat(ctx0, cls_out, cur), cls_out_b); + } + } break; + default: + { + GGML_ABORT("unknown pooling type"); + } + } + + cb(cur, "result_embd_pooled", -1); + res->t_embd_pooled = cur; + + ggml_build_forward_expand(gf, cur); +} + diff --git a/src/llama-graph.h b/src/llama-graph.h new file mode 100644 index 0000000000000..b7a66d1898736 --- /dev/null +++ b/src/llama-graph.h @@ -0,0 +1,576 @@ +#pragma once + +#include "llama-arch.h" +#include "llama-hparams.h" +#include "llama-adapter.h" + +#include +#include +#include +#include +#include + +struct ggml_cgraph; +struct ggml_context; +struct ggml_tensor; + +struct llama_ubatch; +struct llama_cparams; + +class llama_memory_i; +class llama_kv_cache_unified; + +// certain models (typically multi-modal) can produce different types of graphs +enum llm_graph_type { + LLM_GRAPH_TYPE_DEFAULT, + LLM_GRAPH_TYPE_ENCODER, + LLM_GRAPH_TYPE_DECODER, +}; + +enum llm_ffn_op_type { + LLM_FFN_SILU, + LLM_FFN_GELU, + LLM_FFN_RELU, + LLM_FFN_RELU_SQR, + LLM_FFN_SWIGLU, +}; + +enum llm_ffn_gate_type { + LLM_FFN_SEQ, + LLM_FFN_PAR, // ffn_gate is parallel to ffn_up +}; + +enum llm_norm_type { + LLM_NORM, + LLM_NORM_RMS, + LLM_NORM_GROUP, +}; + +// TODO: tmp - need something better to pass the data from the encoder to the decoder +struct llama_cross { + // the output embeddings from the encoder as a ggml tensor + // TODO: this needs more work to be correct, for now copy the embeddings data to host memory + // ref: https://github.com/ggml-org/llama.cpp/pull/11213#discussion_r1969892524 + //ggml_tensor * t_embd = nullptr; + + int64_t n_embd = 0; + int64_t n_enc = 0; + + // embeddings data copied to host memory (tmp) + std::vector v_embd; + + // needed to construct the cross-attention mask in the decoder + std::vector> seq_ids_enc; +}; + +// +// llm_graph_input +// + +class llm_graph_input_i { +public: + virtual ~llm_graph_input_i() = default; + + virtual void set_input(const llama_ubatch * ubatch) = 0; +}; + +using llm_graph_input_ptr = std::unique_ptr; + + +class llm_graph_input_embd : public llm_graph_input_i { +public: + llm_graph_input_embd() = default; + virtual ~llm_graph_input_embd() = default; + + void set_input(const llama_ubatch * ubatch) override; + + ggml_tensor * tokens = nullptr; // I32 [n_batch] + ggml_tensor * embd = nullptr; // F32 [n_embd, n_batch] +}; + +class llm_graph_input_pos : public llm_graph_input_i { +public: + llm_graph_input_pos(int64_t n_pos_per_token) : n_pos_per_token(n_pos_per_token) {} + virtual ~llm_graph_input_pos() = default; + + void set_input(const llama_ubatch * ubatch) override; + + ggml_tensor * pos = nullptr; // I32 [n_batch] + + const int64_t n_pos_per_token = 1; +}; + +class llm_graph_input_pos_bucket : public llm_graph_input_i { +public: + llm_graph_input_pos_bucket(const llama_hparams & hparams) : hparams(hparams) {} + virtual ~llm_graph_input_pos_bucket() = default; + + void set_input(const llama_ubatch * ubatch) override; + + ggml_tensor * pos_bucket = nullptr; // I32 [n_batch, n_batch] + + const llama_hparams & hparams; +}; + +class llm_graph_input_pos_bucket_kv : public llm_graph_input_i { +public: + llm_graph_input_pos_bucket_kv( + const llama_hparams & hparams, + const llama_kv_cache_unified * kv_self) : hparams(hparams), kv_self(kv_self) {} + virtual ~llm_graph_input_pos_bucket_kv() = default; + + void 
set_input(const llama_ubatch * ubatch) override; + + ggml_tensor * pos_bucket = nullptr; // I32 [n_kv, n_batch] + + const llama_hparams & hparams; + const llama_kv_cache_unified * kv_self; +}; + +class llm_graph_input_out_ids : public llm_graph_input_i { +public: + llm_graph_input_out_ids( + const llama_hparams & hparams, + const llama_cparams & cparams, + int32_t n_outputs) : hparams(hparams), cparams(cparams), n_outputs(n_outputs) {} + virtual ~llm_graph_input_out_ids() = default; + + void set_input(const llama_ubatch * ubatch) override; + + ggml_tensor * out_ids; // I32 [n_outputs] + + const llama_hparams & hparams; + const llama_cparams & cparams; + + const int32_t n_outputs; +}; + +class llm_graph_input_mean : public llm_graph_input_i { +public: + llm_graph_input_mean(const llama_cparams & cparams) : cparams(cparams) {} + virtual ~llm_graph_input_mean() = default; + + void set_input(const llama_ubatch * ubatch) override; + + ggml_tensor * mean; // F32 [n_batch, n_batch] + + const llama_cparams & cparams; +}; + +class llm_graph_input_cls : public llm_graph_input_i { +public: + llm_graph_input_cls(const llama_cparams & cparams) : cparams(cparams) {} + virtual ~llm_graph_input_cls() = default; + + void set_input(const llama_ubatch * ubatch) override; + + ggml_tensor * cls; // I32 [n_batch] + + const llama_cparams & cparams; +}; + +class llm_graph_input_s_copy : public llm_graph_input_i { +public: + llm_graph_input_s_copy(const llama_kv_cache_unified * kv_self) : kv_self(kv_self) {} + virtual ~llm_graph_input_s_copy() = default; + + void set_input(const llama_ubatch * ubatch) override; + + ggml_tensor * s_copy; // I32 [kv_size] + + const llama_kv_cache_unified * kv_self; +}; + +class llm_graph_input_s_mask : public llm_graph_input_i { +public: + llm_graph_input_s_mask(const llama_kv_cache_unified * kv_self) : kv_self(kv_self) {} + virtual ~llm_graph_input_s_mask() = default; + + void set_input(const llama_ubatch * ubatch) override; + + ggml_tensor * s_mask; // F32 [1, n_kv] + + const llama_kv_cache_unified * kv_self; +}; + +class llm_graph_input_cross_embd : public llm_graph_input_i { +public: + llm_graph_input_cross_embd( + const llama_cross * cross) : cross(cross) {} + virtual ~llm_graph_input_cross_embd() = default; + + void set_input(const llama_ubatch * ubatch) override; + + ggml_tensor * cross_embd; // F32 [n_embd, n_outputs_enc] + + const llama_cross * cross; +}; + +class llm_graph_input_attn_no_cache : public llm_graph_input_i { +public: + llm_graph_input_attn_no_cache(const llama_hparams & hparams, const llama_cparams & cparams) : + hparams(hparams), + cparams(cparams) { + } + ~llm_graph_input_attn_no_cache() = default; + + void set_input(const llama_ubatch * ubatch) override; + + ggml_tensor * get_kq_mask() const { return kq_mask_cnv; } + + ggml_tensor * kq_mask = nullptr; // F32 [n_tokens, n_batch] + ggml_tensor * kq_mask_cnv = nullptr; // [n_tokens, n_batch] + + const llama_hparams & hparams; + const llama_cparams & cparams; +}; + +class llm_graph_input_attn_kv_unified : public llm_graph_input_i { +public: + llm_graph_input_attn_kv_unified( + const llama_hparams & hparams, + const llama_cparams & cparams, + const llama_kv_cache_unified * kv_self) : + hparams(hparams), + cparams(cparams), + kv_self(kv_self) { + } + ~llm_graph_input_attn_kv_unified() = default; + + void set_input(const llama_ubatch * ubatch) override; + + ggml_tensor * get_kq_mask() const { return self_kq_mask_cnv; } + ggml_tensor * get_kq_mask_swa() const { return self_kq_mask_swa_cnv; } + + ggml_tensor * 
self_kq_mask = nullptr; // F32 [n_kv, n_batch] + ggml_tensor * self_kq_mask_cnv = nullptr; // [n_kv, n_batch] + ggml_tensor * self_kq_mask_swa = nullptr; // F32 [n_kv, n_batch] + ggml_tensor * self_kq_mask_swa_cnv = nullptr; // [n_kv, n_batch] + + const llama_hparams & hparams; + const llama_cparams & cparams; + + const llama_kv_cache_unified * kv_self; +}; + +class llm_graph_input_attn_cross : public llm_graph_input_i { +public: + llm_graph_input_attn_cross(const llama_cross * cross) : cross(cross) {} + ~llm_graph_input_attn_cross() = default; + + void set_input(const llama_ubatch * ubatch) override; + + ggml_tensor * get_kq_mask_cross() const { return cross_kq_mask_cnv; } + + ggml_tensor * cross_kq_mask = nullptr; // F32 [n_outputs_enc, n_batch] + ggml_tensor * cross_kq_mask_cnv = nullptr; // F32 [n_outputs_enc, n_batch] + + const llama_cross * cross = nullptr; +}; + +// +// llm_graph_result +// + +// these objects deliver the result from the graph build process back to the llama_context +// note that the input tensors created for the graph are referenced here - the goal is to be able to populate their +// specific data, by calling the set_inputs() method +// along with the input tensors, the object also provides commonly used outputs tensors, such as logits, embeddings, etc. +// these are used by the llama_context to extact the relevant data, based on the compute parameters + +class llm_graph_result_i { +public: + virtual ~llm_graph_result_i() = default; + + virtual ggml_tensor * get_logits() = 0; + virtual ggml_tensor * get_embd() = 0; + virtual ggml_tensor * get_embd_pooled() = 0; + + virtual void set_inputs(const llama_ubatch * ubatch) = 0; +}; + +using llm_graph_result_ptr = std::unique_ptr; + + +class llm_graph_result : public llm_graph_result_i { +public: + virtual ~llm_graph_result() = default; + + ggml_tensor * get_logits() override { return t_logits; } + ggml_tensor * get_embd() override { return t_embd; } + ggml_tensor * get_embd_pooled() override { return t_embd_pooled; } + + void set_inputs(const llama_ubatch * ubatch) override { + for (auto & input : inputs) { + input->set_input(ubatch); + } + } + + llm_graph_input_i * add_input(llm_graph_input_ptr input) { + inputs.emplace_back(std::move(input)); + return inputs.back().get(); + } + + // important graph nodes + ggml_tensor * t_logits = nullptr; + ggml_tensor * t_embd = nullptr; + ggml_tensor * t_embd_pooled = nullptr; + + std::vector inputs; +}; + +// +// llm_graph_context +// + +// callback that allows us to apply custom logic to each tensor (e.g. ggml-alloc, offloading, etc.) 
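+// it is invoked from llm_graph_context::cb() as cb_func(ubatch, cur, name, il)
+//
+// illustrative sketch (hypothetical caller code, not part of this patch) of a callback that
+// only names tensors to make debugging output easier to read:
+//
+//   llm_graph_cb cb = [](const llama_ubatch & ubatch, ggml_tensor * cur, const char * name, int il) {
+//       GGML_UNUSED(ubatch);
+//       if (il >= 0) {
+//           ggml_format_name(cur, "%s-%d", name, il); // e.g. "ffn_moe_logits-12"
+//       } else {
+//           ggml_set_name(cur, name);
+//       }
+//   };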
+using llm_graph_cb = std::function; + +struct llm_graph_params { + ggml_context * ctx; + + const llm_arch arch; + + const llama_hparams & hparams; + const llama_cparams & cparams; + const llama_ubatch & ubatch; + + ggml_backend_sched * sched; + ggml_backend * backend_cpu; + + const llama_adapter_cvec * cvec; + const llama_adapter_loras * loras; + const llama_memory_i * memory; + const llama_cross * cross; + + int32_t n_outputs; + + const llm_graph_cb & cb; +}; + +struct llm_graph_context { + const llm_arch arch; + + const llama_hparams & hparams; + const llama_cparams & cparams; + const llama_ubatch & ubatch; + + const int64_t n_embd; + const int64_t n_layer; + const int64_t n_rot; + const int64_t n_ctx; // user-specified context size (can be different from n_ctx_train) + const int64_t n_ctx_per_seq; + const int64_t n_head; + const int64_t n_head_kv; + const int64_t n_embd_head_k; + const int64_t n_embd_k_gqa; + const int64_t n_embd_head_v; + const int64_t n_embd_v_gqa; + const int64_t n_expert; + const int64_t n_expert_used; + + const float freq_base; + const float freq_scale; + const float ext_factor; + const float attn_factor; + const float beta_fast; + const float beta_slow; + const float norm_eps; + const float norm_rms_eps; + + const int32_t n_tokens; + const int32_t n_outputs; + const int32_t n_ctx_orig; // yarn + + const enum llama_pooling_type pooling_type; + const enum llama_rope_type rope_type; + + ggml_context * ctx0 = nullptr; + + ggml_backend_sched * sched; + + ggml_backend * backend_cpu; // TODO: needed by build_attn_mha, figure out a way to remove? + + const llama_adapter_cvec * cvec; + const llama_adapter_loras * loras; + const llama_memory_i * memory; + const llama_cross * cross; + + const llm_graph_cb & cb_func; + + std::unique_ptr res; + + llm_graph_context(const llm_graph_params & params); + + int64_t n_pos_per_token() const; + + void cb(ggml_tensor * cur, const char * name, int il) const; + + // + // common + // + + ggml_tensor * build_cvec( + ggml_tensor * cur, + int il) const; + + // do mat_mul, while optionally apply lora + ggml_tensor * build_lora_mm( + ggml_tensor * w, + ggml_tensor * cur) const; + + // do mat_mul_id, while optionally apply lora + ggml_tensor * build_lora_mm_id( + ggml_tensor * w, // ggml_tensor * as + ggml_tensor * cur, // ggml_tensor * b + ggml_tensor * ids) const; + + ggml_tensor * build_norm( + ggml_tensor * cur, + ggml_tensor * mw, + ggml_tensor * mb, + llm_norm_type type, + int il) const; + + ggml_tensor * build_ffn( + ggml_tensor * cur, + ggml_tensor * up, + ggml_tensor * up_b, + ggml_tensor * up_s, + ggml_tensor * gate, + ggml_tensor * gate_b, + ggml_tensor * gate_s, + ggml_tensor * down, + ggml_tensor * down_b, + ggml_tensor * down_s, + ggml_tensor * act_scales, + llm_ffn_op_type type_op, + llm_ffn_gate_type type_gate, + int il) const; + + ggml_tensor * build_moe_ffn( + ggml_tensor * cur, + ggml_tensor * gate_inp, + ggml_tensor * up_exps, + ggml_tensor * gate_exps, + ggml_tensor * down_exps, + ggml_tensor * exp_probs_b, + int64_t n_expert, + int64_t n_expert_used, + llm_ffn_op_type type_op, + bool norm_w, + bool scale_w, + float w_scale, + llama_expert_gating_func_type gating_op, + int il) const; + + // + // inputs + // + + ggml_tensor * build_inp_embd(ggml_tensor * tok_embd) const; + ggml_tensor * build_inp_pos() const; + ggml_tensor * build_inp_out_ids() const; + ggml_tensor * build_inp_mean() const; + ggml_tensor * build_inp_cls() const; + ggml_tensor * build_inp_s_copy() const; + ggml_tensor * build_inp_s_mask() const; + + 
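+    // the inputs below are used by encoder-decoder graphs: cross-attention embeddings coming
+    // from the encoder and the relative position buckets consumed by build_pos_bias()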
ggml_tensor * build_inp_cross_embd() const; + ggml_tensor * build_inp_pos_bucket_enc() const; + ggml_tensor * build_inp_pos_bucket_dec() const; + ggml_tensor * build_pos_bias(ggml_tensor * pos_bucket, ggml_tensor * attn_rel_b) const; + + // + // attention + // + + ggml_tensor * build_attn_mha( + ggml_cgraph * gf, + ggml_tensor * q, + ggml_tensor * k, + ggml_tensor * v, + ggml_tensor * kq_b, + ggml_tensor * kq_mask, + bool v_trans, + float kq_scale) const; + + llm_graph_input_attn_no_cache * build_attn_inp_no_cache() const; + + ggml_tensor * build_attn( + llm_graph_input_attn_no_cache * inp, + ggml_cgraph * gf, + ggml_tensor * wo, + ggml_tensor * wo_b, + ggml_tensor * q_cur, + ggml_tensor * k_cur, + ggml_tensor * v_cur, + ggml_tensor * kq_b, + float kq_scale, + int il) const; + + llm_graph_input_attn_kv_unified * build_attn_inp_kv_unified( + bool causal, + bool swa) const; + + ggml_tensor * build_attn( + llm_graph_input_attn_kv_unified * inp, + ggml_cgraph * gf, + ggml_tensor * wo, + ggml_tensor * wo_b, + ggml_tensor * q_cur, + ggml_tensor * k_cur, + ggml_tensor * v_cur, + ggml_tensor * kq_b, + float kq_scale, + int il) const; + + llm_graph_input_attn_cross * build_attn_inp_cross() const; + + ggml_tensor * build_attn( + llm_graph_input_attn_cross * inp, + ggml_cgraph * gf, + ggml_tensor * wo, + ggml_tensor * wo_b, + ggml_tensor * q_cur, + ggml_tensor * k_cur, + ggml_tensor * v_cur, + ggml_tensor * kq_b, + float kq_scale, + int il) const; + + // + // recurrent + // + + ggml_tensor * build_copy_mask_state( + ggml_cgraph * gf, + ggml_tensor * s, + ggml_tensor * state_copy, + ggml_tensor * state_mask, + int32_t n_state, + int32_t n_seqs) const; + + ggml_tensor * build_rwkv_token_shift_load( + ggml_cgraph * gf, + ggml_tensor * state_copy, + ggml_tensor * state_mask, + const llama_ubatch & ubatch, + int il) const; + + ggml_tensor * build_rwkv_token_shift_store( + ggml_tensor * token_shift, + const llama_ubatch & ubatch, + int il) const; + + // + // pooling + // + + void build_pooling( + ggml_cgraph * gf, + ggml_tensor * cls, + ggml_tensor * cls_b, + ggml_tensor * cls_out, + ggml_tensor * cls_out_b) const; +}; diff --git a/src/llama-io.cpp b/src/llama-io.cpp new file mode 100644 index 0000000000000..7ad70d163343d --- /dev/null +++ b/src/llama-io.cpp @@ -0,0 +1,15 @@ +#include "llama-io.h" + +void llama_io_write_i::write_string(const std::string & str) { + uint32_t str_size = str.size(); + + write(&str_size, sizeof(str_size)); + write(str.data(), str_size); +} + +void llama_io_read_i::read_string(std::string & str) { + uint32_t str_size; + read_to(&str_size, sizeof(str_size)); + + str.assign((const char *) read(str_size), str_size); +} diff --git a/src/llama-io.h b/src/llama-io.h new file mode 100644 index 0000000000000..ce9216b83b192 --- /dev/null +++ b/src/llama-io.h @@ -0,0 +1,35 @@ +#pragma once + +#include +#include +#include + +struct ggml_tensor; + +class llama_io_write_i { +public: + llama_io_write_i() = default; + virtual ~llama_io_write_i() = default; + + virtual void write(const void * src, size_t size) = 0; + virtual void write_tensor(const ggml_tensor * tensor, size_t offset, size_t size) = 0; + + // bytes written so far + virtual size_t n_bytes() = 0; + + void write_string(const std::string & str); +}; + +class llama_io_read_i { +public: + llama_io_read_i() = default; + virtual ~llama_io_read_i() = default; + + virtual const uint8_t * read(size_t size) = 0; + virtual void read_to(void * dst, size_t size) = 0; + + // bytes read so far + virtual size_t n_bytes() = 0; + + void 
read_string(std::string & str); +}; diff --git a/src/llama-kv-cache.cpp b/src/llama-kv-cache.cpp index feffdf0de52cf..14c8933b4d6c4 100644 --- a/src/llama-kv-cache.cpp +++ b/src/llama-kv-cache.cpp @@ -6,86 +6,92 @@ #include "llama-model.h" #include +#include #include #include +#include static const llama_kv_cache_slot_info llama_kv_cache_slot_info_failed{false}; -uint32_t llama_kv_cache_get_padding(const struct llama_cparams & cparams) { - // the FA kernels require padding to avoid extra runtime boundary checks - return cparams.flash_attn ? 256u : 32u; +llama_kv_cache_unified::llama_kv_cache_unified(const llama_hparams & hparams, callbacks cbs) : hparams(hparams), cbs(std::move(cbs)) { } -bool llama_kv_cache_init( - struct llama_kv_cache & cache, - const llama_model & model, - const llama_cparams & cparams, - ggml_type type_k, - ggml_type type_v, - uint32_t kv_size, - bool offload) { - const struct llama_hparams & hparams = model.hparams; - +bool llama_kv_cache_unified::init( + const llama_model & model, + const llama_cparams & cparams, + ggml_type type_k, + ggml_type type_v, + uint32_t kv_size, + bool offload) { const int32_t n_layer = hparams.n_layer; - cache.has_shift = false; + has_shift = false; - cache.recurrent = llama_model_is_recurrent(&model); - cache.v_trans = !cache.recurrent && !cparams.flash_attn; - cache.can_shift = !cache.recurrent && model.arch != LLM_ARCH_DEEPSEEK2; // not supported due to MLA + recurrent = llama_model_is_recurrent(&model); + v_trans = !recurrent && !cparams.flash_attn; + can_shift = !recurrent && model.arch != LLM_ARCH_DEEPSEEK2; // not supported due to MLA LLAMA_LOG_INFO("%s: kv_size = %d, offload = %d, type_k = '%s', type_v = '%s', n_layer = %d, can_shift = %d\n", - __func__, kv_size, offload, ggml_type_name(type_k), ggml_type_name(type_v), n_layer, cache.can_shift); + __func__, kv_size, offload, ggml_type_name(type_k), ggml_type_name(type_v), n_layer, can_shift); - cache.head = 0; - cache.size = kv_size; - cache.used = 0; + head = 0; + size = kv_size; + used = 0; - cache.type_k = type_k; - cache.type_v = type_v; + this->type_k = type_k; + this->type_v = type_v; - cache.cells.clear(); - cache.cells.resize(kv_size); + cells.clear(); + cells.resize(kv_size); // create a context for each buffer type std::map ctx_map; auto ctx_for_buft = [&](ggml_backend_buffer_type_t buft) -> ggml_context * { auto it = ctx_map.find(buft); if (it == ctx_map.end()) { - struct ggml_init_params params = { + ggml_init_params params = { /*.mem_size =*/ size_t(2u*n_layer*ggml_tensor_overhead()), /*.mem_buffer =*/ NULL, /*.no_alloc =*/ true, }; + ggml_context * ctx = ggml_init(params); if (!ctx) { return nullptr; } + ctx_map[buft] = ctx; - cache.ctxs.emplace_back(ctx); + ctxs.emplace_back(ctx); + return ctx; } + return it->second; }; - cache.k_l.reserve(n_layer); - cache.v_l.reserve(n_layer); + k_l.reserve(n_layer); + v_l.reserve(n_layer); for (int i = 0; i < n_layer; i++) { const uint32_t n_embd_k_gqa = hparams.n_embd_k_gqa(i) + hparams.n_embd_k_s(); const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(i) + hparams.n_embd_v_s(); - LLAMA_LOG_DEBUG("%s: layer %d: n_embd_k_gqa = %d, n_embd_v_gqa = %d\n", __func__, i, n_embd_k_gqa, n_embd_v_gqa); + const char * dev_name = "CPU"; ggml_backend_buffer_type_t buft; if (offload) { auto * dev = model.dev_layer(i); buft = ggml_backend_dev_buffer_type(dev); + + dev_name = ggml_backend_dev_name(dev); } else { buft = ggml_backend_cpu_buffer_type(); } - ggml_context * ctx = ctx_for_buft(buft); + LLAMA_LOG_DEBUG("%s: layer %3d: n_embd_k_gqa = %d, 
n_embd_v_gqa = %d, dev = %s\n", __func__, + i, n_embd_k_gqa, n_embd_v_gqa, dev_name); + + ggml_context * ctx = ctx_for_buft(buft); if (!ctx) { LLAMA_LOG_ERROR("%s: failed to create ggml context for kv cache\n", __func__); return false; @@ -95,8 +101,8 @@ bool llama_kv_cache_init( ggml_tensor * v = ggml_new_tensor_1d(ctx, type_v, n_embd_v_gqa*kv_size); ggml_format_name(k, "cache_k_l%d", i); ggml_format_name(v, "cache_v_l%d", i); - cache.k_l.push_back(k); - cache.v_l.push_back(v); + k_l.push_back(k); + v_l.push_back(v); } // allocate tensors and initialize the buffers to avoid NaNs in the padding @@ -111,20 +117,346 @@ bool llama_kv_cache_init( } ggml_backend_buffer_clear(buf, 0); LLAMA_LOG_INFO("%s: %10s KV buffer size = %8.2f MiB\n", __func__, ggml_backend_buffer_name(buf), ggml_backend_buffer_get_size(buf)/1024.0/1024.0); - cache.bufs.emplace_back(buf); + bufs.emplace_back(buf); } return true; } -struct llama_kv_cache_slot_info llama_kv_cache_find_slot( - struct llama_kv_cache & cache, - const struct llama_ubatch & ubatch) { +int32_t llama_kv_cache_unified::get_n_tokens() const { + int32_t result = 0; + + for (uint32_t i = 0; i < size; i++) { + result += cells[i].seq_id.size(); + } + + return result; +} + +uint32_t llama_kv_cache_unified::get_used_cells() const { + return used; +} + +size_t llama_kv_cache_unified::total_size() const { + size_t size = 0; + for (const auto & buf : bufs) { + size += ggml_backend_buffer_get_size(buf.get()); + } + + return size; +} + +llama_pos llama_kv_cache_unified::pos_max() const { + llama_pos pos_max = -1; + for (const auto & cell : cells) { + pos_max = std::max(pos_max, cell.pos); + } + + return pos_max; +} + +void llama_kv_cache_unified::clear() { + for (int32_t i = 0; i < (int32_t) size; ++i) { + cells[i].pos = -1; + cells[i].seq_id.clear(); + cells[i].src = -1; + cells[i].tail = -1; + } + head = 0; + used = 0; + + for (auto & buf : bufs) { + ggml_backend_buffer_clear(buf.get(), 0); + } +} + +bool llama_kv_cache_unified::seq_rm(llama_seq_id seq_id, llama_pos p0, llama_pos p1) { + uint32_t new_head = size; + + if (p0 < 0) { + p0 = 0; + } + + if (p1 < 0) { + p1 = std::numeric_limits::max(); + } + + // models like Mamba or RWKV can't have a state partially erased + if (recurrent) { + if (seq_id >= (int64_t) size) { + // could be fatal + return false; + } + if (0 <= seq_id) { + int32_t & tail_id = cells[seq_id].tail; + if (tail_id >= 0) { + const llama_kv_cell & cell = cells[tail_id]; + // partial intersection is invalid + if ((0 < p0 && p0 <= cell.pos) || (0 < p1 && p1 <= cell.pos)) { + return false; + } + // invalidate tails which will be cleared + if (p0 <= cell.pos && cell.pos < p1) { + tail_id = -1; + } + } + } else { + // seq_id is negative, then the range should include everything or nothing + if (p0 != p1 && (p0 != 0 || p1 != std::numeric_limits::max())) { + return false; + } + } + } + + for (uint32_t i = 0; i < size; ++i) { + if (cells[i].pos >= p0 && cells[i].pos < p1) { + if (seq_id < 0) { + cells[i].seq_id.clear(); + } else if (cells[i].has_seq_id(seq_id)) { + cells[i].seq_id.erase(seq_id); + } else { + continue; + } + if (cells[i].is_empty()) { + // keep count of the number of used cells + if (cells[i].pos >= 0) { + used--; + } + + cells[i].pos = -1; + cells[i].src = -1; + + if (new_head == size) { + new_head = i; + } + } + } + } + + // If we freed up a slot, set head to it so searching can start there. 
+ if (new_head != size && new_head < head) { + head = new_head; + } + + return true; +} + +void llama_kv_cache_unified::seq_cp(llama_seq_id seq_id_src, llama_seq_id seq_id_dst, llama_pos p0, llama_pos p1) { + if (seq_id_src == seq_id_dst) { + return; + } + + if (p0 < 0) { + p0 = 0; + } + + if (p1 < 0) { + p1 = std::numeric_limits::max(); + } + + if (recurrent) { + if ((uint32_t) seq_id_dst < size && (uint32_t) seq_id_src < size) { + llama_kv_cell & tail_src = cells[seq_id_src]; + llama_kv_cell & tail_dst = cells[seq_id_dst]; + if (tail_dst.tail >= 0) { + // clear destination seq_id if it wasn't empty + llama_kv_cell & cell_dst = cells[tail_dst.tail]; + + cell_dst.seq_id.erase(seq_id_dst); + tail_dst.tail = -1; + if (cell_dst.seq_id.empty()) { + cell_dst.pos = -1; + cell_dst.delta = -1; + cell_dst.src = -1; + used -= 1; + } + } + if (tail_src.tail >= 0) { + llama_kv_cell & cell_src = cells[tail_src.tail]; + + cell_src.seq_id.insert(seq_id_dst); + tail_dst.tail = tail_src.tail; + } + } + + return; + } + + // otherwise, this is the KV of a Transformer-like model + head = 0; + + for (uint32_t i = 0; i < size; ++i) { + if (cells[i].has_seq_id(seq_id_src) && cells[i].pos >= p0 && cells[i].pos < p1) { + cells[i].seq_id.insert(seq_id_dst); + } + } +} + +void llama_kv_cache_unified::seq_keep(llama_seq_id seq_id) { + uint32_t new_head = size; + + for (uint32_t i = 0; i < size; ++i) { + if (recurrent && (llama_seq_id) i != seq_id) { + cells[i].tail = -1; + } + + if (!cells[i].has_seq_id(seq_id)) { + if (cells[i].pos >= 0) { + used--; + } + + cells[i].pos = -1; + cells[i].src = -1; + cells[i].seq_id.clear(); + + if (new_head == size){ + new_head = i; + } + } else { + cells[i].seq_id.clear(); + cells[i].seq_id.insert(seq_id); + } + } + + // If we freed up a slot, set head to it so searching can start there. + if (new_head != size && new_head < head) { + head = new_head; + } +} + +void llama_kv_cache_unified::seq_add(llama_seq_id seq_id, llama_pos p0, llama_pos p1, llama_pos delta) { + if (delta == 0) { + return; + } + + uint32_t new_head = size; + + if (p0 < 0) { + p0 = 0; + } + + if (p1 < 0) { + p1 = std::numeric_limits::max(); + } + + // If there is no range then return early to avoid looping over the + if (p0 == p1) { + return; + } + + if (recurrent) { + // for Mamba-like or RWKV models, only the pos needs to be shifted + if (0 <= seq_id && seq_id < (int64_t) size) { + const int32_t tail_id = cells[seq_id].tail; + if (tail_id >= 0) { + llama_kv_cell & cell = cells[tail_id]; + if (cell.has_seq_id(seq_id) && p0 <= cell.pos && cell.pos < p1) { + cell.pos += delta; + } + } + } + return; + } + + for (uint32_t i = 0; i < size; ++i) { + if (cells[i].has_seq_id(seq_id) && cells[i].pos >= p0 && cells[i].pos < p1) { + has_shift = true; + cells[i].pos += delta; + cells[i].delta += delta; + + if (cells[i].pos < 0) { + if (!cells[i].is_empty()) { + used--; + } + cells[i].pos = -1; + cells[i].seq_id.clear(); + if (new_head == size) { + new_head = i; + } + } + } + } + + // If we freed up a slot, set head to it so searching can start there. + // Otherwise we just start the next search from the beginning. + head = new_head != size ? new_head : 0; +} + +void llama_kv_cache_unified::seq_div(llama_seq_id seq_id, llama_pos p0, llama_pos p1, int d) { + if (d == 1) { + return; + } + + if (p0 < 0) { + p0 = 0; + } + + if (p1 < 0) { + p1 = std::numeric_limits::max(); + } + + // If there is no range then return early to avoid looping over the cache. 
+ if (p0 == p1) { + return; + } + + if (recurrent) { + // for Mamba-like or RWKV models, only the pos needs to be changed + if (0 <= seq_id && seq_id < (int64_t) size) { + const int32_t tail_id = cells[seq_id].tail; + if (tail_id >= 0) { + llama_kv_cell & cell = cells[tail_id]; + if (cell.has_seq_id(seq_id) && p0 <= cell.pos && cell.pos < p1) { + cell.pos /= d; + } + } + } + + return; + } + + for (uint32_t i = 0; i < size; ++i) { + if (cells[i].has_seq_id(seq_id) && cells[i].pos >= p0 && cells[i].pos < p1) { + has_shift = true; + + { + llama_pos p_old = cells[i].pos; + cells[i].pos /= d; + cells[i].delta += cells[i].pos - p_old; + } + } + } +} + +llama_pos llama_kv_cache_unified::seq_pos_max(llama_seq_id seq_id) { + llama_pos result = 0; + + for (uint32_t i = 0; i < size; ++i) { + if (cells[i].has_seq_id(seq_id)) { + result = std::max(result, cells[i].pos); + } + } + + return result; +} + +void llama_kv_cache_unified::defrag() { + if (!recurrent) { + do_defrag = true; + } +} + +bool llama_kv_cache_unified::get_can_shift() const { + return can_shift; +} + +llama_kv_cache_slot_info llama_kv_cache_unified::find_slot( + const llama_ubatch & ubatch) { const uint32_t n_tokens = ubatch.n_tokens; const uint32_t n_seqs = ubatch.n_seqs; const uint32_t n_seq_tokens = ubatch.n_seq_tokens; - if (cache.recurrent) { + if (recurrent) { // For recurrent state architectures (like Mamba or RWKV), // each cache cell can store the state for a whole sequence. // A slot should be always be contiguous. @@ -132,7 +464,7 @@ struct llama_kv_cache_slot_info llama_kv_cache_find_slot( // can only process batches with an equal number of new tokens in each sequence GGML_ASSERT(ubatch.equal_seqs); - int32_t min = cache.size - 1; + int32_t min = size - 1; int32_t max = 0; // everything should fit if all seq_ids are smaller than the max @@ -141,16 +473,16 @@ struct llama_kv_cache_slot_info llama_kv_cache_find_slot( for (uint32_t j = 0; j < n_seq_id; ++j) { const llama_seq_id seq_id = ubatch.seq_id[s][j]; - if (seq_id < 0 || (uint32_t) seq_id >= cache.size) { + if (seq_id < 0 || (uint32_t) seq_id >= size) { // too big seq_id // TODO: would it be possible to resize the cache instead? 
- LLAMA_LOG_ERROR("%s: seq_id=%d >= n_seq_max=%d Try using a bigger --parallel value\n", __func__, seq_id, cache.size); + LLAMA_LOG_ERROR("%s: seq_id=%d >= n_seq_max=%d Try using a bigger --parallel value\n", __func__, seq_id, size); return llama_kv_cache_slot_info_failed; } if (j > 0) { - llama_kv_cell & seq = cache.cells[seq_id]; + llama_kv_cell & seq = cells[seq_id]; if (seq.tail >= 0) { - llama_kv_cell & cell = cache.cells[seq.tail]; + llama_kv_cell & cell = cells[seq.tail]; // clear cells from seq_ids that become shared // (should not normally happen, but let's handle it anyway) cell.seq_id.erase(seq_id); @@ -158,7 +490,7 @@ struct llama_kv_cache_slot_info llama_kv_cache_find_slot( if (cell.seq_id.empty()) { cell.pos = -1; cell.src = -1; - cache.used -= 1; + used -= 1; } } } @@ -168,9 +500,9 @@ struct llama_kv_cache_slot_info llama_kv_cache_find_slot( #ifndef NDEBUG { std::vector tails_verif; - tails_verif.assign(cache.size, -1); - for (uint32_t i = 0; i < cache.size; ++i) { - llama_kv_cell & cell = cache.cells[i]; + tails_verif.assign(size, -1); + for (uint32_t i = 0; i < size; ++i) { + llama_kv_cell & cell = cells[i]; for (llama_seq_id seq_id : cell.seq_id) { if (tails_verif[seq_id] != -1) { LLAMA_LOG_ERROR("%s: duplicate tail for seq_id %d in cell %d and %d\n", __func__, seq_id, i, tails_verif[seq_id]); @@ -178,20 +510,20 @@ struct llama_kv_cache_slot_info llama_kv_cache_find_slot( tails_verif[seq_id] = i; } } - for (uint32_t i = 0; i < cache.size; ++i) { - if (tails_verif[i] != cache.cells[i].tail) { - LLAMA_LOG_ERROR("%s: wrong tail for seq_id %d, (%d instead of %d)\n", __func__, i, cache.cells[i].tail, tails_verif[i]); + for (uint32_t i = 0; i < size; ++i) { + if (tails_verif[i] != cells[i].tail) { + LLAMA_LOG_ERROR("%s: wrong tail for seq_id %d, (%d instead of %d)\n", __func__, i, cells[i].tail, tails_verif[i]); } } } #endif // find next empty cell - uint32_t next_empty_cell = cache.head; + uint32_t next_empty_cell = head; - for (uint32_t i = 0; i < cache.size; ++i) { - if (next_empty_cell >= cache.size) { next_empty_cell -= cache.size; } - llama_kv_cell & cell = cache.cells[next_empty_cell]; + for (uint32_t i = 0; i < size; ++i) { + if (next_empty_cell >= size) { next_empty_cell -= size; } + llama_kv_cell & cell = cells[next_empty_cell]; if (cell.is_empty()) { break; } next_empty_cell += 1; } @@ -199,20 +531,20 @@ struct llama_kv_cache_slot_info llama_kv_cache_find_slot( // find usable cell range for (uint32_t s = 0; s < n_seqs; ++s) { const llama_seq_id seq_id = ubatch.seq_id[s][0]; - llama_kv_cell & seq_meta = cache.cells[seq_id]; + llama_kv_cell & seq_meta = cells[seq_id]; bool has_cell = false; if (seq_meta.tail >= 0) { - llama_kv_cell & cell = cache.cells[seq_meta.tail]; + llama_kv_cell & cell = cells[seq_meta.tail]; GGML_ASSERT(cell.has_seq_id(seq_id)); // does this seq_id "own" the cell? 
if (cell.seq_id.size() == 1) { has_cell = true; } } if (!has_cell) { - llama_kv_cell & empty_cell = cache.cells[next_empty_cell]; + llama_kv_cell & empty_cell = cells[next_empty_cell]; GGML_ASSERT(empty_cell.is_empty()); // copy old tail into the empty cell if (seq_meta.tail >= 0) { - llama_kv_cell & orig_cell = cache.cells[seq_meta.tail]; + llama_kv_cell & orig_cell = cells[seq_meta.tail]; empty_cell.pos = orig_cell.pos; empty_cell.src = orig_cell.src; orig_cell.seq_id.erase(seq_id); @@ -222,9 +554,9 @@ struct llama_kv_cache_slot_info llama_kv_cache_find_slot( // find next empty cell if (s + 1 < n_seqs) { next_empty_cell += 1; - for (uint32_t i = 0; i < cache.size; ++i) { - if (next_empty_cell >= cache.size) { next_empty_cell -= cache.size; } - llama_kv_cell & cell = cache.cells[next_empty_cell]; + for (uint32_t i = 0; i < size; ++i) { + if (next_empty_cell >= size) { next_empty_cell -= size; } + llama_kv_cell & cell = cells[next_empty_cell]; if (cell.is_empty()) { break; } next_empty_cell += 1; } @@ -237,10 +569,10 @@ struct llama_kv_cache_slot_info llama_kv_cache_find_slot( // gather and re-order for (uint32_t s = 0; s < n_seqs; ++s) { int32_t dst_id = s + min; - int32_t src_id = cache.cells[ubatch.seq_id[s][0]].tail; + int32_t src_id = cells[ubatch.seq_id[s][0]].tail; if (dst_id != src_id) { - llama_kv_cell & dst_cell = cache.cells[dst_id]; - llama_kv_cell & src_cell = cache.cells[src_id]; + llama_kv_cell & dst_cell = cells[dst_id]; + llama_kv_cell & src_cell = cells[src_id]; std::swap(dst_cell.pos, src_cell.pos); std::swap(dst_cell.src, src_cell.src); @@ -248,10 +580,10 @@ struct llama_kv_cache_slot_info llama_kv_cache_find_slot( // swap tails (assuming they NEVER overlap) for (const llama_seq_id seq_id : src_cell.seq_id) { - cache.cells[seq_id].tail = src_id; + cells[seq_id].tail = src_id; } for (const llama_seq_id seq_id : dst_cell.seq_id) { - cache.cells[seq_id].tail = dst_id; + cells[seq_id].tail = dst_id; } } } @@ -260,7 +592,7 @@ struct llama_kv_cache_slot_info llama_kv_cache_find_slot( for (uint32_t s = 0; s < n_seqs; ++s) { const llama_pos last_pos = ubatch.pos[n_seq_tokens * s + n_seq_tokens - 1]; int32_t cell_id = s + min; - llama_kv_cell & cell = cache.cells[cell_id]; + llama_kv_cell & cell = cells[cell_id]; if (cell.pos >= 0 && last_pos != cell.pos + (llama_pos) n_seq_tokens) { // What should happen when the pos backtracks or skips a value? @@ -273,41 +605,42 @@ struct llama_kv_cache_slot_info llama_kv_cache_find_slot( for (int32_t j = 0; j < ubatch.n_seq_id[s]; ++j) { const llama_seq_id seq_id = ubatch.seq_id[s][j]; cell.seq_id.insert(seq_id); - cache.cells[seq_id].tail = cell_id; + cells[seq_id].tail = cell_id; } } // allow getting the range of used cells, from head to head + n - cache.head = min; - cache.n = max - min + 1; - cache.used = std::count_if(cache.cells.begin(), cache.cells.end(), + head = min; + n = max - min + 1; + used = std::count_if(cells.begin(), cells.end(), [](const llama_kv_cell& cell){ return !cell.is_empty(); }); // sanity check - return llama_kv_cache_slot_info(cache.n >= n_seqs); + return llama_kv_cache_slot_info(n >= n_seqs); } + // otherwise, one cell per token. 
- if (n_tokens > cache.size) { - LLAMA_LOG_ERROR("%s: n_tokens=%d > cache.size=%d\n", __func__, n_tokens, cache.size); + if (n_tokens > size) { + LLAMA_LOG_ERROR("%s: n_tokens = %d > size = %d\n", __func__, n_tokens, size); return llama_kv_cache_slot_info_failed; } uint32_t n_tested = 0; while (true) { - if (cache.head + n_tokens > cache.size) { - n_tested += cache.size - cache.head; - cache.head = 0; + if (head + n_tokens > size) { + n_tested += size - head; + head = 0; continue; } bool found = true; for (uint32_t i = 0; i < n_tokens; i++) { - if (cache.cells[cache.head + i].pos >= 0) { + if (cells[head + i].pos >= 0) { found = false; - cache.head += i + 1; - n_tested += i + 1; + head += i + 1; + n_tested += i + 1; break; } } @@ -316,7 +649,7 @@ struct llama_kv_cache_slot_info llama_kv_cache_find_slot( break; } - if (n_tested >= cache.size) { + if (n_tested >= size) { //LLAMA_LOG_ERROR("%s: failed to find a slot for %d tokens\n", __func__, n_tokens); return llama_kv_cache_slot_info_failed; } @@ -325,22 +658,27 @@ struct llama_kv_cache_slot_info llama_kv_cache_find_slot( for (uint32_t s = 0; s < n_seqs; s++) { for (uint32_t i = 0; i < n_seq_tokens; ++i) { uint32_t k = s*n_seq_tokens + i; - cache.cells[cache.head + k].pos = ubatch.pos[k]; + cells[head + k].pos = ubatch.pos[k]; for (int32_t j = 0; j < ubatch.n_seq_id[s]; j++) { - cache.cells[cache.head + k].seq_id.insert(ubatch.seq_id[s][j]); + cells[head + k].seq_id.insert(ubatch.seq_id[s][j]); } } } - cache.used += n_tokens; + used += n_tokens; - return llama_kv_cache_slot_info(cache.head, cache.head + n_tokens); + return llama_kv_cache_slot_info(head, head + n_tokens); } -uint32_t llama_kv_cache_cell_max(const struct llama_kv_cache & cache) { - for (uint32_t i = cache.size; i > 0; --i) { - const llama_kv_cell & cell = cache.cells[i - 1]; +uint32_t llama_kv_cache_unified::get_padding(const llama_cparams & cparams) const { + // the FA kernels require padding to avoid extra runtime boundary checks + return cparams.flash_attn ? 
256u : 32u; +} + +uint32_t llama_kv_cache_unified::cell_max() const { + for (uint32_t i = size; i > 0; --i) { + const llama_kv_cell & cell = cells[i - 1]; if (cell.pos >= 0 && !cell.is_empty()) { return i; @@ -350,289 +688,659 @@ uint32_t llama_kv_cache_cell_max(const struct llama_kv_cache & cache) { return 0; } -void llama_kv_cache_clear(struct llama_kv_cache & cache) { - for (int32_t i = 0; i < (int32_t) cache.size; ++i) { - cache.cells[i].pos = -1; - cache.cells[i].seq_id.clear(); - cache.cells[i].src = -1; - cache.cells[i].tail = -1; +size_t llama_kv_cache_unified::size_k_bytes() const { + size_t size_k_bytes = 0; + + for (const auto & k : k_l) { + size_k_bytes += ggml_nbytes(k); } - cache.head = 0; - cache.used = 0; - for (auto & buf : cache.bufs) { - ggml_backend_buffer_clear(buf.get(), 0); + return size_k_bytes; +} + +size_t llama_kv_cache_unified::size_v_bytes() const { + size_t size_v_bytes = 0; + + for (const auto & v : v_l) { + size_v_bytes += ggml_nbytes(v); } + + return size_v_bytes; } -bool llama_kv_cache_seq_rm( - struct llama_kv_cache & cache, - llama_seq_id seq_id, - llama_pos p0, - llama_pos p1) { - uint32_t new_head = cache.size; +bool llama_kv_cache_unified::defrag_prepare(int32_t n_max_nodes) { + const uint32_t n_layer = hparams.n_layer; - if (p0 < 0) p0 = 0; - if (p1 < 0) p1 = std::numeric_limits::max(); + const uint32_t n_kv = cell_max(); + const uint32_t n_used = used; - // models like Mamba or RWKV can't have a state partially erased - if (cache.recurrent) { - if (seq_id >= (int64_t) cache.size) { - // could be fatal - return false; + assert(n_used <= n_kv); + + //const int64_t t_start = ggml_time_us(); + + // number of cells moved + uint32_t n_moves = 0; + + // each move requires 6*n_layer tensors (see graph_build_kv_self_defrag) + // - source view, destination view, copy operation + // - x2 for keys and values + //const uint32_t max_moves = max_nodes()/(6*n_layer); + // TODO: tmp fix https://github.com/ggerganov/llama.cpp/issues/6685#issuecomment-2057579516 + const uint32_t max_moves = (n_max_nodes - 2*n_layer)/(6*n_layer); + + // determine which KV cells to move where + // + // cell i moves to ids[i] + // + // if ids[i] == i || ids[i] == n_kv, then cell i is not moved + // + auto & ids = defrag_info.ids; + + ids.clear(); + ids.resize(n_kv, n_kv); + + for (uint32_t i0 = 0; i0 < n_used; ++i0) { + const auto & cell0 = cells[i0]; + + if (!cell0.is_empty()) { + ids[i0] = i0; + + continue; } - if (0 <= seq_id) { - int32_t & tail_id = cache.cells[seq_id].tail; - if (tail_id >= 0) { - const llama_kv_cell & cell = cache.cells[tail_id]; - // partial intersection is invalid - if ((0 < p0 && p0 <= cell.pos) || (0 < p1 && p1 <= cell.pos)) { - return false; - } - // invalidate tails which will be cleared - if (p0 <= cell.pos && cell.pos < p1) { - tail_id = -1; - } + + // found a hole - fill it with data from the end of the cache + + uint32_t nh = 1; + + // determine the size of the hole + while (i0 + nh < n_used && cells[i0 + nh].is_empty()) { + nh++; + } + + uint32_t nf = 0; + uint32_t is = n_kv - 1; + + // starting from the end, find nh non-empty cells + for (; is > i0; --is) { + const auto & cell1 = cells[is]; + + if (cell1.is_empty() || ids[is] != n_kv) { + continue; } - } else { - // seq_id is negative, then the range should include everything or nothing - if (p0 != p1 && (p0 != 0 || p1 != std::numeric_limits::max())) { - return false; + + // non-empty cell which is not yet moved + nf++; + + if (nf == nh) { + break; } } - } - for (uint32_t i = 0; i < cache.size; ++i) { - 
if (cache.cells[i].pos >= p0 && cache.cells[i].pos < p1) { - if (seq_id < 0) { - cache.cells[i].seq_id.clear(); - } else if (cache.cells[i].has_seq_id(seq_id)) { - cache.cells[i].seq_id.erase(seq_id); - } else { + // this can only happen if `n_used` is not accurate, which would be a bug + GGML_ASSERT(nf == nh && "KV defrag bug: nf != nh"); + + nf = 0; + + uint32_t i1 = is; + + // are we moving a continuous block of memory? + bool cont = false; + + // should we stop searching for the next move? + bool stop = false; + + // go back and move the nf cells to the hole + for (; i1 < n_kv; ++i1) { + auto & cell1 = cells[i1]; + + if (cell1.is_empty() || ids[i1] != n_kv) { + if (n_moves == max_moves) { + stop = true; + break; + } + + cont = false; continue; } - if (cache.cells[i].is_empty()) { - // keep count of the number of used cells - if (cache.cells[i].pos >= 0) cache.used--; - cache.cells[i].pos = -1; - cache.cells[i].src = -1; - if (new_head == cache.size) new_head = i; + // this cell goes to (i0 + nf) + ids[i1] = i0 + nf; + + // move the cell meta data + cells[i0 + nf] = cell1; + + // clear the old cell and move the head there + cell1 = llama_kv_cell(); + head = n_used; + + if (!cont) { + n_moves++; + cont = true; + } + + nf++; + + if (nf == nh) { + break; } } + + if (stop || n_moves == max_moves) { + break; + } + + //LLAMA_LOG_INFO("(tmp log) KV defrag: move [%u, %u) to [%u, %u)\n", is, i1 + 1, i0, i0 + nh); + + i0 += nh - 1; } - // If we freed up a slot, set head to it so searching can start there. - if (new_head != cache.size && new_head < cache.head) cache.head = new_head; + if (n_moves == 0) { + return false; + } + + LLAMA_LOG_DEBUG("(tmp log) KV defrag cell moves: %u\n", n_moves); + + LLAMA_LOG_DEBUG("expected gf nodes: %u\n", 6*n_moves*n_layer); return true; } -void llama_kv_cache_seq_cp( - struct llama_kv_cache & cache, - llama_seq_id seq_id_src, - llama_seq_id seq_id_dst, - llama_pos p0, - llama_pos p1) { - if (p0 < 0) p0 = 0; - if (p1 < 0) p1 = std::numeric_limits::max(); - - if (cache.recurrent) { - if ((uint32_t) seq_id_dst < cache.size && (uint32_t) seq_id_src < cache.size) { - llama_kv_cell & tail_src = cache.cells[seq_id_src]; - llama_kv_cell & tail_dst = cache.cells[seq_id_dst]; - if (tail_dst.tail >= 0) { - // clear destination seq_id if it wasn't empty - llama_kv_cell & cell_dst = cache.cells[tail_dst.tail]; - - cell_dst.seq_id.erase(seq_id_dst); - tail_dst.tail = -1; - if (cell_dst.seq_id.empty()) { - cell_dst.pos = -1; - cell_dst.delta = -1; - cell_dst.src = -1; - cache.used -= 1; - } +void llama_kv_cache_unified::state_write(llama_io_write_i & io, llama_seq_id seq_id) const { + std::vector> cell_ranges; // ranges, from inclusive, to exclusive + uint32_t cell_count = 0; + + // Count the number of cells with the specified seq_id + // Find all the ranges of cells with this seq id (or all, when -1) + uint32_t cell_range_begin = size; + for (uint32_t i = 0; i < size; ++i) { + const auto & cell = cells[i]; + if ((seq_id == -1 && !cell.is_empty()) || cell.has_seq_id(seq_id)) { + ++cell_count; + if (cell_range_begin == size) { + cell_range_begin = i; } - if (tail_src.tail >= 0) { - llama_kv_cell & cell_src = cache.cells[tail_src.tail]; - - cell_src.seq_id.insert(seq_id_dst); - tail_dst.tail = tail_src.tail; + } else { + if (cell_range_begin != size) { + cell_ranges.emplace_back(cell_range_begin, i); + cell_range_begin = size; } } + } + if (cell_range_begin != size) { + cell_ranges.emplace_back(cell_range_begin, size); + } - return; + // DEBUG CHECK: Sum of cell counts in ranges 
should equal the total cell count + uint32_t cell_count_check = 0; + for (const auto & range : cell_ranges) { + cell_count_check += range.second - range.first; } - // otherwise, this is the KV cache of a Transformer-like model + GGML_ASSERT(cell_count == cell_count_check); + + io.write(&cell_count, sizeof(cell_count)); + + state_write_meta(io, cell_ranges, seq_id); + state_write_data(io, cell_ranges); +} + +void llama_kv_cache_unified::state_read(llama_io_read_i & io, llama_seq_id seq_id) { + uint32_t cell_count; + io.read_to(&cell_count, sizeof(cell_count)); - cache.head = 0; + bool res = true; + res = res && state_read_meta(io, cell_count, seq_id); + res = res && state_read_data(io, cell_count); - for (uint32_t i = 0; i < cache.size; ++i) { - if (cache.cells[i].has_seq_id(seq_id_src) && cache.cells[i].pos >= p0 && cache.cells[i].pos < p1) { - cache.cells[i].seq_id.insert(seq_id_dst); + if (!res) { + if (seq_id == -1) { + clear(); + } else { + seq_rm(seq_id, -1, -1); } + throw std::runtime_error("failed to restore kv cache"); } } -void llama_kv_cache_seq_keep(struct llama_kv_cache & cache, llama_seq_id seq_id) { - uint32_t new_head = cache.size; +void llama_kv_cache_unified::state_write_meta(llama_io_write_i & io, const std::vector> & cell_ranges, llama_seq_id seq_id) const { + for (const auto & range : cell_ranges) { + for (uint32_t i = range.first; i < range.second; ++i) { + const auto & cell = cells[i]; + const llama_pos pos = cell.pos; + const uint32_t n_seq_id = seq_id == -1 ? cell.seq_id.size() : 0; + + io.write(&pos, sizeof(pos)); + io.write(&n_seq_id, sizeof(n_seq_id)); - for (uint32_t i = 0; i < cache.size; ++i) { - if (cache.recurrent && (llama_seq_id) i != seq_id) { - cache.cells[i].tail = -1; + if (n_seq_id) { + for (auto seq_id : cell.seq_id) { + io.write(&seq_id, sizeof(seq_id)); + } + } } - if (!cache.cells[i].has_seq_id(seq_id)) { - if (cache.cells[i].pos >= 0) cache.used--; - cache.cells[i].pos = -1; - cache.cells[i].src = -1; - cache.cells[i].seq_id.clear(); - if (new_head == cache.size) new_head = i; - } else { - cache.cells[i].seq_id.clear(); - cache.cells[i].seq_id.insert(seq_id); + } +} + +void llama_kv_cache_unified::state_write_data(llama_io_write_i & io, const std::vector> & cell_ranges) const { + const uint32_t v_trans = this->v_trans ? 1 : 0; + const uint32_t n_layer = hparams.n_layer; + + io.write(&v_trans, sizeof(v_trans)); + io.write(&n_layer, sizeof(n_layer)); + + std::vector tmp_buf; + + // Iterate and write all the keys first, each row is a cell + // Get whole range at a time + for (uint32_t il = 0; il < n_layer; ++il) { + const uint32_t n_embd_k_gqa = hparams.n_embd_k_gqa(il) + hparams.n_embd_k_s(); + + // Write key type + const int32_t k_type_i = (int32_t)k_l[il]->type; + io.write(&k_type_i, sizeof(k_type_i)); + + // Write row size of key + const uint64_t k_size_row = ggml_row_size(k_l[il]->type, n_embd_k_gqa); + io.write(&k_size_row, sizeof(k_size_row)); + + // Read each range of cells of k_size length each into tmp_buf and write out + for (const auto & range : cell_ranges) { + const size_t range_size = range.second - range.first; + const size_t buf_size = range_size * k_size_row; + io.write_tensor(k_l[il], range.first * k_size_row, buf_size); } } - // If we freed up a slot, set head to it so searching can start there. 
- if (new_head != cache.size && new_head < cache.head) cache.head = new_head; -} + if (!v_trans) { + for (uint32_t il = 0; il < n_layer; ++il) { + const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il) + hparams.n_embd_v_s(); -void llama_kv_cache_seq_add( - struct llama_kv_cache & cache, - llama_seq_id seq_id, - llama_pos p0, - llama_pos p1, - llama_pos delta) { - uint32_t new_head = cache.size; - - if (p0 < 0) p0 = 0; - if (p1 < 0) p1 = std::numeric_limits::max(); - // If there is no range then return early to avoid looping over the cache. - if (p0 == p1) return; + // Write value type + const int32_t v_type_i = (int32_t)v_l[il]->type; + io.write(&v_type_i, sizeof(v_type_i)); - if (cache.recurrent) { - // for Mamba-like or RWKV models, only the pos needs to be shifted - if (0 <= seq_id && seq_id < (int64_t) cache.size) { - const int32_t tail_id = cache.cells[seq_id].tail; - if (tail_id >= 0) { - llama_kv_cell & cell = cache.cells[tail_id]; - if (cell.has_seq_id(seq_id) && p0 <= cell.pos && cell.pos < p1) { - cell.pos += delta; + // Write row size of value + const uint64_t v_size_row = ggml_row_size(v_l[il]->type, n_embd_v_gqa); + io.write(&v_size_row, sizeof(v_size_row)); + + // Read each range of cells of v_size length each into tmp_buf and write out + for (const auto & range : cell_ranges) { + const size_t range_size = range.second - range.first; + const size_t buf_size = range_size * v_size_row; + io.write_tensor(v_l[il], range.first * v_size_row, buf_size); + } + } + } else { + // When v is transposed, we also need the element size and get the element ranges from each row + const uint32_t kv_size = size; + for (uint32_t il = 0; il < n_layer; ++il) { + const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il) + hparams.n_embd_v_s(); + + // Write value type + const int32_t v_type_i = (int32_t)v_l[il]->type; + io.write(&v_type_i, sizeof(v_type_i)); + + // Write element size + const uint32_t v_size_el = ggml_type_size(v_l[il]->type); + io.write(&v_size_el, sizeof(v_size_el)); + + // Write GQA embedding size + io.write(&n_embd_v_gqa, sizeof(n_embd_v_gqa)); + + // For each row, we get the element values of each cell + for (uint32_t j = 0; j < n_embd_v_gqa; ++j) { + // Read each range of cells of v_size_el length each into tmp_buf and write out + for (const auto & range : cell_ranges) { + const size_t range_size = range.second - range.first; + const size_t src_offset = (range.first + j * kv_size) * v_size_el; + const size_t buf_size = range_size * v_size_el; + io.write_tensor(v_l[il], src_offset, buf_size); } } } - return; } +} + +bool llama_kv_cache_unified::state_read_meta(llama_io_read_i & io, uint32_t cell_count, llama_seq_id dest_seq_id) { + if (dest_seq_id != -1) { + // single sequence - for (uint32_t i = 0; i < cache.size; ++i) { - if (cache.cells[i].has_seq_id(seq_id) && cache.cells[i].pos >= p0 && cache.cells[i].pos < p1) { - cache.has_shift = true; - cache.cells[i].pos += delta; - cache.cells[i].delta += delta; + seq_rm(dest_seq_id, -1, -1); + + llama_sbatch sbatch; + llama_ubatch batch = sbatch.reserve_ubatch(cell_count, /* has_embd */ false); + + batch.n_tokens = cell_count; + batch.n_seq_tokens = cell_count; + batch.n_seqs = 1; + + for (uint32_t i = 0; i < cell_count; ++i) { + llama_pos pos; + uint32_t n_seq_id; + + io.read_to(&pos, sizeof(pos)); + io.read_to(&n_seq_id, sizeof(n_seq_id)); + + if (n_seq_id != 0) { + LLAMA_LOG_ERROR("%s: invalid seq_id-agnostic kv cell\n", __func__); + return false; + } - if (cache.cells[i].pos < 0) { - if (!cache.cells[i].is_empty()) { - 
cache.used--; + batch.pos[i] = pos; + } + batch.n_seq_id[0] = 1; + batch.seq_id[0] = &dest_seq_id; + if (!find_slot(batch)) { + LLAMA_LOG_ERROR("%s: failed to find available cells in kv cache\n", __func__); + return false; + } + + // DEBUG CHECK: kv.head should be our first cell, kv.head + cell_count - 1 should be our last cell (verify seq_id and pos values) + // Assume that this is one contiguous block of cells + GGML_ASSERT(head + cell_count <= size); + GGML_ASSERT(cells[head].pos == batch.pos[0]); + GGML_ASSERT(cells[head + cell_count - 1].pos == batch.pos[cell_count - 1]); + GGML_ASSERT(cells[head].has_seq_id(dest_seq_id)); + GGML_ASSERT(cells[head + cell_count - 1].has_seq_id(dest_seq_id)); + } else { + // whole KV cache restore + + if (cell_count > size) { + LLAMA_LOG_ERROR("%s: not enough cells in kv cache\n", __func__); + return false; + } + + clear(); + + for (uint32_t i = 0; i < cell_count; ++i) { + llama_kv_cell & cell = cells[i]; + + llama_pos pos; + uint32_t n_seq_id; + + io.read_to(&pos, sizeof(pos)); + io.read_to(&n_seq_id, sizeof(n_seq_id)); + + cell.pos = pos; + + for (uint32_t j = 0; j < n_seq_id; ++j) { + llama_seq_id seq_id; + io.read_to(&seq_id, sizeof(seq_id)); + + // TODO: llama_kv_cache_unified should have a notion of max sequences + //if (seq_id < 0 || (uint32_t) seq_id >= llama_n_seq_max(ctx)) { + if (seq_id < 0) { + //LLAMA_LOG_ERROR("%s: invalid seq_id, %d is out of range [0, %u)\n", __func__, seq_id, llama_n_seq_max(ctx)); + LLAMA_LOG_ERROR("%s: invalid seq_id, %d is out of range [0, inf)\n", __func__, seq_id); + return false; } - cache.cells[i].pos = -1; - cache.cells[i].seq_id.clear(); - if (new_head == cache.size) { - new_head = i; + + cell.seq_id.insert(seq_id); + + if (recurrent) { + int32_t & tail = cells[seq_id].tail; + if (tail != -1) { + LLAMA_LOG_ERROR("%s: duplicate tail for seq_id %d in cell %d and %d\n", __func__, seq_id, i, tail); + return false; + } + tail = i; } } } + + head = 0; + used = cell_count; } - // If we freed up a slot, set head to it so searching can start there. - // Otherwise we just start the next search from the beginning. - cache.head = new_head != cache.size ? new_head : 0; + if (recurrent) { + for (uint32_t i = 0; i < cell_count; ++i) { + uint32_t cell_id = head + i; + // make sure the recurrent states will keep their restored state + cells[cell_id].src = cell_id; + } + } + + return true; } -void llama_kv_cache_seq_div( - struct llama_kv_cache & cache, - llama_seq_id seq_id, - llama_pos p0, - llama_pos p1, - int d) { - if (p0 < 0) p0 = 0; - if (p1 < 0) p1 = std::numeric_limits::max(); - // If there is no range then return early to avoid looping over the cache. 
- if (p0 == p1) return; +bool llama_kv_cache_unified::state_read_data(llama_io_read_i & io, uint32_t cell_count) { + uint32_t v_trans; + uint32_t n_layer; + io.read_to(&v_trans, sizeof(v_trans)); + io.read_to(&n_layer, sizeof(n_layer)); - if (cache.recurrent) { - // for Mamba-like or RWKV models, only the pos needs to be changed - if (0 <= seq_id && seq_id < (int64_t) cache.size) { - const int32_t tail_id = cache.cells[seq_id].tail; - if (tail_id >= 0) { - llama_kv_cell & cell = cache.cells[tail_id]; - if (cell.has_seq_id(seq_id) && p0 <= cell.pos && cell.pos < p1) { - cell.pos /= d; + if (n_layer != hparams.n_layer) { + LLAMA_LOG_ERROR("%s: mismatched layer count (%u instead of %u)\n", __func__, n_layer, hparams.n_layer); + return false; + } + if (cell_count > size) { + LLAMA_LOG_ERROR("%s: not enough cells in kv cache to restore state (%u > %u)\n", __func__, cell_count, size); + return false; + } + if (v_trans != (bool) v_trans) { + LLAMA_LOG_ERROR("%s: incompatible V transposition\n", __func__); + return false; + } + + // For each layer, read the keys for each cell, one row is one cell, read as one contiguous block + for (uint32_t il = 0; il < n_layer; ++il) { + const uint32_t n_embd_k_gqa = hparams.n_embd_k_gqa(il) + hparams.n_embd_k_s(); + + // Read type of key + int32_t k_type_i_ref; + io.read_to(&k_type_i_ref, sizeof(k_type_i_ref)); + const int32_t k_type_i = (int32_t) k_l[il]->type; + if (k_type_i != k_type_i_ref) { + LLAMA_LOG_ERROR("%s: mismatched key type (%d != %d, layer %d)\n", __func__, k_type_i, k_type_i_ref, il); + return false; + } + + // Read row size of key + uint64_t k_size_row_ref; + io.read_to(&k_size_row_ref, sizeof(k_size_row_ref)); + const size_t k_size_row = ggml_row_size(k_l[il]->type, n_embd_k_gqa); + if (k_size_row != k_size_row_ref) { + LLAMA_LOG_ERROR("%s: mismatched key row size (%zu != %zu, layer %d)\n", __func__, k_size_row, (size_t) k_size_row_ref, il); + return false; + } + + if (cell_count) { + // Read and set the keys for the whole cell range + ggml_backend_tensor_set(k_l[il], io.read(cell_count * k_size_row), head * k_size_row, cell_count * k_size_row); + } + } + + if (!v_trans) { + for (uint32_t il = 0; il < n_layer; ++il) { + const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il) + hparams.n_embd_v_s(); + + // Read type of value + int32_t v_type_i_ref; + io.read_to(&v_type_i_ref, sizeof(v_type_i_ref)); + const int32_t v_type_i = (int32_t)v_l[il]->type; + if (v_type_i != v_type_i_ref) { + LLAMA_LOG_ERROR("%s: mismatched value type (%d != %d, layer %d)\n", __func__, v_type_i, v_type_i_ref, il); + return false; + } + + // Read row size of value + uint64_t v_size_row_ref; + io.read_to(&v_size_row_ref, sizeof(v_size_row_ref)); + const size_t v_size_row = ggml_row_size(v_l[il]->type, n_embd_v_gqa); + if (v_size_row != v_size_row_ref) { + LLAMA_LOG_ERROR("%s: mismatched value row size (%zu != %zu, layer %d)\n", __func__, v_size_row, (size_t) v_size_row_ref, il); + return false; + } + + if (cell_count) { + // Read and set the values for the whole cell range + ggml_backend_tensor_set(v_l[il], io.read(cell_count * v_size_row), head * v_size_row, cell_count * v_size_row); + } + } + } else { + // For each layer, read the values for each cell (transposed) + for (uint32_t il = 0; il < n_layer; ++il) { + const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il) + hparams.n_embd_v_s(); + + // Read type of value + int32_t v_type_i_ref; + io.read_to(&v_type_i_ref, sizeof(v_type_i_ref)); + const int32_t v_type_i = (int32_t)v_l[il]->type; + if (v_type_i != v_type_i_ref) { 
+ LLAMA_LOG_ERROR("%s: mismatched value type (%d != %d, layer %d)\n", __func__, v_type_i, v_type_i_ref, il); + return false; + } + + // Read element size of value + uint32_t v_size_el_ref; + io.read_to(&v_size_el_ref, sizeof(v_size_el_ref)); + const size_t v_size_el = ggml_type_size(v_l[il]->type); + if (v_size_el != v_size_el_ref) { + LLAMA_LOG_ERROR("%s: mismatched value element size (%zu != %zu, layer %d)\n", __func__, v_size_el, (size_t) v_size_el_ref, il); + return false; + } + + // Read GQA embedding size + uint32_t n_embd_v_gqa_ref; + io.read_to(&n_embd_v_gqa_ref, sizeof(n_embd_v_gqa_ref)); + if (n_embd_v_gqa != n_embd_v_gqa_ref) { + LLAMA_LOG_ERROR("%s: mismatched GQA embedding size (%u != %u, layer %d)\n", __func__, n_embd_v_gqa, n_embd_v_gqa_ref, il); + return false; + } + + if (cell_count) { + // For each row in the transposed matrix, read the values for the whole cell range + for (uint32_t j = 0; j < n_embd_v_gqa; ++j) { + const size_t dst_offset = (head + j * size) * v_size_el; + ggml_backend_tensor_set(v_l[il], io.read(cell_count * v_size_el), dst_offset, cell_count * v_size_el); } } } + } + + return true; +} + +// +// interface implementation +// + +int32_t llama_kv_cache_n_tokens(const llama_kv_cache * kv) { + if (!kv) { + return 0; + } + + return kv->get_n_tokens(); +} + +int32_t llama_kv_cache_used_cells(const llama_kv_cache * kv) { + if (!kv) { + return 0; + } + + return kv->get_used_cells(); +} + +void llama_kv_cache_clear(llama_kv_cache * kv) { + if (!kv) { return; } - for (uint32_t i = 0; i < cache.size; ++i) { - if (cache.cells[i].has_seq_id(seq_id) && cache.cells[i].pos >= p0 && cache.cells[i].pos < p1) { - cache.has_shift = true; + kv->clear(); +} - { - llama_pos p_old = cache.cells[i].pos; - cache.cells[i].pos /= d; - cache.cells[i].delta += cache.cells[i].pos - p_old; - } - } +bool llama_kv_cache_seq_rm( + llama_kv_cache * kv, + llama_seq_id seq_id, + llama_pos p0, + llama_pos p1) { + if (!kv) { + return true; } + + return kv->seq_rm(seq_id, p0, p1); } -llama_pos llama_kv_cache_seq_pos_max(struct llama_kv_cache & cache, llama_seq_id seq_id) { - llama_pos result = 0; +void llama_kv_cache_seq_cp( + llama_kv_cache * kv, + llama_seq_id seq_id_src, + llama_seq_id seq_id_dst, + llama_pos p0, + llama_pos p1) { + if (!kv) { + return; + } - for (uint32_t i = 0; i < cache.size; ++i) { - if (cache.cells[i].has_seq_id(seq_id)) { - result = std::max(result, cache.cells[i].pos); - } + kv->seq_cp(seq_id_src, seq_id_dst, p0, p1); +} + +void llama_kv_cache_seq_keep(llama_kv_cache * kv, llama_seq_id seq_id) { + if (!kv) { + return; } - return result; + kv->seq_keep(seq_id); } -void llama_kv_cache_defrag(struct llama_kv_cache & cache) { - if (!cache.recurrent) { - cache.do_defrag = true; +void llama_kv_cache_seq_add( + llama_kv_cache * kv, + llama_seq_id seq_id, + llama_pos p0, + llama_pos p1, + llama_pos delta) { + if (!kv) { + return; } + + kv->seq_add(seq_id, p0, p1, delta); } -int32_t llama_get_kv_cache_token_count(const struct llama_kv_cache & kv) { - int result = 0; +void llama_kv_cache_seq_div( + llama_kv_cache * kv, + llama_seq_id seq_id, + llama_pos p0, + llama_pos p1, + int d) { + if (!kv) { + return; + } - for (uint32_t i = 0; i < kv.size; i++) { - result += kv.cells[i].seq_id.size(); + kv->seq_div(seq_id, p0, p1, d); +} + +llama_pos llama_kv_cache_seq_pos_max(llama_kv_cache * kv, llama_seq_id seq_id) { + if (!kv) { + return 0; } - return result; + return kv->seq_pos_max(seq_id); } -int32_t llama_get_kv_cache_used_cells(const struct llama_kv_cache & kv) { - return 
kv.used; +void llama_kv_cache_defrag(llama_kv_cache * kv) { + if (!kv) { + return; + } + + kv->defrag(); } -bool llama_kv_cache_can_shift(const struct llama_kv_cache & kv) { - return kv.can_shift; +bool llama_kv_cache_can_shift(const llama_kv_cache * kv) { + if (!kv) { + return false; + } + + return kv->get_can_shift(); } // // kv cache view // -struct llama_kv_cache_view llama_kv_cache_view_init(const struct llama_kv_cache & kv, int32_t n_seq_max) { - struct llama_kv_cache_view result = { +llama_kv_cache_view llama_kv_cache_view_init(const llama_kv_cache & kv, int32_t n_seq_max) { + llama_kv_cache_view result = { /*.n_cells = */ 0, /*.n_seq_max = */ n_seq_max, /*.token_count = */ 0, - /*.used_cells = */ llama_get_kv_cache_used_cells(kv), + /*.used_cells = */ llama_kv_cache_used_cells(&kv), /*.max_contiguous = */ 0, /*.max_contiguous_idx = */ -1, /*.cells = */ nullptr, @@ -642,7 +1350,7 @@ struct llama_kv_cache_view llama_kv_cache_view_init(const struct llama_kv_cache return result; } -void llama_kv_cache_view_free(struct llama_kv_cache_view * view) { +void llama_kv_cache_view_free(llama_kv_cache_view * view) { if (view->cells != nullptr) { free(view->cells); view->cells = nullptr; @@ -653,18 +1361,25 @@ void llama_kv_cache_view_free(struct llama_kv_cache_view * view) { } } -void llama_kv_cache_view_update(struct llama_kv_cache_view * view, const struct llama_kv_cache & kv) { - if (uint32_t(view->n_cells) < kv.size || view->cells == nullptr) { - view->n_cells = int32_t(kv.size); - void * p = realloc(view->cells, sizeof(struct llama_kv_cache_view_cell) * view->n_cells); +void llama_kv_cache_view_update(llama_kv_cache_view * view, const llama_kv_cache * kv) { + // TODO: rework this in the future, for now quick hack + const llama_kv_cache_unified * kvu = dynamic_cast(kv); + if (kvu == nullptr) { + LLAMA_LOG_ERROR("%s: the kv_cache_view currently works only with llama_kv_cache_unified\n", __func__); + return; + } + + if (uint32_t(view->n_cells) < kvu->size || view->cells == nullptr) { + view->n_cells = int32_t(kvu->size); + void * p = realloc(view->cells, sizeof(llama_kv_cache_view_cell) * view->n_cells); GGML_ASSERT(p != nullptr && "Failed to alloc kv_cache_view cells"); - view->cells = (struct llama_kv_cache_view_cell *)p; + view->cells = (llama_kv_cache_view_cell *)p; p = realloc(view->cells_sequences, sizeof(llama_seq_id) * view->n_seq_max * view->n_cells); GGML_ASSERT(p != nullptr && "Failed to alloc kv_cache_view cells sequences"); view->cells_sequences = (llama_seq_id *)p; } - const std::vector & kv_cells = kv.cells; + const std::vector & kv_cells = kvu->cells; llama_kv_cache_view_cell * c_curr = view->cells; llama_seq_id * cs_curr = view->cells_sequences; int32_t used_cells = 0; @@ -673,7 +1388,7 @@ void llama_kv_cache_view_update(struct llama_kv_cache_view * view, const struct uint32_t max_contig = 0; int32_t max_contig_idx = -1; - for (int32_t i = 0; i < int32_t(kv.size); i++, c_curr++, cs_curr += view->n_seq_max) { + for (int32_t i = 0; i < int32_t(kvu->size); i++, c_curr++, cs_curr += view->n_seq_max) { const size_t curr_size = kv_cells[i].seq_id.size(); token_count += curr_size; c_curr->pos = kv_cells[i].pos + kv_cells[i].delta; @@ -711,8 +1426,8 @@ void llama_kv_cache_view_update(struct llama_kv_cache_view * view, const struct view->max_contiguous_idx = max_contig_idx; view->token_count = token_count; view->used_cells = used_cells; - if (uint32_t(used_cells) != kv.used) { + if (uint32_t(used_cells) != kvu->used) { LLAMA_LOG_ERROR("%s: used cells mismatch. 
kv_cache says %d but we calculated %d\n", - __func__, kv.used, used_cells); + __func__, kvu->used, used_cells); } } diff --git a/src/llama-kv-cache.h b/src/llama-kv-cache.h index 1ce0850ec81bb..0a7ff8a4ea3e6 100644 --- a/src/llama-kv-cache.h +++ b/src/llama-kv-cache.h @@ -1,12 +1,29 @@ #pragma once #include "llama.h" +#include "llama-io.h" +#include "llama-memory.h" #include "ggml-cpp.h" +#include #include #include -#include + +struct llama_cparams; +struct llama_hparams; +struct llama_ubatch; + +struct llama_kv_cache : public llama_memory_i { + using llama_memory_i::llama_memory_i; + + virtual int32_t get_n_tokens() const = 0; + virtual uint32_t get_used_cells() const = 0; // TODO: remove, this is too-specific to the unified cache + + virtual bool get_can_shift() const = 0; + + bool get_can_edit() const override { return get_can_shift(); } +}; struct llama_kv_cell { llama_pos pos = -1; @@ -29,55 +46,6 @@ struct llama_kv_cell { } }; -// ring-buffer of cached KV data -struct llama_kv_cache { - bool has_shift = false; - bool do_defrag = false; - bool recurrent = false; // with recurrent state models, a cell can hold the state for more than one past token - bool v_trans = true; // the value tensor is transposed - bool can_shift = false; - - // Note: The value of head isn't only used to optimize searching - // for a free KV slot. llama_decode_impl also uses it, so it - // cannot be freely changed after a slot has been allocated. - uint32_t head = 0; - uint32_t size = 0; - uint32_t used = 0; // used cells (i.e. at least one seq_id) - - // computed before each graph build - uint32_t n = 0; - - ggml_type type_k = GGML_TYPE_F16; - ggml_type type_v = GGML_TYPE_F16; - - std::vector cells; - - std::vector k_l; // per layer - std::vector v_l; - - std::vector ctxs; - std::vector bufs; - - size_t total_size() const { - size_t size = 0; - for (const auto & buf : bufs) { - size += ggml_backend_buffer_get_size(buf.get()); - } - - return size; - } - - // TODO: better data structures to reduce the cost of this operation - llama_pos max_pos() const { - llama_pos max_pos = -1; - for (const auto & cell : cells) { - max_pos = std::max(max_pos, cell.pos); - } - - return max_pos; - } -}; - // a structure holds information about the slot found in llama_kv_cache_find_slot struct llama_kv_cache_slot_info { std::pair boundaries; // slot boundaries [begin, end) @@ -89,82 +57,131 @@ struct llama_kv_cache_slot_info { operator bool() const { return found; } }; -// TODO: maybe not needed -uint32_t llama_kv_cache_get_padding(const struct llama_cparams & cparams); - -bool llama_kv_cache_init( - struct llama_kv_cache & cache, - const llama_model & model, +// ring-buffer of cached KV data +// TODO: pimpl +// TODO: add notion of max sequences +class llama_kv_cache_unified : public llama_kv_cache { +public: + // can be used to query data from the model if needed + struct callbacks { + std::function get_rope_factors; + }; + + llama_kv_cache_unified( + const llama_hparams & hparams, + callbacks cbs); + + virtual ~llama_kv_cache_unified() = default; + + // TODO: become constructor + bool init( + const llama_model & model, // TODO: do not reference the model const llama_cparams & cparams, ggml_type type_k, ggml_type type_v, uint32_t kv_size, bool offload); -// find an empty slot of size "n_tokens" in the cache -// updates the cache head -// returns a structure holding information about the slot found -// Note: On success, it's important that cache.head points -// to the first cell of the slot. 
-struct llama_kv_cache_slot_info llama_kv_cache_find_slot( - struct llama_kv_cache & cache, - const struct llama_ubatch & batch); + int32_t get_n_tokens() const override; + uint32_t get_used_cells() const override; -// find how many cells are currently in use -uint32_t llama_kv_cache_cell_max(const struct llama_kv_cache & cache); + size_t total_size() const; -void llama_kv_cache_clear(struct llama_kv_cache & cache); + // TODO: better data structures to reduce the cost of this operation + llama_pos pos_max() const; -bool llama_kv_cache_seq_rm( - struct llama_kv_cache & cache, - llama_seq_id seq_id, - llama_pos p0, - llama_pos p1); + void clear() override; + void defrag() override; -void llama_kv_cache_seq_cp( - struct llama_kv_cache & cache, - llama_seq_id seq_id_src, - llama_seq_id seq_id_dst, - llama_pos p0, - llama_pos p1); + bool seq_rm (llama_seq_id seq_id, llama_pos p0, llama_pos p1) override; + void seq_cp (llama_seq_id seq_id_src, llama_seq_id seq_id_dst, llama_pos p0, llama_pos p1) override; + void seq_keep(llama_seq_id seq_id) override; + void seq_add (llama_seq_id seq_id, llama_pos p0, llama_pos p1, llama_pos delta) override; + void seq_div (llama_seq_id seq_id, llama_pos p0, llama_pos p1, int d) override; -void llama_kv_cache_seq_keep( - struct llama_kv_cache & cache, - llama_seq_id seq_id); + llama_pos seq_pos_max(llama_seq_id seq_id) override; -void llama_kv_cache_seq_add( - struct llama_kv_cache & cache, - llama_seq_id seq_id, - llama_pos p0, - llama_pos p1, - llama_pos delta); + bool get_can_shift() const override; -void llama_kv_cache_seq_div( - struct llama_kv_cache & cache, - llama_seq_id seq_id, - llama_pos p0, - llama_pos p1, - int d); + // find an empty slot of size "n_tokens" in the cache + // updates the cache head + // returns a structure holding information about the slot found + // Note: On success, it's important that cache.head points + // to the first cell of the slot. 
+ llama_kv_cache_slot_info find_slot(const llama_ubatch & batch); -llama_pos llama_kv_cache_seq_pos_max( - struct llama_kv_cache & cache, - llama_seq_id seq_id); + // TODO: maybe not needed + uint32_t get_padding(const llama_cparams & cparams) const; -void llama_kv_cache_defrag(struct llama_kv_cache & cache); + // find how many cells are currently in use + uint32_t cell_max() const; -int32_t llama_get_kv_cache_token_count(const struct llama_kv_cache & kv); + size_t size_k_bytes() const; + size_t size_v_bytes() const; -int32_t llama_get_kv_cache_used_cells(const struct llama_kv_cache & kv); + // defrag -bool llama_kv_cache_can_shift(const struct llama_kv_cache & kv); + struct { + std::vector ids; + } defrag_info; -// -// kv cache view -// + // return true if cells have been moved + bool defrag_prepare(int32_t n_max_nodes); + + // state save/load + + void state_write(llama_io_write_i & io, llama_seq_id seq_id = -1) const; + void state_read (llama_io_read_i & io, llama_seq_id seq_id = -1); -struct llama_kv_cache_view llama_kv_cache_view_init(const struct llama_kv_cache & kv, int32_t n_seq_max); + // members -void llama_kv_cache_view_update(struct llama_kv_cache_view * view, const struct llama_kv_cache & kv); + const llama_hparams & hparams; + + callbacks cbs; + + bool has_shift = false; + bool do_defrag = false; + + // TODO: remove this and implement llama_kv_cache_recurrent instead + bool recurrent = false; // with recurrent state models, a cell can hold the state for more than one past token + + bool v_trans = true; // the value tensor is transposed + bool can_shift = false; + + // Note: The value of head isn't only used to optimize searching + // for a free KV slot. llama_decode_impl also uses it, so it + // cannot be freely changed after a slot has been allocated. + uint32_t head = 0; + uint32_t size = 0; + uint32_t used = 0; // used cells (i.e. 
at least one seq_id) + + // computed before each graph build + uint32_t n = 0; + + std::vector cells; + + std::vector k_l; // per layer + std::vector v_l; + +private: + ggml_type type_k = GGML_TYPE_F16; + ggml_type type_v = GGML_TYPE_F16; + + std::vector ctxs; + std::vector bufs; + + void state_write_meta(llama_io_write_i & io, const std::vector> & cell_ranges, llama_seq_id seq_id = -1) const; + void state_write_data(llama_io_write_i & io, const std::vector> & cell_ranges) const; + + bool state_read_meta(llama_io_read_i & io, uint32_t cell_count, llama_seq_id dest_seq_id = -1); + bool state_read_data(llama_io_read_i & io, uint32_t cell_count); +}; + +// TODO: temporary reusing llama_kv_cache_unified -- implement recurrent cache and simplify llama_kv_cache_unified +//class llama_kv_cache_recurrent : public llama_kv_cache_unified { +//public: +// using llama_kv_cache_unified::llama_kv_cache_unified; +//}; // // kv cache restore @@ -184,13 +201,15 @@ struct llama_kv_slot_restorer { bool do_restore = false; - explicit llama_kv_slot_restorer(const struct llama_kv_cache & cache) { + llama_kv_cache_unified & cache; + + explicit llama_kv_slot_restorer(llama_kv_cache_unified & cache) : cache(cache) { old_state.head = cache.head; old_state.n = cache.n; } // saves a slot information for future restoration - void save(const struct llama_kv_cache_slot_info & slot) { + void save(const llama_kv_cache_slot_info & slot) { if (slot) { do_restore = true; if (slot.boundaries.first != slot.boundaries.second) { @@ -201,19 +220,68 @@ struct llama_kv_slot_restorer { // must be explicitly called to restore the kv_cache state // and rollback changes from all llama_kv_cache_find_slot calls - void restore(struct llama_kv_cache & cache) { + void restore() { if (do_restore) { cache.head = old_state.head; cache.n = old_state.n; if (cache.recurrent) { // recurrent models like Mamba or RWKV can't have a state partially erased - llama_kv_cache_seq_rm(cache, -1, -1, -1); + cache.seq_rm(-1, -1, -1); } else { for (auto & slot : slot_boundaries) { - llama_kv_cache_seq_rm(cache, -1, slot.first, slot.second); + cache.seq_rm(-1, slot.first, slot.second); } } } } }; +// TODO: maybe become part of the public llama_kv_cache in the future +int32_t llama_kv_cache_n_tokens(const llama_kv_cache * kv); + +int32_t llama_kv_cache_used_cells(const llama_kv_cache * kv); + +void llama_kv_cache_clear(llama_kv_cache * kv); + +bool llama_kv_cache_seq_rm( + llama_kv_cache * kv, + llama_seq_id seq_id, + llama_pos p0, + llama_pos p1); + +void llama_kv_cache_seq_cp( + llama_kv_cache * kv, + llama_seq_id seq_id_src, + llama_seq_id seq_id_dst, + llama_pos p0, + llama_pos p1); + +void llama_kv_cache_seq_keep(llama_kv_cache * kv, llama_seq_id seq_id); + +void llama_kv_cache_seq_add( + llama_kv_cache * kv, + llama_seq_id seq_id, + llama_pos p0, + llama_pos p1, + llama_pos delta); + +void llama_kv_cache_seq_div( + llama_kv_cache * kv, + llama_seq_id seq_id, + llama_pos p0, + llama_pos p1, + int d); + +llama_pos llama_kv_cache_seq_pos_max(llama_kv_cache * kv, llama_seq_id seq_id); + +void llama_kv_cache_defrag(llama_kv_cache * kv); + +bool llama_kv_cache_can_shift(const llama_kv_cache * kv); + +// +// kv cache view +// + +llama_kv_cache_view llama_kv_cache_view_init(const llama_kv_cache & kv, int32_t n_seq_max); + +void llama_kv_cache_view_update(llama_kv_cache_view * view, const llama_kv_cache * kv); diff --git a/src/llama-memory.cpp b/src/llama-memory.cpp new file mode 100644 index 0000000000000..10173253edfe4 --- /dev/null +++ b/src/llama-memory.cpp @@ 
-0,0 +1 @@ +#include "llama-memory.h" diff --git a/src/llama-memory.h b/src/llama-memory.h new file mode 100644 index 0000000000000..69e6e34ca4516 --- /dev/null +++ b/src/llama-memory.h @@ -0,0 +1,21 @@ +#pragma once + +#include "llama.h" + +// general concept of LLM memory +// the KV cache is a type of LLM memory, but there can be other types +class llama_memory_i { +public: + virtual void clear() = 0; + virtual void defrag() = 0; + + virtual bool seq_rm (llama_seq_id seq_id, llama_pos p0, llama_pos p1) = 0; + virtual void seq_cp (llama_seq_id seq_id_src, llama_seq_id seq_id_dst, llama_pos p0, llama_pos p1) = 0; + virtual void seq_keep(llama_seq_id seq_id) = 0; + virtual void seq_add (llama_seq_id seq_id, llama_pos p0, llama_pos p1, llama_pos delta) = 0; + virtual void seq_div (llama_seq_id seq_id, llama_pos p0, llama_pos p1, int d) = 0; + + virtual llama_pos seq_pos_max(llama_seq_id seq_id) = 0; + + virtual bool get_can_edit() const = 0; +}; diff --git a/src/llama-model.cpp b/src/llama-model.cpp index 9f75589d805a9..522219c012242 100644 --- a/src/llama-model.cpp +++ b/src/llama-model.cpp @@ -2,12 +2,17 @@ #include "llama-impl.h" #include "llama-mmap.h" +#include "llama-batch.h" +#include "llama-cparams.h" #include "llama-model-loader.h" +#include "llama-kv-cache.h" #include "ggml-cpp.h" #include #include +#include +#include #include #include #include @@ -244,6 +249,7 @@ static ggml_backend_buffer_type_t select_weight_buft(const llama_hparams & hpara return cur_buft; } } + return nullptr; } @@ -302,7 +308,7 @@ static buft_list_t make_cpu_buft_list(const std::vector & de } // GPU: split if LLAMA_SPLIT_MODE_ROW -> GPU -static buft_list_t make_gpu_buft_list(ggml_backend_dev_t dev, enum llama_split_mode split_mode, const float * tensor_split) { +static buft_list_t make_gpu_buft_list(ggml_backend_dev_t dev, llama_split_mode split_mode, const float * tensor_split) { buft_list_t buft_list; // add the device split buffer type if requested and available @@ -369,7 +375,7 @@ struct llama_model::impl { std::vector dev_layer; }; -llama_model::llama_model(const struct llama_model_params & params) : params(params), pimpl(std::make_unique()) { +llama_model::llama_model(const llama_model_params & params) : params(params), pimpl(std::make_unique()) { } llama_model::~llama_model() {} @@ -391,7 +397,7 @@ void llama_model::load_hparams(llama_model_loader & ml) { // get metadata as string for (int i = 0; i < gguf_get_n_kv(ctx); i++) { - enum gguf_type type = gguf_get_kv_type(ctx, i); + gguf_type type = gguf_get_kv_type(ctx, i); if (type == GGUF_TYPE_ARRAY) { continue; } @@ -1444,7 +1450,10 @@ bool llama_model::load_tensors(llama_model_loader & ml) { // skip unused tensors if (info.op == GGML_OP_NONE) { - LLAMA_LOG_WARN("model has unused tensor %s -- ignoring\n", tn.str().c_str()); + const size_t nbytes = ggml_nbytes(t_meta); + LLAMA_LOG_WARN("model has unused tensor %s (size = %zu bytes) -- ignoring\n", tn.str().c_str(), nbytes); + + ml.size_data -= nbytes; ml.n_created++; return nullptr; @@ -3631,8 +3640,8 @@ size_t llama_model::size() const { return pimpl->n_bytes; } -size_t llama_model::max_nodes() const { - return std::max(8192, tensors_by_name.size()*5); +size_t llama_model::n_tensors() const { + return tensors_by_name.size(); } size_t llama_model::n_devices() const { @@ -3745,7 +3754,7 @@ void llama_model::print_info() const { LLAMA_LOG_INFO("%s: n_expert_shared = %d\n", __func__, hparams.n_expert_shared); LLAMA_LOG_INFO("%s: expert_weights_scale = %.1f\n", __func__, hparams.expert_weights_scale); 
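The new llama-memory.h above defines llama_memory_i as the general memory contract that the unified KV cache implements and that future recurrent caches are meant to share. As a rough illustration of that contract (not part of the patch), the following self-contained sketch implements a subset of the sequence operations over plain (sequence, position) pairs; the toy_memory struct, its std::multimap storage and the local llama_pos/llama_seq_id aliases are assumptions made only for this example.

    // Illustrative sketch only: a toy object satisfying part of the llama_memory_i
    // contract from llama-memory.h. The real implementation stores K/V tensors per
    // cell; here a cell is just a (sequence, position) pair.
    #include <algorithm>
    #include <cstdint>
    #include <cstdio>
    #include <map>

    using llama_pos    = int32_t; // aliased locally for the sketch; defined in llama.h
    using llama_seq_id = int32_t;

    struct toy_memory {
        std::multimap<llama_seq_id, llama_pos> cells; // (sequence, position) pairs

        void clear() { cells.clear(); }

        // remove positions [p0, p1) of a sequence; negative bounds mean "unbounded"
        bool seq_rm(llama_seq_id seq_id, llama_pos p0, llama_pos p1) {
            for (auto it = cells.begin(); it != cells.end(); ) {
                const bool seq_match = seq_id < 0 || it->first == seq_id;
                const bool pos_match = (p0 < 0 || it->second >= p0) && (p1 < 0 || it->second < p1);
                if (seq_match && pos_match) {
                    it = cells.erase(it);
                } else {
                    ++it;
                }
            }
            return true;
        }

        // copy the cells of one sequence to another within [p0, p1)
        void seq_cp(llama_seq_id src, llama_seq_id dst, llama_pos p0, llama_pos p1) {
            std::multimap<llama_seq_id, llama_pos> added;
            for (const auto & [sid, pos] : cells) {
                if (sid == src && (p0 < 0 || pos >= p0) && (p1 < 0 || pos < p1)) {
                    added.emplace(dst, pos);
                }
            }
            cells.insert(added.begin(), added.end());
        }

        llama_pos seq_pos_max(llama_seq_id seq_id) const {
            llama_pos max_pos = -1;
            for (const auto & [sid, pos] : cells) {
                if (sid == seq_id) {
                    max_pos = std::max(max_pos, pos);
                }
            }
            return max_pos;
        }
    };

    int main() {
        toy_memory mem;
        for (llama_pos p = 0; p < 8; ++p) {
            mem.cells.emplace(0, p);  // pretend sequence 0 holds positions 0..7
        }
        mem.seq_cp(0, 1, 4, -1);      // share positions 4..7 with sequence 1
        mem.seq_rm(0, 0, 4);          // drop the first half of sequence 0
        printf("seq 0 max pos = %d, seq 1 max pos = %d\n", mem.seq_pos_max(0), mem.seq_pos_max(1));
        return 0;
    }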
LLAMA_LOG_INFO("%s: expert_weights_norm = %d\n", __func__, hparams.expert_weights_norm); - LLAMA_LOG_INFO("%s: expert_gating_func = %s\n", __func__, llama_expert_gating_func_name((enum llama_expert_gating_func_type) hparams.expert_gating_func)); + LLAMA_LOG_INFO("%s: expert_gating_func = %s\n", __func__, llama_expert_gating_func_name((llama_expert_gating_func_type) hparams.expert_gating_func)); LLAMA_LOG_INFO("%s: rope_yarn_log_mul = %.4f\n", __func__, hparams.rope_yarn_log_mul); } @@ -3821,9 +3830,9 @@ ggml_backend_buffer_type_t llama_model::select_buft(int il) const { }); } -const struct ggml_tensor * llama_model::get_tensor(const char * name) const { +const ggml_tensor * llama_model::get_tensor(const char * name) const { auto it = std::find_if(tensors_by_name.begin(), tensors_by_name.end(), - [name](const std::pair & it) { + [name](const std::pair & it) { return it.first == name; }); if (it == tensors_by_name.end()) { @@ -3833,255 +3842,7556 @@ const struct ggml_tensor * llama_model::get_tensor(const char * name) const { return it->second; } -// -// interface implementation -// +struct llm_build_llama : public llm_graph_context { + llm_build_llama(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) { + const int64_t n_embd_head = hparams.n_embd_head_v; -struct llama_model_params llama_model_default_params() { - struct llama_model_params result = { - /*.devices =*/ nullptr, - /*.n_gpu_layers =*/ 0, - /*.split_mode =*/ LLAMA_SPLIT_MODE_LAYER, - /*.main_gpu =*/ 0, - /*.tensor_split =*/ nullptr, - /*.progress_callback =*/ nullptr, - /*.progress_callback_user_data =*/ nullptr, - /*.kv_overrides =*/ nullptr, - /*.vocab_only =*/ false, - /*.use_mmap =*/ true, - /*.use_mlock =*/ false, - /*.check_tensors =*/ false, - }; + GGML_ASSERT(n_embd_head == hparams.n_embd_head_k); + GGML_ASSERT(n_embd_head == hparams.n_rot); -#ifdef GGML_USE_METAL - // note: we usually have plenty of VRAM, so by default offload all layers to the GPU - result.n_gpu_layers = 999; -#endif + ggml_tensor * cur; + ggml_tensor * inpL; - return result; -} + inpL = build_inp_embd(model.tok_embd); -const struct llama_vocab * llama_model_get_vocab(const struct llama_model * model) { - return &model->vocab; -} + // inp_pos - contains the positions + ggml_tensor * inp_pos = build_inp_pos(); -void llama_free_model(struct llama_model * model) { - llama_model_free(model); -} + auto * inp_attn = build_attn_inp_kv_unified(true, false); -void llama_model_free(struct llama_model * model) { - delete model; -} + const float kq_scale = hparams.f_attention_scale == 0.0f ? 
1.0f/sqrtf(float(n_embd_head)) : hparams.f_attention_scale; + for (int il = 0; il < n_layer; ++il) { + ggml_tensor * inpSA = inpL; -int32_t llama_model_n_ctx_train(const struct llama_model * model) { - return model->hparams.n_ctx_train; -} + // norm + cur = build_norm(inpL, + model.layers[il].attn_norm, NULL, + LLM_NORM_RMS, il); + cb(cur, "attn_norm", il); -int32_t llama_model_n_embd(const struct llama_model * model) { - return model->hparams.n_embd; -} + // self-attention + { + // rope freq factors for llama3; may return nullptr for llama2 and other models + ggml_tensor * rope_factors = static_cast(memory)->cbs.get_rope_factors(n_ctx_per_seq, il); + + // compute Q and K and RoPE them + ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur); + cb(Qcur, "Qcur", il); + if (model.layers[il].bq) { + Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq); + cb(Qcur, "Qcur", il); + } -int32_t llama_model_n_layer(const struct llama_model * model) { - return model->hparams.n_layer; -} + ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur); + cb(Kcur, "Kcur", il); + if (model.layers[il].bk) { + Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk); + cb(Kcur, "Kcur", il); + } -int32_t llama_model_n_head(const struct llama_model * model) { - return model->hparams.n_head(); -} + ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur); + cb(Vcur, "Vcur", il); + if (model.layers[il].bv) { + Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv); + cb(Vcur, "Vcur", il); + } -int32_t llama_model_n_head_kv(const struct llama_model * model) { - return model->hparams.n_head_kv(); -} + Qcur = ggml_rope_ext( + ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos, rope_factors, + n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, + ext_factor, attn_factor, beta_fast, beta_slow + ); + cb(Qcur, "Qcur", il); + + Kcur = ggml_rope_ext( + ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos, rope_factors, + n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, + ext_factor, attn_factor, beta_fast, beta_slow + ); + cb(Kcur, "Kcur", il); + + cur = build_attn(inp_attn, gf, + model.layers[il].wo, model.layers[il].bo, + Qcur, Kcur, Vcur, nullptr, kq_scale, il); + } -// deprecated -int32_t llama_n_ctx_train(const struct llama_model * model) { - return llama_model_n_ctx_train(model); -} + if (il == n_layer - 1) { + // skip computing output for unused tokens + ggml_tensor * inp_out_ids = build_inp_out_ids(); + cur = ggml_get_rows(ctx0, cur, inp_out_ids); + inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids); + } -// deprecated -int32_t llama_n_embd(const struct llama_model * model) { - return llama_model_n_embd(model); -} + // For Granite architecture + if (hparams.f_residual_scale) { + cur = ggml_scale(ctx0, cur, hparams.f_residual_scale); + } -// deprecated -int32_t llama_n_layer(const struct llama_model * model) { - return llama_model_n_layer(model); -} + ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA); + cb(ffn_inp, "ffn_inp", il); -// deprecated -int32_t llama_n_head(const struct llama_model * model) { - return llama_model_n_head(model); -} + // feed-forward network + if (model.layers[il].ffn_gate_inp == nullptr) { -enum llama_rope_type llama_model_rope_type(const struct llama_model * model) { - switch (model->arch) { - // these models do not use RoPE - case LLM_ARCH_GPT2: - case LLM_ARCH_GPTJ: - case LLM_ARCH_MPT: - case LLM_ARCH_REFACT: - case LLM_ARCH_BLOOM: - case LLM_ARCH_MAMBA: - case LLM_ARCH_JINA_BERT_V2: - case LLM_ARCH_T5: - case LLM_ARCH_T5ENCODER: - case 
LLM_ARCH_JAIS: - case LLM_ARCH_RWKV6: - case LLM_ARCH_RWKV6QWEN2: - case LLM_ARCH_WAVTOKENIZER_DEC: - return LLAMA_ROPE_TYPE_NONE; + cur = build_norm(ffn_inp, + model.layers[il].ffn_norm, NULL, + LLM_NORM_RMS, il); + cb(cur, "ffn_norm", il); - // use what we call a normal RoPE, operating on pairs of consecutive head values - case LLM_ARCH_LLAMA: - case LLM_ARCH_DECI: - case LLM_ARCH_BAICHUAN: - case LLM_ARCH_STARCODER: - case LLM_ARCH_PLAMO: - case LLM_ARCH_ORION: - case LLM_ARCH_INTERNLM2: - case LLM_ARCH_MINICPM: - case LLM_ARCH_XVERSE: - case LLM_ARCH_COMMAND_R: - case LLM_ARCH_COHERE2: - case LLM_ARCH_OLMO: - case LLM_ARCH_ARCTIC: - case LLM_ARCH_DEEPSEEK: - case LLM_ARCH_DEEPSEEK2: - case LLM_ARCH_CHATGLM: - case LLM_ARCH_GRANITE: - case LLM_ARCH_GRANITE_MOE: - case LLM_ARCH_CHAMELEON: - return LLAMA_ROPE_TYPE_NORM; + cur = build_ffn(cur, + model.layers[il].ffn_up, model.layers[il].ffn_up_b, NULL, + model.layers[il].ffn_gate, model.layers[il].ffn_gate_b, NULL, + model.layers[il].ffn_down, model.layers[il].ffn_down_b, NULL, + NULL, + LLM_FFN_SILU, LLM_FFN_PAR, il); + cb(cur, "ffn_out", il); + } else { + // MoE branch + cur = build_norm(ffn_inp, + model.layers[il].ffn_norm, NULL, + LLM_NORM_RMS, il); + cb(cur, "ffn_norm", il); + + cur = build_moe_ffn(cur, + model.layers[il].ffn_gate_inp, + model.layers[il].ffn_up_exps, + model.layers[il].ffn_gate_exps, + model.layers[il].ffn_down_exps, + nullptr, + n_expert, n_expert_used, + LLM_FFN_SILU, true, + false, 0.0, + LLAMA_EXPERT_GATING_FUNC_TYPE_SOFTMAX, + il); + cb(cur, "ffn_moe_out", il); + } - // the pairs of head values are offset by n_rot/2 - case LLM_ARCH_FALCON: - case LLM_ARCH_GROK: - case LLM_ARCH_DBRX: - case LLM_ARCH_BERT: - case LLM_ARCH_NOMIC_BERT: - case LLM_ARCH_STABLELM: - case LLM_ARCH_BITNET: - case LLM_ARCH_QWEN: - case LLM_ARCH_QWEN2: - case LLM_ARCH_QWEN2MOE: - case LLM_ARCH_OLMO2: - case LLM_ARCH_OLMOE: - case LLM_ARCH_PHI2: - case LLM_ARCH_PHI3: - case LLM_ARCH_PHIMOE: - case LLM_ARCH_GEMMA: - case LLM_ARCH_GEMMA2: - case LLM_ARCH_GEMMA3: - case LLM_ARCH_STARCODER2: - case LLM_ARCH_OPENELM: - case LLM_ARCH_GPTNEOX: - case LLM_ARCH_CODESHELL: - case LLM_ARCH_NEMOTRON: - case LLM_ARCH_EXAONE: - case LLM_ARCH_MINICPM3: - return LLAMA_ROPE_TYPE_NEOX; + // For Granite architecture + if (hparams.f_residual_scale) { + cur = ggml_scale(ctx0, cur, hparams.f_residual_scale); + } - case LLM_ARCH_QWEN2VL: - return LLAMA_ROPE_TYPE_MROPE; + cur = ggml_add(ctx0, cur, ffn_inp); + cb(cur, "ffn_out", il); - // all model arches should be listed explicitly here - case LLM_ARCH_UNKNOWN: - GGML_ABORT("unknown architecture"); - } + cur = build_cvec(cur, il); + cb(cur, "l_out", il); - return LLAMA_ROPE_TYPE_NONE; -} + // input for next layer + inpL = cur; + } -float llama_model_rope_freq_scale_train(const struct llama_model * model) { - return model->hparams.rope_freq_scale_train; -} + cur = inpL; -int32_t llama_model_meta_val_str(const struct llama_model * model, const char * key, char * buf, size_t buf_size) { - const auto & it = model->gguf_kv.find(key); - if (it == model->gguf_kv.end()) { - if (buf_size > 0) { - buf[0] = '\0'; + cur = build_norm(cur, + model.output_norm, NULL, + LLM_NORM_RMS, -1); + + cb(cur, "result_norm", -1); + res->t_embd = cur; + + // lm_head + cur = build_lora_mm(model.output, cur); + + // For Granite architecture + if (hparams.f_logit_scale) { + cur = ggml_scale(ctx0, cur, 1.0f / hparams.f_logit_scale); } - return -1; + + cb(cur, "result_output", -1); + res->t_logits = cur; + + ggml_build_forward_expand(gf, cur); } 
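The llm_build_llama graph above, like the other llm_build_* structs that follow, repeats the same per-layer skeleton: pre-norm, self-attention, residual add, pre-norm, feed-forward (dense or MoE), residual add, then a final norm before the lm_head. The sketch below restates that dataflow eagerly on plain float vectors so the shape of the loop is easier to follow; the norm/attn/ffn lambdas are stand-ins for build_norm/build_attn/build_ffn and do not compute anything meaningful, and the real builders construct lazy ggml graph nodes instead of values.

    // Sketch of the per-layer dataflow shared by the llm_build_* graphs.
    #include <cstddef>
    #include <cstdio>
    #include <vector>

    using vec = std::vector<float>;

    static vec add(const vec & a, const vec & b) {
        vec out(a.size());
        for (size_t i = 0; i < a.size(); ++i) {
            out[i] = a[i] + b[i];
        }
        return out;
    }

    int main() {
        const int n_layer = 2;

        auto norm = [](const vec & x) { return x; };                   // stand-in for build_norm
        auto attn = [](const vec & x) { return vec(x.size(), 0.1f); }; // stand-in for build_attn
        auto ffn  = [](const vec & x) { return vec(x.size(), 0.2f); }; // stand-in for build_ffn

        vec cur = {1.0f, 2.0f, 3.0f};              // token embeddings (inpL)

        for (int il = 0; il < n_layer; ++il) {
            const vec inpSA = cur;                 // residual input for the attention block

            vec attn_out = attn(norm(cur));        // pre-norm, then self-attention
            vec ffn_inp  = add(attn_out, inpSA);   // first residual connection

            vec ffn_out  = ffn(norm(ffn_inp));     // pre-norm, then feed-forward (dense or MoE)
            cur          = add(ffn_out, ffn_inp);  // second residual connection
        }

        cur = norm(cur);                           // final output norm before the lm_head
        printf("cur[0] = %.2f\n", cur[0]);
        return 0;
    }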
- return snprintf(buf, buf_size, "%s", it->second.c_str()); -} +}; -int32_t llama_model_meta_count(const struct llama_model * model) { - return (int)model->gguf_kv.size(); -} +struct llm_build_deci : public llm_graph_context { + llm_build_deci(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) { + const int64_t n_embd_head = hparams.n_embd_head_v; -int32_t llama_model_meta_key_by_index(const struct llama_model * model, int i, char * buf, size_t buf_size) { - if (i < 0 || i >= (int)model->gguf_kv.size()) { - if (buf_size > 0) { - buf[0] = '\0'; + GGML_ASSERT(n_embd_head == hparams.n_embd_head_k); + GGML_ASSERT(n_embd_head == hparams.n_rot); + + ggml_tensor * cur; + ggml_tensor * inpL; + + inpL = build_inp_embd(model.tok_embd); + + // inp_pos - contains the positions + ggml_tensor * inp_pos = build_inp_pos(); + + auto * inp_attn = build_attn_inp_kv_unified(true, false); + + const float kq_scale = hparams.f_attention_scale == 0.0f ? 1.0f/sqrtf(float(n_embd_head)) : hparams.f_attention_scale; + for (int il = 0; il < n_layer; ++il) { + ggml_tensor * inpSA = inpL; + const int64_t n_head_kv = hparams.n_head_kv(il); + const int64_t n_head = hparams.n_head(il); + + if (n_head == 0) { + // attention-free layer of Llama-3_1-Nemotron-51B + cur = inpL; + } else { + // norm + cur = build_norm(inpL, + model.layers[il].attn_norm, NULL, + LLM_NORM_RMS, il); + cb(cur, "attn_norm", il); + } + + if (n_head > 0 && n_head_kv == 0) { + // "linear attention" of Llama-3_1-Nemotron-51B + cur = build_lora_mm(model.layers[il].wo, cur); + cb(cur, "wo", il); + } else if (n_head > 0) { + // self-attention + // rope freq factors for llama3; may return nullptr for llama2 and other models + ggml_tensor * rope_factors = static_cast(memory)->cbs.get_rope_factors(n_ctx_per_seq, il); + + // compute Q and K and RoPE them + ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur); + cb(Qcur, "Qcur", il); + if (model.layers[il].bq) { + Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq); + cb(Qcur, "Qcur", il); + } + + ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur); + cb(Kcur, "Kcur", il); + if (model.layers[il].bk) { + Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk); + cb(Kcur, "Kcur", il); + } + + ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur); + cb(Vcur, "Vcur", il); + if (model.layers[il].bv) { + Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv); + cb(Vcur, "Vcur", il); + } + + Qcur = ggml_rope_ext( + ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos, rope_factors, + n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, + ext_factor, attn_factor, beta_fast, beta_slow + ); + cb(Qcur, "Qcur", il); + + Kcur = ggml_rope_ext( + ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos, rope_factors, + n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, + ext_factor, attn_factor, beta_fast, beta_slow + ); + cb(Kcur, "Kcur", il); + + cur = build_attn(inp_attn, gf, + model.layers[il].wo, model.layers[il].bo, + Qcur, Kcur, Vcur, nullptr, kq_scale, il); + } + + if (il == n_layer - 1) { + // skip computing output for unused tokens + ggml_tensor * inp_out_ids = build_inp_out_ids(); + cur = ggml_get_rows(ctx0, cur, inp_out_ids); + inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids); + } + + // For Granite architecture + if (hparams.f_residual_scale) { + cur = ggml_scale(ctx0, cur, hparams.f_residual_scale); + } + + // modified to support attention-free layer of Llama-3_1-Nemotron-51B + ggml_tensor * ffn_inp = cur; + 
if (n_head > 0) { + ffn_inp = ggml_add(ctx0, cur, inpSA); + cb(ffn_inp, "ffn_inp", il); + } + + // feed-forward network + if (model.layers[il].ffn_gate_inp == nullptr) { + cur = build_norm(ffn_inp, + model.layers[il].ffn_norm, NULL, + LLM_NORM_RMS, il); + cb(cur, "ffn_norm", il); + + cur = build_ffn(cur, + model.layers[il].ffn_up, model.layers[il].ffn_up_b, NULL, + model.layers[il].ffn_gate, model.layers[il].ffn_gate_b, NULL, + model.layers[il].ffn_down, model.layers[il].ffn_down_b, NULL, + NULL, + LLM_FFN_SILU, LLM_FFN_PAR, il); + cb(cur, "ffn_out", il); + } + + // For Granite architecture + if (hparams.f_residual_scale) { + cur = ggml_scale(ctx0, cur, hparams.f_residual_scale); + } + + cur = ggml_add(ctx0, cur, ffn_inp); + cb(cur, "ffn_out", il); + + cur = build_cvec(cur, il); + cb(cur, "l_out", il); + + // input for next layer + inpL = cur; } - return -1; - } - auto it = model->gguf_kv.begin(); - std::advance(it, i); - return snprintf(buf, buf_size, "%s", it->first.c_str()); -} -int32_t llama_model_meta_val_str_by_index(const struct llama_model * model, int32_t i, char * buf, size_t buf_size) { - if (i < 0 || i >= (int)model->gguf_kv.size()) { - if (buf_size > 0) { - buf[0] = '\0'; + cur = inpL; + + cur = build_norm(cur, + model.output_norm, NULL, + LLM_NORM_RMS, -1); + + cb(cur, "result_norm", -1); + res->t_embd = cur; + + // lm_head + cur = build_lora_mm(model.output, cur); + + // For Granite architecture + if (hparams.f_logit_scale) { + cur = ggml_scale(ctx0, cur, 1.0f / hparams.f_logit_scale); } - return -1; + + cb(cur, "result_output", -1); + res->t_logits = cur; + + ggml_build_forward_expand(gf, cur); } - auto it = model->gguf_kv.begin(); - std::advance(it, i); - return snprintf(buf, buf_size, "%s", it->second.c_str()); -} +}; -int32_t llama_model_desc(const struct llama_model * model, char * buf, size_t buf_size) { - return snprintf(buf, buf_size, "%s", model->desc().c_str()); -} +struct llm_build_baichuan : public llm_graph_context { + llm_build_baichuan(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) { + const int64_t n_embd_head = hparams.n_embd_head_v; -uint64_t llama_model_size(const struct llama_model * model) { - return model->size(); -} + GGML_ASSERT(n_embd_head == hparams.n_embd_head_k); + GGML_ASSERT(n_embd_head == hparams.n_rot); -const char * llama_model_chat_template(const struct llama_model * model, const char * name) { - const auto key = name ? LLM_KV(model->arch, name)(LLM_KV_TOKENIZER_CHAT_TEMPLATE_N) - : LLM_KV(model->arch)(LLM_KV_TOKENIZER_CHAT_TEMPLATE); - const auto & it = model->gguf_kv.find(key); - if (it == model->gguf_kv.end()) { - return nullptr; - } + ggml_tensor * cur; + ggml_tensor * inpL; - return it->second.c_str(); -} + inpL = build_inp_embd(model.tok_embd); -uint64_t llama_model_n_params(const struct llama_model * model) { - return model->n_elements(); -} + // inp_pos - contains the positions + ggml_tensor * inp_pos = model.type == LLM_TYPE_7B ? 
build_inp_pos() : nullptr; -bool llama_model_has_encoder(const struct llama_model * model) { - switch (model->arch) { - case LLM_ARCH_T5: return true; - case LLM_ARCH_T5ENCODER: return true; - default: return false; - } -} + auto * inp_attn = build_attn_inp_kv_unified(true, false); -bool llama_model_has_decoder(const struct llama_model * model) { - switch (model->arch) { - case LLM_ARCH_T5ENCODER: return false; - default: return true; - } -} + for (int il = 0; il < n_layer; ++il) { + ggml_tensor * inpSA = inpL; -llama_token llama_model_decoder_start_token(const struct llama_model * model) { - return model->hparams.dec_start_token_id; -} + cur = build_norm(inpL, + model.layers[il].attn_norm, NULL, + LLM_NORM_RMS, il); + cb(cur, "attn_norm", il); -bool llama_model_is_recurrent(const struct llama_model * model) { - switch (model->arch) { - case LLM_ARCH_MAMBA: return true; - case LLM_ARCH_RWKV6: return true; - case LLM_ARCH_RWKV6QWEN2: return true; - default: return false; + // self-attention + { + ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur); + cb(Qcur, "Qcur", il); + + ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur); + cb(Kcur, "Kcur", il); + + ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur); + cb(Vcur, "Vcur", il); + + switch (model.type) { + case LLM_TYPE_7B: + Qcur = ggml_rope_ext( + ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos, nullptr, + n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, + ext_factor, attn_factor, beta_fast, beta_slow + ); + Kcur = ggml_rope_ext( + ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos, nullptr, + n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, + ext_factor, attn_factor, beta_fast, beta_slow + ); + break; + case LLM_TYPE_13B: + Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd/n_head, n_head, n_tokens); + Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd/n_head, n_head, n_tokens); + break; + default: + GGML_ABORT("fatal error"); + } + cb(Qcur, "Qcur", il); + cb(Kcur, "Kcur", il); + + cur = build_attn(inp_attn, gf, + model.layers[il].wo, NULL, + Qcur, Kcur, Vcur, nullptr, 1.0f/sqrtf(float(n_embd_head)), il); + } + + if (il == n_layer - 1) { + // skip computing output for unused tokens + ggml_tensor * inp_out_ids = build_inp_out_ids(); + cur = ggml_get_rows(ctx0, cur, inp_out_ids); + inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids); + } + + ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA); + cb(ffn_inp, "ffn_inp", il); + + // feed-forward network + { + cur = build_norm(ffn_inp, + model.layers[il].ffn_norm, NULL, + LLM_NORM_RMS, il); + cb(cur, "ffn_norm", il); + + cur = build_ffn(cur, + model.layers[il].ffn_up, NULL, NULL, + model.layers[il].ffn_gate, NULL, NULL, + model.layers[il].ffn_down, NULL, NULL, + NULL, + LLM_FFN_SILU, LLM_FFN_PAR, il); + cb(cur, "ffn_out", il); + } + + cur = ggml_add(ctx0, cur, ffn_inp); + + cur = build_cvec(cur, il); + cb(cur, "l_out", il); + + // input for next layer + inpL = cur; + } + + cur = inpL; + + cur = build_norm(cur, + model.output_norm, NULL, + LLM_NORM_RMS, -1); + + cb(cur, "result_norm", -1); + res->t_embd = cur; + + // lm_head + cur = build_lora_mm(model.output, cur); + + cb(cur, "result_output", -1); + res->t_logits = cur; + + ggml_build_forward_expand(gf, cur); } +}; + +struct llm_build_xverse : public llm_graph_context { + llm_build_xverse(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) { + const int64_t n_embd_head = hparams.n_embd_head_v; + + GGML_ASSERT(n_embd_head == 
hparams.n_embd_head_k); + GGML_ASSERT(n_embd_head == hparams.n_rot); + + ggml_tensor * cur; + ggml_tensor * inpL; + + inpL = build_inp_embd(model.tok_embd); + + // inp_pos - contains the positions + ggml_tensor * inp_pos = build_inp_pos(); + + auto * inp_attn = build_attn_inp_kv_unified(true, false); + + for (int il = 0; il < n_layer; ++il) { + ggml_tensor * inpSA = inpL; + + cur = build_norm(inpL, + model.layers[il].attn_norm, NULL, + LLM_NORM_RMS, il); + cb(cur, "attn_norm", il); + + // self-attention + { + ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur); + cb(Qcur, "Qcur", il); + + ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur); + cb(Kcur, "Kcur", il); + + ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur); + cb(Vcur, "Vcur", il); + + Qcur = ggml_rope_ext( + ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos, nullptr, + n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, + ext_factor, attn_factor, beta_fast, beta_slow + ); + cb(Qcur, "Qcur", il); + + Kcur = ggml_rope_ext( + ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos, nullptr, + n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, + ext_factor, attn_factor, beta_fast, beta_slow + ); + cb(Kcur, "Kcur", il); + + cur = build_attn(inp_attn, gf, + model.layers[il].wo, NULL, + Qcur, Kcur, Vcur, nullptr, 1.0f/sqrtf(float(n_embd_head)), il); + } + + if (il == n_layer - 1) { + // skip computing output for unused tokens + ggml_tensor * inp_out_ids = build_inp_out_ids(); + cur = ggml_get_rows(ctx0, cur, inp_out_ids); + inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids); + } + + ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA); + cb(ffn_inp, "ffn_inp", il); + + // feed-forward network + { + cur = build_norm(ffn_inp, + model.layers[il].ffn_norm, NULL, + LLM_NORM_RMS, il); + cb(cur, "ffn_norm", il); + + cur = build_ffn(cur, + model.layers[il].ffn_up, NULL, NULL, + model.layers[il].ffn_gate, NULL, NULL, + model.layers[il].ffn_down, NULL, NULL, + NULL, + LLM_FFN_SILU, LLM_FFN_PAR, il); + cb(cur, "ffn_out", il); + } + + cur = ggml_add(ctx0, cur, ffn_inp); + + cur = build_cvec(cur, il); + cb(cur, "l_out", il); + + // input for next layer + inpL = cur; + } + + cur = inpL; + + cur = build_norm(cur, model.output_norm, NULL, LLM_NORM_RMS, -1); + + cb(cur, "result_norm", -1); + res->t_embd = cur; + + // lm_head + cur = build_lora_mm(model.output, cur); + + cb(cur, "result_output", -1); + res->t_logits = cur; + + ggml_build_forward_expand(gf, cur); + } +}; + +struct llm_build_falcon : public llm_graph_context { + llm_build_falcon(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) { + const int64_t n_embd_head = hparams.n_embd_head_v; + const int64_t n_embd_gqa = hparams.n_embd_v_gqa(); + + GGML_ASSERT(n_embd_head == hparams.n_embd_head_k); + GGML_ASSERT(n_embd_head == hparams.n_rot); + + ggml_tensor * cur; + ggml_tensor * inpL; + + inpL = build_inp_embd(model.tok_embd); + + // inp_pos - contains the positions + ggml_tensor * inp_pos = build_inp_pos(); + + auto * inp_attn = build_attn_inp_kv_unified(true, false); + + for (int il = 0; il < n_layer; ++il) { + ggml_tensor * attn_norm; + + attn_norm = build_norm(inpL, + model.layers[il].attn_norm, + model.layers[il].attn_norm_b, + LLM_NORM, il); + cb(attn_norm, "attn_norm", il); + + // self-attention + { + if (model.layers[il].attn_norm_2) { + // Falcon-40B + cur = build_norm(inpL, + model.layers[il].attn_norm_2, + model.layers[il].attn_norm_2_b, + LLM_NORM, il); + 
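Most of these builders pass Qcur and Kcur through ggml_rope_ext before calling build_attn. As a conceptual refresher (not the library implementation), the sketch below applies a rotary embedding to a single toy head using the standard theta = pos * base^(-2i/d) frequencies and the "normal" pairing of consecutive dimensions; the NeoX variant used by other architectures pairs dimension i with i + n_rot/2 instead, and the real call also threads through freq_scale, ext_factor and the other YaRN parameters, which are ignored here.

    // Conceptual sketch of rotary position embedding (RoPE) on one attention head.
    // Pairs of dimensions (2i, 2i+1) are rotated by an angle that depends on the
    // token position and the dimension index; ggml_rope_ext does this on tensors.
    #include <cmath>
    #include <cstdio>
    #include <vector>

    static void rope_inplace(std::vector<float> & head, int pos, float freq_base) {
        const int d = (int) head.size();
        for (int i = 0; i < d; i += 2) {
            const float theta = (float) pos * std::pow(freq_base, -(float) i / (float) d);
            const float c = std::cos(theta);
            const float s = std::sin(theta);
            const float x0 = head[i];
            const float x1 = head[i + 1];
            head[i]     = x0*c - x1*s;
            head[i + 1] = x0*s + x1*c;
        }
    }

    int main() {
        std::vector<float> q = {1.0f, 0.0f, 1.0f, 0.0f}; // a 4-dim toy head
        rope_inplace(q, /*pos =*/ 3, /*freq_base =*/ 10000.0f);
        printf("q = [%.3f, %.3f, %.3f, %.3f]\n", q[0], q[1], q[2], q[3]);
        return 0;
    }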
cb(cur, "attn_norm_2", il); + } else { + cur = attn_norm; + } + + cur = build_lora_mm(model.layers[il].wqkv, cur); + cb(cur, "wqkv", il); + + ggml_tensor * Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd))); + ggml_tensor * Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd))); + ggml_tensor * Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd + n_embd_gqa))); + + cb(Qcur, "Qcur", il); + cb(Kcur, "Kcur", il); + cb(Vcur, "Vcur", il); + + Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens); + Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens); + + // using mode = 2 for neox mode + Qcur = ggml_rope_ext( + ctx0, Qcur, inp_pos, nullptr, n_rot, rope_type, n_ctx_orig, + freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow + ); + cb(Qcur, "Qcur", il); + + Kcur = ggml_rope_ext( + ctx0, Kcur, inp_pos, nullptr, n_rot, rope_type, n_ctx_orig, + freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow + ); + cb(Kcur, "Kcur", il); + + cur = build_attn(inp_attn, gf, + model.layers[il].wo, NULL, + Qcur, Kcur, Vcur, nullptr, 1.0f/sqrtf(float(n_embd_head)), il); + } + + if (il == n_layer - 1) { + // skip computing output for unused tokens + ggml_tensor * inp_out_ids = build_inp_out_ids(); + cur = ggml_get_rows(ctx0, cur, inp_out_ids); + inpL = ggml_get_rows(ctx0, inpL, inp_out_ids); + attn_norm = ggml_get_rows(ctx0, attn_norm, inp_out_ids); + } + + ggml_tensor * ffn_inp = cur; + + // feed forward + { + cur = build_ffn(attn_norm, // !! use the attn norm, not the result + model.layers[il].ffn_up, NULL, NULL, + NULL, NULL, NULL, + model.layers[il].ffn_down, NULL, NULL, + NULL, + LLM_FFN_GELU, LLM_FFN_SEQ, il); + cb(cur, "ffn_out", il); + } + + cur = ggml_add(ctx0, cur, ffn_inp); + cur = ggml_add(ctx0, cur, inpL); + + cur = build_cvec(cur, il); + cb(cur, "l_out", il); + + // input for next layer + inpL = cur; + } + + cur = inpL; + + // norm + cur = build_norm(cur, + model.output_norm, + model.output_norm_b, + LLM_NORM, -1); + + cb(cur, "result_norm", -1); + res->t_embd = cur; + + cur = build_lora_mm(model.output, cur); + + cb(cur, "result_output", -1); + res->t_logits = cur; + + ggml_build_forward_expand(gf, cur); + } +}; + +struct llm_build_grok : public llm_graph_context { + llm_build_grok(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) { + const int64_t n_embd_head = hparams.n_embd_head_v; + + GGML_ASSERT(n_embd_head == hparams.n_embd_head_k); + GGML_ASSERT(n_embd_head == hparams.n_rot); + + ggml_tensor * cur; + ggml_tensor * inpL; + + inpL = build_inp_embd(model.tok_embd); + + // multiply by embedding_multiplier_scale of 78.38367176906169 + inpL = ggml_scale(ctx0, inpL, 78.38367176906169f); + + // inp_pos - contains the positions + ggml_tensor * inp_pos = build_inp_pos(); + + auto * inp_attn = build_attn_inp_kv_unified(true, false); + + for (int il = 0; il < n_layer; ++il) { + ggml_tensor * inpSA = inpL; + + // norm + cur = build_norm(inpL, + model.layers[il].attn_norm, NULL, + LLM_NORM_RMS, il); + cb(cur, "attn_norm", il); + + + // self-attention + { + // compute Q and K and RoPE them + ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur); + cb(Qcur, "Qcur", il); + if (model.layers[il].bq) { + Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq); + cb(Qcur, "Qcur", il); + } + + ggml_tensor * Kcur = 
build_lora_mm(model.layers[il].wk, cur); + cb(Kcur, "Kcur", il); + if (model.layers[il].bk) { + Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk); + cb(Kcur, "Kcur", il); + } + + ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur); + cb(Vcur, "Vcur", il); + if (model.layers[il].bv) { + Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv); + cb(Vcur, "Vcur", il); + } + + Qcur = ggml_rope_ext( + ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos, nullptr, + n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, + ext_factor, attn_factor, beta_fast, beta_slow + ); + cb(Qcur, "Qcur", il); + + Kcur = ggml_rope_ext( + ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos, nullptr, + n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, + ext_factor, attn_factor, beta_fast, beta_slow + ); + cb(Kcur, "Kcur", il); + + cur = build_attn(inp_attn, gf, + model.layers[il].wo, model.layers[il].bo, + Qcur, Kcur, Vcur, nullptr, 1.0f, il); + } + + if (il == n_layer - 1) { + // skip computing output for unused tokens + ggml_tensor * inp_out_ids = build_inp_out_ids(); + cur = ggml_get_rows(ctx0, cur, inp_out_ids); + inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids); + } + + // Grok + // if attn_out_norm is present then apply it before adding the input + if (model.layers[il].attn_out_norm) { + cur = build_norm(cur, + model.layers[il].attn_out_norm, NULL, + LLM_NORM_RMS, il); + cb(cur, "attn_out_norm", il); + } + + ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA); + cb(ffn_inp, "ffn_inp", il); + + // feed-forward network + // MoE branch + cur = build_norm(ffn_inp, + model.layers[il].ffn_norm, NULL, + LLM_NORM_RMS, il); + cb(cur, "ffn_norm", il); + + cur = build_moe_ffn(cur, + model.layers[il].ffn_gate_inp, + model.layers[il].ffn_up_exps, + model.layers[il].ffn_gate_exps, + model.layers[il].ffn_down_exps, + nullptr, + n_expert, n_expert_used, + LLM_FFN_GELU, true, + false, 0.0, + LLAMA_EXPERT_GATING_FUNC_TYPE_SOFTMAX, + il); + cb(cur, "ffn_moe_out", il); + + // Grok + // if layer_out_norm is present then apply it before adding the input + // Idea: maybe ffn_out_norm is a better name + if (model.layers[il].layer_out_norm) { + cur = build_norm(cur, + model.layers[il].layer_out_norm, NULL, + LLM_NORM_RMS, il); + cb(cur, "layer_out_norm", il); + } + + cur = ggml_add(ctx0, cur, ffn_inp); + cb(cur, "ffn_out", il); + + cur = build_cvec(cur, il); + cb(cur, "l_out", il); + + // input for next layer + inpL = cur; + } + + cur = inpL; + + cur = build_norm(cur, + model.output_norm, NULL, + LLM_NORM_RMS, -1); + + cb(cur, "result_norm", -1); + res->t_embd = cur; + + // lm_head + cur = build_lora_mm(model.output, cur); + + // Grok + // multiply logits by output_multiplier_scale of 0.5773502691896257 + + cur = ggml_scale(ctx0, cur, 0.5773502691896257f); + + cb(cur, "result_output", -1); + res->t_logits = cur; + + ggml_build_forward_expand(gf, cur); + } +}; + +struct llm_build_dbrx : public llm_graph_context { + llm_build_dbrx(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) { + const int64_t n_embd_head = hparams.n_embd_head_v; + const int64_t n_embd_gqa = hparams.n_embd_v_gqa(); + + GGML_ASSERT(n_embd_head == hparams.n_embd_head_k); + GGML_ASSERT(n_embd_head == hparams.n_rot); + + ggml_tensor * cur; + ggml_tensor * inpL; + + inpL = build_inp_embd(model.tok_embd); + + // inp_pos - contains the positions + ggml_tensor * inp_pos = build_inp_pos(); + + auto * inp_attn = build_attn_inp_kv_unified(true, false); + + for (int il = 0; 
il < n_layer; ++il) { + ggml_tensor * inpSA = inpL; + + // norm + cur = build_norm(inpL, + model.layers[il].attn_norm, NULL, + LLM_NORM, il); + cb(cur, "attn_norm", il); + + // self-attention + { + ggml_tensor * Qcur = nullptr; + ggml_tensor * Kcur = nullptr; + ggml_tensor * Vcur = nullptr; + + cur = build_lora_mm(model.layers[il].wqkv, cur); + cb(cur, "wqkv", il); + + cur = ggml_clamp(ctx0, cur, -hparams.f_clamp_kqv, hparams.f_clamp_kqv); + cb(cur, "wqkv_clamped", il); + + Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd))); + Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd))); + Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd + n_embd_gqa))); + + cb(Qcur, "Qcur", il); + cb(Kcur, "Kcur", il); + cb(Vcur, "Vcur", il); + + Qcur = ggml_rope_ext( + ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos, nullptr, + n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, + ext_factor, attn_factor, beta_fast, beta_slow + ); + cb(Qcur, "Qcur", il); + + Kcur = ggml_rope_ext( + ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos, nullptr, + n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, + ext_factor, attn_factor, beta_fast, beta_slow + ); + cb(Kcur, "Kcur", il); + + cur = build_attn(inp_attn, gf, + model.layers[il].wo, NULL, + Qcur, Kcur, Vcur, nullptr, 1.0f/sqrtf(float(n_embd_head)), il); + } + + if (il == n_layer - 1) { + // skip computing output for unused tokens + ggml_tensor * inp_out_ids = build_inp_out_ids(); + cur = ggml_get_rows(ctx0, cur, inp_out_ids); + inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids); + } + + ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA); + cb(ffn_inp, "ffn_inp", il); + + // feed-forward network + // MoE branch + cur = build_norm(ffn_inp, + model.layers[il].attn_out_norm, NULL, + LLM_NORM, il); + cb(cur, "attn_out_norm", il); + + cur = build_moe_ffn(cur, + model.layers[il].ffn_gate_inp, + model.layers[il].ffn_up_exps, + model.layers[il].ffn_gate_exps, + model.layers[il].ffn_down_exps, + nullptr, + n_expert, n_expert_used, + LLM_FFN_SILU, true, + false, 0.0, + LLAMA_EXPERT_GATING_FUNC_TYPE_SOFTMAX, + il); + cb(cur, "ffn_moe_out", il); + + cur = ggml_add(ctx0, cur, ffn_inp); + cb(cur, "ffn_out", il); + + cur = build_cvec(cur, il); + cb(cur, "l_out", il); + + // input for next layer + inpL = cur; + } + + cur = inpL; + + cur = build_norm(cur, + model.output_norm, NULL, + LLM_NORM, -1); + + cb(cur, "result_norm", -1); + res->t_embd = cur; + + // lm_head + cur = build_lora_mm(model.output, cur); + + cb(cur, "result_output", -1); + res->t_logits = cur; + + ggml_build_forward_expand(gf, cur); + } +}; + +struct llm_build_starcoder : public llm_graph_context { + llm_build_starcoder(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) { + const int64_t n_embd_head = hparams.n_embd_head_v; + const int64_t n_embd_gqa = hparams.n_embd_v_gqa(); + + GGML_ASSERT(n_embd_head == hparams.n_embd_head_k); + + ggml_tensor * cur; + ggml_tensor * inpL; + + inpL = build_inp_embd(model.tok_embd); + + // inp_pos - contains the positions + ggml_tensor * inp_pos = build_inp_pos(); + + auto * inp_attn = build_attn_inp_kv_unified(true, false); + + ggml_tensor * pos = ggml_get_rows(ctx0, model.pos_embd, inp_pos); + cb(pos, "pos_embd", -1); + + inpL = ggml_add(ctx0, inpL, pos); + cb(inpL, "inpL", -1); + + for (int il = 0; il < n_layer; 
++il) { + cur = build_norm(inpL, + model.layers[il].attn_norm, + model.layers[il].attn_norm_b, + LLM_NORM, il); + cb(cur, "attn_norm", il); + + // self-attention + { + cur = build_lora_mm(model.layers[il].wqkv, cur); + cb(cur, "wqkv", il); + + cur = ggml_add(ctx0, cur, model.layers[il].bqkv); + cb(cur, "bqkv", il); + + ggml_tensor * Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd))); + ggml_tensor * Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd))); + ggml_tensor * Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd + n_embd_gqa))); + + cb(Qcur, "Qcur", il); + cb(Kcur, "Kcur", il); + cb(Vcur, "Vcur", il); + + Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens); + + cur = build_attn(inp_attn, gf, + model.layers[il].wo, model.layers[il].bo, + Qcur, Kcur, Vcur, nullptr, 1.0f/sqrtf(float(n_embd_head)), il); + } + + if (il == n_layer - 1) { + // skip computing output for unused tokens + ggml_tensor * inp_out_ids = build_inp_out_ids(); + cur = ggml_get_rows(ctx0, cur, inp_out_ids); + inpL = ggml_get_rows(ctx0, inpL, inp_out_ids); + } + + // add the input + ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpL); + cb(ffn_inp, "ffn_inp", il); + + // FF + { + cur = build_norm(ffn_inp, + model.layers[il].ffn_norm, + model.layers[il].ffn_norm_b, + LLM_NORM, il); + cb(cur, "ffn_norm", il); + + cur = build_ffn(cur, + model.layers[il].ffn_up, model.layers[il].ffn_up_b, NULL, + NULL, NULL, NULL, + model.layers[il].ffn_down, model.layers[il].ffn_down_b, NULL, + NULL, + LLM_FFN_GELU, LLM_FFN_SEQ, il); + cb(cur, "ffn_out", il); + } + + cur = ggml_add(ctx0, cur, ffn_inp); + + cur = build_cvec(cur, il); + cb(cur, "l_out", il); + + // input for next layer + inpL = cur; + } + + cur = build_norm(inpL, + model.output_norm, + model.output_norm_b, + LLM_NORM, -1); + + cb(cur, "result_norm", -1); + res->t_embd = cur; + + cur = build_lora_mm(model.output, cur); + + cb(cur, "result_output", -1); + res->t_logits = cur; + + ggml_build_forward_expand(gf, cur); + } +}; + +struct llm_build_refact : public llm_graph_context { + llm_build_refact(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) { + const int64_t n_embd_head = hparams.n_embd_head_v; + + GGML_ASSERT(n_embd_head == hparams.n_embd_head_k); + + ggml_tensor * cur; + ggml_tensor * inpL; + + inpL = build_inp_embd(model.tok_embd); + + auto * inp_attn = build_attn_inp_kv_unified(true, false); + + for (int il = 0; il < n_layer; ++il) { + ggml_tensor * inpSA = inpL; + + cur = build_norm(inpL, + model.layers[il].attn_norm, NULL, + LLM_NORM_RMS, il); + cb(cur, "attn_norm", il); + + // self-attention + { + ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur); + cb(Qcur, "Qcur", il); + + ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur); + cb(Kcur, "Kcur", il); + + ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur); + cb(Vcur, "Vcur", il); + + Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens); + cb(Kcur, "Kcur", il); + + Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens); + cb(Qcur, "Qcur", il); + + cur = build_attn(inp_attn, gf, + model.layers[il].wo, NULL, + Qcur, Kcur, Vcur, nullptr, 1.0f/sqrtf(float(n_embd_head)), il); + } + + if (il == n_layer - 1) { + // skip computing output for unused tokens + ggml_tensor * inp_out_ids = build_inp_out_ids(); + cur = ggml_get_rows(ctx0, cur, 
inp_out_ids); + inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids); + } + + ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA); + cb(ffn_inp, "ffn_inp", il); + + // feed-forward network + { + cur = build_norm(ffn_inp, + model.layers[il].ffn_norm, NULL, + LLM_NORM_RMS, il); + cb(cur, "ffn_norm", il); + + cur = build_ffn(cur, + model.layers[il].ffn_up, NULL, NULL, + model.layers[il].ffn_gate, NULL, NULL, + model.layers[il].ffn_down, NULL, NULL, + NULL, + LLM_FFN_SILU, LLM_FFN_PAR, il); + cb(cur, "ffn_out", il); + } + + cur = ggml_add(ctx0, cur, ffn_inp); + + cur = build_cvec(cur, il); + cb(cur, "l_out", il); + + // input for next layer + inpL = cur; + } + + cur = inpL; + + cur = build_norm(cur, + model.output_norm, NULL, + LLM_NORM_RMS, -1); + + cb(cur, "result_norm", -1); + res->t_embd = cur; + + // lm_head + cur = build_lora_mm(model.output, cur); + + cb(cur, "result_output", -1); + res->t_logits = cur; + + ggml_build_forward_expand(gf, cur); + } +}; + +struct llm_build_bert : public llm_graph_context { + llm_build_bert(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) { + const int64_t n_embd_head = hparams.n_embd_head_v; + const int64_t n_embd_gqa = hparams.n_embd_v_gqa(); + + GGML_ASSERT(n_embd_head == hparams.n_embd_head_k); + + ggml_tensor * cur; + ggml_tensor * inpL; + ggml_tensor * inp_pos = nullptr; + + if (model.arch != LLM_ARCH_JINA_BERT_V2) { + inp_pos = build_inp_pos(); + } + + // construct input embeddings (token, type, position) + inpL = build_inp_embd(model.tok_embd); + + // token types are hardcoded to zero ("Sentence A") + ggml_tensor * type_row0 = ggml_view_1d(ctx0, model.type_embd, n_embd, 0); + inpL = ggml_add(ctx0, inpL, type_row0); + if (model.arch == LLM_ARCH_BERT) { + inpL = ggml_add(ctx0, ggml_get_rows(ctx0, model.pos_embd, inp_pos), inpL); + } + cb(inpL, "inp_embd", -1); + + // embed layer norm + inpL = build_norm(inpL, model.tok_norm, model.tok_norm_b, LLM_NORM, -1); + cb(inpL, "inp_norm", -1); + + auto * inp_attn = build_attn_inp_no_cache(); + + // iterate layers + for (int il = 0; il < n_layer; ++il) { + ggml_tensor * cur = inpL; + + ggml_tensor * Qcur; + ggml_tensor * Kcur; + ggml_tensor * Vcur; + + // self-attention + if (model.arch == LLM_ARCH_BERT || model.arch == LLM_ARCH_JINA_BERT_V2) { + Qcur = ggml_add(ctx0, build_lora_mm(model.layers[il].wq, cur), model.layers[il].bq); + + if (model.layers[il].attn_q_norm) { + Qcur = build_norm(Qcur, + model.layers[il].attn_q_norm, + model.layers[il].attn_q_norm_b, + LLM_NORM, il); + } + + Kcur = ggml_add(ctx0, build_lora_mm(model.layers[il].wk, cur), model.layers[il].bk); + + if (model.layers[il].attn_k_norm) { + Kcur = build_norm(Kcur, + model.layers[il].attn_k_norm, + model.layers[il].attn_k_norm_b, + LLM_NORM, il); + } + + Vcur = ggml_add(ctx0, build_lora_mm(model.layers[il].wv, cur), model.layers[il].bv); + + Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens); + Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens); + Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens); + } else { + // compute Q and K and RoPE them + cur = build_lora_mm(model.layers[il].wqkv, cur); + cb(cur, "wqkv", il); + + Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd))); + Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd))); + Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd + 
n_embd_gqa))); + + Qcur = ggml_rope_ext( + ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos, nullptr, + n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, + ext_factor, attn_factor, beta_fast, beta_slow + ); + cb(Qcur, "Qcur", il); + + Kcur = ggml_rope_ext( + ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos, nullptr, + n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, + ext_factor, attn_factor, beta_fast, beta_slow + ); + cb(Kcur, "Kcur", il); + + Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens); + } + + cb(Qcur, "Qcur", il); + cb(Kcur, "Kcur", il); + cb(Vcur, "Vcur", il); + + cur = build_attn(inp_attn, gf, + model.layers[il].wo, model.layers[il].bo, + Qcur, Kcur, Vcur, nullptr, 1.0f/sqrtf(float(n_embd_head)), il); + cb(cur, "kqv_out", il); + + if (il == n_layer - 1 && pooling_type == LLAMA_POOLING_TYPE_NONE) { + // skip computing output for unused tokens + ggml_tensor * inp_out_ids = build_inp_out_ids(); + cur = ggml_get_rows(ctx0, cur, inp_out_ids); + inpL = ggml_get_rows(ctx0, inpL, inp_out_ids); + } + + // re-add the layer input + cur = ggml_add(ctx0, cur, inpL); + + // attention layer norm + cur = build_norm(cur, model.layers[il].attn_out_norm, model.layers[il].attn_out_norm_b, LLM_NORM, il); + + if (model.layers[il].attn_norm_2 != nullptr) { + cur = ggml_add(ctx0, cur, inpL); // re-add the layer input + cur = build_norm(cur, model.layers[il].attn_norm_2, model.layers[il].attn_norm_2_b, LLM_NORM, il); + } + + ggml_tensor * ffn_inp = cur; + cb(ffn_inp, "ffn_inp", il); + + // feed-forward network + if (model.arch == LLM_ARCH_BERT) { + cur = build_ffn(cur, + model.layers[il].ffn_up, model.layers[il].ffn_up_b, NULL, + NULL, NULL, NULL, + model.layers[il].ffn_down, model.layers[il].ffn_down_b, NULL, + NULL, + LLM_FFN_GELU, LLM_FFN_SEQ, il); + } else if (model.arch == LLM_ARCH_JINA_BERT_V2) { + cur = build_ffn(cur, + model.layers[il].ffn_up, NULL, NULL, + model.layers[il].ffn_gate, NULL, NULL, + model.layers[il].ffn_down, model.layers[il].ffn_down_b, NULL, + NULL, + LLM_FFN_GELU, LLM_FFN_PAR, il); + } else { + cur = build_ffn(cur, + model.layers[il].ffn_up, NULL, NULL, + model.layers[il].ffn_gate, NULL, NULL, + model.layers[il].ffn_down, NULL, NULL, + NULL, + LLM_FFN_SILU, LLM_FFN_PAR, il); + } + cb(cur, "ffn_out", il); + + // attentions bypass the intermediate layer + cur = ggml_add(ctx0, cur, ffn_inp); + + // output layer norm + cur = build_norm(cur, model.layers[il].layer_out_norm, model.layers[il].layer_out_norm_b, LLM_NORM, il); + + // input for next layer + inpL = cur; + } + + cur = inpL; + + cb(cur, "result_embd", -1); + res->t_embd = cur; + + ggml_build_forward_expand(gf, cur); + } +}; + +struct llm_build_bloom : public llm_graph_context { + llm_build_bloom(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) { + const int64_t n_embd_head = hparams.n_embd_head_v; + const int64_t n_embd_gqa = hparams.n_embd_v_gqa(); + + GGML_ASSERT(n_embd_head == hparams.n_embd_head_k); + + ggml_tensor * cur; + ggml_tensor * inpL; + + inpL = build_inp_embd(model.tok_embd); + + auto * inp_attn = build_attn_inp_kv_unified(true, false); + + inpL = build_norm(inpL, + model.tok_norm, + model.tok_norm_b, + LLM_NORM, -1); + cb(inpL, "inp_norm", -1); + + for (int il = 0; il < n_layer; ++il) { + cur = build_norm(inpL, + model.layers[il].attn_norm, + model.layers[il].attn_norm_b, + LLM_NORM, il); + cb(cur, "attn_norm", il); + + // self-attention + { + cur = 
build_lora_mm(model.layers[il].wqkv, cur); + cb(cur, "wqkv", il); + + cur = ggml_add(ctx0, cur, model.layers[il].bqkv); + cb(cur, "bqkv", il); + + ggml_tensor * Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd))); + ggml_tensor * Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd))); + ggml_tensor * Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd + n_embd_gqa))); + + cb(Qcur, "Qcur", il); + cb(Kcur, "Kcur", il); + cb(Vcur, "Vcur", il); + + Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens); + + cur = build_attn(inp_attn, gf, + model.layers[il].wo, model.layers[il].bo, + Qcur, Kcur, Vcur, nullptr, 1.0f/sqrtf(float(n_embd_head)), il); + } + + if (il == n_layer - 1) { + // skip computing output for unused tokens + ggml_tensor * inp_out_ids = build_inp_out_ids(); + cur = ggml_get_rows(ctx0, cur, inp_out_ids); + inpL = ggml_get_rows(ctx0, inpL, inp_out_ids); + } + + // Add the input + ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpL); + cb(ffn_inp, "ffn_inp", il); + + // FF + { + cur = build_norm(ffn_inp, + model.layers[il].ffn_norm, + model.layers[il].ffn_norm_b, + LLM_NORM, il); + cb(cur, "ffn_norm", il); + + cur = build_ffn(cur, + model.layers[il].ffn_up, model.layers[il].ffn_up_b, NULL, + NULL, NULL, NULL, + model.layers[il].ffn_down, model.layers[il].ffn_down_b, NULL, + NULL, + LLM_FFN_GELU, LLM_FFN_SEQ, il); + cb(cur, "ffn_out", il); + } + + cur = ggml_add(ctx0, cur, ffn_inp); + + cur = build_cvec(cur, il); + cb(cur, "l_out", il); + + // input for next layer + inpL = cur; + } + + cur = build_norm(inpL, + model.output_norm, + model.output_norm_b, + LLM_NORM, -1); + + cb(cur, "result_norm", -1); + res->t_embd = cur; + + cur = build_lora_mm(model.output, cur); + + cb(cur, "result_output", -1); + res->t_logits = cur; + + ggml_build_forward_expand(gf, cur); + } +}; + +struct llm_build_mpt : public llm_graph_context { + llm_build_mpt(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) { + const int64_t n_embd_head = hparams.n_embd_head_v; + const int64_t n_embd_gqa = hparams.n_embd_v_gqa(); + + GGML_ASSERT(n_embd_head == hparams.n_embd_head_k); + + ggml_tensor * cur; + ggml_tensor * pos; + ggml_tensor * inpL; + + inpL = build_inp_embd(model.tok_embd); + + auto * inp_attn = build_attn_inp_kv_unified(true, false); + + if (model.pos_embd) { + // inp_pos - contains the positions + ggml_tensor * inp_pos = build_inp_pos(); + pos = ggml_get_rows(ctx0, model.pos_embd, inp_pos); + cb(pos, "pos_embd", -1); + + inpL = ggml_add(ctx0, inpL, pos); + cb(inpL, "inpL", -1); + } + + for (int il = 0; il < n_layer; ++il) { + ggml_tensor * attn_norm; + + attn_norm = build_norm(inpL, + model.layers[il].attn_norm, + model.layers[il].attn_norm_b, + LLM_NORM, il); + cb(attn_norm, "attn_norm", il); + + // self-attention + { + cur = attn_norm; + + cur = build_lora_mm(model.layers[il].wqkv, cur); + cb(cur, "wqkv", il); + + if (model.layers[il].bqkv){ + cur = ggml_add(ctx0, cur, model.layers[il].bqkv); + cb(cur, "bqkv", il); + } + + if (hparams.f_clamp_kqv > 0.0f) { + cur = ggml_clamp(ctx0, cur, -hparams.f_clamp_kqv, hparams.f_clamp_kqv); + cb(cur, "wqkv_clamped", il); + } + + ggml_tensor * Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd))); + ggml_tensor * Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 
1*sizeof(float)*(n_embd))); + ggml_tensor * Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd + n_embd_gqa))); + + cb(Qcur, "Qcur", il); + cb(Kcur, "Kcur", il); + cb(Vcur, "Vcur", il); + + // Q/K Layernorm + if (model.layers[il].attn_q_norm) { + Qcur = build_norm(Qcur, + model.layers[il].attn_q_norm, + model.layers[il].attn_q_norm_b, + LLM_NORM, il); + cb(Qcur, "Qcur", il); + + Kcur = build_norm(Kcur, + model.layers[il].attn_k_norm, + model.layers[il].attn_k_norm_b, + LLM_NORM, il); + cb(Kcur, "Kcur", il); + + Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens); + Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens); + + cur = build_attn(inp_attn, gf, + model.layers[il].wo, model.layers[il].bo, + Qcur, Kcur, Vcur, nullptr, 1.0f/sqrtf(float(n_embd_head)), il); + } else { + Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens); + + cur = build_attn(inp_attn, gf, + model.layers[il].wo, model.layers[il].bo, + Qcur, Kcur, Vcur, nullptr, 1.0f/sqrtf(float(n_embd_head)), il); + } + } + + if (il == n_layer - 1) { + // skip computing output for unused tokens + ggml_tensor * inp_out_ids = build_inp_out_ids(); + cur = ggml_get_rows(ctx0, cur, inp_out_ids); + inpL = ggml_get_rows(ctx0, inpL, inp_out_ids); + } + + // Add the input + ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpL); + cb(ffn_inp, "ffn_inp", il); + + // feed forward + { + cur = build_norm(ffn_inp, + model.layers[il].ffn_norm, + model.layers[il].ffn_norm_b, + LLM_NORM, il); + cb(cur, "ffn_norm", il); + cur = build_ffn(cur, + model.layers[il].ffn_up, model.layers[il].ffn_up_b, NULL, + NULL, NULL, NULL, + model.layers[il].ffn_down, model.layers[il].ffn_down_b, NULL, + model.layers[il].ffn_act, + LLM_FFN_GELU, LLM_FFN_SEQ, il); + cb(cur, "ffn_out", il); + } + + cur = ggml_add(ctx0, cur, ffn_inp); + + cur = build_cvec(cur, il); + cb(cur, "l_out", il); + + // input for next layer + inpL = cur; + } + + cur = inpL; + + cur = build_norm(cur, + model.output_norm, + model.output_norm_b, + LLM_NORM, -1); + + cb(cur, "result_norm", -1); + res->t_embd = cur; + + cur = build_lora_mm(model.output, cur); + + cb(cur, "result_output", -1); + res->t_logits = cur; + + ggml_build_forward_expand(gf, cur); + } +}; + +struct llm_build_stablelm : public llm_graph_context { + llm_build_stablelm(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) { + const int64_t n_embd_head = hparams.n_embd_head_v; + + GGML_ASSERT(n_embd_head == hparams.n_embd_head_k); + + ggml_tensor * cur; + ggml_tensor * inpL; + + inpL = build_inp_embd(model.tok_embd); + + // inp_pos - contains the positions + ggml_tensor * inp_pos = build_inp_pos(); + + auto * inp_attn = build_attn_inp_kv_unified(true, false); + + for (int il = 0; il < n_layer; ++il) { + // norm + cur = build_norm(inpL, + model.layers[il].attn_norm, + model.layers[il].attn_norm_b, + LLM_NORM, il); + cb(cur, "attn_norm", il); + + ggml_tensor * inpSA = cur; + + // self-attention + { + // compute Q and K and RoPE them + ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur); + cb(Qcur, "Qcur", il); + if (model.layers[il].bq) { + Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq); + cb(Qcur, "Qcur", il); + } + + ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur); + cb(Kcur, "Kcur", il); + if (model.layers[il].bk) { + Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk); + cb(Kcur, "Kcur", il); + } + + ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur); + 
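The Falcon, DBRX, StarCoder, BLOOM, MPT and Qwen builders above project Q, K and V with one fused wqkv matmul and then slice the result with ggml_view_2d at offsets 0, n_embd and n_embd + n_embd_gqa. The sketch below reproduces that slicing on a raw float buffer with made-up sizes; it is only meant to show where each slice starts, not how ggml views work internally.

    // Sketch of splitting a fused QKV row into Q/K/V slices by offset,
    // mirroring the ggml_view_2d(..., 0 / n_embd / n_embd + n_embd_gqa) pattern.
    #include <cstdio>
    #include <vector>

    int main() {
        const int n_embd     = 8; // query width (illustrative size)
        const int n_embd_gqa = 4; // key/value width with grouped-query attention
        const int n_tokens   = 2;

        const int row = n_embd + 2*n_embd_gqa;     // one fused QKV row per token
        std::vector<float> qkv(row * n_tokens);
        for (size_t i = 0; i < qkv.size(); ++i) {
            qkv[i] = (float) i;                    // pretend this is the wqkv matmul output
        }

        for (int t = 0; t < n_tokens; ++t) {
            const float * base = qkv.data() + t*row;
            const float * Q = base;                        // offset 0,                  width n_embd
            const float * K = base + n_embd;               // offset n_embd,             width n_embd_gqa
            const float * V = base + n_embd + n_embd_gqa;  // offset n_embd + n_embd_gqa, width n_embd_gqa
            printf("token %d: Q[0]=%.0f K[0]=%.0f V[0]=%.0f\n", t, Q[0], K[0], V[0]);
        }
        return 0;
    }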
cb(Vcur, "Vcur", il); + if (model.layers[il].bv) { + Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv); + cb(Vcur, "Vcur", il); + } + + Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens); + cb(Qcur, "Qcur", il); + Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens); + cb(Kcur, "Kcur", il); + + if (model.layers[il].attn_q_norm) { + Qcur = build_norm(Qcur, + model.layers[il].attn_q_norm, + NULL, + LLM_NORM, il); + cb(Qcur, "Qcur", il); + } + if (model.layers[il].attn_k_norm) { + Kcur = build_norm(Kcur, + model.layers[il].attn_k_norm, + NULL, + LLM_NORM, il); + cb(Kcur, "Kcur", il); + } + + + Qcur = ggml_rope_ext( + ctx0, Qcur, inp_pos, nullptr, + n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, + ext_factor, attn_factor, beta_fast, beta_slow + ); + cb(Qcur, "Qcur", il); + + Kcur = ggml_rope_ext( + ctx0, Kcur, inp_pos, nullptr, + n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, + ext_factor, attn_factor, beta_fast, beta_slow + ); + cb(Kcur, "Kcur", il); + + cur = build_attn(inp_attn, gf, + model.layers[il].wo, NULL, + Qcur, Kcur, Vcur, nullptr, 1.0f/sqrtf(float(n_embd_head)), il); + } + + if (il == n_layer - 1) { + // skip computing output for unused tokens + ggml_tensor * inp_out_ids = build_inp_out_ids(); + cur = ggml_get_rows(ctx0, cur, inp_out_ids); + inpL = ggml_get_rows(ctx0, inpL, inp_out_ids); + inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids); + } + + ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpL); + cb(ffn_inp, "ffn_inp", il); + + // feed-forward network + { + if (model.layers[il].ffn_norm) { + cur = build_norm(ffn_inp, + model.layers[il].ffn_norm, + model.layers[il].ffn_norm_b, + LLM_NORM, il); + cb(cur, "ffn_norm", il); + } else { + // parallel residual + cur = inpSA; + } + cur = build_ffn(cur, + model.layers[il].ffn_up, NULL, NULL, + model.layers[il].ffn_gate, NULL, NULL, + model.layers[il].ffn_down, NULL, NULL, + NULL, + LLM_FFN_SILU, LLM_FFN_PAR, il); + cb(cur, "ffn_out", il); + } + + cur = ggml_add(ctx0, cur, ffn_inp); + + cur = build_cvec(cur, il); + cb(cur, "l_out", il); + + // input for next layer + inpL = cur; + } + + cur = inpL; + + cur = build_norm(cur, + model.output_norm, + model.output_norm_b, + LLM_NORM, -1); + + cb(cur, "result_norm", -1); + res->t_embd = cur; + + // lm_head + cur = build_lora_mm(model.output, cur); + + cb(cur, "result_output", -1); + res->t_logits = cur; + + ggml_build_forward_expand(gf, cur); + } +}; + +struct llm_build_qwen : public llm_graph_context { + llm_build_qwen(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) { + const int64_t n_embd_head = hparams.n_embd_head_v; + + GGML_ASSERT(n_embd_head == hparams.n_embd_head_k); + + ggml_tensor * cur; + ggml_tensor * inpL; + + inpL = build_inp_embd(model.tok_embd); + + // inp_pos - contains the positions + ggml_tensor * inp_pos = build_inp_pos(); + + auto * inp_attn = build_attn_inp_kv_unified(true, false); + + for (int il = 0; il < n_layer; ++il) { + ggml_tensor * inpSA = inpL; + + cur = build_norm(inpL, + model.layers[il].attn_norm, NULL, + LLM_NORM_RMS, il); + cb(cur, "attn_norm", il); + + // self-attention + { + cur = build_lora_mm(model.layers[il].wqkv, cur); + cb(cur, "wqkv", il); + + cur = ggml_add(ctx0, cur, model.layers[il].bqkv); + cb(cur, "bqkv", il); + + ggml_tensor * Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd))); + ggml_tensor * Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd))); + 
ggml_tensor * Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 2*sizeof(float)*(n_embd))); + + cb(Qcur, "Qcur", il); + cb(Kcur, "Kcur", il); + cb(Vcur, "Vcur", il); + + Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens); + Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens); + + // using mode = 2 for neox mode + Qcur = ggml_rope_ext( + ctx0, Qcur, inp_pos, nullptr, n_rot, rope_type, n_ctx_orig, + freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow + ); + cb(Qcur, "Qcur", il); + + Kcur = ggml_rope_ext( + ctx0, Kcur, inp_pos, nullptr, n_rot, rope_type, n_ctx_orig, + freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow + ); + cb(Kcur, "Kcur", il); + + cur = build_attn(inp_attn, gf, + model.layers[il].wo, NULL, + Qcur, Kcur, Vcur, nullptr, 1.0f/sqrtf(float(n_embd_head)), il); + } + + if (il == n_layer - 1) { + // skip computing output for unused tokens + ggml_tensor * inp_out_ids = build_inp_out_ids(); + cur = ggml_get_rows(ctx0, cur, inp_out_ids); + inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids); + } + + ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA); + cb(ffn_inp, "ffn_inp", il); + + // feed-forward network + { + cur = build_norm(ffn_inp, + model.layers[il].ffn_norm, NULL, + LLM_NORM_RMS, il); + cb(cur, "ffn_norm", il); + + cur = build_ffn(cur, + model.layers[il].ffn_up, NULL, NULL, + model.layers[il].ffn_gate, NULL, NULL, + model.layers[il].ffn_down, NULL, NULL, + NULL, + LLM_FFN_SILU, LLM_FFN_PAR, il); + cb(cur, "ffn_out", il); + } + + cur = ggml_add(ctx0, cur, ffn_inp); + + cur = build_cvec(cur, il); + cb(cur, "l_out", il); + + // input for next layer + inpL = cur; + } + + cur = inpL; + + cur = build_norm(cur, + model.output_norm, NULL, + LLM_NORM_RMS, -1); + + cb(cur, "result_norm", -1); + res->t_embd = cur; + + // lm_head + cur = build_lora_mm(model.output, cur); + + cb(cur, "result_output", -1); + res->t_logits = cur; + + ggml_build_forward_expand(gf, cur); + } +}; + +struct llm_build_qwen2 : public llm_graph_context { + llm_build_qwen2(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) { + const int64_t n_embd_head = hparams.n_embd_head_v; + + GGML_ASSERT(n_embd_head == hparams.n_embd_head_k); + GGML_ASSERT(n_embd_head == hparams.n_rot); + + ggml_tensor * cur; + ggml_tensor * inpL; + + inpL = build_inp_embd(model.tok_embd); + + // inp_pos - contains the positions + ggml_tensor * inp_pos = build_inp_pos(); + + auto * inp_attn = build_attn_inp_kv_unified(true, false); + + for (int il = 0; il < n_layer; ++il) { + ggml_tensor * inpSA = inpL; + + // norm + cur = build_norm(inpL, + model.layers[il].attn_norm, NULL, + LLM_NORM_RMS, il); + cb(cur, "attn_norm", il); + + // self-attention + { + // compute Q and K and RoPE them + ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur); + cb(Qcur, "Qcur", il); + Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq); + cb(Qcur, "Qcur", il); + + ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur); + cb(Kcur, "Kcur", il); + Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk); + cb(Kcur, "Kcur", il); + + ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur); + cb(Vcur, "Vcur", il); + Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv); + cb(Vcur, "Vcur", il); + + Qcur = ggml_rope_ext( + ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos, nullptr, + n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, + ext_factor, attn_factor, beta_fast, beta_slow + ); + cb(Qcur, 
"Qcur", il); + + Kcur = ggml_rope_ext( + ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos, nullptr, + n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, + ext_factor, attn_factor, beta_fast, beta_slow + ); + cb(Kcur, "Kcur", il); + + cur = build_attn(inp_attn, gf, + model.layers[il].wo, model.layers[il].bo, + Qcur, Kcur, Vcur, nullptr, 1.0f/sqrtf(float(n_embd_head)), il); + } + + if (il == n_layer - 1) { + // skip computing output for unused tokens + ggml_tensor * inp_out_ids = build_inp_out_ids(); + cur = ggml_get_rows(ctx0, cur, inp_out_ids); + inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids); + } + + ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA); + cb(ffn_inp, "ffn_inp", il); + + // feed-forward network + cur = build_norm(ffn_inp, + model.layers[il].ffn_norm, NULL, + LLM_NORM_RMS, il); + cb(cur, "ffn_norm", il); + + cur = build_ffn(cur, + model.layers[il].ffn_up, NULL, NULL, + model.layers[il].ffn_gate, NULL, NULL, + model.layers[il].ffn_down, NULL, NULL, + NULL, + LLM_FFN_SILU, LLM_FFN_PAR, il); + cb(cur, "ffn_out", il); + + cur = ggml_add(ctx0, cur, ffn_inp); + + cur = build_cvec(cur, il); + cb(cur, "l_out", il); + + // input for next layer + inpL = cur; + } + + cur = inpL; + + cur = build_norm(cur, + model.output_norm, NULL, + LLM_NORM_RMS, -1); + + cb(cur, "result_norm", -1); + res->t_embd = cur; + + // lm_head + cur = build_lora_mm(model.output, cur); + + cb(cur, "result_output", -1); + res->t_logits = cur; + + ggml_build_forward_expand(gf, cur); + } +}; + +struct llm_build_qwen2vl : public llm_graph_context { + llm_build_qwen2vl(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) { + const int64_t n_embd_head = hparams.n_embd_head_v; + + GGML_ASSERT(n_embd_head == hparams.n_embd_head_k); + GGML_ASSERT(n_embd_head == hparams.n_rot); + + ggml_tensor * cur; + ggml_tensor * inpL; + + inpL = build_inp_embd(model.tok_embd); + + // inp_pos - contains the positions + ggml_tensor * inp_pos = build_inp_pos(); + + auto * inp_attn = build_attn_inp_kv_unified(true, false); + + int sections[4]; + std::copy(std::begin(hparams.rope_sections), std::begin(hparams.rope_sections) + 4, sections); + + for (int il = 0; il < n_layer; ++il) { + ggml_tensor * inpSA = inpL; + + // norm + cur = build_norm(inpL, + model.layers[il].attn_norm, NULL, + LLM_NORM_RMS, il); + cb(cur, "attn_norm", il); + + // self-attention + { + // compute Q and K and RoPE them + ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur); + cb(Qcur, "Qcur", il); + Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq); + cb(Qcur, "Qcur", il); + + ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur); + cb(Kcur, "Kcur", il); + Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk); + cb(Kcur, "Kcur", il); + + ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur); + cb(Vcur, "Vcur", il); + Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv); + cb(Vcur, "Vcur", il); + + Qcur = ggml_rope_multi( + ctx0, + ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos, nullptr, + n_rot, sections, rope_type, n_ctx_orig, freq_base, freq_scale, + ext_factor, attn_factor, beta_fast, beta_slow + ); + cb(Qcur, "Qcur", il); + + Kcur = ggml_rope_multi( + ctx0, + ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos, nullptr, + n_rot, sections, rope_type, n_ctx_orig, freq_base, freq_scale, + ext_factor, attn_factor, beta_fast, beta_slow + ); + cb(Kcur, "Kcur", il); + + cur = build_attn(inp_attn, gf, + model.layers[il].wo, 
model.layers[il].bo, + Qcur, Kcur, Vcur, nullptr, 1.0f/sqrtf(float(n_embd_head)), il); + } + + if (il == n_layer - 1) { + // skip computing output for unused tokens + ggml_tensor * inp_out_ids = build_inp_out_ids(); + cur = ggml_get_rows(ctx0, cur, inp_out_ids); + inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids); + } + + ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA); + cb(ffn_inp, "ffn_inp", il); + + // feed-forward network + cur = build_norm(ffn_inp, + model.layers[il].ffn_norm, NULL, + LLM_NORM_RMS, il); + cb(cur, "ffn_norm", il); + + cur = build_ffn(cur, + model.layers[il].ffn_up, NULL, NULL, + model.layers[il].ffn_gate, NULL, NULL, + model.layers[il].ffn_down, NULL, NULL, + NULL, + LLM_FFN_SILU, LLM_FFN_PAR, il); + cb(cur, "ffn_out", il); + + cur = ggml_add(ctx0, cur, ffn_inp); + + cur = build_cvec(cur, il); + cb(cur, "l_out", il); + + // input for next layer + inpL = cur; + } + + cur = inpL; + + cur = build_norm(cur, + model.output_norm, NULL, + LLM_NORM_RMS, -1); + + cb(cur, "result_norm", -1); + res->t_embd = cur; + + // lm_head + cur = build_lora_mm(model.output, cur); + + cb(cur, "result_output", -1); + res->t_logits = cur; + + ggml_build_forward_expand(gf, cur); + } +}; + +struct llm_build_qwen2moe : public llm_graph_context { + llm_build_qwen2moe(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) { + const int64_t n_embd_head = hparams.n_embd_head_v; + + GGML_ASSERT(n_embd_head == hparams.n_embd_head_k); + GGML_ASSERT(n_embd_head == hparams.n_rot); + + ggml_tensor * cur; + ggml_tensor * inpL; + + inpL = build_inp_embd(model.tok_embd); + + // inp_pos - contains the positions + ggml_tensor * inp_pos = build_inp_pos(); + + auto * inp_attn = build_attn_inp_kv_unified(true, false); + + for (int il = 0; il < n_layer; ++il) { + ggml_tensor * inpSA = inpL; + + // norm + cur = build_norm(inpL, + model.layers[il].attn_norm, NULL, + LLM_NORM_RMS, il); + cb(cur, "attn_norm", il); + + // self_attention + { + // compute Q and K and RoPE them + ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur); + cb(Qcur, "Qcur", il); + Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq); + cb(Qcur, "Qcur", il); + + ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur); + cb(Kcur, "Kcur", il); + Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk); + cb(Kcur, "Kcur", il); + + ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur); + cb(Vcur, "Vcur", il); + Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv); + cb(Vcur, "Vcur", il); + + Qcur = ggml_rope_ext( + ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos, nullptr, + n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, + ext_factor, attn_factor, beta_fast, beta_slow + ); + cb(Qcur, "Qcur", il); + + Kcur = ggml_rope_ext( + ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos, nullptr, + n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, + ext_factor, attn_factor, beta_fast, beta_slow + ); + cb(Kcur, "Kcur", il); + + cur = build_attn(inp_attn, gf, + model.layers[il].wo, model.layers[il].bo, + Qcur, Kcur, Vcur, nullptr, 1.0f/sqrtf(float(n_embd_head)), il); + } + + if (il == n_layer - 1) { + // skip computing output for unused tokens + ggml_tensor * inp_out_ids = build_inp_out_ids(); + cur = ggml_get_rows(ctx0, cur, inp_out_ids); + inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids); + } + + ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA); + cb(ffn_inp, "ffn_inp", il); + + // MoE branch + cur = build_norm(ffn_inp, + 
model.layers[il].ffn_norm, NULL, + LLM_NORM_RMS, il); + cb(cur, "ffn_norm", il); + + ggml_tensor * moe_out = + build_moe_ffn(cur, + model.layers[il].ffn_gate_inp, + model.layers[il].ffn_up_exps, + model.layers[il].ffn_gate_exps, + model.layers[il].ffn_down_exps, + nullptr, + n_expert, n_expert_used, + LLM_FFN_SILU, false, + false, 0.0, + LLAMA_EXPERT_GATING_FUNC_TYPE_SOFTMAX, + il); + cb(cur, "ffn_moe_out", il); + + // FFN shared expert + { + ggml_tensor * cur_gate_inp = build_lora_mm(model.layers[il].ffn_gate_inp_shexp, cur); + cb(cur_gate_inp, "ffn_shexp_gate_inp", il); + + // sigmoid + ggml_tensor * cur_gate = ggml_div(ctx0, ggml_silu(ctx0, cur_gate_inp), cur_gate_inp); + cb(cur_gate, "ffn_shexp_gate", il); + + ggml_tensor * cur_ffn = build_ffn(cur, + model.layers[il].ffn_up_shexp, NULL, NULL, + model.layers[il].ffn_gate_shexp, NULL, NULL, + model.layers[il].ffn_down_shexp, NULL, NULL, + NULL, + LLM_FFN_SILU, LLM_FFN_PAR, il); + cb(cur_ffn, "ffn_shexp", il); + + ggml_tensor * ffn_shexp_out = ggml_mul(ctx0, cur_ffn, cur_gate); + cb(ffn_shexp_out, "ffn_shexp_out", il); + + moe_out = ggml_add(ctx0, moe_out, ffn_shexp_out); + cb(moe_out, "ffn_out", il); + + cur = moe_out; + } + + cur = ggml_add(ctx0, cur, ffn_inp); + + cur = build_cvec(cur, il); + cb(cur, "l_out", il); + + // input for next layer + inpL = cur; + } + + cur = inpL; + + cur = build_norm(cur, + model.output_norm, NULL, + LLM_NORM_RMS, -1); + + cb(cur, "result_norm", -1); + res->t_embd = cur; + + // lm_head + cur = build_lora_mm(model.output, cur); + + cb(cur, "result_output", -1); + res->t_logits = cur; + + ggml_build_forward_expand(gf, cur); + } +}; + +struct llm_build_phi2 : public llm_graph_context { + llm_build_phi2(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) { + const int64_t n_embd_head = hparams.n_embd_head_v; + const int64_t n_embd_gqa = hparams.n_embd_v_gqa(); + + GGML_ASSERT(n_embd_head == hparams.n_embd_head_k); + + ggml_tensor * cur; + ggml_tensor * attn_norm_output; + ggml_tensor * ffn_output; + ggml_tensor * inpL; + + inpL = build_inp_embd(model.tok_embd); + + // inp_pos - contains the positions + ggml_tensor * inp_pos = build_inp_pos(); + + auto * inp_attn = build_attn_inp_kv_unified(true, false); + + for (int il = 0; il < n_layer; ++il) { + attn_norm_output = build_norm(inpL, + model.layers[il].attn_norm, + model.layers[il].attn_norm_b, + LLM_NORM, il); + cb(attn_norm_output, "attn_norm", il); + + // self-attention + { + ggml_tensor * Qcur = nullptr; + ggml_tensor * Kcur = nullptr; + ggml_tensor * Vcur = nullptr; + + if (model.layers[il].wqkv) { + cur = build_lora_mm(model.layers[il].wqkv, attn_norm_output); + cb(cur, "wqkv", il); + + cur = ggml_add(ctx0, cur, model.layers[il].bqkv); + cb(cur, "bqkv", il); + + Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd))); + Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd))); + Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd + n_embd_gqa))); + } else { + Qcur = ggml_add(ctx0, build_lora_mm(model.layers[il].wq, attn_norm_output), model.layers[il].bq); + Kcur = ggml_add(ctx0, build_lora_mm(model.layers[il].wk, attn_norm_output), model.layers[il].bk); + Vcur = ggml_add(ctx0, build_lora_mm(model.layers[il].wv, attn_norm_output), model.layers[il].bv); + } + + cb(Qcur, "Qcur", il); + cb(Kcur, "Kcur", il); + cb(Vcur, "Vcur", il); + + Qcur = 
ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens); + Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens); + + Qcur = ggml_rope_ext( + ctx0, Qcur, inp_pos, nullptr, n_rot, rope_type, n_ctx_orig, + freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow + ); + cb(Qcur, "Qcur", il); + + // with phi2, we scale the Q to avoid precision issues + // ref: https://github.com/ml-explore/mlx-examples/blob/08e862336ade809bc37d1035f94b359e7d1a5152/phi2/phi2.py#L64-L66 + Qcur = ggml_scale(ctx0, Qcur, 1.0f/sqrtf(float(n_embd_head))); + cb(Qcur, "Qcur", il); + + Kcur = ggml_rope_ext( + ctx0, Kcur, inp_pos, nullptr, n_rot, rope_type, n_ctx_orig, + freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow + ); + cb(Kcur, "Kcur", il); + + cur = build_attn(inp_attn, gf, + model.layers[il].wo, model.layers[il].bo, + Qcur, Kcur, Vcur, nullptr, 1.0f, il); + } + + if (il == n_layer - 1) { + // skip computing output for unused tokens + ggml_tensor * inp_out_ids = build_inp_out_ids(); + cur = ggml_get_rows(ctx0, cur, inp_out_ids); + inpL = ggml_get_rows(ctx0, inpL, inp_out_ids); + attn_norm_output = ggml_get_rows(ctx0, attn_norm_output, inp_out_ids); + } + + // FF + { + ffn_output = build_ffn(attn_norm_output, + model.layers[il].ffn_up, model.layers[il].ffn_up_b, NULL, + NULL, NULL, NULL, + model.layers[il].ffn_down, model.layers[il].ffn_down_b, NULL, + NULL, + LLM_FFN_GELU, LLM_FFN_SEQ, il); + cb(ffn_output, "ffn_out", il); + } + + cur = ggml_add(ctx0, cur, ffn_output); + cur = ggml_add(ctx0, cur, inpL); + + cur = build_cvec(cur, il); + cb(cur, "l_out", il); + + // input for next layer + inpL = cur; + } + + cur = build_norm(inpL, + model.output_norm, + model.output_norm_b, + LLM_NORM, -1); + + cb(cur, "result_norm", -1); + res->t_embd = cur; + + cur = build_lora_mm(model.output, cur); + cb(cur, "result_output_no_bias", -1); + + cur = ggml_add(ctx0, cur, model.output_b); + + cb(cur, "result_output", -1); + res->t_logits = cur; + + ggml_build_forward_expand(gf, cur); + } +}; + +struct llm_build_phi3 : public llm_graph_context { + llm_build_phi3(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) { + const int64_t n_embd_head = hparams.n_embd_head_v; + const int64_t n_embd_gqa = hparams.n_embd_v_gqa(); + + GGML_ASSERT(n_embd_head == hparams.n_embd_head_k); + + ggml_tensor * cur; + ggml_tensor * inpL; + + inpL = build_inp_embd(model.tok_embd); + + // inp_pos - contains the positions + ggml_tensor * inp_pos = build_inp_pos(); + + auto * inp_attn = build_attn_inp_kv_unified(true, true); + + for (int il = 0; il < n_layer; ++il) { + auto * residual = inpL; + + // self-attention + { + // rope freq factors for 128k context + ggml_tensor * rope_factors = static_cast(memory)->cbs.get_rope_factors(n_ctx_per_seq, il); + + ggml_tensor* attn_norm_output = build_norm(inpL, + model.layers[il].attn_norm, + model.layers[il].attn_norm_b, + LLM_NORM_RMS, il); + cb(attn_norm_output, "attn_norm", il); + + ggml_tensor * Qcur = nullptr; + ggml_tensor * Kcur = nullptr; + ggml_tensor * Vcur = nullptr; + + if (model.layers[il].wqkv) { + cur = build_lora_mm(model.layers[il].wqkv, attn_norm_output); + cb(cur, "wqkv", il); + + Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0 * sizeof(float) * (n_embd))); + Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1 * sizeof(float) * (n_embd))); + Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1 * 
sizeof(float) * (n_embd + n_embd_gqa))); + } else { + Qcur = ggml_add(ctx0, build_lora_mm(model.layers[il].wq, attn_norm_output), model.layers[il].bq); + Kcur = ggml_add(ctx0, build_lora_mm(model.layers[il].wk, attn_norm_output), model.layers[il].bk); + Vcur = ggml_add(ctx0, build_lora_mm(model.layers[il].wv, attn_norm_output), model.layers[il].bv); + } + + cb(Qcur, "Qcur", il); + cb(Kcur, "Kcur", il); + cb(Vcur, "Vcur", il); + + Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens); + Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens); + + Qcur = ggml_rope_ext( + ctx0, Qcur, inp_pos, rope_factors, n_rot, rope_type, n_ctx_orig, + freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow + ); + cb(Qcur, "Qcur", il); + + Qcur = ggml_scale(ctx0, Qcur, 1.0f / sqrtf(float(n_embd_head))); + cb(Qcur, "Qcur", il); + + Kcur = ggml_rope_ext( + ctx0, Kcur, inp_pos, rope_factors, n_rot, rope_type, n_ctx_orig, + freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow + ); + cb(Kcur, "Kcur", il); + + cur = build_attn(inp_attn, gf, + model.layers[il].wo, model.layers[il].bo, + Qcur, Kcur, Vcur, nullptr, 1.0f, il); + } + + if (il == n_layer - 1) { + // skip computing output for unused tokens + ggml_tensor* inp_out_ids = build_inp_out_ids(); + cur = ggml_get_rows(ctx0, cur, inp_out_ids); + residual = ggml_get_rows(ctx0, residual, inp_out_ids); + } + + cur = ggml_add(ctx0, cur, residual); + residual = cur; + + cur = build_norm(cur, + model.layers[il].ffn_norm, model.layers[il].ffn_norm_b, + LLM_NORM_RMS, il); + cb(cur, "ffn_norm", il); + + // feed-forward network + if (model.layers[il].ffn_gate_inp == nullptr) { + cur = build_ffn(cur, + model.layers[il].ffn_up, NULL, NULL, + NULL, NULL, NULL, + model.layers[il].ffn_down, NULL, NULL, + NULL, + LLM_FFN_SWIGLU, LLM_FFN_SEQ, il); + cb(cur, "ffn_out", il); + } else { + // MoE branch + cur = build_moe_ffn(cur, + model.layers[il].ffn_gate_inp, + model.layers[il].ffn_up_exps, + model.layers[il].ffn_gate_exps, + model.layers[il].ffn_down_exps, + nullptr, + n_expert, n_expert_used, + LLM_FFN_SILU, true, + false, 0.0, + LLAMA_EXPERT_GATING_FUNC_TYPE_SOFTMAX, + il); + cb(cur, "ffn_moe_out", il); + } + + cur = ggml_add(ctx0, residual, cur); + + cur = build_cvec(cur, il); + cb(cur, "l_out", il); + + // input for next layer + inpL = cur; + } + + cur = build_norm(inpL, + model.output_norm, + model.output_norm_b, + LLM_NORM_RMS, -1); + + cb(cur, "result_norm", -1); + res->t_embd = cur; + + cur = build_lora_mm(model.output, cur); + + if (model.output_b != nullptr) { + cb(cur, "result_output_no_bias", -1); + cur = ggml_add(ctx0, cur, model.output_b); + } + + cb(cur, "result_output", -1); + res->t_logits = cur; + + ggml_build_forward_expand(gf, cur); + } +}; + +struct llm_build_plamo : public llm_graph_context { + llm_build_plamo(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) { + const int64_t n_embd_head = hparams.n_embd_head_v; + + GGML_ASSERT(n_embd_head == hparams.n_embd_head_k); + GGML_ASSERT(n_embd_head == hparams.n_rot); + + ggml_tensor * cur; + ggml_tensor * inpL; + + inpL = build_inp_embd(model.tok_embd); + + // inp_pos - contains the positions + ggml_tensor * inp_pos = build_inp_pos(); + + auto * inp_attn = build_attn_inp_kv_unified(true, false); + + for (int il = 0; il < n_layer; ++il) { + + // norm + cur = build_norm(inpL, + model.layers[il].attn_norm, NULL, + LLM_NORM_RMS, il); + cb(cur, "attn_norm", il); + + ggml_tensor * attention_norm = cur; + + // 
self-attention + { + // compute Q and K and RoPE them + ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur); + cb(Qcur, "Qcur", il); + + ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur); + cb(Kcur, "Kcur", il); + + ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur); + cb(Vcur, "Vcur", il); + + Qcur = ggml_rope_ext( + ctx0, ggml_reshape_3d(ctx0, Qcur, n_rot, n_head, n_tokens), inp_pos, nullptr, + n_embd_head, rope_type, n_ctx_orig, freq_base, freq_scale, + ext_factor, attn_factor, beta_fast, beta_slow); + cb(Qcur, "Qcur", il); + + Kcur = ggml_rope_ext( + ctx0, ggml_reshape_3d(ctx0, Kcur, n_rot, n_head_kv, n_tokens), inp_pos, nullptr, + n_embd_head, rope_type, n_ctx_orig, freq_base, freq_scale, + ext_factor, attn_factor, beta_fast, beta_slow); + cb(Kcur, "Kcur", il); + + cur = build_attn(inp_attn, gf, + model.layers[il].wo, NULL, + Qcur, Kcur, Vcur, nullptr, 1.0f/sqrtf(float(n_embd_head)), il); + } + ggml_tensor * sa_out = cur; + + cur = attention_norm; + + if (il == n_layer - 1) { + // skip computing output for unused tokens + ggml_tensor * inp_out_ids = build_inp_out_ids(); + cur = ggml_get_rows(ctx0, cur, inp_out_ids); + sa_out = ggml_get_rows(ctx0, sa_out, inp_out_ids); + inpL = ggml_get_rows(ctx0, inpL, inp_out_ids); + } + + // feed-forward network + { + cur = build_ffn(cur, + model.layers[il].ffn_up, NULL, NULL, + model.layers[il].ffn_gate, NULL, NULL, + model.layers[il].ffn_down, NULL, NULL, + NULL, + LLM_FFN_SILU, LLM_FFN_PAR, il); + cb(cur, "ffn_out", il); + } + + cur = ggml_add(ctx0, cur, sa_out); + cur = ggml_add(ctx0, cur, inpL); + + cur = build_cvec(cur, il); + cb(cur, "l_out", il); + + // input for next layer + inpL = cur; + } + + cur = inpL; + + cur = build_norm(cur, + model.output_norm, NULL, + LLM_NORM_RMS, -1); + + cb(cur, "result_norm", -1); + res->t_embd = cur; + + // lm_head + cur = build_lora_mm(model.output, cur); + + cb(cur, "result_output", -1); + res->t_logits = cur; + + ggml_build_forward_expand(gf, cur); + } +}; + +struct llm_build_gpt2 : public llm_graph_context { + llm_build_gpt2(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) { + const int64_t n_embd_head = hparams.n_embd_head_v; + const int64_t n_embd_gqa = hparams.n_embd_v_gqa(); + + GGML_ASSERT(n_embd_head == hparams.n_embd_head_k); + + ggml_tensor * cur; + ggml_tensor * pos; + ggml_tensor * inpL; + + inpL = build_inp_embd(model.tok_embd); + + // inp_pos - contains the positions + ggml_tensor * inp_pos = build_inp_pos(); + + auto * inp_attn = build_attn_inp_kv_unified(true, false); + + pos = ggml_get_rows(ctx0, model.pos_embd, inp_pos); + cb(pos, "pos_embd", -1); + + inpL = ggml_add(ctx0, inpL, pos); + cb(inpL, "inpL", -1); + + for (int il = 0; il < n_layer; ++il) { + cur = build_norm(inpL, + model.layers[il].attn_norm, + model.layers[il].attn_norm_b, + LLM_NORM, il); + cb(cur, "attn_norm", il); + + // self-attention + { + cur = build_lora_mm(model.layers[il].wqkv, cur); + cb(cur, "wqkv", il); + + cur = ggml_add(ctx0, cur, model.layers[il].bqkv); + cb(cur, "bqkv", il); + + ggml_tensor * Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd))); + ggml_tensor * Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd))); + ggml_tensor * Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd + n_embd_gqa))); + + cb(Qcur, "Qcur", il); + cb(Kcur, "Kcur", il); + cb(Vcur, "Vcur", il); 
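+                // note: Q, K and V above are 2D views into the fused wqkv output: Q spans the first n_embd
+                // rows, K the next n_embd_gqa rows and V the last n_embd_gqa rows (float offsets 0, n_embd
+                // and n_embd + n_embd_gqa); ggml_cont makes each slice contiguous before it is used below.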
+ + Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens); + + cur = build_attn(inp_attn, gf, + model.layers[il].wo, model.layers[il].bo, + Qcur, Kcur, Vcur, nullptr, 1.0f/sqrtf(float(n_embd_head)), il); + } + + if (il == n_layer - 1) { + // skip computing output for unused tokens + ggml_tensor * inp_out_ids = build_inp_out_ids(); + cur = ggml_get_rows(ctx0, cur, inp_out_ids); + inpL = ggml_get_rows(ctx0, inpL, inp_out_ids); + } + + // add the input + ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpL); + cb(ffn_inp, "ffn_inp", il); + + // FF + { + cur = build_norm(ffn_inp, + model.layers[il].ffn_norm, + model.layers[il].ffn_norm_b, + LLM_NORM, il); + cb(cur, "ffn_norm", il); + + cur = build_ffn(cur, + model.layers[il].ffn_up, model.layers[il].ffn_up_b, NULL, + NULL, NULL, NULL, + model.layers[il].ffn_down, model.layers[il].ffn_down_b, NULL, + NULL, + LLM_FFN_GELU, LLM_FFN_SEQ, il); + cb(cur, "ffn_out", il); + } + + cur = ggml_add(ctx0, cur, ffn_inp); + + cur = build_cvec(cur, il); + cb(cur, "l_out", il); + + // input for next layer + inpL = cur; + } + + cur = build_norm(inpL, + model.output_norm, + model.output_norm_b, + LLM_NORM, -1); + + cb(cur, "result_norm", -1); + res->t_embd = cur; + + cur = build_lora_mm(model.output, cur); + + cb(cur, "result_output", -1); + res->t_logits = cur; + + ggml_build_forward_expand(gf, cur); + } +}; + +struct llm_build_codeshell : public llm_graph_context { + llm_build_codeshell(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) { + const int64_t n_embd_head = hparams.n_embd_head_v; + const int64_t n_embd_gqa = hparams.n_embd_v_gqa(); + + GGML_ASSERT(n_embd_head == hparams.n_embd_head_k); + GGML_ASSERT(n_embd_head == hparams.n_rot); + + ggml_tensor * cur; + ggml_tensor * inpL; + + inpL = build_inp_embd(model.tok_embd); + + // inp_pos - contains the positions + ggml_tensor * inp_pos = build_inp_pos(); + + auto * inp_attn = build_attn_inp_kv_unified(true, false); + + for (int il = 0; il < n_layer; ++il) { + cur = build_norm(inpL, + model.layers[il].attn_norm, + model.layers[il].attn_norm_b, + LLM_NORM, il); + cb(cur, "attn_norm", il); + + // self-attention + { + cur = build_lora_mm(model.layers[il].wqkv, cur); + cb(cur, "wqkv", il); + + cur = ggml_add(ctx0, cur, model.layers[il].bqkv); + cb(cur, "bqkv", il); + + ggml_tensor * tmpq = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd))); + ggml_tensor * tmpk = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd))); + ggml_tensor * Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd + n_embd_gqa))); + + cb(tmpq, "tmpq", il); + cb(tmpk, "tmpk", il); + cb(Vcur, "Vcur", il); + + ggml_tensor * Qcur = ggml_rope_ext( + ctx0, ggml_reshape_3d(ctx0, tmpq, n_embd_head, n_head, n_tokens), inp_pos, nullptr, + n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, + ext_factor, attn_factor, beta_fast, beta_slow + ); + cb(Qcur, "Qcur", il); + + ggml_tensor * Kcur = ggml_rope_ext( + ctx0, ggml_reshape_3d(ctx0, tmpk, n_embd_head, n_head_kv, n_tokens), inp_pos, nullptr, + n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, + ext_factor, attn_factor, beta_fast, beta_slow + ); + cb(Kcur, "Kcur", il); + + cur = build_attn(inp_attn, gf, + model.layers[il].wo, model.layers[il].bo, + Qcur, Kcur, Vcur, nullptr, 1.0f/sqrtf(float(n_embd_head)), il); + } + + if (il == n_layer - 1) { + // skip computing output for unused tokens + 
ggml_tensor * inp_out_ids = build_inp_out_ids(); + cur = ggml_get_rows(ctx0, cur, inp_out_ids); + inpL = ggml_get_rows(ctx0, inpL, inp_out_ids); + } + + // add the input + ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpL); + cb(ffn_inp, "ffn_inp", il); + + // FF + { + cur = build_norm(ffn_inp, + model.layers[il].ffn_norm, + model.layers[il].ffn_norm_b, + LLM_NORM, il); + cb(cur, "ffn_norm", il); + + cur = build_ffn(cur, + model.layers[il].ffn_up, model.layers[il].ffn_up_b, NULL, + NULL, NULL, NULL, + model.layers[il].ffn_down, model.layers[il].ffn_down_b, NULL, + NULL, + LLM_FFN_GELU, LLM_FFN_SEQ, il); + cb(cur, "ffn_out", il); + } + + cur = ggml_add(ctx0, cur, ffn_inp); + + cur = build_cvec(cur, il); + cb(cur, "l_out", il); + + // input for next layer + inpL = cur; + } + + cur = build_norm(inpL, + model.output_norm, + model.output_norm_b, + LLM_NORM, -1); + + cb(cur, "result_norm", -1); + res->t_embd = cur; + + cur = build_lora_mm(model.output, cur); + + cb(cur, "result_output", -1); + res->t_logits = cur; + + ggml_build_forward_expand(gf, cur); + } +}; + +struct llm_build_orion : public llm_graph_context { + llm_build_orion(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) { + const int64_t n_embd_head = hparams.n_embd_head_v; + + GGML_ASSERT(n_embd_head == hparams.n_embd_head_k); + GGML_ASSERT(n_embd_head == hparams.n_rot); + + ggml_tensor * cur; + ggml_tensor * inpL; + + inpL = build_inp_embd(model.tok_embd); + + // inp_pos - contains the positions + ggml_tensor * inp_pos = build_inp_pos(); + + auto * inp_attn = build_attn_inp_kv_unified(true, false); + + for (int il = 0; il < n_layer; ++il) { + ggml_tensor * inpSA = inpL; + + // norm + cur = build_norm(inpL, + model.layers[il].attn_norm, model.layers[il].attn_norm_b, + LLM_NORM, il); + cb(cur, "attn_norm", il); + + // self-attention + { + // compute Q and K and RoPE them + ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur); + cb(Qcur, "Qcur", il); + // if (model.layers[il].bq) { + // Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq); + // cb(Qcur, "Qcur", il); + // } + + ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur); + cb(Kcur, "Kcur", il); + // if (model.layers[il].bk) { + // Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk); + // cb(Kcur, "Kcur", il); + // } + + ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur); + cb(Vcur, "Vcur", il); + // if (model.layers[il].bv) { + // Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv); + // cb(Vcur, "Vcur", il); + // } + + Qcur = ggml_rope_ext( + ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos, nullptr, + n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, + ext_factor, attn_factor, beta_fast, beta_slow + ); + cb(Qcur, "Qcur", il); + + Kcur = ggml_rope_ext( + ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos, nullptr, + n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, + ext_factor, attn_factor, beta_fast, beta_slow + ); + cb(Kcur, "Kcur", il); + + cur = build_attn(inp_attn, gf, + model.layers[il].wo, NULL, + Qcur, Kcur, Vcur, nullptr, 1.0f/sqrtf(float(n_embd_head)), il); + } + + if (il == n_layer - 1) { + // skip computing output for unused tokens + ggml_tensor * inp_out_ids = build_inp_out_ids(); + cur = ggml_get_rows(ctx0, cur, inp_out_ids); + inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids); + } + + ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA); + cb(ffn_inp, "ffn_inp", il); + + // feed-forward network + cur = build_norm(ffn_inp, + 
model.layers[il].ffn_norm, model.layers[il].ffn_norm_b, + LLM_NORM, il); + cb(cur, "ffn_norm", il); + + cur = build_ffn(cur, + model.layers[il].ffn_up, NULL, NULL, + model.layers[il].ffn_gate, NULL, NULL, + model.layers[il].ffn_down, NULL, NULL, + NULL, + LLM_FFN_SILU, LLM_FFN_PAR, il); + cb(cur, "ffn_out", il); + + cur = ggml_add(ctx0, cur, ffn_inp); + + cur = build_cvec(cur, il); + cb(cur, "l_out", il); + + // input for next layer + inpL = cur; + } + + cur = inpL; + + cur = build_norm(cur, + model.output_norm, model.output_norm_b, + LLM_NORM, -1); + + cb(cur, "result_norm", -1); + res->t_embd = cur; + + // lm_head + cur = build_lora_mm(model.output, cur); + + cb(cur, "result_output", -1); + res->t_logits = cur; + + ggml_build_forward_expand(gf, cur); + } +}; + +struct llm_build_internlm2 : public llm_graph_context { + llm_build_internlm2(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) { + const int64_t n_embd_head = hparams.n_embd_head_v; + + GGML_ASSERT(n_embd_head == hparams.n_embd_head_k); + GGML_ASSERT(n_embd_head == hparams.n_rot); + + ggml_tensor * cur; + ggml_tensor * inpL; + + inpL = build_inp_embd(model.tok_embd); + + // inp_pos - contains the positions + ggml_tensor * inp_pos = build_inp_pos(); + + auto * inp_attn = build_attn_inp_kv_unified(true, false); + + for (int il = 0; il < n_layer; ++il) { + ggml_tensor * inpSA = inpL; + + // norm + cur = build_norm(inpL, + model.layers[il].attn_norm, NULL, + LLM_NORM_RMS, il); + cb(cur, "attn_norm", il); + + // self-attention + { + // compute Q and K and RoPE them + ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur); + cb(Qcur, "Qcur", il); + if (model.layers[il].bq) { + Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq); + cb(Qcur, "Qcur", il); + } + + ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur); + cb(Kcur, "Kcur", il); + if (model.layers[il].bk) { + Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk); + cb(Kcur, "Kcur", il); + } + + ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur); + cb(Vcur, "Vcur", il); + if (model.layers[il].bv) { + Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv); + cb(Vcur, "Vcur", il); + } + + Qcur = ggml_rope_ext( + ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos, nullptr, + n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, + ext_factor, attn_factor, beta_fast, beta_slow + ); + cb(Qcur, "Qcur", il); + + Kcur = ggml_rope_ext( + ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos, nullptr, + n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, + ext_factor, attn_factor, beta_fast, beta_slow + ); + cb(Kcur, "Kcur", il); + + cur = build_attn(inp_attn, gf, + model.layers[il].wo, model.layers[il].bo, + Qcur, Kcur, Vcur, nullptr, 1.0f/sqrtf(float(n_embd_head)), il); + } + + if (il == n_layer - 1) { + // skip computing output for unused tokens + ggml_tensor * inp_out_ids = build_inp_out_ids(); + cur = ggml_get_rows(ctx0, cur, inp_out_ids); + inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids); + } + + ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA); + cb(ffn_inp, "ffn_inp", il); + + // feed-forward network + cur = build_norm(ffn_inp, + model.layers[il].ffn_norm, NULL, + LLM_NORM_RMS, il); + cb(cur, "ffn_norm", il); + + cur = build_ffn(cur, + model.layers[il].ffn_up, NULL, NULL, + model.layers[il].ffn_gate, NULL, NULL, + model.layers[il].ffn_down, NULL, NULL, + NULL, + LLM_FFN_SILU, LLM_FFN_PAR, il); + cb(cur, "ffn_out", il); + + cur = ggml_add(ctx0, cur, ffn_inp); + + cur = 
build_cvec(cur, il); + cb(cur, "l_out", il); + + // input for next layer + inpL = cur; + } + + cur = inpL; + + cur = build_norm(cur, + model.output_norm, NULL, + LLM_NORM_RMS, -1); + + cb(cur, "result_norm", -1); + res->t_embd = cur; + + // lm_head + cur = build_lora_mm(model.output, cur); + + cb(cur, "result_output", -1); + res->t_logits = cur; + + ggml_build_forward_expand(gf, cur); + } +}; + +struct llm_build_minicpm3 : public llm_graph_context { + llm_build_minicpm3(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) { + //TODO: if the model varies, these parameters need to be read from the model + const int64_t n_embd_base = 256; + const float scale_embd = 12.0f; + const float scale_depth = 1.4f; + const float kq_scale = 1.0f / sqrtf(float(hparams.n_embd_head_k)); + + const uint32_t n_embd_head_qk_rope = hparams.n_rot; + const uint32_t n_embd_head_qk_nope = hparams.n_embd_head_k - hparams.n_rot; + const uint32_t kv_lora_rank = hparams.n_lora_kv; + + ggml_tensor * cur; + ggml_tensor * inpL; + + inpL = build_inp_embd(model.tok_embd); + + // scale the input embeddings + inpL = ggml_scale(ctx0, inpL, scale_embd); + cb(inpL, "inp_scaled", -1); + + // inp_pos - contains the positions + ggml_tensor * inp_pos = build_inp_pos(); + + auto * inp_attn = build_attn_inp_kv_unified(true, false); + + for (int il = 0; il < n_layer; ++il) { + ggml_tensor * inpSA = inpL; + + ggml_tensor * rope_factors = static_cast(memory)->cbs.get_rope_factors(n_ctx_per_seq, il); + + // norm + cur = build_norm(inpL, + model.layers[il].attn_norm, NULL, + LLM_NORM_RMS, il); + cb(cur, "attn_norm", il); + + // self_attention + { + ggml_tensor * q = NULL; + // {n_embd, q_lora_rank} * {n_embd, n_tokens} -> {q_lora_rank, n_tokens} + q = ggml_mul_mat(ctx0, model.layers[il].wq_a, cur); + cb(q, "q", il); + + q = build_norm(q, + model.layers[il].attn_q_a_norm, NULL, + LLM_NORM_RMS, il); + cb(q, "q", il); + + // {q_lora_rank, n_head * hparams.n_embd_head_k} * {q_lora_rank, n_tokens} -> {n_head * hparams.n_embd_head_k, n_tokens} + q = ggml_mul_mat(ctx0, model.layers[il].wq_b, q); + cb(q, "q", il); + + // split into {n_head * n_embd_head_qk_nope, n_tokens} + ggml_tensor * q_nope = ggml_view_3d(ctx0, q, n_embd_head_qk_nope, n_head, n_tokens, + ggml_row_size(q->type, hparams.n_embd_head_k), + ggml_row_size(q->type, hparams.n_embd_head_k * n_head), + 0); + cb(q_nope, "q_nope", il); + + // and {n_head * n_embd_head_qk_rope, n_tokens} + ggml_tensor * q_pe = ggml_view_3d(ctx0, q, n_embd_head_qk_rope, n_head, n_tokens, + ggml_row_size(q->type, hparams.n_embd_head_k), + ggml_row_size(q->type, hparams.n_embd_head_k * n_head), + ggml_row_size(q->type, n_embd_head_qk_nope)); + cb(q_pe, "q_pe", il); + + // {n_embd, kv_lora_rank + n_embd_head_qk_rope} * {n_embd, n_tokens} -> {kv_lora_rank + n_embd_head_qk_rope, n_tokens} + ggml_tensor * kv_pe_compresseed = ggml_mul_mat(ctx0, model.layers[il].wkv_a_mqa, cur); + cb(kv_pe_compresseed, "kv_pe_compresseed", il); + + // split into {kv_lora_rank, n_tokens} + ggml_tensor * kv_compressed = ggml_view_2d(ctx0, kv_pe_compresseed, kv_lora_rank, n_tokens, + kv_pe_compresseed->nb[1], + 0); + cb(kv_compressed, "kv_compressed", il); + + // and {n_embd_head_qk_rope, n_tokens} + ggml_tensor * k_pe = ggml_view_3d(ctx0, kv_pe_compresseed, n_embd_head_qk_rope, 1, n_tokens, + kv_pe_compresseed->nb[1], + kv_pe_compresseed->nb[1], + ggml_row_size(kv_pe_compresseed->type, kv_lora_rank)); + cb(k_pe, "k_pe", il); + + // TODO: the CUDA backend used to not support 
non-cont. (RMS) norm, investigate removing ggml_cont + kv_compressed = ggml_cont(ctx0, kv_compressed); + kv_compressed = build_norm(kv_compressed, + model.layers[il].attn_kv_a_norm, NULL, + LLM_NORM_RMS, il); + cb(kv_compressed, "kv_compressed", il); + + // {kv_lora_rank, n_head * (n_embd_head_qk_nope + n_embd_head_v)} * {kv_lora_rank, n_tokens} -> {n_head * (n_embd_head_qk_nope + n_embd_head_v), n_tokens} + ggml_tensor * kv = ggml_mul_mat(ctx0, model.layers[il].wkv_b, kv_compressed); + cb(kv, "kv", il); + + // split into {n_head * n_embd_head_qk_nope, n_tokens} + ggml_tensor * k_nope = ggml_view_3d(ctx0, kv, n_embd_head_qk_nope, n_head, n_tokens, + ggml_row_size(kv->type, n_embd_head_qk_nope + hparams.n_embd_head_v), + ggml_row_size(kv->type, n_head * (n_embd_head_qk_nope + hparams.n_embd_head_v)), + 0); + cb(k_nope, "k_nope", il); + + // and {n_head * n_embd_head_v, n_tokens} + ggml_tensor * v_states = ggml_view_3d(ctx0, kv, hparams.n_embd_head_v, n_head, n_tokens, + ggml_row_size(kv->type, (n_embd_head_qk_nope + hparams.n_embd_head_v)), + ggml_row_size(kv->type, (n_embd_head_qk_nope + hparams.n_embd_head_v)*n_head), + ggml_row_size(kv->type, (n_embd_head_qk_nope))); + cb(v_states, "v_states", il); + + v_states = ggml_cont(ctx0, v_states); + cb(v_states, "v_states", il); + + v_states = ggml_view_2d(ctx0, v_states, hparams.n_embd_head_v * n_head, n_tokens, + ggml_row_size(kv->type, hparams.n_embd_head_v * n_head), + 0); + cb(v_states, "v_states", il); + + q_pe = ggml_cont(ctx0, q_pe); // TODO: the CUDA backend used to not support non-cont. RoPE, investigate removing this + q_pe = ggml_rope_ext( + ctx0, q_pe, inp_pos, rope_factors, + n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, + ext_factor, attn_factor, beta_fast, beta_slow + ); + cb(q_pe, "q_pe", il); + + // shared RoPE key + k_pe = ggml_cont(ctx0, k_pe); // TODO: the CUDA backend used to not support non-cont. 
RoPE, investigate removing this + k_pe = ggml_rope_ext( + ctx0, k_pe, inp_pos, rope_factors, + n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, + ext_factor, attn_factor, beta_fast, beta_slow + ); + cb(k_pe, "k_pe", il); + + ggml_tensor * q_states = ggml_concat(ctx0, q_nope, q_pe, 0); + cb(q_states, "q_states", il); + + ggml_tensor * k_states = ggml_concat(ctx0, k_nope, ggml_repeat(ctx0, k_pe, q_pe), 0); + cb(k_states, "k_states", il); + + cur = build_attn(inp_attn, gf, + model.layers[il].wo, NULL, + q_states, k_states, v_states, nullptr, kq_scale, il); + } + + if (il == n_layer - 1) { + // skip computing output for unused tokens + ggml_tensor * inp_out_ids = build_inp_out_ids(); + cur = ggml_get_rows(ctx0, cur, inp_out_ids); + inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids); + } + + // scale_res - scale the hidden states for residual connection + const float scale_res = scale_depth/sqrtf(float(n_layer)); + cur = ggml_scale(ctx0, cur, scale_res); + cb(cur, "hidden_scaled", il); + + ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA); + cb(ffn_inp, "ffn_inp", il); + + // feed-forward network + { + cur = build_norm(ffn_inp, + model.layers[il].ffn_norm, NULL, + LLM_NORM_RMS, il); + cb(cur, "ffn_norm", il); + + cur = build_ffn(cur, + model.layers[il].ffn_up, NULL, NULL, + model.layers[il].ffn_gate, NULL, NULL, + model.layers[il].ffn_down, NULL, NULL, + NULL, + LLM_FFN_SILU, LLM_FFN_PAR, il); + cb(cur, "ffn_out", il); + } + + // scale the hidden states for residual connection + cur = ggml_scale(ctx0, cur, scale_res); + cb(cur, "hidden_scaled_ffn", il); + + cur = ggml_add(ctx0, cur, ffn_inp); + + cur = build_cvec(cur, il); + cb(cur, "l_out", il); + + // input for next layer + inpL = cur; + } + + cur = inpL; + + cur = build_norm(cur, + model.output_norm, NULL, + LLM_NORM_RMS, -1); + + cb(cur, "result_norm", -1); + res->t_embd = cur; + + // lm_head scaling + const float scale_lmhead = float(n_embd_base)/float(n_embd); + cur = ggml_scale(ctx0, cur, scale_lmhead); + cb(cur, "lmhead_scaling", -1); + + // lm_head + cur = build_lora_mm(model.output, cur); + + cb(cur, "result_output", -1); + res->t_logits = cur; + + ggml_build_forward_expand(gf, cur); + } +}; + +struct llm_build_gemma : public llm_graph_context { + llm_build_gemma(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) { + const int64_t n_embd_head_k = hparams.n_embd_head_k; + + ggml_tensor * cur; + ggml_tensor * inpL; + + inpL = build_inp_embd(model.tok_embd); + + inpL = ggml_scale(ctx0, inpL, sqrtf(n_embd)); + cb(inpL, "inp_scaled", -1); + + // inp_pos - contains the positions + ggml_tensor * inp_pos = build_inp_pos(); + + auto * inp_attn = build_attn_inp_kv_unified(true, false); + + for (int il = 0; il < n_layer; ++il) { + // norm + cur = build_norm(inpL, + model.layers[il].attn_norm, NULL, + LLM_NORM_RMS, il); + cb(cur, "attn_norm", il); + + // self-attention + { + // compute Q and K and RoPE them + ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur); + cb(Qcur, "Qcur", il); + + ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur); + cb(Kcur, "Kcur", il); + + ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur); + cb(Vcur, "Vcur", il); + + Qcur = ggml_rope_ext( + ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head_k, n_head, n_tokens), inp_pos, nullptr, + n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, + ext_factor, attn_factor, beta_fast, beta_slow); + cb(Qcur, "Qcur", il); + + Qcur = ggml_scale(ctx0, Qcur, 1.0f / sqrtf(float(n_embd_head_k))); + cb(Qcur, 
"Qcur_scaled", il); + + Kcur = ggml_rope_ext( + ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head_k, n_head_kv, n_tokens), inp_pos, nullptr, + n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, + ext_factor, attn_factor, beta_fast, beta_slow); + cb(Kcur, "Kcur", il); + + cur = build_attn(inp_attn, gf, + model.layers[il].wo, NULL, + Qcur, Kcur, Vcur, nullptr, 1.0f, il); + } + + if (il == n_layer - 1) { + // skip computing output for unused tokens + ggml_tensor * inp_out_ids = build_inp_out_ids(); + cur = ggml_get_rows(ctx0, cur, inp_out_ids); + inpL = ggml_get_rows(ctx0, inpL, inp_out_ids); + } + + ggml_tensor * sa_out = ggml_add(ctx0, cur, inpL); + cb(sa_out, "sa_out", il); + + cur = build_norm(sa_out, + model.layers[il].ffn_norm, NULL, + LLM_NORM_RMS, il); + cb(cur, "ffn_norm", il); + + // feed-forward network + { + cur = build_ffn(cur, + model.layers[il].ffn_up, NULL, NULL, + model.layers[il].ffn_gate, NULL, NULL, + model.layers[il].ffn_down, NULL, NULL, + NULL, + LLM_FFN_GELU, LLM_FFN_PAR, il); + cb(cur, "ffn_out", il); + } + + cur = ggml_add(ctx0, cur, sa_out); + + cur = build_cvec(cur, il); + cb(cur, "l_out", il); + + // input for next layer + inpL = cur; + } + + cur = inpL; + + cur = build_norm(cur, + model.output_norm, NULL, + LLM_NORM_RMS, -1); + + cb(cur, "result_norm", -1); + res->t_embd = cur; + + // lm_head + cur = build_lora_mm(model.output, cur); + + cb(cur, "result_output", -1); + res->t_logits = cur; + + ggml_build_forward_expand(gf, cur); + } +}; + +struct llm_build_gemma2 : public llm_graph_context { + llm_build_gemma2(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) { + const int64_t n_embd_head_k = hparams.n_embd_head_k; + + ggml_tensor * cur; + ggml_tensor * inpL; + + inpL = build_inp_embd(model.tok_embd); + + inpL = ggml_scale(ctx0, inpL, sqrtf(n_embd)); + cb(inpL, "inp_scaled", -1); + + // inp_pos - contains the positions + ggml_tensor * inp_pos = build_inp_pos(); + + auto * inp_attn = build_attn_inp_kv_unified(true, true); + + for (int il = 0; il < n_layer; ++il) { + // norm + cur = build_norm(inpL, + model.layers[il].attn_norm, NULL, + LLM_NORM_RMS, il); + cb(cur, "attn_norm", il); + + // self-attention + { + // compute Q and K and RoPE them + ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur); + cb(Qcur, "Qcur", il); + + ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur); + cb(Kcur, "Kcur", il); + + ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur); + cb(Vcur, "Vcur", il); + + Qcur = ggml_rope_ext( + ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head_k, n_head, n_tokens), inp_pos, nullptr, + n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, + ext_factor, attn_factor, beta_fast, beta_slow); + cb(Qcur, "Qcur", il); + + // ref: https://github.com/google/gemma_pytorch/commit/03e657582d17cb5a8617ebf333c1c16f3694670e + switch (model.type) { + case LLM_TYPE_2B: + case LLM_TYPE_9B: Qcur = ggml_scale(ctx0, Qcur, 1.0f / sqrtf(float(n_embd_head_k))); break; + case LLM_TYPE_27B: Qcur = ggml_scale(ctx0, Qcur, 1.0f / sqrtf(float(n_embd / n_head))); break; + default: GGML_ABORT("fatal error"); + }; + cb(Qcur, "Qcur_scaled", il); + + Kcur = ggml_rope_ext( + ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head_k, n_head_kv, n_tokens), inp_pos, nullptr, + n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, + ext_factor, attn_factor, beta_fast, beta_slow); + cb(Kcur, "Kcur", il); + + cur = build_attn(inp_attn, gf, + model.layers[il].wo, NULL, + Qcur, Kcur, Vcur, nullptr, 1.0f, il); + } + + cur = 
build_norm(cur, + model.layers[il].attn_post_norm, NULL, + LLM_NORM_RMS, il); + cb(cur, "attn_post_norm", il); + + if (il == n_layer - 1) { + // skip computing output for unused tokens + ggml_tensor * inp_out_ids = build_inp_out_ids(); + cur = ggml_get_rows(ctx0, cur, inp_out_ids); + inpL = ggml_get_rows(ctx0, inpL, inp_out_ids); + } + + ggml_tensor * sa_out = ggml_add(ctx0, cur, inpL); + cb(sa_out, "sa_out", il); + + cur = build_norm(sa_out, + model.layers[il].ffn_norm, NULL, + LLM_NORM_RMS, il); + cb(cur, "ffn_norm", il); + + // feed-forward network + { + cur = build_ffn(cur, + model.layers[il].ffn_up, NULL, NULL, + model.layers[il].ffn_gate, NULL, NULL, + model.layers[il].ffn_down, NULL, NULL, + NULL, + LLM_FFN_GELU, LLM_FFN_PAR, il); + cb(cur, "ffn_out", il); + } + + cur = build_norm(cur, + model.layers[il].ffn_post_norm, NULL, + LLM_NORM_RMS, -1); + cb(cur, "ffn_post_norm", -1); + + cur = ggml_add(ctx0, cur, sa_out); + + cur = build_cvec(cur, il); + cb(cur, "l_out", il); + + // input for next layer + inpL = cur; + } + + cur = inpL; + + cur = build_norm(cur, + model.output_norm, NULL, + LLM_NORM_RMS, -1); + + cb(cur, "result_norm", -1); + res->t_embd = cur; + + // lm_head + cur = build_lora_mm(model.output, cur); + + // final logit soft-capping + cur = ggml_scale(ctx0, cur, 1.0f / hparams.f_final_logit_softcapping); + cur = ggml_tanh(ctx0, cur); + cur = ggml_scale(ctx0, cur, hparams.f_final_logit_softcapping); + + cb(cur, "result_output", -1); + res->t_logits = cur; + + ggml_build_forward_expand(gf, cur); + } +}; + +struct llm_build_gemma3 : public llm_graph_context { + llm_build_gemma3(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) { + const int64_t n_embd_head_k = hparams.n_embd_head_k; + + ggml_tensor * cur; + ggml_tensor * inpL; + + inpL = build_inp_embd(model.tok_embd); + + // important: do not scale raw embedding inputs (i.e. encoded image embeddings) + if (ubatch.token) { + inpL = ggml_scale(ctx0, inpL, sqrtf(n_embd)); + cb(inpL, "inp_scaled", -1); + } + + // inp_pos - contains the positions + ggml_tensor * inp_pos = build_inp_pos(); + + // TODO: is causal == true correct? might need some changes + auto * inp_attn = build_attn_inp_kv_unified(true, true); + + // "5-to-1 interleaved attention" + // 5 layers of local attention followed by 1 layer of global attention + static const int sliding_window_pattern = 6; + + for (int il = 0; il < n_layer; ++il) { + const bool is_sliding = il % sliding_window_pattern < (sliding_window_pattern - 1); + + const float freq_base_l = is_sliding ? 10000.0f : freq_base; + const float freq_scale_l = is_sliding ? 
1.0f : freq_scale; + + // norm + cur = build_norm(inpL, model.layers[il].attn_norm, NULL, LLM_NORM_RMS, il); + cb(cur, "attn_norm", il); + + // self-attention + { + // compute Q and K and RoPE them + ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur); + cb(Qcur, "Qcur", il); + + ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur); + cb(Kcur, "Kcur", il); + + ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur); + cb(Vcur, "Vcur", il); + + Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head_k, n_head, n_tokens); + Qcur = build_norm(Qcur, model.layers[il].attn_q_norm, NULL, LLM_NORM_RMS, il); + cb(Qcur, "Qcur_normed", il); + + Qcur = ggml_rope_ext( + ctx0, Qcur, inp_pos, nullptr, + n_rot, rope_type, n_ctx_orig, freq_base_l, freq_scale_l, + ext_factor, attn_factor, beta_fast, beta_slow); + cb(Qcur, "Qcur", il); + + Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head_k, n_head_kv, n_tokens); + Kcur = build_norm(Kcur, model.layers[il].attn_k_norm, NULL, LLM_NORM_RMS, il); + cb(Kcur, "Kcur_normed", il); + + Kcur = ggml_rope_ext( + ctx0, Kcur, inp_pos, nullptr, + n_rot, rope_type, n_ctx_orig, freq_base_l, freq_scale_l, + ext_factor, attn_factor, beta_fast, beta_slow); + cb(Kcur, "Kcur", il); + + cur = build_attn(inp_attn, gf, + model.layers[il].wo, NULL, + Qcur, Kcur, Vcur, nullptr, hparams.f_attention_scale, il); + } + + cur = build_norm(cur, + model.layers[il].attn_post_norm, NULL, + LLM_NORM_RMS, il); + cb(cur, "attn_post_norm", il); + + if (il == n_layer - 1) { + // skip computing output for unused tokens + ggml_tensor * inp_out_ids = build_inp_out_ids(); + cur = ggml_get_rows(ctx0, cur, inp_out_ids); + inpL = ggml_get_rows(ctx0, inpL, inp_out_ids); + } + + ggml_tensor * sa_out = ggml_add(ctx0, cur, inpL); + cb(sa_out, "sa_out", il); + + cur = build_norm(sa_out, + model.layers[il].ffn_norm, NULL, + LLM_NORM_RMS, il); + cb(cur, "ffn_norm", il); + + // feed-forward network + { + cur = build_ffn(cur, + model.layers[il].ffn_up, NULL, NULL, + model.layers[il].ffn_gate, NULL, NULL, + model.layers[il].ffn_down, NULL, NULL, + NULL, + LLM_FFN_GELU, LLM_FFN_PAR, il); + cb(cur, "ffn_out", il); + } + + cur = build_norm(cur, + model.layers[il].ffn_post_norm, NULL, + LLM_NORM_RMS, -1); + cb(cur, "ffn_post_norm", -1); + + cur = ggml_add(ctx0, cur, sa_out); + + cur = build_cvec(cur, il); + cb(cur, "l_out", il); + + // input for next layer + inpL = cur; + } + + cur = inpL; + + cur = build_norm(cur, + model.output_norm, NULL, + LLM_NORM_RMS, -1); + + cb(cur, "result_norm", -1); + res->t_embd = cur; + + // lm_head + cur = build_lora_mm(model.output, cur); + + cb(cur, "result_output", -1); + res->t_logits = cur; + + ggml_build_forward_expand(gf, cur); + } +}; + +// TODO: move up next to build_starcoder +struct llm_build_starcoder2 : public llm_graph_context { + llm_build_starcoder2(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) { + const int64_t n_embd_head = hparams.n_embd_head_v; + + GGML_ASSERT(n_embd_head == hparams.n_embd_head_k); + GGML_ASSERT(n_embd_head == hparams.n_rot); + + ggml_tensor * cur; + ggml_tensor * inpL; + + inpL = build_inp_embd(model.tok_embd); + + // inp_pos - contains the positions + ggml_tensor * inp_pos = build_inp_pos(); + + auto * inp_attn = build_attn_inp_kv_unified(true, false); + + for (int il = 0; il < n_layer; ++il) { + ggml_tensor * inpSA = inpL; + + // norm + cur = build_norm(inpL, + model.layers[il].attn_norm, model.layers[il].attn_norm_b, + LLM_NORM, il); + cb(cur, "attn_norm", il); + + // 
self-attention + { + // compute Q and K and RoPE them + ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur); + cb(Qcur, "Qcur", il); + if (model.layers[il].bq) { + Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq); + cb(Qcur, "Qcur", il); + } + + ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur); + cb(Kcur, "Kcur", il); + if (model.layers[il].bk) { + Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk); + cb(Kcur, "Kcur", il); + } + + ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur); + cb(Vcur, "Vcur", il); + if (model.layers[il].bv) { + Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv); + cb(Vcur, "Vcur", il); + } + + Qcur = ggml_rope_ext( + ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos, nullptr, + n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, + ext_factor, attn_factor, beta_fast, beta_slow + ); + cb(Qcur, "Qcur", il); + + Kcur = ggml_rope_ext( + ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos, nullptr, + n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, + ext_factor, attn_factor, beta_fast, beta_slow + ); + cb(Kcur, "Kcur", il); + + cur = build_attn(inp_attn, gf, + model.layers[il].wo, model.layers[il].bo, + Qcur, Kcur, Vcur, nullptr, 1.0f/sqrtf(float(n_embd_head)), il); + } + + if (il == n_layer - 1) { + // skip computing output for unused tokens + ggml_tensor * inp_out_ids = build_inp_out_ids(); + cur = ggml_get_rows(ctx0, cur, inp_out_ids); + inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids); + } + + ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA); + cb(ffn_inp, "ffn_inp", il); + + // feed-forward network + + cur = build_norm(ffn_inp, + model.layers[il].ffn_norm, model.layers[il].ffn_norm_b, + LLM_NORM, il); + cb(cur, "ffn_norm", il); + + cur = build_ffn(cur, + model.layers[il].ffn_up, model.layers[il].ffn_up_b, NULL, + NULL, NULL, NULL, + model.layers[il].ffn_down, model.layers[il].ffn_down_b, NULL, + NULL, + LLM_FFN_GELU, LLM_FFN_SEQ, il); + cb(cur, "ffn_out", il); + + cur = ggml_add(ctx0, cur, ffn_inp); + + cur = build_cvec(cur, il); + cb(cur, "l_out", il); + + // input for next layer + inpL = cur; + } + + cur = inpL; + + cur = build_norm(cur, + model.output_norm, model.output_norm_b, + LLM_NORM, -1); + + cb(cur, "result_norm", -1); + res->t_embd = cur; + + // lm_head + cur = build_lora_mm(model.output, cur); + + cb(cur, "result_output", -1); + res->t_logits = cur; + + ggml_build_forward_expand(gf, cur); + } +}; + +struct llm_build_mamba : public llm_graph_context { + const llama_model & model; + + llm_build_mamba(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params), model(model) { + ggml_tensor * cur; + ggml_tensor * inpL; + + // {n_embd, n_tokens} + inpL = build_inp_embd(model.tok_embd); + + ggml_tensor * state_copy = build_inp_s_copy(); + ggml_tensor * state_mask = build_inp_s_mask(); + + for (int il = 0; il < n_layer; ++il) { + // norm + cur = build_norm(inpL, + model.layers[il].attn_norm, NULL, + LLM_NORM_RMS, il); + cb(cur, "attn_norm", il); + + //cur = build_mamba_layer(gf, cur, state_copy, state_mask, il); + cur = build_mamba_layer(gf, cur, state_copy, state_mask, ubatch, il); + + if (il == n_layer - 1) { + // skip computing output for unused tokens + ggml_tensor * inp_out_ids = build_inp_out_ids(); + cur = ggml_get_rows(ctx0, cur, inp_out_ids); + inpL = ggml_get_rows(ctx0, inpL, inp_out_ids); + } + + // residual + cur = ggml_add(ctx0, cur, inpL); + + cur = build_cvec(cur, il); + cb(cur, "l_out", il); + + // input for next layer 
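+            // note: each Mamba block is a single SSM mixer (norm -> build_mamba_layer -> residual);
+            // there is no separate feed-forward sub-block, so the mixed state feeds the next layer directly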
+ inpL = cur; + } + + // final rmsnorm + cur = build_norm(inpL, + model.output_norm, NULL, + LLM_NORM_RMS, -1); + + cb(cur, "result_norm", -1); + res->t_embd = cur; + + // lm_head + cur = build_lora_mm(model.output, cur); + + cb(cur, "result_output", -1); + res->t_logits = cur; + + ggml_build_forward_expand(gf, cur); + } + + // TODO: split + ggml_tensor * build_mamba_layer( + ggml_cgraph * gf, + ggml_tensor * cur, + ggml_tensor * state_copy, + ggml_tensor * state_mask, + const llama_ubatch & ubatch, + int il) const { + const llama_kv_cache_unified * kv_self = static_cast(memory); + + const auto kv_head = kv_self->head; + + const int64_t d_conv = hparams.ssm_d_conv; + const int64_t d_inner = hparams.ssm_d_inner; + const int64_t d_state = hparams.ssm_d_state; + const int64_t dt_rank = hparams.ssm_dt_rank; + const int64_t n_seqs = ubatch.n_seqs; + // Some variants of Mamba arch (e.g. FalconMamba do apply layer norm on B and Dt layers) + const bool ssm_dt_b_c_rms = hparams.ssm_dt_b_c_rms; + // Use the same RMS norm as the final layer norm + const float norm_rms_eps = hparams.f_norm_rms_eps; + + const int64_t n_seq_tokens = ubatch.n_seq_tokens; + + GGML_ASSERT(n_seqs != 0); + GGML_ASSERT(ubatch.equal_seqs); + GGML_ASSERT(ubatch.n_tokens == n_seq_tokens * n_seqs); + + ggml_tensor * conv_states_all = kv_self->k_l[il]; + ggml_tensor * ssm_states_all = kv_self->v_l[il]; + + // (ab)using the KV cache to store the states + ggml_tensor * conv = build_copy_mask_state( + gf, conv_states_all, state_copy, state_mask, + hparams.n_embd_k_s(), n_seqs); + conv = ggml_reshape_3d(ctx0, conv, d_conv - 1, d_inner, n_seqs); + ggml_tensor * ssm = build_copy_mask_state( + gf, ssm_states_all, state_copy, state_mask, + hparams.n_embd_v_s(), n_seqs); + ssm = ggml_reshape_3d(ctx0, ssm, d_state, d_inner, n_seqs); + + // {n_embd, n_tokens} => {n_embd, n_seq_tokens, n_seqs} + cur = ggml_reshape_3d(ctx0, cur, cur->ne[0], n_seq_tokens, n_seqs); + + // {n_embd, 2*d_inner} @ {n_embd, n_seq_tokens, n_seqs} => {2*d_inner, n_seq_tokens, n_seqs} + ggml_tensor * xz = build_lora_mm(model.layers[il].ssm_in, cur); + // split the above in two + // => {d_inner, n_seq_tokens, n_seqs} + ggml_tensor * x = ggml_view_3d(ctx0, xz, d_inner, xz->ne[1], xz->ne[2], xz->nb[1], xz->nb[2], 0); + ggml_tensor * z = ggml_view_3d(ctx0, xz, d_inner, xz->ne[1], xz->ne[2], xz->nb[1], xz->nb[2], d_inner*ggml_element_size(xz)); + + // conv + { + // => {d_conv - 1 + n_seq_tokens, d_inner, n_seqs} + ggml_tensor * conv_x = ggml_concat(ctx0, conv, ggml_transpose(ctx0, x), 0); + + // copy last (d_conv - 1) columns back into the state cache + ggml_tensor * last_conv = ggml_view_3d(ctx0, conv_x, d_conv - 1, d_inner, n_seqs, conv_x->nb[1], conv_x->nb[2], n_seq_tokens*(conv_x->nb[0])); + + ggml_build_forward_expand(gf, + ggml_cpy(ctx0, last_conv, + ggml_view_1d(ctx0, conv_states_all, + (d_conv - 1)*(d_inner)*(n_seqs), + kv_head*(d_conv - 1)*(d_inner)*ggml_element_size(conv_states_all)))); + + // 1D convolution + // The equivalent is to make a self-overlapping view of conv_x + // over d_conv columns at each stride in the 3rd dimension, + // then element-wise multiply that with the conv1d weight, + // then sum the elements of each row, + // (the last two steps are a dot product over rows (also doable with mul_mat)) + // then permute away the ne[0] dimension, + // and then you're left with the resulting x tensor. + // For simultaneous sequences, all sequences need to have the same length. 
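+                    // shape note: conv_x is {d_conv - 1 + n_seq_tokens, d_inner, n_seqs} and ssm_conv1d is {d_conv, d_inner},
+                    // so the depthwise convolution below produces x with shape {d_inner, n_seq_tokens, n_seqs}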
+ x = ggml_ssm_conv(ctx0, conv_x, model.layers[il].ssm_conv1d); + + // bias + x = ggml_add(ctx0, x, model.layers[il].ssm_conv1d_b); + + x = ggml_silu(ctx0, x); + } + + // ssm + { + // {d_inner, dt_rank + 2*d_state} @ {d_inner, n_seq_tokens, n_seqs} => {dt_rank + 2*d_state, n_seq_tokens, n_seqs} + ggml_tensor * x_db = build_lora_mm(model.layers[il].ssm_x, x); + // split + ggml_tensor * dt = ggml_view_3d(ctx0, x_db, dt_rank, n_seq_tokens, n_seqs, x_db->nb[1], x_db->nb[2], 0); + ggml_tensor * B = ggml_view_3d(ctx0, x_db, d_state, n_seq_tokens, n_seqs, x_db->nb[1], x_db->nb[2], ggml_element_size(x_db)*dt_rank); + ggml_tensor * C = ggml_view_3d(ctx0, x_db, d_state, n_seq_tokens, n_seqs, x_db->nb[1], x_db->nb[2], ggml_element_size(x_db)*(dt_rank+d_state)); + + // Some Mamba variants (e.g. FalconMamba) apply RMS norm in B, C & Dt layers + if (ssm_dt_b_c_rms) { + dt = ggml_rms_norm(ctx0, dt, norm_rms_eps); + B = ggml_rms_norm(ctx0, B, norm_rms_eps); + C = ggml_rms_norm(ctx0, C, norm_rms_eps); + } + + // {dt_rank, d_inner} @ {dt_rank, n_seq_tokens, n_seqs} => {d_inner, n_seq_tokens, n_seqs} + dt = build_lora_mm(model.layers[il].ssm_dt, dt); + dt = ggml_add(ctx0, dt, model.layers[il].ssm_dt_b); + + // Custom operator to optimize the parallel associative scan + // as described in the Annex D of the Mamba paper. + // => {d_inner, n_seq_tokens, n_seqs} and {d_state, d_inner, n_seqs} + ggml_tensor * y_ssm = ggml_ssm_scan(ctx0, ssm, x, dt, model.layers[il].ssm_a, B, C); + + // store last states + ggml_build_forward_expand(gf, + ggml_cpy(ctx0, + ggml_view_1d(ctx0, y_ssm, d_state*d_inner*n_seqs, x->nb[3]), + ggml_view_1d(ctx0, ssm_states_all, d_state*d_inner*n_seqs, kv_head*d_state*d_inner*ggml_element_size(ssm_states_all)))); + + ggml_tensor * y = ggml_view_3d(ctx0, y_ssm, d_inner, n_seq_tokens, n_seqs, x->nb[1], x->nb[2], 0); + + // TODO: skip computing output earlier for unused tokens + + // {d_inner, n_seq_tokens, n_seqs} * {d_inner} => {d_inner, n_seq_tokens, n_seqs} + y = ggml_add(ctx0, y, ggml_mul(ctx0, x, model.layers[il].ssm_d)); + y = ggml_mul(ctx0, y, ggml_silu(ctx0, ggml_cont(ctx0, z))); + + // {d_inner, n_embd} @ {d_inner, n_seq_tokens, n_seqs} => {n_embd, n_seq_tokens, n_seqs} + cur = build_lora_mm(model.layers[il].ssm_out, y); + } + + // {n_embd, n_seq_tokens, n_seqs} => {n_embd, n_tokens} + cur = ggml_reshape_2d(ctx0, cur, cur->ne[0], n_seq_tokens * n_seqs); + //cb(cur, "mamba_out", il); + + return cur; + } +}; + +struct llm_build_command_r : public llm_graph_context { + llm_build_command_r(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) { + const int64_t n_embd_head = hparams.n_embd_head_v; + + GGML_ASSERT(n_embd_head == hparams.n_embd_head_k); + + const float f_logit_scale = hparams.f_logit_scale; + + ggml_tensor * cur; + ggml_tensor * inpL; + + inpL = build_inp_embd(model.tok_embd); + + // inp_pos - contains the positions + ggml_tensor * inp_pos = build_inp_pos(); + + auto * inp_attn = build_attn_inp_kv_unified(true, false); + + for (int il = 0; il < n_layer; ++il) { + + // norm + cur = build_norm(inpL, + model.layers[il].attn_norm, NULL, + LLM_NORM, il); + cb(cur, "attn_norm", il); + ggml_tensor * ffn_inp = cur; + + // self-attention + { + // compute Q and K and RoPE them + ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur); + cb(Qcur, "Qcur", il); + if (model.layers[il].bq) { + Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq); + cb(Qcur, "Qcur", il); + } + + ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur); 
+ cb(Kcur, "Kcur", il); + if (model.layers[il].bk) { + Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk); + cb(Kcur, "Kcur", il); + } + + ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur); + cb(Vcur, "Vcur", il); + if (model.layers[il].bv) { + Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv); + cb(Vcur, "Vcur", il); + } + + if (model.layers[il].attn_q_norm) { + Qcur = ggml_view_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens, + ggml_element_size(Qcur) * n_embd_head, + ggml_element_size(Qcur) * n_embd_head * n_head, + 0); + cb(Qcur, "Qcur", il); + Kcur = ggml_view_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens, + ggml_element_size(Kcur) * n_embd_head, + ggml_element_size(Kcur) * n_embd_head * n_head_kv, + 0); + cb(Kcur, "Kcur", il); + + Qcur = build_norm(Qcur, + model.layers[il].attn_q_norm, + NULL, + LLM_NORM, il); + cb(Qcur, "Qcur", il); + + Kcur = build_norm(Kcur, + model.layers[il].attn_k_norm, + NULL, + LLM_NORM, il); + cb(Kcur, "Kcur", il); + } + + Qcur = ggml_rope_ext( + ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos, nullptr, + n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, + ext_factor, attn_factor, beta_fast, beta_slow + ); + cb(Qcur, "Qcur", il); + + Kcur = ggml_rope_ext( + ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos, nullptr, + n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, + ext_factor, attn_factor, beta_fast, beta_slow + ); + cb(Kcur, "Kcur", il); + + cur = build_attn(inp_attn, gf, + model.layers[il].wo, model.layers[il].bo, + Qcur, Kcur, Vcur, nullptr, 1.0f/sqrtf(float(n_embd_head)), il); + } + + if (il == n_layer - 1) { + // skip computing output for unused tokens + ggml_tensor * inp_out_ids = build_inp_out_ids(); + cur = ggml_get_rows(ctx0, cur, inp_out_ids); + inpL = ggml_get_rows(ctx0, inpL, inp_out_ids); + ffn_inp = ggml_get_rows(ctx0, ffn_inp, inp_out_ids); + } + + ggml_tensor * attn_out = cur; + + // feed-forward network + { + cur = build_ffn(ffn_inp, + model.layers[il].ffn_up, NULL, NULL, + model.layers[il].ffn_gate, NULL, NULL, + model.layers[il].ffn_down, NULL, NULL, + NULL, + LLM_FFN_SILU, LLM_FFN_PAR, il); + cb(cur, "ffn_out", il); + } + + // add together residual + FFN + self-attention + cur = ggml_add(ctx0, cur, inpL); + cur = ggml_add(ctx0, cur, attn_out); + + cur = build_cvec(cur, il); + cb(cur, "l_out", il); + + // input for next layer + inpL = cur; + } + + cur = inpL; + + cur = build_norm(cur, + model.output_norm, NULL, + LLM_NORM, -1); + + cb(cur, "result_norm", -1); + res->t_embd = cur; + + // lm_head + cur = build_lora_mm(model.output, cur); + + if (f_logit_scale) { + cur = ggml_scale(ctx0, cur, f_logit_scale); + } + + cb(cur, "result_output", -1); + res->t_logits = cur; + + ggml_build_forward_expand(gf, cur); + } +}; + +struct llm_build_cohere2 : public llm_graph_context { + llm_build_cohere2(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) { + const int64_t n_embd_head = hparams.n_embd_head_v; + + GGML_ASSERT(n_embd_head == hparams.n_embd_head_k); + + const float f_logit_scale = hparams.f_logit_scale; + + ggml_tensor * cur; + ggml_tensor * inpL; + + inpL = build_inp_embd(model.tok_embd); + + // inp_pos - contains the positions + ggml_tensor * inp_pos = build_inp_pos(); + + auto * inp_attn = build_attn_inp_kv_unified(true, true); + + // sliding window switch pattern + const int32_t sliding_window_pattern = 4; + + for (int il = 0; il < n_layer; ++il) { + // three layers sliding window attention (window size 4096) and 
ROPE + // fourth layer uses global attention without positional embeddings + const bool is_sliding = il % sliding_window_pattern < (sliding_window_pattern - 1); + + // norm + cur = build_norm(inpL, model.layers[il].attn_norm, NULL, LLM_NORM, il); + cb(cur, "attn_norm", il); + ggml_tensor * ffn_inp = cur; + + // self-attention + { + // rope freq factors for 128k context + ggml_tensor * rope_factors = static_cast(memory)->cbs.get_rope_factors(n_ctx_per_seq, il); + + // compute Q and K and RoPE them + ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur); + cb(Qcur, "Qcur", il); + if (model.layers[il].bq) { + Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq); + cb(Qcur, "Qcur", il); + } + + ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur); + cb(Kcur, "Kcur", il); + if (model.layers[il].bk) { + Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk); + cb(Kcur, "Kcur", il); + } + + ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur); + cb(Vcur, "Vcur", il); + if (model.layers[il].bv) { + Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv); + cb(Vcur, "Vcur", il); + } + + if (is_sliding) { + Qcur = ggml_rope_ext(ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos, rope_factors, + n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, ext_factor, attn_factor, + beta_fast, beta_slow); + cb(Qcur, "Qcur", il); + + Kcur = ggml_rope_ext(ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos, + rope_factors, n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, ext_factor, + attn_factor, beta_fast, beta_slow); + cb(Kcur, "Kcur", il); + } else { + // For non-sliding layers, just reshape without applying RoPE + Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens); + cb(Qcur, "Qcur", il); + + Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens); + cb(Kcur, "Kcur", il); + } + + cur = build_attn(inp_attn, gf, + model.layers[il].wo, model.layers[il].bo, + Qcur, Kcur, Vcur, nullptr, 1.0f/sqrtf(float(n_embd_head)), il); + } + + if (il == n_layer - 1) { + // skip computing output for unused tokens + ggml_tensor * inp_out_ids = build_inp_out_ids(); + cur = ggml_get_rows(ctx0, cur, inp_out_ids); + inpL = ggml_get_rows(ctx0, inpL, inp_out_ids); + ffn_inp = ggml_get_rows(ctx0, ffn_inp, inp_out_ids); + } + + ggml_tensor * attn_out = cur; + + // feed-forward network + { + cur = build_ffn(ffn_inp, model.layers[il].ffn_up, NULL, NULL, model.layers[il].ffn_gate, + NULL, NULL, model.layers[il].ffn_down, NULL, NULL, NULL, LLM_FFN_SILU, LLM_FFN_PAR, + il); + cb(cur, "ffn_out", il); + } + + // add together residual + FFN + self-attention + cur = ggml_add(ctx0, cur, inpL); + cur = ggml_add(ctx0, cur, attn_out); + + cur = build_cvec(cur, il); + cb(cur, "l_out", il); + + // input for next layer + inpL = cur; + } + + cur = inpL; + + cur = build_norm(cur, model.output_norm, NULL, LLM_NORM, -1); + + cb(cur, "result_norm", -1); + res->t_embd = cur; + + // lm_head + cur = build_lora_mm(model.output, cur); + + if (f_logit_scale) { + cur = ggml_scale(ctx0, cur, f_logit_scale); + } + + cb(cur, "result_output", -1); + res->t_logits = cur; + + ggml_build_forward_expand(gf, cur); + } +}; + +// ref: https://allenai.org/olmo +// based on the original build_llama() function, changes: +// * non-parametric layer norm +// * clamp qkv +// * removed bias +// * removed MoE +struct llm_build_olmo : public llm_graph_context { + llm_build_olmo(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) { + const 
int64_t n_embd_head = hparams.n_embd_head_v; + + GGML_ASSERT(n_embd_head == hparams.n_embd_head_k); + GGML_ASSERT(n_embd_head == hparams.n_rot); + + ggml_tensor * cur; + ggml_tensor * inpL; + + inpL = build_inp_embd(model.tok_embd); + + // inp_pos - contains the positions + ggml_tensor * inp_pos = build_inp_pos(); + + auto * inp_attn = build_attn_inp_kv_unified(true, false); + + for (int il = 0; il < n_layer; ++il) { + ggml_tensor * inpSA = inpL; + + // norm + cur = build_norm(inpL, + NULL, NULL, + LLM_NORM, il); + cb(cur, "attn_norm", il); + + // self-attention + { + // compute Q and K and RoPE them + ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur); + cb(Qcur, "Qcur", il); + if (hparams.f_clamp_kqv > 0.0f) { + Qcur = ggml_clamp(ctx0, Qcur, -hparams.f_clamp_kqv, hparams.f_clamp_kqv); + cb(Qcur, "Qcur", il); + } + + ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur); + cb(Kcur, "Kcur", il); + if (hparams.f_clamp_kqv > 0.0f) { + Kcur = ggml_clamp(ctx0, Kcur, -hparams.f_clamp_kqv, hparams.f_clamp_kqv); + cb(Kcur, "Kcur", il); + } + + ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur); + cb(Vcur, "Vcur", il); + if (hparams.f_clamp_kqv > 0.0f) { + Vcur = ggml_clamp(ctx0, Vcur, -hparams.f_clamp_kqv, hparams.f_clamp_kqv); + cb(Vcur, "Vcur", il); + } + + Qcur = ggml_rope_ext( + ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos, nullptr, + n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, + ext_factor, attn_factor, beta_fast, beta_slow + ); + cb(Qcur, "Qcur", il); + + Kcur = ggml_rope_ext( + ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos, nullptr, + n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, + ext_factor, attn_factor, beta_fast, beta_slow + ); + cb(Kcur, "Kcur", il); + + cur = build_attn(inp_attn, gf, + model.layers[il].wo, nullptr, + Qcur, Kcur, Vcur, nullptr, 1.0f/sqrtf(float(n_embd_head)), il); + } + + if (il == n_layer - 1) { + // skip computing output for unused tokens + ggml_tensor * inp_out_ids = build_inp_out_ids(); + cur = ggml_get_rows(ctx0, cur, inp_out_ids); + inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids); + } + + ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA); + cb(ffn_inp, "ffn_inp", il); + + // feed-forward network + cur = build_norm(ffn_inp, + NULL, NULL, + LLM_NORM, il); + cb(cur, "ffn_norm", il); + + cur = build_ffn(cur, + model.layers[il].ffn_up, NULL, NULL, + model.layers[il].ffn_gate, NULL, NULL, + model.layers[il].ffn_down, NULL, NULL, + NULL, + LLM_FFN_SILU, LLM_FFN_PAR, il); + cb(cur, "ffn_out", il); + + cur = ggml_add(ctx0, cur, ffn_inp); + cb(cur, "ffn_out", il); + + cur = build_cvec(cur, il); + cb(cur, "l_out", il); + + // input for next layer + inpL = cur; + } + + cur = inpL; + + cur = build_norm(cur, + NULL, NULL, + LLM_NORM, -1); + + cb(cur, "result_norm", -1); + res->t_embd = cur; + + // lm_head + cur = build_lora_mm(model.output, cur); + + cb(cur, "result_output", -1); + res->t_logits = cur; + + ggml_build_forward_expand(gf, cur); + } +}; + +struct llm_build_olmo2 : public llm_graph_context { + llm_build_olmo2(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) { + const int64_t n_embd_head = hparams.n_embd_head_v; + + GGML_ASSERT(n_embd_head == hparams.n_embd_head_k); + GGML_ASSERT(n_embd_head == hparams.n_rot); + + ggml_tensor * cur; + ggml_tensor * inpL; + + inpL = build_inp_embd(model.tok_embd); + + // inp_pos - contains the positions + ggml_tensor * inp_pos = build_inp_pos(); + + auto * inp_attn = 
build_attn_inp_kv_unified(true, false); + + for (int il = 0; il < n_layer; ++il) { + ggml_tensor * inpSA = inpL; + + cur = inpL; + + // self_attention + { + // compute Q and K and RoPE them + ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur); + cb(Qcur, "Qcur", il); + + ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur); + cb(Kcur, "Kcur", il); + + ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur); + cb(Vcur, "Vcur", il); + + Qcur = build_norm(Qcur, model.layers[il].attn_q_norm, NULL, + LLM_NORM_RMS, il); + cb(Qcur, "Qcur_normed", il); + + Kcur = build_norm(Kcur, model.layers[il].attn_k_norm, NULL, + LLM_NORM_RMS, il); + cb(Kcur, "Kcur_normed", il); + + Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens); + Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens); + + Qcur = ggml_rope_ext( + ctx0, Qcur, inp_pos, nullptr, + n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, + ext_factor, attn_factor, beta_fast, beta_slow + ); + cb(Qcur, "Qcur_rope", il); + + Kcur = ggml_rope_ext( + ctx0, Kcur, inp_pos, nullptr, + n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, + ext_factor, attn_factor, beta_fast, beta_slow + ); + cb(Kcur, "Kcur_rope", il); + + cur = build_attn(inp_attn, gf, + model.layers[il].wo, NULL, + Qcur, Kcur, Vcur, nullptr, 1.0f/sqrtf(float(n_embd_head)), il); + } + + cur = build_norm(cur, + model.layers[il].attn_post_norm, NULL, + LLM_NORM_RMS, il); + cb(cur, "attn_post_norm", il); + + if (il == n_layer - 1) { + // skip computing output for unused tokens + ggml_tensor * inp_out_ids = build_inp_out_ids(); + cur = ggml_get_rows(ctx0, cur, inp_out_ids); + inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids); + } + + ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA); + cb(ffn_inp, "ffn_inp", il); + + // feed-forward network + cur = build_ffn(ffn_inp, + model.layers[il].ffn_up, NULL, NULL, + model.layers[il].ffn_gate, NULL, NULL, + model.layers[il].ffn_down, NULL, NULL, + NULL, + LLM_FFN_SILU, LLM_FFN_PAR, il); + cb(cur, "ffn_out", il); + + cur = build_norm(cur, + model.layers[il].ffn_post_norm, NULL, + LLM_NORM_RMS, -1); + cb(cur, "ffn_post_norm", -1); + + cur = ggml_add(ctx0, cur, ffn_inp); + cb(cur, "ffn_out", il); + + cur = build_cvec(cur, il); + cb(cur, "l_out", il); + + // input for next layer + inpL = cur; + } + + cur = inpL; + + cur = build_norm(cur, + model.output_norm, NULL, + LLM_NORM_RMS, -1); + + cb(cur, "result_norm", -1); + res->t_embd = cur; + + // lm_head + cur = build_lora_mm(model.output, cur); + + cb(cur, "result_output", -1); + res->t_logits = cur; + + ggml_build_forward_expand(gf, cur); + } +}; + +// based on the build_qwen2moe() function, changes: +// * removed shared experts +// * removed bias +// * added q, k norm +struct llm_build_olmoe : public llm_graph_context { + llm_build_olmoe(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) { + const int64_t n_embd_head = hparams.n_embd_head_v; + + GGML_ASSERT(n_embd_head == hparams.n_embd_head_k); + GGML_ASSERT(n_embd_head == hparams.n_rot); + + ggml_tensor * cur; + ggml_tensor * inpL; + + inpL = build_inp_embd(model.tok_embd); + + // inp_pos - contains the positions + ggml_tensor * inp_pos = build_inp_pos(); + + auto * inp_attn = build_attn_inp_kv_unified(true, false); + + for (int il = 0; il < n_layer; ++il) { + ggml_tensor * inpSA = inpL; + + // norm + cur = build_norm(inpL, + model.layers[il].attn_norm, NULL, + LLM_NORM_RMS, il); + cb(cur, "attn_norm", il); + + // self_attention + { + // compute Q 
and K and RoPE them + ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur); + cb(Qcur, "Qcur", il); + + ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur); + cb(Kcur, "Kcur", il); + + ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur); + cb(Vcur, "Vcur", il); + + Qcur = build_norm(Qcur, model.layers[il].attn_q_norm, NULL, + LLM_NORM_RMS, il); + cb(Qcur, "Qcur_normed", il); + + Kcur = build_norm(Kcur, model.layers[il].attn_k_norm, NULL, + LLM_NORM_RMS, il); + cb(Kcur, "Kcur_normed", il); + + Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens); + Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens); + + Qcur = ggml_rope_ext( + ctx0, Qcur, inp_pos, nullptr, + n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, + ext_factor, attn_factor, beta_fast, beta_slow + ); + cb(Qcur, "Qcur_rope", il); + + Kcur = ggml_rope_ext( + ctx0, Kcur, inp_pos, nullptr, + n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, + ext_factor, attn_factor, beta_fast, beta_slow + ); + cb(Kcur, "Kcur_rope", il); + + cur = build_attn(inp_attn, gf, + model.layers[il].wo, NULL, + Qcur, Kcur, Vcur, nullptr, 1.0f/sqrtf(float(n_embd_head)), il); + } + + if (il == n_layer - 1) { + // skip computing output for unused tokens + ggml_tensor * inp_out_ids = build_inp_out_ids(); + cur = ggml_get_rows(ctx0, cur, inp_out_ids); + inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids); + } + + ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA); + cb(ffn_inp, "ffn_inp", il); + + // MoE branch + cur = build_norm(ffn_inp, + model.layers[il].ffn_norm, NULL, + LLM_NORM_RMS, il); + cb(cur, "ffn_norm", il); + + cur = build_moe_ffn(cur, + model.layers[il].ffn_gate_inp, + model.layers[il].ffn_up_exps, + model.layers[il].ffn_gate_exps, + model.layers[il].ffn_down_exps, + nullptr, + n_expert, n_expert_used, + LLM_FFN_SILU, false, + false, 0.0, + LLAMA_EXPERT_GATING_FUNC_TYPE_SOFTMAX, + il); + cb(cur, "ffn_moe_out", il); + + cur = ggml_add(ctx0, cur, ffn_inp); + + cur = build_cvec(cur, il); + cb(cur, "l_out", il); + + // input for next layer + inpL = cur; + } + + cur = inpL; + + cur = build_norm(cur, + model.output_norm, NULL, + LLM_NORM_RMS, -1); + + cb(cur, "result_norm", -1); + res->t_embd = cur; + + // lm_head + cur = build_lora_mm(model.output, cur); + + cb(cur, "result_output", -1); + res->t_logits = cur; + + ggml_build_forward_expand(gf, cur); + } +}; + +struct llm_build_openelm : public llm_graph_context { + llm_build_openelm(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) { + const int64_t n_embd_head = hparams.n_embd_head_v; + + GGML_ASSERT(n_embd_head == hparams.n_embd_head_k); + + ggml_tensor * cur; + ggml_tensor * inpL; + inpL = build_inp_embd(model.tok_embd); + + // inp_pos - contains the positions + ggml_tensor * inp_pos = build_inp_pos(); + + auto * inp_attn = build_attn_inp_kv_unified(true, false); + + for (int il = 0; il < n_layer; ++il) { + const int64_t n_head = hparams.n_head(il); + const int64_t n_head_kv = hparams.n_head_kv(il); + const int64_t n_head_qkv = 2*n_head_kv + n_head; + + cur = inpL; + ggml_tensor * residual = cur; + + // norm + cur = build_norm(inpL, + model.layers[il].attn_norm, NULL, + LLM_NORM_RMS, il); + cb(cur, "attn_norm", il); + + // self-attention + { + cur = build_lora_mm(model.layers[il].wqkv, cur); + cb(cur, "wqkv", il); + + cur = ggml_reshape_3d(ctx0, cur, n_embd_head_k, n_head_qkv, n_tokens); + + ggml_tensor * Qcur = ggml_cont(ctx0, ggml_view_3d(ctx0, cur, n_embd_head, n_head, n_tokens, 
cur->nb[1], cur->nb[2], 0)); + cb(Qcur, "Qcur", il); + + ggml_tensor * Kcur = ggml_cont(ctx0, ggml_view_3d(ctx0, cur, n_embd_head, n_head_kv, n_tokens, cur->nb[1], cur->nb[2], cur->nb[1]*n_head)); + cb(Kcur, "Kcur", il); + + ggml_tensor * Vcur = ggml_cont(ctx0, ggml_view_3d(ctx0, cur, n_embd_head, n_head_kv, n_tokens, cur->nb[1], cur->nb[2], cur->nb[1]*(n_head+n_head_kv))); + cb(Vcur, "Vcur", il); + + Qcur = build_norm(Qcur, + model.layers[il].attn_q_norm, NULL, + LLM_NORM_RMS, il); + cb(Qcur, "Qcur", il); + + Kcur = build_norm(Kcur, + model.layers[il].attn_k_norm, NULL, + LLM_NORM_RMS, il); + cb(Kcur, "Kcur", il); + + Qcur = ggml_rope_ext( + ctx0, Qcur, inp_pos, NULL, n_rot, rope_type, n_ctx_orig, + freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow + ); + cb(Qcur, "Qcur", il); + + Kcur = ggml_rope_ext( + ctx0, Kcur, inp_pos, NULL, n_rot, rope_type, n_ctx_orig, + freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow + ); + cb(Kcur, "Kcur", il); + + Vcur = ggml_reshape_2d(ctx0, Vcur, n_embd_head * n_head_kv, n_tokens); + cb(Qcur, "Vcur", il); + + cur = build_attn(inp_attn, gf, + model.layers[il].wo, NULL, + Qcur, Kcur, Vcur, nullptr, 1.0f/sqrtf(float(n_embd_head)), il); + } + + if (il == n_layer - 1) { + // skip computing output for unused tokens + ggml_tensor * inp_out_ids = build_inp_out_ids(); + residual = ggml_get_rows(ctx0, residual, inp_out_ids); + cur = ggml_get_rows(ctx0, cur, inp_out_ids); + } + + ggml_tensor * ffn_inp = ggml_add(ctx0, residual, cur); + cb(ffn_inp, "ffn_inp", il); + + // feed-forward network + { + cur = build_norm(ffn_inp, + model.layers[il].ffn_norm, NULL, + LLM_NORM_RMS, il); + cb(cur, "ffn_norm", il); + + cur = build_ffn(cur, + model.layers[il].ffn_up, NULL, NULL, + model.layers[il].ffn_gate, NULL, NULL, + model.layers[il].ffn_down, NULL, NULL, + NULL, + LLM_FFN_SILU, LLM_FFN_PAR, il); + cb(cur, "ffn_out", il); + } + + cur = ggml_add(ctx0, cur, ffn_inp); + + cur = build_cvec(cur, il); + cb(cur, "l_out", il); + + inpL = cur; + } + + cur = inpL; + + // norm + cur = build_norm(cur, + model.output_norm, NULL, + LLM_NORM_RMS, -1); + + cb(cur, "result_norm", -1); + res->t_embd = cur; + + cur = build_lora_mm(model.output, cur); + + cb(cur, "result_output", -1); + res->t_logits = cur; + + ggml_build_forward_expand(gf, cur); + } +}; + +struct llm_build_gptneox : public llm_graph_context { + llm_build_gptneox(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) { + const int64_t n_embd_head = hparams.n_embd_head_v; + const int64_t n_embd_gqa = hparams.n_embd_v_gqa(); + + GGML_ASSERT(n_embd_head == hparams.n_embd_head_k); + + ggml_tensor * cur; + ggml_tensor * inpL; + + inpL = build_inp_embd(model.tok_embd); + + // inp_pos - contains the positions + ggml_tensor * inp_pos = build_inp_pos(); + + auto * inp_attn = build_attn_inp_kv_unified(true, false); + + for (int il = 0; il < n_layer; ++il) { + cur = build_norm(inpL, + model.layers[il].attn_norm, + model.layers[il].attn_norm_b, + LLM_NORM, il); + cb(cur, "attn_norm", il); + + // self-attention + { + cur = build_lora_mm(model.layers[il].wqkv, cur); + cb(cur, "wqkv", il); + + cur = ggml_add(ctx0, cur, model.layers[il].bqkv); + cb(cur, "bqkv", il); + + ggml_tensor * Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd))); + ggml_tensor * Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd))); + ggml_tensor * Vcur = ggml_cont(ctx0, 
ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd + n_embd_gqa))); + + cb(Qcur, "Qcur", il); + cb(Kcur, "Kcur", il); + cb(Vcur, "Vcur", il); + + Qcur = ggml_rope_ext( + ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos, nullptr, + n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, + ext_factor, attn_factor, beta_fast, beta_slow + ); + cb(Qcur, "Qcur", il); + + Kcur = ggml_rope_ext( + ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos, nullptr, + n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, + ext_factor, attn_factor, beta_fast, beta_slow + ); + cb(Kcur, "Kcur", il); + + cur = build_attn(inp_attn, gf, + model.layers[il].wo, model.layers[il].bo, + Qcur, Kcur, Vcur, nullptr, 1.0f/sqrtf(float(n_embd_head)), il); + } + + if (il == n_layer - 1) { + // skip computing output for unused tokens + ggml_tensor * inp_out_ids = build_inp_out_ids(); + cur = ggml_get_rows(ctx0, cur, inp_out_ids); + inpL = ggml_get_rows(ctx0, inpL, inp_out_ids); + } + + // ffn + if (hparams.use_par_res) { + // attention and ffn are computed in parallel + // x = x + attn(ln1(x)) + ffn(ln2(x)) + + ggml_tensor * attn_out = cur; + + cur = build_norm(inpL, + model.layers[il].ffn_norm, + model.layers[il].ffn_norm_b, + LLM_NORM, il); + cb(cur, "ffn_norm", il); + + cur = build_ffn(cur, + model.layers[il].ffn_up, model.layers[il].ffn_up_b, NULL, + NULL, NULL, NULL, + model.layers[il].ffn_down, model.layers[il].ffn_down_b, NULL, + NULL, + LLM_FFN_GELU, LLM_FFN_SEQ, il); + cb(cur, "ffn_out", il); + + cur = ggml_add(ctx0, cur, inpL); + cb(cur, "ffn_out", il); + + cur = ggml_add(ctx0, cur, attn_out); + + cur = build_cvec(cur, il); + cb(cur, "l_out", il); + + // input for next layer + inpL = cur; + } else { + // attention and ffn are computed sequentially + // x = x + attn(ln1(x)) + // x = x + ffn(ln2(x)) + + ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpL); + cb(ffn_inp, "ffn_inp", il); + + cur = build_norm(ffn_inp, + model.layers[il].ffn_norm, + model.layers[il].ffn_norm_b, + LLM_NORM, il); + cb(cur, "ffn_norm", il); + + cur = build_ffn(cur, + model.layers[il].ffn_up, model.layers[il].ffn_up_b, NULL, + NULL, NULL, NULL, + model.layers[il].ffn_down, model.layers[il].ffn_down_b, NULL, + NULL, + LLM_FFN_GELU, LLM_FFN_SEQ, il); + cb(cur, "ffn_out", il); + + cur = ggml_add(ctx0, cur, ffn_inp); + + cur = build_cvec(cur, il); + cb(cur, "l_out", il); + + // input for next layer + inpL = cur; + } + } + + cur = build_norm(inpL, + model.output_norm, + model.output_norm_b, + LLM_NORM, -1); + + cb(cur, "result_norm", -1); + res->t_embd = cur; + + cur = build_lora_mm(model.output, cur); + + cb(cur, "result_output", -1); + res->t_logits = cur; + + ggml_build_forward_expand(gf, cur); + } +}; + +struct llm_build_arctic : public llm_graph_context { + llm_build_arctic(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) { + const int64_t n_embd_head = hparams.n_embd_head_v; + + GGML_ASSERT(n_embd_head == hparams.n_embd_head_k); + GGML_ASSERT(n_embd_head == hparams.n_rot); + + ggml_tensor * cur; + ggml_tensor * inpL; + + inpL = build_inp_embd(model.tok_embd); + + // inp_pos - contains the positions + ggml_tensor * inp_pos = build_inp_pos(); + + auto * inp_attn = build_attn_inp_kv_unified(true, false); + + for (int il = 0; il < n_layer; ++il) { + ggml_tensor * inpSA = inpL; + + // norm + cur = build_norm(inpL, + model.layers[il].attn_norm, NULL, + LLM_NORM_RMS, il); + cb(cur, "attn_norm", il); + + // 
self-attention + { + // compute Q and K and RoPE them + ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur); + cb(Qcur, "Qcur", il); + + ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur); + cb(Kcur, "Kcur", il); + + ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur); + cb(Vcur, "Vcur", il); + + Qcur = ggml_rope_ext( + ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos, nullptr, + n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, + ext_factor, attn_factor, beta_fast, beta_slow + ); + cb(Qcur, "Qcur", il); + + Kcur = ggml_rope_ext( + ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos, nullptr, + n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, + ext_factor, attn_factor, beta_fast, beta_slow + ); + cb(Kcur, "Kcur", il); + + cur = build_attn(inp_attn, gf, + model.layers[il].wo, NULL, + Qcur, Kcur, Vcur, nullptr, 1.0f/sqrtf(float(n_embd_head)), il); + } + + if (il == n_layer - 1) { + // skip computing output for unused tokens + ggml_tensor * inp_out_ids = build_inp_out_ids(); + cur = ggml_get_rows(ctx0, cur, inp_out_ids); + inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids); + } + + ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA); + cb(ffn_inp, "ffn_inp", il); + + // feed-forward network + cur = build_norm(ffn_inp, + model.layers[il].ffn_norm, NULL, + LLM_NORM_RMS, il); + cb(cur, "ffn_norm", il); + + cur = build_ffn(cur, + model.layers[il].ffn_up, NULL, NULL, + model.layers[il].ffn_gate, NULL, NULL, + model.layers[il].ffn_down, NULL, NULL, + NULL, + LLM_FFN_SILU, LLM_FFN_PAR, il); + cb(cur, "ffn_out", il); + + ggml_tensor * ffn_out = ggml_add(ctx0, cur, ffn_inp); + cb(ffn_out, "ffn_out", il); + + // MoE + cur = build_norm(inpSA, + model.layers[il].ffn_norm_exps, NULL, + LLM_NORM_RMS, il); + cb(cur, "ffn_norm_exps", il); + + cur = build_moe_ffn(cur, + model.layers[il].ffn_gate_inp, + model.layers[il].ffn_up_exps, + model.layers[il].ffn_gate_exps, + model.layers[il].ffn_down_exps, + nullptr, + n_expert, n_expert_used, + LLM_FFN_SILU, true, + false, 0.0, + LLAMA_EXPERT_GATING_FUNC_TYPE_SOFTMAX, + il); + cb(cur, "ffn_moe_out", il); + + cur = ggml_add(ctx0, cur, ffn_out); + cb(cur, "ffn_out", il); + + cur = build_cvec(cur, il); + cb(cur, "l_out", il); + + // input for next layer + inpL = cur; + } + + cur = inpL; + + cur = build_norm(cur, + model.output_norm, NULL, + LLM_NORM_RMS, -1); + + cb(cur, "result_norm", -1); + res->t_embd = cur; + + // lm_head + cur = build_lora_mm(model.output, cur); + + cb(cur, "result_output", -1); + res->t_logits = cur; + + ggml_build_forward_expand(gf, cur); + } +}; + +struct llm_build_deepseek : public llm_graph_context { + llm_build_deepseek(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) { + const int64_t n_embd_head = hparams.n_embd_head_v; + + GGML_ASSERT(n_embd_head == hparams.n_embd_head_k); + GGML_ASSERT(n_embd_head == hparams.n_rot); + + ggml_tensor * cur; + ggml_tensor * inpL; + + inpL = build_inp_embd(model.tok_embd); + + // inp_pos - contains the positions + ggml_tensor * inp_pos = build_inp_pos(); + + auto * inp_attn = build_attn_inp_kv_unified(true, false); + + const float kq_scale = hparams.f_attention_scale == 0.0f ? 
1.0f/sqrtf(float(n_embd_head)) : hparams.f_attention_scale; + + for (int il = 0; il < n_layer; ++il) { + ggml_tensor * inpSA = inpL; + + // norm + cur = build_norm(inpL, + model.layers[il].attn_norm, NULL, + LLM_NORM_RMS, il); + cb(cur, "attn_norm", il); + + // self-attention + { + // rope freq factors for llama3; may return nullptr for llama2 and other models + ggml_tensor * rope_factors = static_cast(memory)->cbs.get_rope_factors(n_ctx_per_seq, il); + + // compute Q and K and RoPE them + ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur); + cb(Qcur, "Qcur", il); + if (model.layers[il].bq) { + Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq); + cb(Qcur, "Qcur", il); + } + + ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur); + cb(Kcur, "Kcur", il); + if (model.layers[il].bk) { + Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk); + cb(Kcur, "Kcur", il); + } + + ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur); + cb(Vcur, "Vcur", il); + if (model.layers[il].bv) { + Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv); + cb(Vcur, "Vcur", il); + } + + Qcur = ggml_rope_ext( + ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos, rope_factors, + n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, + ext_factor, attn_factor, beta_fast, beta_slow + ); + cb(Qcur, "Qcur", il); + + Kcur = ggml_rope_ext( + ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos, rope_factors, + n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, + ext_factor, attn_factor, beta_fast, beta_slow + ); + cb(Kcur, "Kcur", il); + + cur = build_attn(inp_attn, gf, + model.layers[il].wo, model.layers[il].bo, + Qcur, Kcur, Vcur, nullptr, kq_scale, il); + } + + if (il == n_layer - 1) { + // skip computing output for unused tokens + ggml_tensor * inp_out_ids = build_inp_out_ids(); + cur = ggml_get_rows(ctx0, cur, inp_out_ids); + inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids); + } + + + ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA); + cb(ffn_inp, "ffn_inp", il); + + cur = build_norm(ffn_inp, + model.layers[il].ffn_norm, NULL, + LLM_NORM_RMS, il); + cb(cur, "ffn_norm", il); + + if ((uint32_t) il < hparams.n_layer_dense_lead) { + cur = build_ffn(cur, + model.layers[il].ffn_up, NULL, NULL, + model.layers[il].ffn_gate, NULL, NULL, + model.layers[il].ffn_down, NULL, NULL, + NULL, + LLM_FFN_SILU, LLM_FFN_PAR, il); + cb(cur, "ffn_out", il); + } else { + // MoE branch + ggml_tensor * moe_out = + build_moe_ffn(cur, + model.layers[il].ffn_gate_inp, + model.layers[il].ffn_up_exps, + model.layers[il].ffn_gate_exps, + model.layers[il].ffn_down_exps, + nullptr, + n_expert, n_expert_used, + LLM_FFN_SILU, false, + false, hparams.expert_weights_scale, + LLAMA_EXPERT_GATING_FUNC_TYPE_SOFTMAX, + il); + cb(moe_out, "ffn_moe_out", il); + + // FFN shared expert + { + ggml_tensor * ffn_shexp = build_ffn(cur, + model.layers[il].ffn_up_shexp, NULL, NULL, + model.layers[il].ffn_gate_shexp, NULL, NULL, + model.layers[il].ffn_down_shexp, NULL, NULL, + NULL, + LLM_FFN_SILU, LLM_FFN_PAR, il); + cb(ffn_shexp, "ffn_shexp", il); + + cur = ggml_add(ctx0, moe_out, ffn_shexp); + cb(cur, "ffn_out", il); + } + } + + cur = ggml_add(ctx0, cur, ffn_inp); + + cur = build_cvec(cur, il); + cb(cur, "l_out", il); + + // input for next layer + inpL = cur; + } + + cur = inpL; + + cur = build_norm(cur, + model.output_norm, NULL, + LLM_NORM_RMS, -1); + + cb(cur, "result_norm", -1); + res->t_embd = cur; + + // lm_head + cur = build_lora_mm(model.output, cur); + + cb(cur, "result_output", -1); + 
res->t_logits = cur; + + ggml_build_forward_expand(gf, cur); + } +}; + +struct llm_build_deepseek2 : public llm_graph_context { + llm_build_deepseek2(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) { + bool is_lite = (hparams.n_layer == 27); + + // We have to pre-scale kq_scale and attn_factor to make the YaRN RoPE work correctly. + // See https://github.com/ggerganov/llama.cpp/discussions/7416 for detailed explanation. + const float mscale = attn_factor * (1.0f + hparams.rope_yarn_log_mul * logf(1.0f / freq_scale)); + const float kq_scale = 1.0f*mscale*mscale/sqrtf(float(hparams.n_embd_head_k)); + const float attn_factor_scaled = 1.0f / (1.0f + 0.1f * logf(1.0f / freq_scale)); + + const uint32_t n_embd_head_qk_rope = hparams.n_rot; + const uint32_t n_embd_head_qk_nope = hparams.n_embd_head_k - hparams.n_rot; + const uint32_t kv_lora_rank = hparams.n_lora_kv; + + ggml_tensor * cur; + ggml_tensor * inpL; + + // {n_embd, n_tokens} + inpL = build_inp_embd(model.tok_embd); + + // inp_pos - contains the positions + ggml_tensor * inp_pos = build_inp_pos(); + + auto * inp_attn = build_attn_inp_kv_unified(true, false); + + for (int il = 0; il < n_layer; ++il) { + ggml_tensor * inpSA = inpL; + + // norm + cur = build_norm(inpL, + model.layers[il].attn_norm, NULL, + LLM_NORM_RMS, il); + cb(cur, "attn_norm", il); + + // self_attention + { + ggml_tensor * q = NULL; + if (!is_lite) { + // {n_embd, q_lora_rank} * {n_embd, n_tokens} -> {q_lora_rank, n_tokens} + q = ggml_mul_mat(ctx0, model.layers[il].wq_a, cur); + cb(q, "q", il); + + q = build_norm(q, + model.layers[il].attn_q_a_norm, NULL, + LLM_NORM_RMS, il); + cb(q, "q", il); + + // {q_lora_rank, n_head * hparams.n_embd_head_k} * {q_lora_rank, n_tokens} -> {n_head * hparams.n_embd_head_k, n_tokens} + q = ggml_mul_mat(ctx0, model.layers[il].wq_b, q); + cb(q, "q", il); + } else { + q = ggml_mul_mat(ctx0, model.layers[il].wq, cur); + cb(q, "q", il); + } + + // split into {n_head * n_embd_head_qk_nope, n_tokens} + ggml_tensor * q_nope = ggml_view_3d(ctx0, q, n_embd_head_qk_nope, n_head, n_tokens, + ggml_row_size(q->type, hparams.n_embd_head_k), + ggml_row_size(q->type, hparams.n_embd_head_k * n_head), + 0); + cb(q_nope, "q_nope", il); + + // and {n_head * n_embd_head_qk_rope, n_tokens} + ggml_tensor * q_pe = ggml_view_3d(ctx0, q, n_embd_head_qk_rope, n_head, n_tokens, + ggml_row_size(q->type, hparams.n_embd_head_k), + ggml_row_size(q->type, hparams.n_embd_head_k * n_head), + ggml_row_size(q->type, n_embd_head_qk_nope)); + cb(q_pe, "q_pe", il); + + // {n_embd, kv_lora_rank + n_embd_head_qk_rope} * {n_embd, n_tokens} -> {kv_lora_rank + n_embd_head_qk_rope, n_tokens} + ggml_tensor * kv_pe_compresseed = ggml_mul_mat(ctx0, model.layers[il].wkv_a_mqa, cur); + cb(kv_pe_compresseed, "kv_pe_compresseed", il); + + // split into {kv_lora_rank, n_tokens} + ggml_tensor * kv_compressed = ggml_view_2d(ctx0, kv_pe_compresseed, kv_lora_rank, n_tokens, + kv_pe_compresseed->nb[1], + 0); + cb(kv_compressed, "kv_compressed", il); + + // and {n_embd_head_qk_rope, n_tokens} + ggml_tensor * k_pe = ggml_view_3d(ctx0, kv_pe_compresseed, n_embd_head_qk_rope, 1, n_tokens, + kv_pe_compresseed->nb[1], + kv_pe_compresseed->nb[1], + ggml_row_size(kv_pe_compresseed->type, kv_lora_rank)); + cb(k_pe, "k_pe", il); + + // TODO: the CUDA backend used to not support non-cont. 
(RMS) norm, investigate removing ggml_cont + kv_compressed = ggml_cont(ctx0, kv_compressed); + kv_compressed = build_norm(kv_compressed, + model.layers[il].attn_kv_a_norm, NULL, + LLM_NORM_RMS, il); + cb(kv_compressed, "kv_compressed", il); + + // {kv_lora_rank, n_head * (n_embd_head_qk_nope + n_embd_head_v)} * {kv_lora_rank, n_tokens} -> {n_head * (n_embd_head_qk_nope + n_embd_head_v), n_tokens} + ggml_tensor * kv = ggml_mul_mat(ctx0, model.layers[il].wkv_b, kv_compressed); + cb(kv, "kv", il); + + // split into {n_head * n_embd_head_qk_nope, n_tokens} + ggml_tensor * k_nope = ggml_view_3d(ctx0, kv, n_embd_head_qk_nope, n_head, n_tokens, + ggml_row_size(kv->type, n_embd_head_qk_nope + hparams.n_embd_head_v), + ggml_row_size(kv->type, n_head * (n_embd_head_qk_nope + hparams.n_embd_head_v)), + 0); + cb(k_nope, "k_nope", il); + + // and {n_head * n_embd_head_v, n_tokens} + ggml_tensor * v_states = ggml_view_3d(ctx0, kv, hparams.n_embd_head_v, n_head, n_tokens, + ggml_row_size(kv->type, (n_embd_head_qk_nope + hparams.n_embd_head_v)), + ggml_row_size(kv->type, (n_embd_head_qk_nope + hparams.n_embd_head_v)*n_head), + ggml_row_size(kv->type, (n_embd_head_qk_nope))); + cb(v_states, "v_states", il); + + v_states = ggml_cont(ctx0, v_states); + cb(v_states, "v_states", il); + + v_states = ggml_view_2d(ctx0, v_states, hparams.n_embd_head_v * n_head, n_tokens, + ggml_row_size(kv->type, hparams.n_embd_head_v * n_head), + 0); + cb(v_states, "v_states", il); + + q_pe = ggml_cont(ctx0, q_pe); // TODO: the CUDA backend used to not support non-cont. RoPE, investigate removing this + q_pe = ggml_rope_ext( + ctx0, q_pe, inp_pos, nullptr, + n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, + ext_factor, attn_factor_scaled, beta_fast, beta_slow + ); + cb(q_pe, "q_pe", il); + + // shared RoPE key + k_pe = ggml_cont(ctx0, k_pe); // TODO: the CUDA backend used to not support non-cont. 
RoPE, investigate removing this + k_pe = ggml_rope_ext( + ctx0, k_pe, inp_pos, nullptr, + n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, + ext_factor, attn_factor_scaled, beta_fast, beta_slow + ); + cb(k_pe, "k_pe", il); + + ggml_tensor * q_states = ggml_concat(ctx0, q_nope, q_pe, 0); + cb(q_states, "q_states", il); + + ggml_tensor * k_states = ggml_concat(ctx0, k_nope, ggml_repeat(ctx0, k_pe, q_pe), 0); + cb(k_states, "k_states", il); + + cur = build_attn(inp_attn, gf, + model.layers[il].wo, NULL, + q_states, k_states, v_states, nullptr, kq_scale, il); + } + + if (il == n_layer - 1) { + // skip computing output for unused tokens + ggml_tensor * inp_out_ids = build_inp_out_ids(); + cur = ggml_get_rows(ctx0, cur, inp_out_ids); + inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids); + } + + ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA); + cb(ffn_inp, "ffn_inp", il); + + cur = build_norm(ffn_inp, + model.layers[il].ffn_norm, NULL, + LLM_NORM_RMS, il); + cb(cur, "ffn_norm", il); + + if ((uint32_t) il < hparams.n_layer_dense_lead) { + cur = build_ffn(cur, + model.layers[il].ffn_up, NULL, NULL, + model.layers[il].ffn_gate, NULL, NULL, + model.layers[il].ffn_down, NULL, NULL, + NULL, + LLM_FFN_SILU, LLM_FFN_PAR, il); + cb(cur, "ffn_out", il); + } else { + // MoE branch + ggml_tensor * moe_out = + build_moe_ffn(cur, + model.layers[il].ffn_gate_inp, + model.layers[il].ffn_up_exps, + model.layers[il].ffn_gate_exps, + model.layers[il].ffn_down_exps, + model.layers[il].ffn_exp_probs_b, + n_expert, n_expert_used, + LLM_FFN_SILU, hparams.expert_weights_norm, + true, hparams.expert_weights_scale, + (llama_expert_gating_func_type) hparams.expert_gating_func, + il); + cb(moe_out, "ffn_moe_out", il); + + // FFN shared expert + { + ggml_tensor * ffn_shexp = build_ffn(cur, + model.layers[il].ffn_up_shexp, NULL, NULL, + model.layers[il].ffn_gate_shexp, NULL, NULL, + model.layers[il].ffn_down_shexp, NULL, NULL, + NULL, + LLM_FFN_SILU, LLM_FFN_PAR, il); + cb(ffn_shexp, "ffn_shexp", il); + + cur = ggml_add(ctx0, moe_out, ffn_shexp); + cb(cur, "ffn_out", il); + } + } + + cur = ggml_add(ctx0, cur, ffn_inp); + + cur = build_cvec(cur, il); + cb(cur, "l_out", il); + + // input for next layer + inpL = cur; + } + + cur = inpL; + + cur = build_norm(cur, + model.output_norm, NULL, + LLM_NORM_RMS, -1); + + cb(cur, "result_norm", -1); + res->t_embd = cur; + + // lm_head + cur = ggml_mul_mat(ctx0, model.output, cur); + + cb(cur, "result_output", -1); + res->t_logits = cur; + + ggml_build_forward_expand(gf, cur); + } +}; + +struct llm_build_bitnet : public llm_graph_context { + llm_build_bitnet(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) { + const int64_t n_embd_head = hparams.n_embd_head_v; + + GGML_ASSERT(n_embd_head == hparams.n_embd_head_k); + + ggml_tensor * cur; + ggml_tensor * inpL; + + inpL = build_inp_embd(model.tok_embd); + + // inp_pos - contains the positions + ggml_tensor * inp_pos = build_inp_pos(); + + auto * inp_attn = build_attn_inp_kv_unified(true, false); + + for (int il = 0; il < n_layer; ++il) { + ggml_tensor * inpSA = inpL; + + cur = build_norm(inpL, + model.layers[il].attn_norm, NULL, + LLM_NORM_RMS, il); + cb(cur, "attn_norm", il); + + // self-attention + { + // compute Q and K and RoPE them + ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur); + if (model.layers[il].wq_scale) { + Qcur = ggml_mul(ctx0, Qcur, model.layers[il].wq_scale); + } + cb(Qcur, "Qcur", il); + if (model.layers[il].bq) { + Qcur = ggml_add(ctx0, Qcur, 
model.layers[il].bq); + cb(Qcur, "Qcur", il); + } + + // B1.K + ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur); + if (model.layers[il].wk_scale) { + Kcur = ggml_mul(ctx0, Kcur, model.layers[il].wk_scale); + } + cb(Kcur, "Kcur", il); + if (model.layers[il].bk) { + Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk); + cb(Kcur, "Kcur", il); + } + + // B1.V + ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur); + if (model.layers[il].wv_scale) { + Vcur = ggml_mul(ctx0, Vcur, model.layers[il].wv_scale); + } + cb(Vcur, "Vcur", il); + if (model.layers[il].bv) { + Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv); + cb(Vcur, "Vcur", il); + } + + Qcur = ggml_rope_ext( + ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos, nullptr, + n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, + ext_factor, attn_factor, beta_fast, beta_slow + ); + cb(Qcur, "Qcur", il); + + Kcur = ggml_rope_ext( + ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos, nullptr, + n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, + ext_factor, attn_factor, beta_fast, beta_slow + ); + cb(Kcur, "Kcur", il); + + cur = build_attn(inp_attn, gf, + NULL, NULL, + Qcur, Kcur, Vcur, nullptr, 1.0f/sqrtf(float(n_embd_head)), il); + + cur = build_norm(cur, + model.layers[il].attn_sub_norm, NULL, + LLM_NORM_RMS, il); + cb(cur, "attn_sub_norm", il); + + cur = build_lora_mm(model.layers[il].wo, cur); + if (model.layers[il].wo_scale) { + cur = ggml_mul(ctx0, cur, model.layers[il].wo_scale); + } + if (model.layers[il].bo) { + cur = ggml_add(ctx0, cur, model.layers[il].bo); + } + cb(cur, "attn_o_out", il); + } + + if (il == n_layer - 1) { + // skip computing output for unused tokens + ggml_tensor * inp_out_ids = build_inp_out_ids(); + cur = ggml_get_rows(ctx0, cur, inp_out_ids); + inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids); + } + + ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA); + cb(ffn_inp, "ffn_inp", il); + + // feed-forward forward + cur = build_norm(ffn_inp, + model.layers[il].ffn_norm, NULL, + LLM_NORM_RMS, il); + cb(cur, "ffn_norm", il); + + cur = build_ffn(cur, + model.layers[il].ffn_up, NULL, model.layers[il].ffn_up_scale, + model.layers[il].ffn_gate, NULL, model.layers[il].ffn_gate_scale, + NULL, NULL, NULL, + NULL, + LLM_FFN_SILU, LLM_FFN_PAR, il); + cb(cur, "ffn_sub_out", il); + + cur = build_norm(cur, + model.layers[il].ffn_sub_norm, NULL, + LLM_NORM_RMS, il); + cb(cur, "ffn_sub_norm", il); + + cur = build_lora_mm(model.layers[il].ffn_down, cur); + if (model.layers[il].ffn_down_scale) { + cur = ggml_mul(ctx0, cur, model.layers[il].ffn_down_scale); + } + cb(cur, "ffn_down", il); + + cur = ggml_add(ctx0, cur, ffn_inp); + cb(cur, "l_out", il); + + // input for next layer + inpL = cur; + } + + cur = inpL; + + cur = build_norm(cur, + model.output_norm, NULL, + LLM_NORM_RMS, -1); + + cb(cur, "result_norm", -1); + res->t_embd = cur; + + // lm_head + // FIXME: do not use model.tok_embd directly, duplicate as model.output + cur = build_lora_mm(model.tok_embd, cur); + + cb(cur, "result_output", -1); + res->t_logits = cur; + + ggml_build_forward_expand(gf, cur); + } +}; + +struct llm_build_t5_enc : public llm_graph_context { + llm_build_t5_enc(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) { + const int64_t n_embd_head = hparams.n_embd_head_v; + + GGML_ASSERT(n_embd_head == hparams.n_embd_head_k); + + ggml_tensor * cur; + ggml_tensor * inpL; + + inpL = build_inp_embd(model.tok_embd); + + ggml_tensor * 
pos_bucket_enc = build_inp_pos_bucket_enc(); + + auto * inp_attn = build_attn_inp_no_cache(); + + for (int il = 0; il < n_layer; ++il) { + ggml_tensor * inpSA = inpL; + + // norm + cur = build_norm(inpL, + model.layers[il].attn_norm_enc, NULL, + LLM_NORM_RMS, il); + cb(cur, "attn_norm", il); + + // self-attention + { + ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq_enc, cur); + cb(Qcur, "Qcur", il); + + ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk_enc, cur); + cb(Kcur, "Kcur", il); + + ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv_enc, cur); + cb(Vcur, "Vcur", il); + + Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens); + Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens); + Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens); + + ggml_tensor * attn_rel_b = model.layers[il].attn_rel_b_enc ? model.layers[il].attn_rel_b_enc : model.layers[0].attn_rel_b_enc; + ggml_tensor * kq_b = build_pos_bias(pos_bucket_enc, attn_rel_b); + + cur = build_attn(inp_attn, gf, + model.layers[il].wo_enc, nullptr, + Qcur, Kcur, Vcur, kq_b, 1.0f, il); + cb(cur, "kqv_out", il); + } + + if (il == n_layer - 1) { + // skip computing output for unused tokens + ggml_tensor * inp_out_ids = build_inp_out_ids(); + cur = ggml_get_rows(ctx0, cur, inp_out_ids); + inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids); + } + + ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA); + cb(ffn_inp, "ffn_inp", il); + + // feed-forward network + { + cur = build_norm(ffn_inp, + model.layers[il].ffn_norm_enc, NULL, + LLM_NORM_RMS, il); + cb(cur, "ffn_norm", il); + + // T5 uses relu, flan-T5 uses gelu-gated + cur = build_ffn(cur, + model.layers[il].ffn_up_enc, NULL, NULL, + model.layers[il].ffn_gate_enc, NULL, NULL, + model.layers[il].ffn_down_enc, NULL, NULL, + NULL, + model.layers[il].ffn_gate_enc ? LLM_FFN_GELU : LLM_FFN_RELU, + model.layers[il].ffn_gate_enc ? 
LLM_FFN_PAR : LLM_FFN_SEQ, + il); + cb(cur, "ffn_out", il); + } + + cur = ggml_add(ctx0, cur, ffn_inp); + cb(cur, "ffn_out", il); + + cur = build_cvec(cur, il); + cb(cur, "l_out", il); + + // input for next layer + inpL = cur; + } + + cur = inpL; + cb(cur, "result_embd", -1); + + cur = build_norm(cur, + model.output_norm_enc, NULL, + LLM_NORM_RMS, -1); + + cb(cur, "result_norm", -1); + res->t_embd = cur; + + ggml_build_forward_expand(gf, cur); + } +}; + +struct llm_build_t5_dec : public llm_graph_context { + llm_build_t5_dec(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) { + const int64_t n_embd_head = hparams.n_embd_head_v; + //const int64_t n_embd_gqa = hparams.n_embd_v_gqa(); + + GGML_ASSERT(n_embd_head == hparams.n_embd_head_k); + + ggml_tensor * cur; + ggml_tensor * inpL; + + inpL = build_inp_embd(model.tok_embd); + + ggml_tensor * embd_enc = build_inp_cross_embd(); + ggml_tensor * pos_bucket_dec = build_inp_pos_bucket_dec(); + + const int64_t n_outputs_enc = embd_enc->ne[1]; + + auto * inp_attn_self = build_attn_inp_kv_unified(true, false); + auto * inp_attn_cross = build_attn_inp_cross(); + + for (int il = 0; il < n_layer; ++il) { + ggml_tensor * inpSA = inpL; + + // norm + cur = build_norm(inpL, + model.layers[il].attn_norm, NULL, + LLM_NORM_RMS, il); + cb(cur, "attn_norm", il); + + // self-attention + { + ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur); + cb(Qcur, "Qcur", il); + + ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur); + cb(Kcur, "Kcur", il); + + ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur); + cb(Vcur, "Vcur", il); + + Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens); + Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens); + Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens); + + ggml_tensor * attn_rel_b = model.layers[il].attn_rel_b ? 
model.layers[il].attn_rel_b : model.layers[0].attn_rel_b; + ggml_tensor * kq_b = build_pos_bias(pos_bucket_dec, attn_rel_b); + + cur = build_attn(inp_attn_self, gf, + model.layers[il].wo, model.layers[il].bo, + Qcur, Kcur, Vcur, kq_b, 1.0f, il); + cb(cur, "kqv_out", il); + } + + cur = ggml_add(ctx0, cur, inpSA); + cb(cur, "cross_inp", il); + + ggml_tensor * inpCA = cur; + + // norm + cur = build_norm(cur, + model.layers[il].attn_norm_cross, NULL, + LLM_NORM_RMS, il); + cb(cur, "attn_norm_cross", il); + + // cross-attention + { + ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq_cross, cur); + cb(Qcur, "Qcur", il); + + ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk_cross, embd_enc); + cb(Kcur, "Kcur", il); + + ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv_cross, embd_enc); + cb(Vcur, "Vcur", il); + + Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens); + Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_outputs_enc); + Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_outputs_enc); + + cur = build_attn(inp_attn_cross, gf, + model.layers[il].wo_cross, nullptr, + Qcur, Kcur, Vcur, nullptr, 1.0f, il); + cb(cur, "kqv_out", il); + + //ggml_tensor * q = ggml_permute(ctx0, Qcur, 0, 2, 1, 3); + //ggml_tensor * k = ggml_cont(ctx0, ggml_permute(ctx0, Kcur, 0, 2, 1, 3)); + + //ggml_tensor * kq = ggml_mul_mat(ctx0, k, q); + //cb(kq, "kq", il); + + //kq = ggml_soft_max_ext(ctx0, kq, KQ_mask_cross, 1.0f, hparams.f_max_alibi_bias); + //cb(kq, "kq_soft_max_ext", il); + + //ggml_tensor * v = ggml_cont(ctx0, ggml_transpose(ctx0, ggml_reshape_2d(ctx0, Vcur, n_embd_gqa, n_outputs_enc))); + //cb(v, "v", il); + + //ggml_tensor * kqv = ggml_mul_mat(ctx0, ggml_reshape_3d(ctx0, v, n_outputs_enc, n_embd_head, n_head_kv), kq); + //cb(kqv, "kqv", il); + + //ggml_tensor * kqv_merged = ggml_permute(ctx0, kqv, 0, 2, 1, 3); + //cb(kqv_merged, "kqv_merged", il); + + //cur = ggml_cont_2d(ctx0, kqv_merged, n_embd_gqa, n_tokens); + //cb(cur, "kqv_merged_cont", il); + + //ggml_build_forward_expand(gf, cur); + + //cur = build_lora_mm(model.layers[il].wo_cross, cur); + //cb(cur, "kqv_out", il); + } + + if (il == n_layer - 1) { + // skip computing output for unused tokens + ggml_tensor * inp_out_ids = build_inp_out_ids(); + cur = ggml_get_rows(ctx0, cur, inp_out_ids); + inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids); + inpCA = ggml_get_rows(ctx0, inpCA, inp_out_ids); + } + + ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpCA); + cb(ffn_inp, "ffn_inp", il); + + // feed-forward network + { + cur = build_norm(ffn_inp, + model.layers[il].ffn_norm, NULL, + LLM_NORM_RMS, il); + cb(cur, "ffn_norm", il); + + // T5 uses relu, flan-T5 uses gelu-gated + cur = build_ffn(cur, + model.layers[il].ffn_up, NULL, NULL, + model.layers[il].ffn_gate, NULL, NULL, + model.layers[il].ffn_down, NULL, NULL, + NULL, + model.layers[il].ffn_gate_enc ? LLM_FFN_GELU : LLM_FFN_RELU, + model.layers[il].ffn_gate_enc ? 
LLM_FFN_PAR : LLM_FFN_SEQ, + il); + cb(cur, "ffn_out", il); + } + + cur = ggml_add(ctx0, cur, ffn_inp); + cb(cur, "ffn_out", il); + + cur = build_cvec(cur, il); + cb(cur, "l_out", il); + + // input for next layer + inpL = cur; + } + + cur = inpL; + cb(cur, "result_embd", -1); + + cur = build_norm(cur, + model.output_norm, NULL, + LLM_NORM_RMS, -1); + + cb(cur, "result_norm", -1); + res->t_embd = cur; + + // lm_head + cur = build_lora_mm(model.output, cur); + + cb(cur, "result_output", -1); + res->t_logits = cur; + + ggml_build_forward_expand(gf, cur); + } +}; + +struct llm_build_jais : public llm_graph_context { + llm_build_jais(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) { + const int64_t n_embd_head = hparams.n_embd_head_v; + const int64_t n_embd_gqa = hparams.n_embd_v_gqa(); + + GGML_ASSERT(n_embd_head == hparams.n_embd_head_k); + + ggml_tensor * cur; + ggml_tensor * inpL; + + inpL = build_inp_embd(model.tok_embd); + + auto * inp_attn = build_attn_inp_kv_unified(true, false); + + for (int il = 0; il < n_layer; ++il) { + cur = build_norm(inpL, + model.layers[il].attn_norm, + model.layers[il].attn_norm_b, + LLM_NORM, il); + cb(cur, "attn_norm", il); + + // self-attention + { + cur = build_lora_mm(model.layers[il].wqkv, cur); + cb(cur, "wqkv", il); + + cur = ggml_add(ctx0, cur, model.layers[il].bqkv); + cb(cur, "bqkv", il); + + ggml_tensor * Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0*cur->nb[0]*(n_embd))); + ggml_tensor * Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*cur->nb[0]*(n_embd))); + ggml_tensor * Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*cur->nb[0]*(n_embd + n_embd_gqa))); + + cb(Qcur, "Qcur", il); + cb(Kcur, "Kcur", il); + cb(Vcur, "Vcur", il); + + Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens); + + cur = build_attn(inp_attn, gf, + model.layers[il].wo, model.layers[il].bo, + Qcur, Kcur, Vcur, nullptr, 1.0f/float(n_embd_head), il); + } + + if (il == n_layer - 1) { + // skip computing output for unused tokens + ggml_tensor * inp_out_ids = build_inp_out_ids(); + cur = ggml_get_rows(ctx0, cur, inp_out_ids); + inpL = ggml_get_rows(ctx0, inpL, inp_out_ids); + } + + // add the input + ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpL); + cb(ffn_inp, "ffn_inp", il); + + // FF + { + cur = build_norm(ffn_inp, + model.layers[il].ffn_norm, + model.layers[il].ffn_norm_b, + LLM_NORM, il); + cb(cur, "ffn_norm", il); + + cur = build_ffn(cur, + model.layers[il].ffn_up, model.layers[il].ffn_up_b, NULL, + model.layers[il].ffn_gate, model.layers[il].ffn_gate_b, NULL, + model.layers[il].ffn_down, model.layers[il].ffn_down_b, NULL, + NULL, + LLM_FFN_SILU, LLM_FFN_PAR, il); + cb(cur, "ffn_out", il); + } + + inpL = ggml_add(ctx0, cur, ffn_inp); + cb(inpL, "l_out", il); + } + + cur = build_norm(inpL, + model.output_norm, + model.output_norm_b, + LLM_NORM, -1); + + cb(cur, "result_norm", -1); + res->t_embd = cur; + + cur = build_lora_mm(model.output, cur); + + cb(cur, "result_output", -1); + res->t_logits = cur; + + ggml_build_forward_expand(gf, cur); + } +}; + +struct llm_build_chatglm : public llm_graph_context { + llm_build_chatglm(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) { + const int64_t n_embd_head = hparams.n_embd_head_v; + const int64_t n_embd_gqa = hparams.n_embd_v_gqa(); + + GGML_ASSERT(n_embd_head == hparams.n_embd_head_k); + + ggml_tensor 
* cur; + ggml_tensor * inpL; + + inpL = build_inp_embd(model.tok_embd); + + // inp_pos - contains the positions + ggml_tensor * inp_pos = build_inp_pos(); + + auto * inp_attn = build_attn_inp_kv_unified(true, false); + + for (int il = 0; il < n_layer; ++il) { + ggml_tensor * inpSA = inpL; + + cur = build_norm(inpL, + model.layers[il].attn_norm, + NULL, + LLM_NORM_RMS, il); + cb(cur, "attn_norm", il); + + // self-attention + { + ggml_tensor * Qcur = nullptr; + ggml_tensor * Kcur = nullptr; + ggml_tensor * Vcur = nullptr; + + if (model.layers[il].wqkv == nullptr) { + Qcur = build_lora_mm(model.layers[il].wq, cur); + if (model.layers[il].bq) { + Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq); + } + Kcur = build_lora_mm(model.layers[il].wk, cur); + if (model.layers[il].bk) { + Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk); + } + Vcur = build_lora_mm(model.layers[il].wv, cur); + if (model.layers[il].bv) { + Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv); + } + } else { + cur = build_lora_mm(model.layers[il].wqkv, cur); + cb(cur, "wqkv", il); + if (model.layers[il].bqkv) { + cur = ggml_add(ctx0, cur, model.layers[il].bqkv); + cb(cur, "bqkv", il); + } + Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd))); + Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd))); + Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd + n_embd_gqa))); + } + + cb(Qcur, "Qcur", il); + cb(Kcur, "Kcur", il); + cb(Vcur, "Vcur", il); + + //printf("freq_base: %f freq_scale: %f ext_factor: %f attn_factor: %f\n", freq_base, freq_scale, ext_factor, attn_factor); + Qcur = ggml_rope_ext( + ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos, nullptr, + n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, + ext_factor, attn_factor, beta_fast, beta_slow + ); + cb(Qcur, "Qcur_rope", il); + + Kcur = ggml_rope_ext( + ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos, nullptr, + n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, + ext_factor, attn_factor, beta_fast, beta_slow + ); + cb(Kcur, "Kcur_rope", il); + + cur = build_attn(inp_attn, gf, + model.layers[il].wo, NULL, + Qcur, Kcur, Vcur, nullptr, 1.0f/sqrtf(float(n_embd_head)), il); + + } + + if (il == n_layer - 1) { + // skip computing output for unused tokens + ggml_tensor * inp_out_ids = build_inp_out_ids(); + cur = ggml_get_rows(ctx0, cur, inp_out_ids); + inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids); + } + + // Add the input + ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA); + cb(ffn_inp, "ffn_inp", il); + + // FF + { + cur = build_norm(ffn_inp, + model.layers[il].ffn_norm, + NULL, + LLM_NORM_RMS, il); + cb(cur, "ffn_norm", il); + + cur = build_ffn(cur, + model.layers[il].ffn_up, NULL, NULL, + NULL, NULL, NULL, + model.layers[il].ffn_down, NULL, NULL, + NULL, + LLM_FFN_SWIGLU, LLM_FFN_SEQ, il); + cb(cur, "ffn_out", il); + + } + + inpL = ggml_add(ctx0, cur, ffn_inp); + cb(inpL, "l_out", il); + } + + cur = build_norm(inpL, + model.output_norm, + NULL, + LLM_NORM_RMS, -1); + + cb(cur, "result_norm", -1); + res->t_embd = cur; + + cur = build_lora_mm(model.output, cur); + + cb(cur, "result_output", -1); + res->t_logits = cur; + + ggml_build_forward_expand(gf, cur); + } +}; + +struct llm_build_nemotron : public llm_graph_context { + llm_build_nemotron(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) { + 
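+        // note: Nemotron uses LayerNorm with bias tensors (LLM_NORM + *_norm_b) rather than
+        // RMSNorm, and a squared-ReLU feed-forward network without a gate projection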
const int64_t n_embd_head = hparams.n_embd_head_v; + + GGML_ASSERT(n_embd_head == hparams.n_embd_head_k); + //GGML_ASSERT(n_embd_head == hparams.n_rot); + + ggml_tensor * cur; + ggml_tensor * inpL; + + inpL = build_inp_embd(model.tok_embd); + + // inp_pos - contains the positions + ggml_tensor * inp_pos = build_inp_pos(); + + auto * inp_attn = build_attn_inp_kv_unified(true, false); + + for (int il = 0; il < n_layer; ++il) { + ggml_tensor * inpSA = inpL; + + // norm + cur = build_norm(inpL, + model.layers[il].attn_norm, + model.layers[il].attn_norm_b, + LLM_NORM, il); + cb(cur, "attn_norm", il); + + // self-attention + { + // compute Q and K and RoPE them + ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur); + cb(Qcur, "Qcur", il); + if (model.layers[il].bq) { + Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq); + cb(Qcur, "Qcur", il); + } + + ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur); + cb(Kcur, "Kcur", il); + if (model.layers[il].bk) { + Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk); + cb(Kcur, "Kcur", il); + } + + ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur); + cb(Vcur, "Vcur", il); + if (model.layers[il].bv) { + Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv); + cb(Vcur, "Vcur", il); + } + + Qcur = ggml_rope_ext( + ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos, nullptr, + n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, + ext_factor, attn_factor, beta_fast, beta_slow + ); + cb(Qcur, "Qcur", il); + + Kcur = ggml_rope_ext( + ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos, nullptr, + n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, + ext_factor, attn_factor, beta_fast, beta_slow + ); + cb(Kcur, "Kcur", il); + + cur = build_attn(inp_attn, gf, + model.layers[il].wo, model.layers[il].bo, + Qcur, Kcur, Vcur, nullptr, 1.0f/sqrtf(float(n_embd_head)), il); + } + + if (il == n_layer - 1) { + // skip computing output for unused tokens + ggml_tensor * inp_out_ids = build_inp_out_ids(); + cur = ggml_get_rows(ctx0, cur, inp_out_ids); + inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids); + } + + ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA); + cb(ffn_inp, "ffn_inp", il); + + // feed-forward network + cur = build_norm(ffn_inp, + model.layers[il].ffn_norm, + model.layers[il].ffn_norm_b, + LLM_NORM, il); + cb(cur, "ffn_norm", il); + + cur = build_ffn(cur, + model.layers[il].ffn_up, model.layers[il].ffn_up_b, NULL, + NULL, NULL, NULL, + model.layers[il].ffn_down, model.layers[il].ffn_down_b, NULL, + NULL, + LLM_FFN_RELU_SQR, LLM_FFN_SEQ, il); + + cur = ggml_add(ctx0, cur, ffn_inp); + cb(cur, "ffn_out", il); + + cur = build_cvec(cur, il); + cb(cur, "l_out", il); + + // input for next layer + inpL = cur; + } + + cur = inpL; + + cur = build_norm(cur, + model.output_norm, model.output_norm_b, + LLM_NORM, -1); + + cb(cur, "result_norm", -1); + res->t_embd = cur; + + // lm_head + cur = build_lora_mm(model.output, cur); + + cb(cur, "result_output", -1); + res->t_logits = cur; + + ggml_build_forward_expand(gf, cur); + } +}; + +struct llm_build_exaone : public llm_graph_context { + llm_build_exaone(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) { + const int64_t n_embd_head = hparams.n_embd_head_v; + + GGML_ASSERT(n_embd_head == hparams.n_embd_head_k); + GGML_ASSERT(n_embd_head == hparams.n_rot); + + ggml_tensor * cur; + ggml_tensor * inpL; + + inpL = build_inp_embd(model.tok_embd); + + // inp_pos - contains the positions + ggml_tensor * 
inp_pos = build_inp_pos(); + + auto * inp_attn = build_attn_inp_kv_unified(true, false); + + for (int il = 0; il < n_layer; ++il) { + ggml_tensor * inpSA = inpL; + + // norm + cur = build_norm(inpL, + model.layers[il].attn_norm, NULL, + LLM_NORM_RMS, il); + cb(cur, "attn_norm", il); + + // self-attention + { + // rope freq factors for llama3; may return nullptr for llama2 and other models + ggml_tensor * rope_factors = static_cast(memory)->cbs.get_rope_factors(n_ctx_per_seq, il); + + // compute Q and K and RoPE them + ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur); + cb(Qcur, "Qcur", il); + if (model.layers[il].bq) { + Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq); + cb(Qcur, "Qcur", il); + } + + ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur); + cb(Kcur, "Kcur", il); + if (model.layers[il].bk) { + Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk); + cb(Kcur, "Kcur", il); + } + + ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur); + cb(Vcur, "Vcur", il); + if (model.layers[il].bv) { + Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv); + cb(Vcur, "Vcur", il); + } + + Qcur = ggml_rope_ext( + ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos, rope_factors, + n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, + ext_factor, attn_factor, beta_fast, beta_slow + ); + cb(Qcur, "Qcur", il); + + Kcur = ggml_rope_ext( + ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos, rope_factors, + n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, + ext_factor, attn_factor, beta_fast, beta_slow + ); + cb(Kcur, "Kcur", il); + + cur = build_attn(inp_attn, gf, + model.layers[il].wo, model.layers[il].bo, + Qcur, Kcur, Vcur, nullptr, 1.0f/sqrtf(float(n_embd_head)), il); + } + + if (il == n_layer - 1) { + // skip computing output for unused tokens + ggml_tensor * inp_out_ids = build_inp_out_ids(); + cur = ggml_get_rows(ctx0, cur, inp_out_ids); + inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids); + } + + ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA); + cb(ffn_inp, "ffn_inp", il); + + // feed-forward network + cur = build_norm(ffn_inp, + model.layers[il].ffn_norm, NULL, + LLM_NORM_RMS, il); + cb(cur, "ffn_norm", il); + + cur = build_ffn(cur, + model.layers[il].ffn_up, NULL, NULL, + model.layers[il].ffn_gate, NULL, NULL, + model.layers[il].ffn_down, NULL, NULL, + NULL, + LLM_FFN_SILU, LLM_FFN_PAR, il); + cb(cur, "ffn_out", il); + + cur = ggml_add(ctx0, cur, ffn_inp); + cb(cur, "ffn_out", il); + + cur = build_cvec(cur, il); + cb(cur, "l_out", il); + + // input for next layer + inpL = cur; + } + + cur = inpL; + + cur = build_norm(cur, + model.output_norm, NULL, + LLM_NORM_RMS, -1); + + cb(cur, "result_norm", -1); + res->t_embd = cur; + + // lm_head + cur = build_lora_mm(model.output, cur); + + cb(cur, "result_output", -1); + res->t_logits = cur; + + ggml_build_forward_expand(gf, cur); + } +}; + +struct llm_build_rwkv6_base : public llm_graph_context { + const llama_model & model; + + llm_build_rwkv6_base(const llama_model & model, const llm_graph_params & params) : llm_graph_context(params), model(model) { + } + + ggml_tensor * build_rwkv6_channel_mix( + const llama_layer * layer, + ggml_tensor * cur, + ggml_tensor * x_prev, + llm_arch arch) const { + ggml_tensor * sx = ggml_sub(ctx0, x_prev, cur); + switch (arch) { + case LLM_ARCH_RWKV6: + { + ggml_tensor * xk = ggml_add(ctx0, ggml_mul(ctx0, sx, layer->channel_mix_lerp_k), cur); + ggml_tensor * xr = ggml_add(ctx0, ggml_mul(ctx0, sx, layer->channel_mix_lerp_r), cur); + + 
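+                    // channel mix: r = sigmoid(W_r*xr) gates the output, k = relu(W_k*xk)^2
+                    // (squared ReLU), and the result is r * (W_v*k)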
ggml_tensor * r = ggml_sigmoid(ctx0, build_lora_mm(layer->channel_mix_receptance, xr)); + ggml_tensor * k = ggml_sqr( + ctx0, + ggml_relu( + ctx0, + build_lora_mm(layer->channel_mix_key, xk) + ) + ); + cur = ggml_mul(ctx0, r, build_lora_mm(layer->channel_mix_value, k)); + } break; + default: + GGML_ABORT("fatal error"); + } + + return cur; + } + + ggml_tensor * build_rwkv6_time_mix( + ggml_cgraph * gf, + ggml_tensor * cur, + ggml_tensor * x_prev, + ggml_tensor * state_copy, + ggml_tensor * state_mask, + const llama_ubatch & ubatch, + int il) const { + const llama_kv_cache_unified * kv_self = static_cast(memory); + + const auto n_tokens = ubatch.n_tokens; + const auto n_seqs = ubatch.n_seqs; + const auto n_embd = hparams.n_embd; + const auto head_size = hparams.wkv_head_size; + const auto n_head = n_embd / head_size; + const auto n_head_kv = hparams.n_head_kv(il); + + const auto kv_head = kv_self->head; + + const auto & layer = model.layers[il]; + + bool is_qrwkv = layer.time_mix_first == nullptr; + + ggml_tensor * sx = ggml_sub(ctx0, x_prev, cur); + ggml_tensor * xxx = ggml_add(ctx0, ggml_mul(ctx0, sx, layer.time_mix_lerp_x), cur); + + xxx = ggml_reshape_4d( + ctx0, + ggml_tanh( + ctx0, + ggml_mul_mat(ctx0, layer.time_mix_w1, xxx) + ), + layer.time_mix_w1->ne[1] / 5, 1, 5, n_tokens + ); + + xxx = ggml_cont(ctx0, ggml_permute(ctx0, xxx, 0, 1, 3, 2)); + + xxx = ggml_mul_mat( + ctx0, + ggml_reshape_4d( + ctx0, + layer.time_mix_w2, + layer.time_mix_w2->ne[0], layer.time_mix_w2->ne[1], 1, 5 + ), + xxx + ); + + ggml_tensor *xw, *xk, *xv, *xr, *xg; + if (layer.time_mix_lerp_fused) { + // fusing these weights makes some performance improvement + sx = ggml_reshape_3d(ctx0, sx, n_embd, 1, n_tokens); + cur = ggml_reshape_3d(ctx0, cur, n_embd, 1, n_tokens); + xxx = ggml_add(ctx0, ggml_mul(ctx0, ggml_add(ctx0, xxx, layer.time_mix_lerp_fused), sx), cur); + xw = ggml_view_2d(ctx0, xxx, n_embd, n_tokens, xxx->nb[1], 0); + xk = ggml_view_2d(ctx0, xxx, n_embd, n_tokens, xxx->nb[1], n_embd * n_tokens * sizeof(float)); + xv = ggml_view_2d(ctx0, xxx, n_embd, n_tokens, xxx->nb[1], n_embd * n_tokens * 2 * sizeof(float)); + xr = ggml_view_2d(ctx0, xxx, n_embd, n_tokens, xxx->nb[1], n_embd * n_tokens * 3 * sizeof(float)); + xg = ggml_view_2d(ctx0, xxx, n_embd, n_tokens, xxx->nb[1], n_embd * n_tokens * 4 * sizeof(float)); + } else { + // for backward compatibility + xw = ggml_view_2d(ctx0, xxx, n_embd, n_tokens, xxx->nb[1], 0); + xk = ggml_view_2d(ctx0, xxx, n_embd, n_tokens, xxx->nb[1], n_embd * n_tokens * sizeof(float)); + xv = ggml_view_2d(ctx0, xxx, n_embd, n_tokens, xxx->nb[1], n_embd * n_tokens * 2 * sizeof(float)); + xr = ggml_view_2d(ctx0, xxx, n_embd, n_tokens, xxx->nb[1], n_embd * n_tokens * 3 * sizeof(float)); + xg = ggml_view_2d(ctx0, xxx, n_embd, n_tokens, xxx->nb[1], n_embd * n_tokens * 4 * sizeof(float)); + + xw = ggml_add(ctx0, ggml_mul(ctx0, ggml_add(ctx0, xw, layer.time_mix_lerp_w), sx), cur); + xk = ggml_add(ctx0, ggml_mul(ctx0, ggml_add(ctx0, xk, layer.time_mix_lerp_k), sx), cur); + xv = ggml_add(ctx0, ggml_mul(ctx0, ggml_add(ctx0, xv, layer.time_mix_lerp_v), sx), cur); + xr = ggml_add(ctx0, ggml_mul(ctx0, ggml_add(ctx0, xr, layer.time_mix_lerp_r), sx), cur); + xg = ggml_add(ctx0, ggml_mul(ctx0, ggml_add(ctx0, xg, layer.time_mix_lerp_g), sx), cur); + } + + ggml_tensor * r = build_lora_mm(layer.time_mix_receptance, xr); + ggml_tensor * k = build_lora_mm(layer.time_mix_key, xk); + ggml_tensor * v = build_lora_mm(layer.time_mix_value, xv); + if (layer.time_mix_receptance_b) { + r = ggml_add(ctx0, 
r, layer.time_mix_receptance_b); + } + if (layer.time_mix_key_b) { + k = ggml_add(ctx0, k, layer.time_mix_key_b); + } + if (layer.time_mix_value_b) { + v = ggml_add(ctx0, v, layer.time_mix_value_b); + } + + ggml_tensor * g = build_lora_mm(layer.time_mix_gate, xg); + if (is_qrwkv) { + g = ggml_sigmoid(ctx0, g); + } else { + g = ggml_silu(ctx0, g); + } + + if (n_head_kv != 0 && n_head_kv != n_head) { + GGML_ASSERT(n_head % n_head_kv == 0); + k = ggml_reshape_4d(ctx0, k, head_size, 1, n_head_kv, n_tokens); + v = ggml_reshape_4d(ctx0, v, head_size, 1, n_head_kv, n_tokens); + ggml_tensor * tmp = ggml_new_tensor_4d(ctx0, GGML_TYPE_F32, head_size, n_head / n_head_kv, n_head_kv, n_tokens); + k = ggml_repeat(ctx0, k, tmp); + v = ggml_repeat(ctx0, v, tmp); + } + + k = ggml_reshape_3d(ctx0, k, head_size, n_head, n_tokens); + v = ggml_reshape_3d(ctx0, v, head_size, n_head, n_tokens); + r = ggml_reshape_3d(ctx0, r, head_size, n_head, n_tokens); + + ggml_tensor * w = ggml_mul_mat( + ctx0, + layer.time_mix_decay_w2, + ggml_tanh( + ctx0, + ggml_mul_mat(ctx0, layer.time_mix_decay_w1, xw) + ) + ); + + w = ggml_add(ctx0, w, layer.time_mix_decay); + w = ggml_exp(ctx0, ggml_neg(ctx0, ggml_exp(ctx0, w))); + w = ggml_reshape_3d(ctx0, w, head_size, n_head, n_tokens); + + if (is_qrwkv) { + // k = k * (1 - w) + k = ggml_sub(ctx0, k, ggml_mul(ctx0, k, w)); + } + + ggml_tensor * wkv_state = build_copy_mask_state( + gf, kv_self->v_l[il], state_copy, state_mask, + hparams.n_embd_v_s(), n_seqs); + + ggml_tensor * wkv_output; + if (is_qrwkv) { + wkv_output = ggml_gated_linear_attn(ctx0, k, v, r, w, wkv_state, pow(head_size, -0.5f)); + } else { + wkv_output = ggml_rwkv_wkv6(ctx0, k, v, r, layer.time_mix_first, w, wkv_state); + } + cur = ggml_view_1d(ctx0, wkv_output, n_embd * n_tokens, 0); + wkv_state = ggml_view_1d(ctx0, wkv_output, n_embd * head_size * n_seqs, n_embd * n_tokens * sizeof(float)); + + ggml_build_forward_expand( + gf, + ggml_cpy( + ctx0, + wkv_state, + ggml_view_1d( + ctx0, + kv_self->v_l[il], + hparams.n_embd_v_s() * n_seqs, + hparams.n_embd_v_s() * kv_head * ggml_element_size(kv_self->v_l[il]) + ) + ) + ); + + if (!is_qrwkv) { + // group norm with head_count groups + cur = ggml_reshape_3d(ctx0, cur, n_embd / n_head, n_head, n_tokens); + cur = ggml_norm(ctx0, cur, 64e-5f); + + // Convert back to regular vectors. 
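+            // then apply the learned per-channel scale/offset (time_mix_ln, time_mix_ln_b)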
+ cur = ggml_reshape_2d(ctx0, cur, n_embd, n_tokens); + cur = ggml_add(ctx0, ggml_mul(ctx0, cur, layer.time_mix_ln), layer.time_mix_ln_b); + } else { + cur = ggml_reshape_2d(ctx0, cur, n_embd, n_tokens); + } + + cur = ggml_mul(ctx0, cur, g); + cur = build_lora_mm(layer.time_mix_output, cur); + + return cur; + } +}; + +struct llm_build_rwkv6 : public llm_build_rwkv6_base { + llm_build_rwkv6(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_build_rwkv6_base(model, params) { + GGML_ASSERT(hparams.token_shift_count == 2); + + ggml_tensor * cur; + ggml_tensor * inpL; + + inpL = build_inp_embd(model.tok_embd); + inpL = build_norm(inpL, model.tok_norm, model.tok_norm_b, LLM_NORM, -1); + + ggml_tensor * state_copy = build_inp_s_copy(); + ggml_tensor * state_mask = build_inp_s_mask(); + + const auto n_embd = hparams.n_embd; + const auto n_seq_tokens = ubatch.n_seq_tokens; + const auto n_seqs = ubatch.n_seqs; + + for (int il = 0; il < n_layer; ++il) { + const llama_layer * layer = &model.layers[il]; + + ggml_tensor * token_shift = build_rwkv_token_shift_load( + gf, state_copy, state_mask, ubatch, il + ); + + ggml_tensor * att_shift = ggml_view_3d(ctx0, token_shift, n_embd, 1, n_seqs, token_shift->nb[1], token_shift->nb[2], 0); + ggml_tensor * ffn_shift = ggml_view_3d(ctx0, token_shift, n_embd, 1, n_seqs, token_shift->nb[1], token_shift->nb[2], n_embd * ggml_element_size(token_shift)); + + ggml_tensor * att_norm = build_norm(inpL, layer->attn_norm, layer->attn_norm_b, LLM_NORM, il); + cb(att_norm, "attn_norm", il); + + ggml_tensor * x_prev = ggml_concat( + ctx0, + att_shift, + ggml_view_3d(ctx0, att_norm, n_embd, n_seq_tokens - 1, n_seqs, att_norm->nb[1], att_norm->nb[2], 0), + 1 + ); + + cur = build_rwkv6_time_mix(gf, att_norm, x_prev, state_copy, state_mask, ubatch, il); + + ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpL); + cb(ffn_inp, "ffn_inp", il); + + ggml_tensor * ffn_norm = build_norm(ffn_inp, layer->attn_norm_2, layer->attn_norm_2_b, LLM_NORM, il); + cb(ffn_norm, "ffn_norm", il); + + x_prev = ggml_concat( + ctx0, + ffn_shift, + ggml_view_3d(ctx0, ffn_norm, n_embd, n_seq_tokens - 1, n_seqs, ffn_norm->nb[1], ffn_norm->nb[2], 0), + 1 + ); + + cur = build_rwkv6_channel_mix(layer, ffn_norm, x_prev, LLM_ARCH_RWKV6); + cur = ggml_add(ctx0, cur, ffn_inp); + + token_shift = ggml_concat(ctx0, + ggml_view_3d(ctx0, att_norm, n_embd, 1, n_seqs, att_norm->nb[1], att_norm->nb[2], (n_seq_tokens-1)*n_embd*ggml_element_size(att_norm)), + ggml_view_3d(ctx0, ffn_norm, n_embd, 1, n_seqs, ffn_norm->nb[1], ffn_norm->nb[2], (n_seq_tokens-1)*n_embd*ggml_element_size(ffn_norm)), + 1 + ); + ggml_build_forward_expand(gf, build_rwkv_token_shift_store(token_shift, ubatch, il)); + + if (hparams.rescale_every_n_layers != 0 && (il + 1) % hparams.rescale_every_n_layers == 0) { + cur = ggml_scale(ctx0, cur, 0.5F); + } + + cur = build_cvec(cur, il); + cb(cur, "l_out", il); + + // input for next layer + inpL = cur; + } + + cur = inpL; + + ggml_tensor * inp_out_ids = build_inp_out_ids(); + + cur = ggml_reshape_2d(ctx0, cur, n_embd, n_tokens); + cur = ggml_get_rows(ctx0, cur, inp_out_ids); + + cur = build_norm(cur, model.output_norm, model.output_norm_b, LLM_NORM, -1); + + cb(cur, "result_norm", -1); + res->t_embd = cur; + + cur = build_lora_mm(model.output, cur); + + cb(cur, "result_output", -1); + res->t_logits = cur; + + ggml_build_forward_expand(gf, cur); + } +}; + +// ref: https://huggingface.co/recursal/QRWKV6-32B-Instruct-Preview-v0.1/blob/main/modeling_rwkv6qwen2.py +struct 
llm_build_rwkv6qwen2 : public llm_build_rwkv6_base { + llm_build_rwkv6qwen2(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_build_rwkv6_base(model, params) { + GGML_ASSERT(n_embd == hparams.n_embd_k_s()); + + ggml_tensor * cur; + ggml_tensor * inpL; + + inpL = build_inp_embd(model.tok_embd); + + ggml_tensor * state_copy = build_inp_s_copy(); + ggml_tensor * state_mask = build_inp_s_mask(); + + const auto n_embd = hparams.n_embd; + const auto n_seq_tokens = ubatch.n_seq_tokens; + const auto n_seqs = ubatch.n_seqs; + + inpL = build_inp_embd(model.tok_embd); + + for (int il = 0; il < n_layer; ++il) { + const llama_layer * layer = &model.layers[il]; + + ggml_tensor * token_shift = build_rwkv_token_shift_load( + gf, state_copy, state_mask, ubatch, il + ); + + ggml_tensor * att_norm = build_norm(inpL, layer->attn_norm, layer->attn_norm_b, LLM_NORM_RMS, il); + cb(att_norm, "attn_norm", il); + + ggml_tensor * x_prev = ggml_concat( + ctx0, + token_shift, + ggml_view_3d(ctx0, att_norm, n_embd, n_seq_tokens - 1, n_seqs, att_norm->nb[1], att_norm->nb[2], 0), + 1 + ); + + cur = build_rwkv6_time_mix(gf, att_norm, x_prev, state_copy, state_mask, ubatch, il); + + token_shift = ggml_view_3d(ctx0, att_norm, n_embd, 1, n_seqs, att_norm->nb[1], att_norm->nb[2], (n_seq_tokens-1)*n_embd*ggml_element_size(att_norm)); + ggml_build_forward_expand(gf, build_rwkv_token_shift_store(token_shift, ubatch, il)); + + ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpL); + cb(ffn_inp, "ffn_inp", il); + + // feed-forward network + cur = build_norm(ffn_inp, + model.layers[il].ffn_norm, NULL, + LLM_NORM_RMS, il); + cb(cur, "ffn_norm", il); + + cur = build_ffn(cur, + model.layers[il].ffn_up, NULL, NULL, + model.layers[il].ffn_gate, NULL, NULL, + model.layers[il].ffn_down, NULL, NULL, + NULL, + LLM_FFN_SILU, LLM_FFN_PAR, il); + cb(cur, "ffn_out", il); + + cur = ggml_add(ctx0, cur, ffn_inp); + + cur = build_cvec(cur, il); + cb(cur, "l_out", il); + + // input for next layer + inpL = cur; + } + + cur = inpL; + ggml_tensor * inp_out_ids = build_inp_out_ids(); + cur = ggml_reshape_2d(ctx0, cur, n_embd, n_tokens); + cur = ggml_get_rows(ctx0, cur, inp_out_ids); + + cur = build_norm(cur, model.output_norm, model.output_norm_b, LLM_NORM_RMS, -1); + + cb(cur, "result_norm", -1); + res->t_embd = cur; + + cur = build_lora_mm(model.output, cur); + + cb(cur, "result_output", -1); + res->t_logits = cur; + + ggml_build_forward_expand(gf, cur); + } +}; + +// ref: https://github.com/facebookresearch/chameleon +// based on the original build_llama() function, changes: +// * qk-norm +// * swin-norm +// * removed bias +// * removed MoE +struct llm_build_chameleon : public llm_graph_context { + llm_build_chameleon(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) { + const int64_t n_embd_head = hparams.n_embd_head_v; + + GGML_ASSERT(n_embd_head == hparams.n_embd_head_k); + GGML_ASSERT(n_embd_head == hparams.n_rot); + + ggml_tensor * cur; + ggml_tensor * inpL; + + inpL = build_inp_embd(model.tok_embd); + + // inp_pos - contains the positions + ggml_tensor * inp_pos = build_inp_pos(); + + auto * inp_attn = build_attn_inp_kv_unified(true, false); + + for (int il = 0; il < n_layer; ++il) { + ggml_tensor * inpSA = inpL; + + // norm + if (hparams.swin_norm) { + cur = inpL; + } else { + cur = build_norm(inpL, + model.layers[il].attn_norm, NULL, + LLM_NORM_RMS, il); + cb(cur, "attn_norm", il); + } + + // self-attention + { + // compute Q and K and RoPE them + 
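+                // note: when attn_q_norm/attn_k_norm are present, Q and K are normalized
+                // per head (qk-norm) before RoPE is applied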
ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur); + cb(Qcur, "Qcur", il); + + ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur); + cb(Kcur, "Kcur", il); + + ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur); + cb(Vcur, "Vcur", il); + + if (model.layers[il].attn_q_norm) { + Qcur = ggml_view_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens, + ggml_element_size(Qcur) * n_embd_head, + ggml_element_size(Qcur) * n_embd_head * n_head, + 0); + cb(Qcur, "Qcur", il); + + Qcur = build_norm(Qcur, + model.layers[il].attn_q_norm, + model.layers[il].attn_q_norm_b, + LLM_NORM, il); + cb(Qcur, "Qcur", il); + } + + if (model.layers[il].attn_k_norm) { + Kcur = ggml_view_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens, + ggml_element_size(Kcur) * n_embd_head, + ggml_element_size(Kcur) * n_embd_head * n_head_kv, + 0); + cb(Kcur, "Kcur", il); + + Kcur = build_norm(Kcur, + model.layers[il].attn_k_norm, + model.layers[il].attn_k_norm_b, + LLM_NORM, il); + cb(Kcur, "Kcur", il); + } + + Qcur = ggml_rope_ext( + ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos, nullptr, + n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, + ext_factor, attn_factor, beta_fast, beta_slow + ); + cb(Qcur, "Qcur", il); + + Kcur = ggml_rope_ext( + ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos, nullptr, + n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, + ext_factor, attn_factor, beta_fast, beta_slow + ); + cb(Kcur, "Kcur", il); + + cur = build_attn(inp_attn, gf, + model.layers[il].wo, nullptr, + Qcur, Kcur, Vcur, nullptr, 1.0f/sqrtf(float(n_embd_head)), il); + + if (hparams.swin_norm) { + cur = build_norm(cur, + model.layers[il].attn_norm, NULL, + LLM_NORM_RMS, il); + } + } + + if (il == n_layer - 1) { + // skip computing output for unused tokens + ggml_tensor * inp_out_ids = build_inp_out_ids(); + cur = ggml_get_rows(ctx0, cur, inp_out_ids); + inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids); + } + + ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA); + cb(ffn_inp, "ffn_inp", il); + + // feed-forward network + if (!hparams.swin_norm) { + cur = build_norm(ffn_inp, + model.layers[il].ffn_norm, NULL, + LLM_NORM_RMS, il); + cb(cur, "ffn_norm", il); + } + + cur = build_ffn(cur, + model.layers[il].ffn_up, NULL, NULL, + model.layers[il].ffn_gate, NULL, NULL, + model.layers[il].ffn_down, NULL, NULL, + NULL, + LLM_FFN_SILU, LLM_FFN_PAR, il); + cb(cur, "ffn_out", il); + + if (hparams.swin_norm) { + cur = build_norm(cur, + model.layers[il].ffn_norm, NULL, + LLM_NORM_RMS, il); + cb(cur, "ffn_norm", il); + } + + cur = ggml_add(ctx0, cur, ffn_inp); + cb(cur, "ffn_out", il); + + cur = build_cvec(cur, il); + cb(cur, "l_out", il); + + // input for next layer + inpL = cur; + } + + cur = inpL; + + cur = build_norm(cur, + model.output_norm, NULL, + LLM_NORM_RMS, -1); + + cb(cur, "result_norm", -1); + res->t_embd = cur; + + // lm_head + cur = build_lora_mm(model.output, cur); + cb(cur, "result_output_with_img_logits", -1); + + // TODO: this suppresses the output of image tokens, which is required to enable text-only outputs. + // Needs to be removed once image outputs are supported. 
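+        // below, a tensor filled with -FLT_MAX is written over the image-token id range
+        // [img_token_start_idx, img_token_end_idx), so text-token logits always dominate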
+ int img_token_end_idx = 8196; + int img_token_start_idx = 4; + int num_img_tokens = img_token_end_idx - img_token_start_idx; + // creates 1d tensor of size num_img_tokens and values -FLT_MAX, + // which ensures that text token values are always at least larger than image token values + ggml_tensor * img_logits = ggml_new_tensor_1d(ctx0, GGML_TYPE_F32, num_img_tokens); + img_logits = ggml_clamp(ctx0, img_logits, -FLT_MAX, -FLT_MAX); + cb(img_logits, "img_logits", -1); + + cur = ggml_set_1d(ctx0, cur, img_logits, ggml_element_size(cur) * img_token_start_idx); + + cb(cur, "result_output", -1); + res->t_logits = cur; + + ggml_build_forward_expand(gf, cur); + } +}; + +struct llm_build_wavtokenizer_dec : public llm_graph_context { + llm_build_wavtokenizer_dec(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) { + ggml_tensor * cur; + ggml_tensor * inpL; + + inpL = build_inp_embd(model.tok_embd); + + cur = ggml_cont(ctx0, ggml_transpose(ctx0, inpL)); + + cur = ggml_conv_1d_ph(ctx0, model.conv1d, cur, 1, 1); + cur = ggml_add(ctx0, cur, model.conv1d_b); + + // posnet + for (uint32_t il = 0; il < hparams.posnet.n_layer; ++il) { + const auto & layer = model.layers[il].posnet; + + inpL = cur; + + switch (il) { + case 0: + case 1: + case 3: + case 4: + { + cur = build_norm(cur, + layer.norm1, + layer.norm1_b, + LLM_NORM_GROUP, 0); + + cur = ggml_mul(ctx0, ggml_sigmoid(ctx0, cur), cur); + + cur = ggml_conv_1d_ph(ctx0, layer.conv1, cur, 1, 1); + cur = ggml_add(ctx0, cur, layer.conv1_b); + + cur = build_norm(cur, + layer.norm2, + layer.norm2_b, + LLM_NORM_GROUP, 0); + + cur = ggml_mul(ctx0, ggml_sigmoid(ctx0, cur), cur); + + cur = ggml_conv_1d_ph(ctx0, layer.conv2, cur, 1, 1); + cur = ggml_add(ctx0, cur, layer.conv2_b); + + cur = ggml_add(ctx0, cur, inpL); + } break; + case 2: + { + cur = build_norm(cur, + layer.attn_norm, + layer.attn_norm_b, + LLM_NORM_GROUP, 0); + + ggml_tensor * q; + ggml_tensor * k; + ggml_tensor * v; + + q = ggml_conv_1d_ph(ctx0, layer.attn_q, cur, 1, 1); + k = ggml_conv_1d_ph(ctx0, layer.attn_k, cur, 1, 1); + v = ggml_conv_1d_ph(ctx0, layer.attn_v, cur, 1, 1); + + q = ggml_add(ctx0, q, layer.attn_q_b); + k = ggml_add(ctx0, k, layer.attn_k_b); + v = ggml_add(ctx0, v, layer.attn_v_b); + + q = ggml_cont(ctx0, ggml_transpose(ctx0, q)); + k = ggml_cont(ctx0, ggml_transpose(ctx0, k)); + + ggml_tensor * kq = ggml_mul_mat(ctx0, k, q); + + kq = ggml_soft_max_ext(ctx0, kq, nullptr, 1.0f/sqrtf(float(hparams.posnet.n_embd)), 0.0f); + + cur = ggml_mul_mat(ctx0, kq, v); + + cur = ggml_conv_1d_ph(ctx0, layer.attn_o, cur, 1, 1); + cur = ggml_add(ctx0, cur, layer.attn_o_b); + + cur = ggml_add(ctx0, cur, inpL); + } break; + case 5: + { + cur = build_norm(cur, + layer.norm, + layer.norm_b, + LLM_NORM_GROUP, 0); + } break; + default: GGML_ABORT("unknown posnet layer"); + }; + } + + cur = ggml_cont(ctx0, ggml_transpose(ctx0, cur)); + + cur = build_norm(cur, + model.tok_norm, + model.tok_norm_b, + LLM_NORM, -1); + + cur = ggml_cont(ctx0, ggml_transpose(ctx0, cur)); + + inpL = cur; + + // convnext + for (uint32_t il = 0; il < hparams.convnext.n_layer; ++il) { + const auto & layer = model.layers[il].convnext; + + cur = inpL; + + cur = ggml_conv_1d_dw_ph(ctx0, layer.dw, cur, 1, 1); + cur = ggml_add(ctx0, cur, layer.dw_b); + + cur = ggml_cont(ctx0, ggml_transpose(ctx0, cur)); + + cur = build_norm(cur, + layer.norm, + layer.norm_b, + LLM_NORM, -1); + + cur = build_ffn(cur, + layer.pw1, layer.pw1_b, NULL, + NULL, NULL, NULL, + layer.pw2, layer.pw2_b, 
NULL, + NULL, + LLM_FFN_GELU, LLM_FFN_SEQ, il); + + cur = ggml_mul(ctx0, cur, layer.gamma); + + cur = ggml_cont(ctx0, ggml_transpose(ctx0, cur)); + + inpL = ggml_add(ctx0, cur, inpL); + } + + cur = inpL; + + cur = ggml_cont(ctx0, ggml_transpose(ctx0, cur)); + + cur = build_norm(cur, + model.output_norm, + model.output_norm_b, + LLM_NORM, -1); + + // lm_head + cur = build_lora_mm(model.output, cur); + + cur = ggml_add(ctx0, cur, model.output_b); + + cb(cur, "result_embd", -1); + res->t_embd = cur; + + ggml_build_forward_expand(gf, cur); + } +}; + +llama_memory_i * llama_model::create_memory() const { + llama_memory_i * res; + + switch (arch) { + case LLM_ARCH_RWKV6: + case LLM_ARCH_RWKV6QWEN2: + case LLM_ARCH_MAMBA: + { + res = new llama_kv_cache_unified(hparams, { + /*.get_rope_factors =*/ nullptr + }); + } break; + default: + { + res = new llama_kv_cache_unified(hparams, { + /*.get_rope_factors =*/ [this](uint32_t n_ctx_per_seq, int il) { + // choose long/short freq factors based on the context size + if (layers[il].rope_freqs != nullptr) { + return layers[il].rope_freqs; + } + + if (n_ctx_per_seq > hparams.n_ctx_orig_yarn) { + return layers[il].rope_long; + } + + return layers[il].rope_short; + } + }); + } + } + + return res; +} + +llm_graph_result_ptr llama_model::build_graph( + const llm_graph_params & params, + ggml_cgraph * gf, + llm_graph_type type) const { + std::unique_ptr llm; + + switch (arch) { + case LLM_ARCH_LLAMA: + case LLM_ARCH_MINICPM: + case LLM_ARCH_GRANITE: + case LLM_ARCH_GRANITE_MOE: + { + llm = std::make_unique(*this, params, gf); + } break; + case LLM_ARCH_DECI: + { + llm = std::make_unique(*this, params, gf); + } break; + case LLM_ARCH_BAICHUAN: + { + llm = std::make_unique(*this, params, gf); + } break; + case LLM_ARCH_FALCON: + { + llm = std::make_unique(*this, params, gf); + } break; + case LLM_ARCH_GROK: + { + llm = std::make_unique(*this, params, gf); + } break; + case LLM_ARCH_STARCODER: + { + llm = std::make_unique(*this, params, gf); + } break; + case LLM_ARCH_REFACT: + { + llm = std::make_unique(*this, params, gf); + } break; + case LLM_ARCH_BERT: + case LLM_ARCH_JINA_BERT_V2: + case LLM_ARCH_NOMIC_BERT: + { + llm = std::make_unique(*this, params, gf); + } break; + case LLM_ARCH_BLOOM: + { + llm = std::make_unique(*this, params, gf); + } break; + case LLM_ARCH_MPT: + { + llm = std::make_unique(*this, params, gf); + } break; + case LLM_ARCH_STABLELM: + { + llm = std::make_unique(*this, params, gf); + } break; + case LLM_ARCH_QWEN: + { + llm = std::make_unique(*this, params, gf); + } break; + case LLM_ARCH_QWEN2: + { + llm = std::make_unique(*this, params, gf); + } break; + case LLM_ARCH_QWEN2VL: + { + llm = std::make_unique(*this, params, gf); + } break; + case LLM_ARCH_QWEN2MOE: + { + llm = std::make_unique(*this, params, gf); + } break; + case LLM_ARCH_PHI2: + { + llm = std::make_unique(*this, params, gf); + } break; + case LLM_ARCH_PHI3: + case LLM_ARCH_PHIMOE: + { + llm = std::make_unique(*this, params, gf); + } break; + case LLM_ARCH_PLAMO: + { + llm = std::make_unique(*this, params, gf); + } break; + case LLM_ARCH_GPT2: + { + llm = std::make_unique(*this, params, gf); + } break; + case LLM_ARCH_CODESHELL: + { + llm = std::make_unique(*this, params, gf); + } break; + case LLM_ARCH_ORION: + { + llm = std::make_unique(*this, params, gf); + } break; + case LLM_ARCH_INTERNLM2: + { + llm = std::make_unique(*this, params, gf); + } break; + case LLM_ARCH_MINICPM3: + { + llm = std::make_unique(*this, params, gf); + } break; + case LLM_ARCH_GEMMA: + { + llm = 
std::make_unique(*this, params, gf); + } break; + case LLM_ARCH_GEMMA2: + { + llm = std::make_unique(*this, params, gf); + } break; + case LLM_ARCH_GEMMA3: + { + llm = std::make_unique(*this, params, gf); + } break; + case LLM_ARCH_STARCODER2: + { + llm = std::make_unique(*this, params, gf); + } break; + case LLM_ARCH_MAMBA: + { + llm = std::make_unique(*this, params, gf); + } break; + case LLM_ARCH_XVERSE: + { + llm = std::make_unique(*this, params, gf); + } break; + case LLM_ARCH_COMMAND_R: + { + llm = std::make_unique(*this, params, gf); + } break; + case LLM_ARCH_COHERE2: + { + llm = std::make_unique(*this, params, gf); + } break; + case LLM_ARCH_DBRX: + { + llm = std::make_unique(*this, params, gf); + } break; + case LLM_ARCH_OLMO: + { + llm = std::make_unique(*this, params, gf); + } break; + case LLM_ARCH_OLMO2: + { + llm = std::make_unique(*this, params, gf); + } break; + case LLM_ARCH_OLMOE: + { + llm = std::make_unique(*this, params, gf); + } break; + case LLM_ARCH_OPENELM: + { + llm = std::make_unique(*this, params, gf); + } break; + case LLM_ARCH_GPTNEOX: + { + llm = std::make_unique(*this, params, gf); + } break; + case LLM_ARCH_ARCTIC: + { + llm = std::make_unique(*this, params, gf); + } break; + case LLM_ARCH_DEEPSEEK: + { + llm = std::make_unique(*this, params, gf); + } break; + case LLM_ARCH_DEEPSEEK2: + { + llm = std::make_unique(*this, params, gf); + } break; + case LLM_ARCH_CHATGLM: + { + llm = std::make_unique(*this, params, gf); + } break; + case LLM_ARCH_BITNET: + { + llm = std::make_unique(*this, params, gf); + } break; + case LLM_ARCH_T5: + { + switch (type) { + case LLM_GRAPH_TYPE_ENCODER: + llm = std::make_unique(*this, params, gf); + break; + case LLM_GRAPH_TYPE_DEFAULT: + case LLM_GRAPH_TYPE_DECODER: + llm = std::make_unique(*this, params, gf); + break; + default: + GGML_ABORT("invalid graph type"); + }; + } break; + //case LLM_ARCH_T5ENCODER: + // { + // llm.build_t5_enc(gf); + // } break; + case LLM_ARCH_JAIS: + { + llm = std::make_unique(*this, params, gf); + } break; + case LLM_ARCH_NEMOTRON: + { + llm = std::make_unique(*this, params, gf); + } break; + case LLM_ARCH_EXAONE: + { + llm = std::make_unique(*this, params, gf); + } break; + case LLM_ARCH_RWKV6: + { + llm = std::make_unique(*this, params, gf); + } break; + case LLM_ARCH_RWKV6QWEN2: + { + llm = std::make_unique(*this, params, gf); + } break; + case LLM_ARCH_CHAMELEON: + { + llm = std::make_unique(*this, params, gf); + } break; + case LLM_ARCH_WAVTOKENIZER_DEC: + { + llm = std::make_unique(*this, params, gf); + } break; + default: + GGML_ABORT("fatal error"); + } + + // add on pooling layer + llm->build_pooling(gf, cls, cls_b, cls_out, cls_out_b); + + return std::move(llm->res); +} + +// +// interface implementation +// + +llama_model_params llama_model_default_params() { + llama_model_params result = { + /*.devices =*/ nullptr, + /*.n_gpu_layers =*/ 0, + /*.split_mode =*/ LLAMA_SPLIT_MODE_LAYER, + /*.main_gpu =*/ 0, + /*.tensor_split =*/ nullptr, + /*.progress_callback =*/ nullptr, + /*.progress_callback_user_data =*/ nullptr, + /*.kv_overrides =*/ nullptr, + /*.vocab_only =*/ false, + /*.use_mmap =*/ true, + /*.use_mlock =*/ false, + /*.check_tensors =*/ false, + }; + +#ifdef GGML_USE_METAL + // note: we usually have plenty of VRAM, so by default offload all layers to the GPU + result.n_gpu_layers = 999; +#endif + + return result; +} + +const llama_vocab * llama_model_get_vocab(const llama_model * model) { + return &model->vocab; +} + +void llama_free_model(llama_model * model) { + 
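+    // deprecated alias, kept for backwards compatibility - forwards to llama_model_free()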
llama_model_free(model); +} + +void llama_model_free(llama_model * model) { + delete model; +} + +int32_t llama_model_n_ctx_train(const llama_model * model) { + return model->hparams.n_ctx_train; +} + +int32_t llama_model_n_embd(const llama_model * model) { + return model->hparams.n_embd; +} + +int32_t llama_model_n_layer(const llama_model * model) { + return model->hparams.n_layer; +} + +int32_t llama_model_n_head(const llama_model * model) { + return model->hparams.n_head(); +} + +int32_t llama_model_n_head_kv(const llama_model * model) { + return model->hparams.n_head_kv(); +} + +// deprecated +int32_t llama_n_ctx_train(const llama_model * model) { + return llama_model_n_ctx_train(model); +} + +// deprecated +int32_t llama_n_embd(const llama_model * model) { + return llama_model_n_embd(model); +} + +// deprecated +int32_t llama_n_layer(const llama_model * model) { + return llama_model_n_layer(model); +} + +// deprecated +int32_t llama_n_head(const llama_model * model) { + return llama_model_n_head(model); +} + +llama_rope_type llama_model_rope_type(const llama_model * model) { + switch (model->arch) { + // these models do not use RoPE + case LLM_ARCH_GPT2: + case LLM_ARCH_GPTJ: + case LLM_ARCH_MPT: + case LLM_ARCH_REFACT: + case LLM_ARCH_BLOOM: + case LLM_ARCH_MAMBA: + case LLM_ARCH_JINA_BERT_V2: + case LLM_ARCH_T5: + case LLM_ARCH_T5ENCODER: + case LLM_ARCH_JAIS: + case LLM_ARCH_RWKV6: + case LLM_ARCH_RWKV6QWEN2: + case LLM_ARCH_WAVTOKENIZER_DEC: + return LLAMA_ROPE_TYPE_NONE; + + // use what we call a normal RoPE, operating on pairs of consecutive head values + case LLM_ARCH_LLAMA: + case LLM_ARCH_DECI: + case LLM_ARCH_BAICHUAN: + case LLM_ARCH_STARCODER: + case LLM_ARCH_PLAMO: + case LLM_ARCH_ORION: + case LLM_ARCH_INTERNLM2: + case LLM_ARCH_MINICPM: + case LLM_ARCH_XVERSE: + case LLM_ARCH_COMMAND_R: + case LLM_ARCH_COHERE2: + case LLM_ARCH_OLMO: + case LLM_ARCH_ARCTIC: + case LLM_ARCH_DEEPSEEK: + case LLM_ARCH_DEEPSEEK2: + case LLM_ARCH_CHATGLM: + case LLM_ARCH_GRANITE: + case LLM_ARCH_GRANITE_MOE: + case LLM_ARCH_CHAMELEON: + return LLAMA_ROPE_TYPE_NORM; + + // the pairs of head values are offset by n_rot/2 + case LLM_ARCH_FALCON: + case LLM_ARCH_GROK: + case LLM_ARCH_DBRX: + case LLM_ARCH_BERT: + case LLM_ARCH_NOMIC_BERT: + case LLM_ARCH_STABLELM: + case LLM_ARCH_BITNET: + case LLM_ARCH_QWEN: + case LLM_ARCH_QWEN2: + case LLM_ARCH_QWEN2MOE: + case LLM_ARCH_OLMO2: + case LLM_ARCH_OLMOE: + case LLM_ARCH_PHI2: + case LLM_ARCH_PHI3: + case LLM_ARCH_PHIMOE: + case LLM_ARCH_GEMMA: + case LLM_ARCH_GEMMA2: + case LLM_ARCH_GEMMA3: + case LLM_ARCH_STARCODER2: + case LLM_ARCH_OPENELM: + case LLM_ARCH_GPTNEOX: + case LLM_ARCH_CODESHELL: + case LLM_ARCH_NEMOTRON: + case LLM_ARCH_EXAONE: + case LLM_ARCH_MINICPM3: + return LLAMA_ROPE_TYPE_NEOX; + + case LLM_ARCH_QWEN2VL: + return LLAMA_ROPE_TYPE_MROPE; + + // all model arches should be listed explicitly here + case LLM_ARCH_UNKNOWN: + GGML_ABORT("unknown architecture"); + } + + return LLAMA_ROPE_TYPE_NONE; +} + +float llama_model_rope_freq_scale_train(const llama_model * model) { + return model->hparams.rope_freq_scale_train; +} + +int32_t llama_model_meta_val_str(const llama_model * model, const char * key, char * buf, size_t buf_size) { + const auto & it = model->gguf_kv.find(key); + if (it == model->gguf_kv.end()) { + if (buf_size > 0) { + buf[0] = '\0'; + } + return -1; + } + return snprintf(buf, buf_size, "%s", it->second.c_str()); +} + +int32_t llama_model_meta_count(const llama_model * model) { + return (int)model->gguf_kv.size(); +} + 
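+// A minimal sketch of how the metadata accessors are typically combined to enumerate
+// all GGUF key/value pairs of a loaded model (key/val buffer sizes are arbitrary here):
+//
+//     char key[256];
+//     char val[256];
+//     for (int32_t i = 0; i < llama_model_meta_count(model); ++i) {
+//         llama_model_meta_key_by_index    (model, i, key, sizeof(key));
+//         llama_model_meta_val_str_by_index(model, i, val, sizeof(val));
+//         printf("%s = %s\n", key, val);
+//     }
+//
+// On an out-of-range index the *_by_index accessors below return -1 and set an empty string;
+// otherwise they return the full string length, truncating the copied value to buf_size.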
+int32_t llama_model_meta_key_by_index(const llama_model * model, int i, char * buf, size_t buf_size) { + if (i < 0 || i >= (int)model->gguf_kv.size()) { + if (buf_size > 0) { + buf[0] = '\0'; + } + return -1; + } + auto it = model->gguf_kv.begin(); + std::advance(it, i); + return snprintf(buf, buf_size, "%s", it->first.c_str()); +} + +int32_t llama_model_meta_val_str_by_index(const llama_model * model, int32_t i, char * buf, size_t buf_size) { + if (i < 0 || i >= (int)model->gguf_kv.size()) { + if (buf_size > 0) { + buf[0] = '\0'; + } + return -1; + } + auto it = model->gguf_kv.begin(); + std::advance(it, i); + return snprintf(buf, buf_size, "%s", it->second.c_str()); +} + +int32_t llama_model_desc(const llama_model * model, char * buf, size_t buf_size) { + return snprintf(buf, buf_size, "%s", model->desc().c_str()); +} + +uint64_t llama_model_size(const llama_model * model) { + return model->size(); +} + +const char * llama_model_chat_template(const llama_model * model, const char * name) { + const auto key = name ? LLM_KV(model->arch, name)(LLM_KV_TOKENIZER_CHAT_TEMPLATE_N) + : LLM_KV(model->arch)(LLM_KV_TOKENIZER_CHAT_TEMPLATE); + const auto & it = model->gguf_kv.find(key); + if (it == model->gguf_kv.end()) { + return nullptr; + } + + return it->second.c_str(); +} + +uint64_t llama_model_n_params(const llama_model * model) { + return model->n_elements(); +} + +bool llama_model_has_encoder(const llama_model * model) { + switch (model->arch) { + case LLM_ARCH_T5: return true; + case LLM_ARCH_T5ENCODER: return true; + default: return false; + } +} + +bool llama_model_has_decoder(const llama_model * model) { + switch (model->arch) { + case LLM_ARCH_T5ENCODER: return false; + default: return true; + } +} + +llama_token llama_model_decoder_start_token(const llama_model * model) { + return model->hparams.dec_start_token_id; +} + +bool llama_model_is_recurrent(const llama_model * model) { + switch (model->arch) { + case LLM_ARCH_MAMBA: return true; + case LLM_ARCH_RWKV6: return true; + case LLM_ARCH_RWKV6QWEN2: return true; + default: return false; + } +} + +const std::vector> & llama_internal_get_tensor_map(const llama_model * model) { + return model->tensors_by_name; } diff --git a/src/llama-model.h b/src/llama-model.h index a7c30444786fd..55c26a92b02d2 100644 --- a/src/llama-model.h +++ b/src/llama-model.h @@ -2,7 +2,9 @@ #include "llama.h" #include "llama-arch.h" +#include "llama-graph.h" #include "llama-hparams.h" +#include "llama-memory.h" #include "llama-vocab.h" #include @@ -10,6 +12,8 @@ #include #include +struct llama_cparams; +struct llama_ubatch; struct llama_model_loader; // available models @@ -347,7 +351,7 @@ struct llama_model { std::string desc() const; size_t size() const; - size_t max_nodes() const; + size_t n_tensors() const; size_t n_devices() const; // total number of parameters in the model @@ -362,9 +366,22 @@ struct llama_model { const struct ggml_tensor * get_tensor(const char * name) const; + // TODO: move this to new llm_arch_model_i interface + llama_memory_i * create_memory() const; // TODO: params + + // TODO: move this to new llm_arch_model_i interface + llm_graph_result_ptr build_graph( + const llm_graph_params & params, + ggml_cgraph * gf, + llm_graph_type type) const; + private: struct impl; std::unique_ptr pimpl; }; const char * llm_type_name(llm_type type); + +// For internal test use +// TODO: remove +const std::vector> & llama_internal_get_tensor_map(const llama_model * model); diff --git a/src/llama.cpp b/src/llama.cpp index 4a4e91490107c..81e1dd1d0873a 
100644 --- a/src/llama.cpp +++ b/src/llama.cpp @@ -2,9517 +2,28 @@ #include "llama-chat.h" #include "llama-mmap.h" -#include "llama-context.h" #include "llama-vocab.h" -#include "llama-sampling.h" -#include "llama-kv-cache.h" #include "llama-model-loader.h" #include "llama-model.h" #include "ggml.h" -#include "ggml-alloc.h" #include "ggml-backend.h" -#include "ggml-cpp.h" #include -#include -#include -#include -#include #include #include #include #include #include -#include #if defined(_MSC_VER) #pragma warning(disable: 4244 4267) // possible loss of data #endif -// Returns 0 on success, -1 on error, and -2 on cancellation via llama_progress_callback -static int llama_model_load(const std::string & fname, std::vector & splits, llama_model & model, llama_model_params & params) { - // loading time will be recalculated after the first eval, so - // we take page faults deferred by mmap() into consideration - model.t_load_us = 0; - time_meas tm(model.t_load_us); - - model.t_start_us = tm.t_start_us; - - try { - llama_model_loader ml(fname, splits, params.use_mmap, params.check_tensors, params.kv_overrides); - - ml.print_info(); - - model.hparams.vocab_only = params.vocab_only; - - try { - model.load_arch(ml); - } catch(const std::exception & e) { - throw std::runtime_error("error loading model architecture: " + std::string(e.what())); - } - try { - model.load_hparams(ml); - } catch(const std::exception & e) { - throw std::runtime_error("error loading model hyperparameters: " + std::string(e.what())); - } - try { - model.load_vocab(ml); - } catch(const std::exception & e) { - throw std::runtime_error("error loading model vocabulary: " + std::string(e.what())); - } - - model.load_stats(ml); - model.print_info(); - - if (params.vocab_only) { - LLAMA_LOG_INFO("%s: vocab only - skipping tensors\n", __func__); - return 0; - } - - if (!model.load_tensors(ml)) { - return -2; - } - } catch (const std::exception & err) { - LLAMA_LOG_ERROR("%s: error loading model: %s\n", __func__, err.what()); - return -1; - } - - return 0; -} - -// -// llm_build -// - -using llm_build_cb = std::function; - -enum llm_ffn_op_type { - LLM_FFN_SILU, - LLM_FFN_GELU, - LLM_FFN_RELU, - LLM_FFN_RELU_SQR, - LLM_FFN_SWIGLU, -}; - -enum llm_ffn_gate_type { - LLM_FFN_SEQ, - LLM_FFN_PAR, // ffn_gate is parallel to ffn_up -}; - -enum llm_norm_type { - LLM_NORM, - LLM_NORM_RMS, - LLM_NORM_GROUP, -}; - -static struct ggml_tensor * llm_build_inp_embd( - struct ggml_context * ctx, - struct llama_context & lctx, - const llama_hparams & hparams, - const llama_ubatch & ubatch, - struct ggml_tensor * tok_embd, - const llm_build_cb & cb) { - const int64_t n_embd = hparams.n_embd; - - struct ggml_tensor * inpL; - - if (ubatch.token) { - lctx.inp_tokens = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, ubatch.n_tokens); - cb(lctx.inp_tokens, "inp_tokens", -1); - ggml_set_input(lctx.inp_tokens); - - inpL = ggml_get_rows(ctx, tok_embd, lctx.inp_tokens); - - // apply lora for embedding tokens if needed - for (auto & it : lctx.lora) { - struct llama_adapter_lora_weight * lw = it.first->get_weight(tok_embd); - if (lw == nullptr) { - continue; - } - const float adapter_scale = it.second; - const float scale = lw->get_scale(it.first->alpha, adapter_scale); - struct ggml_tensor * inpL_delta = ggml_scale(ctx, ggml_mul_mat( - ctx, lw->b, // non-transposed lora_b - ggml_get_rows(ctx, lw->a, lctx.inp_tokens) - ), scale); - inpL = ggml_add(ctx, inpL, inpL_delta); - } - } else { - lctx.inp_embd = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, n_embd, ubatch.n_tokens); - inpL 
= lctx.inp_embd; - ggml_set_input(lctx.inp_embd); - } - - // For Granite architecture - if (hparams.f_embedding_scale != 0.0f) { - inpL = ggml_scale(ctx, inpL, hparams.f_embedding_scale); - } - - cb(inpL, "inp_embd", -1); - - return inpL; -} - -static void llm_build_kv_store( - struct ggml_context * ctx, - const llama_hparams & hparams, - const llama_cparams & cparams, - const llama_kv_cache & kv, - struct ggml_cgraph * graph, - struct ggml_tensor * k_cur, - struct ggml_tensor * v_cur, - int32_t n_tokens, - int32_t kv_head, - const llm_build_cb & cb, - int64_t il) { - const int64_t n_ctx = cparams.n_ctx; - - const int64_t n_embd_k_gqa = hparams.n_embd_k_gqa(il); - const int64_t n_embd_v_gqa = hparams.n_embd_v_gqa(il); - - GGML_ASSERT(kv.size == n_ctx); - - struct ggml_tensor * k_cache_view = ggml_view_1d(ctx, kv.k_l[il], n_tokens*n_embd_k_gqa, ggml_row_size(kv.k_l[il]->type, n_embd_k_gqa)*kv_head); - cb(k_cache_view, "k_cache_view", il); - - // note: storing RoPE-ed version of K in the KV cache - ggml_build_forward_expand(graph, ggml_cpy(ctx, k_cur, k_cache_view)); - - assert(v_cur->ne[0] == n_embd_v_gqa && v_cur->ne[1] == n_tokens); - - struct ggml_tensor * v_cache_view = nullptr; - - if (cparams.flash_attn) { - v_cache_view = ggml_view_1d(ctx, kv.v_l[il], n_tokens*n_embd_v_gqa, ggml_row_size(kv.v_l[il]->type, n_embd_v_gqa)*kv_head); - } else { - // note: the V cache is transposed when not using flash attention - v_cache_view = ggml_view_2d(ctx, kv.v_l[il], n_tokens, n_embd_v_gqa, - ( n_ctx)*ggml_element_size(kv.v_l[il]), - (kv_head)*ggml_element_size(kv.v_l[il])); - - v_cur = ggml_transpose(ctx, v_cur); - } - cb(v_cache_view, "v_cache_view", il); - - ggml_build_forward_expand(graph, ggml_cpy(ctx, v_cur, v_cache_view)); -} - -// do mat_mul, while optionally apply lora -static struct ggml_tensor * llm_build_lora_mm( - struct llama_context & lctx, - struct ggml_context * ctx0, - struct ggml_tensor * w, - struct ggml_tensor * cur) { - struct ggml_tensor * res = ggml_mul_mat(ctx0, w, cur); - for (auto & it : lctx.lora) { - struct llama_adapter_lora_weight * lw = it.first->get_weight(w); - if (lw == nullptr) { - continue; - } - const float adapter_scale = it.second; - const float scale = lw->get_scale(it.first->alpha, adapter_scale); - struct ggml_tensor * ab_cur = ggml_mul_mat( - ctx0, lw->b, - ggml_mul_mat(ctx0, lw->a, cur) - ); - ab_cur = ggml_scale(ctx0, ab_cur, scale); - res = ggml_add(ctx0, res, ab_cur); - } - return res; -} - -// do mat_mul_id, while optionally apply lora -static struct ggml_tensor * llm_build_lora_mm_id( - struct llama_context & lctx, - struct ggml_context * ctx0, - struct ggml_tensor * w, // struct ggml_tensor * as - struct ggml_tensor * cur, // struct ggml_tensor * b - struct ggml_tensor * ids) { - struct ggml_tensor * res = ggml_mul_mat_id(ctx0, w, cur, ids); - for (auto & it : lctx.lora) { - struct llama_adapter_lora_weight * lw = it.first->get_weight(w); - if (lw == nullptr) { - continue; - } - const float alpha = it.first->alpha; - const float rank = (float) lw->b->ne[0]; - const float scale = alpha ? 
it.second * alpha / rank : it.second; - struct ggml_tensor * ab_cur = ggml_mul_mat_id( - ctx0, lw->b, - ggml_mul_mat_id(ctx0, lw->a, cur, ids), - ids - ); - ab_cur = ggml_scale(ctx0, ab_cur, scale); - res = ggml_add(ctx0, res, ab_cur); - } - return res; -} - -static struct ggml_tensor * llm_build_norm( - struct ggml_context * ctx, - struct ggml_tensor * cur, - const llama_hparams & hparams, - struct ggml_tensor * mw, - struct ggml_tensor * mb, - llm_norm_type type, - const llm_build_cb & cb, - int il) { - switch (type) { - case LLM_NORM: cur = ggml_norm (ctx, cur, hparams.f_norm_eps); break; - case LLM_NORM_RMS: cur = ggml_rms_norm (ctx, cur, hparams.f_norm_rms_eps); break; - case LLM_NORM_GROUP: - { - cur = ggml_reshape_3d(ctx, cur, cur->ne[0], 1, cur->ne[1]); - cur = ggml_group_norm(ctx, cur, hparams.n_norm_groups, hparams.f_norm_group_eps); - cur = ggml_reshape_2d(ctx, cur, cur->ne[0], cur->ne[2]); - } break; - } - - if (mw || mb) { - cb(cur, "norm", il); - } - - if (mw) { - cur = ggml_mul(ctx, cur, mw); - if (mb) { - cb(cur, "norm_w", il); - } - } - - if (mb) { - cur = ggml_add(ctx, cur, mb); - } - - return cur; -} - -static struct ggml_tensor * llm_build_ffn( - struct ggml_context * ctx, - struct llama_context & lctx, - struct ggml_tensor * cur, - struct ggml_tensor * up, - struct ggml_tensor * up_b, - struct ggml_tensor * up_s, - struct ggml_tensor * gate, - struct ggml_tensor * gate_b, - struct ggml_tensor * gate_s, - struct ggml_tensor * down, - struct ggml_tensor * down_b, - struct ggml_tensor * down_s, - struct ggml_tensor * act_scales, - llm_ffn_op_type type_op, - llm_ffn_gate_type type_gate, - const llm_build_cb & cb, - int il) { - struct ggml_tensor * tmp = up ? llm_build_lora_mm(lctx, ctx, up, cur) : cur; - cb(tmp, "ffn_up", il); - - if (up_b) { - tmp = ggml_add(ctx, tmp, up_b); - cb(tmp, "ffn_up_b", il); - } - - if (up_s) { - tmp = ggml_mul(ctx, tmp, up_s); - cb(tmp, "ffn_up_s", il); - } - - if (gate) { - switch (type_gate) { - case LLM_FFN_SEQ: - { - cur = llm_build_lora_mm(lctx, ctx, gate, tmp); - cb(cur, "ffn_gate", il); - } break; - case LLM_FFN_PAR: - { - cur = llm_build_lora_mm(lctx, ctx, gate, cur); - cb(cur, "ffn_gate", il); - } break; - } - - if (gate_b) { - cur = ggml_add(ctx, cur, gate_b); - cb(cur, "ffn_gate_b", il); - } - - if (gate_s) { - cur = ggml_mul(ctx, cur, gate_s); - cb(cur, "ffn_gate_s", il); - } - - } else { - cur = tmp; - } - - switch (type_op) { - case LLM_FFN_SILU: - { - cur = ggml_silu(ctx, cur); - cb(cur, "ffn_silu", il); - } break; - case LLM_FFN_GELU: - { - cur = ggml_gelu(ctx, cur); - cb(cur, "ffn_gelu", il); - if (act_scales != NULL) { - cur = ggml_div(ctx, cur, act_scales); - cb(cur, "ffn_act", il); - } - } break; - case LLM_FFN_RELU: - { - cur = ggml_relu(ctx, cur); - cb(cur, "ffn_relu", il); - } break; - case LLM_FFN_RELU_SQR: - { - cur = ggml_relu(ctx, cur); - cb(cur, "ffn_relu", il); - - cur = ggml_sqr(ctx, cur); - cb(cur, "ffn_sqr(relu)", il); - } break; - case LLM_FFN_SWIGLU: - { - // Project to 4h. 
If using swiglu double the output width, see https://arxiv.org/pdf/2002.05202.pdf - int64_t split_point = cur->ne[0] / 2; - struct ggml_tensor * x0 = ggml_cont(ctx, ggml_view_2d(ctx, cur, split_point, cur->ne[1], cur->nb[1], 0)); - struct ggml_tensor * x1 = ggml_cont(ctx, ggml_view_2d(ctx, cur, split_point, cur->ne[1], cur->nb[1], split_point * ggml_element_size(cur))); - - x0 = ggml_silu(ctx, x0); - cb(cur, "ffn_silu", il); - - cur = ggml_mul(ctx, x0, x1); - cb(cur, "ffn_mul", il); - } break; - } - - if (type_gate == LLM_FFN_PAR) { - cur = ggml_mul(ctx, cur, tmp); - cb(cur, "ffn_gate_par", il); - } - - if (down) { - cur = llm_build_lora_mm(lctx, ctx, down, cur); - } - - if (down_b) { - cb(cur, "ffn_down", il); - } - - if (down_b) { - cur = ggml_add(ctx, cur, down_b); - } - - if (down_s) { - cur = ggml_mul(ctx, cur, down_s); - cb(cur, "ffn_down_s", il); - } - - return cur; -} - -static struct ggml_tensor * llm_build_moe_ffn( - struct ggml_context * ctx, - struct llama_context & lctx, - struct ggml_tensor * cur, - struct ggml_tensor * gate_inp, - struct ggml_tensor * up_exps, - struct ggml_tensor * gate_exps, - struct ggml_tensor * down_exps, - struct ggml_tensor * exp_probs_b, - int64_t n_expert, - int64_t n_expert_used, - llm_ffn_op_type type_op, - bool norm_w, - bool scale_w, - float w_scale, -llama_expert_gating_func_type gating_op, - const llm_build_cb & cb, - int il) { - int64_t n_embd = cur->ne[0]; - int64_t n_tokens = cur->ne[1]; - - ggml_tensor * logits = llm_build_lora_mm(lctx, ctx, gate_inp, cur); // [n_expert, n_tokens] - cb(logits, "ffn_moe_logits", il); - - ggml_tensor * probs = nullptr; - switch (gating_op) { - case LLAMA_EXPERT_GATING_FUNC_TYPE_SOFTMAX: - { - probs = ggml_soft_max(ctx, logits); // [n_expert, n_tokens] - } break; - case LLAMA_EXPERT_GATING_FUNC_TYPE_SIGMOID: - { - probs = ggml_sigmoid(ctx, logits); // [n_expert, n_tokens] - } break; - default: - GGML_ABORT("fatal error"); - } - cb(probs, "ffn_moe_probs", il); - - // add experts selection bias - introduced in DeepSeek V3 - // leave probs unbiased as it's later used to get expert weights - ggml_tensor * selection_probs = probs; - if (exp_probs_b != nullptr) { - selection_probs = ggml_add(ctx, probs, exp_probs_b); - cb(selection_probs, "ffn_moe_probs_biased", il); - } - - // select experts - ggml_tensor * selected_experts = ggml_top_k(ctx, selection_probs, n_expert_used); // [n_expert_used, n_tokens] - cb(selected_experts->src[0], "ffn_moe_argsort", il); - cb(selected_experts, "ffn_moe_topk", il); - - ggml_tensor * weights = ggml_get_rows(ctx, - ggml_reshape_3d(ctx, probs, 1, n_expert, n_tokens), selected_experts); // [1, n_expert_used, n_tokens] - cb(weights, "ffn_moe_weights", il); - - if (norm_w) { - weights = ggml_reshape_2d(ctx, weights, n_expert_used, n_tokens); - - ggml_tensor * weights_sum = ggml_sum_rows(ctx, weights); // [1, n_tokens] - cb(weights_sum, "ffn_moe_weights_sum", il); - - weights = ggml_div(ctx, weights, weights_sum); // [n_expert_used, n_tokens] - cb(weights, "ffn_moe_weights_norm", il); - - weights = ggml_reshape_3d(ctx, weights, 1, n_expert_used, n_tokens); - } - if (scale_w) { - weights = ggml_scale(ctx, weights, w_scale); - cb(weights, "ffn_moe_weights_scaled", il); - } - - cur = ggml_reshape_3d(ctx, cur, n_embd, 1, n_tokens); - ggml_tensor * up = llm_build_lora_mm_id(lctx, ctx, up_exps, cur, selected_experts); // [n_ff, n_expert_used, n_tokens] - cb(up, "ffn_moe_up", il); - - ggml_tensor * gate = llm_build_lora_mm_id(lctx, ctx, gate_exps, cur, selected_experts); // [n_ff, 
n_expert_used, n_tokens] - cb(gate, "ffn_moe_gate", il); - - switch (type_op) { - case LLM_FFN_SILU: - { - gate = ggml_silu(ctx, gate); - cb(gate, "ffn_moe_silu", il); - } break; - case LLM_FFN_GELU: - { - gate = ggml_gelu(ctx, gate); - cb(gate, "ffn_moe_gelu", il); - } break; - default: - GGML_ABORT("fatal error"); - } - - ggml_tensor * par = ggml_mul(ctx, up, gate); // [n_ff, n_expert_used, n_tokens] - cb(par, "ffn_moe_gate_par", il); - - ggml_tensor * experts = llm_build_lora_mm_id(lctx, ctx, down_exps, par, selected_experts); // [n_embd, n_expert_used, n_tokens] - cb(experts, "ffn_moe_down", il); - - experts = ggml_mul(ctx, experts, weights); - - // aggregate experts - ggml_tensor * moe_out = nullptr; - for (int i = 0; i < n_expert_used; ++i) { - ggml_tensor * cur_expert = ggml_view_2d(ctx, experts, n_embd, n_tokens, - experts->nb[2], i*experts->nb[1]); - - if (i == 0) { - moe_out = cur_expert; - } else { - moe_out = ggml_add(ctx, moe_out, cur_expert); - } - } - - if (n_expert_used == 1) { - // avoid returning a non-contiguous tensor - moe_out = ggml_cont(ctx, moe_out); - } - - return moe_out; -} - -static struct ggml_tensor * llm_build_kqv( - struct ggml_context * ctx, - struct llama_context & lctx, - const llama_kv_cache & kv, - struct ggml_cgraph * graph, - struct ggml_tensor * wo, - struct ggml_tensor * wo_b, - struct ggml_tensor * q_cur, - struct ggml_tensor * kq_mask, - int32_t n_tokens, - int32_t n_kv, - float kq_scale, - const llm_build_cb & cb, - int il) { - const llama_model & model = lctx.model; - const llama_hparams & hparams = lctx.model.hparams; - const llama_cparams & cparams = lctx.cparams; - - const int64_t n_ctx = cparams.n_ctx; - const int64_t n_head = hparams.n_head(il); - const int64_t n_head_kv = hparams.n_head_kv(il); - const int64_t n_embd_head_k = hparams.n_embd_head_k; - const int64_t n_embd_k_gqa = hparams.n_embd_k_gqa(il); - const int64_t n_embd_head_v = hparams.n_embd_head_v; - const int64_t n_embd_v_gqa = hparams.n_embd_v_gqa(il); - - struct ggml_tensor * q = ggml_permute(ctx, q_cur, 0, 2, 1, 3); - cb(q, "q", il); - - struct ggml_tensor * k = - ggml_view_3d(ctx, kv.k_l[il], - n_embd_head_k, n_kv, n_head_kv, - ggml_row_size(kv.k_l[il]->type, n_embd_k_gqa), - ggml_row_size(kv.k_l[il]->type, n_embd_head_k), - 0); - cb(k, "k", il); - - struct ggml_tensor * cur; - - if (cparams.flash_attn) { - GGML_UNUSED(model); - GGML_UNUSED(n_ctx); - - // split cached v into n_head heads (not transposed) - struct ggml_tensor * v = - ggml_view_3d(ctx, kv.v_l[il], - n_embd_head_v, n_kv, n_head_kv, - ggml_row_size(kv.v_l[il]->type, n_embd_v_gqa), - ggml_row_size(kv.v_l[il]->type, n_embd_head_v), - 0); - cb(v, "v", il); - - cur = ggml_flash_attn_ext(ctx, q, k, v, kq_mask, kq_scale, hparams.f_max_alibi_bias, - hparams.attn_soft_cap ? 
hparams.f_attn_logit_softcapping : 0.0f); - - ggml_flash_attn_ext_set_prec(cur, GGML_PREC_F32); - - cur = ggml_reshape_2d(ctx, cur, n_embd_head_v*n_head, n_tokens); - } else { - struct ggml_tensor * kq = ggml_mul_mat(ctx, k, q); - cb(kq, "kq", il); - - // note: this op tends to require high floating point range - // while for some models F16 is enough, for others it is not, so we default to F32 here - ggml_mul_mat_set_prec(kq, GGML_PREC_F32); - - if (model.arch == LLM_ARCH_GROK) { - // need to do the following: - // multiply by attn_output_multiplyer of 0.08838834764831845 - // and then : - // kq = 30 * tanh(kq / 30) - // before the softmax below - - kq = ggml_tanh(ctx, ggml_scale(ctx, kq, 0.08838834764831845f/30.0f)); - kq = ggml_scale(ctx, kq, 30); - } - - if (hparams.attn_soft_cap) { - kq = ggml_scale(ctx, kq, 1.0f / hparams.f_attn_logit_softcapping); - kq = ggml_tanh(ctx, kq); - kq = ggml_scale(ctx, kq, hparams.f_attn_logit_softcapping); - } - - kq = ggml_soft_max_ext(ctx, kq, kq_mask, kq_scale, hparams.f_max_alibi_bias); - cb(kq, "kq_soft_max_ext", il); - - GGML_ASSERT(kv.size == n_ctx); - - // split cached v into n_head heads - struct ggml_tensor * v = - ggml_view_3d(ctx, kv.v_l[il], - n_kv, n_embd_head_v, n_head_kv, - ggml_element_size(kv.v_l[il])*n_ctx, - ggml_element_size(kv.v_l[il])*n_ctx*n_embd_head_v, - 0); - cb(v, "v", il); - - struct ggml_tensor * kqv = ggml_mul_mat(ctx, v, kq); - cb(kqv, "kqv", il); - - struct ggml_tensor * kqv_merged = ggml_permute(ctx, kqv, 0, 2, 1, 3); - cb(kqv_merged, "kqv_merged", il); - - cur = ggml_cont_2d(ctx, kqv_merged, n_embd_head_v*n_head, n_tokens); - cb(cur, "kqv_merged_cont", il); - } - - ggml_build_forward_expand(graph, cur); - - if (wo) { - cur = llm_build_lora_mm(lctx, ctx, wo, cur); - } - - if (wo_b) { - cb(cur, "kqv_wo", il); - } - - if (wo_b) { - cur = ggml_add(ctx, cur, wo_b); - } - - return cur; -} - -static struct ggml_tensor * llm_build_kv( - struct ggml_context * ctx, - struct llama_context & lctx, - const llama_kv_cache & kv, - struct ggml_cgraph * graph, - struct ggml_tensor * wo, - struct ggml_tensor * wo_b, - struct ggml_tensor * k_cur, - struct ggml_tensor * v_cur, - struct ggml_tensor * q_cur, - struct ggml_tensor * kq_mask, - int32_t n_tokens, - int32_t kv_head, - int32_t n_kv, - float kq_scale, - const llm_build_cb & cb, - int il) { - const llama_hparams & hparams = lctx.model.hparams; - const llama_cparams & cparams = lctx.cparams; - - // these nodes are added to the graph together so that they are not reordered - // by doing so, the number of splits in the graph is reduced - ggml_build_forward_expand(graph, q_cur); - ggml_build_forward_expand(graph, k_cur); - ggml_build_forward_expand(graph, v_cur); - - llm_build_kv_store(ctx, hparams, cparams, kv, graph, k_cur, v_cur, n_tokens, kv_head, cb, il); - - struct ggml_tensor * cur; - - cur = llm_build_kqv(ctx, lctx, kv, graph, wo, wo_b, q_cur, kq_mask, n_tokens, n_kv, kq_scale, cb, il); - cb(cur, "kqv_out", il); - - return cur; -} - -static struct ggml_tensor * llm_build_copy_mask_state( - struct ggml_context * ctx, - struct ggml_cgraph * graph, - struct ggml_tensor * s, - struct ggml_tensor * state_copy, - struct ggml_tensor * state_mask, - int32_t n_state, - int32_t kv_size, - int32_t kv_head, - int32_t n_kv, - int32_t n_seqs) { - struct ggml_tensor * states = ggml_reshape_2d(ctx, s, n_state, kv_size); - - // copy states - // NOTE: assuming the copy destinations are ALL contained between kv_head and kv_head + n_kv - // this shrinks the tensors's ne[1] to n_kv - states = 
ggml_get_rows(ctx, states, state_copy); - - // clear states of sequences which are starting at the beginning of this batch - // FIXME: zero-out NANs? - states = ggml_mul(ctx, states, state_mask); - - // copy states which won't be changed further (between n_seqs and n_kv) - ggml_build_forward_expand(graph, - ggml_cpy(ctx, - ggml_view_1d(ctx, states, n_state*(n_kv - n_seqs), n_seqs*n_state*ggml_element_size(states)), - ggml_view_1d(ctx, s, n_state*(n_kv - n_seqs), (kv_head + n_seqs)*n_state*ggml_element_size(s)))); - - // the part of the states that will be used and modified - return ggml_view_2d(ctx, states, n_state, n_seqs, states->nb[1], 0); -} - -// TODO: split -static struct ggml_tensor * llm_build_mamba( - struct ggml_context * ctx, - struct llama_context & lctx, - const llama_ubatch & ubatch, - struct ggml_cgraph * graph, - struct ggml_tensor * cur, - struct ggml_tensor * state_copy, - struct ggml_tensor * state_mask, - int32_t kv_head, - int32_t n_kv, - const llm_build_cb & cb, - int il) { - const llama_model & model = lctx.model; - const llama_hparams & hparams = model.hparams; - const llama_kv_cache & kv = lctx.kv_self; - const int64_t d_conv = hparams.ssm_d_conv; - const int64_t d_inner = hparams.ssm_d_inner; - const int64_t d_state = hparams.ssm_d_state; - const int64_t dt_rank = hparams.ssm_dt_rank; - const int64_t n_seqs = ubatch.n_seqs; - // Some variants of Mamba arch (e.g. FalconMamba do apply layer norm on B and Dt layers) - const bool ssm_dt_b_c_rms = hparams.ssm_dt_b_c_rms; - // Use the same RMS norm as the final layer norm - const float norm_rms_eps = hparams.f_norm_rms_eps; - - const int64_t n_seq_tokens = ubatch.n_seq_tokens; - - GGML_ASSERT(n_seqs != 0); - GGML_ASSERT(ubatch.equal_seqs); - GGML_ASSERT(ubatch.n_tokens == n_seq_tokens * n_seqs); - - struct ggml_tensor * conv_states_all = kv.k_l[il]; - struct ggml_tensor * ssm_states_all = kv.v_l[il]; - - // (ab)using the KV cache to store the states - struct ggml_tensor * conv = llm_build_copy_mask_state(ctx, - graph, conv_states_all, state_copy, state_mask, - hparams.n_embd_k_s(), kv.size, kv_head, n_kv, n_seqs); - conv = ggml_reshape_3d(ctx, conv, d_conv - 1, d_inner, n_seqs); - struct ggml_tensor * ssm = llm_build_copy_mask_state(ctx, - graph, ssm_states_all, state_copy, state_mask, - hparams.n_embd_v_s(), kv.size, kv_head, n_kv, n_seqs); - ssm = ggml_reshape_3d(ctx, ssm, d_state, d_inner, n_seqs); - - // {n_embd, n_tokens} => {n_embd, n_seq_tokens, n_seqs} - cur = ggml_reshape_3d(ctx, cur, cur->ne[0], n_seq_tokens, n_seqs); - - // {n_embd, 2*d_inner} @ {n_embd, n_seq_tokens, n_seqs} => {2*d_inner, n_seq_tokens, n_seqs} - struct ggml_tensor * xz = llm_build_lora_mm(lctx, ctx, model.layers[il].ssm_in, cur); - // split the above in two - // => {d_inner, n_seq_tokens, n_seqs} - struct ggml_tensor * x = ggml_view_3d(ctx, xz, d_inner, xz->ne[1], xz->ne[2], xz->nb[1], xz->nb[2], 0); - struct ggml_tensor * z = ggml_view_3d(ctx, xz, d_inner, xz->ne[1], xz->ne[2], xz->nb[1], xz->nb[2], d_inner*ggml_element_size(xz)); - - // conv - { - // => {d_conv - 1 + n_seq_tokens, d_inner, n_seqs} - struct ggml_tensor * conv_x = ggml_concat(ctx, conv, ggml_transpose(ctx, x), 0); - - // copy last (d_conv - 1) columns back into the state cache - struct ggml_tensor * last_conv = ggml_view_3d(ctx, conv_x, d_conv - 1, d_inner, n_seqs, conv_x->nb[1], conv_x->nb[2], n_seq_tokens*(conv_x->nb[0])); - - ggml_build_forward_expand(graph, - ggml_cpy(ctx, last_conv, - ggml_view_1d(ctx, conv_states_all, - (d_conv - 1)*(d_inner)*(n_seqs), - 
kv_head*(d_conv - 1)*(d_inner)*ggml_element_size(conv_states_all)))); - - // 1D convolution - // The equivalent is to make a self-overlapping view of conv_x - // over d_conv columns at each stride in the 3rd dimension, - // then element-wise multiply that with the conv1d weight, - // then sum the elements of each row, - // (the last two steps are a dot product over rows (also doable with mul_mat)) - // then permute away the ne[0] dimension, - // and then you're left with the resulting x tensor. - // For simultaneous sequences, all sequences need to have the same length. - x = ggml_ssm_conv(ctx, conv_x, model.layers[il].ssm_conv1d); - - // bias - x = ggml_add(ctx, x, model.layers[il].ssm_conv1d_b); - - x = ggml_silu(ctx, x); - } - - // ssm - { - // {d_inner, dt_rank + 2*d_state} @ {d_inner, n_seq_tokens, n_seqs} => {dt_rank + 2*d_state, n_seq_tokens, n_seqs} - struct ggml_tensor * x_db = llm_build_lora_mm(lctx, ctx, model.layers[il].ssm_x, x); - // split - struct ggml_tensor * dt = ggml_view_3d(ctx, x_db, dt_rank, n_seq_tokens, n_seqs, x_db->nb[1], x_db->nb[2], 0); - struct ggml_tensor * B = ggml_view_3d(ctx, x_db, d_state, n_seq_tokens, n_seqs, x_db->nb[1], x_db->nb[2], ggml_element_size(x_db)*dt_rank); - struct ggml_tensor * C = ggml_view_3d(ctx, x_db, d_state, n_seq_tokens, n_seqs, x_db->nb[1], x_db->nb[2], ggml_element_size(x_db)*(dt_rank+d_state)); - - // Some Mamba variants (e.g. FalconMamba) apply RMS norm in B, C & Dt layers - if (ssm_dt_b_c_rms) { - dt = ggml_rms_norm(ctx, dt, norm_rms_eps); - B = ggml_rms_norm(ctx, B, norm_rms_eps); - C = ggml_rms_norm(ctx, C, norm_rms_eps); - } - - // {dt_rank, d_inner} @ {dt_rank, n_seq_tokens, n_seqs} => {d_inner, n_seq_tokens, n_seqs} - dt = llm_build_lora_mm(lctx, ctx, model.layers[il].ssm_dt, dt); - dt = ggml_add(ctx, dt, model.layers[il].ssm_dt_b); - - // Custom operator to optimize the parallel associative scan - // as described in the Annex D of the Mamba paper. 
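 - // Illustrative sketch (not from the llama.cpp sources): a sequential reference of the
 - // selective state-space update that the fused ggml_ssm_scan op below computes, written
 - // for one sequence over plain float buffers. The names d_inner/d_state/n_tokens mirror
 - // the shapes in the surrounding code; the softplus on dt and the exp(dt*A) discretization
 - // follow the Mamba paper and are assumptions about the fused kernel, not a copy of it.
 - // The helper name ssm_scan_reference is made up for this sketch.
 - //
 - // #include <cmath>
 - // #include <vector>
 - //
 - // static void ssm_scan_reference(
 - //         int d_inner, int d_state, int n_tokens,
 - //         const std::vector<float> & x,   // [n_tokens * d_inner]
 - //         const std::vector<float> & dt,  // [n_tokens * d_inner]
 - //         const std::vector<float> & A,   // [d_inner * d_state]
 - //         const std::vector<float> & B,   // [n_tokens * d_state]
 - //         const std::vector<float> & C,   // [n_tokens * d_state]
 - //         std::vector<float> & h,         // [d_inner * d_state], carried state
 - //         std::vector<float> & y) {       // [n_tokens * d_inner], output
 - //     for (int t = 0; t < n_tokens; ++t) {
 - //         for (int i = 0; i < d_inner; ++i) {
 - //             const float dt_sp = std::log1p(std::exp(dt[t*d_inner + i])); // naive softplus
 - //             const float x_dt  = x[t*d_inner + i] * dt_sp;
 - //             float acc = 0.0f;
 - //             for (int j = 0; j < d_state; ++j) {
 - //                 // h = exp(dt*A) * h + (dt*x) * B, then project the state with C
 - //                 float & s = h[i*d_state + j];
 - //                 s = std::exp(dt_sp * A[i*d_state + j]) * s + x_dt * B[t*d_state + j];
 - //                 acc += s * C[t*d_state + j];
 - //             }
 - //             y[t*d_inner + i] = acc;
 - //         }
 - //     }
 - // }
 - //
 - // The D skip connection and the silu(z) gate are applied outside the scan, exactly as in
 - // the graph code that follows.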
- // => {d_inner, n_seq_tokens, n_seqs} and {d_state, d_inner, n_seqs} - struct ggml_tensor * y_ssm = ggml_ssm_scan(ctx, ssm, x, dt, model.layers[il].ssm_a, B, C); - - // store last states - ggml_build_forward_expand(graph, - ggml_cpy(ctx, - ggml_view_1d(ctx, y_ssm, d_state*d_inner*n_seqs, x->nb[3]), - ggml_view_1d(ctx, ssm_states_all, d_state*d_inner*n_seqs, kv_head*d_state*d_inner*ggml_element_size(ssm_states_all)))); - - struct ggml_tensor * y = ggml_view_3d(ctx, y_ssm, d_inner, n_seq_tokens, n_seqs, x->nb[1], x->nb[2], 0); - - // TODO: skip computing output earlier for unused tokens - - // {d_inner, n_seq_tokens, n_seqs} * {d_inner} => {d_inner, n_seq_tokens, n_seqs} - y = ggml_add(ctx, y, ggml_mul(ctx, x, model.layers[il].ssm_d)); - y = ggml_mul(ctx, y, ggml_silu(ctx, ggml_cont(ctx, z))); - - // {d_inner, n_embd} @ {d_inner, n_seq_tokens, n_seqs} => {n_embd, n_seq_tokens, n_seqs} - cur = llm_build_lora_mm(lctx, ctx, model.layers[il].ssm_out, y); - } - - // {n_embd, n_seq_tokens, n_seqs} => {n_embd, n_tokens} - cur = ggml_reshape_2d(ctx, cur, cur->ne[0], n_seq_tokens * n_seqs); - cb(cur, "mamba_out", il); - - return cur; -} - -static struct ggml_tensor * llm_build_rwkv6_time_mix( - struct llama_context & lctx, - struct ggml_context * ctx, - const struct llama_layer * layer, - struct ggml_tensor * cur, - struct ggml_tensor * x_prev, - struct ggml_tensor ** wkv_state, - size_t wkv_head_size, - size_t head_count_kv) { - size_t n_embd = cur->ne[0]; - size_t n_seq_tokens = cur->ne[1]; - size_t n_seqs = cur->ne[2]; - - size_t head_size = wkv_head_size; - size_t head_count = n_embd / head_size; - - size_t n_tokens = n_seqs * n_seq_tokens; - - bool is_qrwkv = layer->time_mix_first == nullptr; - - struct ggml_tensor * sx = ggml_sub(ctx, x_prev, cur); - - sx = ggml_reshape_2d(ctx, sx, n_embd, n_tokens); - cur = ggml_reshape_2d(ctx, cur, n_embd, n_tokens); - - struct ggml_tensor * xxx = ggml_add(ctx, ggml_mul(ctx, sx, layer->time_mix_lerp_x), cur); - - xxx = ggml_reshape_4d( - ctx, - ggml_tanh( - ctx, - ggml_mul_mat(ctx, layer->time_mix_w1, xxx) - ), - layer->time_mix_w1->ne[1] / 5, 1, 5, n_tokens - ); - - xxx = ggml_cont(ctx, ggml_permute(ctx, xxx, 0, 1, 3, 2)); - - xxx = ggml_mul_mat( - ctx, - ggml_reshape_4d( - ctx, - layer->time_mix_w2, - layer->time_mix_w2->ne[0], layer->time_mix_w2->ne[1], 1, 5 - ), - xxx - ); - - struct ggml_tensor *xw, *xk, *xv, *xr, *xg; - if (layer->time_mix_lerp_fused) { - // fusing these weights makes some performance improvement - sx = ggml_reshape_3d(ctx, sx, n_embd, 1, n_tokens); - cur = ggml_reshape_3d(ctx, cur, n_embd, 1, n_tokens); - xxx = ggml_add(ctx, ggml_mul(ctx, ggml_add(ctx, xxx, layer->time_mix_lerp_fused), sx), cur); - xw = ggml_view_2d(ctx, xxx, n_embd, n_tokens, xxx->nb[1], 0); - xk = ggml_view_2d(ctx, xxx, n_embd, n_tokens, xxx->nb[1], n_embd * n_tokens * sizeof(float)); - xv = ggml_view_2d(ctx, xxx, n_embd, n_tokens, xxx->nb[1], n_embd * n_tokens * 2 * sizeof(float)); - xr = ggml_view_2d(ctx, xxx, n_embd, n_tokens, xxx->nb[1], n_embd * n_tokens * 3 * sizeof(float)); - xg = ggml_view_2d(ctx, xxx, n_embd, n_tokens, xxx->nb[1], n_embd * n_tokens * 4 * sizeof(float)); - } else { - // for backward compatibility - xw = ggml_view_2d(ctx, xxx, n_embd, n_tokens, xxx->nb[1], 0); - xk = ggml_view_2d(ctx, xxx, n_embd, n_tokens, xxx->nb[1], n_embd * n_tokens * sizeof(float)); - xv = ggml_view_2d(ctx, xxx, n_embd, n_tokens, xxx->nb[1], n_embd * n_tokens * 2 * sizeof(float)); - xr = ggml_view_2d(ctx, xxx, n_embd, n_tokens, xxx->nb[1], n_embd * n_tokens * 3 * 
sizeof(float)); - xg = ggml_view_2d(ctx, xxx, n_embd, n_tokens, xxx->nb[1], n_embd * n_tokens * 4 * sizeof(float)); - - xw = ggml_add(ctx, ggml_mul(ctx, ggml_add(ctx, xw, layer->time_mix_lerp_w), sx), cur); - xk = ggml_add(ctx, ggml_mul(ctx, ggml_add(ctx, xk, layer->time_mix_lerp_k), sx), cur); - xv = ggml_add(ctx, ggml_mul(ctx, ggml_add(ctx, xv, layer->time_mix_lerp_v), sx), cur); - xr = ggml_add(ctx, ggml_mul(ctx, ggml_add(ctx, xr, layer->time_mix_lerp_r), sx), cur); - xg = ggml_add(ctx, ggml_mul(ctx, ggml_add(ctx, xg, layer->time_mix_lerp_g), sx), cur); - } - - struct ggml_tensor * r = llm_build_lora_mm(lctx, ctx, layer->time_mix_receptance, xr); - struct ggml_tensor * k = llm_build_lora_mm(lctx, ctx, layer->time_mix_key, xk); - struct ggml_tensor * v = llm_build_lora_mm(lctx, ctx, layer->time_mix_value, xv); - if (layer->time_mix_receptance_b) { - r = ggml_add(ctx, r, layer->time_mix_receptance_b); - } - if (layer->time_mix_key_b) { - k = ggml_add(ctx, k, layer->time_mix_key_b); - } - if (layer->time_mix_value_b) { - v = ggml_add(ctx, v, layer->time_mix_value_b); - } - - struct ggml_tensor * g = llm_build_lora_mm(lctx, ctx, layer->time_mix_gate, xg); - if (is_qrwkv) { - g = ggml_sigmoid(ctx, g); - } else { - g = ggml_silu(ctx, g); - } - - if (head_count_kv != head_count) { - GGML_ASSERT(head_count % head_count_kv == 0); - k = ggml_reshape_4d(ctx, k, head_size, 1, head_count_kv, n_tokens); - v = ggml_reshape_4d(ctx, v, head_size, 1, head_count_kv, n_tokens); - struct ggml_tensor * tmp = ggml_new_tensor_4d(ctx, GGML_TYPE_F32, head_size, head_count / head_count_kv, head_count_kv, n_tokens); - k = ggml_repeat(ctx, k, tmp); - v = ggml_repeat(ctx, v, tmp); - } - - k = ggml_reshape_3d(ctx, k, head_size, head_count, n_tokens); - v = ggml_reshape_3d(ctx, v, head_size, head_count, n_tokens); - r = ggml_reshape_3d(ctx, r, head_size, head_count, n_tokens); - - struct ggml_tensor * w = ggml_mul_mat( - ctx, - layer->time_mix_decay_w2, - ggml_tanh( - ctx, - ggml_mul_mat(ctx, layer->time_mix_decay_w1, xw) - ) - ); - - w = ggml_add(ctx, w, layer->time_mix_decay); - w = ggml_exp(ctx, ggml_neg(ctx, ggml_exp(ctx, w))); - w = ggml_reshape_3d(ctx, w, head_size, head_count, n_tokens); - - if (is_qrwkv) { - // k = k * (1 - w) - k = ggml_sub(ctx, k, ggml_mul(ctx, k, w)); - } - - struct ggml_tensor * wkv_output; - if (!layer->time_mix_first) { - wkv_output = ggml_gated_linear_attn(ctx, k, v, r, w, *wkv_state, pow(head_size, -0.5f)); - } else { - wkv_output = ggml_rwkv_wkv6(ctx, k, v, r, layer->time_mix_first, w, *wkv_state); - } - cur = ggml_view_1d(ctx, wkv_output, n_embd * n_tokens, 0); - *wkv_state = ggml_view_1d(ctx, wkv_output, n_embd * head_size * n_seqs, n_embd * n_tokens * sizeof(float)); - - if (!is_qrwkv) { - // group norm with head_count groups - cur = ggml_reshape_3d(ctx, cur, n_embd / head_count, head_count, n_tokens); - cur = ggml_norm(ctx, cur, 64e-5f); - - // Convert back to regular vectors. 
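 - // Illustrative sketch (not from the llama.cpp sources): what the "group norm with
 - // head_count groups" above boils down to. Each token's embedding is split into
 - // head_count groups of head_size values and every group is normalized independently
 - // (zero mean, unit variance, eps = 64e-5f as in the graph code). The learned
 - // time_mix_ln scale/bias applied right after is left out, and the helper name
 - // per_head_norm is made up for this sketch.
 - //
 - // #include <cmath>
 - // #include <vector>
 - //
 - // static void per_head_norm(std::vector<float> & x, // [n_tokens * head_count * head_size], in place
 - //                           int n_tokens, int head_count, int head_size,
 - //                           float eps = 64e-5f) {
 - //     for (int t = 0; t < n_tokens; ++t) {
 - //         for (int h = 0; h < head_count; ++h) {
 - //             float * g = x.data() + (size_t) t*head_count*head_size + (size_t) h*head_size;
 - //             float mean = 0.0f;
 - //             for (int i = 0; i < head_size; ++i) mean += g[i];
 - //             mean /= head_size;
 - //             float var = 0.0f;
 - //             for (int i = 0; i < head_size; ++i) var += (g[i] - mean)*(g[i] - mean);
 - //             var /= head_size;
 - //             const float inv = 1.0f/std::sqrt(var + eps);
 - //             for (int i = 0; i < head_size; ++i) g[i] = (g[i] - mean)*inv;
 - //         }
 - //     }
 - // }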
- cur = ggml_reshape_2d(ctx, cur, n_embd, n_tokens); - cur = ggml_add(ctx, ggml_mul(ctx, cur, layer->time_mix_ln), layer->time_mix_ln_b); - } else { - cur = ggml_reshape_2d(ctx, cur, n_embd, n_tokens); - } - - cur = ggml_mul(ctx, cur, g); - cur = llm_build_lora_mm(lctx, ctx, layer->time_mix_output, cur); - - return ggml_reshape_3d(ctx, cur, n_embd, n_seq_tokens, n_seqs); -} - -static struct ggml_tensor * llm_build_rwkv6_channel_mix( - struct llama_context & lctx, - struct ggml_context * ctx, - const struct llama_layer * layer, - struct ggml_tensor * cur, - struct ggml_tensor * x_prev) { - struct ggml_tensor * sx = ggml_sub(ctx, x_prev, cur); - struct ggml_tensor * xk = ggml_add(ctx, ggml_mul(ctx, sx, layer->channel_mix_lerp_k), cur); - struct ggml_tensor * xr = ggml_add(ctx, ggml_mul(ctx, sx, layer->channel_mix_lerp_r), cur); - - struct ggml_tensor * r = ggml_sigmoid(ctx, llm_build_lora_mm(lctx, ctx, layer->channel_mix_receptance, xr)); - struct ggml_tensor * k = ggml_sqr( - ctx, - ggml_relu( - ctx, - llm_build_lora_mm(lctx, ctx, layer->channel_mix_key, xk) - ) - ); - - return ggml_mul(ctx, r, llm_build_lora_mm(lctx, ctx, layer->channel_mix_value, k)); -} - -struct llm_build_context { - const llama_model & model; - llama_context & lctx; - const llama_hparams & hparams; - const llama_cparams & cparams; - const llama_ubatch & ubatch; - const llama_kv_cache & kv_self; - - const int64_t n_embd; - const int64_t n_layer; - const int64_t n_rot; - const int64_t n_ctx; // user-specified context size (can be different from n_ctx_train) - const int64_t n_head; - const int64_t n_head_kv; - const int64_t n_embd_head_k; - const int64_t n_embd_k_gqa; - const int64_t n_embd_head_v; - const int64_t n_embd_v_gqa; - const int64_t n_expert; - const int64_t n_expert_used; - - const float freq_base; - const float freq_scale; - const float ext_factor; - const float attn_factor; - const float beta_fast; - const float beta_slow; - const float norm_eps; - const float norm_rms_eps; - - const int32_t n_tokens; - const int32_t n_kv; // size of KV cache to consider (n_kv <= kv_self.size) - const int32_t n_outputs; - const int32_t n_outputs_enc; - const int32_t kv_head; // index of where we store new KV data in the cache - const int32_t n_ctx_orig; - - const bool flash_attn; - - const enum llama_pooling_type pooling_type; - const enum llama_rope_type rope_type; - - const llm_build_cb & cb; - - std::vector & buf_compute_meta; - - struct ggml_context * ctx0 = nullptr; - - // TODO: consider making the entire interface noexcept - llm_build_context( - llama_context & lctx, - const llama_ubatch & ubatch, - const llm_build_cb & cb, - bool worst_case) : - model (lctx.model), - lctx (lctx), - hparams (model.hparams), - cparams (lctx.cparams), - ubatch (ubatch), - kv_self (lctx.kv_self), - n_embd (hparams.n_embd), - n_layer (hparams.n_layer), - n_rot (hparams.n_rot), - n_ctx (cparams.n_ctx), - n_head (hparams.n_head()), - n_head_kv (hparams.n_head_kv()), - n_embd_head_k (hparams.n_embd_head_k), - n_embd_k_gqa (hparams.n_embd_k_gqa()), - n_embd_head_v (hparams.n_embd_head_v), - n_embd_v_gqa (hparams.n_embd_v_gqa()), - n_expert (hparams.n_expert), - n_expert_used (hparams.n_expert_used), - freq_base (cparams.rope_freq_base), - freq_scale (cparams.rope_freq_scale), - ext_factor (cparams.yarn_ext_factor), - attn_factor (cparams.yarn_attn_factor), - beta_fast (cparams.yarn_beta_fast), - beta_slow (cparams.yarn_beta_slow), - norm_eps (hparams.f_norm_eps), - norm_rms_eps (hparams.f_norm_rms_eps), - n_tokens (ubatch.n_tokens), - n_kv 
(worst_case ? kv_self.size : kv_self.n), - n_outputs (worst_case ? n_tokens : lctx.n_outputs), - n_outputs_enc (worst_case ? n_tokens : lctx.embd_enc.size() / hparams.n_embd), - kv_head (worst_case ? (kv_self.recurrent ? 0 : kv_self.size - n_tokens) : kv_self.head), - n_ctx_orig (cparams.n_ctx_orig_yarn), - flash_attn (cparams.flash_attn), - pooling_type (cparams.pooling_type), - rope_type (hparams.rope_type), - cb (cb), - buf_compute_meta (lctx.buf_compute_meta) { - // all initializations should be done in init() - } - - void init() { - struct ggml_init_params params = { - /*.mem_size =*/ buf_compute_meta.size(), - /*.mem_buffer =*/ buf_compute_meta.data(), - /*.no_alloc =*/ true, - }; - - ctx0 = ggml_init(params); - - lctx.inp_tokens = nullptr; - lctx.inp_embd = nullptr; - lctx.inp_pos = nullptr; - lctx.inp_out_ids = nullptr; - lctx.inp_KQ_mask = nullptr; - lctx.inp_KQ_mask_swa = nullptr; - lctx.inp_K_shift = nullptr; - lctx.inp_mean = nullptr; - lctx.inp_cls = nullptr; - lctx.inp_s_copy = nullptr; - lctx.inp_s_mask = nullptr; - lctx.inp_s_seq = nullptr; - lctx.inp_pos_bucket = nullptr; - lctx.inp_embd_enc = nullptr; - lctx.inp_KQ_mask_cross = nullptr; - } - - void free() { - ggml_free(ctx0); - ctx0 = nullptr; - } - - struct ggml_cgraph * build_k_shift() { - struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, model.max_nodes(), false); - - GGML_ASSERT(kv_self.size == n_ctx); - - lctx.inp_K_shift = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_ctx); - cb(lctx.inp_K_shift, "K_shift", -1); - ggml_set_input(lctx.inp_K_shift); - - for (int il = 0; il < n_layer; ++il) { - const int64_t n_head_kv = hparams.n_head_kv(il); - const int64_t n_embd_k_gqa = hparams.n_embd_k_gqa(il); - struct ggml_tensor * rope_factors = build_rope_factors(il); - struct ggml_tensor * k = - ggml_view_3d(ctx0, kv_self.k_l[il], - n_embd_head_k, n_head_kv, n_ctx, - ggml_row_size(kv_self.k_l[il]->type, n_embd_head_k), - ggml_row_size(kv_self.k_l[il]->type, n_embd_k_gqa), - 0); - - struct ggml_tensor * tmp; - if (ggml_is_quantized(k->type)) { - // dequantize to f32 -> RoPE -> quantize back - tmp = ggml_cast(ctx0, k, GGML_TYPE_F32); - cb(tmp, "K_f32", il); - for (auto & backend : lctx.backends) { - // Figure out which backend KV cache belongs to - if (ggml_backend_supports_buft(backend.get(), ggml_backend_buffer_get_type(kv_self.k_l[il]->buffer))) { - ggml_backend_sched_set_tensor_backend(lctx.sched.get(), tmp, backend.get()); - break; - } - } - tmp = ggml_rope_ext_inplace(ctx0, tmp, - lctx.inp_K_shift, rope_factors, n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, - ext_factor, attn_factor, beta_fast, beta_slow); - cb(tmp, "K_shifted_f32", il); - tmp = ggml_cpy(ctx0, tmp, k); - } else { - // we rotate only the first n_rot dimensions - tmp = ggml_rope_ext_inplace(ctx0, k, - lctx.inp_K_shift, rope_factors, n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, - ext_factor, attn_factor, beta_fast, beta_slow); - } - cb(tmp, "K_shifted", il); - ggml_build_forward_expand(gf, tmp); - } - - return gf; - } - - struct ggml_cgraph * build_defrag(const std::vector & ids) { - struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, model.max_nodes(), false); - - for (uint32_t i = 0; i < ids.size(); ++i) { - const uint32_t id = ids[i]; - - if (i == id || id == ids.size()) { - continue; - } - - uint32_t nm = 1; - - while (i + nm < ids.size() && ids[i + nm] == id + nm) { - nm++; - } - - for (int il = 0; il < n_layer; ++il) { - const int64_t n_embd_k_gqa = hparams.n_embd_k_gqa(il); - const int64_t n_embd_v_gqa = hparams.n_embd_v_gqa(il); - - 
ggml_tensor * view_k_src = ggml_view_2d(ctx0, kv_self.k_l[il], - n_embd_k_gqa, nm, - ggml_row_size(kv_self.k_l[il]->type, n_embd_k_gqa), - ggml_row_size(kv_self.k_l[il]->type, n_embd_k_gqa*i)); - - ggml_tensor * view_k_dst = ggml_view_2d(ctx0, kv_self.k_l[il], - n_embd_k_gqa, nm, - ggml_row_size(kv_self.k_l[il]->type, n_embd_k_gqa), - ggml_row_size(kv_self.k_l[il]->type, n_embd_k_gqa*id)); - - ggml_tensor * view_v_src; - ggml_tensor * view_v_dst; - - if (flash_attn) { - // NOTE: the V cache is not transposed when using flash attention - view_v_src = ggml_view_2d(ctx0, kv_self.v_l[il], - n_embd_v_gqa, nm, - ggml_row_size(kv_self.v_l[il]->type, n_embd_v_gqa), - ggml_row_size(kv_self.v_l[il]->type, n_embd_v_gqa*i)); - - view_v_dst = ggml_view_2d(ctx0, kv_self.v_l[il], - n_embd_v_gqa, nm, - ggml_row_size(kv_self.v_l[il]->type, n_embd_v_gqa), - ggml_row_size(kv_self.v_l[il]->type, n_embd_v_gqa*id)); - } else { - view_v_src = ggml_view_2d(ctx0, kv_self.v_l[il], - nm, n_embd_v_gqa, - ggml_row_size(kv_self.v_l[il]->type, kv_self.size), - ggml_row_size(kv_self.v_l[il]->type, i)); - - view_v_dst = ggml_view_2d(ctx0, kv_self.v_l[il], - nm, n_embd_v_gqa, - ggml_row_size(kv_self.v_l[il]->type, kv_self.size), - ggml_row_size(kv_self.v_l[il]->type, id)); - } - - ggml_build_forward_expand(gf, ggml_cpy(ctx0, view_k_src, view_k_dst)); - ggml_build_forward_expand(gf, ggml_cpy(ctx0, view_v_src, view_v_dst)); - } - - i += nm - 1; - } - - //LLAMA_LOG_INFO("gf->n_nodes = %d\n", gf->n_nodes); - - return gf; - } - - struct ggml_tensor * build_inp_pos() { - lctx.inp_pos = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_tokens); - cb(lctx.inp_pos, "inp_pos", -1); - ggml_set_input(lctx.inp_pos); - return lctx.inp_pos; - } - - struct ggml_tensor * build_rope_factors(int il) { - // choose long/short freq factors based on the context size - const auto n_ctx_pre_seq = cparams.n_ctx / cparams.n_seq_max; - - if (model.layers[il].rope_freqs != nullptr) { - return model.layers[il].rope_freqs; - } - - if (n_ctx_pre_seq > hparams.n_ctx_orig_yarn) { - return model.layers[il].rope_long; - } - - return model.layers[il].rope_short; - } - - struct ggml_tensor * build_inp_out_ids() { - lctx.inp_out_ids = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_outputs); - cb(lctx.inp_out_ids, "inp_out_ids", -1); - ggml_set_input(lctx.inp_out_ids); - return lctx.inp_out_ids; - } - - struct ggml_tensor * build_inp_KQ_mask(bool causal = true) { - lctx.inp_KQ_mask = causal - ? ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_kv, GGML_PAD(n_tokens, GGML_KQ_MASK_PAD)) - : ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_tokens, GGML_PAD(n_tokens, GGML_KQ_MASK_PAD)); - cb(lctx.inp_KQ_mask, "KQ_mask", -1); - ggml_set_input(lctx.inp_KQ_mask); - - return flash_attn ? ggml_cast(ctx0, lctx.inp_KQ_mask, GGML_TYPE_F16) : lctx.inp_KQ_mask; - } - - struct ggml_tensor * build_inp_KQ_mask_swa(bool causal = true) { - GGML_ASSERT(hparams.n_swa > 0); - - lctx.inp_KQ_mask_swa = causal - ? ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_kv, GGML_PAD(n_tokens, GGML_KQ_MASK_PAD)) - : ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_tokens, GGML_PAD(n_tokens, GGML_KQ_MASK_PAD)); - cb(lctx.inp_KQ_mask_swa, "KQ_mask_swa", -1); - ggml_set_input(lctx.inp_KQ_mask_swa); - - return flash_attn ? 
ggml_cast(ctx0, lctx.inp_KQ_mask_swa, GGML_TYPE_F16) : lctx.inp_KQ_mask_swa; - } - - struct ggml_tensor * build_inp_mean() { - lctx.inp_mean = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_tokens, n_tokens); - cb(lctx.inp_mean, "inp_mean", -1); - ggml_set_input(lctx.inp_mean); - return lctx.inp_mean; - } - - struct ggml_tensor * build_inp_cls() { - lctx.inp_cls = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_tokens); - cb(lctx.inp_cls, "inp_cls", -1); - ggml_set_input(lctx.inp_cls); - return lctx.inp_cls; - } - - struct ggml_tensor * build_inp_s_copy() { - lctx.inp_s_copy = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_kv); - cb(lctx.inp_s_copy, "inp_s_copy", -1); - ggml_set_input(lctx.inp_s_copy); - return lctx.inp_s_copy; - } - - struct ggml_tensor * build_inp_s_mask() { - lctx.inp_s_mask = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, 1, n_kv); - cb(lctx.inp_s_mask, "inp_s_mask", -1); - ggml_set_input(lctx.inp_s_mask); - return lctx.inp_s_mask; - } - - struct ggml_cgraph * append_pooling(struct ggml_cgraph * gf) { - // find result_norm tensor for input - struct ggml_tensor * inp = nullptr; - for (int i = ggml_graph_n_nodes(gf) - 1; i >= 0; --i) { - inp = ggml_graph_node(gf, i); - if (strcmp(inp->name, "result_norm") == 0 || strcmp(inp->name, "result_embd") == 0) { - break; - } else { - inp = nullptr; - } - } - GGML_ASSERT(inp != nullptr && "missing result_norm/result_embd tensor"); - - struct ggml_tensor * cur; - - switch (pooling_type) { - case LLAMA_POOLING_TYPE_NONE: - { - cur = inp; - } break; - case LLAMA_POOLING_TYPE_MEAN: - { - struct ggml_tensor * inp_mean = build_inp_mean(); - cur = ggml_mul_mat(ctx0, ggml_cont(ctx0, ggml_transpose(ctx0, inp)), inp_mean); - } break; - case LLAMA_POOLING_TYPE_CLS: - case LLAMA_POOLING_TYPE_LAST: - { - struct ggml_tensor * inp_cls = build_inp_cls(); - cur = ggml_get_rows(ctx0, inp, inp_cls); - } break; - case LLAMA_POOLING_TYPE_RANK: - { - struct ggml_tensor * inp_cls = build_inp_cls(); - inp = ggml_get_rows(ctx0, inp, inp_cls); - - // classification head - // https://github.com/huggingface/transformers/blob/5af7d41e49bbfc8319f462eb45253dcb3863dfb7/src/transformers/models/roberta/modeling_roberta.py#L1566 - GGML_ASSERT(model.cls != nullptr); - GGML_ASSERT(model.cls_b != nullptr); - - cur = ggml_add (ctx0, ggml_mul_mat(ctx0, model.cls, inp), model.cls_b); - cur = ggml_tanh(ctx0, cur); - - // some models don't have `cls_out`, for example: https://huggingface.co/jinaai/jina-reranker-v1-tiny-en - // https://huggingface.co/jinaai/jina-reranker-v1-tiny-en/blob/cb5347e43979c3084a890e3f99491952603ae1b7/modeling_bert.py#L884-L896 - if (model.cls_out) { - GGML_ASSERT(model.cls_out_b != nullptr); - - cur = ggml_add (ctx0, ggml_mul_mat(ctx0, model.cls_out, cur), model.cls_out_b); - } - } break; - default: - { - GGML_ABORT("unknown pooling type"); - } - } - - cb(cur, "result_embd_pooled", -1); - - ggml_build_forward_expand(gf, cur); - - return gf; - } - - struct ggml_tensor * llm_build_pos_bucket(bool causal) { - if (causal) { - lctx.inp_pos_bucket = ggml_new_tensor_2d(ctx0, GGML_TYPE_I32, n_kv, n_tokens); - } else { - lctx.inp_pos_bucket = ggml_new_tensor_2d(ctx0, GGML_TYPE_I32, n_tokens, n_tokens); - } - - ggml_set_input(lctx.inp_pos_bucket); - cb(lctx.inp_pos_bucket, "pos_bucket", -1); - - return lctx.inp_pos_bucket; - } - - struct ggml_tensor * llm_build_pos_bias(struct ggml_tensor * pos_bucket, struct ggml_tensor * attn_rel_b) { - struct ggml_tensor * pos_bucket_1d = ggml_view_1d(ctx0, pos_bucket, pos_bucket->ne[0] * pos_bucket->ne[1], 0); - cb(pos_bucket_1d, 
"pos_bucket_1d", -1); - - struct ggml_tensor * pos_bias = ggml_get_rows(ctx0, attn_rel_b, pos_bucket_1d); - cb(pos_bias, "pos_bias", -1); - - pos_bias = ggml_view_3d(ctx0, pos_bias, pos_bias->ne[0], lctx.inp_pos_bucket->ne[0], lctx.inp_pos_bucket->ne[1], ggml_element_size(pos_bias) * pos_bias->ne[0], ggml_element_size(pos_bias) * pos_bias->ne[0] * lctx.inp_pos_bucket->ne[0], 0); - cb(pos_bias, "pos_bias", -1); - - pos_bias = ggml_permute(ctx0, pos_bias, 2, 0, 1, 3); - cb(pos_bias, "pos_bias", -1); - - pos_bias = ggml_cont(ctx0, pos_bias); - cb(pos_bias, "pos_bias", -1); - - return pos_bias; - } - - struct ggml_tensor * llm_build_inp_embd_enc() { - const int64_t n_embd = hparams.n_embd; - lctx.inp_embd_enc = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_embd, n_outputs_enc); - ggml_set_input(lctx.inp_embd_enc); - cb(lctx.inp_embd_enc, "embd_enc", -1); - return lctx.inp_embd_enc; - } - - struct ggml_tensor * llm_build_inp_KQ_mask_cross() { - lctx.inp_KQ_mask_cross = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_outputs_enc, GGML_PAD(n_tokens, GGML_KQ_MASK_PAD)); - ggml_set_input(lctx.inp_KQ_mask_cross); - cb(lctx.inp_KQ_mask_cross, "KQ_mask_cross", -1); - return lctx.inp_KQ_mask_cross; - } - - struct ggml_cgraph * build_llama() { - struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, model.max_nodes(), false); - - // mutable variable, needed during the last layer of the computation to skip unused tokens - int32_t n_tokens = this->n_tokens; - - const int64_t n_embd_head = hparams.n_embd_head_v; - GGML_ASSERT(n_embd_head == hparams.n_embd_head_k); - GGML_ASSERT(n_embd_head == hparams.n_rot); - - struct ggml_tensor * cur; - struct ggml_tensor * inpL; - - inpL = llm_build_inp_embd(ctx0, lctx, hparams, ubatch, model.tok_embd, cb); - - // inp_pos - contains the positions - struct ggml_tensor * inp_pos = build_inp_pos(); - - // KQ_mask (mask for 1 head, it will be broadcasted to all heads) - struct ggml_tensor * KQ_mask = build_inp_KQ_mask(); - - const float kq_scale = hparams.f_attention_scale == 0.0f ? 
1.0f/sqrtf(float(n_embd_head)) : hparams.f_attention_scale; - for (int il = 0; il < n_layer; ++il) { - struct ggml_tensor * inpSA = inpL; - - // norm - cur = llm_build_norm(ctx0, inpL, hparams, - model.layers[il].attn_norm, NULL, - LLM_NORM_RMS, cb, il); - cb(cur, "attn_norm", il); - - // self-attention - { - // rope freq factors for llama3; may return nullptr for llama2 and other models - struct ggml_tensor * rope_factors = build_rope_factors(il); - - // compute Q and K and RoPE them - struct ggml_tensor * Qcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wq, cur); - cb(Qcur, "Qcur", il); - if (model.layers[il].bq) { - Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq); - cb(Qcur, "Qcur", il); - } - - struct ggml_tensor * Kcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wk, cur); - cb(Kcur, "Kcur", il); - if (model.layers[il].bk) { - Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk); - cb(Kcur, "Kcur", il); - } - - struct ggml_tensor * Vcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wv, cur); - cb(Vcur, "Vcur", il); - if (model.layers[il].bv) { - Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv); - cb(Vcur, "Vcur", il); - } - - Qcur = ggml_rope_ext( - ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos, rope_factors, - n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, - ext_factor, attn_factor, beta_fast, beta_slow - ); - cb(Qcur, "Qcur", il); - - Kcur = ggml_rope_ext( - ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos, rope_factors, - n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, - ext_factor, attn_factor, beta_fast, beta_slow - ); - cb(Kcur, "Kcur", il); - - cur = llm_build_kv(ctx0, lctx, kv_self, gf, - model.layers[il].wo, model.layers[il].bo, - Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, kq_scale, cb, il); - } - - if (il == n_layer - 1) { - // skip computing output for unused tokens - struct ggml_tensor * inp_out_ids = build_inp_out_ids(); - n_tokens = n_outputs; - cur = ggml_get_rows(ctx0, cur, inp_out_ids); - inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids); - } - - // For Granite architecture - if (hparams.f_residual_scale) { - cur = ggml_scale(ctx0, cur, hparams.f_residual_scale); - } - - struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA); - cb(ffn_inp, "ffn_inp", il); - - // feed-forward network - if (model.layers[il].ffn_gate_inp == nullptr) { - - cur = llm_build_norm(ctx0, ffn_inp, hparams, - model.layers[il].ffn_norm, NULL, - LLM_NORM_RMS, cb, il); - cb(cur, "ffn_norm", il); - - cur = llm_build_ffn(ctx0, lctx, cur, - model.layers[il].ffn_up, model.layers[il].ffn_up_b, NULL, - model.layers[il].ffn_gate, model.layers[il].ffn_gate_b, NULL, - model.layers[il].ffn_down, model.layers[il].ffn_down_b, NULL, - NULL, - LLM_FFN_SILU, LLM_FFN_PAR, cb, il); - cb(cur, "ffn_out", il); - } else { - // MoE branch - cur = llm_build_norm(ctx0, ffn_inp, hparams, - model.layers[il].ffn_norm, NULL, - LLM_NORM_RMS, cb, il); - cb(cur, "ffn_norm", il); - - cur = llm_build_moe_ffn(ctx0, lctx, cur, - model.layers[il].ffn_gate_inp, - model.layers[il].ffn_up_exps, - model.layers[il].ffn_gate_exps, - model.layers[il].ffn_down_exps, - nullptr, - n_expert, n_expert_used, - LLM_FFN_SILU, true, - false, 0.0, - LLAMA_EXPERT_GATING_FUNC_TYPE_SOFTMAX, - cb, il); - cb(cur, "ffn_moe_out", il); - } - - // For Granite architecture - if (hparams.f_residual_scale) { - cur = ggml_scale(ctx0, cur, hparams.f_residual_scale); - } - - cur = ggml_add(ctx0, cur, ffn_inp); - cb(cur, "ffn_out", il); - - cur = lctx.cvec.apply_to(ctx0, cur, il); 
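 - // Illustrative sketch (not from the llama.cpp sources): the expert routing that
 - // llm_build_moe_ffn performs in the MoE branch above, reduced to a single token with
 - // softmax gating. The gate logits are softmaxed, the n_expert_used largest
 - // probabilities are kept, and (since norm_w is true here) the kept weights are
 - // renormalized to sum to 1. The helper name route_token is made up for this sketch;
 - // running the selected experts and mixing their outputs is omitted.
 - //
 - // #include <algorithm>
 - // #include <cmath>
 - // #include <numeric>
 - // #include <utility>
 - // #include <vector>
 - //
 - // static std::vector<std::pair<int, float>> route_token(
 - //         const std::vector<float> & gate_logits, int n_expert_used) {
 - //     const int n_expert = (int) gate_logits.size();
 - //     // softmax over the experts
 - //     const float mx = *std::max_element(gate_logits.begin(), gate_logits.end());
 - //     std::vector<float> probs(n_expert);
 - //     float sum = 0.0f;
 - //     for (int e = 0; e < n_expert; ++e) { probs[e] = std::exp(gate_logits[e] - mx); sum += probs[e]; }
 - //     for (float & p : probs) p /= sum;
 - //     // top-k expert ids by probability
 - //     std::vector<int> ids(n_expert);
 - //     std::iota(ids.begin(), ids.end(), 0);
 - //     std::partial_sort(ids.begin(), ids.begin() + n_expert_used, ids.end(),
 - //                       [&](int a, int b) { return probs[a] > probs[b]; });
 - //     // renormalize the selected weights (norm_w == true)
 - //     float sel_sum = 0.0f;
 - //     for (int k = 0; k < n_expert_used; ++k) sel_sum += probs[ids[k]];
 - //     std::vector<std::pair<int, float>> out;
 - //     for (int k = 0; k < n_expert_used; ++k) out.emplace_back(ids[k], probs[ids[k]]/sel_sum);
 - //     return out;
 - // }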
- cb(cur, "l_out", il); - - // input for next layer - inpL = cur; - } - - cur = inpL; - - cur = llm_build_norm(ctx0, cur, hparams, - model.output_norm, NULL, - LLM_NORM_RMS, cb, -1); - cb(cur, "result_norm", -1); - - // lm_head - cur = llm_build_lora_mm(lctx, ctx0, model.output, cur); - - // For Granite architecture - if (hparams.f_logit_scale) { - cur = ggml_scale(ctx0, cur, 1.0f / hparams.f_logit_scale); - } - - cb(cur, "result_output", -1); - - ggml_build_forward_expand(gf, cur); - - return gf; - } - - struct ggml_cgraph * build_deci() { - struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, model.max_nodes(), false); - - // mutable variable, needed during the last layer of the computation to skip unused tokens - int32_t n_tokens = this->n_tokens; - - const int64_t n_embd_head = hparams.n_embd_head_v; - GGML_ASSERT(n_embd_head == hparams.n_embd_head_k); - GGML_ASSERT(n_embd_head == hparams.n_rot); - - struct ggml_tensor * cur; - struct ggml_tensor * inpL; - - inpL = llm_build_inp_embd(ctx0, lctx, hparams, ubatch, model.tok_embd, cb); - - // inp_pos - contains the positions - struct ggml_tensor * inp_pos = build_inp_pos(); - - // KQ_mask (mask for 1 head, it will be broadcasted to all heads) - struct ggml_tensor * KQ_mask = build_inp_KQ_mask(); - - const float kq_scale = hparams.f_attention_scale == 0.0f ? 1.0f/sqrtf(float(n_embd_head)) : hparams.f_attention_scale; - for (int il = 0; il < n_layer; ++il) { - struct ggml_tensor * inpSA = inpL; - const int64_t n_head_kv = hparams.n_head_kv(il); - const int64_t n_head = hparams.n_head(il); - - if (n_head == 0) { - // attention-free layer of Llama-3_1-Nemotron-51B - cur = inpL; - } else { - // norm - cur = llm_build_norm(ctx0, inpL, hparams, - model.layers[il].attn_norm, NULL, - LLM_NORM_RMS, cb, il); - cb(cur, "attn_norm", il); - } - - if (n_head > 0 && n_head_kv == 0) { - // "linear attention" of Llama-3_1-Nemotron-51B - cur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wo, cur); - cb(cur, "wo", il); - } else if (n_head > 0) { - // self-attention - // rope freq factors for llama3; may return nullptr for llama2 and other models - struct ggml_tensor * rope_factors = build_rope_factors(il); - - // compute Q and K and RoPE them - struct ggml_tensor * Qcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wq, cur); - cb(Qcur, "Qcur", il); - if (model.layers[il].bq) { - Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq); - cb(Qcur, "Qcur", il); - } - - struct ggml_tensor * Kcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wk, cur); - cb(Kcur, "Kcur", il); - if (model.layers[il].bk) { - Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk); - cb(Kcur, "Kcur", il); - } - - struct ggml_tensor * Vcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wv, cur); - cb(Vcur, "Vcur", il); - if (model.layers[il].bv) { - Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv); - cb(Vcur, "Vcur", il); - } - - Qcur = ggml_rope_ext( - ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos, rope_factors, - n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, - ext_factor, attn_factor, beta_fast, beta_slow - ); - cb(Qcur, "Qcur", il); - - Kcur = ggml_rope_ext( - ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos, rope_factors, - n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, - ext_factor, attn_factor, beta_fast, beta_slow - ); - cb(Kcur, "Kcur", il); - - cur = llm_build_kv(ctx0, lctx, kv_self, gf, - model.layers[il].wo, model.layers[il].bo, - Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, kq_scale, cb, il); - } - - 
if (il == n_layer - 1) { - // skip computing output for unused tokens - struct ggml_tensor * inp_out_ids = build_inp_out_ids(); - n_tokens = n_outputs; - cur = ggml_get_rows(ctx0, cur, inp_out_ids); - inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids); - } - - // For Granite architecture - if (hparams.f_residual_scale) { - cur = ggml_scale(ctx0, cur, hparams.f_residual_scale); - } - - // modified to support attention-free layer of Llama-3_1-Nemotron-51B - struct ggml_tensor * ffn_inp = cur; - if (n_head > 0) { - ffn_inp = ggml_add(ctx0, cur, inpSA); - cb(ffn_inp, "ffn_inp", il); - } - - // feed-forward network - if (model.layers[il].ffn_gate_inp == nullptr) { - cur = llm_build_norm(ctx0, ffn_inp, hparams, - model.layers[il].ffn_norm, NULL, - LLM_NORM_RMS, cb, il); - cb(cur, "ffn_norm", il); - - cur = llm_build_ffn(ctx0, lctx, cur, - model.layers[il].ffn_up, model.layers[il].ffn_up_b, NULL, - model.layers[il].ffn_gate, model.layers[il].ffn_gate_b, NULL, - model.layers[il].ffn_down, model.layers[il].ffn_down_b, NULL, - NULL, - LLM_FFN_SILU, LLM_FFN_PAR, cb, il); - cb(cur, "ffn_out", il); - } - - // For Granite architecture - if (hparams.f_residual_scale) { - cur = ggml_scale(ctx0, cur, hparams.f_residual_scale); - } - - cur = ggml_add(ctx0, cur, ffn_inp); - cb(cur, "ffn_out", il); - - cur = lctx.cvec.apply_to(ctx0, cur, il); - cb(cur, "l_out", il); - - // input for next layer - inpL = cur; - } - - cur = inpL; - - cur = llm_build_norm(ctx0, cur, hparams, - model.output_norm, NULL, - LLM_NORM_RMS, cb, -1); - cb(cur, "result_norm", -1); - - // lm_head - cur = llm_build_lora_mm(lctx, ctx0, model.output, cur); - - // For Granite architecture - if (hparams.f_logit_scale) { - cur = ggml_scale(ctx0, cur, 1.0f / hparams.f_logit_scale); - } - - cb(cur, "result_output", -1); - - ggml_build_forward_expand(gf, cur); - - return gf; - } - - struct ggml_cgraph * build_baichuan() { - struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, model.max_nodes(), false); - - const int64_t n_embd_head = hparams.n_embd_head_v; - GGML_ASSERT(n_embd_head == hparams.n_embd_head_k); - GGML_ASSERT(n_embd_head == hparams.n_rot); - - struct ggml_tensor * cur; - struct ggml_tensor * inpL; - - inpL = llm_build_inp_embd(ctx0, lctx, hparams, ubatch, model.tok_embd, cb); - - // inp_pos - contains the positions - struct ggml_tensor * inp_pos = model.type == LLM_TYPE_7B ? 
build_inp_pos() : nullptr; - - // KQ_mask (mask for 1 head, it will be broadcasted to all heads) - struct ggml_tensor * KQ_mask = build_inp_KQ_mask(); - - for (int il = 0; il < n_layer; ++il) { - struct ggml_tensor * inpSA = inpL; - - cur = llm_build_norm(ctx0, inpL, hparams, - model.layers[il].attn_norm, NULL, - LLM_NORM_RMS, cb, il); - cb(cur, "attn_norm", il); - - // self-attention - { - struct ggml_tensor * Qcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wq, cur); - cb(Qcur, "Qcur", il); - - struct ggml_tensor * Kcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wk, cur); - cb(Kcur, "Kcur", il); - - struct ggml_tensor * Vcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wv, cur); - cb(Vcur, "Vcur", il); - - switch (model.type) { - case LLM_TYPE_7B: - Qcur = ggml_rope_ext( - ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos, nullptr, - n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, - ext_factor, attn_factor, beta_fast, beta_slow - ); - Kcur = ggml_rope_ext( - ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos, nullptr, - n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, - ext_factor, attn_factor, beta_fast, beta_slow - ); - break; - case LLM_TYPE_13B: - Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd/n_head, n_head, n_tokens); - Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd/n_head, n_head, n_tokens); - break; - default: - GGML_ABORT("fatal error"); - } - cb(Qcur, "Qcur", il); - cb(Kcur, "Kcur", il); - - cur = llm_build_kv(ctx0, lctx, kv_self, gf, - model.layers[il].wo, NULL, - Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il); - } - - if (il == n_layer - 1) { - // skip computing output for unused tokens - struct ggml_tensor * inp_out_ids = build_inp_out_ids(); - cur = ggml_get_rows(ctx0, cur, inp_out_ids); - inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids); - } - - struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA); - cb(ffn_inp, "ffn_inp", il); - - // feed-forward network - { - cur = llm_build_norm(ctx0, ffn_inp, hparams, - model.layers[il].ffn_norm, NULL, - LLM_NORM_RMS, cb, il); - cb(cur, "ffn_norm", il); - - cur = llm_build_ffn(ctx0, lctx, cur, - model.layers[il].ffn_up, NULL, NULL, - model.layers[il].ffn_gate, NULL, NULL, - model.layers[il].ffn_down, NULL, NULL, - NULL, - LLM_FFN_SILU, LLM_FFN_PAR, cb, il); - cb(cur, "ffn_out", il); - } - - cur = ggml_add(ctx0, cur, ffn_inp); - cur = lctx.cvec.apply_to(ctx0, cur, il); - cb(cur, "l_out", il); - - // input for next layer - inpL = cur; - } - - cur = inpL; - - cur = llm_build_norm(ctx0, cur, hparams, - model.output_norm, NULL, - LLM_NORM_RMS, cb, -1); - cb(cur, "result_norm", -1); - - // lm_head - cur = llm_build_lora_mm(lctx, ctx0, model.output, cur); - cb(cur, "result_output", -1); - - ggml_build_forward_expand(gf, cur); - - return gf; - } - - struct ggml_cgraph * build_xverse() { - struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, model.max_nodes(), false); - - const int64_t n_embd_head = hparams.n_embd_head_v; - GGML_ASSERT(n_embd_head == hparams.n_embd_head_k); - GGML_ASSERT(n_embd_head == hparams.n_rot); - - struct ggml_tensor * cur; - struct ggml_tensor * inpL; - - inpL = llm_build_inp_embd(ctx0, lctx, hparams, ubatch, model.tok_embd, cb); - - // inp_pos - contains the positions - struct ggml_tensor * inp_pos = build_inp_pos(); - - // KQ_mask (mask for 1 head, it will be broadcasted to all heads) - struct ggml_tensor * KQ_mask = build_inp_KQ_mask(); - - for (int il = 0; il < n_layer; ++il) { - struct ggml_tensor * 
inpSA = inpL; - - cur = llm_build_norm(ctx0, inpL, hparams, - model.layers[il].attn_norm, NULL, - LLM_NORM_RMS, cb, il); - cb(cur, "attn_norm", il); - - // self-attention - { - struct ggml_tensor * Qcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wq, cur); - cb(Qcur, "Qcur", il); - - struct ggml_tensor * Kcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wk, cur); - cb(Kcur, "Kcur", il); - - struct ggml_tensor * Vcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wv, cur); - cb(Vcur, "Vcur", il); - - Qcur = ggml_rope_ext( - ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos, nullptr, - n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, - ext_factor, attn_factor, beta_fast, beta_slow - ); - cb(Qcur, "Qcur", il); - - Kcur = ggml_rope_ext( - ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos, nullptr, - n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, - ext_factor, attn_factor, beta_fast, beta_slow - ); - cb(Kcur, "Kcur", il); - cur = llm_build_kv(ctx0, lctx, kv_self, gf, - model.layers[il].wo, NULL, - Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il); - } - - if (il == n_layer - 1) { - // skip computing output for unused tokens - struct ggml_tensor * inp_out_ids = build_inp_out_ids(); - cur = ggml_get_rows(ctx0, cur, inp_out_ids); - inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids); - } - - struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA); - cb(ffn_inp, "ffn_inp", il); - - // feed-forward network - { - cur = llm_build_norm(ctx0, ffn_inp, hparams, - model.layers[il].ffn_norm, NULL, - LLM_NORM_RMS, cb, il); - cb(cur, "ffn_norm", il); - - cur = llm_build_ffn(ctx0, lctx, cur, - model.layers[il].ffn_up, NULL, NULL, - model.layers[il].ffn_gate, NULL, NULL, - model.layers[il].ffn_down, NULL, NULL, - NULL, - LLM_FFN_SILU, LLM_FFN_PAR, cb, il); - cb(cur, "ffn_out", il); - } - - cur = ggml_add(ctx0, cur, ffn_inp); - cur = lctx.cvec.apply_to(ctx0, cur, il); - cb(cur, "l_out", il); - - // input for next layer - inpL = cur; - } - - cur = inpL; - - cur = llm_build_norm(ctx0, cur, hparams, model.output_norm, NULL, LLM_NORM_RMS, cb, -1); - cb(cur, "result_norm", -1); - - // lm_head - cur = llm_build_lora_mm(lctx, ctx0, model.output, cur); - cb(cur, "result_output", -1); - - ggml_build_forward_expand(gf, cur); - - return gf; - } - - struct ggml_cgraph * build_falcon() { - struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, model.max_nodes(), false); - - const int64_t n_embd_head = hparams.n_embd_head_v; - const int64_t n_embd_gqa = hparams.n_embd_v_gqa(); - GGML_ASSERT(n_embd_head == hparams.n_embd_head_k); - GGML_ASSERT(n_embd_head == hparams.n_rot); - - struct ggml_tensor * cur; - struct ggml_tensor * inpL; - - inpL = llm_build_inp_embd(ctx0, lctx, hparams, ubatch, model.tok_embd, cb); - - // inp_pos - contains the positions - struct ggml_tensor * inp_pos = build_inp_pos(); - - // KQ_mask (mask for 1 head, it will be broadcasted to all heads) - struct ggml_tensor * KQ_mask = build_inp_KQ_mask(); - - for (int il = 0; il < n_layer; ++il) { - struct ggml_tensor * attn_norm; - - attn_norm = llm_build_norm(ctx0, inpL, hparams, - model.layers[il].attn_norm, - model.layers[il].attn_norm_b, - LLM_NORM, cb, il); - cb(attn_norm, "attn_norm", il); - - // self-attention - { - if (model.layers[il].attn_norm_2) { - // Falcon-40B - cur = llm_build_norm(ctx0, inpL, hparams, - model.layers[il].attn_norm_2, - model.layers[il].attn_norm_2_b, - LLM_NORM, cb, il); - cb(cur, "attn_norm_2", il); - } else { - cur = 
attn_norm; - } - - cur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wqkv, cur); - cb(cur, "wqkv", il); - - struct ggml_tensor * Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd))); - struct ggml_tensor * Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd))); - struct ggml_tensor * Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd + n_embd_gqa))); - - cb(Qcur, "Qcur", il); - cb(Kcur, "Kcur", il); - cb(Vcur, "Vcur", il); - - Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens); - Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens); - - // using mode = 2 for neox mode - Qcur = ggml_rope_ext( - ctx0, Qcur, inp_pos, nullptr, n_rot, rope_type, n_ctx_orig, - freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow - ); - cb(Qcur, "Qcur", il); - - Kcur = ggml_rope_ext( - ctx0, Kcur, inp_pos, nullptr, n_rot, rope_type, n_ctx_orig, - freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow - ); - cb(Kcur, "Kcur", il); - - cur = llm_build_kv(ctx0, lctx, kv_self, gf, - model.layers[il].wo, NULL, - Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il); - } - - if (il == n_layer - 1) { - // skip computing output for unused tokens - struct ggml_tensor * inp_out_ids = build_inp_out_ids(); - cur = ggml_get_rows(ctx0, cur, inp_out_ids); - inpL = ggml_get_rows(ctx0, inpL, inp_out_ids); - attn_norm = ggml_get_rows(ctx0, attn_norm, inp_out_ids); - } - - struct ggml_tensor * ffn_inp = cur; - - // feed forward - { - cur = llm_build_ffn(ctx0, lctx, attn_norm, // !! use the attn norm, not the result - model.layers[il].ffn_up, NULL, NULL, - NULL, NULL, NULL, - model.layers[il].ffn_down, NULL, NULL, - NULL, - LLM_FFN_GELU, LLM_FFN_SEQ, cb, il); - cb(cur, "ffn_out", il); - } - - cur = ggml_add(ctx0, cur, ffn_inp); - cur = ggml_add(ctx0, cur, inpL); - cur = lctx.cvec.apply_to(ctx0, cur, il); - cb(cur, "l_out", il); - - // input for next layer - inpL = cur; - } - - cur = inpL; - - // norm - cur = llm_build_norm(ctx0, cur, hparams, - model.output_norm, - model.output_norm_b, - LLM_NORM, cb, -1); - cb(cur, "result_norm", -1); - - cur = llm_build_lora_mm(lctx, ctx0, model.output, cur); - cb(cur, "result_output", -1); - - ggml_build_forward_expand(gf, cur); - - return gf; - } - - struct ggml_cgraph * build_grok() { - struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, model.max_nodes(), false); - - // mutable variable, needed during the last layer of the computation to skip unused tokens - int32_t n_tokens = this->n_tokens; - - const int64_t n_embd_head = hparams.n_embd_head_v; - GGML_ASSERT(n_embd_head == hparams.n_embd_head_k); - GGML_ASSERT(n_embd_head == hparams.n_rot); - - struct ggml_tensor * cur; - struct ggml_tensor * inpL; - - inpL = llm_build_inp_embd(ctx0, lctx, hparams, ubatch, model.tok_embd, cb); - - // multiply by embedding_multiplier_scale of 78.38367176906169 - inpL = ggml_scale(ctx0, inpL, 78.38367176906169f); - - // inp_pos - contains the positions - struct ggml_tensor * inp_pos = build_inp_pos(); - - // KQ_mask (mask for 1 head, it will be broadcasted to all heads) - struct ggml_tensor * KQ_mask = build_inp_KQ_mask(); - - for (int il = 0; il < n_layer; ++il) { - struct ggml_tensor * inpSA = inpL; - - // norm - cur = llm_build_norm(ctx0, inpL, hparams, - model.layers[il].attn_norm, NULL, - LLM_NORM_RMS, cb, il); - cb(cur, "attn_norm", il); - - - // 
self-attention - { - // compute Q and K and RoPE them - struct ggml_tensor * Qcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wq, cur); - cb(Qcur, "Qcur", il); - if (model.layers[il].bq) { - Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq); - cb(Qcur, "Qcur", il); - } - - struct ggml_tensor * Kcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wk, cur); - cb(Kcur, "Kcur", il); - if (model.layers[il].bk) { - Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk); - cb(Kcur, "Kcur", il); - } - - struct ggml_tensor * Vcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wv, cur); - cb(Vcur, "Vcur", il); - if (model.layers[il].bv) { - Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv); - cb(Vcur, "Vcur", il); - } - - Qcur = ggml_rope_ext( - ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos, nullptr, - n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, - ext_factor, attn_factor, beta_fast, beta_slow - ); - cb(Qcur, "Qcur", il); - - Kcur = ggml_rope_ext( - ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos, nullptr, - n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, - ext_factor, attn_factor, beta_fast, beta_slow - ); - cb(Kcur, "Kcur", il); - - cur = llm_build_kv(ctx0, lctx, kv_self, gf, - model.layers[il].wo, model.layers[il].bo, - Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f, cb, il); - } - - if (il == n_layer - 1) { - // skip computing output for unused tokens - struct ggml_tensor * inp_out_ids = build_inp_out_ids(); - n_tokens = n_outputs; - cur = ggml_get_rows(ctx0, cur, inp_out_ids); - inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids); - } - - // Grok - // if attn_out_norm is present then apply it before adding the input - if (model.layers[il].attn_out_norm) { - cur = llm_build_norm(ctx0, cur, hparams, - model.layers[il].attn_out_norm, NULL, - LLM_NORM_RMS, cb, il); - cb(cur, "attn_out_norm", il); - } - - struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA); - cb(ffn_inp, "ffn_inp", il); - - // feed-forward network - // MoE branch - cur = llm_build_norm(ctx0, ffn_inp, hparams, - model.layers[il].ffn_norm, NULL, - LLM_NORM_RMS, cb, il); - cb(cur, "ffn_norm", il); - - cur = llm_build_moe_ffn(ctx0, lctx, cur, - model.layers[il].ffn_gate_inp, - model.layers[il].ffn_up_exps, - model.layers[il].ffn_gate_exps, - model.layers[il].ffn_down_exps, - nullptr, - n_expert, n_expert_used, - LLM_FFN_GELU, true, - false, 0.0, - LLAMA_EXPERT_GATING_FUNC_TYPE_SOFTMAX, - cb, il); - cb(cur, "ffn_moe_out", il); - - // Grok - // if layer_out_norm is present then apply it before adding the input - // Idea: maybe ffn_out_norm is a better name - if (model.layers[il].layer_out_norm) { - cur = llm_build_norm(ctx0, cur, hparams, - model.layers[il].layer_out_norm, NULL, - LLM_NORM_RMS, cb, il); - cb(cur, "layer_out_norm", il); - } - - cur = ggml_add(ctx0, cur, ffn_inp); - cb(cur, "ffn_out", il); - - cur = lctx.cvec.apply_to(ctx0, cur, il); - cb(cur, "l_out", il); - - // input for next layer - inpL = cur; - } - - cur = inpL; - - cur = llm_build_norm(ctx0, cur, hparams, - model.output_norm, NULL, - LLM_NORM_RMS, cb, -1); - cb(cur, "result_norm", -1); - - // lm_head - cur = llm_build_lora_mm(lctx, ctx0, model.output, cur); - - // Grok - // multiply logits by output_multiplier_scale of 0.5773502691896257 - - cur = ggml_scale(ctx0, cur, 0.5773502691896257f); - - cb(cur, "result_output", -1); - - ggml_build_forward_expand(gf, cur); - - return gf; - } - - struct ggml_cgraph * build_dbrx() { - struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, 
model.max_nodes(), false); - - // mutable variable, needed during the last layer of the computation to skip unused tokens - int32_t n_tokens = this->n_tokens; - - const int64_t n_embd_head = hparams.n_embd_head_v; - const int64_t n_embd_gqa = hparams.n_embd_v_gqa(); - GGML_ASSERT(n_embd_head == hparams.n_embd_head_k); - GGML_ASSERT(n_embd_head == hparams.n_rot); - - struct ggml_tensor * cur; - struct ggml_tensor * inpL; - - inpL = llm_build_inp_embd(ctx0, lctx, hparams, ubatch, model.tok_embd, cb); - - // inp_pos - contains the positions - struct ggml_tensor * inp_pos = build_inp_pos(); - - // KQ_mask (mask for 1 head, it will be broadcasted to all heads) - struct ggml_tensor * KQ_mask = build_inp_KQ_mask(); - - for (int il = 0; il < n_layer; ++il) { - struct ggml_tensor * inpSA = inpL; - - // norm - cur = llm_build_norm(ctx0, inpL, hparams, - model.layers[il].attn_norm, NULL, - LLM_NORM, cb, il); - cb(cur, "attn_norm", il); - - // self-attention - { - struct ggml_tensor * Qcur = nullptr; - struct ggml_tensor * Kcur = nullptr; - struct ggml_tensor * Vcur = nullptr; - - cur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wqkv, cur); - cb(cur, "wqkv", il); - - cur = ggml_clamp(ctx0, cur, -hparams.f_clamp_kqv, hparams.f_clamp_kqv); - cb(cur, "wqkv_clamped", il); - - Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd))); - Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd))); - Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd + n_embd_gqa))); - - cb(Qcur, "Qcur", il); - cb(Kcur, "Kcur", il); - cb(Vcur, "Vcur", il); - - Qcur = ggml_rope_ext( - ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos, nullptr, - n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, - ext_factor, attn_factor, beta_fast, beta_slow - ); - cb(Qcur, "Qcur", il); - - Kcur = ggml_rope_ext( - ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos, nullptr, - n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, - ext_factor, attn_factor, beta_fast, beta_slow - ); - cb(Kcur, "Kcur", il); - - cur = llm_build_kv(ctx0, lctx, kv_self, gf, - model.layers[il].wo, NULL, - Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il); - } - - if (il == n_layer - 1) { - // skip computing output for unused tokens - struct ggml_tensor * inp_out_ids = build_inp_out_ids(); - n_tokens = n_outputs; - cur = ggml_get_rows(ctx0, cur, inp_out_ids); - inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids); - } - - struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA); - cb(ffn_inp, "ffn_inp", il); - - // feed-forward network - // MoE branch - cur = llm_build_norm(ctx0, ffn_inp, hparams, - model.layers[il].attn_out_norm, NULL, - LLM_NORM, cb, il); - cb(cur, "attn_out_norm", il); - - cur = llm_build_moe_ffn(ctx0, lctx, cur, - model.layers[il].ffn_gate_inp, - model.layers[il].ffn_up_exps, - model.layers[il].ffn_gate_exps, - model.layers[il].ffn_down_exps, - nullptr, - n_expert, n_expert_used, - LLM_FFN_SILU, true, - false, 0.0, - LLAMA_EXPERT_GATING_FUNC_TYPE_SOFTMAX, - cb, il); - cb(cur, "ffn_moe_out", il); - - cur = ggml_add(ctx0, cur, ffn_inp); - cb(cur, "ffn_out", il); - - cur = lctx.cvec.apply_to(ctx0, cur, il); - cb(cur, "l_out", il); - - // input for next layer - inpL = cur; - } - - cur = inpL; - - cur = llm_build_norm(ctx0, cur, hparams, - model.output_norm, NULL, - LLM_NORM, cb, -1); - cb(cur, "result_norm", -1); 
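
    As an illustrative aside rather than patch content: the llm_build_moe_ffn calls in the grok/dbrx builders above route each token through only n_expert_used of the n_expert experts using softmax gating. A minimal standalone sketch of that routing step follows, with made-up logits and no claim to match the exact weighting/normalization flags passed here:

    #include <algorithm>
    #include <cmath>
    #include <cstdio>
    #include <vector>

    int main() {
        const int n_expert      = 8;   // hypothetical sizes for illustration only
        const int n_expert_used = 2;
        std::vector<float> logits = {0.1f, 2.0f, -1.0f, 0.5f, 1.5f, 0.0f, -0.5f, 0.3f};

        // softmax over the router logits
        const float mx = *std::max_element(logits.begin(), logits.end());
        std::vector<float> p(n_expert);
        float sum = 0.0f;
        for (int i = 0; i < n_expert; ++i) { p[i] = std::exp(logits[i] - mx); sum += p[i]; }
        for (float & v : p) { v /= sum; }

        // keep the n_expert_used experts with the largest gate weights
        std::vector<int> idx(n_expert);
        for (int i = 0; i < n_expert; ++i) idx[i] = i;
        std::partial_sort(idx.begin(), idx.begin() + n_expert_used, idx.end(),
                          [&](int a, int b) { return p[a] > p[b]; });

        for (int k = 0; k < n_expert_used; ++k) {
            printf("token routed to expert %d with weight %.3f\n", idx[k], p[idx[k]]);
        }
        return 0;
    }
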
- - // lm_head - cur = llm_build_lora_mm(lctx, ctx0, model.output, cur); - - cb(cur, "result_output", -1); - - ggml_build_forward_expand(gf, cur); - - return gf; - } - - struct ggml_cgraph * build_starcoder() { - struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, model.max_nodes(), false); - - const int64_t n_embd_head = hparams.n_embd_head_v; - const int64_t n_embd_gqa = hparams.n_embd_v_gqa(); - GGML_ASSERT(n_embd_head == hparams.n_embd_head_k); - - struct ggml_tensor * cur; - struct ggml_tensor * inpL; - - inpL = llm_build_inp_embd(ctx0, lctx, hparams, ubatch, model.tok_embd, cb); - - // inp_pos - contains the positions - struct ggml_tensor * inp_pos = build_inp_pos(); - - // KQ_mask (mask for 1 head, it will be broadcasted to all heads) - struct ggml_tensor * KQ_mask = build_inp_KQ_mask(); - - struct ggml_tensor * pos = ggml_get_rows(ctx0, model.pos_embd, inp_pos); - cb(pos, "pos_embd", -1); - - inpL = ggml_add(ctx0, inpL, pos); - cb(inpL, "inpL", -1); - - for (int il = 0; il < n_layer; ++il) { - cur = llm_build_norm(ctx0, inpL, hparams, - model.layers[il].attn_norm, - model.layers[il].attn_norm_b, - LLM_NORM, cb, il); - cb(cur, "attn_norm", il); - - // self-attention - { - cur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wqkv, cur); - cb(cur, "wqkv", il); - - cur = ggml_add(ctx0, cur, model.layers[il].bqkv); - cb(cur, "bqkv", il); - - struct ggml_tensor * Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd))); - struct ggml_tensor * Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd))); - struct ggml_tensor * Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd + n_embd_gqa))); - - cb(Qcur, "Qcur", il); - cb(Kcur, "Kcur", il); - cb(Vcur, "Vcur", il); - - Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens); - - cur = llm_build_kv(ctx0, lctx, kv_self, gf, - model.layers[il].wo, model.layers[il].bo, - Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il); - } - - if (il == n_layer - 1) { - // skip computing output for unused tokens - struct ggml_tensor * inp_out_ids = build_inp_out_ids(); - cur = ggml_get_rows(ctx0, cur, inp_out_ids); - inpL = ggml_get_rows(ctx0, inpL, inp_out_ids); - } - - // add the input - struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpL); - cb(ffn_inp, "ffn_inp", il); - - // FF - { - cur = llm_build_norm(ctx0, ffn_inp, hparams, - model.layers[il].ffn_norm, - model.layers[il].ffn_norm_b, - LLM_NORM, cb, il); - cb(cur, "ffn_norm", il); - - cur = llm_build_ffn(ctx0, lctx, cur, - model.layers[il].ffn_up, model.layers[il].ffn_up_b, NULL, - NULL, NULL, NULL, - model.layers[il].ffn_down, model.layers[il].ffn_down_b, NULL, - NULL, - LLM_FFN_GELU, LLM_FFN_SEQ, cb, il); - cb(cur, "ffn_out", il); - } - - cur = ggml_add(ctx0, cur, ffn_inp); - cur = lctx.cvec.apply_to(ctx0, cur, il); - cb(cur, "l_out", il); - - // input for next layer - inpL = cur; - } - - cur = llm_build_norm(ctx0, inpL, hparams, - model.output_norm, - model.output_norm_b, - LLM_NORM, cb, -1); - cb(cur, "result_norm", -1); - - cur = llm_build_lora_mm(lctx, ctx0, model.output, cur); - cb(cur, "result_output", -1); - - ggml_build_forward_expand(gf, cur); - - return gf; - } - - struct ggml_cgraph * build_refact() { - struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, model.max_nodes(), false); - - const int64_t n_embd_head = hparams.n_embd_head_v; - GGML_ASSERT(n_embd_head == 
hparams.n_embd_head_k); - - struct ggml_tensor * cur; - struct ggml_tensor * inpL; - - inpL = llm_build_inp_embd(ctx0, lctx, hparams, ubatch, model.tok_embd, cb); - - // KQ_mask (mask for 1 head, it will be broadcasted to all heads) - struct ggml_tensor * KQ_mask = build_inp_KQ_mask(); - - for (int il = 0; il < n_layer; ++il) { - struct ggml_tensor * inpSA = inpL; - - cur = llm_build_norm(ctx0, inpL, hparams, - model.layers[il].attn_norm, NULL, - LLM_NORM_RMS, cb, il); - cb(cur, "attn_norm", il); - - // self-attention - { - struct ggml_tensor * Qcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wq, cur); - cb(Qcur, "Qcur", il); - - struct ggml_tensor * Kcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wk, cur); - cb(Kcur, "Kcur", il); - - struct ggml_tensor * Vcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wv, cur); - cb(Vcur, "Vcur", il); - - Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens); - cb(Kcur, "Kcur", il); - - Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens); - cb(Qcur, "Qcur", il); - - cur = llm_build_kv(ctx0, lctx, kv_self, gf, - model.layers[il].wo, NULL, - Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il); - } - - if (il == n_layer - 1) { - // skip computing output for unused tokens - struct ggml_tensor * inp_out_ids = build_inp_out_ids(); - cur = ggml_get_rows(ctx0, cur, inp_out_ids); - inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids); - } - - struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA); - cb(ffn_inp, "ffn_inp", il); - - // feed-forward network - { - cur = llm_build_norm(ctx0, ffn_inp, hparams, - model.layers[il].ffn_norm, NULL, - LLM_NORM_RMS, cb, il); - cb(cur, "ffn_norm", il); - - cur = llm_build_ffn(ctx0, lctx, cur, - model.layers[il].ffn_up, NULL, NULL, - model.layers[il].ffn_gate, NULL, NULL, - model.layers[il].ffn_down, NULL, NULL, - NULL, - LLM_FFN_SILU, LLM_FFN_PAR, cb, il); - cb(cur, "ffn_out", il); - } - - cur = ggml_add(ctx0, cur, ffn_inp); - cur = lctx.cvec.apply_to(ctx0, cur, il); - cb(cur, "l_out", il); - - // input for next layer - inpL = cur; - } - - cur = inpL; - - cur = llm_build_norm(ctx0, cur, hparams, - model.output_norm, NULL, - LLM_NORM_RMS, cb, -1); - cb(cur, "result_norm", -1); - - // lm_head - cur = llm_build_lora_mm(lctx, ctx0, model.output, cur); - cb(cur, "result_output", -1); - - ggml_build_forward_expand(gf, cur); - - return gf; - } - - struct ggml_cgraph * build_bert() { - struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, model.max_nodes(), false); - - const int64_t n_embd_head = hparams.n_embd_head_v; - const int64_t n_embd_gqa = hparams.n_embd_v_gqa(); - - GGML_ASSERT(n_embd_head == hparams.n_embd_head_k); - - struct ggml_tensor * cur; - struct ggml_tensor * inpL; - struct ggml_tensor * inp_pos = nullptr; - - if (model.arch != LLM_ARCH_JINA_BERT_V2) { - inp_pos = build_inp_pos(); - } - - // construct input embeddings (token, type, position) - inpL = llm_build_inp_embd(ctx0, lctx, hparams, ubatch, model.tok_embd, cb); - - // token types are hardcoded to zero ("Sentence A") - struct ggml_tensor * type_row0 = ggml_view_1d(ctx0, model.type_embd, n_embd, 0); - inpL = ggml_add(ctx0, inpL, type_row0); - if (model.arch == LLM_ARCH_BERT) { - inpL = ggml_add(ctx0, ggml_get_rows(ctx0, model.pos_embd, inp_pos), inpL); - } - cb(inpL, "inp_embd", -1); - - // embed layer norm - inpL = llm_build_norm(ctx0, inpL, hparams, model.tok_norm, model.tok_norm_b, LLM_NORM, cb, -1); - cb(inpL, "inp_norm", -1); - - // KQ_mask (mask for 1 head, it will be 
broadcasted to all heads) - struct ggml_tensor * KQ_mask = build_inp_KQ_mask(false); - - // iterate layers - for (int il = 0; il < n_layer; ++il) { - struct ggml_tensor * cur = inpL; - - struct ggml_tensor * Qcur; - struct ggml_tensor * Kcur; - struct ggml_tensor * Vcur; - - // self-attention - if (model.arch == LLM_ARCH_BERT || model.arch == LLM_ARCH_JINA_BERT_V2) { - Qcur = ggml_add(ctx0, llm_build_lora_mm(lctx, ctx0, model.layers[il].wq, cur), model.layers[il].bq); - cb(Qcur, "Qcur", il); - - if (model.layers[il].attn_q_norm) { - Qcur = llm_build_norm(ctx0, Qcur, hparams, - model.layers[il].attn_q_norm, - model.layers[il].attn_q_norm_b, - LLM_NORM, cb, il); - } - - Kcur = ggml_add(ctx0, llm_build_lora_mm(lctx, ctx0, model.layers[il].wk, cur), model.layers[il].bk); - cb(Kcur, "Kcur", il); - - if (model.layers[il].attn_k_norm) { - Kcur = llm_build_norm(ctx0, Kcur, hparams, - model.layers[il].attn_k_norm, - model.layers[il].attn_k_norm_b, - LLM_NORM, cb, il); - } - Vcur = ggml_add(ctx0, llm_build_lora_mm(lctx, ctx0, model.layers[il].wv, cur), model.layers[il].bv); - cb(Vcur, "Vcur", il); - - Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens); - Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens); - } else { - // compute Q and K and RoPE them - cur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wqkv, cur); - cb(cur, "wqkv", il); - - Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd))); - Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd))); - Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd + n_embd_gqa))); - - cb(Qcur, "Qcur", il); - cb(Kcur, "Kcur", il); - cb(Vcur, "Vcur", il); - - Qcur = ggml_rope_ext( - ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos, nullptr, - n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, - ext_factor, attn_factor, beta_fast, beta_slow - ); - cb(Qcur, "Qcur", il); - - Kcur = ggml_rope_ext( - ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos, nullptr, - n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, - ext_factor, attn_factor, beta_fast, beta_slow - ); - cb(Kcur, "Kcur", il); - } - - struct ggml_tensor * q = ggml_permute(ctx0, Qcur, 0, 2, 1, 3); - struct ggml_tensor * k = ggml_cont(ctx0, ggml_permute(ctx0, Kcur, 0, 2, 1, 3)); - - struct ggml_tensor * kq = ggml_mul_mat(ctx0, k, q); - cb(kq, "kq", il); - - kq = ggml_soft_max_ext(ctx0, kq, KQ_mask, 1.0f/sqrtf(float(n_embd_head)), hparams.f_max_alibi_bias); - cb(kq, "kq_soft_max_ext", il); - - struct ggml_tensor * v = ggml_cont(ctx0, ggml_transpose(ctx0, ggml_reshape_2d(ctx0, Vcur, n_embd_gqa, n_tokens))); - cb(v, "v", il); - - struct ggml_tensor * kqv = ggml_mul_mat(ctx0, ggml_reshape_3d(ctx0, v, n_tokens, n_embd_head, n_head_kv), kq); - cb(kqv, "kqv", il); - - struct ggml_tensor * kqv_merged = ggml_permute(ctx0, kqv, 0, 2, 1, 3); - cb(kqv_merged, "kqv_merged", il); - - cur = ggml_cont_2d(ctx0, kqv_merged, n_embd_gqa, n_tokens); - cb(cur, "kqv_merged_cont", il); - - ggml_build_forward_expand(gf, cur); - - cur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wo, cur); - if (model.layers[il].bo) { - cb(cur, "kqv_wo", il); - } - - if (model.layers[il].bo) { - cur = ggml_add(ctx0, cur, model.layers[il].bo); - } - cb(cur, "kqv_out", il); - - if (il == n_layer - 1 && pooling_type == LLAMA_POOLING_TYPE_NONE) { - // skip computing output for unused tokens - struct 
ggml_tensor * inp_out_ids = build_inp_out_ids(); - cur = ggml_get_rows(ctx0, cur, inp_out_ids); - inpL = ggml_get_rows(ctx0, inpL, inp_out_ids); - } - - // re-add the layer input - cur = ggml_add(ctx0, cur, inpL); - - // attention layer norm - cur = llm_build_norm(ctx0, cur, hparams, model.layers[il].attn_out_norm, model.layers[il].attn_out_norm_b, LLM_NORM, cb, il); - - if (model.layers[il].attn_norm_2 != nullptr) { - cur = ggml_add(ctx0, cur, inpL); // re-add the layer input - cur = llm_build_norm(ctx0, cur, hparams, model.layers[il].attn_norm_2, model.layers[il].attn_norm_2_b, LLM_NORM, cb, il); - } - - struct ggml_tensor * ffn_inp = cur; - cb(ffn_inp, "ffn_inp", il); - - // feed-forward network - if (model.arch == LLM_ARCH_BERT) { - cur = llm_build_ffn(ctx0, lctx, cur, - model.layers[il].ffn_up, model.layers[il].ffn_up_b, NULL, - NULL, NULL, NULL, - model.layers[il].ffn_down, model.layers[il].ffn_down_b, NULL, - NULL, - LLM_FFN_GELU, LLM_FFN_SEQ, cb, il); - } else if (model.arch == LLM_ARCH_JINA_BERT_V2) { - cur = llm_build_ffn(ctx0, lctx, cur, - model.layers[il].ffn_up, NULL, NULL, - model.layers[il].ffn_gate, NULL, NULL, - model.layers[il].ffn_down, model.layers[il].ffn_down_b, NULL, - NULL, - LLM_FFN_GELU, LLM_FFN_PAR, cb, il); - } else { - cur = llm_build_ffn(ctx0, lctx, cur, - model.layers[il].ffn_up, NULL, NULL, - model.layers[il].ffn_gate, NULL, NULL, - model.layers[il].ffn_down, NULL, NULL, - NULL, - LLM_FFN_SILU, LLM_FFN_PAR, cb, il); - } - cb(cur, "ffn_out", il); - - // attentions bypass the intermediate layer - cur = ggml_add(ctx0, cur, ffn_inp); - - // output layer norm - cur = llm_build_norm(ctx0, cur, hparams, model.layers[il].layer_out_norm, model.layers[il].layer_out_norm_b, LLM_NORM, cb, il); - - // input for next layer - inpL = cur; - } - - cur = inpL; - - cb(cur, "result_embd", -1); - - ggml_build_forward_expand(gf, cur); - - return gf; - } - - struct ggml_cgraph * build_bloom() { - struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, model.max_nodes(), false); - - const int64_t n_embd_head = hparams.n_embd_head_v; - const int64_t n_embd_gqa = hparams.n_embd_v_gqa(); - GGML_ASSERT(n_embd_head == hparams.n_embd_head_k); - - struct ggml_tensor * cur; - struct ggml_tensor * inpL; - - inpL = llm_build_inp_embd(ctx0, lctx, hparams, ubatch, model.tok_embd, cb); - - // KQ_mask (mask for 1 head, it will be broadcasted to all heads) - struct ggml_tensor * KQ_mask = build_inp_KQ_mask(); - - inpL = llm_build_norm(ctx0, inpL, hparams, - model.tok_norm, - model.tok_norm_b, - LLM_NORM, cb, -1); - cb(inpL, "inp_norm", -1); - - for (int il = 0; il < n_layer; ++il) { - cur = llm_build_norm(ctx0, inpL, hparams, - model.layers[il].attn_norm, - model.layers[il].attn_norm_b, - LLM_NORM, cb, il); - cb(cur, "attn_norm", il); - - // self-attention - { - cur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wqkv, cur); - cb(cur, "wqkv", il); - - cur = ggml_add(ctx0, cur, model.layers[il].bqkv); - cb(cur, "bqkv", il); - - struct ggml_tensor * Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd))); - struct ggml_tensor * Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd))); - struct ggml_tensor * Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd + n_embd_gqa))); - - cb(Qcur, "Qcur", il); - cb(Kcur, "Kcur", il); - cb(Vcur, "Vcur", il); - - Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens); - - cur = llm_build_kv(ctx0, 
lctx, kv_self, gf, - model.layers[il].wo, model.layers[il].bo, - Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il); - } - - if (il == n_layer - 1) { - // skip computing output for unused tokens - struct ggml_tensor * inp_out_ids = build_inp_out_ids(); - cur = ggml_get_rows(ctx0, cur, inp_out_ids); - inpL = ggml_get_rows(ctx0, inpL, inp_out_ids); - } - - // Add the input - struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpL); - cb(ffn_inp, "ffn_inp", il); - - // FF - { - cur = llm_build_norm(ctx0, ffn_inp, hparams, - model.layers[il].ffn_norm, - model.layers[il].ffn_norm_b, - LLM_NORM, cb, il); - cb(cur, "ffn_norm", il); - - cur = llm_build_ffn(ctx0, lctx, cur, - model.layers[il].ffn_up, model.layers[il].ffn_up_b, NULL, - NULL, NULL, NULL, - model.layers[il].ffn_down, model.layers[il].ffn_down_b, NULL, - NULL, - LLM_FFN_GELU, LLM_FFN_SEQ, cb, il); - cb(cur, "ffn_out", il); - } - - cur = ggml_add(ctx0, cur, ffn_inp); - cur = lctx.cvec.apply_to(ctx0, cur, il); - cb(cur, "l_out", il); - - // input for next layer - inpL = cur; - } - - cur = llm_build_norm(ctx0, inpL, hparams, - model.output_norm, - model.output_norm_b, - LLM_NORM, cb, -1); - cb(cur, "result_norm", -1); - - cur = llm_build_lora_mm(lctx, ctx0, model.output, cur); - cb(cur, "result_output", -1); - - ggml_build_forward_expand(gf, cur); - - return gf; - } - - struct ggml_cgraph * build_mpt() { - struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, model.max_nodes(), false); - - const int64_t n_embd_head = hparams.n_embd_head_v; - const int64_t n_embd_gqa = hparams.n_embd_v_gqa(); - GGML_ASSERT(n_embd_head == hparams.n_embd_head_k); - - struct ggml_tensor * cur; - struct ggml_tensor * pos; - struct ggml_tensor * inpL; - - inpL = llm_build_inp_embd(ctx0, lctx, hparams, ubatch, model.tok_embd, cb); - - // KQ_mask (mask for 1 head, it will be broadcasted to all heads) - struct ggml_tensor * KQ_mask = build_inp_KQ_mask(); - - if (model.pos_embd) { - // inp_pos - contains the positions - struct ggml_tensor * inp_pos = build_inp_pos(); - pos = ggml_get_rows(ctx0, model.pos_embd, inp_pos); - cb(pos, "pos_embd", -1); - - inpL = ggml_add(ctx0, inpL, pos); - cb(inpL, "inpL", -1); - } - - for (int il = 0; il < n_layer; ++il) { - struct ggml_tensor * attn_norm; - - attn_norm = llm_build_norm(ctx0, inpL, hparams, - model.layers[il].attn_norm, - model.layers[il].attn_norm_b, - LLM_NORM, cb, il); - cb(attn_norm, "attn_norm", il); - - // self-attention - { - cur = attn_norm; - - cur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wqkv, cur); - cb(cur, "wqkv", il); - - if (model.layers[il].bqkv){ - cur = ggml_add(ctx0, cur, model.layers[il].bqkv); - cb(cur, "bqkv", il); - } - - if (hparams.f_clamp_kqv > 0.0f) { - cur = ggml_clamp(ctx0, cur, -hparams.f_clamp_kqv, hparams.f_clamp_kqv); - cb(cur, "wqkv_clamped", il); - } - - struct ggml_tensor * Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd))); - struct ggml_tensor * Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd))); - struct ggml_tensor * Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd + n_embd_gqa))); - - cb(Qcur, "Qcur", il); - cb(Kcur, "Kcur", il); - cb(Vcur, "Vcur", il); - - // Q/K Layernorm - if (model.layers[il].attn_q_norm) { - Qcur = llm_build_norm(ctx0, Qcur, hparams, - model.layers[il].attn_q_norm, - model.layers[il].attn_q_norm_b, - LLM_NORM, cb, il); - cb(Qcur, "Qcur", il); - 
- Kcur = llm_build_norm(ctx0, Kcur, hparams, - model.layers[il].attn_k_norm, - model.layers[il].attn_k_norm_b, - LLM_NORM, cb, il); - cb(Kcur, "Kcur", il); - - Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens); - Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens); - - cur = llm_build_kv(ctx0, lctx, kv_self, gf, - model.layers[il].wo, model.layers[il].bo, - Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il); - } else { - Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens); - - cur = llm_build_kv(ctx0, lctx, kv_self, gf, - model.layers[il].wo, model.layers[il].bo, - Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il); - } - } - - if (il == n_layer - 1) { - // skip computing output for unused tokens - struct ggml_tensor * inp_out_ids = build_inp_out_ids(); - cur = ggml_get_rows(ctx0, cur, inp_out_ids); - inpL = ggml_get_rows(ctx0, inpL, inp_out_ids); - } - - // Add the input - struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpL); - cb(ffn_inp, "ffn_inp", il); - - // feed forward - { - cur = llm_build_norm(ctx0, ffn_inp, hparams, - model.layers[il].ffn_norm, - model.layers[il].ffn_norm_b, - LLM_NORM, cb, il); - cb(cur, "ffn_norm", il); - cur = llm_build_ffn(ctx0, lctx, cur, - model.layers[il].ffn_up, model.layers[il].ffn_up_b, NULL, - NULL, NULL, NULL, - model.layers[il].ffn_down, model.layers[il].ffn_down_b, NULL, - model.layers[il].ffn_act, - LLM_FFN_GELU, LLM_FFN_SEQ, cb, il); - cb(cur, "ffn_out", il); - } - - cur = ggml_add(ctx0, cur, ffn_inp); - cur = lctx.cvec.apply_to(ctx0, cur, il); - cb(cur, "l_out", il); - - // input for next layer - inpL = cur; - } - - cur = inpL; - - cur = llm_build_norm(ctx0, cur, hparams, - model.output_norm, - model.output_norm_b, - LLM_NORM, cb, -1); - cb(cur, "result_norm", -1); - - cur = llm_build_lora_mm(lctx, ctx0, model.output, cur); - cb(cur, "result_output", -1); - - ggml_build_forward_expand(gf, cur); - - return gf; - } - - struct ggml_cgraph * build_stablelm() { - struct ggml_cgraph * gf = ggml_new_graph(ctx0); - - const int64_t n_embd_head = hparams.n_embd_head_v; - GGML_ASSERT(n_embd_head == hparams.n_embd_head_k); - - struct ggml_tensor * cur; - struct ggml_tensor * inpL; - - inpL = llm_build_inp_embd(ctx0, lctx, hparams, ubatch, model.tok_embd, cb); - - // inp_pos - contains the positions - struct ggml_tensor * inp_pos = build_inp_pos(); - - // KQ_mask (mask for 1 head, it will be broadcasted to all heads) - struct ggml_tensor * KQ_mask = build_inp_KQ_mask(); - - for (int il = 0; il < n_layer; ++il) { - - - // norm - cur = llm_build_norm(ctx0, inpL, hparams, - model.layers[il].attn_norm, - model.layers[il].attn_norm_b, - LLM_NORM, cb, il); - cb(cur, "attn_norm", il); - - struct ggml_tensor * inpSA = cur; - - // self-attention - { - // compute Q and K and RoPE them - struct ggml_tensor * Qcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wq, cur); - cb(Qcur, "Qcur", il); - if (model.layers[il].bq) { - Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq); - cb(Qcur, "Qcur", il); - } - - struct ggml_tensor * Kcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wk, cur); - cb(Kcur, "Kcur", il); - if (model.layers[il].bk) { - Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk); - cb(Kcur, "Kcur", il); - } - - struct ggml_tensor * Vcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wv, cur); - cb(Vcur, "Vcur", il); - if (model.layers[il].bv) { - Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv); - cb(Vcur, "Vcur", il); - } - 
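
    As an illustrative aside rather than patch content: the ggml_rope_ext calls used throughout these builders rotate pairs of Q/K dimensions by a position-dependent angle. Below is a simplified standalone sketch of the rotate-half ("neox") variant referenced elsewhere in this file, ignoring freq_scale, ext_factor, attn_factor and the beta_* parameters:

    #include <cmath>
    #include <cstdio>
    #include <vector>

    // rotate one attention head in place; dimension i is paired with i + n_rot/2
    static void rope_neox(std::vector<float> & x, int pos, float freq_base) {
        const int n_rot = (int) x.size();
        for (int i = 0; i < n_rot/2; ++i) {
            const float theta = pos * std::pow(freq_base, -2.0f*i/n_rot);
            const float c = std::cos(theta), s = std::sin(theta);
            const float x0 = x[i], x1 = x[i + n_rot/2];
            x[i]           = x0*c - x1*s;
            x[i + n_rot/2] = x0*s + x1*c;
        }
    }

    int main() {
        std::vector<float> head(8, 1.0f);              // one head, head_dim = 8 (made up)
        rope_neox(head, /*pos=*/3, /*freq_base=*/10000.0f);
        for (float v : head) printf("%.4f ", v);
        printf("\n");
        return 0;
    }
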
- Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens); - cb(Qcur, "Qcur", il); - Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens); - cb(Kcur, "Kcur", il); - - if (model.layers[il].attn_q_norm) { - Qcur = llm_build_norm(ctx0, Qcur, hparams, - model.layers[il].attn_q_norm, - NULL, - LLM_NORM, cb, il); - cb(Qcur, "Qcur", il); - } - if (model.layers[il].attn_k_norm) { - Kcur = llm_build_norm(ctx0, Kcur, hparams, - model.layers[il].attn_k_norm, - NULL, - LLM_NORM, cb, il); - cb(Kcur, "Kcur", il); - } - - - Qcur = ggml_rope_ext( - ctx0, Qcur, inp_pos, nullptr, - n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, - ext_factor, attn_factor, beta_fast, beta_slow - ); - cb(Qcur, "Qcur", il); - - Kcur = ggml_rope_ext( - ctx0, Kcur, inp_pos, nullptr, - n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, - ext_factor, attn_factor, beta_fast, beta_slow - ); - cb(Kcur, "Kcur", il); - - cur = llm_build_kv(ctx0, lctx, kv_self, gf, - model.layers[il].wo, NULL, - Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il); - } - - if (il == n_layer - 1) { - // skip computing output for unused tokens - struct ggml_tensor * inp_out_ids = build_inp_out_ids(); - cur = ggml_get_rows(ctx0, cur, inp_out_ids); - inpL = ggml_get_rows(ctx0, inpL, inp_out_ids); - inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids); - } - - struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpL); - cb(ffn_inp, "ffn_inp", il); - - // feed-forward network - { - if (model.layers[il].ffn_norm) { - cur = llm_build_norm(ctx0, ffn_inp, hparams, - model.layers[il].ffn_norm, - model.layers[il].ffn_norm_b, - LLM_NORM, cb, il); - cb(cur, "ffn_norm", il); - } else { - // parallel residual - cur = inpSA; - } - cur = llm_build_ffn(ctx0, lctx, cur, - model.layers[il].ffn_up, NULL, NULL, - model.layers[il].ffn_gate, NULL, NULL, - model.layers[il].ffn_down, NULL, NULL, - NULL, - LLM_FFN_SILU, LLM_FFN_PAR, cb, il); - cb(cur, "ffn_out", il); - } - - cur = ggml_add(ctx0, cur, ffn_inp); - cur = lctx.cvec.apply_to(ctx0, cur, il); - cb(cur, "l_out", il); - - // input for next layer - inpL = cur; - } - - cur = inpL; - - cur = llm_build_norm(ctx0, cur, hparams, - model.output_norm, - model.output_norm_b, - LLM_NORM, cb, -1); - cb(cur, "result_norm", -1); - - // lm_head - cur = llm_build_lora_mm(lctx, ctx0, model.output, cur); - cb(cur, "result_output", -1); - - ggml_build_forward_expand(gf, cur); - - return gf; - } - - struct ggml_cgraph * build_qwen() { - struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, model.max_nodes(), false); - - const int64_t n_embd_head = hparams.n_embd_head_v; - GGML_ASSERT(n_embd_head == hparams.n_embd_head_k); - - struct ggml_tensor * cur; - struct ggml_tensor * inpL; - - inpL = llm_build_inp_embd(ctx0, lctx, hparams, ubatch, model.tok_embd, cb); - - // inp_pos - contains the positions - struct ggml_tensor * inp_pos = build_inp_pos(); - - // KQ_mask (mask for 1 head, it will be broadcasted to all heads) - struct ggml_tensor * KQ_mask = build_inp_KQ_mask(); - - for (int il = 0; il < n_layer; ++il) { - struct ggml_tensor * inpSA = inpL; - - cur = llm_build_norm(ctx0, inpL, hparams, - model.layers[il].attn_norm, NULL, - LLM_NORM_RMS, cb, il); - cb(cur, "attn_norm", il); - - // self-attention - { - cur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wqkv, cur); - cb(cur, "wqkv", il); - - cur = ggml_add(ctx0, cur, model.layers[il].bqkv); - cb(cur, "bqkv", il); - - struct ggml_tensor * Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 
0*sizeof(float)*(n_embd))); - struct ggml_tensor * Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd))); - struct ggml_tensor * Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 2*sizeof(float)*(n_embd))); - - cb(Qcur, "Qcur", il); - cb(Kcur, "Kcur", il); - cb(Vcur, "Vcur", il); - - Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens); - Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens); - - // using mode = 2 for neox mode - Qcur = ggml_rope_ext( - ctx0, Qcur, inp_pos, nullptr, n_rot, rope_type, n_ctx_orig, - freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow - ); - cb(Qcur, "Qcur", il); - - Kcur = ggml_rope_ext( - ctx0, Kcur, inp_pos, nullptr, n_rot, rope_type, n_ctx_orig, - freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow - ); - cb(Kcur, "Kcur", il); - - cur = llm_build_kv(ctx0, lctx, kv_self, gf, - model.layers[il].wo, NULL, - Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il); - } - - if (il == n_layer - 1) { - // skip computing output for unused tokens - struct ggml_tensor * inp_out_ids = build_inp_out_ids(); - cur = ggml_get_rows(ctx0, cur, inp_out_ids); - inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids); - } - - struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA); - cb(ffn_inp, "ffn_inp", il); - - // feed-forward forward - { - cur = llm_build_norm(ctx0, ffn_inp, hparams, - model.layers[il].ffn_norm, NULL, - LLM_NORM_RMS, cb, il); - cb(cur, "ffn_norm", il); - - cur = llm_build_ffn(ctx0, lctx, cur, - model.layers[il].ffn_up, NULL, NULL, - model.layers[il].ffn_gate, NULL, NULL, - model.layers[il].ffn_down, NULL, NULL, - NULL, - LLM_FFN_SILU, LLM_FFN_PAR, cb, il); - cb(cur, "ffn_out", il); - } - - cur = ggml_add(ctx0, cur, ffn_inp); - cur = lctx.cvec.apply_to(ctx0, cur, il); - cb(cur, "l_out", il); - - // input for next layer - inpL = cur; - } - - cur = inpL; - - cur = llm_build_norm(ctx0, cur, hparams, - model.output_norm, NULL, - LLM_NORM_RMS, cb, -1); - cb(cur, "result_norm", -1); - - // lm_head - cur = llm_build_lora_mm(lctx, ctx0, model.output, cur); - cb(cur, "result_output", -1); - - ggml_build_forward_expand(gf, cur); - - return gf; - } - - struct ggml_cgraph * build_qwen2() { - struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, model.max_nodes(), false); - - const int64_t n_embd_head = hparams.n_embd_head_v; - GGML_ASSERT(n_embd_head == hparams.n_embd_head_k); - GGML_ASSERT(n_embd_head == hparams.n_rot); - - struct ggml_tensor * cur; - struct ggml_tensor * inpL; - - inpL = llm_build_inp_embd(ctx0, lctx, hparams, ubatch, model.tok_embd, cb); - - // inp_pos - contains the positions - struct ggml_tensor * inp_pos = build_inp_pos(); - - // KQ_mask (mask for 1 head, it will be broadcasted to all heads) - struct ggml_tensor * KQ_mask = build_inp_KQ_mask(); - - for (int il = 0; il < n_layer; ++il) { - struct ggml_tensor * inpSA = inpL; - - // norm - cur = llm_build_norm(ctx0, inpL, hparams, - model.layers[il].attn_norm, NULL, - LLM_NORM_RMS, cb, il); - cb(cur, "attn_norm", il); - - // self-attention - { - // compute Q and K and RoPE them - struct ggml_tensor * Qcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wq, cur); - cb(Qcur, "Qcur", il); - Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq); - cb(Qcur, "Qcur", il); - - struct ggml_tensor * Kcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wk, cur); - cb(Kcur, "Kcur", il); - Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk); - 
cb(Kcur, "Kcur", il); - - struct ggml_tensor * Vcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wv, cur); - cb(Vcur, "Vcur", il); - Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv); - cb(Vcur, "Vcur", il); - - Qcur = ggml_rope_ext( - ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos, nullptr, - n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, - ext_factor, attn_factor, beta_fast, beta_slow - ); - cb(Qcur, "Qcur", il); - - Kcur = ggml_rope_ext( - ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos, nullptr, - n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, - ext_factor, attn_factor, beta_fast, beta_slow - ); - cb(Kcur, "Kcur", il); - - cur = llm_build_kv(ctx0, lctx, kv_self, gf, - model.layers[il].wo, model.layers[il].bo, - Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il); - } - - if (il == n_layer - 1) { - // skip computing output for unused tokens - struct ggml_tensor * inp_out_ids = build_inp_out_ids(); - cur = ggml_get_rows(ctx0, cur, inp_out_ids); - inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids); - } - - struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA); - cb(ffn_inp, "ffn_inp", il); - - // feed-forward network - cur = llm_build_norm(ctx0, ffn_inp, hparams, - model.layers[il].ffn_norm, NULL, - LLM_NORM_RMS, cb, il); - cb(cur, "ffn_norm", il); - - cur = llm_build_ffn(ctx0, lctx, cur, - model.layers[il].ffn_up, NULL, NULL, - model.layers[il].ffn_gate, NULL, NULL, - model.layers[il].ffn_down, NULL, NULL, - NULL, - LLM_FFN_SILU, LLM_FFN_PAR, cb, il); - cb(cur, "ffn_out", il); - - cur = ggml_add(ctx0, cur, ffn_inp); - cur = lctx.cvec.apply_to(ctx0, cur, il); - cb(cur, "l_out", il); - - // input for next layer - inpL = cur; - } - - cur = inpL; - - cur = llm_build_norm(ctx0, cur, hparams, - model.output_norm, NULL, - LLM_NORM_RMS, cb, -1); - cb(cur, "result_norm", -1); - - // lm_head - cur = llm_build_lora_mm(lctx, ctx0, model.output, cur); - cb(cur, "result_output", -1); - - ggml_build_forward_expand(gf, cur); - - return gf; - } - - struct ggml_cgraph * build_qwen2vl() { - struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, model.max_nodes(), false); - const int64_t n_embd_head = hparams.n_embd_head_v; - GGML_ASSERT(n_embd_head == hparams.n_embd_head_k); - GGML_ASSERT(n_embd_head == hparams.n_rot); - - struct ggml_tensor * cur; - struct ggml_tensor * inpL; - - inpL = llm_build_inp_embd(ctx0, lctx, hparams, ubatch, model.tok_embd, cb); - - // inp_pos - contains the positions - lctx.inp_pos = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_tokens * 4); - cb(lctx.inp_pos, "inp_pos", -1); - ggml_set_input(lctx.inp_pos); - struct ggml_tensor * inp_pos = lctx.inp_pos; - - // KQ_mask (mask for 1 head, it will be broadcasted to all heads) - struct ggml_tensor * KQ_mask = build_inp_KQ_mask(); - int sections[4]; - std::copy(std::begin(hparams.rope_sections), std::begin(hparams.rope_sections) + 4, sections); - - for (int il = 0; il < n_layer; ++il) { - struct ggml_tensor * inpSA = inpL; - - // norm - cur = llm_build_norm(ctx0, inpL, hparams, - model.layers[il].attn_norm, NULL, - LLM_NORM_RMS, cb, il); - cb(cur, "attn_norm", il); - - // self-attention - { - // compute Q and K and RoPE them - struct ggml_tensor * Qcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wq, cur); - cb(Qcur, "Qcur", il); - Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq); - cb(Qcur, "Qcur", il); - - struct ggml_tensor * Kcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wk, cur); - cb(Kcur, "Kcur", il); - Kcur = 
ggml_add(ctx0, Kcur, model.layers[il].bk); - cb(Kcur, "Kcur", il); - - struct ggml_tensor * Vcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wv, cur); - cb(Vcur, "Vcur", il); - Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv); - cb(Vcur, "Vcur", il); - - Qcur = ggml_rope_multi( - ctx0, - ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos, nullptr, - n_rot, sections, rope_type, n_ctx_orig, freq_base, freq_scale, - ext_factor, attn_factor, beta_fast, beta_slow - ); - cb(Qcur, "Qcur", il); - - Kcur = ggml_rope_multi( - ctx0, - ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos, nullptr, - n_rot, sections, rope_type, n_ctx_orig, freq_base, freq_scale, - ext_factor, attn_factor, beta_fast, beta_slow - ); - cb(Kcur, "Kcur", il); - - cur = llm_build_kv(ctx0, lctx, kv_self, gf, - model.layers[il].wo, model.layers[il].bo, - Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il); - } - - if (il == n_layer - 1) { - // skip computing output for unused tokens - struct ggml_tensor * inp_out_ids = build_inp_out_ids(); - cur = ggml_get_rows(ctx0, cur, inp_out_ids); - inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids); - } - - struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA); - cb(ffn_inp, "ffn_inp", il); - - // feed-forward network - cur = llm_build_norm(ctx0, ffn_inp, hparams, - model.layers[il].ffn_norm, NULL, - LLM_NORM_RMS, cb, il); - cb(cur, "ffn_norm", il); - - cur = llm_build_ffn(ctx0, lctx, cur, - model.layers[il].ffn_up, NULL, NULL, - model.layers[il].ffn_gate, NULL, NULL, - model.layers[il].ffn_down, NULL, NULL, - NULL, - LLM_FFN_SILU, LLM_FFN_PAR, cb, il); - cb(cur, "ffn_out", il); - - cur = ggml_add(ctx0, cur, ffn_inp); - cur = lctx.cvec.apply_to(ctx0, cur, il); - cb(cur, "l_out", il); - - // input for next layer - inpL = cur; - } - - cur = inpL; - - cur = llm_build_norm(ctx0, cur, hparams, - model.output_norm, NULL, - LLM_NORM_RMS, cb, -1); - cb(cur, "result_norm", -1); - - // lm_head - cur = llm_build_lora_mm(lctx, ctx0, model.output, cur); - cb(cur, "result_output", -1); - - ggml_build_forward_expand(gf, cur); - - return gf; - } - - struct ggml_cgraph * build_qwen2moe() { - struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, model.max_nodes(), false); - - // mutable variable, needed during the last layer of the computation to skip unused tokens - int32_t n_tokens = this->n_tokens; - - const int64_t n_embd_head = hparams.n_embd_head_v; - GGML_ASSERT(n_embd_head == hparams.n_embd_head_k); - GGML_ASSERT(n_embd_head == hparams.n_rot); - - struct ggml_tensor * cur; - struct ggml_tensor * inpL; - - inpL = llm_build_inp_embd(ctx0, lctx, hparams, ubatch, model.tok_embd, cb); - - // inp_pos - contains the positions - struct ggml_tensor * inp_pos = build_inp_pos(); - - // KQ_mask (mask for 1 head, it will be broadcasted to all heads) - struct ggml_tensor * KQ_mask = build_inp_KQ_mask(); - - for (int il = 0; il < n_layer; ++il) { - struct ggml_tensor * inpSA = inpL; - - // norm - cur = llm_build_norm(ctx0, inpL, hparams, - model.layers[il].attn_norm, NULL, - LLM_NORM_RMS, cb, il); - cb(cur, "attn_norm", il); - - // self_attention - { - // compute Q and K and RoPE them - struct ggml_tensor * Qcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wq, cur); - cb(Qcur, "Qcur", il); - Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq); - cb(Qcur, "Qcur", il); - - struct ggml_tensor * Kcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wk, cur); - cb(Kcur, "Kcur", il); - Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk); - 
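
    As an illustrative aside rather than patch content: further down, build_qwen2moe gates its shared expert with ggml_div(ggml_silu(x), x), which is a sigmoid expressed through ops ggml already provides, since silu(x) = x * sigmoid(x). A quick standalone check of that identity:

    #include <cmath>
    #include <cstdio>

    int main() {
        for (float x : {-2.0f, -0.5f, 0.1f, 1.0f, 3.0f}) {
            const float sigmoid = 1.0f / (1.0f + std::exp(-x));
            const float silu    = x * sigmoid;
            printf("x=% .2f  silu(x)/x=%.6f  sigmoid(x)=%.6f\n", x, silu / x, sigmoid);
        }
        return 0;
    }
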
cb(Kcur, "Kcur", il); - - struct ggml_tensor * Vcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wv, cur); - cb(Vcur, "Vcur", il); - Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv); - cb(Vcur, "Vcur", il); - - Qcur = ggml_rope_ext( - ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos, nullptr, - n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, - ext_factor, attn_factor, beta_fast, beta_slow - ); - cb(Qcur, "Qcur", il); - - Kcur = ggml_rope_ext( - ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos, nullptr, - n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, - ext_factor, attn_factor, beta_fast, beta_slow - ); - cb(Kcur, "Kcur", il); - - cur = llm_build_kv(ctx0, lctx, kv_self, gf, - model.layers[il].wo, model.layers[il].bo, - Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il); - } - - if (il == n_layer - 1) { - // skip computing output for unused tokens - struct ggml_tensor * inp_out_ids = build_inp_out_ids(); - n_tokens = n_outputs; - cur = ggml_get_rows(ctx0, cur, inp_out_ids); - inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids); - } - - struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA); - cb(ffn_inp, "ffn_inp", il); - - // MoE branch - cur = llm_build_norm(ctx0, ffn_inp, hparams, - model.layers[il].ffn_norm, NULL, - LLM_NORM_RMS, cb, il); - cb(cur, "ffn_norm", il); - - ggml_tensor * moe_out = - llm_build_moe_ffn(ctx0, lctx, cur, - model.layers[il].ffn_gate_inp, - model.layers[il].ffn_up_exps, - model.layers[il].ffn_gate_exps, - model.layers[il].ffn_down_exps, - nullptr, - n_expert, n_expert_used, - LLM_FFN_SILU, false, - false, 0.0, - LLAMA_EXPERT_GATING_FUNC_TYPE_SOFTMAX, - cb, il); - cb(cur, "ffn_moe_out", il); - - // FFN shared expert - { - ggml_tensor * cur_gate_inp = llm_build_lora_mm(lctx, ctx0, model.layers[il].ffn_gate_inp_shexp, cur); - cb(cur_gate_inp, "ffn_shexp_gate_inp", il); - - // sigmoid - ggml_tensor * cur_gate = ggml_div(ctx0, ggml_silu(ctx0, cur_gate_inp), cur_gate_inp); - cb(cur_gate, "ffn_shexp_gate", il); - - ggml_tensor * cur_ffn = llm_build_ffn(ctx0, lctx, cur, - model.layers[il].ffn_up_shexp, NULL, NULL, - model.layers[il].ffn_gate_shexp, NULL, NULL, - model.layers[il].ffn_down_shexp, NULL, NULL, - NULL, - LLM_FFN_SILU, LLM_FFN_PAR, cb, il); - cb(cur_ffn, "ffn_shexp", il); - - ggml_tensor * ffn_shexp_out = ggml_mul(ctx0, cur_ffn, cur_gate); - cb(ffn_shexp_out, "ffn_shexp_out", il); - - moe_out = ggml_add(ctx0, moe_out, ffn_shexp_out); - cb(moe_out, "ffn_out", il); - - cur = moe_out; - } - - cur = ggml_add(ctx0, cur, ffn_inp); - cur = lctx.cvec.apply_to(ctx0, cur, il); - cb(cur, "l_out", il); - - // input for next layer - inpL = cur; - } - - cur = inpL; - - cur = llm_build_norm(ctx0, cur, hparams, - model.output_norm, NULL, - LLM_NORM_RMS, cb, -1); - cb(cur, "result_norm", -1); - - // lm_head - cur = llm_build_lora_mm(lctx, ctx0, model.output, cur); - cb(cur, "result_output", -1); - - ggml_build_forward_expand(gf, cur); - - return gf; - } - - struct ggml_cgraph * build_phi2() { - struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, model.max_nodes(), false); - - const int64_t n_embd_head = hparams.n_embd_head_v; - const int64_t n_embd_gqa = hparams.n_embd_v_gqa(); - GGML_ASSERT(n_embd_head == hparams.n_embd_head_k); - - struct ggml_tensor * cur; - struct ggml_tensor * attn_norm_output; - struct ggml_tensor * ffn_output; - struct ggml_tensor * inpL; - - inpL = llm_build_inp_embd(ctx0, lctx, hparams, ubatch, model.tok_embd, cb); - - // inp_pos - contains the 
positions - struct ggml_tensor * inp_pos = build_inp_pos(); - - // KQ_mask (mask for 1 head, it will be broadcasted to all heads) - struct ggml_tensor * KQ_mask = build_inp_KQ_mask(); - - for (int il = 0; il < n_layer; ++il) { - attn_norm_output = llm_build_norm(ctx0, inpL, hparams, - model.layers[il].attn_norm, - model.layers[il].attn_norm_b, - LLM_NORM, cb, il); - cb(attn_norm_output, "attn_norm", il); - - // self-attention - { - struct ggml_tensor * Qcur = nullptr; - struct ggml_tensor * Kcur = nullptr; - struct ggml_tensor * Vcur = nullptr; - - if (model.layers[il].wqkv) { - cur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wqkv, attn_norm_output); - cb(cur, "wqkv", il); - - cur = ggml_add(ctx0, cur, model.layers[il].bqkv); - cb(cur, "bqkv", il); - - Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd))); - Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd))); - Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd + n_embd_gqa))); - } else { - Qcur = ggml_add(ctx0, llm_build_lora_mm(lctx, ctx0, model.layers[il].wq, attn_norm_output), model.layers[il].bq); - Kcur = ggml_add(ctx0, llm_build_lora_mm(lctx, ctx0, model.layers[il].wk, attn_norm_output), model.layers[il].bk); - Vcur = ggml_add(ctx0, llm_build_lora_mm(lctx, ctx0, model.layers[il].wv, attn_norm_output), model.layers[il].bv); - } - - cb(Qcur, "Qcur", il); - cb(Kcur, "Kcur", il); - cb(Vcur, "Vcur", il); - - Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens); - Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens); - - Qcur = ggml_rope_ext( - ctx0, Qcur, inp_pos, nullptr, n_rot, rope_type, n_ctx_orig, - freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow - ); - cb(Qcur, "Qcur", il); - - // with phi2, we scale the Q to avoid precision issues - // ref: https://github.com/ml-explore/mlx-examples/blob/08e862336ade809bc37d1035f94b359e7d1a5152/phi2/phi2.py#L64-L66 - Qcur = ggml_scale(ctx0, Qcur, 1.0f/sqrtf(float(n_embd_head))); - cb(Qcur, "Qcur", il); - - Kcur = ggml_rope_ext( - ctx0, Kcur, inp_pos, nullptr, n_rot, rope_type, n_ctx_orig, - freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow - ); - cb(Kcur, "Kcur", il); - - cur = llm_build_kv(ctx0, lctx, kv_self, gf, - model.layers[il].wo, model.layers[il].bo, - Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f, cb, il); - } - - if (il == n_layer - 1) { - // skip computing output for unused tokens - struct ggml_tensor * inp_out_ids = build_inp_out_ids(); - cur = ggml_get_rows(ctx0, cur, inp_out_ids); - inpL = ggml_get_rows(ctx0, inpL, inp_out_ids); - attn_norm_output = ggml_get_rows(ctx0, attn_norm_output, inp_out_ids); - } - - // FF - { - ffn_output = llm_build_ffn(ctx0, lctx, attn_norm_output, - model.layers[il].ffn_up, model.layers[il].ffn_up_b, NULL, - NULL, NULL, NULL, - model.layers[il].ffn_down, model.layers[il].ffn_down_b, NULL, - NULL, - LLM_FFN_GELU, LLM_FFN_SEQ, cb, il); - cb(ffn_output, "ffn_out", il); - } - - cur = ggml_add(ctx0, cur, ffn_output); - cur = ggml_add(ctx0, cur, inpL); - cur = lctx.cvec.apply_to(ctx0, cur, il); - cb(cur, "l_out", il); - - // input for next layer - inpL = cur; - } - - cur = llm_build_norm(ctx0, inpL, hparams, - model.output_norm, - model.output_norm_b, - LLM_NORM, cb, -1); - cb(cur, "result_norm", -1); - - cur = llm_build_lora_mm(lctx, ctx0, model.output, cur); - cb(cur, "result_output_no_bias", -1); - - cur = 
ggml_add(ctx0, cur, model.output_b); - cb(cur, "result_output", -1); - ggml_build_forward_expand(gf, cur); - return gf; - } - - struct ggml_cgraph * build_phi3() { - struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, model.max_nodes(), false); - - const int64_t n_embd_head = hparams.n_embd_head_v; - const int64_t n_embd_gqa = hparams.n_embd_v_gqa(); - GGML_ASSERT(n_embd_head == hparams.n_embd_head_k); - - struct ggml_tensor * cur; - struct ggml_tensor * inpL; - - inpL = llm_build_inp_embd(ctx0, lctx, hparams, ubatch, model.tok_embd, cb); - - // inp_pos - contains the positions - struct ggml_tensor * inp_pos = build_inp_pos(); - - // KQ_mask (mask for 1 head, it will be broadcasted to all heads) - struct ggml_tensor * KQ_mask = nullptr; - if (hparams.n_swa == 0) { - // Phi-4 doesn't use sliding window attention - KQ_mask = build_inp_KQ_mask(); - } else { - KQ_mask = build_inp_KQ_mask_swa(); - } - - for (int il = 0; il < n_layer; ++il) { - auto residual = inpL; - - // self-attention - { - // rope freq factors for 128k context - struct ggml_tensor * rope_factors = build_rope_factors(il); - - struct ggml_tensor* attn_norm_output = llm_build_norm(ctx0, inpL, hparams, - model.layers[il].attn_norm, - model.layers[il].attn_norm_b, - LLM_NORM_RMS, cb, il); - cb(attn_norm_output, "attn_norm", il); - - struct ggml_tensor * Qcur = nullptr; - struct ggml_tensor * Kcur = nullptr; - struct ggml_tensor * Vcur = nullptr; - - if (model.layers[il].wqkv) { - cur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wqkv, attn_norm_output); - cb(cur, "wqkv", il); - - Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0 * sizeof(float) * (n_embd))); - Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1 * sizeof(float) * (n_embd))); - Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1 * sizeof(float) * (n_embd + n_embd_gqa))); - } else { - Qcur = ggml_add(ctx0, llm_build_lora_mm(lctx, ctx0, model.layers[il].wq, attn_norm_output), model.layers[il].bq); - Kcur = ggml_add(ctx0, llm_build_lora_mm(lctx, ctx0, model.layers[il].wk, attn_norm_output), model.layers[il].bk); - Vcur = ggml_add(ctx0, llm_build_lora_mm(lctx, ctx0, model.layers[il].wv, attn_norm_output), model.layers[il].bv); - } - - cb(Qcur, "Qcur", il); - cb(Kcur, "Kcur", il); - cb(Vcur, "Vcur", il); - - Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens); - Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens); - - Qcur = ggml_rope_ext( - ctx0, Qcur, inp_pos, rope_factors, n_rot, rope_type, n_ctx_orig, - freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow - ); - cb(Qcur, "Qcur", il); - - Qcur = ggml_scale(ctx0, Qcur, 1.0f / sqrtf(float(n_embd_head))); - cb(Qcur, "Qcur", il); - - Kcur = ggml_rope_ext( - ctx0, Kcur, inp_pos, rope_factors, n_rot, rope_type, n_ctx_orig, - freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow - ); - cb(Kcur, "Kcur", il); - - cur = llm_build_kv(ctx0, lctx, kv_self, gf, - model.layers[il].wo, model.layers[il].bo, - Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f, cb, il); - } - - if (il == n_layer - 1) { - // skip computing output for unused tokens - struct ggml_tensor* inp_out_ids = build_inp_out_ids(); - cur = ggml_get_rows(ctx0, cur, inp_out_ids); - residual = ggml_get_rows(ctx0, residual, inp_out_ids); - } - - cur = ggml_add(ctx0, cur, residual); - residual = cur; - - cur = llm_build_norm(ctx0, cur, hparams, - model.layers[il].ffn_norm, model.layers[il].ffn_norm_b, - 
LLM_NORM_RMS, cb, il); - cb(cur, "ffn_norm", il); - - // feed-forward network - if (model.layers[il].ffn_gate_inp == nullptr) { - cur = llm_build_ffn(ctx0, lctx, cur, - model.layers[il].ffn_up, NULL, NULL, - NULL, NULL, NULL, - model.layers[il].ffn_down, NULL, NULL, - NULL, - LLM_FFN_SWIGLU, LLM_FFN_SEQ, cb, il); - cb(cur, "ffn_out", il); - } else { - // MoE branch - cur = llm_build_moe_ffn(ctx0, lctx, cur, - model.layers[il].ffn_gate_inp, - model.layers[il].ffn_up_exps, - model.layers[il].ffn_gate_exps, - model.layers[il].ffn_down_exps, - nullptr, - n_expert, n_expert_used, - LLM_FFN_SILU, true, - false, 0.0, - LLAMA_EXPERT_GATING_FUNC_TYPE_SOFTMAX, - cb, il); - cb(cur, "ffn_moe_out", il); - } - - cur = ggml_add(ctx0, residual, cur); - cur = lctx.cvec.apply_to(ctx0, cur, il); - cb(cur, "l_out", il); - - // input for next layer - inpL = cur; - } - - cur = llm_build_norm(ctx0, inpL, hparams, - model.output_norm, - model.output_norm_b, - LLM_NORM_RMS, cb, -1); - cb(cur, "result_norm", -1); - - cur = llm_build_lora_mm(lctx, ctx0, model.output, cur); - - if (model.output_b != nullptr) { - cb(cur, "result_output_no_bias", -1); - cur = ggml_add(ctx0, cur, model.output_b); - } - cb(cur, "result_output", -1); - - ggml_build_forward_expand(gf, cur); - - return gf; - } - - - struct ggml_cgraph * build_plamo() { - struct ggml_cgraph * gf = ggml_new_graph(ctx0); - - const int64_t n_embd_head = hparams.n_embd_head_v; - GGML_ASSERT(n_embd_head == hparams.n_embd_head_k); - GGML_ASSERT(n_embd_head == hparams.n_rot); - - struct ggml_tensor * cur; - struct ggml_tensor * inpL; - - inpL = llm_build_inp_embd(ctx0, lctx, hparams, ubatch, model.tok_embd, cb); - - // inp_pos - contains the positions - struct ggml_tensor * inp_pos = build_inp_pos(); - - // KQ_mask (mask for 1 head, it will be broadcasted to all heads) - struct ggml_tensor * KQ_mask = build_inp_KQ_mask(); - - for (int il = 0; il < n_layer; ++il) { - - // norm - cur = llm_build_norm(ctx0, inpL, hparams, - model.layers[il].attn_norm, NULL, - LLM_NORM_RMS, cb, il); - cb(cur, "attn_norm", il); - - struct ggml_tensor * attention_norm = cur; - - // self-attention - { - // compute Q and K and RoPE them - struct ggml_tensor * Qcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wq, cur); - cb(Qcur, "Qcur", il); - - struct ggml_tensor * Kcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wk, cur); - cb(Kcur, "Kcur", il); - - struct ggml_tensor * Vcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wv, cur); - cb(Vcur, "Vcur", il); - - Qcur = ggml_rope_ext( - ctx0, ggml_reshape_3d(ctx0, Qcur, n_rot, n_head, n_tokens), inp_pos, nullptr, - n_embd_head, rope_type, n_ctx_orig, freq_base, freq_scale, - ext_factor, attn_factor, beta_fast, beta_slow); - cb(Qcur, "Qcur", il); - - Kcur = ggml_rope_ext( - ctx0, ggml_reshape_3d(ctx0, Kcur, n_rot, n_head_kv, n_tokens), inp_pos, nullptr, - n_embd_head, rope_type, n_ctx_orig, freq_base, freq_scale, - ext_factor, attn_factor, beta_fast, beta_slow); - cb(Kcur, "Kcur", il); - - cur = llm_build_kv(ctx0, lctx, kv_self, gf, - model.layers[il].wo, NULL, - Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il); - } - struct ggml_tensor * sa_out = cur; - - cur = attention_norm; - - if (il == n_layer - 1) { - // skip computing output for unused tokens - struct ggml_tensor * inp_out_ids = build_inp_out_ids(); - cur = ggml_get_rows(ctx0, cur, inp_out_ids); - sa_out = ggml_get_rows(ctx0, sa_out, inp_out_ids); - inpL = ggml_get_rows(ctx0, inpL, inp_out_ids); - } - - // feed-forward network - { - 
cur = llm_build_ffn(ctx0, lctx, cur, - model.layers[il].ffn_up, NULL, NULL, - model.layers[il].ffn_gate, NULL, NULL, - model.layers[il].ffn_down, NULL, NULL, - NULL, - LLM_FFN_SILU, LLM_FFN_PAR, cb, il); - cb(cur, "ffn_out", il); - } - - cur = ggml_add(ctx0, cur, sa_out); - cur = ggml_add(ctx0, cur, inpL); - cur = lctx.cvec.apply_to(ctx0, cur, il); - cb(cur, "l_out", il); - - // input for next layer - inpL = cur; - } - - cur = inpL; - - cur = llm_build_norm(ctx0, cur, hparams, - model.output_norm, NULL, - LLM_NORM_RMS, cb, -1); - cb(cur, "result_norm", -1); - - // lm_head - cur = llm_build_lora_mm(lctx, ctx0, model.output, cur); - cb(cur, "result_output", -1); - - ggml_build_forward_expand(gf, cur); - - return gf; - } - - struct ggml_cgraph * build_gpt2() { - struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, model.max_nodes(), false); - - const int64_t n_embd_head = hparams.n_embd_head_v; - const int64_t n_embd_gqa = hparams.n_embd_v_gqa(); - GGML_ASSERT(n_embd_head == hparams.n_embd_head_k); - - struct ggml_tensor * cur; - struct ggml_tensor * pos; - struct ggml_tensor * inpL; - - inpL = llm_build_inp_embd(ctx0, lctx, hparams, ubatch, model.tok_embd, cb); - - // inp_pos - contains the positions - struct ggml_tensor * inp_pos = build_inp_pos(); - - // KQ_mask (mask for 1 head, it will be broadcasted to all heads) - struct ggml_tensor * KQ_mask = build_inp_KQ_mask(); - - pos = ggml_get_rows(ctx0, model.pos_embd, inp_pos); - cb(pos, "pos_embd", -1); - - inpL = ggml_add(ctx0, inpL, pos); - cb(inpL, "inpL", -1); - - for (int il = 0; il < n_layer; ++il) { - cur = llm_build_norm(ctx0, inpL, hparams, - model.layers[il].attn_norm, - model.layers[il].attn_norm_b, - LLM_NORM, cb, il); - cb(cur, "attn_norm", il); - - // self-attention - { - cur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wqkv, cur); - cb(cur, "wqkv", il); - - cur = ggml_add(ctx0, cur, model.layers[il].bqkv); - cb(cur, "bqkv", il); - - struct ggml_tensor * Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd))); - struct ggml_tensor * Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd))); - struct ggml_tensor * Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd + n_embd_gqa))); - - cb(Qcur, "Qcur", il); - cb(Kcur, "Kcur", il); - cb(Vcur, "Vcur", il); - - Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens); - - cur = llm_build_kv(ctx0, lctx, kv_self, gf, - model.layers[il].wo, model.layers[il].bo, - Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il); - } - - if (il == n_layer - 1) { - // skip computing output for unused tokens - struct ggml_tensor * inp_out_ids = build_inp_out_ids(); - cur = ggml_get_rows(ctx0, cur, inp_out_ids); - inpL = ggml_get_rows(ctx0, inpL, inp_out_ids); - } - - // add the input - struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpL); - cb(ffn_inp, "ffn_inp", il); - - // FF - { - cur = llm_build_norm(ctx0, ffn_inp, hparams, - model.layers[il].ffn_norm, - model.layers[il].ffn_norm_b, - LLM_NORM, cb, il); - cb(cur, "ffn_norm", il); - - cur = llm_build_ffn(ctx0, lctx, cur, - model.layers[il].ffn_up, model.layers[il].ffn_up_b, NULL, - NULL, NULL, NULL, - model.layers[il].ffn_down, model.layers[il].ffn_down_b, NULL, - NULL, - LLM_FFN_GELU, LLM_FFN_SEQ, cb, il); - cb(cur, "ffn_out", il); - } - - cur = ggml_add(ctx0, cur, ffn_inp); - cur = lctx.cvec.apply_to(ctx0, cur, il); - cb(cur, "l_out", il); - 
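
    As an illustrative aside rather than patch content: despite their per-architecture differences, these builders all follow the same pre-norm residual skeleton per layer. In the sketch below, attn() and ffn() are hypothetical stand-ins for the normalized attention and feed-forward subgraphs built above:

    #include <cstdio>
    #include <vector>

    using vec = std::vector<float>;

    static vec add(const vec & a, const vec & b) {
        vec r(a.size());
        for (size_t i = 0; i < a.size(); ++i) r[i] = a[i] + b[i];
        return r;
    }

    // hypothetical stand-ins for the attention / FFN subgraphs
    static vec attn(const vec & x) { return vec(x.size(), 0.1f); }
    static vec ffn (const vec & x) { return vec(x.size(), 0.2f); }

    int main() {
        vec inpL(4, 1.0f);                 // layer input
        vec cur     = attn(inpL);          // self-attention on the normed input
        vec ffn_inp = add(cur, inpL);      // first residual: "ffn_inp"
        cur         = ffn(ffn_inp);        // feed-forward on the normed ffn_inp
        cur         = add(cur, ffn_inp);   // second residual: "l_out", next layer's input
        printf("l_out[0] = %.2f\n", cur[0]);
        return 0;
    }
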
- // input for next layer - inpL = cur; - } - - cur = llm_build_norm(ctx0, inpL, hparams, - model.output_norm, - model.output_norm_b, - LLM_NORM, cb, -1); - cb(cur, "result_norm", -1); - - cur = llm_build_lora_mm(lctx, ctx0, model.output, cur); - cb(cur, "result_output", -1); - - ggml_build_forward_expand(gf, cur); - - return gf; - } - - struct ggml_cgraph * build_codeshell() { - struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, model.max_nodes(), false); - - const int64_t n_embd_head = hparams.n_embd_head_v; - const int64_t n_embd_gqa = hparams.n_embd_v_gqa(); - GGML_ASSERT(n_embd_head == hparams.n_embd_head_k); - GGML_ASSERT(n_embd_head == hparams.n_rot); - - struct ggml_tensor * cur; - struct ggml_tensor * inpL; - - inpL = llm_build_inp_embd(ctx0, lctx, hparams, ubatch, model.tok_embd, cb); - - // inp_pos - contains the positions - struct ggml_tensor * inp_pos = build_inp_pos(); - - // KQ_mask (mask for 1 head, it will be broadcasted to all heads) - struct ggml_tensor * KQ_mask = build_inp_KQ_mask(); - - for (int il = 0; il < n_layer; ++il) { - cur = llm_build_norm(ctx0, inpL, hparams, - model.layers[il].attn_norm, - model.layers[il].attn_norm_b, - LLM_NORM, cb, il); - cb(cur, "attn_norm", il); - - // self-attention - { - cur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wqkv, cur); - cb(cur, "wqkv", il); - - cur = ggml_add(ctx0, cur, model.layers[il].bqkv); - cb(cur, "bqkv", il); - - struct ggml_tensor * tmpq = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd))); - struct ggml_tensor * tmpk = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd))); - struct ggml_tensor * Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd + n_embd_gqa))); - - cb(tmpq, "tmpq", il); - cb(tmpk, "tmpk", il); - cb(Vcur, "Vcur", il); - - struct ggml_tensor * Qcur = ggml_rope_ext( - ctx0, ggml_reshape_3d(ctx0, tmpq, n_embd_head, n_head, n_tokens), inp_pos, nullptr, - n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, - ext_factor, attn_factor, beta_fast, beta_slow - ); - cb(Qcur, "Qcur", il); - - struct ggml_tensor * Kcur = ggml_rope_ext( - ctx0, ggml_reshape_3d(ctx0, tmpk, n_embd_head, n_head_kv, n_tokens), inp_pos, nullptr, - n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, - ext_factor, attn_factor, beta_fast, beta_slow - ); - cb(Kcur, "Kcur", il); - - cur = llm_build_kv(ctx0, lctx, kv_self, gf, - model.layers[il].wo, model.layers[il].bo, - Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il); - } - - if (il == n_layer - 1) { - // skip computing output for unused tokens - struct ggml_tensor * inp_out_ids = build_inp_out_ids(); - cur = ggml_get_rows(ctx0, cur, inp_out_ids); - inpL = ggml_get_rows(ctx0, inpL, inp_out_ids); - } - - // add the input - struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpL); - cb(ffn_inp, "ffn_inp", il); - - // FF - { - cur = llm_build_norm(ctx0, ffn_inp, hparams, - model.layers[il].ffn_norm, - model.layers[il].ffn_norm_b, - LLM_NORM, cb, il); - cb(cur, "ffn_norm", il); - - cur = llm_build_ffn(ctx0, lctx, cur, - model.layers[il].ffn_up, model.layers[il].ffn_up_b, NULL, - NULL, NULL, NULL, - model.layers[il].ffn_down, model.layers[il].ffn_down_b, NULL, - NULL, - LLM_FFN_GELU, LLM_FFN_SEQ, cb, il); - cb(cur, "ffn_out", il); - } - - cur = ggml_add(ctx0, cur, ffn_inp); - cur = lctx.cvec.apply_to(ctx0, cur, il); - cb(cur, "l_out", il); - - // input for next layer - inpL = cur; - } - - cur = 
llm_build_norm(ctx0, inpL, hparams, - model.output_norm, - model.output_norm_b, - LLM_NORM, cb, -1); - cb(cur, "result_norm", -1); - - cur = llm_build_lora_mm(lctx, ctx0, model.output, cur); - cb(cur, "result_output", -1); - - ggml_build_forward_expand(gf, cur); - - return gf; - } - - struct ggml_cgraph * build_orion() { - struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, model.max_nodes(), false); - - const int64_t n_embd_head = hparams.n_embd_head_v; - GGML_ASSERT(n_embd_head == hparams.n_embd_head_k); - GGML_ASSERT(n_embd_head == hparams.n_rot); - - struct ggml_tensor * cur; - struct ggml_tensor * inpL; - - inpL = llm_build_inp_embd(ctx0, lctx, hparams, ubatch, model.tok_embd, cb); - - // inp_pos - contains the positions - struct ggml_tensor * inp_pos = build_inp_pos(); - - // KQ_mask (mask for 1 head, it will be broadcasted to all heads) - struct ggml_tensor * KQ_mask = build_inp_KQ_mask(); - - for (int il = 0; il < n_layer; ++il) { - struct ggml_tensor * inpSA = inpL; - - // norm - cur = llm_build_norm(ctx0, inpL, hparams, - model.layers[il].attn_norm, model.layers[il].attn_norm_b, - LLM_NORM, cb, il); - cb(cur, "attn_norm", il); - - // self-attention - { - // compute Q and K and RoPE them - struct ggml_tensor * Qcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wq, cur); - cb(Qcur, "Qcur", il); - // if (model.layers[il].bq) { - // Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq); - // cb(Qcur, "Qcur", il); - // } - - struct ggml_tensor * Kcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wk, cur); - cb(Kcur, "Kcur", il); - // if (model.layers[il].bk) { - // Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk); - // cb(Kcur, "Kcur", il); - // } - - struct ggml_tensor * Vcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wv, cur); - cb(Vcur, "Vcur", il); - // if (model.layers[il].bv) { - // Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv); - // cb(Vcur, "Vcur", il); - // } - - Qcur = ggml_rope_ext( - ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos, nullptr, - n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, - ext_factor, attn_factor, beta_fast, beta_slow - ); - cb(Qcur, "Qcur", il); - - Kcur = ggml_rope_ext( - ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos, nullptr, - n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, - ext_factor, attn_factor, beta_fast, beta_slow - ); - cb(Kcur, "Kcur", il); - - cur = llm_build_kv(ctx0, lctx, kv_self, gf, - model.layers[il].wo, NULL, - Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il); - } - - if (il == n_layer - 1) { - // skip computing output for unused tokens - struct ggml_tensor * inp_out_ids = build_inp_out_ids(); - cur = ggml_get_rows(ctx0, cur, inp_out_ids); - inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids); - } - - struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA); - cb(ffn_inp, "ffn_inp", il); - - // feed-forward network - cur = llm_build_norm(ctx0, ffn_inp, hparams, - model.layers[il].ffn_norm, model.layers[il].ffn_norm_b, - LLM_NORM, cb, il); - cb(cur, "ffn_norm", il); - - cur = llm_build_ffn(ctx0, lctx, cur, - model.layers[il].ffn_up, NULL, NULL, - model.layers[il].ffn_gate, NULL, NULL, - model.layers[il].ffn_down, NULL, NULL, - NULL, - LLM_FFN_SILU, LLM_FFN_PAR, cb, il); - cb(cur, "ffn_out", il); - - cur = ggml_add(ctx0, cur, ffn_inp); - cur = lctx.cvec.apply_to(ctx0, cur, il); - cb(cur, "l_out", il); - - // input for next layer - inpL = cur; - } - - cur = inpL; - - cur = llm_build_norm(ctx0, cur, hparams, - 
model.output_norm, model.output_norm_b, - LLM_NORM, cb, -1); - cb(cur, "result_norm", -1); - - // lm_head - cur = llm_build_lora_mm(lctx, ctx0, model.output, cur); - cb(cur, "result_output", -1); - - ggml_build_forward_expand(gf, cur); - - return gf; - } - - struct ggml_cgraph * build_internlm2() { - struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, model.max_nodes(), false); - - const int64_t n_embd_head = hparams.n_embd_head_v; - GGML_ASSERT(n_embd_head == hparams.n_embd_head_k); - GGML_ASSERT(n_embd_head == hparams.n_rot); - - struct ggml_tensor * cur; - struct ggml_tensor * inpL; - - inpL = llm_build_inp_embd(ctx0, lctx, hparams, ubatch, model.tok_embd, cb); - - // inp_pos - contains the positions - struct ggml_tensor * inp_pos = build_inp_pos(); - - // KQ_mask (mask for 1 head, it will be broadcasted to all heads) - struct ggml_tensor * KQ_mask = build_inp_KQ_mask(); - - for (int il = 0; il < n_layer; ++il) { - struct ggml_tensor * inpSA = inpL; - - // norm - cur = llm_build_norm(ctx0, inpL, hparams, - model.layers[il].attn_norm, NULL, - LLM_NORM_RMS, cb, il); - cb(cur, "attn_norm", il); - - // self-attention - { - // compute Q and K and RoPE them - struct ggml_tensor * Qcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wq, cur); - cb(Qcur, "Qcur", il); - if (model.layers[il].bq) { - Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq); - cb(Qcur, "Qcur", il); - } - - struct ggml_tensor * Kcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wk, cur); - cb(Kcur, "Kcur", il); - if (model.layers[il].bk) { - Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk); - cb(Kcur, "Kcur", il); - } - - struct ggml_tensor * Vcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wv, cur); - cb(Vcur, "Vcur", il); - if (model.layers[il].bv) { - Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv); - cb(Vcur, "Vcur", il); - } - - Qcur = ggml_rope_ext( - ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos, nullptr, - n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, - ext_factor, attn_factor, beta_fast, beta_slow - ); - cb(Qcur, "Qcur", il); - - Kcur = ggml_rope_ext( - ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos, nullptr, - n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, - ext_factor, attn_factor, beta_fast, beta_slow - ); - cb(Kcur, "Kcur", il); - - cur = llm_build_kv(ctx0, lctx, kv_self, gf, - model.layers[il].wo, model.layers[il].bo, - Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il); - } - - if (il == n_layer - 1) { - // skip computing output for unused tokens - struct ggml_tensor * inp_out_ids = build_inp_out_ids(); - cur = ggml_get_rows(ctx0, cur, inp_out_ids); - inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids); - } - - struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA); - cb(ffn_inp, "ffn_inp", il); - - // feed-forward network - cur = llm_build_norm(ctx0, ffn_inp, hparams, - model.layers[il].ffn_norm, NULL, - LLM_NORM_RMS, cb, il); - cb(cur, "ffn_norm", il); - - cur = llm_build_ffn(ctx0, lctx, cur, - model.layers[il].ffn_up, NULL, NULL, - model.layers[il].ffn_gate, NULL, NULL, - model.layers[il].ffn_down, NULL, NULL, - NULL, - LLM_FFN_SILU, LLM_FFN_PAR, cb, il); - cb(cur, "ffn_out", il); - - cur = ggml_add(ctx0, cur, ffn_inp); - cur = lctx.cvec.apply_to(ctx0, cur, il); - cb(cur, "l_out", il); - - // input for next layer - inpL = cur; - } - - cur = inpL; - - cur = llm_build_norm(ctx0, cur, hparams, - model.output_norm, NULL, - LLM_NORM_RMS, cb, -1); - cb(cur, "result_norm", -1); - - // lm_head 
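For reference on the repeated ggml_rope_ext calls in the builders above: they apply rotary position embedding to Q and K before the KV step. The sketch below shows only the core rotation, assuming the plain interleaved-pair layout and using just n_rot and freq_base; freq_scale, ext_factor/YaRN corrections, attention scaling and the NeoX pair layout that ggml_rope_ext also handles are intentionally omitted.

// Sketch only: basic rotary position embedding for one head, in place.
#include <cmath>
#include <cstdio>
#include <vector>

// x has n_rot elements; pos is the token position.
static void rope_basic(float * x, int n_rot, int pos, float freq_base = 10000.0f) {
    for (int i = 0; i < n_rot; i += 2) {
        // pair i/2 is rotated by angle pos * freq_base^(-i/n_rot)
        const float theta = (float) pos * std::pow(freq_base, -(float) i / (float) n_rot);
        const float c = std::cos(theta);
        const float s = std::sin(theta);
        const float x0 = x[i];
        const float x1 = x[i + 1];
        x[i]     = x0*c - x1*s;
        x[i + 1] = x0*s + x1*c;
    }
}

int main() {
    std::vector<float> head = {1, 0, 1, 0, 1, 0, 1, 0}; // n_rot = 8, hypothetical values
    rope_basic(head.data(), (int) head.size(), /*pos =*/ 3);
    for (float v : head) printf("%.3f ", v);
    printf("\n");
}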
- cur = llm_build_lora_mm(lctx, ctx0, model.output, cur); - cb(cur, "result_output", -1); - - ggml_build_forward_expand(gf, cur); - - return gf; - } - - struct ggml_cgraph * build_minicpm3() { - struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, model.max_nodes(), false); - - //TODO: if the model varies, these parameters need to be read from the model - const int64_t n_embd_base = 256; - const float scale_embd = 12.0f; - const float scale_depth = 1.4f; - const float kq_scale = 1.0f / sqrtf(float(hparams.n_embd_head_k)); - - const uint32_t n_embd_head_qk_rope = hparams.n_rot; - const uint32_t n_embd_head_qk_nope = hparams.n_embd_head_k - hparams.n_rot; - const uint32_t kv_lora_rank = hparams.n_lora_kv; - - struct ggml_tensor * cur; - struct ggml_tensor * inpL; - - inpL = llm_build_inp_embd(ctx0, lctx, hparams, ubatch, model.tok_embd, cb); - - // scale the input embeddings - inpL = ggml_scale(ctx0, inpL, scale_embd); - cb(inpL, "inp_scaled", -1); - - // inp_pos - contains the positions - struct ggml_tensor * inp_pos = build_inp_pos(); - - // KQ_mask (mask for 1 head, it will be broadcasted to all heads) - struct ggml_tensor * KQ_mask = build_inp_KQ_mask(); - - for (int il = 0; il < n_layer; ++il) { - struct ggml_tensor * inpSA = inpL; - - struct ggml_tensor * rope_factors = build_rope_factors(il); - // norm - cur = llm_build_norm(ctx0, inpL, hparams, - model.layers[il].attn_norm, NULL, - LLM_NORM_RMS, cb, il); - cb(cur, "attn_norm", il); - - // self_attention - { - struct ggml_tensor * q = NULL; - // {n_embd, q_lora_rank} * {n_embd, n_tokens} -> {q_lora_rank, n_tokens} - q = ggml_mul_mat(ctx0, model.layers[il].wq_a, cur); - cb(q, "q", il); - - q = llm_build_norm(ctx0, q, hparams, - model.layers[il].attn_q_a_norm, NULL, - LLM_NORM_RMS, cb, il); - cb(q, "q", il); - - // {q_lora_rank, n_head * hparams.n_embd_head_k} * {q_lora_rank, n_tokens} -> {n_head * hparams.n_embd_head_k, n_tokens} - q = ggml_mul_mat(ctx0, model.layers[il].wq_b, q); - cb(q, "q", il); - - // split into {n_head * n_embd_head_qk_nope, n_tokens} - struct ggml_tensor * q_nope = ggml_view_3d(ctx0, q, n_embd_head_qk_nope, n_head, n_tokens, - ggml_row_size(q->type, hparams.n_embd_head_k), - ggml_row_size(q->type, hparams.n_embd_head_k * n_head), - 0); - cb(q_nope, "q_nope", il); - - // and {n_head * n_embd_head_qk_rope, n_tokens} - struct ggml_tensor * q_pe = ggml_view_3d(ctx0, q, n_embd_head_qk_rope, n_head, n_tokens, - ggml_row_size(q->type, hparams.n_embd_head_k), - ggml_row_size(q->type, hparams.n_embd_head_k * n_head), - ggml_row_size(q->type, n_embd_head_qk_nope)); - cb(q_pe, "q_pe", il); - - // {n_embd, kv_lora_rank + n_embd_head_qk_rope} * {n_embd, n_tokens} -> {kv_lora_rank + n_embd_head_qk_rope, n_tokens} - struct ggml_tensor * kv_pe_compresseed = ggml_mul_mat(ctx0, model.layers[il].wkv_a_mqa, cur); - cb(kv_pe_compresseed, "kv_pe_compresseed", il); - - // split into {kv_lora_rank, n_tokens} - struct ggml_tensor * kv_compressed = ggml_view_2d(ctx0, kv_pe_compresseed, kv_lora_rank, n_tokens, - kv_pe_compresseed->nb[1], - 0); - cb(kv_compressed, "kv_compressed", il); - - // and {n_embd_head_qk_rope, n_tokens} - struct ggml_tensor * k_pe = ggml_view_3d(ctx0, kv_pe_compresseed, n_embd_head_qk_rope, 1, n_tokens, - kv_pe_compresseed->nb[1], - kv_pe_compresseed->nb[1], - ggml_row_size(kv_pe_compresseed->type, kv_lora_rank)); - cb(k_pe, "k_pe", il); - - // TODO: the CUDA backend used to not support non-cont. 
(RMS) norm, investigate removing ggml_cont - kv_compressed = ggml_cont(ctx0, kv_compressed); - kv_compressed = llm_build_norm(ctx0, kv_compressed, hparams, - model.layers[il].attn_kv_a_norm, NULL, - LLM_NORM_RMS, cb, il); - cb(kv_compressed, "kv_compressed", il); - - // {kv_lora_rank, n_head * (n_embd_head_qk_nope + n_embd_head_v)} * {kv_lora_rank, n_tokens} -> {n_head * (n_embd_head_qk_nope + n_embd_head_v), n_tokens} - struct ggml_tensor * kv = ggml_mul_mat(ctx0, model.layers[il].wkv_b, kv_compressed); - cb(kv, "kv", il); - - // split into {n_head * n_embd_head_qk_nope, n_tokens} - struct ggml_tensor * k_nope = ggml_view_3d(ctx0, kv, n_embd_head_qk_nope, n_head, n_tokens, - ggml_row_size(kv->type, n_embd_head_qk_nope + hparams.n_embd_head_v), - ggml_row_size(kv->type, n_head * (n_embd_head_qk_nope + hparams.n_embd_head_v)), - 0); - cb(k_nope, "k_nope", il); - - // and {n_head * n_embd_head_v, n_tokens} - struct ggml_tensor * v_states = ggml_view_3d(ctx0, kv, hparams.n_embd_head_v, n_head, n_tokens, - ggml_row_size(kv->type, (n_embd_head_qk_nope + hparams.n_embd_head_v)), - ggml_row_size(kv->type, (n_embd_head_qk_nope + hparams.n_embd_head_v)*n_head), - ggml_row_size(kv->type, (n_embd_head_qk_nope))); - cb(v_states, "v_states", il); - - v_states = ggml_cont(ctx0, v_states); - cb(v_states, "v_states", il); - - v_states = ggml_view_2d(ctx0, v_states, hparams.n_embd_head_v * n_head, n_tokens, - ggml_row_size(kv->type, hparams.n_embd_head_v * n_head), - 0); - cb(v_states, "v_states", il); - - q_pe = ggml_cont(ctx0, q_pe); // TODO: the CUDA backend used to not support non-cont. RoPE, investigate removing this - q_pe = ggml_rope_ext( - ctx0, q_pe, inp_pos, rope_factors, - n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, - ext_factor, attn_factor, beta_fast, beta_slow - ); - cb(q_pe, "q_pe", il); - - // shared RoPE key - k_pe = ggml_cont(ctx0, k_pe); // TODO: the CUDA backend used to not support non-cont. 
RoPE, investigate removing this - k_pe = ggml_rope_ext( - ctx0, k_pe, inp_pos, rope_factors, - n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, - ext_factor, attn_factor, beta_fast, beta_slow - ); - cb(k_pe, "k_pe", il); - - struct ggml_tensor * q_states = ggml_concat(ctx0, q_nope, q_pe, 0); - cb(q_states, "q_states", il); - - struct ggml_tensor * k_states = ggml_concat(ctx0, k_nope, ggml_repeat(ctx0, k_pe, q_pe), 0); - cb(k_states, "k_states", il); - - cur = llm_build_kv(ctx0, lctx, kv_self, gf, - model.layers[il].wo, NULL, - k_states, v_states, q_states, KQ_mask, n_tokens, kv_head, n_kv, kq_scale, cb, il); - } - - if (il == n_layer - 1) { - // skip computing output for unused tokens - struct ggml_tensor * inp_out_ids = build_inp_out_ids(); - cur = ggml_get_rows(ctx0, cur, inp_out_ids); - inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids); - } - - // scale_res - scale the hidden states for residual connection - const float scale_res = scale_depth/sqrtf(float(n_layer)); - cur = ggml_scale(ctx0, cur, scale_res); - cb(cur, "hidden_scaled", il); - - struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA); - cb(ffn_inp, "ffn_inp", il); - - // feed-forward network - { - cur = llm_build_norm(ctx0, ffn_inp, hparams, - model.layers[il].ffn_norm, NULL, - LLM_NORM_RMS, cb, il); - cb(cur, "ffn_norm", il); - - cur = llm_build_ffn(ctx0, lctx, cur, - model.layers[il].ffn_up, NULL, NULL, - model.layers[il].ffn_gate, NULL, NULL, - model.layers[il].ffn_down, NULL, NULL, - NULL, - LLM_FFN_SILU, LLM_FFN_PAR, cb, il); - cb(cur, "ffn_out", il); - } - - // scale the hidden states for residual connection - cur = ggml_scale(ctx0, cur, scale_res); - cb(cur, "hidden_scaled_ffn", il); - - cur = ggml_add(ctx0, cur, ffn_inp); - cur = lctx.cvec.apply_to(ctx0, cur, il); - cb(cur, "l_out", il); - - // input for next layer - inpL = cur; - } - - cur = inpL; - - cur = llm_build_norm(ctx0, cur, hparams, - model.output_norm, NULL, - LLM_NORM_RMS, cb, -1); - cb(cur, "result_norm", -1); - - // lm_head scaling - const float scale_lmhead = float(n_embd_base)/float(n_embd); - cur = ggml_scale(ctx0, cur, scale_lmhead); - cb(cur, "lmhead_scaling", -1); - - // lm_head - cur = llm_build_lora_mm(lctx, ctx0, model.output, cur); - cb(cur, "result_output", -1); - - ggml_build_forward_expand(gf, cur); - - return gf; - } - - struct ggml_cgraph * build_gemma() { - struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, model.max_nodes(), false); - - const int64_t n_embd_head_k = hparams.n_embd_head_k; - - struct ggml_tensor * cur; - struct ggml_tensor * inpL; - - inpL = llm_build_inp_embd(ctx0, lctx, hparams, ubatch, model.tok_embd, cb); - - inpL = ggml_scale(ctx0, inpL, sqrtf(n_embd)); - cb(inpL, "inp_scaled", -1); - - // inp_pos - contains the positions - struct ggml_tensor * inp_pos = build_inp_pos(); - - // KQ_mask (mask for 1 head, it will be broadcasted to all heads) - struct ggml_tensor * KQ_mask = build_inp_KQ_mask(); - - for (int il = 0; il < n_layer; ++il) { - // norm - cur = llm_build_norm(ctx0, inpL, hparams, - model.layers[il].attn_norm, NULL, - LLM_NORM_RMS, cb, il); - cb(cur, "attn_norm", il); - - // self-attention - { - // compute Q and K and RoPE them - struct ggml_tensor * Qcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wq, cur); - cb(Qcur, "Qcur", il); - - struct ggml_tensor * Kcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wk, cur); - cb(Kcur, "Kcur", il); - - struct ggml_tensor * Vcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wv, cur); - cb(Vcur, "Vcur", il); - - Qcur = ggml_rope_ext( - ctx0, 
ggml_reshape_3d(ctx0, Qcur, n_embd_head_k, n_head, n_tokens), inp_pos, nullptr, - n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, - ext_factor, attn_factor, beta_fast, beta_slow); - cb(Qcur, "Qcur", il); - - Qcur = ggml_scale(ctx0, Qcur, 1.0f / sqrtf(float(n_embd_head_k))); - cb(Qcur, "Qcur_scaled", il); - - Kcur = ggml_rope_ext( - ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head_k, n_head_kv, n_tokens), inp_pos, nullptr, - n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, - ext_factor, attn_factor, beta_fast, beta_slow); - cb(Kcur, "Kcur", il); - - cur = llm_build_kv(ctx0, lctx, kv_self, gf, - model.layers[il].wo, NULL, - Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f, cb, il); - } - - if (il == n_layer - 1) { - // skip computing output for unused tokens - struct ggml_tensor * inp_out_ids = build_inp_out_ids(); - cur = ggml_get_rows(ctx0, cur, inp_out_ids); - inpL = ggml_get_rows(ctx0, inpL, inp_out_ids); - } - - struct ggml_tensor * sa_out = ggml_add(ctx0, cur, inpL); - cb(sa_out, "sa_out", il); - - cur = llm_build_norm(ctx0, sa_out, hparams, - model.layers[il].ffn_norm, NULL, - LLM_NORM_RMS, cb, il); - cb(cur, "ffn_norm", il); - - // feed-forward network - { - cur = llm_build_ffn(ctx0, lctx, cur, - model.layers[il].ffn_up, NULL, NULL, - model.layers[il].ffn_gate, NULL, NULL, - model.layers[il].ffn_down, NULL, NULL, - NULL, - LLM_FFN_GELU, LLM_FFN_PAR, cb, il); - cb(cur, "ffn_out", il); - } - - cur = ggml_add(ctx0, cur, sa_out); - cur = lctx.cvec.apply_to(ctx0, cur, il); - cb(cur, "l_out", il); - - // input for next layer - inpL = cur; - } - - cur = inpL; - - cur = llm_build_norm(ctx0, cur, hparams, - model.output_norm, NULL, - LLM_NORM_RMS, cb, -1); - cb(cur, "result_norm", -1); - - // lm_head - cur = llm_build_lora_mm(lctx, ctx0, model.output, cur); - cb(cur, "result_output", -1); - - ggml_build_forward_expand(gf, cur); - - return gf; - } - - struct ggml_cgraph * build_gemma2() { - struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, model.max_nodes(), false); - - const int64_t n_embd_head_k = hparams.n_embd_head_k; - - struct ggml_tensor * cur; - struct ggml_tensor * inpL; - - inpL = llm_build_inp_embd(ctx0, lctx, hparams, ubatch, model.tok_embd, cb); - - inpL = ggml_scale(ctx0, inpL, sqrtf(n_embd)); - cb(inpL, "inp_scaled", -1); - - // inp_pos - contains the positions - struct ggml_tensor * inp_pos = build_inp_pos(); - - // KQ_mask (mask for 1 head, it will be broadcasted to all heads) - // gemma 2 requires different mask for layers using sliding window (SWA) - struct ggml_tensor * KQ_mask = build_inp_KQ_mask(true); - struct ggml_tensor * KQ_mask_swa = build_inp_KQ_mask_swa(true); - - for (int il = 0; il < n_layer; ++il) { - // (il % 2) layers use SWA - struct ggml_tensor * KQ_mask_l = (il % 2 == 0) ? 
KQ_mask_swa : KQ_mask; - - // norm - cur = llm_build_norm(ctx0, inpL, hparams, - model.layers[il].attn_norm, NULL, - LLM_NORM_RMS, cb, il); - cb(cur, "attn_norm", il); - - // self-attention - { - // compute Q and K and RoPE them - struct ggml_tensor * Qcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wq, cur); - cb(Qcur, "Qcur", il); - - struct ggml_tensor * Kcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wk, cur); - cb(Kcur, "Kcur", il); - - struct ggml_tensor * Vcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wv, cur); - cb(Vcur, "Vcur", il); - - Qcur = ggml_rope_ext( - ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head_k, n_head, n_tokens), inp_pos, nullptr, - n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, - ext_factor, attn_factor, beta_fast, beta_slow); - cb(Qcur, "Qcur", il); - - // ref: https://github.com/google/gemma_pytorch/commit/03e657582d17cb5a8617ebf333c1c16f3694670e - switch (model.type) { - case LLM_TYPE_2B: - case LLM_TYPE_9B: Qcur = ggml_scale(ctx0, Qcur, 1.0f / sqrtf(float(n_embd_head_k))); break; - case LLM_TYPE_27B: Qcur = ggml_scale(ctx0, Qcur, 1.0f / sqrtf(float(n_embd / n_head))); break; - default: GGML_ABORT("fatal error"); - }; - cb(Qcur, "Qcur_scaled", il); - - Kcur = ggml_rope_ext( - ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head_k, n_head_kv, n_tokens), inp_pos, nullptr, - n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, - ext_factor, attn_factor, beta_fast, beta_slow); - cb(Kcur, "Kcur", il); - - cur = llm_build_kv(ctx0, lctx, kv_self, gf, - model.layers[il].wo, NULL, - Kcur, Vcur, Qcur, KQ_mask_l, n_tokens, kv_head, n_kv, 1.0f, cb, il); - } - - cur = llm_build_norm(ctx0, cur, hparams, - model.layers[il].attn_post_norm, NULL, - LLM_NORM_RMS, cb, il); - cb(cur, "attn_post_norm", il); - - if (il == n_layer - 1) { - // skip computing output for unused tokens - struct ggml_tensor * inp_out_ids = build_inp_out_ids(); - cur = ggml_get_rows(ctx0, cur, inp_out_ids); - inpL = ggml_get_rows(ctx0, inpL, inp_out_ids); - } - - struct ggml_tensor * sa_out = ggml_add(ctx0, cur, inpL); - cb(sa_out, "sa_out", il); - - cur = llm_build_norm(ctx0, sa_out, hparams, - model.layers[il].ffn_norm, NULL, - LLM_NORM_RMS, cb, il); - cb(cur, "ffn_norm", il); - - // feed-forward network - { - cur = llm_build_ffn(ctx0, lctx, cur, - model.layers[il].ffn_up, NULL, NULL, - model.layers[il].ffn_gate, NULL, NULL, - model.layers[il].ffn_down, NULL, NULL, - NULL, - LLM_FFN_GELU, LLM_FFN_PAR, cb, il); - cb(cur, "ffn_out", il); - } - - cur = llm_build_norm(ctx0, cur, hparams, - model.layers[il].ffn_post_norm, NULL, - LLM_NORM_RMS, cb, -1); - cb(cur, "ffn_post_norm", -1); - - cur = ggml_add(ctx0, cur, sa_out); - cur = lctx.cvec.apply_to(ctx0, cur, il); - cb(cur, "l_out", il); - - // input for next layer - inpL = cur; - } - - cur = inpL; - - cur = llm_build_norm(ctx0, cur, hparams, - model.output_norm, NULL, - LLM_NORM_RMS, cb, -1); - cb(cur, "result_norm", -1); - - // lm_head - cur = llm_build_lora_mm(lctx, ctx0, model.output, cur); - - // final logit soft-capping - cur = ggml_scale(ctx0, cur, 1.0f / hparams.f_final_logit_softcapping); - cur = ggml_tanh(ctx0, cur); - cur = ggml_scale(ctx0, cur, hparams.f_final_logit_softcapping); - - cb(cur, "result_output", -1); - - ggml_build_forward_expand(gf, cur); - - return gf; - } - - struct ggml_cgraph * build_gemma3() { - struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, model.max_nodes(), false); - - const int64_t n_embd_head_k = hparams.n_embd_head_k; - - struct ggml_tensor * cur; - struct ggml_tensor * inpL; - - inpL = 
llm_build_inp_embd(ctx0, lctx, hparams, ubatch, model.tok_embd, cb); - - // important: do not normalize weights for raw embeddings input (i.e. encoded image emdeddings) - if (ubatch.token) { - inpL = ggml_scale(ctx0, inpL, sqrtf(n_embd)); - cb(inpL, "inp_scaled", -1); - } - - // inp_pos - contains the positions - struct ggml_tensor * inp_pos = build_inp_pos(); - - // KQ_mask (mask for 1 head, it will be broadcasted to all heads) - // gemma3 requires different mask for layers using sliding window (SWA) - struct ggml_tensor * KQ_mask = build_inp_KQ_mask(true); - struct ggml_tensor * KQ_mask_swa = build_inp_KQ_mask_swa(true); - - // "5-to-1 interleaved attention" - // 5 layers of local attention followed by 1 layer of global attention - static const int sliding_window_pattern = 6; - - for (int il = 0; il < n_layer; ++il) { - const bool is_sliding = (il + 1) % sliding_window_pattern; - const float freq_base_l = is_sliding ? 10000.0f : freq_base; - const float freq_scale_l = is_sliding ? 1.0f : freq_scale; - struct ggml_tensor * KQ_mask_l = is_sliding ? KQ_mask_swa : KQ_mask; - - // norm - cur = llm_build_norm(ctx0, inpL, hparams, - model.layers[il].attn_norm, NULL, - LLM_NORM_RMS, cb, il); - cb(cur, "attn_norm", il); - - // self-attention - { - // compute Q and K and RoPE them - struct ggml_tensor * Qcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wq, cur); - cb(Qcur, "Qcur", il); - - struct ggml_tensor * Kcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wk, cur); - cb(Kcur, "Kcur", il); - - struct ggml_tensor * Vcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wv, cur); - cb(Vcur, "Vcur", il); - - Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head_k, n_head, n_tokens); - Qcur = llm_build_norm(ctx0, Qcur, hparams, - model.layers[il].attn_q_norm, - NULL, - LLM_NORM_RMS, cb, il); - cb(Qcur, "Qcur_normed", il); - - Qcur = ggml_rope_ext( - ctx0, Qcur, inp_pos, nullptr, - n_rot, rope_type, n_ctx_orig, freq_base_l, freq_scale_l, - ext_factor, attn_factor, beta_fast, beta_slow); - cb(Qcur, "Qcur", il); - - Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head_k, n_head_kv, n_tokens); - Kcur = llm_build_norm(ctx0, Kcur, hparams, - model.layers[il].attn_k_norm, - NULL, - LLM_NORM_RMS, cb, il); - cb(Kcur, "Kcur_normed", il); - - Kcur = ggml_rope_ext( - ctx0, Kcur, inp_pos, nullptr, - n_rot, rope_type, n_ctx_orig, freq_base_l, freq_scale_l, - ext_factor, attn_factor, beta_fast, beta_slow); - cb(Kcur, "Kcur", il); - - cur = llm_build_kv(ctx0, lctx, kv_self, gf, - model.layers[il].wo, NULL, - Kcur, Vcur, Qcur, KQ_mask_l, n_tokens, kv_head, n_kv, hparams.f_attention_scale, cb, il); - } - - cur = llm_build_norm(ctx0, cur, hparams, - model.layers[il].attn_post_norm, NULL, - LLM_NORM_RMS, cb, il); - cb(cur, "attn_post_norm", il); - - if (il == n_layer - 1) { - // skip computing output for unused tokens - struct ggml_tensor * inp_out_ids = build_inp_out_ids(); - cur = ggml_get_rows(ctx0, cur, inp_out_ids); - inpL = ggml_get_rows(ctx0, inpL, inp_out_ids); - } - - struct ggml_tensor * sa_out = ggml_add(ctx0, cur, inpL); - cb(sa_out, "sa_out", il); - - cur = llm_build_norm(ctx0, sa_out, hparams, - model.layers[il].ffn_norm, NULL, - LLM_NORM_RMS, cb, il); - cb(cur, "ffn_norm", il); - - // feed-forward network - { - cur = llm_build_ffn(ctx0, lctx, cur, - model.layers[il].ffn_up, NULL, NULL, - model.layers[il].ffn_gate, NULL, NULL, - model.layers[il].ffn_down, NULL, NULL, - NULL, - LLM_FFN_GELU, LLM_FFN_PAR, cb, il); - cb(cur, "ffn_out", il); - } - - cur = llm_build_norm(ctx0, cur, hparams, - 
model.layers[il].ffn_post_norm, NULL, - LLM_NORM_RMS, cb, -1); - cb(cur, "ffn_post_norm", -1); - - cur = ggml_add(ctx0, cur, sa_out); - cur = lctx.cvec.apply_to(ctx0, cur, il); - cb(cur, "l_out", il); - - // input for next layer - inpL = cur; - } - - cur = inpL; - - cur = llm_build_norm(ctx0, cur, hparams, - model.output_norm, NULL, - LLM_NORM_RMS, cb, -1); - cb(cur, "result_norm", -1); - - // lm_head - cur = llm_build_lora_mm(lctx, ctx0, model.output, cur); - - cb(cur, "result_output", -1); - - ggml_build_forward_expand(gf, cur); - - return gf; - } - - struct ggml_cgraph * build_starcoder2() { - struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, model.max_nodes(), false); - - const int64_t n_embd_head = hparams.n_embd_head_v; - GGML_ASSERT(n_embd_head == hparams.n_embd_head_k); - GGML_ASSERT(n_embd_head == hparams.n_rot); - - struct ggml_tensor * cur; - struct ggml_tensor * inpL; - - inpL = llm_build_inp_embd(ctx0, lctx, hparams, ubatch, model.tok_embd, cb); - - // inp_pos - contains the positions - struct ggml_tensor * inp_pos = build_inp_pos(); - - // KQ_mask (mask for 1 head, it will be broadcasted to all heads) - struct ggml_tensor * KQ_mask = build_inp_KQ_mask(); - - for (int il = 0; il < n_layer; ++il) { - struct ggml_tensor * inpSA = inpL; - - // norm - cur = llm_build_norm(ctx0, inpL, hparams, - model.layers[il].attn_norm, model.layers[il].attn_norm_b, - LLM_NORM, cb, il); - cb(cur, "attn_norm", il); - - // self-attention - { - // compute Q and K and RoPE them - struct ggml_tensor * Qcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wq, cur); - cb(Qcur, "Qcur", il); - if (model.layers[il].bq) { - Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq); - cb(Qcur, "Qcur", il); - } - - struct ggml_tensor * Kcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wk, cur); - cb(Kcur, "Kcur", il); - if (model.layers[il].bk) { - Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk); - cb(Kcur, "Kcur", il); - } - - struct ggml_tensor * Vcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wv, cur); - cb(Vcur, "Vcur", il); - if (model.layers[il].bv) { - Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv); - cb(Vcur, "Vcur", il); - } - - Qcur = ggml_rope_ext( - ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos, nullptr, - n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, - ext_factor, attn_factor, beta_fast, beta_slow - ); - cb(Qcur, "Qcur", il); - - Kcur = ggml_rope_ext( - ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos, nullptr, - n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, - ext_factor, attn_factor, beta_fast, beta_slow - ); - cb(Kcur, "Kcur", il); - - cur = llm_build_kv(ctx0, lctx, kv_self, gf, - model.layers[il].wo, model.layers[il].bo, - Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il); - } - - if (il == n_layer - 1) { - // skip computing output for unused tokens - struct ggml_tensor * inp_out_ids = build_inp_out_ids(); - cur = ggml_get_rows(ctx0, cur, inp_out_ids); - inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids); - } - - struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA); - cb(ffn_inp, "ffn_inp", il); - - // feed-forward network - - cur = llm_build_norm(ctx0, ffn_inp, hparams, - model.layers[il].ffn_norm, model.layers[il].ffn_norm_b, - LLM_NORM, cb, il); - cb(cur, "ffn_norm", il); - - cur = llm_build_ffn(ctx0, lctx, cur, - model.layers[il].ffn_up, model.layers[il].ffn_up_b, NULL, - NULL, NULL, NULL, - model.layers[il].ffn_down, model.layers[il].ffn_down_b, NULL, - NULL, - 
LLM_FFN_GELU, LLM_FFN_SEQ, cb, il); - cb(cur, "ffn_out", il); - - cur = ggml_add(ctx0, cur, ffn_inp); - cur = lctx.cvec.apply_to(ctx0, cur, il); - cb(cur, "l_out", il); - - // input for next layer - inpL = cur; - } - - cur = inpL; - - cur = llm_build_norm(ctx0, cur, hparams, - model.output_norm, model.output_norm_b, - LLM_NORM, cb, -1); - cb(cur, "result_norm", -1); - - // lm_head - cur = llm_build_lora_mm(lctx, ctx0, model.output, cur); - cb(cur, "result_output", -1); - - ggml_build_forward_expand(gf, cur); - - return gf; - } - - struct ggml_cgraph * build_mamba() { - struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, model.max_nodes(), false); - - struct ggml_tensor * cur; - struct ggml_tensor * inpL; - - // {n_embd, n_tokens} - inpL = llm_build_inp_embd(ctx0, lctx, hparams, ubatch, model.tok_embd, cb); - - struct ggml_tensor * state_copy = build_inp_s_copy(); - struct ggml_tensor * state_mask = build_inp_s_mask(); - - for (int il = 0; il < n_layer; ++il) { - // norm - cur = llm_build_norm(ctx0, inpL, hparams, - model.layers[il].attn_norm, NULL, - LLM_NORM_RMS, cb, il); - cb(cur, "attn_norm", il); - - cur = llm_build_mamba(ctx0, lctx, ubatch, gf, cur, - state_copy, state_mask, - kv_head, n_kv, cb, il); - - if (il == n_layer - 1) { - // skip computing output for unused tokens - struct ggml_tensor * inp_out_ids = build_inp_out_ids(); - cur = ggml_get_rows(ctx0, cur, inp_out_ids); - inpL = ggml_get_rows(ctx0, inpL, inp_out_ids); - } - - // residual - cur = ggml_add(ctx0, cur, inpL); - cur = lctx.cvec.apply_to(ctx0, cur, il); - cb(cur, "l_out", il); - - // input for next layer - inpL = cur; - } - - // final rmsnorm - cur = llm_build_norm(ctx0, inpL, hparams, - model.output_norm, NULL, - LLM_NORM_RMS, cb, -1); - cb(cur, "result_norm", -1); - - // lm_head - cur = llm_build_lora_mm(lctx, ctx0, model.output, cur); - cb(cur, "result_output", -1); - - ggml_build_forward_expand(gf, cur); - - return gf; - } - - struct ggml_cgraph * build_command_r() { - - struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, model.max_nodes(), false); - - const int64_t n_embd_head = hparams.n_embd_head_v; - GGML_ASSERT(n_embd_head == hparams.n_embd_head_k); - const float f_logit_scale = hparams.f_logit_scale; - - struct ggml_tensor * cur; - struct ggml_tensor * inpL; - - inpL = llm_build_inp_embd(ctx0, lctx, hparams, ubatch, model.tok_embd, cb); - - // inp_pos - contains the positions - struct ggml_tensor * inp_pos = build_inp_pos(); - - // KQ_mask (mask for 1 head, it will be broadcasted to all heads) - struct ggml_tensor * KQ_mask = build_inp_KQ_mask(); - - for (int il = 0; il < n_layer; ++il) { - - // norm - cur = llm_build_norm(ctx0, inpL, hparams, - model.layers[il].attn_norm, NULL, - LLM_NORM, cb, il); - cb(cur, "attn_norm", il); - struct ggml_tensor * ffn_inp = cur; - - // self-attention - { - // compute Q and K and RoPE them - struct ggml_tensor * Qcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wq, cur); - cb(Qcur, "Qcur", il); - if (model.layers[il].bq) { - Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq); - cb(Qcur, "Qcur", il); - } - - struct ggml_tensor * Kcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wk, cur); - cb(Kcur, "Kcur", il); - if (model.layers[il].bk) { - Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk); - cb(Kcur, "Kcur", il); - } - - struct ggml_tensor * Vcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wv, cur); - cb(Vcur, "Vcur", il); - if (model.layers[il].bv) { - Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv); - cb(Vcur, "Vcur", il); - } - - if 
(model.layers[il].attn_q_norm) { - Qcur = ggml_view_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens, - ggml_element_size(Qcur) * n_embd_head, - ggml_element_size(Qcur) * n_embd_head * n_head, - 0); - cb(Qcur, "Qcur", il); - Kcur = ggml_view_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens, - ggml_element_size(Kcur) * n_embd_head, - ggml_element_size(Kcur) * n_embd_head * n_head_kv, - 0); - cb(Kcur, "Kcur", il); - - Qcur = llm_build_norm(ctx0, Qcur, hparams, - model.layers[il].attn_q_norm, - NULL, - LLM_NORM, cb, il); - cb(Qcur, "Qcur", il); - - Kcur = llm_build_norm(ctx0, Kcur, hparams, - model.layers[il].attn_k_norm, - NULL, - LLM_NORM, cb, il); - cb(Kcur, "Kcur", il); - } - - Qcur = ggml_rope_ext( - ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos, nullptr, - n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, - ext_factor, attn_factor, beta_fast, beta_slow - ); - cb(Qcur, "Qcur", il); - - Kcur = ggml_rope_ext( - ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos, nullptr, - n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, - ext_factor, attn_factor, beta_fast, beta_slow - ); - cb(Kcur, "Kcur", il); - - cur = llm_build_kv(ctx0, lctx, kv_self, gf, - model.layers[il].wo, model.layers[il].bo, - Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il); - } - - if (il == n_layer - 1) { - // skip computing output for unused tokens - struct ggml_tensor * inp_out_ids = build_inp_out_ids(); - cur = ggml_get_rows(ctx0, cur, inp_out_ids); - inpL = ggml_get_rows(ctx0, inpL, inp_out_ids); - ffn_inp = ggml_get_rows(ctx0, ffn_inp, inp_out_ids); - } - - struct ggml_tensor * attn_out = cur; - - // feed-forward network - { - cur = llm_build_ffn(ctx0, lctx, ffn_inp, - model.layers[il].ffn_up, NULL, NULL, - model.layers[il].ffn_gate, NULL, NULL, - model.layers[il].ffn_down, NULL, NULL, - NULL, - LLM_FFN_SILU, LLM_FFN_PAR, cb, il); - cb(cur, "ffn_out", il); - } - - // add together residual + FFN + self-attention - cur = ggml_add(ctx0, cur, inpL); - cur = ggml_add(ctx0, cur, attn_out); - cur = lctx.cvec.apply_to(ctx0, cur, il); - cb(cur, "l_out", il); - - // input for next layer - inpL = cur; - } - - cur = inpL; - - cur = llm_build_norm(ctx0, cur, hparams, - model.output_norm, NULL, - LLM_NORM, cb, -1); - cb(cur, "result_norm", -1); - - // lm_head - cur = llm_build_lora_mm(lctx, ctx0, model.output, cur); - - if (f_logit_scale) { - cur = ggml_scale(ctx0, cur, f_logit_scale); - } - - cb(cur, "result_output", -1); - - ggml_build_forward_expand(gf, cur); - - return gf; - - } - - struct ggml_cgraph * build_cohere2() { - struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, model.max_nodes(), false); - - const int64_t n_embd_head = hparams.n_embd_head_v; - GGML_ASSERT(n_embd_head == hparams.n_embd_head_k); - const float f_logit_scale = hparams.f_logit_scale; - - struct ggml_tensor * cur; - struct ggml_tensor * inpL; - - inpL = llm_build_inp_embd(ctx0, lctx, hparams, ubatch, model.tok_embd, cb); - - // inp_pos - contains the positions - struct ggml_tensor * inp_pos = build_inp_pos(); - - // KQ_mask (mask for 1 head, it will be broadcasted to all heads) - // cohere2 requires different mask for layers using sliding window (SWA) - struct ggml_tensor * KQ_mask = build_inp_KQ_mask(); - struct ggml_tensor * KQ_mask_swa = build_inp_KQ_mask_swa(); - - // sliding window switch pattern - const int32_t sliding_window_pattern = 4; - - for (int il = 0; il < n_layer; ++il) { - // three layers sliding window attention (window size 4096) and ROPE 
- // fourth layer uses global attention without positional embeddings - const bool is_sliding = il % sliding_window_pattern < (sliding_window_pattern - 1); - struct ggml_tensor * KQ_mask_l = is_sliding ? KQ_mask_swa : KQ_mask; - - // norm - cur = llm_build_norm(ctx0, inpL, hparams, model.layers[il].attn_norm, NULL, LLM_NORM, cb, il); - cb(cur, "attn_norm", il); - struct ggml_tensor * ffn_inp = cur; - - // self-attention - { - // rope freq factors for 128k context - struct ggml_tensor * rope_factors = build_rope_factors(il); - - // compute Q and K and RoPE them - struct ggml_tensor * Qcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wq, cur); - cb(Qcur, "Qcur", il); - if (model.layers[il].bq) { - Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq); - cb(Qcur, "Qcur", il); - } - - struct ggml_tensor * Kcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wk, cur); - cb(Kcur, "Kcur", il); - if (model.layers[il].bk) { - Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk); - cb(Kcur, "Kcur", il); - } - - struct ggml_tensor * Vcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wv, cur); - cb(Vcur, "Vcur", il); - if (model.layers[il].bv) { - Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv); - cb(Vcur, "Vcur", il); - } - - if (is_sliding) { - Qcur = ggml_rope_ext(ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos, rope_factors, - n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, ext_factor, attn_factor, - beta_fast, beta_slow); - cb(Qcur, "Qcur", il); - - Kcur = ggml_rope_ext(ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos, - rope_factors, n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, ext_factor, - attn_factor, beta_fast, beta_slow); - cb(Kcur, "Kcur", il); - } else { - // For non-sliding layers, just reshape without applying RoPE - Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens); - cb(Qcur, "Qcur", il); - - Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens); - cb(Kcur, "Kcur", il); - } - - cur = llm_build_kv(ctx0, lctx, kv_self, gf, model.layers[il].wo, model.layers[il].bo, Kcur, Vcur, Qcur, - KQ_mask_l, n_tokens, kv_head, n_kv, 1.0f / sqrtf(float(n_embd_head)), cb, il); - } - - if (il == n_layer - 1) { - // skip computing output for unused tokens - struct ggml_tensor * inp_out_ids = build_inp_out_ids(); - cur = ggml_get_rows(ctx0, cur, inp_out_ids); - inpL = ggml_get_rows(ctx0, inpL, inp_out_ids); - ffn_inp = ggml_get_rows(ctx0, ffn_inp, inp_out_ids); - } - - struct ggml_tensor * attn_out = cur; - - // feed-forward network - { - cur = llm_build_ffn(ctx0, lctx, ffn_inp, model.layers[il].ffn_up, NULL, NULL, model.layers[il].ffn_gate, - NULL, NULL, model.layers[il].ffn_down, NULL, NULL, NULL, LLM_FFN_SILU, LLM_FFN_PAR, - cb, il); - cb(cur, "ffn_out", il); - } - - // add together residual + FFN + self-attention - cur = ggml_add(ctx0, cur, inpL); - cur = ggml_add(ctx0, cur, attn_out); - cur = lctx.cvec.apply_to(ctx0, cur, il); - cb(cur, "l_out", il); - - // input for next layer - inpL = cur; - } - - cur = inpL; - - cur = llm_build_norm(ctx0, cur, hparams, model.output_norm, NULL, LLM_NORM, cb, -1); - cb(cur, "result_norm", -1); - - // lm_head - cur = llm_build_lora_mm(lctx, ctx0, model.output, cur); - - if (f_logit_scale) { - cur = ggml_scale(ctx0, cur, f_logit_scale); - } - - cb(cur, "result_output", -1); - - ggml_build_forward_expand(gf, cur); - - return gf; - } - - // ref: https://allenai.org/olmo - // based on the original build_llama() function, changes: - // * non-parametric layer norm - // * clamp 
qkv - // * removed bias - // * removed MoE - struct ggml_cgraph * build_olmo() { - struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, model.max_nodes(), false); - - // mutable variable, needed during the last layer of the computation to skip unused tokens - int32_t n_tokens = this->n_tokens; - - const int64_t n_embd_head = hparams.n_embd_head_v; - GGML_ASSERT(n_embd_head == hparams.n_embd_head_k); - GGML_ASSERT(n_embd_head == hparams.n_rot); - - struct ggml_tensor * cur; - struct ggml_tensor * inpL; - - inpL = llm_build_inp_embd(ctx0, lctx, hparams, ubatch, model.tok_embd, cb); - - // inp_pos - contains the positions - struct ggml_tensor * inp_pos = build_inp_pos(); - - // KQ_mask (mask for 1 head, it will be broadcasted to all heads) - struct ggml_tensor * KQ_mask = build_inp_KQ_mask(); - - for (int il = 0; il < n_layer; ++il) { - struct ggml_tensor * inpSA = inpL; - - // norm - cur = llm_build_norm(ctx0, inpL, hparams, - NULL, NULL, - LLM_NORM, cb, il); - cb(cur, "attn_norm", il); - - // self-attention - { - // compute Q and K and RoPE them - struct ggml_tensor * Qcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wq, cur); - cb(Qcur, "Qcur", il); - if (hparams.f_clamp_kqv > 0.0f) { - Qcur = ggml_clamp(ctx0, Qcur, -hparams.f_clamp_kqv, hparams.f_clamp_kqv); - cb(Qcur, "Qcur", il); - } - - struct ggml_tensor * Kcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wk, cur); - cb(Kcur, "Kcur", il); - if (hparams.f_clamp_kqv > 0.0f) { - Kcur = ggml_clamp(ctx0, Kcur, -hparams.f_clamp_kqv, hparams.f_clamp_kqv); - cb(Kcur, "Kcur", il); - } - - struct ggml_tensor * Vcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wv, cur); - cb(Vcur, "Vcur", il); - if (hparams.f_clamp_kqv > 0.0f) { - Vcur = ggml_clamp(ctx0, Vcur, -hparams.f_clamp_kqv, hparams.f_clamp_kqv); - cb(Vcur, "Vcur", il); - } - - Qcur = ggml_rope_ext( - ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos, nullptr, - n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, - ext_factor, attn_factor, beta_fast, beta_slow - ); - cb(Qcur, "Qcur", il); - - Kcur = ggml_rope_ext( - ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos, nullptr, - n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, - ext_factor, attn_factor, beta_fast, beta_slow - ); - cb(Kcur, "Kcur", il); - - cur = llm_build_kv(ctx0, lctx, kv_self, gf, - model.layers[il].wo, nullptr, - Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il); - } - - if (il == n_layer - 1) { - // skip computing output for unused tokens - struct ggml_tensor * inp_out_ids = build_inp_out_ids(); - n_tokens = n_outputs; - cur = ggml_get_rows(ctx0, cur, inp_out_ids); - inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids); - } - - struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA); - cb(ffn_inp, "ffn_inp", il); - - // feed-forward network - cur = llm_build_norm(ctx0, ffn_inp, hparams, - NULL, NULL, - LLM_NORM, cb, il); - cb(cur, "ffn_norm", il); - - cur = llm_build_ffn(ctx0, lctx, cur, - model.layers[il].ffn_up, NULL, NULL, - model.layers[il].ffn_gate, NULL, NULL, - model.layers[il].ffn_down, NULL, NULL, - NULL, - LLM_FFN_SILU, LLM_FFN_PAR, cb, il); - cb(cur, "ffn_out", il); - - cur = ggml_add(ctx0, cur, ffn_inp); - cb(cur, "ffn_out", il); - - cur = lctx.cvec.apply_to(ctx0, cur, il); - cb(cur, "l_out", il); - - // input for next layer - inpL = cur; - } - - cur = inpL; - - cur = llm_build_norm(ctx0, cur, hparams, - NULL, NULL, - LLM_NORM, cb, -1); - cb(cur, "result_norm", -1); - - // lm_head - cur = 
llm_build_lora_mm(lctx, ctx0, model.output, cur); - cb(cur, "result_output", -1); - - ggml_build_forward_expand(gf, cur); - - return gf; - } - - struct ggml_cgraph * build_olmo2() { - struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, model.max_nodes(), false); - - // mutable variable, needed during the last layer of the computation to skip unused tokens - int32_t n_tokens = this->n_tokens; - - const int64_t n_embd_head = hparams.n_embd_head_v; - GGML_ASSERT(n_embd_head == hparams.n_embd_head_k); - GGML_ASSERT(n_embd_head == hparams.n_rot); - - struct ggml_tensor * cur; - struct ggml_tensor * inpL; - - inpL = llm_build_inp_embd(ctx0, lctx, hparams, ubatch, model.tok_embd, cb); - - // inp_pos - contains the positions - struct ggml_tensor * inp_pos = build_inp_pos(); - - // KQ_mask (mask for 1 head, it will be broadcasted to all heads) - struct ggml_tensor * KQ_mask = build_inp_KQ_mask(); - - for (int il = 0; il < n_layer; ++il) { - struct ggml_tensor * inpSA = inpL; - - cur = inpL; - - // self_attention - { - // compute Q and K and RoPE them - struct ggml_tensor * Qcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wq, cur); - cb(Qcur, "Qcur", il); - - struct ggml_tensor * Kcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wk, cur); - cb(Kcur, "Kcur", il); - - struct ggml_tensor * Vcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wv, cur); - cb(Vcur, "Vcur", il); - - Qcur = llm_build_norm(ctx0, Qcur, hparams, model.layers[il].attn_q_norm, NULL, - LLM_NORM_RMS, cb, il); - cb(Qcur, "Qcur_normed", il); - - Kcur = llm_build_norm(ctx0, Kcur, hparams, model.layers[il].attn_k_norm, NULL, - LLM_NORM_RMS, cb, il); - cb(Kcur, "Kcur_normed", il); - - Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens); - Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens); - - Qcur = ggml_rope_ext( - ctx0, Qcur, inp_pos, nullptr, - n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, - ext_factor, attn_factor, beta_fast, beta_slow - ); - cb(Qcur, "Qcur_rope", il); - - Kcur = ggml_rope_ext( - ctx0, Kcur, inp_pos, nullptr, - n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, - ext_factor, attn_factor, beta_fast, beta_slow - ); - cb(Kcur, "Kcur_rope", il); - - cur = llm_build_kv(ctx0, lctx, kv_self, gf, - model.layers[il].wo, NULL, - Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il); - } - - cur = llm_build_norm(ctx0, cur, hparams, - model.layers[il].attn_post_norm, NULL, - LLM_NORM_RMS, cb, il); - cb(cur, "attn_post_norm", il); - - if (il == n_layer - 1) { - // skip computing output for unused tokens - struct ggml_tensor * inp_out_ids = build_inp_out_ids(); - n_tokens = n_outputs; - cur = ggml_get_rows(ctx0, cur, inp_out_ids); - inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids); - } - - struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA); - cb(ffn_inp, "ffn_inp", il); - - // feed-forward network - cur = llm_build_ffn(ctx0, lctx, ffn_inp, - model.layers[il].ffn_up, NULL, NULL, - model.layers[il].ffn_gate, NULL, NULL, - model.layers[il].ffn_down, NULL, NULL, - NULL, - LLM_FFN_SILU, LLM_FFN_PAR, cb, il); - cb(cur, "ffn_out", il); - - cur = llm_build_norm(ctx0, cur, hparams, - model.layers[il].ffn_post_norm, NULL, - LLM_NORM_RMS, cb, -1); - cb(cur, "ffn_post_norm", -1); - - cur = ggml_add(ctx0, cur, ffn_inp); - cb(cur, "ffn_out", il); - - cur = lctx.cvec.apply_to(ctx0, cur, il); - cb(cur, "l_out", il); - - // input for next layer - inpL = cur; - } - - cur = inpL; - - cur = llm_build_norm(ctx0, cur, hparams, - model.output_norm, NULL, 
- LLM_NORM_RMS, cb, -1); - cb(cur, "result_norm", -1); - - // lm_head - cur = llm_build_lora_mm(lctx, ctx0, model.output, cur); - cb(cur, "result_output", -1); - - ggml_build_forward_expand(gf, cur); - - return gf; - } - - // based on the build_qwen2moe() function, changes: - // * removed shared experts - // * removed bias - // * added q, k norm - struct ggml_cgraph * build_olmoe() { - struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, model.max_nodes(), false); - - // mutable variable, needed during the last layer of the computation to skip unused tokens - int32_t n_tokens = this->n_tokens; - - const int64_t n_embd_head = hparams.n_embd_head_v; - GGML_ASSERT(n_embd_head == hparams.n_embd_head_k); - GGML_ASSERT(n_embd_head == hparams.n_rot); - - struct ggml_tensor * cur; - struct ggml_tensor * inpL; - - inpL = llm_build_inp_embd(ctx0, lctx, hparams, ubatch, model.tok_embd, cb); - - // inp_pos - contains the positions - struct ggml_tensor * inp_pos = build_inp_pos(); - - // KQ_mask (mask for 1 head, it will be broadcasted to all heads) - struct ggml_tensor * KQ_mask = build_inp_KQ_mask(); - - for (int il = 0; il < n_layer; ++il) { - struct ggml_tensor * inpSA = inpL; - - // norm - cur = llm_build_norm(ctx0, inpL, hparams, - model.layers[il].attn_norm, NULL, - LLM_NORM_RMS, cb, il); - cb(cur, "attn_norm", il); - - // self_attention - { - // compute Q and K and RoPE them - struct ggml_tensor * Qcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wq, cur); - cb(Qcur, "Qcur", il); - - struct ggml_tensor * Kcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wk, cur); - cb(Kcur, "Kcur", il); - - struct ggml_tensor * Vcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wv, cur); - cb(Vcur, "Vcur", il); - - Qcur = llm_build_norm(ctx0, Qcur, hparams, model.layers[il].attn_q_norm, NULL, - LLM_NORM_RMS, cb, il); - cb(Qcur, "Qcur_normed", il); - - Kcur = llm_build_norm(ctx0, Kcur, hparams, model.layers[il].attn_k_norm, NULL, - LLM_NORM_RMS, cb, il); - cb(Kcur, "Kcur_normed", il); - - Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens); - Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens); - - Qcur = ggml_rope_ext( - ctx0, Qcur, inp_pos, nullptr, - n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, - ext_factor, attn_factor, beta_fast, beta_slow - ); - cb(Qcur, "Qcur_rope", il); - - Kcur = ggml_rope_ext( - ctx0, Kcur, inp_pos, nullptr, - n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, - ext_factor, attn_factor, beta_fast, beta_slow - ); - cb(Kcur, "Kcur_rope", il); - - cur = llm_build_kv(ctx0, lctx, kv_self, gf, - model.layers[il].wo, NULL, - Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il); - } - - if (il == n_layer - 1) { - // skip computing output for unused tokens - struct ggml_tensor * inp_out_ids = build_inp_out_ids(); - n_tokens = n_outputs; - cur = ggml_get_rows(ctx0, cur, inp_out_ids); - inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids); - } - - struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA); - cb(ffn_inp, "ffn_inp", il); - - // MoE branch - cur = llm_build_norm(ctx0, ffn_inp, hparams, - model.layers[il].ffn_norm, NULL, - LLM_NORM_RMS, cb, il); - cb(cur, "ffn_norm", il); - - cur = llm_build_moe_ffn(ctx0, lctx, cur, - model.layers[il].ffn_gate_inp, - model.layers[il].ffn_up_exps, - model.layers[il].ffn_gate_exps, - model.layers[il].ffn_down_exps, - nullptr, - n_expert, n_expert_used, - LLM_FFN_SILU, false, - false, 0.0, - LLAMA_EXPERT_GATING_FUNC_TYPE_SOFTMAX, - cb, il); - cb(cur, "ffn_moe_out", 
il); - - cur = ggml_add(ctx0, cur, ffn_inp); - cur = lctx.cvec.apply_to(ctx0, cur, il); - cb(cur, "l_out", il); - - // input for next layer - inpL = cur; - } - - cur = inpL; - - cur = llm_build_norm(ctx0, cur, hparams, - model.output_norm, NULL, - LLM_NORM_RMS, cb, -1); - cb(cur, "result_norm", -1); - - // lm_head - cur = llm_build_lora_mm(lctx, ctx0, model.output, cur); - cb(cur, "result_output", -1); - - ggml_build_forward_expand(gf, cur); - - return gf; - } - - struct ggml_cgraph * build_openelm() { - struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, model.max_nodes(), false); - - const int64_t n_embd_head = hparams.n_embd_head_v; - GGML_ASSERT(n_embd_head == hparams.n_embd_head_k); - - struct ggml_tensor * cur; - struct ggml_tensor * inpL; - inpL = llm_build_inp_embd(ctx0, lctx, hparams, ubatch, model.tok_embd, cb); - - // inp_pos - contains the positions - struct ggml_tensor * inp_pos = build_inp_pos(); - - // KQ_mask (mask for 1 head, it will be broadcasted to all heads) - struct ggml_tensor * KQ_mask = build_inp_KQ_mask(); - - for (int il = 0; il < n_layer; ++il) { - const int64_t n_head = hparams.n_head(il); - const int64_t n_head_kv = hparams.n_head_kv(il); - const int64_t n_head_qkv = 2*n_head_kv + n_head; - - cur = inpL; - struct ggml_tensor * residual = cur; - - // norm - cur = llm_build_norm(ctx0, inpL, hparams, - model.layers[il].attn_norm, NULL, - LLM_NORM_RMS, cb, il); - cb(cur, "attn_norm", il); - - // self-attention - { - cur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wqkv, cur); - cb(cur, "wqkv", il); - - cur = ggml_reshape_3d(ctx0, cur, n_embd_head_k, n_head_qkv, n_tokens); - - struct ggml_tensor * Qcur = ggml_cont(ctx0, ggml_view_3d(ctx0, cur, n_embd_head, n_head, n_tokens, cur->nb[1], cur->nb[2], 0)); - cb(Qcur, "Qcur", il); - - struct ggml_tensor * Kcur = ggml_cont(ctx0, ggml_view_3d(ctx0, cur, n_embd_head, n_head_kv, n_tokens, cur->nb[1], cur->nb[2], cur->nb[1]*n_head)); - cb(Kcur, "Kcur", il); - - struct ggml_tensor * Vcur = ggml_cont(ctx0, ggml_view_3d(ctx0, cur, n_embd_head, n_head_kv, n_tokens, cur->nb[1], cur->nb[2], cur->nb[1]*(n_head+n_head_kv))); - cb(Vcur, "Vcur", il); - - Qcur = llm_build_norm(ctx0, Qcur, hparams, - model.layers[il].attn_q_norm, NULL, - LLM_NORM_RMS, cb, il); - cb(Qcur, "Qcur", il); - - Kcur = llm_build_norm(ctx0, Kcur, hparams, - model.layers[il].attn_k_norm, NULL, - LLM_NORM_RMS, cb, il); - cb(Kcur, "Kcur", il); - - Qcur = ggml_rope_ext( - ctx0, Qcur, inp_pos, NULL, n_rot, rope_type, n_ctx_orig, - freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow - ); - cb(Qcur, "Qcur", il); - - Kcur = ggml_rope_ext( - ctx0, Kcur, inp_pos, NULL, n_rot, rope_type, n_ctx_orig, - freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow - ); - cb(Kcur, "Kcur", il); - - Vcur = ggml_reshape_2d(ctx0, Vcur, n_embd_head * n_head_kv, n_tokens); - cb(Qcur, "Vcur", il); - - cur = llm_build_kv(ctx0, lctx, kv_self, gf, - model.layers[il].wo, NULL, - Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il); - } - - if (il == n_layer - 1) { - // skip computing output for unused tokens - struct ggml_tensor * inp_out_ids = build_inp_out_ids(); - residual = ggml_get_rows(ctx0, residual, inp_out_ids); - cur = ggml_get_rows(ctx0, cur, inp_out_ids); - } - - struct ggml_tensor * ffn_inp = ggml_add(ctx0, residual, cur); - cb(ffn_inp, "ffn_inp", il); - - // feed-forward network - { - cur = llm_build_norm(ctx0, ffn_inp, hparams, - model.layers[il].ffn_norm, NULL, - LLM_NORM_RMS, cb, il); - cb(cur, 
"ffn_norm", il); - - cur = llm_build_ffn(ctx0, lctx, cur, - model.layers[il].ffn_up, NULL, NULL, - model.layers[il].ffn_gate, NULL, NULL, - model.layers[il].ffn_down, NULL, NULL, - NULL, - LLM_FFN_SILU, LLM_FFN_PAR, cb, il); - cb(cur, "ffn_out", il); - } - - cur = ggml_add(ctx0, cur, ffn_inp); - cur = lctx.cvec.apply_to(ctx0, cur, il); - cb(cur, "l_out", il); - - inpL = cur; - } - - cur = inpL; - - // norm - cur = llm_build_norm(ctx0, cur, hparams, - model.output_norm, NULL, - LLM_NORM_RMS, cb, -1); - cb(cur, "result_norm", -1); - - cur = llm_build_lora_mm(lctx, ctx0, model.output, cur); - cb(cur, "result_output", -1); - - ggml_build_forward_expand(gf, cur); - - return gf; - } - - struct ggml_cgraph * build_gptneox() { - struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, model.max_nodes(), false); - - const int64_t n_embd_head = hparams.n_embd_head_v; - const int64_t n_embd_gqa = hparams.n_embd_v_gqa(); - GGML_ASSERT(n_embd_head == hparams.n_embd_head_k); - - struct ggml_tensor * cur; - struct ggml_tensor * inpL; - - inpL = llm_build_inp_embd(ctx0, lctx, hparams, ubatch, model.tok_embd, cb); - - // inp_pos - contains the positions - struct ggml_tensor * inp_pos = build_inp_pos(); - - // KQ_mask (mask for 1 head, it will be broadcasted to all heads) - struct ggml_tensor * KQ_mask = build_inp_KQ_mask(); - - for (int il = 0; il < n_layer; ++il) { - cur = llm_build_norm(ctx0, inpL, hparams, - model.layers[il].attn_norm, - model.layers[il].attn_norm_b, - LLM_NORM, cb, il); - cb(cur, "attn_norm", il); - - // self-attention - { - cur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wqkv, cur); - cb(cur, "wqkv", il); - - cur = ggml_add(ctx0, cur, model.layers[il].bqkv); - cb(cur, "bqkv", il); - - struct ggml_tensor * Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd))); - struct ggml_tensor * Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd))); - struct ggml_tensor * Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd + n_embd_gqa))); - - cb(Qcur, "Qcur", il); - cb(Kcur, "Kcur", il); - cb(Vcur, "Vcur", il); - - Qcur = ggml_rope_ext( - ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos, nullptr, - n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, - ext_factor, attn_factor, beta_fast, beta_slow - ); - cb(Qcur, "Qcur", il); - - Kcur = ggml_rope_ext( - ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos, nullptr, - n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, - ext_factor, attn_factor, beta_fast, beta_slow - ); - cb(Kcur, "Kcur", il); - - cur = llm_build_kv(ctx0, lctx, kv_self, gf, - model.layers[il].wo, model.layers[il].bo, - Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il); - } - - if (il == n_layer - 1) { - // skip computing output for unused tokens - struct ggml_tensor * inp_out_ids = build_inp_out_ids(); - cur = ggml_get_rows(ctx0, cur, inp_out_ids); - inpL = ggml_get_rows(ctx0, inpL, inp_out_ids); - } - - // ffn - if (hparams.use_par_res) { - // attention and ffn are computed in parallel - // x = x + attn(ln1(x)) + ffn(ln2(x)) - - struct ggml_tensor * attn_out = cur; - - cur = llm_build_norm(ctx0, inpL, hparams, - model.layers[il].ffn_norm, - model.layers[il].ffn_norm_b, - LLM_NORM, cb, il); - cb(cur, "ffn_norm", il); - - cur = llm_build_ffn(ctx0, lctx, cur, - model.layers[il].ffn_up, model.layers[il].ffn_up_b, NULL, - NULL, NULL, 
NULL, - model.layers[il].ffn_down, model.layers[il].ffn_down_b, NULL, - NULL, - LLM_FFN_GELU, LLM_FFN_SEQ, cb, il); - cb(cur, "ffn_out", il); - - cur = ggml_add(ctx0, cur, inpL); - cb(cur, "ffn_out", il); - - cur = ggml_add(ctx0, cur, attn_out); - cur = lctx.cvec.apply_to(ctx0, cur, il); - cb(cur, "l_out", il); - - // input for next layer - inpL = cur; - } else { - // attention and ffn are computed sequentially - // x = x + attn(ln1(x)) - // x = x + ffn(ln2(x)) - - struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpL); - cb(ffn_inp, "ffn_inp", il); - - cur = llm_build_norm(ctx0, ffn_inp, hparams, - model.layers[il].ffn_norm, - model.layers[il].ffn_norm_b, - LLM_NORM, cb, il); - cb(cur, "ffn_norm", il); - - cur = llm_build_ffn(ctx0, lctx, cur, - model.layers[il].ffn_up, model.layers[il].ffn_up_b, NULL, - NULL, NULL, NULL, - model.layers[il].ffn_down, model.layers[il].ffn_down_b, NULL, - NULL, - LLM_FFN_GELU, LLM_FFN_SEQ, cb, il); - cb(cur, "ffn_out", il); - - cur = ggml_add(ctx0, cur, ffn_inp); - cur = lctx.cvec.apply_to(ctx0, cur, il); - cb(cur, "l_out", il); - - // input for next layer - inpL = cur; - } - } - - cur = llm_build_norm(ctx0, inpL, hparams, - model.output_norm, - model.output_norm_b, - LLM_NORM, cb, -1); - cb(cur, "result_norm", -1); - - cur = llm_build_lora_mm(lctx, ctx0, model.output, cur); - cb(cur, "result_output", -1); - - ggml_build_forward_expand(gf, cur); - - return gf; - } - - struct ggml_cgraph * build_arctic() { - struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, model.max_nodes(), false); - - // mutable variable, needed during the last layer of the computation to skip unused tokens - int32_t n_tokens = this->n_tokens; - - const int64_t n_embd_head = hparams.n_embd_head_v; - GGML_ASSERT(n_embd_head == hparams.n_embd_head_k); - GGML_ASSERT(n_embd_head == hparams.n_rot); - - struct ggml_tensor * cur; - struct ggml_tensor * inpL; - - inpL = llm_build_inp_embd(ctx0, lctx, hparams, ubatch, model.tok_embd, cb); - - // inp_pos - contains the positions - struct ggml_tensor * inp_pos = build_inp_pos(); - - // KQ_mask (mask for 1 head, it will be broadcasted to all heads) - struct ggml_tensor * KQ_mask = build_inp_KQ_mask(); - - for (int il = 0; il < n_layer; ++il) { - struct ggml_tensor * inpSA = inpL; - - // norm - cur = llm_build_norm(ctx0, inpL, hparams, - model.layers[il].attn_norm, NULL, - LLM_NORM_RMS, cb, il); - cb(cur, "attn_norm", il); - - // self-attention - { - // compute Q and K and RoPE them - struct ggml_tensor * Qcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wq, cur); - cb(Qcur, "Qcur", il); - - struct ggml_tensor * Kcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wk, cur); - cb(Kcur, "Kcur", il); - - struct ggml_tensor * Vcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wv, cur); - cb(Vcur, "Vcur", il); - - Qcur = ggml_rope_ext( - ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos, nullptr, - n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, - ext_factor, attn_factor, beta_fast, beta_slow - ); - cb(Qcur, "Qcur", il); - - Kcur = ggml_rope_ext( - ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos, nullptr, - n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, - ext_factor, attn_factor, beta_fast, beta_slow - ); - cb(Kcur, "Kcur", il); - - cur = llm_build_kv(ctx0, lctx, kv_self, gf, - model.layers[il].wo, NULL, - Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il); - } - - if (il == n_layer - 1) { - // skip computing output for unused tokens - 
struct ggml_tensor * inp_out_ids = build_inp_out_ids(); - n_tokens = n_outputs; - cur = ggml_get_rows(ctx0, cur, inp_out_ids); - inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids); - } - - struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA); - cb(ffn_inp, "ffn_inp", il); - - // feed-forward network - cur = llm_build_norm(ctx0, ffn_inp, hparams, - model.layers[il].ffn_norm, NULL, - LLM_NORM_RMS, cb, il); - cb(cur, "ffn_norm", il); - - cur = llm_build_ffn(ctx0, lctx, cur, - model.layers[il].ffn_up, NULL, NULL, - model.layers[il].ffn_gate, NULL, NULL, - model.layers[il].ffn_down, NULL, NULL, - NULL, - LLM_FFN_SILU, LLM_FFN_PAR, cb, il); - cb(cur, "ffn_out", il); - - struct ggml_tensor * ffn_out = ggml_add(ctx0, cur, ffn_inp); - cb(ffn_out, "ffn_out", il); - - // MoE - cur = llm_build_norm(ctx0, inpSA, hparams, - model.layers[il].ffn_norm_exps, NULL, - LLM_NORM_RMS, cb, il); - cb(cur, "ffn_norm_exps", il); - - cur = llm_build_moe_ffn(ctx0, lctx, cur, - model.layers[il].ffn_gate_inp, - model.layers[il].ffn_up_exps, - model.layers[il].ffn_gate_exps, - model.layers[il].ffn_down_exps, - nullptr, - n_expert, n_expert_used, - LLM_FFN_SILU, true, - false, 0.0, - LLAMA_EXPERT_GATING_FUNC_TYPE_SOFTMAX, - cb, il); - cb(cur, "ffn_moe_out", il); - - cur = ggml_add(ctx0, cur, ffn_out); - cb(cur, "ffn_out", il); - - cur = lctx.cvec.apply_to(ctx0, cur, il); - cb(cur, "l_out", il); - - // input for next layer - inpL = cur; - } - - cur = inpL; - - cur = llm_build_norm(ctx0, cur, hparams, - model.output_norm, NULL, - LLM_NORM_RMS, cb, -1); - cb(cur, "result_norm", -1); - - // lm_head - cur = llm_build_lora_mm(lctx, ctx0, model.output, cur); - cb(cur, "result_output", -1); - - ggml_build_forward_expand(gf, cur); - - return gf; - } - - struct ggml_cgraph * build_deepseek() { - struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, model.max_nodes(), false); - - // mutable variable, needed during the last layer of the computation to skip unused tokens - int32_t n_tokens = this->n_tokens; - - const int64_t n_embd_head = hparams.n_embd_head_v; - GGML_ASSERT(n_embd_head == hparams.n_embd_head_k); - GGML_ASSERT(n_embd_head == hparams.n_rot); - - struct ggml_tensor * cur; - struct ggml_tensor * inpL; - - inpL = llm_build_inp_embd(ctx0, lctx, hparams, ubatch, model.tok_embd, cb); - - // inp_pos - contains the positions - struct ggml_tensor * inp_pos = build_inp_pos(); - - // KQ_mask (mask for 1 head, it will be broadcasted to all heads) - struct ggml_tensor * KQ_mask = build_inp_KQ_mask(); - const float kq_scale = hparams.f_attention_scale == 0.0f ? 
1.0f/sqrtf(float(n_embd_head)) : hparams.f_attention_scale; - for (int il = 0; il < n_layer; ++il) { - struct ggml_tensor * inpSA = inpL; - - // norm - cur = llm_build_norm(ctx0, inpL, hparams, - model.layers[il].attn_norm, NULL, - LLM_NORM_RMS, cb, il); - cb(cur, "attn_norm", il); - - // self-attention - { - // rope freq factors for llama3; may return nullptr for llama2 and other models - struct ggml_tensor * rope_factors = build_rope_factors(il); - - // compute Q and K and RoPE them - struct ggml_tensor * Qcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wq, cur); - cb(Qcur, "Qcur", il); - if (model.layers[il].bq) { - Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq); - cb(Qcur, "Qcur", il); - } - - struct ggml_tensor * Kcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wk, cur); - cb(Kcur, "Kcur", il); - if (model.layers[il].bk) { - Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk); - cb(Kcur, "Kcur", il); - } - - struct ggml_tensor * Vcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wv, cur); - cb(Vcur, "Vcur", il); - if (model.layers[il].bv) { - Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv); - cb(Vcur, "Vcur", il); - } - - Qcur = ggml_rope_ext( - ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos, rope_factors, - n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, - ext_factor, attn_factor, beta_fast, beta_slow - ); - cb(Qcur, "Qcur", il); - - Kcur = ggml_rope_ext( - ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos, rope_factors, - n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, - ext_factor, attn_factor, beta_fast, beta_slow - ); - cb(Kcur, "Kcur", il); - - cur = llm_build_kv(ctx0, lctx, kv_self, gf, - model.layers[il].wo, model.layers[il].bo, - Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, kq_scale, cb, il); - } - - if (il == n_layer - 1) { - // skip computing output for unused tokens - struct ggml_tensor * inp_out_ids = build_inp_out_ids(); - n_tokens = n_outputs; - cur = ggml_get_rows(ctx0, cur, inp_out_ids); - inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids); - } - - - struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA); - cb(ffn_inp, "ffn_inp", il); - - cur = llm_build_norm(ctx0, ffn_inp, hparams, - model.layers[il].ffn_norm, NULL, - LLM_NORM_RMS, cb, il); - cb(cur, "ffn_norm", il); - - if ((uint32_t) il < hparams.n_layer_dense_lead) { - cur = llm_build_ffn(ctx0, lctx, cur, - model.layers[il].ffn_up, NULL, NULL, - model.layers[il].ffn_gate, NULL, NULL, - model.layers[il].ffn_down, NULL, NULL, - NULL, - LLM_FFN_SILU, LLM_FFN_PAR, cb, il); - cb(cur, "ffn_out", il); - } else { - // MoE branch - ggml_tensor * moe_out = - llm_build_moe_ffn(ctx0, lctx, cur, - model.layers[il].ffn_gate_inp, - model.layers[il].ffn_up_exps, - model.layers[il].ffn_gate_exps, - model.layers[il].ffn_down_exps, - nullptr, - n_expert, n_expert_used, - LLM_FFN_SILU, false, - false, hparams.expert_weights_scale, - LLAMA_EXPERT_GATING_FUNC_TYPE_SOFTMAX, - cb, il); - cb(moe_out, "ffn_moe_out", il); - - // FFN shared expert - { - ggml_tensor * ffn_shexp = llm_build_ffn(ctx0, lctx, cur, - model.layers[il].ffn_up_shexp, NULL, NULL, - model.layers[il].ffn_gate_shexp, NULL, NULL, - model.layers[il].ffn_down_shexp, NULL, NULL, - NULL, - LLM_FFN_SILU, LLM_FFN_PAR, cb, il); - cb(ffn_shexp, "ffn_shexp", il); - - cur = ggml_add(ctx0, moe_out, ffn_shexp); - cb(cur, "ffn_out", il); - } - } - - cur = ggml_add(ctx0, cur, ffn_inp); - cur = lctx.cvec.apply_to(ctx0, cur, il); - cb(cur, "l_out", il); - - // input for next layer - inpL = cur; - } - - 
cur = inpL; - - cur = llm_build_norm(ctx0, cur, hparams, - model.output_norm, NULL, - LLM_NORM_RMS, cb, -1); - cb(cur, "result_norm", -1); - - // lm_head - cur = llm_build_lora_mm(lctx, ctx0, model.output, cur); - - cb(cur, "result_output", -1); - - ggml_build_forward_expand(gf, cur); - - return gf; - } - - struct ggml_cgraph * build_deepseek2() { - struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, model.max_nodes(), false); - - // mutable variable, needed during the last layer of the computation to skip unused tokens - int32_t n_tokens = this->n_tokens; - - bool is_lite = (hparams.n_layer == 27); - - // We have to pre-scale kq_scale and attn_factor to make the YaRN RoPE work correctly. - // See https://github.com/ggerganov/llama.cpp/discussions/7416 for detailed explanation. - const float mscale = attn_factor * (1.0f + hparams.rope_yarn_log_mul * logf(1.0f / freq_scale)); - const float kq_scale = 1.0f*mscale*mscale/sqrtf(float(hparams.n_embd_head_k)); - const float attn_factor_scaled = 1.0f / (1.0f + 0.1f * logf(1.0f / freq_scale)); - - const uint32_t n_embd_head_qk_rope = hparams.n_rot; - const uint32_t n_embd_head_qk_nope = hparams.n_embd_head_k - hparams.n_rot; - const uint32_t kv_lora_rank = hparams.n_lora_kv; - - struct ggml_tensor * cur; - struct ggml_tensor * inpL; - - // {n_embd, n_tokens} - inpL = llm_build_inp_embd(ctx0, lctx, hparams, ubatch, model.tok_embd, cb); - - // inp_pos - contains the positions - struct ggml_tensor * inp_pos = build_inp_pos(); - - // KQ_mask (mask for 1 head, it will be broadcasted to all heads) - struct ggml_tensor * KQ_mask = build_inp_KQ_mask(); - - for (int il = 0; il < n_layer; ++il) { - struct ggml_tensor * inpSA = inpL; - - // norm - cur = llm_build_norm(ctx0, inpL, hparams, - model.layers[il].attn_norm, NULL, - LLM_NORM_RMS, cb, il); - cb(cur, "attn_norm", il); - - // self_attention - { - struct ggml_tensor * q = NULL; - if (!is_lite) { - // {n_embd, q_lora_rank} * {n_embd, n_tokens} -> {q_lora_rank, n_tokens} - q = ggml_mul_mat(ctx0, model.layers[il].wq_a, cur); - cb(q, "q", il); - - q = llm_build_norm(ctx0, q, hparams, - model.layers[il].attn_q_a_norm, NULL, - LLM_NORM_RMS, cb, il); - cb(q, "q", il); - - // {q_lora_rank, n_head * hparams.n_embd_head_k} * {q_lora_rank, n_tokens} -> {n_head * hparams.n_embd_head_k, n_tokens} - q = ggml_mul_mat(ctx0, model.layers[il].wq_b, q); - cb(q, "q", il); - } else { - q = ggml_mul_mat(ctx0, model.layers[il].wq, cur); - cb(q, "q", il); - } - - // split into {n_head * n_embd_head_qk_nope, n_tokens} - struct ggml_tensor * q_nope = ggml_view_3d(ctx0, q, n_embd_head_qk_nope, n_head, n_tokens, - ggml_row_size(q->type, hparams.n_embd_head_k), - ggml_row_size(q->type, hparams.n_embd_head_k * n_head), - 0); - cb(q_nope, "q_nope", il); - - // and {n_head * n_embd_head_qk_rope, n_tokens} - struct ggml_tensor * q_pe = ggml_view_3d(ctx0, q, n_embd_head_qk_rope, n_head, n_tokens, - ggml_row_size(q->type, hparams.n_embd_head_k), - ggml_row_size(q->type, hparams.n_embd_head_k * n_head), - ggml_row_size(q->type, n_embd_head_qk_nope)); - cb(q_pe, "q_pe", il); - - // {n_embd, kv_lora_rank + n_embd_head_qk_rope} * {n_embd, n_tokens} -> {kv_lora_rank + n_embd_head_qk_rope, n_tokens} - struct ggml_tensor * kv_pe_compresseed = ggml_mul_mat(ctx0, model.layers[il].wkv_a_mqa, cur); - cb(kv_pe_compresseed, "kv_pe_compresseed", il); - - // split into {kv_lora_rank, n_tokens} - struct ggml_tensor * kv_compressed = ggml_view_2d(ctx0, kv_pe_compresseed, kv_lora_rank, n_tokens, - kv_pe_compresseed->nb[1], - 0); - cb(kv_compressed, 
"kv_compressed", il); - - // and {n_embd_head_qk_rope, n_tokens} - struct ggml_tensor * k_pe = ggml_view_3d(ctx0, kv_pe_compresseed, n_embd_head_qk_rope, 1, n_tokens, - kv_pe_compresseed->nb[1], - kv_pe_compresseed->nb[1], - ggml_row_size(kv_pe_compresseed->type, kv_lora_rank)); - cb(k_pe, "k_pe", il); - - // TODO: the CUDA backend used to not support non-cont. (RMS) norm, investigate removing ggml_cont - kv_compressed = ggml_cont(ctx0, kv_compressed); - kv_compressed = llm_build_norm(ctx0, kv_compressed, hparams, - model.layers[il].attn_kv_a_norm, NULL, - LLM_NORM_RMS, cb, il); - cb(kv_compressed, "kv_compressed", il); - - // {kv_lora_rank, n_head * (n_embd_head_qk_nope + n_embd_head_v)} * {kv_lora_rank, n_tokens} -> {n_head * (n_embd_head_qk_nope + n_embd_head_v), n_tokens} - struct ggml_tensor * kv = ggml_mul_mat(ctx0, model.layers[il].wkv_b, kv_compressed); - cb(kv, "kv", il); - - // split into {n_head * n_embd_head_qk_nope, n_tokens} - struct ggml_tensor * k_nope = ggml_view_3d(ctx0, kv, n_embd_head_qk_nope, n_head, n_tokens, - ggml_row_size(kv->type, n_embd_head_qk_nope + hparams.n_embd_head_v), - ggml_row_size(kv->type, n_head * (n_embd_head_qk_nope + hparams.n_embd_head_v)), - 0); - cb(k_nope, "k_nope", il); - - // and {n_head * n_embd_head_v, n_tokens} - struct ggml_tensor * v_states = ggml_view_3d(ctx0, kv, hparams.n_embd_head_v, n_head, n_tokens, - ggml_row_size(kv->type, (n_embd_head_qk_nope + hparams.n_embd_head_v)), - ggml_row_size(kv->type, (n_embd_head_qk_nope + hparams.n_embd_head_v)*n_head), - ggml_row_size(kv->type, (n_embd_head_qk_nope))); - cb(v_states, "v_states", il); - - v_states = ggml_cont(ctx0, v_states); - cb(v_states, "v_states", il); - - v_states = ggml_view_2d(ctx0, v_states, hparams.n_embd_head_v * n_head, n_tokens, - ggml_row_size(kv->type, hparams.n_embd_head_v * n_head), - 0); - cb(v_states, "v_states", il); - - q_pe = ggml_cont(ctx0, q_pe); // TODO: the CUDA backend used to not support non-cont. RoPE, investigate removing this - q_pe = ggml_rope_ext( - ctx0, q_pe, inp_pos, nullptr, - n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, - ext_factor, attn_factor_scaled, beta_fast, beta_slow - ); - cb(q_pe, "q_pe", il); - - // shared RoPE key - k_pe = ggml_cont(ctx0, k_pe); // TODO: the CUDA backend used to not support non-cont. 
RoPE, investigate removing this - k_pe = ggml_rope_ext( - ctx0, k_pe, inp_pos, nullptr, - n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, - ext_factor, attn_factor_scaled, beta_fast, beta_slow - ); - cb(k_pe, "k_pe", il); - - struct ggml_tensor * q_states = ggml_concat(ctx0, q_nope, q_pe, 0); - cb(q_states, "q_states", il); - - struct ggml_tensor * k_states = ggml_concat(ctx0, k_nope, ggml_repeat(ctx0, k_pe, q_pe), 0); - cb(k_states, "k_states", il); - - cur = llm_build_kv(ctx0, lctx, kv_self, gf, - model.layers[il].wo, NULL, - k_states, v_states, q_states, KQ_mask, n_tokens, kv_head, n_kv, kq_scale, cb, il); - } - - if (il == n_layer - 1) { - // skip computing output for unused tokens - struct ggml_tensor * inp_out_ids = build_inp_out_ids(); - n_tokens = n_outputs; - cur = ggml_get_rows(ctx0, cur, inp_out_ids); - inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids); - } - - struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA); - cb(ffn_inp, "ffn_inp", il); - - cur = llm_build_norm(ctx0, ffn_inp, hparams, - model.layers[il].ffn_norm, NULL, - LLM_NORM_RMS, cb, il); - cb(cur, "ffn_norm", il); - - if ((uint32_t) il < hparams.n_layer_dense_lead) { - cur = llm_build_ffn(ctx0, lctx, cur, - model.layers[il].ffn_up, NULL, NULL, - model.layers[il].ffn_gate, NULL, NULL, - model.layers[il].ffn_down, NULL, NULL, - NULL, - LLM_FFN_SILU, LLM_FFN_PAR, cb, il); - cb(cur, "ffn_out", il); - } else { - // MoE branch - ggml_tensor * moe_out = - llm_build_moe_ffn(ctx0, lctx, cur, - model.layers[il].ffn_gate_inp, - model.layers[il].ffn_up_exps, - model.layers[il].ffn_gate_exps, - model.layers[il].ffn_down_exps, - model.layers[il].ffn_exp_probs_b, - n_expert, n_expert_used, - LLM_FFN_SILU, hparams.expert_weights_norm, - true, hparams.expert_weights_scale, - (enum llama_expert_gating_func_type) hparams.expert_gating_func, - cb, il); - cb(moe_out, "ffn_moe_out", il); - - // FFN shared expert - { - ggml_tensor * ffn_shexp = llm_build_ffn(ctx0, lctx, cur, - model.layers[il].ffn_up_shexp, NULL, NULL, - model.layers[il].ffn_gate_shexp, NULL, NULL, - model.layers[il].ffn_down_shexp, NULL, NULL, - NULL, - LLM_FFN_SILU, LLM_FFN_PAR, cb, il); - cb(ffn_shexp, "ffn_shexp", il); - - cur = ggml_add(ctx0, moe_out, ffn_shexp); - cb(cur, "ffn_out", il); - } - } - - cur = ggml_add(ctx0, cur, ffn_inp); - cur = lctx.cvec.apply_to(ctx0, cur, il); - cb(cur, "l_out", il); - - // input for next layer - inpL = cur; - } - - cur = inpL; - - cur = llm_build_norm(ctx0, cur, hparams, - model.output_norm, NULL, - LLM_NORM_RMS, cb, -1); - cb(cur, "result_norm", -1); - - // lm_head - cur = ggml_mul_mat(ctx0, model.output, cur); - cb(cur, "result_output", -1); - - ggml_build_forward_expand(gf, cur); - - return gf; - } - - struct ggml_cgraph * build_bitnet() { - struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, model.max_nodes(), false); - - const int64_t n_embd_head = hparams.n_embd_head_v; - GGML_ASSERT(n_embd_head == hparams.n_embd_head_k); - - struct ggml_tensor * cur; - struct ggml_tensor * inpL; - - inpL = llm_build_inp_embd(ctx0, lctx, hparams, ubatch, model.tok_embd, cb); - - // inp_pos - contains the positions - struct ggml_tensor * inp_pos = build_inp_pos(); - - // KQ_mask (mask for 1 head, it will be broadcasted to all heads) - struct ggml_tensor * KQ_mask = build_inp_KQ_mask(); - - for (int il = 0; il < n_layer; ++il) { - struct ggml_tensor * inpSA = inpL; - - cur = llm_build_norm(ctx0, inpL, hparams, - model.layers[il].attn_norm, NULL, - LLM_NORM_RMS, cb, il); - cb(cur, "attn_norm", il); - - // self-attention - { - // 
compute Q and K and RoPE them - struct ggml_tensor * Qcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wq, cur); - if (model.layers[il].wq_scale) { - Qcur = ggml_mul(ctx0, Qcur, model.layers[il].wq_scale); - } - cb(Qcur, "Qcur", il); - if (model.layers[il].bq) { - Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq); - cb(Qcur, "Qcur", il); - } - - // B1.K - struct ggml_tensor * Kcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wk, cur); - if (model.layers[il].wk_scale) { - Kcur = ggml_mul(ctx0, Kcur, model.layers[il].wk_scale); - } - cb(Kcur, "Kcur", il); - if (model.layers[il].bk) { - Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk); - cb(Kcur, "Kcur", il); - } - - // B1.V - struct ggml_tensor * Vcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wv, cur); - if (model.layers[il].wv_scale) { - Vcur = ggml_mul(ctx0, Vcur, model.layers[il].wv_scale); - } - cb(Vcur, "Vcur", il); - if (model.layers[il].bv) { - Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv); - cb(Vcur, "Vcur", il); - } - - Qcur = ggml_rope_ext( - ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos, nullptr, - n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, - ext_factor, attn_factor, beta_fast, beta_slow - ); - cb(Qcur, "Qcur", il); - - Kcur = ggml_rope_ext( - ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos, nullptr, - n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, - ext_factor, attn_factor, beta_fast, beta_slow - ); - cb(Kcur, "Kcur", il); - - cur = llm_build_kv(ctx0, lctx, kv_self, gf, - NULL, NULL, - Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il); - - cur = llm_build_norm(ctx0, cur, hparams, - model.layers[il].attn_sub_norm, NULL, - LLM_NORM_RMS, cb, il); - cb(cur, "attn_sub_norm", il); - - cur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wo, cur); - if (model.layers[il].wo_scale) { - cur = ggml_mul(ctx0, cur, model.layers[il].wo_scale); - } - if (model.layers[il].bo) { - cur = ggml_add(ctx0, cur, model.layers[il].bo); - } - cb(cur, "attn_o_out", il); - } - - if (il == n_layer - 1) { - // skip computing output for unused tokens - struct ggml_tensor * inp_out_ids = build_inp_out_ids(); - cur = ggml_get_rows(ctx0, cur, inp_out_ids); - inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids); - } - - struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA); - cb(ffn_inp, "ffn_inp", il); - - // feed-forward forward - cur = llm_build_norm(ctx0, ffn_inp, hparams, - model.layers[il].ffn_norm, NULL, - LLM_NORM_RMS, cb, il); - cb(cur, "ffn_norm", il); - - cur = llm_build_ffn(ctx0, lctx, cur, - model.layers[il].ffn_up, NULL, model.layers[il].ffn_up_scale, - model.layers[il].ffn_gate, NULL, model.layers[il].ffn_gate_scale, - NULL, NULL, NULL, - NULL, - LLM_FFN_SILU, LLM_FFN_PAR, cb, il); - cb(cur, "ffn_sub_out", il); - - cur = llm_build_norm(ctx0, cur, hparams, - model.layers[il].ffn_sub_norm, NULL, - LLM_NORM_RMS, cb, il); - cb(cur, "ffn_sub_norm", il); - - cur = llm_build_lora_mm(lctx, ctx0, model.layers[il].ffn_down, cur); - if (model.layers[il].ffn_down_scale) { - cur = ggml_mul(ctx0, cur, model.layers[il].ffn_down_scale); - } - cb(cur, "ffn_down", il); - - cur = ggml_add(ctx0, cur, ffn_inp); - cb(cur, "l_out", il); - - // input for next layer - inpL = cur; - } - - cur = inpL; - - cur = llm_build_norm(ctx0, cur, hparams, - model.output_norm, NULL, - LLM_NORM_RMS, cb, -1); - cb(cur, "result_norm", -1); - - // lm_head - // FIXME: do not use model.tok_embd directly, duplicate as model.output - cur = llm_build_lora_mm(lctx, 
ctx0, model.tok_embd, cur); - cb(cur, "result_output", -1); - - ggml_build_forward_expand(gf, cur); - return gf; - } - - struct ggml_cgraph * build_t5_enc() { - struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, model.max_nodes(), false); - - // mutable variable, needed during the last layer of the computation to skip unused tokens - int32_t n_tokens = this->n_tokens; - - const int64_t n_embd_head = hparams.n_embd_head_v; - const int64_t n_embd_gqa = hparams.n_embd_v_gqa(); - GGML_ASSERT(n_embd_head == hparams.n_embd_head_k); - - struct ggml_tensor * cur; - struct ggml_tensor * inpL; - - inpL = llm_build_inp_embd(ctx0, lctx, hparams, ubatch, model.tok_embd, cb); - - GGML_ASSERT(lctx.is_encoding); - struct ggml_tensor * pos_bucket_enc = llm_build_pos_bucket(false); - - // KQ_mask (mask for 1 head, it will be broadcasted to all heads) - struct ggml_tensor * KQ_mask_enc = build_inp_KQ_mask(false); - - for (int il = 0; il < n_layer; ++il) { - struct ggml_tensor * inpSA = inpL; - - // norm - cur = llm_build_norm(ctx0, inpL, hparams, - model.layers[il].attn_norm_enc, NULL, - LLM_NORM_RMS, cb, il); - cb(cur, "attn_norm", il); - - // self-attention - { - struct ggml_tensor * Qcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wq_enc, cur); - cb(Qcur, "Qcur", il); - - struct ggml_tensor * Kcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wk_enc, cur); - cb(Kcur, "Kcur", il); - - struct ggml_tensor * Vcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wv_enc, cur); - cb(Vcur, "Vcur", il); - - Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens); - Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens); - - struct ggml_tensor * q = ggml_permute(ctx0, Qcur, 0, 2, 1, 3); - struct ggml_tensor * k = ggml_cont(ctx0, ggml_permute(ctx0, Kcur, 0, 2, 1, 3)); - - struct ggml_tensor * kq = ggml_mul_mat(ctx0, k, q); - cb(kq, "kq", il); - - struct ggml_tensor * attn_rel_b = model.layers[il].attn_rel_b_enc ? 
model.layers[il].attn_rel_b_enc : model.layers[0].attn_rel_b_enc; - struct ggml_tensor * pos_bias = llm_build_pos_bias(pos_bucket_enc, attn_rel_b); - struct ggml_tensor * kq_b = ggml_add(ctx0, kq, pos_bias); - cb(kq_b, "kq_b", il); - - kq = ggml_soft_max_ext(ctx0, kq_b, KQ_mask_enc, 1.0f, hparams.f_max_alibi_bias); - cb(kq, "kq_soft_max_ext", il); - - struct ggml_tensor * v = ggml_cont(ctx0, ggml_transpose(ctx0, ggml_reshape_2d(ctx0, Vcur, n_embd_gqa, n_tokens))); - cb(v, "v", il); - - struct ggml_tensor * kqv = ggml_mul_mat(ctx0, ggml_reshape_3d(ctx0, v, n_tokens, n_embd_head, n_head_kv), kq); - cb(kqv, "kqv", il); - - struct ggml_tensor * kqv_merged = ggml_permute(ctx0, kqv, 0, 2, 1, 3); - cb(kqv_merged, "kqv_merged", il); - - cur = ggml_cont_2d(ctx0, kqv_merged, n_embd_gqa, n_tokens); - cb(cur, "kqv_merged_cont", il); - - ggml_build_forward_expand(gf, cur); - - cur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wo_enc, cur); - cb(cur, "kqv_out", il); - } - - if (il == n_layer - 1) { - // skip computing output for unused tokens - struct ggml_tensor * inp_out_ids = build_inp_out_ids(); - n_tokens = n_outputs; - cur = ggml_get_rows(ctx0, cur, inp_out_ids); - inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids); - } - - struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA); - cb(ffn_inp, "ffn_inp", il); - - // feed-forward network - { - cur = llm_build_norm(ctx0, ffn_inp, hparams, - model.layers[il].ffn_norm_enc, NULL, - LLM_NORM_RMS, cb, il); - cb(cur, "ffn_norm", il); - - // T5 uses relu, flan-T5 uses gelu-gated - cur = llm_build_ffn(ctx0, lctx, cur, - model.layers[il].ffn_up_enc, NULL, NULL, - model.layers[il].ffn_gate_enc, NULL, NULL, - model.layers[il].ffn_down_enc, NULL, NULL, - NULL, - model.layers[il].ffn_gate_enc ? LLM_FFN_GELU : LLM_FFN_RELU, - model.layers[il].ffn_gate_enc ? 
LLM_FFN_PAR : LLM_FFN_SEQ, - cb, il); - cb(cur, "ffn_out", il); - } - - cur = ggml_add(ctx0, cur, ffn_inp); - cb(cur, "ffn_out", il); - - ggml_tensor * layer_dir = lctx.cvec.tensor_for(il); - if (layer_dir != nullptr) { - cur = ggml_add(ctx0, cur, layer_dir); - } - cb(cur, "l_out", il); - - // input for next layer - inpL = cur; - } - - cur = inpL; - cb(cur, "result_embd", -1); - - cur = llm_build_norm(ctx0, cur, hparams, - model.output_norm_enc, NULL, - LLM_NORM_RMS, cb, -1); - cb(cur, "result_norm", -1); - - ggml_build_forward_expand(gf, cur); - - return gf; - } - - struct ggml_cgraph * build_t5_dec() { - struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, model.max_nodes(), false); - - // mutable variable, needed during the last layer of the computation to skip unused tokens - int32_t n_tokens = this->n_tokens; - - const int64_t n_embd_head = hparams.n_embd_head_v; - const int64_t n_embd_gqa = hparams.n_embd_v_gqa(); - GGML_ASSERT(n_embd_head == hparams.n_embd_head_k); - - struct ggml_tensor * cur; - struct ggml_tensor * inpL; - - inpL = llm_build_inp_embd(ctx0, lctx, hparams, ubatch, model.tok_embd, cb); - - GGML_ASSERT(!lctx.is_encoding); - GGML_ASSERT(n_outputs_enc > 0 && "call llama_encode() first"); - - struct ggml_tensor * embd_enc = llm_build_inp_embd_enc(); - struct ggml_tensor * pos_bucket_dec = llm_build_pos_bucket(true); - - struct ggml_tensor * KQ_mask_dec = build_inp_KQ_mask(); - struct ggml_tensor * KQ_mask_cross = llm_build_inp_KQ_mask_cross(); - - for (int il = 0; il < n_layer; ++il) { - struct ggml_tensor * inpSA = inpL; - - // norm - cur = llm_build_norm(ctx0, inpL, hparams, - model.layers[il].attn_norm, NULL, - LLM_NORM_RMS, cb, il); - cb(cur, "attn_norm", il); - - // self-attention - { - struct ggml_tensor * Qcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wq, cur); - cb(Qcur, "Qcur", il); - - struct ggml_tensor * Kcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wk, cur); - cb(Kcur, "Kcur", il); - - struct ggml_tensor * Vcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wv, cur); - cb(Vcur, "Vcur", il); - - llm_build_kv_store(ctx0, hparams, cparams, kv_self, gf, Kcur, Vcur, n_tokens, kv_head, cb, il); - - struct ggml_tensor * k = - ggml_view_3d(ctx0, kv_self.k_l[il], - n_embd_head_k, n_kv, n_head_kv, - ggml_row_size(kv_self.k_l[il]->type, n_embd_k_gqa), - ggml_row_size(kv_self.k_l[il]->type, n_embd_head_k), - 0); - cb(k, "k", il); - - struct ggml_tensor * v = - ggml_view_3d(ctx0, kv_self.v_l[il], - n_kv, n_embd_head_v, n_head_kv, - ggml_element_size(kv_self.v_l[il])*n_ctx, - ggml_element_size(kv_self.v_l[il])*n_ctx*n_embd_head_v, - 0); - cb(v, "v", il); - - Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens); - - struct ggml_tensor * q = ggml_permute(ctx0, Qcur, 0, 2, 1, 3); - - struct ggml_tensor * kq = ggml_mul_mat(ctx0, k, q); - cb(kq, "kq", il); - - struct ggml_tensor * attn_rel_b = model.layers[il].attn_rel_b ? 
model.layers[il].attn_rel_b : model.layers[0].attn_rel_b; - struct ggml_tensor * pos_bias = llm_build_pos_bias(pos_bucket_dec, attn_rel_b); - struct ggml_tensor * kq_b = ggml_add(ctx0, kq, pos_bias); - cb(kq_b, "kq_b", il); - - kq = ggml_soft_max_ext(ctx0, kq_b, KQ_mask_dec, 1.0f, hparams.f_max_alibi_bias); - cb(kq, "kq_soft_max_ext", il); - - struct ggml_tensor * kqv = ggml_mul_mat(ctx0, v, kq); - cb(kqv, "kqv", il); - - struct ggml_tensor * kqv_merged = ggml_permute(ctx0, kqv, 0, 2, 1, 3); - cb(kqv_merged, "kqv_merged", il); - - cur = ggml_cont_2d(ctx0, kqv_merged, n_embd_gqa, n_tokens); - cb(cur, "kqv_merged_cont", il); - - ggml_build_forward_expand(gf, cur); - - cur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wo, cur); - cb(cur, "kqv_out", il); - } - - cur = ggml_add(ctx0, cur, inpSA); - cb(cur, "cross_inp", il); - - struct ggml_tensor * inpCA = cur; - - // norm - cur = llm_build_norm(ctx0, cur, hparams, - model.layers[il].attn_norm_cross, NULL, - LLM_NORM_RMS, cb, il); - cb(cur, "attn_norm_cross", il); - - // cross-attention - { - struct ggml_tensor * Qcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wq_cross, cur); - cb(Qcur, "Qcur", il); - - struct ggml_tensor * Kcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wk_cross, embd_enc); - cb(Kcur, "Kcur", il); - - struct ggml_tensor * Vcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wv_cross, embd_enc); - cb(Vcur, "Vcur", il); - - Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens); - Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_outputs_enc); - - struct ggml_tensor * q = ggml_permute(ctx0, Qcur, 0, 2, 1, 3); - struct ggml_tensor * k = ggml_cont(ctx0, ggml_permute(ctx0, Kcur, 0, 2, 1, 3)); - - struct ggml_tensor * kq = ggml_mul_mat(ctx0, k, q); - cb(kq, "kq", il); - - kq = ggml_soft_max_ext(ctx0, kq, KQ_mask_cross, 1.0f, hparams.f_max_alibi_bias); - cb(kq, "kq_soft_max_ext", il); - - struct ggml_tensor * v = ggml_cont(ctx0, ggml_transpose(ctx0, ggml_reshape_2d(ctx0, Vcur, n_embd_gqa, n_outputs_enc))); - cb(v, "v", il); - - struct ggml_tensor * kqv = ggml_mul_mat(ctx0, ggml_reshape_3d(ctx0, v, n_outputs_enc, n_embd_head, n_head_kv), kq); - cb(kqv, "kqv", il); - - struct ggml_tensor * kqv_merged = ggml_permute(ctx0, kqv, 0, 2, 1, 3); - cb(kqv_merged, "kqv_merged", il); - - cur = ggml_cont_2d(ctx0, kqv_merged, n_embd_gqa, n_tokens); - cb(cur, "kqv_merged_cont", il); - - ggml_build_forward_expand(gf, cur); - - cur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wo_cross, cur); - cb(cur, "kqv_out", il); - } - - if (il == n_layer - 1) { - // skip computing output for unused tokens - struct ggml_tensor * inp_out_ids = build_inp_out_ids(); - n_tokens = n_outputs; - cur = ggml_get_rows(ctx0, cur, inp_out_ids); - inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids); - inpCA = ggml_get_rows(ctx0, inpCA, inp_out_ids); - } - - struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpCA); - cb(ffn_inp, "ffn_inp", il); - - // feed-forward network - { - cur = llm_build_norm(ctx0, ffn_inp, hparams, - model.layers[il].ffn_norm, NULL, - LLM_NORM_RMS, cb, il); - cb(cur, "ffn_norm", il); - - // T5 uses relu, flan-T5 uses gelu-gated - cur = llm_build_ffn(ctx0, lctx, cur, - model.layers[il].ffn_up, NULL, NULL, - model.layers[il].ffn_gate, NULL, NULL, - model.layers[il].ffn_down, NULL, NULL, - NULL, - model.layers[il].ffn_gate_enc ? LLM_FFN_GELU : LLM_FFN_RELU, - model.layers[il].ffn_gate_enc ? 
LLM_FFN_PAR : LLM_FFN_SEQ, - cb, il); - cb(cur, "ffn_out", il); - } - - cur = ggml_add(ctx0, cur, ffn_inp); - cb(cur, "ffn_out", il); - - ggml_tensor * layer_dir = lctx.cvec.tensor_for(il); - if (layer_dir != nullptr) { - cur = ggml_add(ctx0, cur, layer_dir); - } - cb(cur, "l_out", il); - - // input for next layer - inpL = cur; - } - - cur = inpL; - cb(cur, "result_embd", -1); - - cur = llm_build_norm(ctx0, cur, hparams, - model.output_norm, NULL, - LLM_NORM_RMS, cb, -1); - cb(cur, "result_norm", -1); - - // lm_head - cur = llm_build_lora_mm(lctx, ctx0, model.output, cur); - cb(cur, "result_output", -1); - - ggml_build_forward_expand(gf, cur); - - return gf; - } - - struct ggml_cgraph * build_jais() { - struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, model.max_nodes(), false); - - const int64_t n_embd_head = hparams.n_embd_head_v; - const int64_t n_embd_gqa = hparams.n_embd_v_gqa(); - GGML_ASSERT(n_embd_head == hparams.n_embd_head_k); - - struct ggml_tensor * cur; - struct ggml_tensor * inpL; - - inpL = llm_build_inp_embd(ctx0, lctx, hparams, ubatch, model.tok_embd, cb); - - // KQ_mask (mask for 1 head, it will be broadcasted to all heads) - struct ggml_tensor * KQ_mask = build_inp_KQ_mask(); - - for (int il = 0; il < n_layer; ++il) { - cur = llm_build_norm(ctx0, inpL, hparams, - model.layers[il].attn_norm, - model.layers[il].attn_norm_b, - LLM_NORM, cb, il); - cb(cur, "attn_norm", il); - - // self-attention - { - cur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wqkv, cur); - cb(cur, "wqkv", il); - - cur = ggml_add(ctx0, cur, model.layers[il].bqkv); - cb(cur, "bqkv", il); - - struct ggml_tensor * Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0*cur->nb[0]*(n_embd))); - struct ggml_tensor * Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*cur->nb[0]*(n_embd))); - struct ggml_tensor * Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*cur->nb[0]*(n_embd + n_embd_gqa))); - - cb(Qcur, "Qcur", il); - cb(Kcur, "Kcur", il); - cb(Vcur, "Vcur", il); - - Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens); - - cur = llm_build_kv(ctx0, lctx, kv_self, gf, - model.layers[il].wo, model.layers[il].bo, - Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/float(n_embd_head), cb, il); - } - - if (il == n_layer - 1) { - // skip computing output for unused tokens - struct ggml_tensor * inp_out_ids = build_inp_out_ids(); - cur = ggml_get_rows(ctx0, cur, inp_out_ids); - inpL = ggml_get_rows(ctx0, inpL, inp_out_ids); - } - - // add the input - struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpL); - cb(ffn_inp, "ffn_inp", il); - - // FF - { - cur = llm_build_norm(ctx0, ffn_inp, hparams, - model.layers[il].ffn_norm, - model.layers[il].ffn_norm_b, - LLM_NORM, cb, il); - cb(cur, "ffn_norm", il); - - cur = llm_build_ffn(ctx0, lctx, cur, - model.layers[il].ffn_up, model.layers[il].ffn_up_b, NULL, - model.layers[il].ffn_gate, model.layers[il].ffn_gate_b, NULL, - model.layers[il].ffn_down, model.layers[il].ffn_down_b, NULL, - NULL, - LLM_FFN_SILU, LLM_FFN_PAR, cb, il); - cb(cur, "ffn_out", il); - } - - inpL = ggml_add(ctx0, cur, ffn_inp); - cb(inpL, "l_out", il); - } - - cur = llm_build_norm(ctx0, inpL, hparams, - model.output_norm, - model.output_norm_b, - LLM_NORM, cb, -1); - cb(cur, "result_norm", -1); - - cur = llm_build_lora_mm(lctx, ctx0, model.output, cur); - - cb(cur, "result_output", -1); - - ggml_build_forward_expand(gf, cur); - - return gf; - } - - struct ggml_cgraph * 
build_chatglm() { - struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, model.max_nodes(), false); - - const int64_t n_embd_head = hparams.n_embd_head_v; - const int64_t n_embd_gqa = hparams.n_embd_v_gqa(); - GGML_ASSERT(n_embd_head == hparams.n_embd_head_k); - - struct ggml_tensor * cur; - struct ggml_tensor * inpL; - - inpL = llm_build_inp_embd(ctx0, lctx, hparams, ubatch, model.tok_embd, cb); - - // inp_pos - contains the positions - struct ggml_tensor * inp_pos = build_inp_pos(); - - // KQ_mask (mask for 1 head, it will be broadcasted to all heads) - struct ggml_tensor * KQ_mask = build_inp_KQ_mask(); - - for (int il = 0; il < n_layer; ++il) { - struct ggml_tensor * inpSA = inpL; - - cur = llm_build_norm(ctx0, inpL, hparams, - model.layers[il].attn_norm, - NULL, - LLM_NORM_RMS, cb, il); - cb(cur, "attn_norm", il); - - // self-attention - { - struct ggml_tensor * Qcur = nullptr; - struct ggml_tensor * Kcur = nullptr; - struct ggml_tensor * Vcur = nullptr; - if (model.layers[il].wqkv == nullptr) { - Qcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wq, cur); - if (model.layers[il].bq) { - Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq); - } - Kcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wk, cur); - if (model.layers[il].bk) { - Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk); - } - Vcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wv, cur); - if (model.layers[il].bv) { - Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv); - } - } else { - cur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wqkv, cur); - cb(cur, "wqkv", il); - if (model.layers[il].bqkv) { - cur = ggml_add(ctx0, cur, model.layers[il].bqkv); - cb(cur, "bqkv", il); - } - Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd))); - Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd))); - Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd + n_embd_gqa))); - } - cb(Qcur, "Qcur", il); - cb(Kcur, "Kcur", il); - cb(Vcur, "Vcur", il); - //printf("freq_base: %f freq_scale: %f ext_factor: %f attn_factor: %f\n", freq_base, freq_scale, ext_factor, attn_factor); - Qcur = ggml_rope_ext( - ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos, nullptr, - n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, - ext_factor, attn_factor, beta_fast, beta_slow - ); - cb(Qcur, "Qcur_rope", il); - - Kcur = ggml_rope_ext( - ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos, nullptr, - n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, - ext_factor, attn_factor, beta_fast, beta_slow - ); - cb(Kcur, "Kcur_rope", il); - - cur = llm_build_kv(ctx0, lctx, kv_self, gf, - model.layers[il].wo, NULL, - Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il); - - } - - if (il == n_layer - 1) { - // skip computing output for unused tokens - struct ggml_tensor * inp_out_ids = build_inp_out_ids(); - cur = ggml_get_rows(ctx0, cur, inp_out_ids); - inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids); - } - - // Add the input - struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA); - cb(ffn_inp, "ffn_inp", il); - - // FF - { - cur = llm_build_norm(ctx0, ffn_inp, hparams, - model.layers[il].ffn_norm, - NULL, - LLM_NORM_RMS, cb, il); - cb(cur, "ffn_norm", il); - - cur = llm_build_ffn(ctx0, lctx, cur, - model.layers[il].ffn_up, NULL, NULL, - NULL, NULL, NULL, - model.layers[il].ffn_down, NULL, NULL, - NULL, - 
LLM_FFN_SWIGLU, LLM_FFN_SEQ, cb, il); - cb(cur, "ffn_out", il); - - } - - inpL = ggml_add(ctx0, cur, ffn_inp); - cb(inpL, "l_out", il); - } - - cur = llm_build_norm(ctx0, inpL, hparams, - model.output_norm, - NULL, - LLM_NORM_RMS, cb, -1); - cb(cur, "result_norm", -1); - - cur = llm_build_lora_mm(lctx, ctx0, model.output, cur); - cb(cur, "result_output", -1); - - ggml_build_forward_expand(gf, cur); - - return gf; - } - - struct ggml_cgraph * build_nemotron() { - struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, model.max_nodes(), false); - - const int64_t n_embd_head = hparams.n_embd_head_v; - GGML_ASSERT(n_embd_head == hparams.n_embd_head_k); - //GGML_ASSERT(n_embd_head == hparams.n_rot); - - struct ggml_tensor * cur; - struct ggml_tensor * inpL; - - inpL = llm_build_inp_embd(ctx0, lctx, hparams, ubatch, model.tok_embd, cb); - - // inp_pos - contains the positions - struct ggml_tensor * inp_pos = build_inp_pos(); - - // KQ_mask (mask for 1 head, it will be broadcasted to all heads) - struct ggml_tensor * KQ_mask = build_inp_KQ_mask(); - - for (int il = 0; il < n_layer; ++il) { - struct ggml_tensor * inpSA = inpL; - - // norm - cur = llm_build_norm(ctx0, inpL, hparams, - model.layers[il].attn_norm, - model.layers[il].attn_norm_b, - LLM_NORM, cb, il); - cb(cur, "attn_norm", il); - - // self-attention - { - // compute Q and K and RoPE them - struct ggml_tensor * Qcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wq, cur); - cb(Qcur, "Qcur", il); - if (model.layers[il].bq) { - Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq); - cb(Qcur, "Qcur", il); - } - - struct ggml_tensor * Kcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wk, cur); - cb(Kcur, "Kcur", il); - if (model.layers[il].bk) { - Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk); - cb(Kcur, "Kcur", il); - } - - struct ggml_tensor * Vcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wv, cur); - cb(Vcur, "Vcur", il); - if (model.layers[il].bv) { - Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv); - cb(Vcur, "Vcur", il); - } - - Qcur = ggml_rope_ext( - ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos, nullptr, - n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, - ext_factor, attn_factor, beta_fast, beta_slow - ); - cb(Qcur, "Qcur", il); - - Kcur = ggml_rope_ext( - ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos, nullptr, - n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, - ext_factor, attn_factor, beta_fast, beta_slow - ); - cb(Kcur, "Kcur", il); - - cur = llm_build_kv(ctx0, lctx, kv_self, gf, - model.layers[il].wo, model.layers[il].bo, - Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il); - } - - if (il == n_layer - 1) { - // skip computing output for unused tokens - struct ggml_tensor * inp_out_ids = build_inp_out_ids(); - cur = ggml_get_rows(ctx0, cur, inp_out_ids); - inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids); - } - - struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA); - cb(ffn_inp, "ffn_inp", il); - - // feed-forward network - cur = llm_build_norm(ctx0, ffn_inp, hparams, - model.layers[il].ffn_norm, - model.layers[il].ffn_norm_b, - LLM_NORM, cb, il); - cb(cur, "ffn_norm", il); - - cur = llm_build_ffn(ctx0, lctx, cur, - model.layers[il].ffn_up, model.layers[il].ffn_up_b, NULL, - NULL, NULL, NULL, - model.layers[il].ffn_down, model.layers[il].ffn_down_b, NULL, - NULL, - LLM_FFN_RELU_SQR, LLM_FFN_SEQ, cb, il); - - cur = ggml_add(ctx0, cur, ffn_inp); - cb(cur, "ffn_out", il); - - cur = 
lctx.cvec.apply_to(ctx0, cur, il); - cb(cur, "l_out", il); - - // input for next layer - inpL = cur; - } - - cur = inpL; - - cur = llm_build_norm(ctx0, cur, hparams, - model.output_norm, model.output_norm_b, - LLM_NORM, cb, -1); - cb(cur, "result_norm", -1); - - // lm_head - cur = llm_build_lora_mm(lctx, ctx0, model.output, cur); - cb(cur, "result_output", -1); - - ggml_build_forward_expand(gf, cur); - - return gf; - } - - struct ggml_cgraph * build_exaone() { - struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, model.max_nodes(), false); - - // mutable variable, needed during the last layer of the computation to skip unused tokens - int32_t n_tokens = this->n_tokens; - - const int64_t n_embd_head = hparams.n_embd_head_v; - GGML_ASSERT(n_embd_head == hparams.n_embd_head_k); - GGML_ASSERT(n_embd_head == hparams.n_rot); - - struct ggml_tensor * cur; - struct ggml_tensor * inpL; - - inpL = llm_build_inp_embd(ctx0, lctx, hparams, ubatch, model.tok_embd, cb); - - // inp_pos - contains the positions - struct ggml_tensor * inp_pos = build_inp_pos(); - - // KQ_mask (mask for 1 head, it will be broadcasted to all heads) - struct ggml_tensor * KQ_mask = build_inp_KQ_mask(); - - for (int il = 0; il < n_layer; ++il) { - struct ggml_tensor * inpSA = inpL; - - // norm - cur = llm_build_norm(ctx0, inpL, hparams, - model.layers[il].attn_norm, NULL, - LLM_NORM_RMS, cb, il); - cb(cur, "attn_norm", il); - - // self-attention - { - // rope freq factors for llama3; may return nullptr for llama2 and other models - struct ggml_tensor * rope_factors = build_rope_factors(il); - - // compute Q and K and RoPE them - struct ggml_tensor * Qcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wq, cur); - cb(Qcur, "Qcur", il); - if (model.layers[il].bq) { - Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq); - cb(Qcur, "Qcur", il); - } - - struct ggml_tensor * Kcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wk, cur); - cb(Kcur, "Kcur", il); - if (model.layers[il].bk) { - Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk); - cb(Kcur, "Kcur", il); - } - - struct ggml_tensor * Vcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wv, cur); - cb(Vcur, "Vcur", il); - if (model.layers[il].bv) { - Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv); - cb(Vcur, "Vcur", il); - } - - Qcur = ggml_rope_ext( - ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos, rope_factors, - n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, - ext_factor, attn_factor, beta_fast, beta_slow - ); - cb(Qcur, "Qcur", il); - - Kcur = ggml_rope_ext( - ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos, rope_factors, - n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, - ext_factor, attn_factor, beta_fast, beta_slow - ); - cb(Kcur, "Kcur", il); - - cur = llm_build_kv(ctx0, lctx, kv_self, gf, - model.layers[il].wo, model.layers[il].bo, - Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il); - } - - if (il == n_layer - 1) { - // skip computing output for unused tokens - struct ggml_tensor * inp_out_ids = build_inp_out_ids(); - n_tokens = n_outputs; - cur = ggml_get_rows(ctx0, cur, inp_out_ids); - inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids); - } - - struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA); - cb(ffn_inp, "ffn_inp", il); - - // feed-forward network - cur = llm_build_norm(ctx0, ffn_inp, hparams, - model.layers[il].ffn_norm, NULL, - LLM_NORM_RMS, cb, il); - cb(cur, "ffn_norm", il); - - cur = llm_build_ffn(ctx0, lctx, cur, - model.layers[il].ffn_up, 
NULL, NULL, - model.layers[il].ffn_gate, NULL, NULL, - model.layers[il].ffn_down, NULL, NULL, - NULL, - LLM_FFN_SILU, LLM_FFN_PAR, cb, il); - cb(cur, "ffn_out", il); - - cur = ggml_add(ctx0, cur, ffn_inp); - cb(cur, "ffn_out", il); - - cur = lctx.cvec.apply_to(ctx0, cur, il); - cb(cur, "l_out", il); - - // input for next layer - inpL = cur; - } - - cur = inpL; - - cur = llm_build_norm(ctx0, cur, hparams, - model.output_norm, NULL, - LLM_NORM_RMS, cb, -1); - cb(cur, "result_norm", -1); - - // lm_head - cur = llm_build_lora_mm(lctx, ctx0, model.output, cur); - cb(cur, "result_output", -1); - - ggml_build_forward_expand(gf, cur); - - return gf; - } - - ggml_cgraph * build_rwkv6() { - struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, model.max_nodes(), false); - - // Token shift state dimensions should be 2 * n_emb - GGML_ASSERT(n_embd == hparams.n_embd_k_s() / 2); - - const int64_t n_seqs = ubatch.n_seqs; - const int64_t n_seq_tokens = ubatch.n_seq_tokens; - const int64_t n_tokens = ubatch.n_tokens; - GGML_ASSERT(n_seqs != 0); - GGML_ASSERT(ubatch.equal_seqs); - GGML_ASSERT(n_tokens == n_seq_tokens * n_seqs); - - struct ggml_tensor * cur; - struct ggml_tensor * inpL; - struct ggml_tensor * state_copy = build_inp_s_copy(); - struct ggml_tensor * state_mask = build_inp_s_mask(); - - inpL = llm_build_inp_embd(ctx0, lctx, hparams, ubatch, model.tok_embd, cb); - inpL = llm_build_norm(ctx0, inpL, hparams, model.tok_norm, model.tok_norm_b, LLM_NORM, cb, -1); - - for (int il = 0; il < n_layer; ++il) { - const llama_layer * layer = &model.layers[il]; - - // (ab)using the KV cache to store the states - struct ggml_tensor * token_shift = llm_build_copy_mask_state(ctx0, - gf, kv_self.k_l[il], state_copy, state_mask, - hparams.n_embd_k_s(), kv_self.size, kv_head, n_kv, n_seqs); - struct ggml_tensor * wkv_states = llm_build_copy_mask_state(ctx0, - gf, kv_self.v_l[il], state_copy, state_mask, - hparams.n_embd_v_s(), kv_self.size, kv_head, n_kv, n_seqs); - - cur = ggml_reshape_3d(ctx0, inpL, n_embd, n_seq_tokens, n_seqs); - token_shift = ggml_reshape_3d(ctx0, token_shift, n_embd, 2, n_seqs); - - struct ggml_tensor * att_shift = ggml_view_3d(ctx0, token_shift, n_embd, 1, n_seqs, token_shift->nb[1], token_shift->nb[2], 0); - struct ggml_tensor * ffn_shift = ggml_view_3d(ctx0, token_shift, n_embd, 1, n_seqs, token_shift->nb[1], token_shift->nb[2], n_embd * ggml_element_size(token_shift)); - - struct ggml_tensor * x_norm_att = llm_build_norm(ctx0, cur, hparams, layer->attn_norm, layer->attn_norm_b, LLM_NORM, cb, il); - struct ggml_tensor * x_prev = ggml_concat( - ctx0, - att_shift, - ggml_view_3d(ctx0, x_norm_att, n_embd, n_seq_tokens - 1, n_seqs, x_norm_att->nb[1], x_norm_att->nb[2], 0), - 1 - ); - - cur = ggml_add(ctx0, cur, llm_build_rwkv6_time_mix(lctx, ctx0, layer, x_norm_att, x_prev, &wkv_states, hparams.wkv_head_size, n_embd / hparams.wkv_head_size)); - ggml_build_forward_expand(gf, cur); - ggml_build_forward_expand( - gf, - ggml_cpy( - ctx0, - wkv_states, - ggml_view_1d( - ctx0, - kv_self.v_l[il], - hparams.n_embd_v_s() * n_seqs, - hparams.n_embd_v_s() * kv_head * ggml_element_size(kv_self.v_l[il]) - ) - ) - ); - - struct ggml_tensor * x_norm_ffn = llm_build_norm(ctx0, cur, hparams, layer->attn_norm_2, layer->attn_norm_2_b, LLM_NORM, cb, il); - x_prev = ggml_concat( - ctx0, - ffn_shift, - ggml_view_3d(ctx0, x_norm_ffn, n_embd, n_seq_tokens - 1, n_seqs, x_norm_ffn->nb[1], x_norm_ffn->nb[2], 0), - 1 - ); - cur = ggml_add(ctx0, cur, llm_build_rwkv6_channel_mix(lctx, ctx0, layer, x_norm_ffn, x_prev)); - 
ggml_build_forward_expand(gf, cur); - - struct ggml_tensor * last_norm_att = ggml_view_3d(ctx0, x_norm_att, n_embd, 1, n_seqs, x_norm_att->nb[1], x_norm_att->nb[2], (n_seq_tokens-1)*n_embd*ggml_element_size(x_norm_att)); - struct ggml_tensor * last_norm_ffn = ggml_view_3d(ctx0, x_norm_ffn, n_embd, 1, n_seqs, x_norm_ffn->nb[1], x_norm_ffn->nb[2], (n_seq_tokens-1)*n_embd*ggml_element_size(x_norm_ffn)); - - token_shift = ggml_concat(ctx0, last_norm_att, last_norm_ffn, 1); - - ggml_build_forward_expand( - gf, - ggml_cpy( - ctx0, - ggml_view_1d(ctx0, token_shift, n_embd * n_seqs * 2, 0), - ggml_view_1d(ctx0, kv_self.k_l[il], hparams.n_embd_k_s() * n_seqs, hparams.n_embd_k_s() * kv_head * ggml_element_size(kv_self.k_l[il])) - ) - ); - - if (hparams.rescale_every_n_layers != 0 && (il + 1) % hparams.rescale_every_n_layers == 0) { - cur = ggml_scale(ctx0, cur, 0.5F); - } - - cur = lctx.cvec.apply_to(ctx0, cur, il); - cb(cur, "l_out", il); - - // input for next layer - inpL = cur; - } - - cur = inpL; - struct ggml_tensor * inp_out_ids = build_inp_out_ids(); - cur = ggml_reshape_2d(ctx0, cur, n_embd, n_tokens); - cur = ggml_get_rows(ctx0, cur, inp_out_ids); - - cur = llm_build_norm(ctx0, cur, hparams, model.output_norm, model.output_norm_b, LLM_NORM, cb, -1); - cb(cur, "result_norm", -1); - - cur = llm_build_lora_mm(lctx, ctx0, model.output, cur); - cb(cur, "result_output", -1); - - ggml_build_forward_expand(gf, cur); - - return gf; - } - - // ref: https://huggingface.co/recursal/QRWKV6-32B-Instruct-Preview-v0.1/blob/main/modeling_rwkv6qwen2.py - ggml_cgraph * build_rwkv6qwen2() { - struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, model.max_nodes(), false); - - GGML_ASSERT(n_embd == hparams.n_embd_k_s()); - - const int64_t n_seqs = ubatch.n_seqs; - const int64_t n_seq_tokens = ubatch.n_seq_tokens; - const int64_t n_tokens = ubatch.n_tokens; - GGML_ASSERT(n_seqs != 0); - GGML_ASSERT(ubatch.equal_seqs); - GGML_ASSERT(n_tokens == n_seq_tokens * n_seqs); - - struct ggml_tensor * cur; - struct ggml_tensor * inpL; - struct ggml_tensor * state_copy = build_inp_s_copy(); - struct ggml_tensor * state_mask = build_inp_s_mask(); - - inpL = llm_build_inp_embd(ctx0, lctx, hparams, ubatch, model.tok_embd, cb); - - for (int il = 0; il < n_layer; ++il) { - const llama_layer * layer = &model.layers[il]; - - // (ab)using the KV cache to store the states - struct ggml_tensor * token_shift = llm_build_copy_mask_state(ctx0, - gf, kv_self.k_l[il], state_copy, state_mask, - hparams.n_embd_k_s(), kv_self.size, kv_head, n_kv, n_seqs); - struct ggml_tensor * wkv_states = llm_build_copy_mask_state(ctx0, - gf, kv_self.v_l[il], state_copy, state_mask, - hparams.n_embd_v_s(), kv_self.size, kv_head, n_kv, n_seqs); - - cur = ggml_reshape_3d(ctx0, inpL, n_embd, n_seq_tokens, n_seqs); - token_shift = ggml_reshape_3d(ctx0, token_shift, n_embd, 1, n_seqs); - - struct ggml_tensor * x_norm_att = llm_build_norm(ctx0, cur, hparams, layer->attn_norm, layer->attn_norm_b, LLM_NORM_RMS, cb, il); - struct ggml_tensor * x_prev = ggml_concat( - ctx0, - token_shift, - ggml_view_3d(ctx0, x_norm_att, n_embd, n_seq_tokens - 1, n_seqs, x_norm_att->nb[1], x_norm_att->nb[2], 0), - 1 - ); - - struct ggml_tensor * last_norm_att = ggml_view_3d(ctx0, x_norm_att, n_embd, 1, n_seqs, x_norm_att->nb[1], x_norm_att->nb[2], (n_seq_tokens-1)*n_embd*ggml_element_size(x_norm_att)); - ggml_build_forward_expand( - gf, - ggml_cpy( - ctx0, - ggml_view_1d(ctx0, last_norm_att, n_embd * n_seqs, 0), - ggml_view_1d(ctx0, kv_self.k_l[il], hparams.n_embd_k_s() * n_seqs, 
hparams.n_embd_k_s() * kv_head * ggml_element_size(kv_self.k_l[il])) - ) - ); - - struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, llm_build_rwkv6_time_mix(lctx, ctx0, layer, x_norm_att, x_prev, &wkv_states, hparams.wkv_head_size, hparams.n_head_kv())); - ggml_build_forward_expand(gf, ffn_inp); - ggml_build_forward_expand( - gf, - ggml_cpy( - ctx0, - wkv_states, - ggml_view_1d( - ctx0, - kv_self.v_l[il], - hparams.n_embd_v_s() * n_seqs, - hparams.n_embd_v_s() * kv_head * ggml_element_size(kv_self.v_l[il]) - ) - ) - ); - - cb(ffn_inp, "ffn_inp", il); - - // feed-forward network - cur = llm_build_norm(ctx0, ffn_inp, hparams, - model.layers[il].ffn_norm, NULL, - LLM_NORM_RMS, cb, il); - cb(cur, "ffn_norm", il); - - cur = llm_build_ffn(ctx0, lctx, cur, - model.layers[il].ffn_up, NULL, NULL, - model.layers[il].ffn_gate, NULL, NULL, - model.layers[il].ffn_down, NULL, NULL, - NULL, - LLM_FFN_SILU, LLM_FFN_PAR, cb, il); - cb(cur, "ffn_out", il); - - cur = ggml_add(ctx0, cur, ffn_inp); - cur = lctx.cvec.apply_to(ctx0, cur, il); - cb(cur, "l_out", il); - - // input for next layer - inpL = cur; - } - - cur = inpL; - struct ggml_tensor * inp_out_ids = build_inp_out_ids(); - cur = ggml_reshape_2d(ctx0, cur, n_embd, n_tokens); - cur = ggml_get_rows(ctx0, cur, inp_out_ids); - - cur = llm_build_norm(ctx0, cur, hparams, model.output_norm, model.output_norm_b, LLM_NORM_RMS, cb, -1); - cb(cur, "result_norm", -1); - - cur = llm_build_lora_mm(lctx, ctx0, model.output, cur); - cb(cur, "result_output", -1); - - ggml_build_forward_expand(gf, cur); - - return gf; - } - - // ref: https://github.com/facebookresearch/chameleon - // based on the original build_llama() function, changes: - // * qk-norm - // * swin-norm - // * removed bias - // * removed MoE - struct ggml_cgraph * build_chameleon() { - struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, model.max_nodes(), false); - - // mutable variable, needed during the last layer of the computation to skip unused tokens - int32_t n_tokens = this->n_tokens; - - const int64_t n_embd_head = hparams.n_embd_head_v; - GGML_ASSERT(n_embd_head == hparams.n_embd_head_k); - GGML_ASSERT(n_embd_head == hparams.n_rot); - - struct ggml_tensor * cur; - struct ggml_tensor * inpL; - - inpL = llm_build_inp_embd(ctx0, lctx, hparams, ubatch, model.tok_embd, cb); - - // inp_pos - contains the positions - struct ggml_tensor * inp_pos = build_inp_pos(); - - // KQ_mask (mask for 1 head, it will be broadcasted to all heads) - struct ggml_tensor * KQ_mask = build_inp_KQ_mask(); - - for (int il = 0; il < n_layer; ++il) { - struct ggml_tensor * inpSA = inpL; - - // norm - if (hparams.swin_norm) { - cur = inpL; - } else { - cur = llm_build_norm(ctx0, inpL, hparams, - model.layers[il].attn_norm, NULL, - LLM_NORM_RMS, cb, il); - cb(cur, "attn_norm", il); - } - - // self-attention - { - // compute Q and K and RoPE them - struct ggml_tensor * Qcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wq, cur); - cb(Qcur, "Qcur", il); - - struct ggml_tensor * Kcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wk, cur); - cb(Kcur, "Kcur", il); - - struct ggml_tensor * Vcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wv, cur); - cb(Vcur, "Vcur", il); - - if (model.layers[il].attn_q_norm) { - Qcur = ggml_view_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens, - ggml_element_size(Qcur) * n_embd_head, - ggml_element_size(Qcur) * n_embd_head * n_head, - 0); - cb(Qcur, "Qcur", il); - - Qcur = llm_build_norm(ctx0, Qcur, hparams, - model.layers[il].attn_q_norm, - model.layers[il].attn_q_norm_b, - LLM_NORM, 
cb, il); - cb(Qcur, "Qcur", il); - } - - if (model.layers[il].attn_k_norm) { - Kcur = ggml_view_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens, - ggml_element_size(Kcur) * n_embd_head, - ggml_element_size(Kcur) * n_embd_head * n_head_kv, - 0); - cb(Kcur, "Kcur", il); - - Kcur = llm_build_norm(ctx0, Kcur, hparams, - model.layers[il].attn_k_norm, - model.layers[il].attn_k_norm_b, - LLM_NORM, cb, il); - cb(Kcur, "Kcur", il); - } - - Qcur = ggml_rope_ext( - ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos, nullptr, - n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, - ext_factor, attn_factor, beta_fast, beta_slow - ); - cb(Qcur, "Qcur", il); - - Kcur = ggml_rope_ext( - ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos, nullptr, - n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, - ext_factor, attn_factor, beta_fast, beta_slow - ); - cb(Kcur, "Kcur", il); - - cur = llm_build_kv(ctx0, lctx, kv_self, gf, - model.layers[il].wo, nullptr, - Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il); - - if (hparams.swin_norm) { - cur = llm_build_norm(ctx0, cur, hparams, - model.layers[il].attn_norm, NULL, - LLM_NORM_RMS, cb, il); - } - } - - if (il == n_layer - 1) { - // skip computing output for unused tokens - struct ggml_tensor * inp_out_ids = build_inp_out_ids(); - n_tokens = n_outputs; - cur = ggml_get_rows(ctx0, cur, inp_out_ids); - inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids); - } - - struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA); - cb(ffn_inp, "ffn_inp", il); - - // feed-forward network - if (!hparams.swin_norm) { - cur = llm_build_norm(ctx0, ffn_inp, hparams, - model.layers[il].ffn_norm, NULL, - LLM_NORM_RMS, cb, il); - cb(cur, "ffn_norm", il); - } - - cur = llm_build_ffn(ctx0, lctx, cur, - model.layers[il].ffn_up, NULL, NULL, - model.layers[il].ffn_gate, NULL, NULL, - model.layers[il].ffn_down, NULL, NULL, - NULL, - LLM_FFN_SILU, LLM_FFN_PAR, cb, il); - cb(cur, "ffn_out", il); - - if (hparams.swin_norm) { - cur = llm_build_norm(ctx0, cur, hparams, - model.layers[il].ffn_norm, NULL, - LLM_NORM_RMS, cb, il); - cb(cur, "ffn_norm", il); - } - - cur = ggml_add(ctx0, cur, ffn_inp); - cb(cur, "ffn_out", il); - - cur = lctx.cvec.apply_to(ctx0, cur, il); - cb(cur, "l_out", il); - - // input for next layer - inpL = cur; - } - - cur = inpL; - - cur = llm_build_norm(ctx0, cur, hparams, - model.output_norm, NULL, - LLM_NORM_RMS, cb, -1); - cb(cur, "result_norm", -1); - - // lm_head - cur = llm_build_lora_mm(lctx, ctx0, model.output, cur); - cb(cur, "result_output_with_img_logits", -1); - - // TODO: this suppresses the output of image tokens, which is required to enable text-only outputs. - // Needs to be removed once image outputs are supported. 
- int img_token_end_idx = 8196; - int img_token_start_idx = 4; - int num_img_tokens = img_token_end_idx - img_token_start_idx; - // creates 1d tensor of size num_img_tokens and values -FLT_MAX, - // which ensures that text token values are always at least larger than image token values - struct ggml_tensor * img_logits = ggml_new_tensor_1d(ctx0, GGML_TYPE_F32, num_img_tokens); - img_logits = ggml_clamp(ctx0, img_logits, -FLT_MAX, -FLT_MAX); - cb(img_logits, "img_logits", -1); - cur = ggml_set_1d(ctx0, cur, img_logits, ggml_element_size(cur) * img_token_start_idx); - cb(cur, "result_output", -1); - - ggml_build_forward_expand(gf, cur); - - return gf; - } - - struct ggml_cgraph * build_wavtokenizer_dec() { - struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, model.max_nodes(), false); - - struct ggml_tensor * cur; - struct ggml_tensor * inpL; - - inpL = llm_build_inp_embd(ctx0, lctx, hparams, ubatch, model.tok_embd, cb); - - cur = ggml_cont(ctx0, ggml_transpose(ctx0, inpL)); - - cur = ggml_conv_1d_ph(ctx0, model.conv1d, cur, 1, 1); - cur = ggml_add(ctx0, cur, model.conv1d_b); - - // posnet - for (uint32_t il = 0; il < hparams.posnet.n_layer; ++il) { - const auto & layer = model.layers[il].posnet; - - inpL = cur; - - switch (il) { - case 0: - case 1: - case 3: - case 4: - { - cur = llm_build_norm(ctx0, cur, hparams, - layer.norm1, - layer.norm1_b, - LLM_NORM_GROUP, cb, 0); - - cur = ggml_mul(ctx0, ggml_sigmoid(ctx0, cur), cur); - - cur = ggml_conv_1d_ph(ctx0, layer.conv1, cur, 1, 1); - cur = ggml_add(ctx0, cur, layer.conv1_b); - - cur = llm_build_norm(ctx0, cur, hparams, - layer.norm2, - layer.norm2_b, - LLM_NORM_GROUP, cb, 0); - - cur = ggml_mul(ctx0, ggml_sigmoid(ctx0, cur), cur); - - cur = ggml_conv_1d_ph(ctx0, layer.conv2, cur, 1, 1); - cur = ggml_add(ctx0, cur, layer.conv2_b); - - cur = ggml_add(ctx0, cur, inpL); - } break; - case 2: - { - cur = llm_build_norm(ctx0, cur, hparams, - layer.attn_norm, - layer.attn_norm_b, - LLM_NORM_GROUP, cb, 0); - - struct ggml_tensor * q; - struct ggml_tensor * k; - struct ggml_tensor * v; - - q = ggml_conv_1d_ph(ctx0, layer.attn_q, cur, 1, 1); - k = ggml_conv_1d_ph(ctx0, layer.attn_k, cur, 1, 1); - v = ggml_conv_1d_ph(ctx0, layer.attn_v, cur, 1, 1); - - q = ggml_add(ctx0, q, layer.attn_q_b); - k = ggml_add(ctx0, k, layer.attn_k_b); - v = ggml_add(ctx0, v, layer.attn_v_b); - - q = ggml_cont(ctx0, ggml_transpose(ctx0, q)); - k = ggml_cont(ctx0, ggml_transpose(ctx0, k)); - - struct ggml_tensor * kq = ggml_mul_mat(ctx0, k, q); - - kq = ggml_soft_max_ext(ctx0, kq, nullptr, 1.0f/sqrtf(float(hparams.posnet.n_embd)), 0.0f); - - cur = ggml_mul_mat(ctx0, kq, v); - - cur = ggml_conv_1d_ph(ctx0, layer.attn_o, cur, 1, 1); - cur = ggml_add(ctx0, cur, layer.attn_o_b); - - cur = ggml_add(ctx0, cur, inpL); - } break; - case 5: - { - cur = llm_build_norm(ctx0, cur, hparams, - layer.norm, - layer.norm_b, - LLM_NORM_GROUP, cb, 0); - } break; - default: GGML_ABORT("unknown posnet layer"); - }; - } - - cur = ggml_cont(ctx0, ggml_transpose(ctx0, cur)); - - cur = llm_build_norm(ctx0, cur, hparams, - model.tok_norm, - model.tok_norm_b, - LLM_NORM, cb, -1); - - cur = ggml_cont(ctx0, ggml_transpose(ctx0, cur)); - - inpL = cur; - - // convnext - for (uint32_t il = 0; il < hparams.convnext.n_layer; ++il) { - const auto & layer = model.layers[il].convnext; - - cur = inpL; - - cur = ggml_conv_1d_dw_ph(ctx0, layer.dw, cur, 1, 1); - cur = ggml_add(ctx0, cur, layer.dw_b); - - cur = ggml_cont(ctx0, ggml_transpose(ctx0, cur)); - - cur = llm_build_norm(ctx0, cur, hparams, - layer.norm, 
- layer.norm_b, - LLM_NORM, cb, -1); - - cur = llm_build_ffn(ctx0, lctx, cur, - layer.pw1, layer.pw1_b, NULL, - NULL, NULL, NULL, - layer.pw2, layer.pw2_b, NULL, - NULL, - LLM_FFN_GELU, LLM_FFN_SEQ, cb, il); - - cur = ggml_mul(ctx0, cur, layer.gamma); - - cur = ggml_cont(ctx0, ggml_transpose(ctx0, cur)); - - inpL = ggml_add(ctx0, cur, inpL); - } - - cur = inpL; - - cur = ggml_cont(ctx0, ggml_transpose(ctx0, cur)); - - cur = llm_build_norm(ctx0, cur, hparams, - model.output_norm, - model.output_norm_b, - LLM_NORM, cb, -1); - - // lm_head - cur = llm_build_lora_mm(lctx, ctx0, model.output, cur); - - cur = ggml_add(ctx0, cur, model.output_b); - cb(cur, "result_embd", -1); - - ggml_build_forward_expand(gf, cur); - - return gf; - } -}; - -static struct ggml_cgraph * llama_build_graph_defrag(llama_context & lctx, const std::vector & ids) { - llama_ubatch dummy = {}; - dummy.equal_seqs = true; - - llm_build_cb cb = [&](struct ggml_tensor * , const char * , int ) { }; - - struct llm_build_context llm(lctx, dummy, cb, false); - - llm.init(); - - struct ggml_cgraph * result = llm.build_defrag(ids); - - llm.free(); - - return result; -} - -static struct ggml_cgraph * llama_build_graph_k_shift(llama_context & lctx) { - llama_ubatch dummy = {}; - dummy.equal_seqs = true; - - llm_build_cb cb = [&](struct ggml_tensor * , const char * , int ) { }; - - struct llm_build_context llm(lctx, dummy, cb, false); - - llm.init(); - - struct ggml_cgraph * result = llm.build_k_shift(); - - llm.free(); - - return result; -} - -static struct ggml_cgraph * llama_build_graph( - llama_context & lctx, - const llama_ubatch & ubatch, - bool worst_case) { - const auto & model = lctx.model; - - // this callback allows us to apply custom logic to each tensor (e.g. ggml-alloc, offloading, etc.) 
- llm_build_cb cb = [&](struct ggml_tensor * cur, const char * name, int il) { - if (il >= 0) { - ggml_format_name(cur, "%s-%d", name, il); - } else { - ggml_set_name(cur, name); - } - - if (!lctx.cparams.offload_kqv) { - if (strcmp(name, "kqv_merged_cont") == 0) { - // all nodes between the KV store and the attention output are run on the CPU - ggml_backend_sched_set_tensor_backend(lctx.sched.get(), cur, lctx.backend_cpu); - } - } - - // norm may be automatically assigned to the backend of the previous layer, increasing data transfer between backends - // FIXME: fix in ggml_backend_sched - const bool full_offload = lctx.model.params.n_gpu_layers > (int) lctx.model.hparams.n_layer; - if (ubatch.n_tokens < 32 || full_offload) { - if (il != -1 && strcmp(name, "norm") == 0) { - const auto & dev_layer = lctx.model.dev_layer(il); - for (auto & backend : lctx.backends) { - if (ggml_backend_get_device(backend.get()) == dev_layer) { - if (ggml_backend_supports_op(backend.get(), cur)) { - ggml_backend_sched_set_tensor_backend(lctx.sched.get(), cur, backend.get()); - } - } - } - } - } - }; - - struct ggml_cgraph * result = NULL; - - struct llm_build_context llm(lctx, ubatch, cb, worst_case); - - llm.init(); - - switch (model.arch) { - case LLM_ARCH_LLAMA: - case LLM_ARCH_MINICPM: - case LLM_ARCH_GRANITE: - case LLM_ARCH_GRANITE_MOE: - { - result = llm.build_llama(); - } break; - case LLM_ARCH_DECI: - { - result = llm.build_deci(); - } break; - case LLM_ARCH_BAICHUAN: - { - result = llm.build_baichuan(); - } break; - case LLM_ARCH_FALCON: - { - result = llm.build_falcon(); - } break; - case LLM_ARCH_GROK: - { - result = llm.build_grok(); - } break; - case LLM_ARCH_STARCODER: - { - result = llm.build_starcoder(); - } break; - case LLM_ARCH_REFACT: - { - result = llm.build_refact(); - } break; - case LLM_ARCH_BERT: - case LLM_ARCH_JINA_BERT_V2: - case LLM_ARCH_NOMIC_BERT: - { - result = llm.build_bert(); - } break; - case LLM_ARCH_BLOOM: - { - result = llm.build_bloom(); - } break; - case LLM_ARCH_MPT: - { - result = llm.build_mpt(); - } break; - case LLM_ARCH_STABLELM: - { - result = llm.build_stablelm(); - } break; - case LLM_ARCH_QWEN: - { - result = llm.build_qwen(); - } break; - case LLM_ARCH_QWEN2: - { - result = llm.build_qwen2(); - } break; - case LLM_ARCH_QWEN2VL: - { - lctx.n_pos_per_token = 4; - result = llm.build_qwen2vl(); - } break; - case LLM_ARCH_QWEN2MOE: - { - result = llm.build_qwen2moe(); - } break; - case LLM_ARCH_PHI2: - { - result = llm.build_phi2(); - } break; - case LLM_ARCH_PHI3: - case LLM_ARCH_PHIMOE: - { - result = llm.build_phi3(); - } break; - case LLM_ARCH_PLAMO: - { - result = llm.build_plamo(); - } break; - case LLM_ARCH_GPT2: - { - result = llm.build_gpt2(); - } break; - case LLM_ARCH_CODESHELL: - { - result = llm.build_codeshell(); - } break; - case LLM_ARCH_ORION: - { - result = llm.build_orion(); - } break; - case LLM_ARCH_INTERNLM2: - { - result = llm.build_internlm2(); - } break; - case LLM_ARCH_MINICPM3: - { - result = llm.build_minicpm3(); - } break; - case LLM_ARCH_GEMMA: - { - result = llm.build_gemma(); - } break; - case LLM_ARCH_GEMMA2: - { - result = llm.build_gemma2(); - } break; - case LLM_ARCH_GEMMA3: - { - result = llm.build_gemma3(); - } break; - case LLM_ARCH_STARCODER2: - { - result = llm.build_starcoder2(); - } break; - case LLM_ARCH_MAMBA: - { - result = llm.build_mamba(); - } break; - case LLM_ARCH_XVERSE: - { - result = llm.build_xverse(); - } break; - case LLM_ARCH_COMMAND_R: - { - result = llm.build_command_r(); - } break; - case 
LLM_ARCH_COHERE2: - { - result = llm.build_cohere2(); - } break; - case LLM_ARCH_DBRX: - { - result = llm.build_dbrx(); - } break; - case LLM_ARCH_OLMO: - { - result = llm.build_olmo(); - } break; - case LLM_ARCH_OLMO2: - { - result = llm.build_olmo2(); - } break; - case LLM_ARCH_OLMOE: - { - result = llm.build_olmoe(); - } break; - case LLM_ARCH_OPENELM: - { - result = llm.build_openelm(); - } break; - case LLM_ARCH_GPTNEOX: - { - result = llm.build_gptneox(); - } break; - case LLM_ARCH_ARCTIC: - { - result = llm.build_arctic(); - } break; - case LLM_ARCH_DEEPSEEK: - { - result = llm.build_deepseek(); - } break; - case LLM_ARCH_DEEPSEEK2: - { - result = llm.build_deepseek2(); - } break; - case LLM_ARCH_CHATGLM: - { - result = llm.build_chatglm(); - } break; - case LLM_ARCH_BITNET: - { - result = llm.build_bitnet(); - } break; - case LLM_ARCH_T5: - { - if (lctx.is_encoding) { - result = llm.build_t5_enc(); - } else { - result = llm.build_t5_dec(); - } - } break; - case LLM_ARCH_T5ENCODER: - { - result = llm.build_t5_enc(); - } break; - case LLM_ARCH_JAIS: - { - result = llm.build_jais(); - } break; - case LLM_ARCH_NEMOTRON: - { - result = llm.build_nemotron(); - } break; - case LLM_ARCH_EXAONE: - { - result = llm.build_exaone(); - } break; - case LLM_ARCH_RWKV6: - { - result = llm.build_rwkv6(); - } break; - case LLM_ARCH_RWKV6QWEN2: - { - result = llm.build_rwkv6qwen2(); - } break; - case LLM_ARCH_CHAMELEON: - { - result = llm.build_chameleon(); - } break; - case LLM_ARCH_WAVTOKENIZER_DEC: - { - result = llm.build_wavtokenizer_dec(); - } break; - default: - GGML_ABORT("fatal error"); - } - - // add on pooling layer - if (lctx.cparams.embeddings) { - result = llm.append_pooling(result); - } - - llm.free(); - - return result; -} - -// returns the result of ggml_backend_sched_graph_compute_async execution -static enum ggml_status llama_graph_compute( - llama_context & lctx, - ggml_cgraph * gf, - int n_threads, - ggml_threadpool * threadpool) { - if (lctx.backend_cpu != nullptr) { - auto * reg = ggml_backend_dev_backend_reg(ggml_backend_get_device(lctx.backend_cpu)); - auto * set_threadpool_fn = (decltype(ggml_backend_cpu_set_threadpool) *) ggml_backend_reg_get_proc_address(reg, "ggml_backend_cpu_set_threadpool"); - set_threadpool_fn(lctx.backend_cpu, threadpool); - } - - // set the number of threads for all the backends - for (const auto & set_n_threads_fn : lctx.set_n_threads_fns) { - set_n_threads_fn.second(set_n_threads_fn.first, n_threads); - } - - auto status = ggml_backend_sched_graph_compute_async(lctx.sched.get(), gf); - if (status != GGML_STATUS_SUCCESS) { - LLAMA_LOG_ERROR("%s: ggml_backend_sched_graph_compute_async failed with error %d\n", __func__, status); - } - - // fprintf(stderr, "splits: %d\n", ggml_backend_sched_get_n_splits(lctx.sched)); - - return status; -} - -static int llama_prepare_sbatch( - llama_context & lctx, - const llama_batch & batch, - uint32_t & n_outputs) { - const auto & model = lctx.model; - const auto & hparams = model.hparams; - const auto & cparams = lctx.cparams; - - const uint32_t n_tokens_all = batch.n_tokens; - const int64_t n_embd = hparams.n_embd; - - // this indicates we are doing pooled embedding, so we ignore batch.logits and output all tokens - const bool embd_pooled = cparams.embeddings && cparams.pooling_type != LLAMA_POOLING_TYPE_NONE; - - GGML_ASSERT((!batch.token && batch.embd) || (batch.token && !batch.embd)); // NOLINT - if (batch.token) { - for (uint32_t i = 0; i < n_tokens_all; ++i) { - if (batch.token[i] < 0 || 
uint32_t(batch.token[i]) >= model.vocab.n_tokens()) { - LLAMA_LOG_ERROR("%s: invalid token[%d] = %d\n", __func__, i, batch.token[i]); - return -1; - } - } - } - GGML_ASSERT(n_tokens_all <= cparams.n_batch); - GGML_ASSERT((cparams.causal_attn || cparams.n_ubatch >= n_tokens_all) && "non-causal attention requires n_ubatch >= n_tokens"); - - lctx.n_queued_tokens += n_tokens_all; - lctx.embd_seq.clear(); - - // count outputs - if (batch.logits && !embd_pooled) { - for (uint32_t i = 0; i < n_tokens_all; ++i) { - n_outputs += batch.logits[i] != 0; - } - } else if (lctx.logits_all || embd_pooled) { - n_outputs = n_tokens_all; - } else { - // keep last output only - n_outputs = 1; - } - - lctx.sbatch.from_batch(batch, n_embd, - /* simple_split */ !lctx.kv_self.recurrent, - /* logits_all */ n_outputs == n_tokens_all); - - // reserve output buffer - if (llama_output_reserve(lctx, n_outputs) < n_outputs) { - LLAMA_LOG_ERROR("%s: could not reserve space for batch with %u outputs\n", __func__, n_outputs); - return -2; - }; - - return 0; -} - -static int llama_prepare_ubatch( - llama_context & lctx, - llama_kv_slot_restorer & kv_slot_restorer, - llama_ubatch & ubatch, - const uint32_t n_outputs, - const uint32_t n_tokens_all) { - GGML_ASSERT(lctx.sbatch.n_tokens > 0); - - auto & kv_self = lctx.kv_self; - const auto & cparams = lctx.cparams; - const auto & hparams = lctx.model.hparams; - - // this indicates we are doing pooled embedding, so we ignore batch.logits and output all tokens - const bool embd_pooled = cparams.embeddings && cparams.pooling_type != LLAMA_POOLING_TYPE_NONE; - - if (lctx.kv_self.recurrent) { - if (embd_pooled) { - // Pooled embeddings cannot be split across ubatches (yet) - ubatch = lctx.sbatch.split_seq(cparams.n_ubatch); - } else { - // recurrent model architectures are easier to implement - // with equal-length sequences - ubatch = lctx.sbatch.split_equal(cparams.n_ubatch); - } - } else { - ubatch = lctx.sbatch.split_simple(cparams.n_ubatch); - } - - // count the outputs in this u_batch - { - int32_t n_outputs_new = 0; - - if (n_outputs == n_tokens_all) { - n_outputs_new = ubatch.n_tokens; - } else { - GGML_ASSERT(ubatch.output); - for (uint32_t i = 0; i < ubatch.n_tokens; i++) { - n_outputs_new += int32_t(ubatch.output[i] != 0); - } - } - - // needs to happen before the graph is built - lctx.n_outputs = n_outputs_new; - } - - // non-causal masks do not use the KV cache - if (hparams.causal_attn) { - llama_kv_cache_update(&lctx); - - // if we have enough unused cells before the current head -> - // better to start searching from the beginning of the cache, hoping to fill it - if (kv_self.head > kv_self.used + 2*ubatch.n_tokens) { - kv_self.head = 0; - } - - const auto slot = llama_kv_cache_find_slot(kv_self, ubatch); - if (!slot) { - return 1; - } - kv_slot_restorer.save(slot); - - if (!kv_self.recurrent) { - // a heuristic, to avoid attending the full cache if it is not yet utilized - // after enough generations, the benefit from this heuristic disappears - // if we start defragmenting the cache, the benefit from this will be more important - const uint32_t pad = llama_kv_cache_get_padding(cparams); - kv_self.n = std::min(kv_self.size, std::max(pad, GGML_PAD(llama_kv_cache_cell_max(kv_self), pad))); - //kv_self.n = llama_kv_cache_cell_max(kv_self); - } - } - - return 0; -} - -// decode a batch of tokens by evaluating the transformer -// in case of unsuccessful decoding (error or warning), -// the kv_cache state will be returned to its original state -// (for non-recurrent 
models) or cleaned (for recurrent models) -// -// - lctx: llama context -// - inp_batch: batch to evaluate -// -// return 0 on success -// return positive int on warning -// return negative int on error -// -static int llama_decode_impl( - llama_context & lctx, - llama_batch inp_batch) { - - lctx.is_encoding = false; - - if (inp_batch.n_tokens == 0) { - LLAMA_LOG_ERROR("%s: n_tokens == 0\n", __func__); - return -1; - } - - // temporarily allocate memory for the input batch if needed - llama_batch_allocr batch_allocr(inp_batch, inp_batch.pos ? -1 : lctx.kv_self.max_pos() + 1); - const llama_batch & batch = batch_allocr.batch; - - const auto & model = lctx.model; - const auto & vocab = model.vocab; - const auto & hparams = model.hparams; - const auto & cparams = lctx.cparams; - - if (lctx.t_compute_start_us == 0) { - lctx.t_compute_start_us = ggml_time_us(); - } - auto & kv_self = lctx.kv_self; - llama_kv_slot_restorer kv_slot_restorer(kv_self); - - const int64_t n_embd = hparams.n_embd; - const int64_t n_vocab = vocab.n_tokens(); - - uint32_t n_outputs = 0; - uint32_t n_outputs_prev = 0; - - { - const int ret = llama_prepare_sbatch(lctx, batch, n_outputs); - if (ret != 0) { - return ret; - } - } - - while (lctx.sbatch.n_tokens > 0) { - llama_ubatch ubatch; - { - const int ret = llama_prepare_ubatch(lctx, kv_slot_restorer, ubatch, n_outputs, batch.n_tokens); - if (ret != 0) { - return ret; - } - } - - const int n_threads = ubatch.n_tokens == 1 ? cparams.n_threads : cparams.n_threads_batch; - ggml_threadpool_t threadpool = ubatch.n_tokens == 1 ? lctx.threadpool : lctx.threadpool_batch; - - GGML_ASSERT(n_threads > 0); - - //printf("kv_self.n = %5d, kv_self.used = %5d, kv_self.head = %5d\n", kv_self.n, kv_self.used, kv_self.head); - - ggml_backend_sched_reset(lctx.sched.get()); - ggml_backend_sched_set_eval_callback(lctx.sched.get(), lctx.cparams.cb_eval, lctx.cparams.cb_eval_user_data); - - ggml_cgraph * gf = llama_build_graph(lctx, ubatch, false); - - // the output is always the last tensor in the graph - struct ggml_tensor * res = ggml_graph_node(gf, -1); - struct ggml_tensor * embd = ggml_graph_node(gf, -2); - - if (lctx.n_outputs == 0) { - // no output - res = nullptr; - embd = nullptr; - } else if (cparams.embeddings) { - res = nullptr; // do not extract logits for embedding case - embd = nullptr; - for (int i = ggml_graph_n_nodes(gf) - 1; i >= 0; --i) { - if (strcmp(ggml_graph_node(gf, i)->name, "result_embd_pooled") == 0) { - embd = ggml_graph_node(gf, i); - break; - } - } - GGML_ASSERT(embd != nullptr && "missing embeddings tensor"); - } else { - embd = nullptr; // do not extract embeddings when not needed - GGML_ASSERT(strcmp(res->name, "result_output") == 0 && "missing result_output tensor"); - } - - // LLAMA_LOG_INFO("graph build time: %.3f ms (%d nodes, %d leafs)\n", (ggml_time_us() - t_start_us)/1000.0, gf->n_nodes, gf->n_leafs); - - ggml_backend_sched_alloc_graph(lctx.sched.get(), gf); - - llama_set_inputs(lctx, ubatch); - - const auto compute_status = llama_graph_compute(lctx, gf, n_threads, threadpool); - if (compute_status != GGML_STATUS_SUCCESS) { - kv_slot_restorer.restore(kv_self); - switch (compute_status) { - case GGML_STATUS_ABORTED: - return 2; - case GGML_STATUS_ALLOC_FAILED: - return -2; - case GGML_STATUS_FAILED: - default: - return -3; - } - } - - // update the kv ring buffer - { - kv_self.head += ubatch.n_tokens; - - // Ensure kv cache head points to a valid index. 
- if (kv_self.head >= kv_self.size) { - kv_self.head = 0; - } - } - - // plot the computation graph in dot format (for debugging purposes) - //if (n_past%100 == 0) { - // ggml_graph_dump_dot(gf, NULL, "llama.dot"); - //} - - // extract logits - if (res) { - ggml_backend_t backend_res = ggml_backend_sched_get_tensor_backend(lctx.sched.get(), res); - GGML_ASSERT(backend_res != nullptr); - GGML_ASSERT(lctx.logits != nullptr); - - float * logits_out = lctx.logits + n_outputs_prev*n_vocab; - const int32_t n_outputs_new = lctx.n_outputs; - - if (n_outputs_new) { - GGML_ASSERT( n_outputs_prev + n_outputs_new <= n_outputs); - GGML_ASSERT((n_outputs_prev + n_outputs_new)*n_vocab <= (int64_t) lctx.logits_size); - ggml_backend_tensor_get_async(backend_res, res, logits_out, 0, n_outputs_new*n_vocab*sizeof(float)); - } - } - - // extract embeddings - if (embd) { - ggml_backend_t backend_embd = ggml_backend_sched_get_tensor_backend(lctx.sched.get(), embd); - GGML_ASSERT(backend_embd != nullptr); - - switch (cparams.pooling_type) { - case LLAMA_POOLING_TYPE_NONE: - { - // extract token embeddings - GGML_ASSERT(lctx.embd != nullptr); - float * embd_out = lctx.embd + n_outputs_prev*n_embd; - const int32_t n_outputs_new = lctx.n_outputs; - - if (n_outputs_new) { - GGML_ASSERT( n_outputs_prev + n_outputs_new <= n_outputs); - GGML_ASSERT((n_outputs_prev + n_outputs_new)*n_embd <= (int64_t) lctx.embd_size); - ggml_backend_tensor_get_async(backend_embd, embd, embd_out, 0, n_outputs_new*n_embd*sizeof(float)); - } - } break; - case LLAMA_POOLING_TYPE_MEAN: - case LLAMA_POOLING_TYPE_CLS: - case LLAMA_POOLING_TYPE_LAST: - { - // extract sequence embeddings (cleared before processing each batch) - auto & embd_seq_out = lctx.embd_seq; - - for (uint32_t s = 0; s < ubatch.n_seqs; ++s) { - const llama_seq_id seq_id = ubatch.seq_id[s][0]; - if (embd_seq_out.find(seq_id) != embd_seq_out.end()) { - continue; - } - embd_seq_out[seq_id].resize(n_embd); - ggml_backend_tensor_get_async(backend_embd, embd, embd_seq_out[seq_id].data(), (n_embd*seq_id)*sizeof(float), n_embd*sizeof(float)); - } - } break; - case LLAMA_POOLING_TYPE_RANK: - { - // extract the rerank score - a single float per sequence - auto & embd_seq_out = lctx.embd_seq; - - for (uint32_t s = 0; s < ubatch.n_seqs; ++s) { - const llama_seq_id seq_id = ubatch.seq_id[s][0]; - if (embd_seq_out.find(seq_id) != embd_seq_out.end()) { - continue; - } - embd_seq_out[seq_id].resize(1); - ggml_backend_tensor_get_async(backend_embd, embd, embd_seq_out[seq_id].data(), (seq_id)*sizeof(float), sizeof(float)); - } - } break; - case LLAMA_POOLING_TYPE_UNSPECIFIED: - { - GGML_ABORT("unknown pooling type"); - } - } - } - n_outputs_prev += lctx.n_outputs; - } - - // set output mappings - { - bool sorted_output = true; - - GGML_ASSERT(lctx.sbatch.out_ids.size() == n_outputs); - - for (size_t i = 0; i < n_outputs; ++i) { - size_t out_id = lctx.sbatch.out_ids[i]; - lctx.output_ids[out_id] = i; - if (out_id != i) { - sorted_output = false; - } - } - - if (sorted_output) { - lctx.sbatch.out_ids.clear(); - } - } - - // set to total number of outputs in the batch, for use in llama_get_logits_ith - lctx.n_outputs = n_outputs; - - // wait for the computation to finish (automatically done when obtaining the model output) - //llama_synchronize(&lctx); - - // decide if we need to defrag the kv cache - if (cparams.causal_attn && cparams.defrag_thold > 0.0f) { - // - do not defrag small contexts (i.e. 
< 2048 tokens) - // - count the padding towards the number of used tokens - const float fragmentation = kv_self.n >= 2048 ? std::max(0.0f, 1.0f - float(kv_self.used + llama_kv_cache_get_padding(cparams))/float(kv_self.n)) : 0.0f; - - // queue defragmentation for next llama_kv_cache_update - if (fragmentation > cparams.defrag_thold) { - LLAMA_LOG_DEBUG("%s: fragmentation: %.2f - requesting defrag\n", __func__, fragmentation); - - llama_kv_cache_defrag(kv_self); - } - } - - // Reset state for the next token before backend sync, to allow the CPU activities in the reset to - // overlap with device computation. - ggml_backend_sched_reset(lctx.sched.get()); - - return 0; -} - -// encode a batch of tokens by evaluating the encoder part of the transformer -// -// - lctx: llama context -// - batch: batch to evaluate -// -// return 0 on success -// return positive int on warning -// return negative int on error -// -static int llama_encode_impl( - llama_context & lctx, - llama_batch inp_batch) { - - lctx.is_encoding = true; - - if (inp_batch.n_tokens == 0) { - LLAMA_LOG_ERROR("%s: n_tokens == 0\n", __func__); - return -1; - } - - // temporary allocate memory for the input batch if needed - llama_batch_allocr batch_allocr(inp_batch, inp_batch.pos ? -1 : lctx.kv_self.max_pos() + 1); - - const llama_batch & batch = batch_allocr.batch; - const uint32_t n_tokens = batch.n_tokens; - - const auto & model = lctx.model; - const auto & hparams = model.hparams; - const auto & cparams = lctx.cparams; - - GGML_ASSERT((!batch.token && batch.embd) || (batch.token && !batch.embd)); // NOLINT - - if (batch.token) { - for (uint32_t i = 0; i < n_tokens; ++i) { - if (batch.token[i] < 0 || (uint32_t) batch.token[i] >= model.vocab.n_tokens()) { - LLAMA_LOG_ERROR("%s: invalid token[%d] = %d\n", __func__, i, batch.token[i]); - return -1; - } - } - } - - // micro-batching is not possible for non-causal encoding, so we process the batch in a single shot - GGML_ASSERT(cparams.n_ubatch >= n_tokens && "encoder requires n_ubatch >= n_tokens"); - - if (lctx.t_compute_start_us == 0) { - lctx.t_compute_start_us = ggml_time_us(); - } - - lctx.n_queued_tokens += n_tokens; - - const int64_t n_embd = hparams.n_embd; - - lctx.sbatch.from_batch(batch, n_embd, /* simple_split */ true, /* logits_all */ true); - - const llama_ubatch ubatch = lctx.sbatch.split_simple(n_tokens); - - // reserve output buffer - if (llama_output_reserve(lctx, n_tokens) < n_tokens) { - LLAMA_LOG_ERROR("%s: could not reserve space for batch with %u outputs\n", __func__, n_tokens); - return -2; - }; - - for (uint32_t i = 0; i < n_tokens; ++i) { - lctx.output_ids[i] = i; - } - - lctx.inp_embd_enc = NULL; - lctx.n_outputs = n_tokens; - - int n_threads = n_tokens == 1 ? cparams.n_threads : cparams.n_threads_batch; - ggml_threadpool_t threadpool = n_tokens == 1 ? 
lctx.threadpool : lctx.threadpool_batch; - - GGML_ASSERT(n_threads > 0); - - ggml_backend_sched_reset(lctx.sched.get()); - ggml_backend_sched_set_eval_callback(lctx.sched.get(), lctx.cparams.cb_eval, lctx.cparams.cb_eval_user_data); - - ggml_cgraph * gf = llama_build_graph(lctx, ubatch, false); - - // the output embeddings after the final encoder normalization - struct ggml_tensor * embd = nullptr; - - // there are two cases here - if (llama_model_has_decoder(&lctx.model)) { - // first case is an encoder-decoder T5 model where embeddings are passed to decoder - embd = ggml_graph_node(gf, -1); - GGML_ASSERT(strcmp(embd->name, "result_norm") == 0 && "missing result_output tensor"); - } else { - // second case is an encoder-only T5 model - if (cparams.embeddings) { - // only output embeddings if required - embd = ggml_graph_node(gf, -1); - if (strcmp(embd->name, "result_embd_pooled") != 0) { - embd = ggml_graph_node(gf, -2); - } - GGML_ASSERT(strcmp(embd->name, "result_embd_pooled") == 0 && "missing embeddings tensor"); - } - } - - ggml_backend_sched_alloc_graph(lctx.sched.get(), gf); - - llama_set_inputs(lctx, ubatch); - - const auto compute_status = llama_graph_compute(lctx, gf, n_threads, threadpool); - switch (compute_status) { - case GGML_STATUS_SUCCESS: - break; - case GGML_STATUS_ABORTED: - return 2; - case GGML_STATUS_ALLOC_FAILED: - return -2; - case GGML_STATUS_FAILED: - default: - return -3; - } - - // extract embeddings - if (embd) { - ggml_backend_t backend_embd = ggml_backend_sched_get_tensor_backend(lctx.sched.get(), embd); - GGML_ASSERT(backend_embd != nullptr); - - if (llama_model_has_decoder(&lctx.model)) { - lctx.embd_enc.resize(n_tokens*n_embd); - float * embd_out = lctx.embd_enc.data(); - - ggml_backend_tensor_get_async(backend_embd, embd, embd_out, 0, n_tokens*n_embd*sizeof(float)); - GGML_ASSERT(!ubatch.equal_seqs); // TODO: handle equal splits - - // remember the sequence ids used during the encoding - needed for cross attention later - lctx.seq_ids_enc.resize(n_tokens); - for (uint32_t i = 0; i < n_tokens; i++) { - for (int s = 0; s < ubatch.n_seq_id[i]; s++) { - llama_seq_id seq_id = ubatch.seq_id[i][s]; - lctx.seq_ids_enc[i].insert(seq_id); - } - } - } else { - GGML_ASSERT(lctx.embd != nullptr); - - switch (cparams.pooling_type) { - case LLAMA_POOLING_TYPE_NONE: - { - // extract token embeddings - GGML_ASSERT(lctx.embd != nullptr); - float * embd_out = lctx.embd; - - GGML_ASSERT(n_tokens*n_embd <= (int64_t) lctx.embd_size); - ggml_backend_tensor_get_async(backend_embd, embd, embd_out, 0, n_tokens*n_embd*sizeof(float)); - } break; - case LLAMA_POOLING_TYPE_MEAN: - case LLAMA_POOLING_TYPE_CLS: - case LLAMA_POOLING_TYPE_LAST: - { - // extract sequence embeddings - auto & embd_seq_out = lctx.embd_seq; - embd_seq_out.clear(); - - GGML_ASSERT(!ubatch.equal_seqs); // TODO: handle equal splits - - for (uint32_t i = 0; i < n_tokens; i++) { - const llama_seq_id seq_id = ubatch.seq_id[i][0]; - if (embd_seq_out.find(seq_id) != embd_seq_out.end()) { - continue; - } - embd_seq_out[seq_id].resize(n_embd); - ggml_backend_tensor_get_async(backend_embd, embd, embd_seq_out[seq_id].data(), (n_embd*seq_id)*sizeof(float), n_embd*sizeof(float)); - } - } break; - case LLAMA_POOLING_TYPE_RANK: - { - // TODO: this likely should be the same logic as in llama_decoder_internal, but better to - // wait for an encoder model that requires this pooling type in order to test it - // https://github.com/ggerganov/llama.cpp/pull/9510 - GGML_ABORT("RANK pooling not implemented yet"); - } - case 
LLAMA_POOLING_TYPE_UNSPECIFIED: - { - GGML_ABORT("unknown pooling type"); - } - } - } - } - - // Reset state for the next token before backend sync, to allow the CPU activities in the reset to - // overlap with device computation. - ggml_backend_sched_reset(lctx.sched.get()); - - return 0; -} - -// find holes from the beginning of the KV cache and fill them by moving data from the end of the cache -static void llama_kv_cache_defrag_impl(struct llama_context & lctx) { - auto & kv_self = lctx.kv_self; - - const auto & hparams = lctx.model.hparams; - - const uint32_t n_layer = hparams.n_layer; - - const uint32_t n_kv = llama_kv_cache_cell_max(kv_self); - const uint32_t n_used = kv_self.used; - - assert(n_used <= n_kv); - - //const int64_t t_start = ggml_time_us(); - - // number of cells moved - uint32_t n_moves = 0; - - // each move requires 6*n_layer tensors (see build_defrag) - // - source view, destination view, copy operation - // - x2 for keys and values - //const uint32_t max_moves = model.max_nodes()/(6*n_layer); - // TODO: tmp fix https://github.com/ggerganov/llama.cpp/issues/6685#issuecomment-2057579516 - const uint32_t max_moves = (lctx.model.max_nodes() - 2*n_layer)/(6*n_layer); - - // determine which KV cells to move where - // - // cell i moves to ids[i] - // - // if ids[i] == i || ids[i] == n_kv, then cell i is not moved - // - std::vector ids(n_kv, n_kv); - - for (uint32_t i0 = 0; i0 < n_used; ++i0) { - const auto & cell0 = kv_self.cells[i0]; - - if (!cell0.is_empty()) { - ids[i0] = i0; - - continue; - } - - // found a hole - fill it with data from the end of the cache - - uint32_t nh = 1; - - // determine the size of the hole - while (i0 + nh < n_used && kv_self.cells[i0 + nh].is_empty()) { - nh++; - } - - uint32_t nf = 0; - uint32_t is = n_kv - 1; - - // starting from the end, find nh non-empty cells - for (; is > i0; --is) { - const auto & cell1 = kv_self.cells[is]; - - if (cell1.is_empty() || ids[is] != n_kv) { - continue; - } - - // non-empty cell which is not yet moved - nf++; - - if (nf == nh) { - break; - } - } - - // this can only happen if `n_used` is not accurate, which would be a bug - GGML_ASSERT(nf == nh && "KV defrag bug: nf != nh"); - - nf = 0; - - uint32_t i1 = is; - - // are we moving a continuous block of memory? - bool cont = false; - - // should we stop searching for the next move? 
- bool stop = false; - - // go back and move the nf cells to the hole - for (; i1 < n_kv; ++i1) { - auto & cell1 = kv_self.cells[i1]; - - if (cell1.is_empty() || ids[i1] != n_kv) { - if (n_moves == max_moves) { - stop = true; - break; - } - - cont = false; - continue; - } - - // this cell goes to (i0 + nf) - ids[i1] = i0 + nf; - - // move the cell meta data - kv_self.cells[i0 + nf] = cell1; - - // clear the old cell and move the head there - cell1 = llama_kv_cell(); - kv_self.head = n_used; - - if (!cont) { - n_moves++; - cont = true; - } - - nf++; - - if (nf == nh) { - break; - } - } - - if (stop || n_moves == max_moves) { - break; - } - - //LLAMA_LOG_INFO("(tmp log) KV defrag: move [%u, %u) to [%u, %u)\n", is, i1 + 1, i0, i0 + nh); - - i0 += nh - 1; - } - - if (n_moves == 0) { - return; - } - - //LLAMA_LOG_INFO("(tmp log) KV defrag cell moves: %u\n", n_moves); - - //LLAMA_LOG_INFO("expected gf nodes: %u\n", 6*n_moves*n_layer); - -#if 0 - // CPU defrag - // - // TODO: optimizations are possible: - // - multiple threads - // - avoid copying to the host memory when already there - // - // likely not worth the effort, as we have ggml_graph based defrag - // - - const uint32_t n_embd_k_gqa = hparams.n_embd_k_gqa(); - const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(); - - const uint32_t kv_size = kv_self.size; - - std::vector buf_k; - std::vector buf_v; - - for (uint32_t il = 0; il < n_layer; ++il) { - const size_t k_size_row = ggml_row_size(kv_self.k_l[il]->type, n_embd_k_gqa); - const size_t k_size = ggml_row_size(kv_self.k_l[il]->type, n_embd_k_gqa*kv_size); - - const size_t v_size_el = ggml_type_size(kv_self.v_l[il]->type); - const size_t v_size = ggml_row_size (kv_self.v_l[il]->type, n_embd_v_gqa*kv_size); - - buf_k.resize(k_size); - buf_v.resize(v_size); - - ggml_backend_tensor_get(kv_self.k_l[il], buf_k.data(), 0, buf_k.size()); - ggml_backend_tensor_get(kv_self.v_l[il], buf_v.data(), 0, buf_v.size()); - - // batch move [i, i+nm) to [id, id+nm) - // note: cells can move only to a lower index - for (uint32_t i = 0; i < n_kv; ++i) { - const uint32_t id = ids[i]; - - if (i == id || id == n_kv) { - continue; - } - - uint32_t nm = 1; - - while (i + nm < n_kv && ids[i + nm] == id + nm) { - nm++; - } - - // move keys - { - const int64_t os = i*k_size_row; - const int64_t od = id*k_size_row; - - memcpy(buf_k.data() + od, buf_k.data() + os, nm*k_size_row); - } - - // move values (note: they are transposed) - { - const int64_t os = i; - const int64_t od = id; - - for (uint32_t j = 0; j < n_embd_v_gqa; ++j) { - memcpy(buf_v.data() + (od + j*kv_size)*v_size_el, buf_v.data() + (os + j*kv_size)*v_size_el, nm*v_size_el); - } - } - - i += nm - 1; - } - - ggml_backend_tensor_set(kv_self.k_l[il], buf_k.data(), 0, buf_k.size()); - ggml_backend_tensor_set(kv_self.v_l[il], buf_v.data(), 0, buf_v.size()); - } -#else - // ggml_graph defrag - - ggml_backend_sched_reset(lctx.sched.get()); - - ggml_cgraph * gf = llama_build_graph_defrag(lctx, ids); - - llama_graph_compute(lctx, gf, lctx.cparams.n_threads, lctx.threadpool); -#endif - - //const int64_t t_end = ggml_time_us(); - - //LLAMA_LOG_INFO("(tmp log) KV defrag time: %.3f ms\n", (t_end - t_start)/1000.0); -} - -static void llama_kv_cache_update_impl(struct llama_context & lctx) { - bool need_reserve = false; - - if (lctx.kv_self.has_shift) { - if (!llama_kv_cache_can_shift(&lctx)) { - GGML_ABORT("The current context does not support K-shift"); - } - - // apply K-shift if needed - if (lctx.model.hparams.rope_type != LLAMA_ROPE_TYPE_NONE) { - 
ggml_backend_sched_reset(lctx.sched.get()); - - ggml_cgraph * gf = llama_build_graph_k_shift(lctx); - - ggml_backend_sched_alloc_graph(lctx.sched.get(), gf); - - llama_set_k_shift(lctx); - - llama_graph_compute(lctx, gf, lctx.cparams.n_threads, lctx.threadpool); - - need_reserve = true; - } - - { - auto & kv_self = lctx.kv_self; - - kv_self.has_shift = false; - - for (uint32_t i = 0; i < kv_self.size; ++i) { - kv_self.cells[i].delta = 0; - } - } - } - - // defragment the KV cache if needed - if (lctx.kv_self.do_defrag) { - llama_kv_cache_defrag_impl(lctx); - - need_reserve = true; - - lctx.kv_self.do_defrag = false; - } - - // reserve a worst case graph again - if (need_reserve) { - // TODO: extract to a function - // build worst-case graph - uint32_t n_seqs = 1; // TODO: worst-case number of sequences - uint32_t n_tokens = std::min(lctx.cparams.n_ctx, lctx.cparams.n_ubatch); - llama_token token = lctx.model.vocab.token_bos(); // not actually used by llama_build_graph, but required to choose between token and embedding inputs graph - llama_ubatch ubatch = { true, n_tokens, n_tokens / n_seqs, n_seqs, &token, nullptr, nullptr, nullptr, nullptr, nullptr}; - ggml_cgraph * gf = llama_build_graph(lctx, ubatch, true); - - // initialize scheduler with the worst-case graph - ggml_backend_sched_reset(lctx.sched.get()); - if (!ggml_backend_sched_reserve(lctx.sched.get(), gf)) { - LLAMA_LOG_ERROR("%s: failed to allocate compute buffers\n", __func__); - } - } -} - -int32_t llama_set_adapter_lora( - struct llama_context * ctx, - struct llama_adapter_lora * adapter, - float scale) { - ctx->lora[adapter] = scale; - return 0; -} - -int32_t llama_rm_adapter_lora( - struct llama_context * ctx, - struct llama_adapter_lora * adapter) { - auto pos = ctx->lora.find(adapter); - if (pos != ctx->lora.end()) { - ctx->lora.erase(pos); - return 0; - } - - return -1; -} - -void llama_clear_adapter_lora(struct llama_context * ctx) { - ctx->lora.clear(); -} - -int32_t llama_apply_adapter_cvec( - struct llama_context * ctx, - const float * data, - size_t len, - int32_t n_embd, - int32_t il_start, - int32_t il_end) { - return ctx->cvec.apply(ctx->model, data, len, n_embd, il_start, il_end); -} - // // interface implementation // -struct llama_context_params llama_context_default_params() { - struct llama_context_params result = { - /*.n_ctx =*/ 512, - /*.n_batch =*/ 2048, - /*.n_ubatch =*/ 512, - /*.n_seq_max =*/ 1, - /*.n_threads =*/ GGML_DEFAULT_N_THREADS, // TODO: better default - /*.n_threads_batch =*/ GGML_DEFAULT_N_THREADS, - /*.rope_scaling_type =*/ LLAMA_ROPE_SCALING_TYPE_UNSPECIFIED, - /*.pooling_type =*/ LLAMA_POOLING_TYPE_UNSPECIFIED, - /*.attention_type =*/ LLAMA_ATTENTION_TYPE_UNSPECIFIED, - /*.rope_freq_base =*/ 0.0f, - /*.rope_freq_scale =*/ 0.0f, - /*.yarn_ext_factor =*/ -1.0f, - /*.yarn_attn_factor =*/ 1.0f, - /*.yarn_beta_fast =*/ 32.0f, - /*.yarn_beta_slow =*/ 1.0f, - /*.yarn_orig_ctx =*/ 0, - /*.defrag_thold =*/ -1.0f, - /*.cb_eval =*/ nullptr, - /*.cb_eval_user_data =*/ nullptr, - /*.type_k =*/ GGML_TYPE_F16, - /*.type_v =*/ GGML_TYPE_F16, - /*.logits_all =*/ false, - /*.embeddings =*/ false, - /*.offload_kqv =*/ true, - /*.flash_attn =*/ false, - /*.no_perf =*/ true, - /*.abort_callback =*/ nullptr, - /*.abort_callback_data =*/ nullptr, - }; - - return result; -} - struct llama_sampler_chain_params llama_sampler_chain_default_params() { struct llama_sampler_chain_params result = { /*.no_perf =*/ true, @@ -9571,6 +82,57 @@ int64_t llama_time_us(void) { return ggml_time_us(); } +// Returns 0 on success, 
-1 on error, and -2 on cancellation via llama_progress_callback +static int llama_model_load(const std::string & fname, std::vector & splits, llama_model & model, llama_model_params & params) { + // loading time will be recalculated after the first eval, so + // we take page faults deferred by mmap() into consideration + model.t_load_us = 0; + time_meas tm(model.t_load_us); + + model.t_start_us = tm.t_start_us; + + try { + llama_model_loader ml(fname, splits, params.use_mmap, params.check_tensors, params.kv_overrides); + + ml.print_info(); + + model.hparams.vocab_only = params.vocab_only; + + try { + model.load_arch(ml); + } catch(const std::exception & e) { + throw std::runtime_error("error loading model architecture: " + std::string(e.what())); + } + try { + model.load_hparams(ml); + } catch(const std::exception & e) { + throw std::runtime_error("error loading model hyperparameters: " + std::string(e.what())); + } + try { + model.load_vocab(ml); + } catch(const std::exception & e) { + throw std::runtime_error("error loading model vocabulary: " + std::string(e.what())); + } + + model.load_stats(ml); + model.print_info(); + + if (params.vocab_only) { + LLAMA_LOG_INFO("%s: vocab only - skipping tensors\n", __func__); + return 0; + } + + if (!model.load_tensors(ml)) { + return -2; + } + } catch (const std::exception & err) { + LLAMA_LOG_ERROR("%s: error loading model: %s\n", __func__, err.what()); + return -1; + } + + return 0; +} + static struct llama_model * llama_model_load_from_file_impl( const std::string & path_model, std::vector & splits, @@ -9691,460 +253,6 @@ struct llama_model * llama_model_load_from_splits( return llama_model_load_from_file_impl(splits.front(), splits, params); } -struct llama_context * llama_init_from_model( - struct llama_model * model, - struct llama_context_params params) { - - if (!model) { - LLAMA_LOG_ERROR("%s: model cannot be NULL\n", __func__); - return nullptr; - } - - if (params.n_batch == 0 && params.n_ubatch == 0) { - LLAMA_LOG_ERROR("%s: n_batch and n_ubatch cannot both be zero\n", __func__); - return nullptr; - } - - if (params.n_ctx == 0 && model->hparams.n_ctx_train == 0) { - LLAMA_LOG_ERROR("%s: n_ctx and model->hparams.n_ctx_train cannot both be zero\n", __func__); - return nullptr; - } - - if (params.flash_attn && model->arch == LLM_ARCH_GROK) { - LLAMA_LOG_WARN("%s: flash_attn is not compatible with Grok - forcing off\n", __func__); - params.flash_attn = false; - } - - if (params.flash_attn && model->hparams.n_embd_head_k != model->hparams.n_embd_head_v) { - LLAMA_LOG_WARN("%s: flash_attn requires n_embd_head_k == n_embd_head_v - forcing off\n", __func__); - params.flash_attn = false; - } - - if (ggml_is_quantized(params.type_v) && !params.flash_attn) { - LLAMA_LOG_ERROR("%s: V cache quantization requires flash_attn\n", __func__); - return nullptr; - } - - llama_context * ctx = new llama_context(*model); - - const auto & hparams = model->hparams; - auto & cparams = ctx->cparams; - - cparams.n_seq_max = std::max(1u, params.n_seq_max); - cparams.n_threads = params.n_threads; - cparams.n_threads_batch = params.n_threads_batch; - cparams.yarn_ext_factor = params.yarn_ext_factor; - cparams.yarn_attn_factor = params.yarn_attn_factor; - cparams.yarn_beta_fast = params.yarn_beta_fast; - cparams.yarn_beta_slow = params.yarn_beta_slow; - cparams.defrag_thold = params.defrag_thold; - cparams.embeddings = params.embeddings; - cparams.offload_kqv = params.offload_kqv; - cparams.flash_attn = params.flash_attn; - cparams.no_perf = params.no_perf; - 
cparams.pooling_type = params.pooling_type; - - cparams.n_ctx = params.n_ctx == 0 ? hparams.n_ctx_train : params.n_ctx; - cparams.rope_freq_base = params.rope_freq_base == 0.0f ? hparams.rope_freq_base_train : params.rope_freq_base; - cparams.rope_freq_scale = params.rope_freq_scale == 0.0f ? hparams.rope_freq_scale_train : params.rope_freq_scale; - - // this is necessary due to kv_self.n being padded later during inference - cparams.n_ctx = GGML_PAD(cparams.n_ctx, llama_kv_cache_get_padding(cparams)); - - // with causal attention, the batch size is limited by the context size - cparams.n_batch = hparams.causal_attn ? std::min(cparams.n_ctx, params.n_batch) : params.n_batch; - - // the batch has to be at least GGML_KQ_MASK_PAD because we will be padding the KQ_mask - // this is required by GPU kernels in order to avoid out-of-bounds accesses (e.g. ggml_flash_attn_ext) - // ref: https://github.com/ggerganov/llama.cpp/pull/5021 - if (cparams.n_batch < GGML_KQ_MASK_PAD) { - LLAMA_LOG_WARN("%s: n_batch is less than GGML_KQ_MASK_PAD - increasing to %d\n", __func__, GGML_KQ_MASK_PAD); - cparams.n_batch = GGML_KQ_MASK_PAD; - } - - cparams.n_ubatch = std::min(cparams.n_batch, params.n_ubatch == 0 ? params.n_batch : params.n_ubatch); - - cparams.n_ctx_orig_yarn = params.yarn_orig_ctx != 0 ? params.yarn_orig_ctx : - hparams.n_ctx_orig_yarn != 0 ? hparams.n_ctx_orig_yarn : - hparams.n_ctx_train; - - cparams.cb_eval = params.cb_eval; - cparams.cb_eval_user_data = params.cb_eval_user_data; - - auto rope_scaling_type = params.rope_scaling_type; - if (rope_scaling_type == LLAMA_ROPE_SCALING_TYPE_UNSPECIFIED) { - rope_scaling_type = hparams.rope_scaling_type_train; - } - - if (rope_scaling_type == LLAMA_ROPE_SCALING_TYPE_NONE) { - cparams.rope_freq_scale = 1.0f; // never scale if scaling type is none - } - - if (cparams.yarn_ext_factor < 0.0f) { // negative indicates 'not set' - cparams.yarn_ext_factor = rope_scaling_type == LLAMA_ROPE_SCALING_TYPE_YARN ? 
1.0f : 0.0f; - } - - cparams.yarn_attn_factor *= hparams.rope_attn_factor; - - if (cparams.pooling_type == LLAMA_POOLING_TYPE_UNSPECIFIED) { - if (hparams.pooling_type == LLAMA_POOLING_TYPE_UNSPECIFIED) { - cparams.pooling_type = LLAMA_POOLING_TYPE_NONE; - } else { - cparams.pooling_type = hparams.pooling_type; - } - } - - if (params.attention_type == LLAMA_ATTENTION_TYPE_UNSPECIFIED) { - cparams.causal_attn = hparams.causal_attn; - } else { - cparams.causal_attn = params.attention_type == LLAMA_ATTENTION_TYPE_CAUSAL; - } - - const uint32_t n_ctx_per_seq = cparams.n_ctx / cparams.n_seq_max; - - LLAMA_LOG_INFO("%s: n_seq_max = %u\n", __func__, cparams.n_seq_max); - LLAMA_LOG_INFO("%s: n_ctx = %u\n", __func__, cparams.n_ctx); - LLAMA_LOG_INFO("%s: n_ctx_per_seq = %u\n", __func__, n_ctx_per_seq); - LLAMA_LOG_INFO("%s: n_batch = %u\n", __func__, cparams.n_batch); - LLAMA_LOG_INFO("%s: n_ubatch = %u\n", __func__, cparams.n_ubatch); - LLAMA_LOG_INFO("%s: flash_attn = %d\n", __func__, cparams.flash_attn); - LLAMA_LOG_INFO("%s: freq_base = %.1f\n", __func__, cparams.rope_freq_base); - LLAMA_LOG_INFO("%s: freq_scale = %g\n", __func__, cparams.rope_freq_scale); - - if (n_ctx_per_seq < hparams.n_ctx_train) { - LLAMA_LOG_WARN("%s: n_ctx_per_seq (%u) < n_ctx_train (%u) -- the full capacity of the model will not be utilized\n", - __func__, n_ctx_per_seq, hparams.n_ctx_train); - } - - if (n_ctx_per_seq > hparams.n_ctx_train) { - LLAMA_LOG_WARN("%s: n_ctx_pre_seq (%u) > n_ctx_train (%u) -- possible training context overflow\n", - __func__, n_ctx_per_seq, hparams.n_ctx_train); - } - - ctx->logits_all = params.logits_all; - - // build worst-case graph for encoder if a model contains encoder - ctx->is_encoding = llama_model_has_encoder(model); - - uint32_t kv_size = cparams.n_ctx; - ggml_type type_k = params.type_k; - ggml_type type_v = params.type_v; - - // Mamba only needs a constant number of KV cache cells per sequence - if (llama_model_is_recurrent(model)) { - // Mamba needs at least as many KV cells as there are sequences kept at any time - kv_size = std::max((uint32_t) 1, params.n_seq_max); - // it's probably best to keep as much precision as possible for the states - type_k = GGML_TYPE_F32; // required by ggml_ssm_conv for Mamba's conv_states - type_v = GGML_TYPE_F32; // required by ggml_ssm_scan for Mamba's ssm_states - } - - GGML_ASSERT(hparams.n_embd_head_k % ggml_blck_size(type_k) == 0); - GGML_ASSERT(hparams.n_embd_head_v % ggml_blck_size(type_v) == 0); - - if (!hparams.vocab_only) { - // GPU backends - for (auto * dev : model->devices) { - ggml_backend_t backend = ggml_backend_dev_init(dev, nullptr); - if (backend == nullptr) { - LLAMA_LOG_ERROR("%s: failed to initialize %s backend\n", __func__, ggml_backend_dev_name(dev)); - llama_free(ctx); - return nullptr; - } - ctx->backends.emplace_back(backend); - } - - // add ACCEL backends (such as BLAS) - for (size_t i = 0; i < ggml_backend_dev_count(); ++i) { - ggml_backend_dev_t dev = ggml_backend_dev_get(i); - if (ggml_backend_dev_type(dev) == GGML_BACKEND_DEVICE_TYPE_ACCEL) { - ggml_backend_t backend = ggml_backend_dev_init(dev, nullptr); - if (backend == nullptr) { - LLAMA_LOG_ERROR("%s: failed to initialize %s backend\n", __func__, ggml_backend_dev_name(dev)); - llama_free(ctx); - return nullptr; - } - ctx->backends.emplace_back(backend); - } - } - - // add CPU backend - ctx->backend_cpu = ggml_backend_init_by_type(GGML_BACKEND_DEVICE_TYPE_CPU, nullptr); - if (ctx->backend_cpu == nullptr) { - LLAMA_LOG_ERROR("%s: failed to initialize CPU 
backend\n", __func__); - llama_free(ctx); - return nullptr; - } - ctx->backends.emplace_back(ctx->backend_cpu); - - // create a list of the set_n_threads functions in the backends - for (auto & backend : ctx->backends) { - ggml_backend_dev_t dev = ggml_backend_get_device(backend.get()); - ggml_backend_reg_t reg = dev ? ggml_backend_dev_backend_reg(dev) : nullptr; - if (reg) { - auto ggml_backend_set_n_threads_fn = (ggml_backend_set_n_threads_t) ggml_backend_reg_get_proc_address(reg, "ggml_backend_set_n_threads"); - if (ggml_backend_set_n_threads_fn) { - ctx->set_n_threads_fns.emplace_back(backend.get(), ggml_backend_set_n_threads_fn); - } - } - } - - llama_set_abort_callback(ctx, params.abort_callback, params.abort_callback_data); - - if (!llama_kv_cache_init(ctx->kv_self, ctx->model, ctx->cparams, type_k, type_v, kv_size, cparams.offload_kqv)) { - LLAMA_LOG_ERROR("%s: llama_kv_cache_init() failed for self-attention cache\n", __func__); - llama_free(ctx); - return nullptr; - } - - { - size_t memory_size_k = 0; - size_t memory_size_v = 0; - - for (auto & k : ctx->kv_self.k_l) { - memory_size_k += ggml_nbytes(k); - } - - for (auto & v : ctx->kv_self.v_l) { - memory_size_v += ggml_nbytes(v); - } - - LLAMA_LOG_INFO("%s: KV self size = %7.2f MiB, K (%s): %7.2f MiB, V (%s): %7.2f MiB\n", __func__, - (float)(memory_size_k + memory_size_v) / (1024.0f * 1024.0f), - ggml_type_name(type_k), (float)memory_size_k / (1024.0f * 1024.0f), - ggml_type_name(type_v), (float)memory_size_v / (1024.0f * 1024.0f)); - } - - // graph outputs buffer - { - // resized during inference when a batch uses more outputs - if (llama_output_reserve(*ctx, params.n_seq_max) < params.n_seq_max) { - LLAMA_LOG_ERROR("%s: failed to reserve initial output buffer\n", __func__); - llama_free(ctx); - return nullptr; - } - - LLAMA_LOG_INFO("%s: %10s output buffer size = %8.2f MiB\n", __func__, - ggml_backend_buffer_name(ctx->buf_output.get()), - ggml_backend_buffer_get_size(ctx->buf_output.get()) / 1024.0 / 1024.0); - } - - // scheduler and compute buffers - { - // buffer types used for the compute buffer of each backend - std::vector backend_buft; - std::vector backend_ptrs; - for (auto & backend : ctx->backends) { - auto * buft = ggml_backend_get_default_buffer_type(backend.get()); - auto backend_type = ggml_backend_dev_type(ggml_backend_get_device(backend.get())); - if (backend_type == GGML_BACKEND_DEVICE_TYPE_CPU && !model->devices.empty()) { - // use the host buffer of the first device CPU for faster transfer of the intermediate state - auto * dev = model->devices[0]; - auto * host_buft = ggml_backend_dev_host_buffer_type(dev); - if (host_buft) { - buft = host_buft; - } - } - backend_buft.push_back(buft); - backend_ptrs.push_back(backend.get()); - } - - const size_t max_nodes = model->max_nodes(); - - // buffer used to store the computation graph and the tensor meta data - ctx->buf_compute_meta.resize(ggml_tensor_overhead()*max_nodes + ggml_graph_overhead_custom(max_nodes, false)); - - // TODO: move these checks to ggml_backend_sched - // enabling pipeline parallelism in the scheduler increases memory usage, so it is only done when necessary - bool pipeline_parallel = - model->n_devices() > 1 && - model->params.n_gpu_layers > (int)model->hparams.n_layer && - model->params.split_mode == LLAMA_SPLIT_MODE_LAYER && - params.offload_kqv; - - // pipeline parallelism requires support for async compute and events in all devices - if (pipeline_parallel) { - for (auto & backend : ctx->backends) { - auto dev_type = 
ggml_backend_dev_type(ggml_backend_get_device(backend.get())); - if (dev_type == GGML_BACKEND_DEVICE_TYPE_CPU) { - // ignore CPU backend - continue; - } - auto * dev = ggml_backend_get_device(backend.get()); - ggml_backend_dev_props props; - ggml_backend_dev_get_props(dev, &props); - if (!props.caps.async || !props.caps.events) { - // device does not support async compute or events - pipeline_parallel = false; - break; - } - } - } - - ctx->sched.reset(ggml_backend_sched_new(backend_ptrs.data(), backend_buft.data(), backend_ptrs.size(), max_nodes, pipeline_parallel)); - - if (pipeline_parallel) { - LLAMA_LOG_INFO("%s: pipeline parallelism enabled (n_copies=%d)\n", __func__, ggml_backend_sched_get_n_copies(ctx->sched.get())); - } - - // initialize scheduler with the worst-case graph - uint32_t n_seqs = 1; // TODO: worst-case number of sequences - uint32_t n_tokens = std::min(cparams.n_ctx, cparams.n_ubatch); - llama_token token = ctx->model.vocab.token_bos(); // not actually used by llama_build_graph, but required to choose between token and embedding inputs graph - - llama_ubatch ubatch_pp = { true, n_tokens, n_tokens / n_seqs, n_seqs, &token, nullptr, nullptr, nullptr, nullptr, nullptr}; - ggml_cgraph * gf_pp = llama_build_graph(*ctx, ubatch_pp, true); - - // reserve pp graph first so that buffers are only allocated once - ggml_backend_sched_reserve(ctx->sched.get(), gf_pp); - int n_splits_pp = ggml_backend_sched_get_n_splits(ctx->sched.get()); - int n_nodes_pp = ggml_graph_n_nodes(gf_pp); - - // reserve with tg graph to get the number of splits and nodes - llama_ubatch ubatch_tg = { true, 1, 1, n_seqs, &token, nullptr, nullptr, nullptr, nullptr, nullptr}; - ggml_cgraph * gf_tg = llama_build_graph(*ctx, ubatch_tg, true); - ggml_backend_sched_reserve(ctx->sched.get(), gf_tg); - int n_splits_tg = ggml_backend_sched_get_n_splits(ctx->sched.get()); - int n_nodes_tg = ggml_graph_n_nodes(gf_tg); - - // reserve again with pp graph to avoid ggml-alloc reallocations during inference - gf_pp = llama_build_graph(*ctx, ubatch_pp, true); - if (!ggml_backend_sched_reserve(ctx->sched.get(), gf_pp)) { - LLAMA_LOG_ERROR("%s: failed to allocate compute buffers\n", __func__); - llama_free(ctx); - return nullptr; - } - - for (size_t i = 0; i < backend_ptrs.size(); ++i) { - ggml_backend_t backend = backend_ptrs[i]; - ggml_backend_buffer_type_t buft = backend_buft[i]; - size_t size = ggml_backend_sched_get_buffer_size(ctx->sched.get(), backend); - if (size > 1) { - LLAMA_LOG_INFO("%s: %10s compute buffer size = %8.2f MiB\n", __func__, - ggml_backend_buft_name(buft), - size / 1024.0 / 1024.0); - } - } - - if (n_nodes_pp == n_nodes_tg) { - LLAMA_LOG_INFO("%s: graph nodes = %d\n", __func__, n_nodes_pp); - } else { - LLAMA_LOG_INFO("%s: graph nodes = %d (with bs=%d), %d (with bs=1)\n", __func__, n_nodes_pp, n_tokens, n_nodes_tg); - } - if (n_splits_pp == n_splits_tg) { - LLAMA_LOG_INFO("%s: graph splits = %d\n", __func__, n_splits_pp); - } else { - LLAMA_LOG_INFO("%s: graph splits = %d (with bs=%d), %d (with bs=1)\n", __func__, n_splits_pp, n_tokens, n_splits_tg); - } - } - } - - return ctx; -} - -struct llama_context * llama_new_context_with_model( - struct llama_model * model, - struct llama_context_params params) { - return llama_init_from_model(model, params); -} - -// -// kv cache -// - -// TODO: tmp bridges below until `struct llama_kv_cache` is exposed through the public API - -struct llama_kv_cache_view llama_kv_cache_view_init(const struct llama_context * ctx, int32_t n_seq_max) { - return 
llama_kv_cache_view_init(ctx->kv_self, n_seq_max); -} - -void llama_kv_cache_view_update(const struct llama_context * ctx, struct llama_kv_cache_view * view) { - llama_kv_cache_view_update(view, ctx->kv_self); -} - -int32_t llama_get_kv_cache_token_count(const struct llama_context * ctx) { - return llama_get_kv_cache_token_count(ctx->kv_self); -} - -int32_t llama_get_kv_cache_used_cells(const struct llama_context * ctx) { - return llama_get_kv_cache_used_cells(ctx->kv_self); -} - -void llama_kv_cache_clear(struct llama_context * ctx) { - llama_kv_cache_clear(ctx->kv_self); -} - -bool llama_kv_cache_seq_rm(struct llama_context * ctx, llama_seq_id seq_id, llama_pos p0, llama_pos p1) { - return llama_kv_cache_seq_rm(ctx->kv_self, seq_id, p0, p1); -} - -void llama_kv_cache_seq_cp(struct llama_context * ctx, llama_seq_id seq_id_src, llama_seq_id seq_id_dst, llama_pos p0, llama_pos p1) { - if (seq_id_src == seq_id_dst) { - return; - } - llama_kv_cache_seq_cp(ctx->kv_self, seq_id_src, seq_id_dst, p0, p1); -} - -void llama_kv_cache_seq_keep(struct llama_context * ctx, llama_seq_id seq_id) { - llama_kv_cache_seq_keep(ctx->kv_self, seq_id); -} - -void llama_kv_cache_seq_add(struct llama_context * ctx, llama_seq_id seq_id, llama_pos p0, llama_pos p1, llama_pos delta) { - if (delta == 0) { - return; - } - - llama_kv_cache_seq_add(ctx->kv_self, seq_id, p0, p1, delta); -} - -void llama_kv_cache_seq_div(struct llama_context * ctx, llama_seq_id seq_id, llama_pos p0, llama_pos p1, int d) { - if (d == 1) { - return; - } - - llama_kv_cache_seq_div(ctx->kv_self, seq_id, p0, p1, d); -} - -llama_pos llama_kv_cache_seq_pos_max(struct llama_context * ctx, llama_seq_id seq_id) { - return llama_kv_cache_seq_pos_max(ctx->kv_self, seq_id); -} - -void llama_kv_cache_defrag(struct llama_context * ctx) { - llama_kv_cache_defrag(ctx->kv_self); -} - -void llama_kv_cache_update(struct llama_context * ctx) { - llama_kv_cache_update_impl(*ctx); -} - -bool llama_kv_cache_can_shift(struct llama_context * ctx) { - return llama_kv_cache_can_shift(ctx->kv_self); -} - -/// - -int32_t llama_encode( - struct llama_context * ctx, - struct llama_batch batch) { - const int ret = llama_encode_impl(*ctx, batch); - if (ret != 0) { - LLAMA_LOG_ERROR("%s: failed to encode, ret = %d\n", __func__, ret); - } - - return ret; -} - -int32_t llama_decode( - struct llama_context * ctx, - struct llama_batch batch) { - const int ret = llama_decode_impl(*ctx, batch); - if (ret != 0) { - LLAMA_LOG_ERROR("%s: failed to decode, ret = %d\n", __func__, ret); - } - - return ret; -} - // // chat templates // @@ -10212,7 +320,6 @@ const char * llama_print_system_info(void) { static std::string s; s.clear(); // Clear the string, since it's static, otherwise it will accumulate data from previous calls. 
- for (size_t i = 0; i < ggml_backend_reg_count(); i++) { auto * reg = ggml_backend_reg_get(i); auto * get_features_fn = (ggml_backend_get_features_t) ggml_backend_reg_get_proc_address(reg, "ggml_backend_get_features"); @@ -10231,43 +338,3 @@ const char * llama_print_system_info(void) { return s.c_str(); } - -// -// perf -// - -struct llama_perf_context_data llama_perf_context(const struct llama_context * ctx) { - struct llama_perf_context_data data = {}; - - if (ctx == nullptr) { - return data; - } - - data.t_start_ms = 1e-3 * ctx->t_start_us; - data.t_load_ms = 1e-3 * ctx->t_load_us; - data.t_p_eval_ms = 1e-3 * ctx->t_p_eval_us; - data.t_eval_ms = 1e-3 * ctx->t_eval_us; - data.n_p_eval = std::max(1, ctx->n_p_eval); - data.n_eval = std::max(1, ctx->n_eval); - - return data; -} - -void llama_perf_context_print(const struct llama_context * ctx) { - const auto data = llama_perf_context(ctx); - - const double t_end_ms = 1e-3 * ggml_time_us(); - - LLAMA_LOG_INFO("%s: load time = %10.2f ms\n", __func__, data.t_load_ms); - LLAMA_LOG_INFO("%s: prompt eval time = %10.2f ms / %5d tokens (%8.2f ms per token, %8.2f tokens per second)\n", - __func__, data.t_p_eval_ms, data.n_p_eval, data.t_p_eval_ms / data.n_p_eval, 1e3 / data.t_p_eval_ms * data.n_p_eval); - LLAMA_LOG_INFO("%s: eval time = %10.2f ms / %5d runs (%8.2f ms per token, %8.2f tokens per second)\n", - __func__, data.t_eval_ms, data.n_eval, data.t_eval_ms / data.n_eval, 1e3 / data.t_eval_ms * data.n_eval); - LLAMA_LOG_INFO("%s: total time = %10.2f ms / %5d tokens\n", __func__, (t_end_ms - data.t_start_ms), (data.n_p_eval + data.n_eval)); -} - -void llama_perf_context_reset(struct llama_context * ctx) { - ctx->t_start_us = ggml_time_us(); - ctx->t_eval_us = ctx->n_eval = 0; - ctx->t_p_eval_us = ctx->n_p_eval = 0; -} From be7c3034108473beda214fd1d7c98fd6a7a3bdf5 Mon Sep 17 00:00:00 2001 From: Xuan-Son Nguyen Date: Thu, 13 Mar 2025 12:34:54 +0100 Subject: [PATCH 047/398] arg : no n_predict = -2 for examples except for main and infill (#12364) --- common/arg.cpp | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/common/arg.cpp b/common/arg.cpp index 8531f0871d44a..fe6a1eece7fed 100644 --- a/common/arg.cpp +++ b/common/arg.cpp @@ -764,7 +764,11 @@ common_params_context common_params_parser_init(common_params & params, llama_ex ).set_env("LLAMA_ARG_CTX_SIZE")); add_opt(common_arg( {"-n", "--predict", "--n-predict"}, "N", - string_format("number of tokens to predict (default: %d, -1 = infinity, -2 = until context filled)", params.n_predict), + string_format( + ex == LLAMA_EXAMPLE_MAIN || ex == LLAMA_EXAMPLE_INFILL + ? 
"number of tokens to predict (default: %d, -1 = infinity, -2 = until context filled)" + : "number of tokens to predict (default: %d, -1 = infinity)", + params.n_predict), [](common_params & params, int value) { params.n_predict = value; } From 84d547554123a62e9ac77107cb20e4f6cc503af4 Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Thu, 13 Mar 2025 19:08:07 +0200 Subject: [PATCH 048/398] llama : fix Gemma3 SWA KV cache shift (#12373) * llama : fix Gemma3 SWA KV cache shift ggml-ci * hparams : add comment [no ci] --- src/llama-context.cpp | 17 ++++++++++++++--- src/llama-context.h | 2 ++ src/llama-graph.cpp | 29 +---------------------------- src/llama-hparams.cpp | 8 ++++++++ src/llama-hparams.h | 3 +++ src/llama-model.cpp | 21 +++++++++------------ 6 files changed, 37 insertions(+), 43 deletions(-) diff --git a/src/llama-context.cpp b/src/llama-context.cpp index 0a43a3af8e003..89fb33cbcdae2 100644 --- a/src/llama-context.cpp +++ b/src/llama-context.cpp @@ -442,10 +442,10 @@ ggml_tensor * llama_context::build_rope_shift( ggml_tensor * cur, ggml_tensor * shift, ggml_tensor * factors, + float freq_base, + float freq_scale, ggml_backend_buffer * bbuf) const { const auto & n_ctx_orig = cparams.n_ctx_orig_yarn; - const auto & freq_base = cparams.rope_freq_base; - const auto & freq_scale = cparams.rope_freq_scale; const auto & yarn_ext_factor = cparams.yarn_ext_factor; const auto & yarn_attn_factor = cparams.yarn_attn_factor; @@ -537,6 +537,17 @@ llm_graph_result_ptr llama_context::build_kv_self_shift( const int64_t n_head_kv = hparams.n_head_kv(il); const int64_t n_embd_k_gqa = hparams.n_embd_k_gqa(il); + float freq_base_l = cparams.rope_freq_base; + float freq_scale_l = cparams.rope_freq_scale; + + // TODO: improve + if (model.arch == LLM_ARCH_GEMMA3) { + const bool is_sliding = hparams.is_sliding(il); + + freq_base_l = is_sliding ? 10000.0f : cparams.rope_freq_base; + freq_scale_l = is_sliding ? 
1.0f : cparams.rope_freq_scale; + } + ggml_tensor * rope_factors = kv_self->cbs.get_rope_factors(n_ctx_per_seq(), il); ggml_tensor * k = @@ -546,7 +557,7 @@ llm_graph_result_ptr llama_context::build_kv_self_shift( ggml_row_size(kv_self->k_l[il]->type, n_embd_k_gqa), 0); - ggml_tensor * cur = build_rope_shift(ctx0, k, inp->k_shift, rope_factors, kv_self->k_l[il]->buffer); + ggml_tensor * cur = build_rope_shift(ctx0, k, inp->k_shift, rope_factors, freq_base_l, freq_scale_l, kv_self->k_l[il]->buffer); ggml_build_forward_expand(gf, cur); } diff --git a/src/llama-context.h b/src/llama-context.h index 71d702e8baeeb..88df8950e4cb0 100644 --- a/src/llama-context.h +++ b/src/llama-context.h @@ -168,6 +168,8 @@ struct llama_context { ggml_tensor * cur, ggml_tensor * shift, ggml_tensor * factors, + float freq_base, + float freq_scale, ggml_backend_buffer * bbuf) const; llm_graph_result_ptr build_kv_self_shift( diff --git a/src/llama-graph.cpp b/src/llama-graph.cpp index 1e3f2efc89d2c..4a53e83929f41 100644 --- a/src/llama-graph.cpp +++ b/src/llama-graph.cpp @@ -1403,34 +1403,7 @@ ggml_tensor * llm_graph_context::build_attn( ggml_build_forward_expand(gf, ggml_cpy(ctx0, v_cur, v_cache_view)); } - // TODO: improve - bool is_sliding = false; - - switch (arch) { - case LLM_ARCH_COHERE2: - { - const int32_t sliding_window_pattern = 4; - is_sliding = il % sliding_window_pattern < (sliding_window_pattern - 1); - } break; - case LLM_ARCH_GEMMA2: - { - const int32_t sliding_window_pattern = 2; - is_sliding = il % sliding_window_pattern < (sliding_window_pattern - 1); - } break; - case LLM_ARCH_GEMMA3: - { - const int32_t sliding_window_pattern = 6; - is_sliding = il % sliding_window_pattern < (sliding_window_pattern - 1); - } break; - case LLM_ARCH_PHI3: - { - is_sliding = hparams.n_swa > 0; - } break; - default: - { - is_sliding = false; - } - }; + const bool is_sliding = hparams.is_sliding(il); const auto & kq_mask = is_sliding ? inp->get_kq_mask_swa() : inp->get_kq_mask(); diff --git a/src/llama-hparams.cpp b/src/llama-hparams.cpp index ea87b2953d9dd..58e98bf2311db 100644 --- a/src/llama-hparams.cpp +++ b/src/llama-hparams.cpp @@ -69,3 +69,11 @@ uint32_t llama_hparams::n_embd_v_s() const { // corresponds to Mamba's ssm_states size return ssm_d_state * ssm_d_inner; } + +bool llama_hparams::is_sliding(uint32_t il) const { + if (il < n_layer) { + return n_swa > 0 && n_swa_pattern > 0 && il % n_swa_pattern < (n_swa_pattern - 1); + } + + GGML_ABORT("fatal error"); +} diff --git a/src/llama-hparams.h b/src/llama-hparams.h index 1fe45410371b9..e3091c8127dd5 100644 --- a/src/llama-hparams.h +++ b/src/llama-hparams.h @@ -36,6 +36,7 @@ struct llama_hparams { uint32_t n_layer; uint32_t n_rot; uint32_t n_swa = 0; // sliding window attention (SWA) + uint32_t n_swa_pattern = 1; // by default, all layers use non-sliding-window attention uint32_t n_embd_head_k; // dimension of keys (d_k). 
d_q is assumed to be the same, but there are n_head q heads, and only n_head_kv k-v heads uint32_t n_embd_head_v; // dimension of values (d_v) aka n_embd_head uint32_t n_expert = 0; @@ -133,6 +134,8 @@ struct llama_hparams { // dimension of the recurrent state embeddings uint32_t n_embd_v_s() const; + + bool is_sliding(uint32_t il) const; }; static_assert(std::is_trivially_copyable::value, "llama_hparams must be trivially copyable"); diff --git a/src/llama-model.cpp b/src/llama-model.cpp index 522219c012242..5647d2ad6245b 100644 --- a/src/llama-model.cpp +++ b/src/llama-model.cpp @@ -858,11 +858,13 @@ void llama_model::load_hparams(llama_model_loader & ml) { case LLM_ARCH_GEMMA2: { hparams.n_swa = 4096; // default value of gemma 2 + hparams.n_swa_pattern = 2; + hparams.attn_soft_cap = true; + ml.get_key(LLM_KV_ATTENTION_SLIDING_WINDOW, hparams.n_swa, false); ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps); ml.get_key(LLM_KV_ATTN_LOGIT_SOFTCAPPING, hparams.f_attn_logit_softcapping, false); ml.get_key(LLM_KV_FINAL_LOGIT_SOFTCAPPING, hparams.f_final_logit_softcapping, false); - hparams.attn_soft_cap = true; switch (hparams.n_layer) { case 26: type = LLM_TYPE_2B; break; @@ -873,6 +875,8 @@ void llama_model::load_hparams(llama_model_loader & ml) { } break; case LLM_ARCH_GEMMA3: { + hparams.n_swa_pattern = 6; + ml.get_key(LLM_KV_ATTENTION_SLIDING_WINDOW, hparams.n_swa); ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps); @@ -952,6 +956,8 @@ void llama_model::load_hparams(llama_model_loader & ml) { } break; case LLM_ARCH_COHERE2: { + hparams.n_swa_pattern = 4; + ml.get_key(LLM_KV_ATTENTION_SLIDING_WINDOW, hparams.n_swa); ml.get_key(LLM_KV_LOGIT_SCALE, hparams.f_logit_scale); ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps); @@ -7374,12 +7380,8 @@ struct llm_build_gemma3 : public llm_graph_context { // TODO: is causal == true correct? might need some changes auto * inp_attn = build_attn_inp_kv_unified(true, true); - // "5-to-1 interleaved attention" - // 5 layers of local attention followed by 1 layer of global attention - static const int sliding_window_pattern = 6; - for (int il = 0; il < n_layer; ++il) { - const bool is_sliding = il % sliding_window_pattern < (sliding_window_pattern - 1); + const bool is_sliding = hparams.is_sliding(il); const float freq_base_l = is_sliding ? 10000.0f : freq_base; const float freq_scale_l = is_sliding ? 
1.0f : freq_scale; @@ -7970,13 +7972,8 @@ struct llm_build_cohere2 : public llm_graph_context { auto * inp_attn = build_attn_inp_kv_unified(true, true); - // sliding window switch pattern - const int32_t sliding_window_pattern = 4; - for (int il = 0; il < n_layer; ++il) { - // three layers sliding window attention (window size 4096) and ROPE - // fourth layer uses global attention without positional embeddings - const bool is_sliding = il % sliding_window_pattern < (sliding_window_pattern - 1); + const bool is_sliding = hparams.is_sliding(il); // norm cur = build_norm(inpL, model.layers[il].attn_norm, NULL, LLM_NORM, il); From 081bee8c643b1f6302e9edfe789ce2d5f0be6c77 Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Fri, 14 Mar 2025 09:03:24 +0200 Subject: [PATCH 049/398] hparams : add SWA rope parameters (#12374) ggml-ci --- src/llama-context.cpp | 14 +++++--------- src/llama-graph.cpp | 4 ++-- src/llama-hparams.cpp | 2 +- src/llama-hparams.h | 4 +++- src/llama-model.cpp | 22 +++++++++++++++------- 5 files changed, 26 insertions(+), 20 deletions(-) diff --git a/src/llama-context.cpp b/src/llama-context.cpp index 89fb33cbcdae2..4df6b18ec1de3 100644 --- a/src/llama-context.cpp +++ b/src/llama-context.cpp @@ -537,16 +537,12 @@ llm_graph_result_ptr llama_context::build_kv_self_shift( const int64_t n_head_kv = hparams.n_head_kv(il); const int64_t n_embd_k_gqa = hparams.n_embd_k_gqa(il); - float freq_base_l = cparams.rope_freq_base; - float freq_scale_l = cparams.rope_freq_scale; + const bool is_swa = hparams.is_swa(il); - // TODO: improve - if (model.arch == LLM_ARCH_GEMMA3) { - const bool is_sliding = hparams.is_sliding(il); - - freq_base_l = is_sliding ? 10000.0f : cparams.rope_freq_base; - freq_scale_l = is_sliding ? 1.0f : cparams.rope_freq_scale; - } + // note: the swa rope params could become part of the cparams in the future + // if we decide to make them configurable, like the non-sliding ones + const float freq_base_l = is_swa ? hparams.rope_freq_base_train_swa : cparams.rope_freq_base; + const float freq_scale_l = is_swa ? hparams.rope_freq_scale_train_swa : cparams.rope_freq_scale; ggml_tensor * rope_factors = kv_self->cbs.get_rope_factors(n_ctx_per_seq(), il); diff --git a/src/llama-graph.cpp b/src/llama-graph.cpp index 4a53e83929f41..1041ba29fbb57 100644 --- a/src/llama-graph.cpp +++ b/src/llama-graph.cpp @@ -1403,9 +1403,9 @@ ggml_tensor * llm_graph_context::build_attn( ggml_build_forward_expand(gf, ggml_cpy(ctx0, v_cur, v_cache_view)); } - const bool is_sliding = hparams.is_sliding(il); + const bool is_swa = hparams.is_swa(il); - const auto & kq_mask = is_sliding ? inp->get_kq_mask_swa() : inp->get_kq_mask(); + const auto & kq_mask = is_swa ? 
inp->get_kq_mask_swa() : inp->get_kq_mask(); const auto n_kv = kv_self->n; diff --git a/src/llama-hparams.cpp b/src/llama-hparams.cpp index 58e98bf2311db..90dfe7a7fcc00 100644 --- a/src/llama-hparams.cpp +++ b/src/llama-hparams.cpp @@ -70,7 +70,7 @@ uint32_t llama_hparams::n_embd_v_s() const { return ssm_d_state * ssm_d_inner; } -bool llama_hparams::is_sliding(uint32_t il) const { +bool llama_hparams::is_swa(uint32_t il) const { if (il < n_layer) { return n_swa > 0 && n_swa_pattern > 0 && il % n_swa_pattern < (n_swa_pattern - 1); } diff --git a/src/llama-hparams.h b/src/llama-hparams.h index e3091c8127dd5..dbb7abd317b6f 100644 --- a/src/llama-hparams.h +++ b/src/llama-hparams.h @@ -79,7 +79,9 @@ struct llama_hparams { float rope_attn_factor = 1.0f; float rope_freq_base_train; + float rope_freq_base_train_swa; float rope_freq_scale_train; + float rope_freq_scale_train_swa; uint32_t n_ctx_orig_yarn; float rope_yarn_log_mul; @@ -135,7 +137,7 @@ struct llama_hparams { // dimension of the recurrent state embeddings uint32_t n_embd_v_s() const; - bool is_sliding(uint32_t il) const; + bool is_swa(uint32_t il) const; }; static_assert(std::is_trivially_copyable::value, "llama_hparams must be trivially copyable"); diff --git a/src/llama-model.cpp b/src/llama-model.cpp index 5647d2ad6245b..cce943df08a83 100644 --- a/src/llama-model.cpp +++ b/src/llama-model.cpp @@ -475,6 +475,10 @@ void llama_model::load_hparams(llama_model_loader & ml) { } hparams.rope_freq_scale_train = ropescale == 0.0f ? 1.0f : 1.0f/ropescale; + // by default assume that the sliding-window layers use the same scaling type as the non-sliding-window layers + hparams.rope_freq_base_train_swa = hparams.rope_freq_base_train; + hparams.rope_freq_scale_train_swa = hparams.rope_freq_scale_train; + ml.get_key(LLM_KV_ROPE_SCALING_ATTN_FACTOR, hparams.rope_attn_factor, false); // non-transformer models do not have attention heads @@ -877,6 +881,9 @@ void llama_model::load_hparams(llama_model_loader & ml) { { hparams.n_swa_pattern = 6; + hparams.rope_freq_base_train_swa = 10000.0f; + hparams.rope_freq_scale_train_swa = 1.0f; + ml.get_key(LLM_KV_ATTENTION_SLIDING_WINDOW, hparams.n_swa); ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps); @@ -1346,13 +1353,14 @@ bool llama_model::load_tensors(llama_model_loader & ml) { const int i_gpu_start = std::max((int) hparams.n_layer - n_gpu_layers, (int) 0); const int act_gpu_layers = devices.empty() ? 
0 : std::min(n_gpu_layers, (int)n_layer + 1); auto get_layer_buft_list = [&](int il) -> llama_model::impl::layer_dev { + const bool is_swa = il < (int) hparams.n_layer && hparams.is_swa(il); if (il < i_gpu_start || (il - i_gpu_start) >= act_gpu_layers) { - LLAMA_LOG_DEBUG("load_tensors: layer %3d assigned to device %s\n", il, ggml_backend_dev_name(cpu_dev)); + LLAMA_LOG_DEBUG("load_tensors: layer %3d assigned to device %s, is_swa = %d\n", il, ggml_backend_dev_name(cpu_dev), is_swa); return {cpu_dev, &pimpl->cpu_buft_list}; } const int layer_gpu = std::upper_bound(splits.begin(), splits.begin() + n_devices(), float(il - i_gpu_start)/act_gpu_layers) - splits.begin(); auto * dev = devices.at(layer_gpu); - LLAMA_LOG_DEBUG("load_tensors: layer %3d assigned to device %s\n", il, ggml_backend_dev_name(dev)); + LLAMA_LOG_DEBUG("load_tensors: layer %3d assigned to device %s, is_swa = %d\n", il, ggml_backend_dev_name(dev), is_swa); return {dev, &pimpl->gpu_buft_list.at(dev)}; }; @@ -7381,10 +7389,10 @@ struct llm_build_gemma3 : public llm_graph_context { auto * inp_attn = build_attn_inp_kv_unified(true, true); for (int il = 0; il < n_layer; ++il) { - const bool is_sliding = hparams.is_sliding(il); + const bool is_swa = hparams.is_swa(il); - const float freq_base_l = is_sliding ? 10000.0f : freq_base; - const float freq_scale_l = is_sliding ? 1.0f : freq_scale; + const float freq_base_l = is_swa ? hparams.rope_freq_base_train_swa : cparams.rope_freq_base; + const float freq_scale_l = is_swa ? hparams.rope_freq_scale_train_swa : cparams.rope_freq_scale; // norm cur = build_norm(inpL, model.layers[il].attn_norm, NULL, LLM_NORM_RMS, il); @@ -7973,7 +7981,7 @@ struct llm_build_cohere2 : public llm_graph_context { auto * inp_attn = build_attn_inp_kv_unified(true, true); for (int il = 0; il < n_layer; ++il) { - const bool is_sliding = hparams.is_sliding(il); + const bool is_swa = hparams.is_swa(il); // norm cur = build_norm(inpL, model.layers[il].attn_norm, NULL, LLM_NORM, il); @@ -8007,7 +8015,7 @@ struct llm_build_cohere2 : public llm_graph_context { cb(Vcur, "Vcur", il); } - if (is_sliding) { + if (is_swa) { Qcur = ggml_rope_ext(ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos, rope_factors, n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow); From c522ce4143a2b5c277f1e5f65cd570dbd0626466 Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Fri, 14 Mar 2025 10:47:44 +0200 Subject: [PATCH 050/398] graph : simplify attn input build for unified KV cache (#12381) ggml-ci --- src/llama-graph.cpp | 14 ++----- src/llama-graph.h | 4 +- src/llama-model.cpp | 93 +++++++++++++++++++++++---------------------- 3 files changed, 53 insertions(+), 58 deletions(-) diff --git a/src/llama-graph.cpp b/src/llama-graph.cpp index 1041ba29fbb57..e4af507780aa1 100644 --- a/src/llama-graph.cpp +++ b/src/llama-graph.cpp @@ -1311,29 +1311,23 @@ ggml_tensor * llm_graph_context::build_attn( return cur; } -llm_graph_input_attn_kv_unified * llm_graph_context::build_attn_inp_kv_unified( - bool causal, - bool swa) const { +llm_graph_input_attn_kv_unified * llm_graph_context::build_attn_inp_kv_unified() const { const llama_kv_cache_unified * kv_self = static_cast(memory); auto inp = std::make_unique(hparams, cparams, kv_self); const auto n_kv = kv_self->n; - inp->self_kq_mask = causal - ? 
ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_kv, GGML_PAD(n_tokens, GGML_KQ_MASK_PAD)) - : ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_tokens, GGML_PAD(n_tokens, GGML_KQ_MASK_PAD)); + inp->self_kq_mask = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_kv, GGML_PAD(n_tokens, GGML_KQ_MASK_PAD)); //cb(inp->self_kq_mask, "KQ_mask", -1); ggml_set_input(inp->self_kq_mask); inp->self_kq_mask_cnv = cparams.flash_attn ? ggml_cast(ctx0, inp->self_kq_mask, GGML_TYPE_F16) : inp->self_kq_mask; - if (swa) { + if (hparams.n_swa_pattern > 1) { GGML_ASSERT(hparams.n_swa > 0); - inp->self_kq_mask_swa = causal - ? ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_kv, GGML_PAD(n_tokens, GGML_KQ_MASK_PAD)) - : ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_tokens, GGML_PAD(n_tokens, GGML_KQ_MASK_PAD)); + inp->self_kq_mask_swa = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_kv, GGML_PAD(n_tokens, GGML_KQ_MASK_PAD)); //cb(inp->self_kq_mask_swa, "KQ_mask_swa", -1); ggml_set_input(inp->self_kq_mask_swa); diff --git a/src/llama-graph.h b/src/llama-graph.h index b7a66d1898736..c4328e6f9e627 100644 --- a/src/llama-graph.h +++ b/src/llama-graph.h @@ -509,9 +509,7 @@ struct llm_graph_context { float kq_scale, int il) const; - llm_graph_input_attn_kv_unified * build_attn_inp_kv_unified( - bool causal, - bool swa) const; + llm_graph_input_attn_kv_unified * build_attn_inp_kv_unified() const; ggml_tensor * build_attn( llm_graph_input_attn_kv_unified * inp, diff --git a/src/llama-model.cpp b/src/llama-model.cpp index cce943df08a83..750a702ff77a4 100644 --- a/src/llama-model.cpp +++ b/src/llama-model.cpp @@ -784,9 +784,11 @@ void llama_model::load_hparams(llama_model_loader & ml) { hparams.n_swa = 2047; } else if (hparams.n_layer == 32 && hparams.n_head_kv(0) == 32 && hparams.n_ctx_train == 131072) { // default value for Phi-3-mini-128k-instruct + // note: this seems incorrect because the window is bigger than the train context? hparams.n_swa = 262144; } else if (hparams.n_layer == 40 && hparams.n_ctx_train == 131072) { // default value for Phi-3-medium-128k-instruct + // note: this seems incorrect because the window is equal to the train context? hparams.n_swa = 131072; } bool found_swa = ml.get_key(LLM_KV_ATTENTION_SLIDING_WINDOW, hparams.n_swa, false); @@ -3710,6 +3712,7 @@ void llama_model::print_info() const { LLAMA_LOG_INFO("%s: n_head_kv = %s\n", __func__, print_f([&](uint32_t il) { return hparams.n_head_kv(il); }, hparams.n_layer).c_str()); LLAMA_LOG_INFO("%s: n_rot = %u\n", __func__, hparams.n_rot); LLAMA_LOG_INFO("%s: n_swa = %u\n", __func__, hparams.n_swa); + LLAMA_LOG_INFO("%s: n_swa_pattern = %u\n", __func__, hparams.n_swa_pattern); LLAMA_LOG_INFO("%s: n_embd_head_k = %u\n", __func__, hparams.n_embd_head_k); LLAMA_LOG_INFO("%s: n_embd_head_v = %u\n", __func__, hparams.n_embd_head_v); LLAMA_LOG_INFO("%s: n_gqa = %s\n", __func__, print_f([&](uint32_t il) { return hparams.n_gqa(il); }, hparams.n_layer).c_str()); @@ -3871,7 +3874,7 @@ struct llm_build_llama : public llm_graph_context { // inp_pos - contains the positions ggml_tensor * inp_pos = build_inp_pos(); - auto * inp_attn = build_attn_inp_kv_unified(true, false); + auto * inp_attn = build_attn_inp_kv_unified(); const float kq_scale = hparams.f_attention_scale == 0.0f ? 
1.0f/sqrtf(float(n_embd_head)) : hparams.f_attention_scale; for (int il = 0; il < n_layer; ++il) { @@ -4034,7 +4037,7 @@ struct llm_build_deci : public llm_graph_context { // inp_pos - contains the positions ggml_tensor * inp_pos = build_inp_pos(); - auto * inp_attn = build_attn_inp_kv_unified(true, false); + auto * inp_attn = build_attn_inp_kv_unified(); const float kq_scale = hparams.f_attention_scale == 0.0f ? 1.0f/sqrtf(float(n_embd_head)) : hparams.f_attention_scale; for (int il = 0; il < n_layer; ++il) { @@ -4192,7 +4195,7 @@ struct llm_build_baichuan : public llm_graph_context { // inp_pos - contains the positions ggml_tensor * inp_pos = model.type == LLM_TYPE_7B ? build_inp_pos() : nullptr; - auto * inp_attn = build_attn_inp_kv_unified(true, false); + auto * inp_attn = build_attn_inp_kv_unified(); for (int il = 0; il < n_layer; ++il) { ggml_tensor * inpSA = inpL; @@ -4310,7 +4313,7 @@ struct llm_build_xverse : public llm_graph_context { // inp_pos - contains the positions ggml_tensor * inp_pos = build_inp_pos(); - auto * inp_attn = build_attn_inp_kv_unified(true, false); + auto * inp_attn = build_attn_inp_kv_unified(); for (int il = 0; il < n_layer; ++il) { ggml_tensor * inpSA = inpL; @@ -4418,7 +4421,7 @@ struct llm_build_falcon : public llm_graph_context { // inp_pos - contains the positions ggml_tensor * inp_pos = build_inp_pos(); - auto * inp_attn = build_attn_inp_kv_unified(true, false); + auto * inp_attn = build_attn_inp_kv_unified(); for (int il = 0; il < n_layer; ++il) { ggml_tensor * attn_norm; @@ -4543,7 +4546,7 @@ struct llm_build_grok : public llm_graph_context { // inp_pos - contains the positions ggml_tensor * inp_pos = build_inp_pos(); - auto * inp_attn = build_attn_inp_kv_unified(true, false); + auto * inp_attn = build_attn_inp_kv_unified(); for (int il = 0; il < n_layer; ++il) { ggml_tensor * inpSA = inpL; @@ -4697,7 +4700,7 @@ struct llm_build_dbrx : public llm_graph_context { // inp_pos - contains the positions ggml_tensor * inp_pos = build_inp_pos(); - auto * inp_attn = build_attn_inp_kv_unified(true, false); + auto * inp_attn = build_attn_inp_kv_unified(); for (int il = 0; il < n_layer; ++il) { ggml_tensor * inpSA = inpL; @@ -4821,7 +4824,7 @@ struct llm_build_starcoder : public llm_graph_context { // inp_pos - contains the positions ggml_tensor * inp_pos = build_inp_pos(); - auto * inp_attn = build_attn_inp_kv_unified(true, false); + auto * inp_attn = build_attn_inp_kv_unified(); ggml_tensor * pos = ggml_get_rows(ctx0, model.pos_embd, inp_pos); cb(pos, "pos_embd", -1); @@ -4924,7 +4927,7 @@ struct llm_build_refact : public llm_graph_context { inpL = build_inp_embd(model.tok_embd); - auto * inp_attn = build_attn_inp_kv_unified(true, false); + auto * inp_attn = build_attn_inp_kv_unified(); for (int il = 0; il < n_layer; ++il) { ggml_tensor * inpSA = inpL; @@ -5187,7 +5190,7 @@ struct llm_build_bloom : public llm_graph_context { inpL = build_inp_embd(model.tok_embd); - auto * inp_attn = build_attn_inp_kv_unified(true, false); + auto * inp_attn = build_attn_inp_kv_unified(); inpL = build_norm(inpL, model.tok_norm, @@ -5292,7 +5295,7 @@ struct llm_build_mpt : public llm_graph_context { inpL = build_inp_embd(model.tok_embd); - auto * inp_attn = build_attn_inp_kv_unified(true, false); + auto * inp_attn = build_attn_inp_kv_unified(); if (model.pos_embd) { // inp_pos - contains the positions @@ -5436,7 +5439,7 @@ struct llm_build_stablelm : public llm_graph_context { // inp_pos - contains the positions ggml_tensor * inp_pos = build_inp_pos(); - auto * inp_attn = 
build_attn_inp_kv_unified(true, false); + auto * inp_attn = build_attn_inp_kv_unified(); for (int il = 0; il < n_layer; ++il) { // norm @@ -5587,7 +5590,7 @@ struct llm_build_qwen : public llm_graph_context { // inp_pos - contains the positions ggml_tensor * inp_pos = build_inp_pos(); - auto * inp_attn = build_attn_inp_kv_unified(true, false); + auto * inp_attn = build_attn_inp_kv_unified(); for (int il = 0; il < n_layer; ++il) { ggml_tensor * inpSA = inpL; @@ -5703,7 +5706,7 @@ struct llm_build_qwen2 : public llm_graph_context { // inp_pos - contains the positions ggml_tensor * inp_pos = build_inp_pos(); - auto * inp_attn = build_attn_inp_kv_unified(true, false); + auto * inp_attn = build_attn_inp_kv_unified(); for (int il = 0; il < n_layer; ++il) { ggml_tensor * inpSA = inpL; @@ -5818,7 +5821,7 @@ struct llm_build_qwen2vl : public llm_graph_context { // inp_pos - contains the positions ggml_tensor * inp_pos = build_inp_pos(); - auto * inp_attn = build_attn_inp_kv_unified(true, false); + auto * inp_attn = build_attn_inp_kv_unified(); int sections[4]; std::copy(std::begin(hparams.rope_sections), std::begin(hparams.rope_sections) + 4, sections); @@ -5938,7 +5941,7 @@ struct llm_build_qwen2moe : public llm_graph_context { // inp_pos - contains the positions ggml_tensor * inp_pos = build_inp_pos(); - auto * inp_attn = build_attn_inp_kv_unified(true, false); + auto * inp_attn = build_attn_inp_kv_unified(); for (int il = 0; il < n_layer; ++il) { ggml_tensor * inpSA = inpL; @@ -6087,7 +6090,7 @@ struct llm_build_phi2 : public llm_graph_context { // inp_pos - contains the positions ggml_tensor * inp_pos = build_inp_pos(); - auto * inp_attn = build_attn_inp_kv_unified(true, false); + auto * inp_attn = build_attn_inp_kv_unified(); for (int il = 0; il < n_layer; ++il) { attn_norm_output = build_norm(inpL, @@ -6211,7 +6214,7 @@ struct llm_build_phi3 : public llm_graph_context { // inp_pos - contains the positions ggml_tensor * inp_pos = build_inp_pos(); - auto * inp_attn = build_attn_inp_kv_unified(true, true); + auto * inp_attn = build_attn_inp_kv_unified(); for (int il = 0; il < n_layer; ++il) { auto * residual = inpL; @@ -6357,7 +6360,7 @@ struct llm_build_plamo : public llm_graph_context { // inp_pos - contains the positions ggml_tensor * inp_pos = build_inp_pos(); - auto * inp_attn = build_attn_inp_kv_unified(true, false); + auto * inp_attn = build_attn_inp_kv_unified(); for (int il = 0; il < n_layer; ++il) { @@ -6465,7 +6468,7 @@ struct llm_build_gpt2 : public llm_graph_context { // inp_pos - contains the positions ggml_tensor * inp_pos = build_inp_pos(); - auto * inp_attn = build_attn_inp_kv_unified(true, false); + auto * inp_attn = build_attn_inp_kv_unified(); pos = ggml_get_rows(ctx0, model.pos_embd, inp_pos); cb(pos, "pos_embd", -1); @@ -6573,7 +6576,7 @@ struct llm_build_codeshell : public llm_graph_context { // inp_pos - contains the positions ggml_tensor * inp_pos = build_inp_pos(); - auto * inp_attn = build_attn_inp_kv_unified(true, false); + auto * inp_attn = build_attn_inp_kv_unified(); for (int il = 0; il < n_layer; ++il) { cur = build_norm(inpL, @@ -6686,7 +6689,7 @@ struct llm_build_orion : public llm_graph_context { // inp_pos - contains the positions ggml_tensor * inp_pos = build_inp_pos(); - auto * inp_attn = build_attn_inp_kv_unified(true, false); + auto * inp_attn = build_attn_inp_kv_unified(); for (int il = 0; il < n_layer; ++il) { ggml_tensor * inpSA = inpL; @@ -6807,7 +6810,7 @@ struct llm_build_internlm2 : public llm_graph_context { // inp_pos - contains the positions 
ggml_tensor * inp_pos = build_inp_pos(); - auto * inp_attn = build_attn_inp_kv_unified(true, false); + auto * inp_attn = build_attn_inp_kv_unified(); for (int il = 0; il < n_layer; ++il) { ggml_tensor * inpSA = inpL; @@ -6937,7 +6940,7 @@ struct llm_build_minicpm3 : public llm_graph_context { // inp_pos - contains the positions ggml_tensor * inp_pos = build_inp_pos(); - auto * inp_attn = build_attn_inp_kv_unified(true, false); + auto * inp_attn = build_attn_inp_kv_unified(); for (int il = 0; il < n_layer; ++il) { ggml_tensor * inpSA = inpL; @@ -7141,7 +7144,7 @@ struct llm_build_gemma : public llm_graph_context { // inp_pos - contains the positions ggml_tensor * inp_pos = build_inp_pos(); - auto * inp_attn = build_attn_inp_kv_unified(true, false); + auto * inp_attn = build_attn_inp_kv_unified(); for (int il = 0; il < n_layer; ++il) { // norm @@ -7251,7 +7254,7 @@ struct llm_build_gemma2 : public llm_graph_context { // inp_pos - contains the positions ggml_tensor * inp_pos = build_inp_pos(); - auto * inp_attn = build_attn_inp_kv_unified(true, true); + auto * inp_attn = build_attn_inp_kv_unified(); for (int il = 0; il < n_layer; ++il) { // norm @@ -7386,7 +7389,7 @@ struct llm_build_gemma3 : public llm_graph_context { ggml_tensor * inp_pos = build_inp_pos(); // TODO: is causal == true correct? might need some changes - auto * inp_attn = build_attn_inp_kv_unified(true, true); + auto * inp_attn = build_attn_inp_kv_unified(); for (int il = 0; il < n_layer; ++il) { const bool is_swa = hparams.is_swa(il); @@ -7515,7 +7518,7 @@ struct llm_build_starcoder2 : public llm_graph_context { // inp_pos - contains the positions ggml_tensor * inp_pos = build_inp_pos(); - auto * inp_attn = build_attn_inp_kv_unified(true, false); + auto * inp_attn = build_attn_inp_kv_unified(); for (int il = 0; il < n_layer; ++il) { ggml_tensor * inpSA = inpL; @@ -7828,7 +7831,7 @@ struct llm_build_command_r : public llm_graph_context { // inp_pos - contains the positions ggml_tensor * inp_pos = build_inp_pos(); - auto * inp_attn = build_attn_inp_kv_unified(true, false); + auto * inp_attn = build_attn_inp_kv_unified(); for (int il = 0; il < n_layer; ++il) { @@ -7978,7 +7981,7 @@ struct llm_build_cohere2 : public llm_graph_context { // inp_pos - contains the positions ggml_tensor * inp_pos = build_inp_pos(); - auto * inp_attn = build_attn_inp_kv_unified(true, true); + auto * inp_attn = build_attn_inp_kv_unified(); for (int il = 0; il < n_layer; ++il) { const bool is_swa = hparams.is_swa(il); @@ -8110,7 +8113,7 @@ struct llm_build_olmo : public llm_graph_context { // inp_pos - contains the positions ggml_tensor * inp_pos = build_inp_pos(); - auto * inp_attn = build_attn_inp_kv_unified(true, false); + auto * inp_attn = build_attn_inp_kv_unified(); for (int il = 0; il < n_layer; ++il) { ggml_tensor * inpSA = inpL; @@ -8232,7 +8235,7 @@ struct llm_build_olmo2 : public llm_graph_context { // inp_pos - contains the positions ggml_tensor * inp_pos = build_inp_pos(); - auto * inp_attn = build_attn_inp_kv_unified(true, false); + auto * inp_attn = build_attn_inp_kv_unified(); for (int il = 0; il < n_layer; ++il) { ggml_tensor * inpSA = inpL; @@ -8358,7 +8361,7 @@ struct llm_build_olmoe : public llm_graph_context { // inp_pos - contains the positions ggml_tensor * inp_pos = build_inp_pos(); - auto * inp_attn = build_attn_inp_kv_unified(true, false); + auto * inp_attn = build_attn_inp_kv_unified(); for (int il = 0; il < n_layer; ++il) { ggml_tensor * inpSA = inpL; @@ -8481,7 +8484,7 @@ struct llm_build_openelm : public llm_graph_context { 
// inp_pos - contains the positions ggml_tensor * inp_pos = build_inp_pos(); - auto * inp_attn = build_attn_inp_kv_unified(true, false); + auto * inp_attn = build_attn_inp_kv_unified(); for (int il = 0; il < n_layer; ++il) { const int64_t n_head = hparams.n_head(il); @@ -8611,7 +8614,7 @@ struct llm_build_gptneox : public llm_graph_context { // inp_pos - contains the positions ggml_tensor * inp_pos = build_inp_pos(); - auto * inp_attn = build_attn_inp_kv_unified(true, false); + auto * inp_attn = build_attn_inp_kv_unified(); for (int il = 0; il < n_layer; ++il) { cur = build_norm(inpL, @@ -8757,7 +8760,7 @@ struct llm_build_arctic : public llm_graph_context { // inp_pos - contains the positions ggml_tensor * inp_pos = build_inp_pos(); - auto * inp_attn = build_attn_inp_kv_unified(true, false); + auto * inp_attn = build_attn_inp_kv_unified(); for (int il = 0; il < n_layer; ++il) { ggml_tensor * inpSA = inpL; @@ -8889,7 +8892,7 @@ struct llm_build_deepseek : public llm_graph_context { // inp_pos - contains the positions ggml_tensor * inp_pos = build_inp_pos(); - auto * inp_attn = build_attn_inp_kv_unified(true, false); + auto * inp_attn = build_attn_inp_kv_unified(); const float kq_scale = hparams.f_attention_scale == 0.0f ? 1.0f/sqrtf(float(n_embd_head)) : hparams.f_attention_scale; @@ -9054,7 +9057,7 @@ struct llm_build_deepseek2 : public llm_graph_context { // inp_pos - contains the positions ggml_tensor * inp_pos = build_inp_pos(); - auto * inp_attn = build_attn_inp_kv_unified(true, false); + auto * inp_attn = build_attn_inp_kv_unified(); for (int il = 0; il < n_layer; ++il) { ggml_tensor * inpSA = inpL; @@ -9274,7 +9277,7 @@ struct llm_build_bitnet : public llm_graph_context { // inp_pos - contains the positions ggml_tensor * inp_pos = build_inp_pos(); - auto * inp_attn = build_attn_inp_kv_unified(true, false); + auto * inp_attn = build_attn_inp_kv_unified(); for (int il = 0; il < n_layer; ++il) { ggml_tensor * inpSA = inpL; @@ -9532,7 +9535,7 @@ struct llm_build_t5_dec : public llm_graph_context { const int64_t n_outputs_enc = embd_enc->ne[1]; - auto * inp_attn_self = build_attn_inp_kv_unified(true, false); + auto * inp_attn_self = build_attn_inp_kv_unified(); auto * inp_attn_cross = build_attn_inp_cross(); for (int il = 0; il < n_layer; ++il) { @@ -9698,7 +9701,7 @@ struct llm_build_jais : public llm_graph_context { inpL = build_inp_embd(model.tok_embd); - auto * inp_attn = build_attn_inp_kv_unified(true, false); + auto * inp_attn = build_attn_inp_kv_unified(); for (int il = 0; il < n_layer; ++il) { cur = build_norm(inpL, @@ -9794,7 +9797,7 @@ struct llm_build_chatglm : public llm_graph_context { // inp_pos - contains the positions ggml_tensor * inp_pos = build_inp_pos(); - auto * inp_attn = build_attn_inp_kv_unified(true, false); + auto * inp_attn = build_attn_inp_kv_unified(); for (int il = 0; il < n_layer; ++il) { ggml_tensor * inpSA = inpL; @@ -9926,7 +9929,7 @@ struct llm_build_nemotron : public llm_graph_context { // inp_pos - contains the positions ggml_tensor * inp_pos = build_inp_pos(); - auto * inp_attn = build_attn_inp_kv_unified(true, false); + auto * inp_attn = build_attn_inp_kv_unified(); for (int il = 0; il < n_layer; ++il) { ggml_tensor * inpSA = inpL; @@ -10049,7 +10052,7 @@ struct llm_build_exaone : public llm_graph_context { // inp_pos - contains the positions ggml_tensor * inp_pos = build_inp_pos(); - auto * inp_attn = build_attn_inp_kv_unified(true, false); + auto * inp_attn = build_attn_inp_kv_unified(); for (int il = 0; il < n_layer; ++il) { ggml_tensor * inpSA = 
inpL; @@ -10565,7 +10568,7 @@ struct llm_build_chameleon : public llm_graph_context { // inp_pos - contains the positions ggml_tensor * inp_pos = build_inp_pos(); - auto * inp_attn = build_attn_inp_kv_unified(true, false); + auto * inp_attn = build_attn_inp_kv_unified(); for (int il = 0; il < n_layer; ++il) { ggml_tensor * inpSA = inpL; From add2a3aa5a1571211aa5c7303b8e80c8d1824b91 Mon Sep 17 00:00:00 2001 From: Victor <194116445+dodekapod@users.noreply.github.com> Date: Fri, 14 Mar 2025 11:21:17 +0100 Subject: [PATCH 051/398] server: fix "--grammar-file" parameter (#12285) --- examples/server/utils.hpp | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/examples/server/utils.hpp b/examples/server/utils.hpp index 36ad276fd3ce0..58cdd6af92974 100644 --- a/examples/server/utils.hpp +++ b/examples/server/utils.hpp @@ -621,7 +621,9 @@ static json oaicompat_completion_params_parse( llama_params["chat_format"] = static_cast(chat_params.format); llama_params["prompt"] = chat_params.prompt; - llama_params["grammar"] = chat_params.grammar; + if (!chat_params.grammar.empty()) { + llama_params["grammar"] = chat_params.grammar; + } llama_params["grammar_lazy"] = chat_params.grammar_lazy; auto grammar_triggers = json::array(); for (const auto & trigger : chat_params.grammar_triggers) { From 8fcb563613e20a04dd9791f0a9b8a41086428c09 Mon Sep 17 00:00:00 2001 From: fairydreaming <166155368+fairydreaming@users.noreply.github.com> Date: Fri, 14 Mar 2025 13:47:05 +0100 Subject: [PATCH 052/398] Load all MoE experts during warmup (#11571) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * llama : introduce llama_set_warmup() API call that controls warmup mode; use all MoE experts during warmup * common : use new API to enable warmup mode during model warmup --------- Co-authored-by: Stanisław Szymczyk --- common/common.cpp | 3 +++ include/llama.h | 4 ++++ src/llama-context.cpp | 13 ++++++++++++- src/llama-context.h | 1 + src/llama-cparams.h | 1 + src/llama-graph.cpp | 2 +- 6 files changed, 22 insertions(+), 2 deletions(-) diff --git a/common/common.cpp b/common/common.cpp index 8487e3834bccb..18ffb4e738aee 100644 --- a/common/common.cpp +++ b/common/common.cpp @@ -1033,6 +1033,8 @@ struct common_init_result common_init_from_params(common_params & params) { if (params.warmup) { LOG_WRN("%s: warming up the model with an empty run - please wait ... (--no-warmup to disable)\n", __func__); + llama_set_warmup(lctx, true); + std::vector tmp; llama_token bos = llama_vocab_bos(vocab); llama_token eos = llama_vocab_eos(vocab); @@ -1063,6 +1065,7 @@ struct common_init_result common_init_from_params(common_params & params) { llama_kv_self_clear(lctx); llama_synchronize(lctx); llama_perf_context_reset(lctx); + llama_set_warmup(lctx, false); } iparams.model.reset(model); diff --git a/include/llama.h b/include/llama.h index e5286f06162ab..6a44be404d914 100644 --- a/include/llama.h +++ b/include/llama.h @@ -945,6 +945,10 @@ extern "C" { // If set to true, the model will only attend to the past tokens LLAMA_API void llama_set_causal_attn(struct llama_context * ctx, bool causal_attn); + // Set whether the model is in warmup mode or not + // If true, all model tensors are activated during llama_decode() to load and cache their weights. 
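For context, the common.cpp hunk earlier in this patch brackets the warmup decode with the new call. A minimal caller-side sketch of that pattern is shown below — illustration only, derived from the hunks above, not part of the patch itself; here lctx and vocab stand for an already-initialized llama_context and its llama_vocab:

    // enable warmup so llama_decode() activates all tensors (and, for MoE models, all experts)
    llama_set_warmup(lctx, true);

    std::vector<llama_token> tmp;
    const llama_token bos = llama_vocab_bos(vocab);
    const llama_token eos = llama_vocab_eos(vocab);
    if (bos != LLAMA_TOKEN_NULL) { tmp.push_back(bos); }
    if (eos != LLAMA_TOKEN_NULL) { tmp.push_back(eos); }
    if (tmp.empty())             { tmp.push_back(0);   }

    llama_decode(lctx, llama_batch_get_one(tmp.data(), (int32_t) tmp.size()));

    // discard the warmup state so it does not affect subsequent generation
    llama_kv_self_clear(lctx);
    llama_synchronize(lctx);
    llama_perf_context_reset(lctx);

    // back to normal decoding
    llama_set_warmup(lctx, false);
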
+ LLAMA_API void llama_set_warmup(struct llama_context * ctx, bool warmup); + // Set abort callback LLAMA_API void llama_set_abort_callback(struct llama_context * ctx, ggml_abort_callback abort_callback, void * abort_callback_data); diff --git a/src/llama-context.cpp b/src/llama-context.cpp index 4df6b18ec1de3..c2fcce42a7d58 100644 --- a/src/llama-context.cpp +++ b/src/llama-context.cpp @@ -39,6 +39,7 @@ llama_context::llama_context( cparams.flash_attn = params.flash_attn; cparams.no_perf = params.no_perf; cparams.pooling_type = params.pooling_type; + cparams.warmup = false; cparams.n_ctx = params.n_ctx == 0 ? hparams.n_ctx_train : params.n_ctx; cparams.rope_freq_base = params.rope_freq_base == 0.0f ? hparams.rope_freq_base_train : params.rope_freq_base; @@ -948,6 +949,12 @@ void llama_context::set_causal_attn(bool value) { cparams.causal_attn = value; } +void llama_context::set_warmup(bool value) { + LLAMA_LOG_DEBUG("%s: value = %d\n", __func__, value); + + cparams.warmup = value; +} + void llama_context::set_adapter_lora( llama_adapter_lora * adapter, float scale) { @@ -1594,7 +1601,7 @@ void llama_context::output_reorder() { // int32_t llama_context::graph_max_nodes() const { - return std::max(8192, 5*model.n_tensors()); + return std::max(65536, 5*model.n_tensors()); } ggml_cgraph * llama_context::graph_init() { @@ -2372,6 +2379,10 @@ void llama_set_causal_attn(llama_context * ctx, bool causal_attn) { ctx->set_causal_attn(causal_attn); } +void llama_set_warmup(llama_context * ctx, bool warmup) { + ctx->set_warmup(warmup); +} + void llama_synchronize(llama_context * ctx) { ctx->synchronize(); } diff --git a/src/llama-context.h b/src/llama-context.h index 88df8950e4cb0..04facb544cb1a 100644 --- a/src/llama-context.h +++ b/src/llama-context.h @@ -64,6 +64,7 @@ struct llama_context { void set_embeddings (bool value); void set_causal_attn(bool value); + void set_warmup(bool value); void set_adapter_lora( llama_adapter_lora * adapter, diff --git a/src/llama-cparams.h b/src/llama-cparams.h index 252012f3d9405..30e550f023a9e 100644 --- a/src/llama-cparams.h +++ b/src/llama-cparams.h @@ -29,6 +29,7 @@ struct llama_cparams { bool offload_kqv; bool flash_attn; bool no_perf; + bool warmup; enum llama_pooling_type pooling_type; diff --git a/src/llama-graph.cpp b/src/llama-graph.cpp index e4af507780aa1..4e90873397ca4 100644 --- a/src/llama-graph.cpp +++ b/src/llama-graph.cpp @@ -577,7 +577,7 @@ llm_graph_context::llm_graph_context(const llm_graph_params & params) : n_embd_head_v (hparams.n_embd_head_v), n_embd_v_gqa (hparams.n_embd_v_gqa()), n_expert (hparams.n_expert), - n_expert_used (hparams.n_expert_used), + n_expert_used (cparams.warmup ? 
hparams.n_expert : hparams.n_expert_used), freq_base (cparams.rope_freq_base), freq_scale (cparams.rope_freq_scale), ext_factor (cparams.yarn_ext_factor), From 774973b8f3d5e375b0b74d58638eeb1817e950a8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sigbj=C3=B8rn=20Skj=C3=A6ret?= Date: Fri, 14 Mar 2025 16:57:05 +0100 Subject: [PATCH 053/398] main : add -sysf / --system-prompt-file (#12249) (#12250) * add system_prompt_file * add -sysf / --system-prompt-file * remove system_prompt_file --- common/arg.cpp | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/common/arg.cpp b/common/arg.cpp index fe6a1eece7fed..240c699a2cf76 100644 --- a/common/arg.cpp +++ b/common/arg.cpp @@ -853,6 +853,20 @@ common_params_context common_params_parser_init(common_params & params, llama_ex } } ).set_excludes({LLAMA_EXAMPLE_SERVER})); + add_opt(common_arg( + {"-sysf", "--system-prompt-file"}, "FNAME", + "a file containing the system prompt (default: none)", + [](common_params & params, const std::string & value) { + std::ifstream file(value); + if (!file) { + throw std::runtime_error(string_format("error: failed to open file '%s'\n", value.c_str())); + } + std::copy(std::istreambuf_iterator(file), std::istreambuf_iterator(), back_inserter(params.system_prompt)); + if (!params.system_prompt.empty() && params.system_prompt.back() == '\n') { + params.system_prompt.pop_back(); + } + } + ).set_examples({LLAMA_EXAMPLE_MAIN})); add_opt(common_arg( {"--in-file"}, "FNAME", "an input file (repeat to specify multiple files)", From 9f2250ba722738ec0e6ab684636268a79160c854 Mon Sep 17 00:00:00 2001 From: Eric Curtin Date: Fri, 14 Mar 2025 16:41:20 +0000 Subject: [PATCH 054/398] Add CLI arg to llama-run to adjust the number of threads used (#12370) We default to 4, sometimes we want to manually adjust this Signed-off-by: Eric Curtin --- examples/run/run.cpp | 133 +++++++++++++++++++++++++++++-------------- 1 file changed, 89 insertions(+), 44 deletions(-) diff --git a/examples/run/run.cpp b/examples/run/run.cpp index 437f2533e5777..462a6d151933e 100644 --- a/examples/run/run.cpp +++ b/examples/run/run.cpp @@ -79,6 +79,7 @@ class Opt { ctx_params = llama_context_default_params(); model_params = llama_model_default_params(); context_size_default = ctx_params.n_batch; + n_threads_default = ctx_params.n_threads; ngl_default = model_params.n_gpu_layers; common_params_sampling sampling; temperature_default = sampling.temp; @@ -104,6 +105,7 @@ class Opt { ctx_params.n_batch = context_size >= 0 ? context_size : context_size_default; ctx_params.n_ctx = ctx_params.n_batch; + ctx_params.n_threads = ctx_params.n_threads_batch = n_threads >= 0 ? n_threads : n_threads_default; model_params.n_gpu_layers = ngl >= 0 ? ngl : ngl_default; temperature = temperature >= 0 ? 
temperature : temperature_default; @@ -116,12 +118,12 @@ class Opt { std::string chat_template_file; std::string user; bool use_jinja = false; - int context_size = -1, ngl = -1; + int context_size = -1, ngl = -1, n_threads = -1; float temperature = -1; bool verbose = false; private: - int context_size_default = -1, ngl_default = -1; + int context_size_default = -1, ngl_default = -1, n_threads_default = -1; float temperature_default = -1; bool help = false; @@ -159,53 +161,94 @@ class Opt { return 0; } + int parse_options_with_value(int argc, const char ** argv, int & i, bool & options_parsing) { + if (options_parsing && (strcmp(argv[i], "-c") == 0 || strcmp(argv[i], "--context-size") == 0)) { + if (handle_option_with_value(argc, argv, i, context_size) == 1) { + return 1; + } + } else if (options_parsing && + (strcmp(argv[i], "-n") == 0 || strcmp(argv[i], "-ngl") == 0 || strcmp(argv[i], "--ngl") == 0)) { + if (handle_option_with_value(argc, argv, i, ngl) == 1) { + return 1; + } + } else if (options_parsing && (strcmp(argv[i], "-t") == 0 || strcmp(argv[i], "--threads") == 0)) { + if (handle_option_with_value(argc, argv, i, n_threads) == 1) { + return 1; + } + } else if (options_parsing && strcmp(argv[i], "--temp") == 0) { + if (handle_option_with_value(argc, argv, i, temperature) == 1) { + return 1; + } + } else if (options_parsing && strcmp(argv[i], "--chat-template-file") == 0) { + if (handle_option_with_value(argc, argv, i, chat_template_file) == 1) { + return 1; + } + use_jinja = true; + } else { + return 2; + } + + return 0; + } + + int parse_options(const char ** argv, int & i, bool & options_parsing) { + if (options_parsing && (parse_flag(argv, i, "-v", "--verbose") || parse_flag(argv, i, "-v", "--log-verbose"))) { + verbose = true; + } else if (options_parsing && strcmp(argv[i], "--jinja") == 0) { + use_jinja = true; + } else if (options_parsing && parse_flag(argv, i, "-h", "--help")) { + help = true; + return 0; + } else if (options_parsing && strcmp(argv[i], "--") == 0) { + options_parsing = false; + } else { + return 2; + } + + return 0; + } + + int parse_positional_args(const char ** argv, int & i, int & positional_args_i) { + if (positional_args_i == 0) { + if (!argv[i][0] || argv[i][0] == '-') { + return 1; + } + + ++positional_args_i; + model_ = argv[i]; + } else if (positional_args_i == 1) { + ++positional_args_i; + user = argv[i]; + } else { + user += " " + std::string(argv[i]); + } + + return 0; + } + int parse(int argc, const char ** argv) { bool options_parsing = true; for (int i = 1, positional_args_i = 0; i < argc; ++i) { - if (options_parsing && (strcmp(argv[i], "-c") == 0 || strcmp(argv[i], "--context-size") == 0)) { - if (handle_option_with_value(argc, argv, i, context_size) == 1) { - return 1; - } - } else if (options_parsing && - (strcmp(argv[i], "-n") == 0 || strcmp(argv[i], "-ngl") == 0 || strcmp(argv[i], "--ngl") == 0)) { - if (handle_option_with_value(argc, argv, i, ngl) == 1) { - return 1; - } - } else if (options_parsing && strcmp(argv[i], "--temp") == 0) { - if (handle_option_with_value(argc, argv, i, temperature) == 1) { - return 1; - } - } else if (options_parsing && - (parse_flag(argv, i, "-v", "--verbose") || parse_flag(argv, i, "-v", "--log-verbose"))) { - verbose = true; - } else if (options_parsing && strcmp(argv[i], "--jinja") == 0) { - use_jinja = true; - } else if (options_parsing && strcmp(argv[i], "--chat-template-file") == 0){ - if (handle_option_with_value(argc, argv, i, chat_template_file) == 1) { - return 1; - } - use_jinja = true; - } else 
if (options_parsing && parse_flag(argv, i, "-h", "--help")) { - help = true; - return 0; - } else if (options_parsing && strcmp(argv[i], "--") == 0) { - options_parsing = false; - } else if (positional_args_i == 0) { - if (!argv[i][0] || argv[i][0] == '-') { - return 1; - } - - ++positional_args_i; - model_ = argv[i]; - } else if (positional_args_i == 1) { - ++positional_args_i; - user = argv[i]; - } else { - user += " " + std::string(argv[i]); + int ret = parse_options_with_value(argc, argv, i, options_parsing); + if (ret == 0) { + continue; + } else if (ret == 1) { + return ret; + } + + ret = parse_options(argv, i, options_parsing); + if (ret == 0) { + continue; + } else if (ret == 1) { + return ret; + } + + if (parse_positional_args(argv, i, positional_args_i)) { + return 1; } } - if (model_.empty()){ + if (model_.empty()) { return 1; } @@ -232,6 +275,8 @@ class Opt { " Number of GPU layers (default: %d)\n" " --temp \n" " Temperature (default: %.1f)\n" + " -t, --threads \n" + " Number of threads to use during generation (default: %d)\n" " -v, --verbose, --log-verbose\n" " Set verbosity level to infinity (i.e. log all messages, useful for debugging)\n" " -h, --help\n" @@ -260,7 +305,7 @@ class Opt { " llama-run file://some-file3.gguf\n" " llama-run --ngl 999 some-file4.gguf\n" " llama-run --ngl 999 some-file5.gguf Hello World\n", - context_size_default, ngl_default, temperature_default); + context_size_default, ngl_default, temperature_default, n_threads_default); } }; From 92a391327e9201b9b5b32fdd3afb452026c22d4c Mon Sep 17 00:00:00 2001 From: Chenguang Li <757486878@qq.com> Date: Sat, 15 Mar 2025 09:31:08 +0800 Subject: [PATCH 055/398] [CANN]MUL_MAT optimization (#12382) --- ggml/src/ggml-cann/aclnn_ops.cpp | 8 ++++++-- ggml/src/ggml-cann/ggml-cann.cpp | 5 ----- 2 files changed, 6 insertions(+), 7 deletions(-) diff --git a/ggml/src/ggml-cann/aclnn_ops.cpp b/ggml/src/ggml-cann/aclnn_ops.cpp index b2d857e1e549b..6bb5d08349197 100644 --- a/ggml/src/ggml-cann/aclnn_ops.cpp +++ b/ggml/src/ggml-cann/aclnn_ops.cpp @@ -2790,10 +2790,14 @@ static void ggml_cann_mul_mat_quant(ggml_backend_cann_context& ctx, (char*)output_buffer + batch1 * output_stride, ACL_FLOAT16, output_elem_size, output_ne, output_nb, 2, ACL_FORMAT_ND, output_ne_offset); + int64_t antiquantGroupSize = 0; + if (src0->ne[0] > QK8_0) { + antiquantGroupSize = QK8_0; + } ACL_CHECK(aclnnWeightQuantBatchMatmulV2GetWorkspaceSize( acl_input_tensor, acl_weight_tensor, acl_scale_tensor, nullptr, - nullptr, nullptr, nullptr, QK8_0, acl_output_tensor, + nullptr, nullptr, nullptr, antiquantGroupSize, acl_output_tensor, &workspaceSize, &executor)); if (workspaceAddr == nullptr) { workspaceAddr = workspace_allocator.alloc(workspaceSize); @@ -2833,7 +2837,7 @@ static void ggml_cann_mul_mat_quant(ggml_backend_cann_context& ctx, ACL_CHECK(aclnnWeightQuantBatchMatmulV2GetWorkspaceSize( acl_input_tensor, acl_weight_tensor, acl_scale_tensor, - nullptr, nullptr, nullptr, nullptr, QK8_0, + nullptr, nullptr, nullptr, nullptr, antiquantGroupSize, acl_output_tensor, &workspaceSize, &executor)); ACL_CHECK(aclnnWeightQuantBatchMatmulV2( workspaceAddr, workspaceSize, executor, ctx.stream())); diff --git a/ggml/src/ggml-cann/ggml-cann.cpp b/ggml/src/ggml-cann/ggml-cann.cpp index b8d272cda600c..68cd9920d1ace 100644 --- a/ggml/src/ggml-cann/ggml-cann.cpp +++ b/ggml/src/ggml-cann/ggml-cann.cpp @@ -1689,11 +1689,6 @@ static bool ggml_backend_cann_supports_op(ggml_backend_dev_t dev, case GGML_OP_MUL_MAT: { switch (op->src[0]->type) { case GGML_TYPE_Q8_0: - // 
Current groupsize should not be greater than k-1 in - // aclnnWeightQuantBatchMatmulV2GetWorkspaceSize - if (op->src[0]->ne[0] <= QK8_0) { - return false; - } case GGML_TYPE_F16: case GGML_TYPE_F32: case GGML_TYPE_Q4_0: From b19bd064c09822cb81efe4a38abafab3e979c9ce Mon Sep 17 00:00:00 2001 From: fairydreaming <166155368+fairydreaming@users.noreply.github.com> Date: Sat, 15 Mar 2025 15:19:30 +0100 Subject: [PATCH 056/398] SYCL : support non-contiguous tensors in binary ops (add, sub, etc) (#12399) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * sycl : support non-contiguous tensors in binary ops * sycl : silence unused variable warning --------- Co-authored-by: Stanisław Szymczyk --- ggml/src/ggml-sycl/common.hpp | 87 ++++++++++++++++++++++++----------- 1 file changed, 61 insertions(+), 26 deletions(-) diff --git a/ggml/src/ggml-sycl/common.hpp b/ggml/src/ggml-sycl/common.hpp index a92988b7dbd24..fdd07d9cafa11 100644 --- a/ggml/src/ggml-sycl/common.hpp +++ b/ggml/src/ggml-sycl/common.hpp @@ -474,6 +474,7 @@ static void k_bin_bcast(const src0_t * src0, const src1_t * src1, dst_t * dst, int ne0, int ne1, int ne2, int ne3, int ne10, int ne11, int ne12, int ne13, /*int s0, */ int s1, int s2, int s3, + /*int s00,*/ int s01, int s02, int s03, /*int s10,*/ int s11, int s12, int s13, const sycl::nd_item<3> &item_ct1) { const int i0s = item_ct1.get_local_range(2) * item_ct1.get_group(2) + @@ -495,9 +496,9 @@ static void k_bin_bcast(const src0_t * src0, const src1_t * src1, dst_t * dst, const int i12 = i2 % ne12; const int i13 = i3 % ne13; - const size_t i_src0 = i3*s3 + i2*s2 + i1*s1; + const size_t i_src0 = i3*s03 + i2*s02 + i1*s01; const size_t i_src1 = i13*s13 + i12*s12 + i11*s11; - const size_t i_dst = i_src0; + const size_t i_dst = i3*s3 + i2*s2 + i1*s1; const src0_t * src0_row = src0 + i_src0; const src1_t * src1_row = src1 + i_src1; @@ -515,6 +516,7 @@ static void k_bin_bcast_unravel(const src0_t * src0, const src1_t * src1, dst_t int ne0, int ne1, int ne2, int ne3, int ne10, int ne11, int ne12, int ne13, /*int s0, */ int s1, int s2, int s3, + /*int s00,*/ int s01, int s02, int s03, /*int s10,*/ int s11, int s12, int s13, const sycl::nd_item<3> &item_ct1) { @@ -534,9 +536,9 @@ static void k_bin_bcast_unravel(const src0_t * src0, const src1_t * src1, dst_t const int i12 = i2 % ne12; const int i13 = i3 % ne13; - const size_t i_src0 = i3*s3 + i2*s2 + i1*s1; + const size_t i_src0 = i3*s03 + i2*s02 + i1*s01; const size_t i_src1 = i13*s13 + i12*s12 + i11*s11; - const size_t i_dst = i_src0; + const size_t i_dst = i3*s3 + i2*s2 + i1*s1; const src0_t * src0_row = src0 + i_src0; const src1_t * src1_row = src1 + i_src1; @@ -566,9 +568,11 @@ struct bin_bcast_sycl { int nr[4] = { nr0, nr1, nr2, nr3 }; // collapse dimensions until first broadcast dimension - int64_t cne0[] = {ne0, ne1, ne2, ne3}; + int64_t cne[] = {ne0, ne1, ne2, ne3}; + int64_t cne0[] = {ne00, ne01, ne02, ne03}; int64_t cne1[] = {ne10, ne11, ne12, ne13}; - size_t cnb0[] = {nb0, nb1, nb2, nb3}; + size_t cnb[] = {nb0, nb1, nb2, nb3}; + size_t cnb0[] = {nb00, nb01, nb02, nb03}; size_t cnb1[] = {nb10, nb11, nb12, nb13}; auto collapse = [](int64_t cne[]) { cne[0] *= cne[1]; @@ -583,32 +587,41 @@ struct bin_bcast_sycl { cnb[3] *= cne[3]; }; - for (int i = 0; i < 4; i++) { - if (nr[i] != 1) { - break; - } - if (i > 0) { - collapse_nb(cnb0, cne0); - collapse_nb(cnb1, cne1); - collapse(cne0); - collapse(cne1); + if (ggml_is_contiguous(src0) && ggml_is_contiguous(src1) && ggml_is_contiguous(dst)) { + for 
(int i = 0; i < 4; i++) { + if (nr[i] != 1) { + break; + } + if (i > 0) { + collapse_nb(cnb, cne); + collapse_nb(cnb0, cne0); + collapse_nb(cnb1, cne1); + collapse(cne); + collapse(cne0); + collapse(cne1); + } } } { - int64_t ne0 = cne0[0]; - int64_t ne1 = cne0[1]; - int64_t ne2 = cne0[2]; - int64_t ne3 = cne0[3]; + int64_t ne0 = cne[0]; + int64_t ne1 = cne[1]; + int64_t ne2 = cne[2]; + int64_t ne3 = cne[3]; int64_t ne10 = cne1[0]; int64_t ne11 = cne1[1]; int64_t ne12 = cne1[2]; int64_t ne13 = cne1[3]; - size_t nb0 = cnb0[0]; - size_t nb1 = cnb0[1]; - size_t nb2 = cnb0[2]; - size_t nb3 = cnb0[3]; + size_t nb0 = cnb[0]; + size_t nb1 = cnb[1]; + size_t nb2 = cnb[2]; + size_t nb3 = cnb[3]; + + size_t nb00 = cnb0[0]; + size_t nb01 = cnb0[1]; + size_t nb02 = cnb0[2]; + size_t nb03 = cnb0[3]; size_t nb10 = cnb1[0]; size_t nb11 = cnb1[1]; @@ -625,6 +638,28 @@ struct bin_bcast_sycl { size_t s12 = nb12 / sizeof(src1_t); size_t s13 = nb13 / sizeof(src1_t); + size_t s00 = nb00 / sizeof(src0_t); + size_t s01 = nb01 / sizeof(src0_t); + size_t s02 = nb02 / sizeof(src0_t); + size_t s03 = nb03 / sizeof(src0_t); + + GGML_UNUSED(s00); + + GGML_ASSERT(nb0 % sizeof(dst_t) == 0); + GGML_ASSERT(nb1 % sizeof(dst_t) == 0); + GGML_ASSERT(nb2 % sizeof(dst_t) == 0); + GGML_ASSERT(nb3 % sizeof(dst_t) == 0); + + GGML_ASSERT(nb00 % sizeof(src0_t) == 0); + GGML_ASSERT(nb01 % sizeof(src0_t) == 0); + GGML_ASSERT(nb02 % sizeof(src0_t) == 0); + GGML_ASSERT(nb03 % sizeof(src0_t) == 0); + + GGML_ASSERT(nb10 % sizeof(src1_t) == 0); + GGML_ASSERT(nb11 % sizeof(src1_t) == 0); + GGML_ASSERT(nb12 % sizeof(src1_t) == 0); + GGML_ASSERT(nb13 % sizeof(src1_t) == 0); + GGML_ASSERT(s0 == 1); GGML_ASSERT(s10 == 1); @@ -661,8 +696,8 @@ struct bin_bcast_sycl { [=](sycl::nd_item<3> item_ct1) { k_bin_bcast_unravel( src0_dd, src1_dd, dst_dd, ne0, ne1, ne2, ne3, - ne10, ne11, ne12, ne13, s1, s2, s3, s11, s12, - s13, item_ct1); + ne10, ne11, ne12, ne13, s1, s2, s3, s01, s02, + s03, s11, s12, s13, item_ct1); }); } } else { @@ -680,7 +715,7 @@ struct bin_bcast_sycl { [=](sycl::nd_item<3> item_ct1) { k_bin_bcast(src0_dd, src1_dd, dst_dd, ne0, ne1, ne2, ne3, ne10, ne11, ne12, ne13, - s1, s2, s3, s11, s12, s13, + s1, s2, s3, s01, s02, s03, s11, s12, s13, item_ct1); }); } From 3d35d87b4113648e224b837bb88e6b2c4c7f29e5 Mon Sep 17 00:00:00 2001 From: aubreyli Date: Sat, 15 Mar 2025 22:49:03 +0800 Subject: [PATCH 057/398] SYCL: Delete redundant plus sign and space (#12391) --- ggml/src/ggml-sycl/ggml-sycl.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ggml/src/ggml-sycl/ggml-sycl.cpp b/ggml/src/ggml-sycl/ggml-sycl.cpp index 6977b705e4877..ef7d5fa01a8e3 100644 --- a/ggml/src/ggml-sycl/ggml-sycl.cpp +++ b/ggml/src/ggml-sycl/ggml-sycl.cpp @@ -3113,8 +3113,8 @@ static void ggml_sycl_mul_mat_id(ggml_backend_sycl_context & ctx, const int64_t i2 = i12; src0_row.data = src0_original + i02*nb02; - src1_row.data = src1_original + + i11*nb11 + i12*nb12; - dst_row.data = dst_original + i1*nb1 + i2*nb2; + src1_row.data = src1_original + i11*nb11 + i12*nb12; + dst_row.data = dst_original + i1*nb1 + i2*nb2; ggml_sycl_mul_mat(ctx, &src0_row, &src1_row, &dst_row); } From f4c3dd5daa3a79f713813cf1aabdc5886071061d Mon Sep 17 00:00:00 2001 From: marcoStocchi Date: Sat, 15 Mar 2025 17:23:11 +0100 Subject: [PATCH 058/398] llama-tts : add '-o' option (#12398) * added -o option to specify an output file name * llama-tts returns ENOENT in case of file write error note : PR #12042 is closed as superseded with this one. 
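For illustration, a minimal invocation using the new flag could look like the sketch below; the file names are placeholders and other arguments a real run needs (for example the vocoder model) are left out:

```bash
# default: the generated audio is written to output.wav
# with -o: choose the output path explicitly
llama-tts -m tts-model.gguf -p "Hello world" -o speech.wav
```

If the chosen file cannot be written, the run now exits with ENOENT rather than reporting success.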
--- common/arg.cpp | 2 +- examples/tts/tts.cpp | 21 ++++++++++++--------- 2 files changed, 13 insertions(+), 10 deletions(-) diff --git a/common/arg.cpp b/common/arg.cpp index 240c699a2cf76..b6bfe6f89bead 100644 --- a/common/arg.cpp +++ b/common/arg.cpp @@ -1889,7 +1889,7 @@ common_params_context common_params_parser_init(common_params & params, llama_ex [](common_params & params, const std::string & value) { params.out_file = value; } - ).set_examples({LLAMA_EXAMPLE_IMATRIX, LLAMA_EXAMPLE_CVECTOR_GENERATOR, LLAMA_EXAMPLE_EXPORT_LORA})); + ).set_examples({LLAMA_EXAMPLE_IMATRIX, LLAMA_EXAMPLE_CVECTOR_GENERATOR, LLAMA_EXAMPLE_EXPORT_LORA, LLAMA_EXAMPLE_TTS})); add_opt(common_arg( {"-ofreq", "--output-frequency"}, "N", string_format("output the imatrix every N iterations (default: %d)", params.n_out_freq), diff --git a/examples/tts/tts.cpp b/examples/tts/tts.cpp index c658f3182f4c2..d953cadd62dcf 100644 --- a/examples/tts/tts.cpp +++ b/examples/tts/tts.cpp @@ -87,11 +87,11 @@ struct wav_header { uint32_t data_size; }; -static void save_wav16(const std::string & fname, const std::vector & data, int sample_rate) { +static bool save_wav16(const std::string & fname, const std::vector & data, int sample_rate) { std::ofstream file(fname, std::ios::binary); if (!file) { - LOG_ERR("%s: Failed to open file '%s' for writing", __func__, fname.c_str()); - return; + LOG_ERR("%s: Failed to open file '%s' for writing.\n", __func__, fname.c_str()); + return false; } wav_header header; @@ -108,7 +108,7 @@ static void save_wav16(const std::string & fname, const std::vector & dat file.write(reinterpret_cast(&pcm_sample), sizeof(pcm_sample)); } - file.close(); + return file.good(); } static void fill_hann_window(int length, bool periodic, float * output) { @@ -536,6 +536,7 @@ static std::string audio_data_from_speaker(json speaker, const outetts_version t int main(int argc, char ** argv) { common_params params; + params.out_file = "output.wav"; params.prompt = ""; params.n_predict = 4096; @@ -1060,8 +1061,6 @@ lovely<|t_0.56|><|code_start|><|634|><|596|><|1766|><|1556|><|1306|><|1285|><|14 } #endif - const std::string fname = "output.wav"; - const int n_sr = 24000; // sampling rate // zero out first 0.25 seconds @@ -1072,11 +1071,15 @@ lovely<|t_0.56|><|code_start|><|634|><|596|><|1766|><|1556|><|1306|><|1285|><|14 LOG_INF("%s: time for spectral ops: %.3f ms\n", __func__, (ggml_time_us() - t_spec_start) / 1000.0f); LOG_INF("%s: total time: %.3f ms\n", __func__, (ggml_time_us() - t_main_start) / 1000.0f); - save_wav16(fname, audio, n_sr); + int retval = 0; - LOG_INF("%s: audio written to file '%s'\n", __func__, fname.c_str()); + if (save_wav16(params.out_file, audio, n_sr)) { + LOG_INF("%s: audio written to file '%s'\n", __func__, params.out_file.c_str()); + } else { + retval = ENOENT; + } llama_backend_free(); - return 0; + return retval; } From 7b61bcc87cfdeab88350e82df1c4b7be64331ea6 Mon Sep 17 00:00:00 2001 From: Daniel Bevenius Date: Sun, 16 Mar 2025 18:22:05 +0100 Subject: [PATCH 059/398] ci : add --symlinks to xcframework zip command (#12409) This commit adds the --symlinks option to the zip command used to create the xcframework zip file. This is necessary to create symlinks in the zip file. 
Without this option, the Versions symlink is stored as a regular directory entry in the zip file, rather than as a symlink in the zip which causes the followig error in xcode: ```console Couldn't resolve framework symlink for '/Users/danbev/work/ai/llama.cpp/tmp_1/build-apple/llama.xcframework/macos-arm64_x86_64/llama.framework/Versions/Current': readlink(/Users/danbev/work/ai/llama.cpp/tmp_1/build-apple/llama.xcframework/macos-arm64_x86_64/llama.framework/Versions/Current): Invalid argument (22) ``` Refs: https://github.com/ggml-org/llama.cpp/pull/11996#issuecomment-2727026377 --- .github/workflows/build.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 1e2429364513a..03cde0a48436f 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -1379,7 +1379,7 @@ jobs: id: pack_artifacts if: ${{ ( github.event_name == 'push' && github.ref == 'refs/heads/master' ) || github.event.inputs.create_release == 'true' }} run: | - zip -r llama-${{ steps.tag.outputs.name }}-xcframework.zip build-apple/llama.xcframework + zip --symlinks -r llama-${{ steps.tag.outputs.name }}-xcframework.zip build-apple/llama.xcframework - name: Upload artifacts if: ${{ ( github.event_name == 'push' && github.ref == 'refs/heads/master' ) || github.event.inputs.create_release == 'true' }} From dc079cfdffa1141a6caf5d41a33d73a1ef03da55 Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Sun, 16 Mar 2025 19:29:36 +0200 Subject: [PATCH 060/398] context : fix init of n_outputs (#12397) ggml-ci --- src/llama-context.cpp | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/src/llama-context.cpp b/src/llama-context.cpp index c2fcce42a7d58..abb7e526f6171 100644 --- a/src/llama-context.cpp +++ b/src/llama-context.cpp @@ -285,11 +285,15 @@ llama_context::llama_context( // reserve worst-case graph if (!hparams.vocab_only) { - uint32_t n_seqs = 1; // TODO: worst-case number of sequences - uint32_t n_tokens = std::min(cparams.n_ctx, cparams.n_ubatch); + const uint32_t n_seqs = 1; // TODO: worst-case number of sequences + const uint32_t n_tokens = std::min(cparams.n_ctx, cparams.n_ubatch); llama_token token = model.vocab.token_bos(); // not actually used by llama_build_graph, but required to choose between token and embedding inputs graph + // restore later + // TODO: something cleaner + const auto n_outputs_save = n_outputs; + // max number of outputs n_outputs = n_tokens; @@ -341,6 +345,8 @@ llama_context::llama_context( } } + n_outputs = n_outputs_save; + for (size_t i = 0; i < backend_ptrs.size(); ++i) { ggml_backend_t backend = backend_ptrs[i]; ggml_backend_buffer_type_t buft = backend_buft[i]; From 8ba95dca2065c0073698afdfcda4c8a8f08bf0d9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sigbj=C3=B8rn=20Skj=C3=A6ret?= Date: Sun, 16 Mar 2025 18:46:36 +0100 Subject: [PATCH 061/398] llama : fix OLMo-2-0325-32B-Instruct K-norm size (#12400) --- src/llama-model.cpp | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/llama-model.cpp b/src/llama-model.cpp index 750a702ff77a4..4b288d8f66a33 100644 --- a/src/llama-model.cpp +++ b/src/llama-model.cpp @@ -1005,6 +1005,7 @@ void llama_model::load_hparams(llama_model_loader & ml) { case 16: type = LLM_TYPE_1B; break; case 32: type = LLM_TYPE_7B; break; case 40: type = LLM_TYPE_13B; break; + case 64: type = LLM_TYPE_32B; break; default: type = LLM_TYPE_UNKNOWN; } } break; @@ -2726,6 +2727,8 @@ bool llama_model::load_tensors(llama_model_loader & ml) { } 
break; case LLM_ARCH_OLMO2: { + const int64_t n_embd_head = n_embd / n_head; + tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0); // output @@ -2740,7 +2743,7 @@ bool llama_model::load_tensors(llama_model_loader & ml) { layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa}, 0); layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0); layer.attn_q_norm = create_tensor(tn(LLM_TENSOR_ATTN_Q_NORM, "weight", i), {n_embd}, 0); - layer.attn_k_norm = create_tensor(tn(LLM_TENSOR_ATTN_K_NORM, "weight", i), {n_embd}, 0); + layer.attn_k_norm = create_tensor(tn(LLM_TENSOR_ATTN_K_NORM, "weight", i), {n_head_kv * n_embd_head}, 0); layer.attn_post_norm = create_tensor(tn(LLM_TENSOR_ATTN_POST_NORM, "weight", i), {n_embd}, 0); layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, 0); From b3c9a65673a63a6c9a75da24ee00d13499494e0c Mon Sep 17 00:00:00 2001 From: Akarshan Biswas Date: Mon, 17 Mar 2025 07:15:12 +0530 Subject: [PATCH 062/398] SYCL: set extras only on GGML_TYPE_Q4_0 (#12366) * SYCL: set extras only on GGML_TYPE_Q4_0 * release tensor_extras in reset buffer interface --- ggml/src/ggml-sycl/ggml-sycl.cpp | 29 ++++++++++++++++++++++------- 1 file changed, 22 insertions(+), 7 deletions(-) diff --git a/ggml/src/ggml-sycl/ggml-sycl.cpp b/ggml/src/ggml-sycl/ggml-sycl.cpp index ef7d5fa01a8e3..05984d8c5ac4e 100644 --- a/ggml/src/ggml-sycl/ggml-sycl.cpp +++ b/ggml/src/ggml-sycl/ggml-sycl.cpp @@ -333,10 +333,11 @@ ggml_backend_sycl_buffer_init_tensor(ggml_backend_buffer_t buffer, assert(tensor->view_src->buffer->buft == buffer->buft); return GGML_STATUS_SUCCESS; } - - ggml_tensor_extra_gpu * extra = new ggml_tensor_extra_gpu{}; - tensor->extra = extra; - ctx->tensor_extras.push_back(extra); //used to release it when destroy ctx. + if (tensor->type == GGML_TYPE_Q4_0) { + ggml_tensor_extra_gpu * extra = new ggml_tensor_extra_gpu{}; + tensor->extra = extra; + ctx->tensor_extras.push_back(extra); //used to release it when destroy ctx. 
+ } if (ggml_is_quantized(tensor->type)) { // initialize padding to 0 to avoid possible NaN values @@ -486,6 +487,22 @@ catch (sycl::exception const &exc) { std::exit(1); } +static void ggml_backend_sycl_buffer_reset(ggml_backend_buffer_t buffer) { + GGML_SYCL_DEBUG("[SYCL] call %s\n", __func__); + if (buffer == nullptr) { + return; + } + + ggml_backend_sycl_buffer_context * ctx = (ggml_backend_sycl_buffer_context *) buffer->context; + + if (ctx != nullptr) { + for (ggml_tensor_extra_gpu * extra : ctx->tensor_extras) { + release_extra_gpu(extra); + } + ctx->tensor_extras.clear(); // reset the tensor_extras vector + } +} + static const ggml_backend_buffer_i ggml_backend_sycl_buffer_interface = { /* .free_buffer = */ ggml_backend_sycl_buffer_free_buffer, /* .get_base = */ ggml_backend_sycl_buffer_get_base, @@ -495,7 +512,7 @@ static const ggml_backend_buffer_i ggml_backend_sycl_buffer_interface = { /* .get_tensor = */ ggml_backend_sycl_buffer_get_tensor, /* .cpy_tensor = */ ggml_backend_sycl_buffer_cpy_tensor, /* .clear = */ ggml_backend_sycl_buffer_clear, - /* .reset = */ NULL, + /* .reset = */ ggml_backend_sycl_buffer_reset, }; // sycl buffer type @@ -576,7 +593,6 @@ ggml_backend_buffer_type_t ggml_backend_sycl_buffer_type(int device) { static std::mutex mutex; std::lock_guard lock(mutex); - GGML_SYCL_DEBUG("[SYCL] call ggml_backend_sycl_buffer_type\n"); auto dev_count = ggml_backend_sycl_get_device_count(); @@ -3761,7 +3777,6 @@ bool ggml_backend_is_sycl(ggml_backend_t backend) { } int ggml_backend_sycl_get_device_count() { - GGML_SYCL_DEBUG("[SYCL] call ggml_backend_sycl_get_device_count\n"); return ggml_sycl_info().device_count; } From 374101fd742bb35cb9bf46c86836e54d51191ffd Mon Sep 17 00:00:00 2001 From: Christian Kastner Date: Mon, 17 Mar 2025 10:05:23 +0100 Subject: [PATCH 063/398] cmake : enable building llama.cpp using system libggml (#12321) * cmake: Factor out compiler flag function from ggml llama.cpps's build requires it, too, and we may want to make use of it without add_subdirectory(ggml). * cmake: Enable building against system ggml This facilitates package maintenance for Linux distributions, where the libggml library most likely will be shipped as an individual package upon which a llama.cpp package depends. --- CMakeLists.txt | 10 +++++++++- cmake/common.cmake | 2 ++ ggml/cmake/common.cmake | 26 ++++++++++++++++++++++++++ ggml/src/CMakeLists.txt | 28 +--------------------------- 4 files changed, 38 insertions(+), 28 deletions(-) create mode 100644 ggml/cmake/common.cmake diff --git a/CMakeLists.txt b/CMakeLists.txt index 7b2a1845e5c7c..23cfbce5ae566 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -29,6 +29,8 @@ else() set(LLAMA_STANDALONE OFF) endif() +option(LLAMA_USE_SYSTEM_GGML "Use system libggml" OFF) + if (EMSCRIPTEN) set(BUILD_SHARED_LIBS_DEFAULT OFF) @@ -145,7 +147,13 @@ endif() # 3rd-party # -if (NOT TARGET ggml) +if (LLAMA_USE_SYSTEM_GGML) + message(STATUS "Using system-provided libggml, skipping ggml build") + find_package(ggml REQUIRED) + add_library(ggml ALIAS ggml::ggml) +endif() + +if (NOT TARGET ggml AND NOT LLAMA_USE_SYSTEM_GGML) add_subdirectory(ggml) # ... 
otherwise assume ggml is added by a parent CMakeLists.txt endif() diff --git a/cmake/common.cmake b/cmake/common.cmake index 0f54871e4143d..a5bb787f1519d 100644 --- a/cmake/common.cmake +++ b/cmake/common.cmake @@ -1,3 +1,5 @@ +include("ggml/cmake/common.cmake") + function(llama_add_compile_flags) if (LLAMA_FATAL_WARNINGS) if (CMAKE_CXX_COMPILER_ID MATCHES "GNU" OR CMAKE_CXX_COMPILER_ID MATCHES "Clang") diff --git a/ggml/cmake/common.cmake b/ggml/cmake/common.cmake new file mode 100644 index 0000000000000..1976d0ae9b1e8 --- /dev/null +++ b/ggml/cmake/common.cmake @@ -0,0 +1,26 @@ +function(ggml_get_flags CCID CCVER) + set(C_FLAGS "") + set(CXX_FLAGS "") + + if (CCID MATCHES "Clang") + set(C_FLAGS -Wunreachable-code-break -Wunreachable-code-return) + set(CXX_FLAGS -Wunreachable-code-break -Wunreachable-code-return -Wmissing-prototypes -Wextra-semi) + + if ( + (CCID STREQUAL "Clang" AND CCVER VERSION_GREATER_EQUAL 3.8.0) OR + (CCID STREQUAL "AppleClang" AND CCVER VERSION_GREATER_EQUAL 7.3.0) + ) + list(APPEND C_FLAGS -Wdouble-promotion) + endif() + elseif (CCID STREQUAL "GNU") + set(C_FLAGS -Wdouble-promotion) + set(CXX_FLAGS -Wno-array-bounds) + + if (CCVER VERSION_GREATER_EQUAL 8.1.0) + list(APPEND CXX_FLAGS -Wextra-semi) + endif() + endif() + + set(GF_C_FLAGS ${C_FLAGS} PARENT_SCOPE) + set(GF_CXX_FLAGS ${CXX_FLAGS} PARENT_SCOPE) +endfunction() diff --git a/ggml/src/CMakeLists.txt b/ggml/src/CMakeLists.txt index 52817510f6e75..a797e2b187fbe 100644 --- a/ggml/src/CMakeLists.txt +++ b/ggml/src/CMakeLists.txt @@ -1,4 +1,5 @@ include(CheckCXXCompilerFlag) +include("../cmake/common.cmake") add_compile_definitions(GGML_SCHED_MAX_COPIES=${GGML_SCHED_MAX_COPIES}) @@ -24,33 +25,6 @@ if (NOT MSVC) endif() endif() -function(ggml_get_flags CCID CCVER) - set(C_FLAGS "") - set(CXX_FLAGS "") - - if (CCID MATCHES "Clang") - set(C_FLAGS -Wunreachable-code-break -Wunreachable-code-return) - set(CXX_FLAGS -Wunreachable-code-break -Wunreachable-code-return -Wmissing-prototypes -Wextra-semi) - - if ( - (CCID STREQUAL "Clang" AND CCVER VERSION_GREATER_EQUAL 3.8.0) OR - (CCID STREQUAL "AppleClang" AND CCVER VERSION_GREATER_EQUAL 7.3.0) - ) - list(APPEND C_FLAGS -Wdouble-promotion) - endif() - elseif (CCID STREQUAL "GNU") - set(C_FLAGS -Wdouble-promotion) - set(CXX_FLAGS -Wno-array-bounds) - - if (CCVER VERSION_GREATER_EQUAL 8.1.0) - list(APPEND CXX_FLAGS -Wextra-semi) - endif() - endif() - - set(GF_C_FLAGS ${C_FLAGS} PARENT_SCOPE) - set(GF_CXX_FLAGS ${CXX_FLAGS} PARENT_SCOPE) -endfunction() - if (GGML_FATAL_WARNINGS) if (CMAKE_CXX_COMPILER_ID MATCHES "GNU" OR CMAKE_CXX_COMPILER_ID MATCHES "Clang") list(APPEND C_FLAGS -Werror) From 2f21123c1deb3ce1be3c0578c5f6980fe19ed077 Mon Sep 17 00:00:00 2001 From: Jeff Bolz Date: Mon, 17 Mar 2025 04:35:00 -0500 Subject: [PATCH 064/398] vulkan: Adjust coopmat2 tile sizes and selection heuristic (#12258) --- ggml/src/ggml-vulkan/ggml-vulkan.cpp | 42 +++++++++++++++++----------- 1 file changed, 25 insertions(+), 17 deletions(-) diff --git a/ggml/src/ggml-vulkan/ggml-vulkan.cpp b/ggml/src/ggml-vulkan/ggml-vulkan.cpp index ff53bdfbe171c..e46007a52f56e 100644 --- a/ggml/src/ggml-vulkan/ggml-vulkan.cpp +++ b/ggml/src/ggml-vulkan/ggml-vulkan.cpp @@ -1476,26 +1476,26 @@ static void ggml_vk_load_shaders(vk_device& device) { // spec constants and tile sizes for quant matmul (non-Qi_K) l_warptile_mmq = { 256, 128, 256, 64 }; m_warptile_mmq = { 256, 128, 128, 64 }; - s_warptile_mmq = { 256, 128, 128, 64 }; + s_warptile_mmq = { 256, 32, 64, 128 }; l_mmq_wg_denoms = { 128, 256, 1 }; 
m_mmq_wg_denoms = { 128, 128, 1 }; - s_mmq_wg_denoms = { 128, 128, 1 }; + s_mmq_wg_denoms = { 32, 64, 1 }; // spec constants and tile sizes for quant matmul (Qi_K) - l_warptile_mmq_k = { 256, 128, 512, 16 }; - m_warptile_mmq_k = { 256, 128, 256, 16 }; - s_warptile_mmq_k = { 256, 32, 128, 64 }; - l_mmq_wg_denoms_k = { 128, 512, 1 }; - m_mmq_wg_denoms_k = { 128, 256, 1 }; - s_mmq_wg_denoms_k = { 32, 128, 1 }; + l_warptile_mmq_k = { 256, 64, 128, 64 }; + m_warptile_mmq_k = { 256, 32, 64, 64 }; + s_warptile_mmq_k = { 256, 32, 32, 128 }; + l_mmq_wg_denoms_k = { 64, 128, 1 }; + m_mmq_wg_denoms_k = { 32, 64, 1 }; + s_mmq_wg_denoms_k = { 32, 32, 1 }; // spec constants and tile sizes for quant matmul_id - l_warptile_mmqid = { 256, 128, 128, 16 }; + l_warptile_mmqid = { 256, 128, 64, 16 }; m_warptile_mmqid = { 256, 128, 64, 16 }; - s_warptile_mmqid = { 256, 64, 64, 16 }; - l_mmqid_wg_denoms = { 128, 128, 1 }; + s_warptile_mmqid = { 256, 128, 64, 16 }; + l_mmqid_wg_denoms = { 128, 64, 1 }; m_mmqid_wg_denoms = { 128, 64, 1 }; - s_mmqid_wg_denoms = { 64, 64, 1 }; + s_mmqid_wg_denoms = { 128, 64, 1 }; l_align = 128; m_align = 64; @@ -3850,10 +3850,14 @@ static vk_pipeline ggml_vk_guess_matmul_pipeline(ggml_backend_vk_context * ctx, VK_LOG_DEBUG("ggml_vk_guess_matmul_pipeline(" << m << ", " << n << ", " << aligned << ", " << ggml_type_name(src0_type) << ")"); if (ctx->device->coopmat2) { - if ((ctx->device->mul_mat_l[src0_type] && (m % mmp->l->wg_denoms[0]) == 0 && (n % mmp->l->wg_denoms[1]) == 0) || (!ctx->device->mul_mat_m[src0_type] && !ctx->device->mul_mat_s[src0_type])) { + // Use large shader when the N dimension is greater than the medium shader's tile size + uint32_t crossover_large = mmp->m->wg_denoms[1]; + if ((ctx->device->mul_mat_l[src0_type] && (n > crossover_large)) || (!ctx->device->mul_mat_m[src0_type] && !ctx->device->mul_mat_s[src0_type])) { return aligned ? mmp->a_l : mmp->l; } - if ((ctx->device->mul_mat_m[src0_type] && (m % mmp->m->wg_denoms[0]) == 0 && (n % mmp->m->wg_denoms[1]) == 0) || !ctx->device->mul_mat_s[src0_type]) { + // Use medium shader when the N dimension is greater than the small shader's tile size + uint32_t crossover_medium = mmp->s->wg_denoms[1]; + if ((ctx->device->mul_mat_m[src0_type] && (n > crossover_medium)) || !ctx->device->mul_mat_s[src0_type]) { return aligned ? mmp->a_m : mmp->m; } return aligned ? mmp->a_s : mmp->s; @@ -3898,13 +3902,17 @@ static void ggml_vk_matmul( } static vk_pipeline ggml_vk_guess_matmul_id_pipeline(ggml_backend_vk_context * ctx, vk_matmul_pipeline& mmp, int m, int n, bool aligned, ggml_type src0_type) { - VK_LOG_DEBUG("ggml_vk_guess_matmul_pipeline(" << m << ", " << n << ", " << aligned << ", " << ggml_type_name(src0_type) << ")"); + VK_LOG_DEBUG("ggml_vk_guess_matmul_id_pipeline(" << m << ", " << n << ", " << aligned << ", " << ggml_type_name(src0_type) << ")"); if (ctx->device->coopmat2) { - if ((ctx->device->mul_mat_id_l[src0_type] && (m % mmp->l->wg_denoms[0]) == 0 && (n % mmp->l->wg_denoms[1]) == 0) || (!ctx->device->mul_mat_id_m[src0_type] && !ctx->device->mul_mat_id_s[src0_type])) { + // Use large shader when the N dimension is greater than the medium shader's tile size + uint32_t crossover_large = mmp->m->wg_denoms[1]; + if ((ctx->device->mul_mat_id_l[src0_type] && (n > crossover_large)) || (!ctx->device->mul_mat_id_m[src0_type] && !ctx->device->mul_mat_id_s[src0_type])) { return aligned ? 
mmp->a_l : mmp->l; } - if ((ctx->device->mul_mat_id_m[src0_type] && (m % mmp->m->wg_denoms[0]) == 0 && (n % mmp->m->wg_denoms[1]) == 0) || !ctx->device->mul_mat_id_s[src0_type]) { + // Use medium shader when the N dimension is greater than the small shader's tile size + uint32_t crossover_medium = mmp->s->wg_denoms[1]; + if ((ctx->device->mul_mat_id_m[src0_type] && (n > crossover_medium)) || !ctx->device->mul_mat_id_s[src0_type]) { return aligned ? mmp->a_m : mmp->m; } return aligned ? mmp->a_s : mmp->s; From 891c63956dbfbdf7ed2ecd0b5882cff49dbfe90f Mon Sep 17 00:00:00 2001 From: Jeff Bolz Date: Mon, 17 Mar 2025 04:41:59 -0500 Subject: [PATCH 065/398] vulkan: Pad N dimension of B matrix for coopmat2 perf, to avoid bounds checking (#12273) * vulkan: Pad N dimension of B matrix for coopmat2 perf, to avoid bounds checking --- ggml/src/ggml-vulkan/ggml-vulkan.cpp | 43 +++++++++++-------- .../vulkan-shaders/mul_mm_cm2.comp | 13 +++--- 2 files changed, 34 insertions(+), 22 deletions(-) diff --git a/ggml/src/ggml-vulkan/ggml-vulkan.cpp b/ggml/src/ggml-vulkan/ggml-vulkan.cpp index e46007a52f56e..a837b0dda4cbf 100644 --- a/ggml/src/ggml-vulkan/ggml-vulkan.cpp +++ b/ggml/src/ggml-vulkan/ggml-vulkan.cpp @@ -29,6 +29,7 @@ #include "ggml-vulkan-shaders.hpp" +#define ROUNDUP_POW2(M, N) (((M) + (N) - 1) & ~((N) - 1)) #define CEIL_DIV(M, N) (((M) + (N)-1) / (N)) #define VK_VENDOR_ID_AMD 0x1002 @@ -368,6 +369,7 @@ struct vk_mat_mat_push_constants { uint32_t batch_stride_a; uint32_t batch_stride_b; uint32_t batch_stride_d; uint32_t k_split; uint32_t ne02; uint32_t ne12; uint32_t broadcast2; uint32_t broadcast3; + uint32_t padded_N; }; struct vk_mat_vec_push_constants { uint32_t ncols; uint32_t stride_a; uint32_t stride_b; uint32_t stride_d; @@ -380,6 +382,7 @@ struct vk_mat_mat_id_push_constants { uint32_t stride_a; uint32_t stride_b; uint32_t stride_d; uint32_t batch_stride_a; uint32_t batch_stride_b; uint32_t batch_stride_d; uint32_t nei0; uint32_t nei1; uint32_t nbi1; uint32_t ne11; + uint32_t padded_N; }; struct vk_mat_vec_id_push_constants { uint32_t ncols; uint32_t stride_a; uint32_t stride_b; uint32_t stride_d; @@ -3882,18 +3885,19 @@ static void ggml_vk_matmul( vk_subbuffer&& a, vk_subbuffer&& b, vk_subbuffer&& d, vk_subbuffer&& split_k_buffer, uint32_t m, uint32_t n, uint32_t k, uint32_t stride_a, uint32_t stride_b, uint32_t stride_d, uint32_t batch_stride_a, uint32_t batch_stride_b, uint32_t batch_stride_d, - uint32_t split_k, uint32_t batch, uint32_t ne02, uint32_t ne12, uint32_t broadcast2, uint32_t broadcast3) { + uint32_t split_k, uint32_t batch, uint32_t ne02, uint32_t ne12, uint32_t broadcast2, uint32_t broadcast3, + uint32_t padded_n) { VK_LOG_DEBUG("ggml_vk_matmul(a: (" << a.buffer->buffer << ", " << a.offset << ", " << a.size << "), b: (" << b.buffer->buffer << ", " << b.offset << ", " << b.size << "), d: (" << d.buffer->buffer << ", " << d.offset << ", " << d.size << "), split_k: (" << (split_k_buffer.buffer != nullptr ? 
split_k_buffer.buffer->buffer : VK_NULL_HANDLE) << ", " << split_k_buffer.offset << ", " << split_k_buffer.size << "), m: " << m << ", n: " << n << ", k: " << k << ", stride_a: " << stride_a << ", stride_b: " << stride_b << ", stride_d: " << stride_d << ", batch_stride_a: " << batch_stride_a << ", batch_stride_b: " << batch_stride_b << ", batch_stride_d: " << batch_stride_d << ", split_k: " << split_k << ", batch: " << batch << ", ne02: " << ne02 << ", ne12: " << ne12 << ", broadcast2: " << broadcast2 << ", broadcast3: " << broadcast3 << ")"); ggml_vk_sync_buffers(subctx); if (split_k == 1) { - const vk_mat_mat_push_constants pc = { m, n, k, stride_a, stride_b, stride_d, batch_stride_a, batch_stride_b, batch_stride_d, k, ne02, ne12, broadcast2, broadcast3 }; + const vk_mat_mat_push_constants pc = { m, n, k, stride_a, stride_b, stride_d, batch_stride_a, batch_stride_b, batch_stride_d, k, ne02, ne12, broadcast2, broadcast3, padded_n }; ggml_vk_dispatch_pipeline(ctx, subctx, pipeline, { a, b, d }, sizeof(vk_mat_mat_push_constants), &pc, { m, n, batch }); return; } GGML_ASSERT(batch_stride_d == m * n); - const vk_mat_mat_push_constants pc1 = { m, n, k, stride_a, stride_b, stride_d, batch_stride_a, batch_stride_b, batch_stride_d, CEIL_DIV(k, split_k), ne02, ne12, broadcast2, broadcast3 }; + const vk_mat_mat_push_constants pc1 = { m, n, k, stride_a, stride_b, stride_d, batch_stride_a, batch_stride_b, batch_stride_d, CEIL_DIV(k, split_k), ne02, ne12, broadcast2, broadcast3, padded_n }; // Make sure enough workgroups get assigned for split k to work ggml_vk_dispatch_pipeline(ctx, subctx, pipeline, { a, b, split_k_buffer }, sizeof(vk_mat_mat_push_constants), &pc1, { (CEIL_DIV(m, pipeline->wg_denoms[0]) * pipeline->wg_denoms[0]) * split_k, n, batch }); ggml_vk_sync_buffers(subctx); @@ -3937,14 +3941,15 @@ static void ggml_vk_matmul_id( vk_subbuffer&& a, vk_subbuffer&& b, vk_subbuffer&& d, vk_subbuffer&& ids, uint32_t m, uint32_t n, uint32_t k, uint32_t stride_a, uint32_t stride_b, uint32_t stride_d, uint32_t batch_stride_a, uint32_t batch_stride_b, uint32_t batch_stride_d, - uint32_t n_as, uint32_t nei0, uint32_t nei1, uint32_t nbi1, uint32_t ne11) { + uint32_t n_as, uint32_t nei0, uint32_t nei1, uint32_t nbi1, uint32_t ne11, + uint32_t padded_n) { VK_LOG_DEBUG("ggml_vk_matmul_id(a: (" << a.buffer->buffer << ", " << a.offset << ", " << a.size << "), b: (" << b.buffer->buffer << ", " << b.offset << ", " << b.size << "), d: (" << d.buffer->buffer << ", " << d.offset << ", " << d.size << "), ids: (" << ids.buffer->buffer << ", " << ids.offset << ", " << ids.size << "), " << "m: " << m << ", n: " << n << ", k: " << k << ", stride_a: " << stride_a << ", stride_b: " << stride_b << ", stride_d: " << stride_d << ", " << "batch_stride_a: " << batch_stride_a << ", batch_stride_b: " << batch_stride_b << ", batch_stride_d: " << batch_stride_d << ", " << "n_as: " << n_as << ", nei0: " << nei0 << ", nei1: " << nei1 << ", nbi1: " << nbi1 << ", ne11: " << ne11 << ")"); ggml_vk_sync_buffers(subctx); const vk_mat_mat_id_push_constants pc = { m, n, k, stride_a, stride_b, stride_d, batch_stride_a, batch_stride_b, batch_stride_d, - nei0, nei1, nbi1, ne11 }; + nei0, nei1, nbi1, ne11, padded_n }; ggml_vk_dispatch_pipeline(ctx, subctx, pipeline, { a, b, d, ids }, sizeof(vk_mat_mat_id_push_constants), &pc, { m, nei1, n_as }); } @@ -4106,15 +4111,17 @@ static void ggml_vk_mul_mat_q_f16(ggml_backend_vk_context * ctx, vk_context& sub // Not implemented GGML_ASSERT(y_non_contig || !qy_needs_dequant); // NOLINT - const int x_ne 
= ne01 * ne00; - const int y_ne = ne11 * ne10; - const int d_ne = ne11 * ne01; - const uint32_t kpad = ggml_vk_align_size(ne10, ggml_vk_guess_matmul_pipeline_align(ctx, mmp, ne01, ne11, qx_needs_dequant ? GGML_TYPE_F16 : src0->type)); const bool aligned = ne10 == kpad && ne01 > 8 && ne11 > 8; vk_pipeline pipeline = ggml_vk_guess_matmul_pipeline(ctx, mmp, ne01, ne11, aligned, qx_needs_dequant ? GGML_TYPE_F16 : src0->type); + // Reserve extra storage in the N dimension for the Y matrix, so we can avoid bounds-checking + uint32_t padded_n = qy_needs_dequant ? ROUNDUP_POW2(ne11, pipeline->wg_denoms[1]) :ne11; + const int x_ne = ne01 * ne00; + const int y_ne = padded_n * ne10; + const int d_ne = ne11 * ne01; + const uint32_t split_k = ggml_vk_guess_split_k(ctx, ne01, ne11, ne10, pipeline); const uint64_t qx_sz = ggml_type_size(src0->type) * x_ne / ggml_blck_size(src0->type); @@ -4237,7 +4244,7 @@ static void ggml_vk_mul_mat_q_f16(ggml_backend_vk_context * ctx, vk_context& sub { d_D, d_buf_offset, d_sz * ne12 * ne13 }, { ctx->prealloc_split_k, 0, d_sz * ne12 * ne13 * split_k }, ne01, ne11, ne10, ne10, ne10, ne01, stride_batch_x, stride_batch_y, ne20*ne21, - split_k, ne12*ne13, ne02, ne12, r2, r3 + split_k, ne12*ne13, ne02, ne12, r2, r3, padded_n ); // NOLINT } @@ -4688,15 +4695,17 @@ static void ggml_vk_mul_mat_id_q_f16(ggml_backend_vk_context * ctx, vk_context& // Not implemented GGML_ASSERT(y_non_contig || !qy_needs_dequant); // NOLINT - const uint64_t x_ne = ne01 * ne00; - const uint64_t y_ne = ne11 * ne10; - const uint64_t d_ne = ne21 * ne20; - const uint32_t kpad = ggml_vk_align_size(ne10, ggml_vk_guess_matmul_id_pipeline_align(ctx, mmp, ne01, nei1, qx_needs_dequant ? GGML_TYPE_F16 : src0->type)); const bool aligned = ne10 == kpad && ne01 > 8 && nei1 > 8; vk_pipeline pipeline = ggml_vk_guess_matmul_id_pipeline(ctx, mmp, ne01, nei1, aligned, qx_needs_dequant ? GGML_TYPE_F16 : src0->type); + // Reserve extra storage in the N dimension for the Y matrix, so we can avoid bounds-checking + uint32_t padded_n = qy_needs_dequant ? ROUNDUP_POW2(ne11, pipeline->wg_denoms[1]) :ne11; + const uint64_t x_ne = ne01 * ne00; + const uint64_t y_ne = padded_n * ne10; + const uint64_t d_ne = ne21 * ne20; + const uint64_t qx_sz = ggml_type_size(src0->type) * x_ne / ggml_blck_size(src0->type); const uint64_t qy_sz = ggml_type_size(src1->type) * y_ne / ggml_blck_size(src1->type); const uint64_t x_sz = !qx_needs_dequant ? 
qx_sz : sizeof(ggml_fp16_t) * x_ne; @@ -4815,7 +4824,7 @@ static void ggml_vk_mul_mat_id_q_f16(ggml_backend_vk_context * ctx, vk_context& { d_D, d_buf_offset, d_sz * ne22 * ne23 }, { d_ids, ids_buf_offset, ids_sz }, ne01, ne21, ne10, ne10, ne10, ne01, stride_batch_x, stride_batch_y, ne20*ne21, - n_as, nei0, nei1, nbi1 / ggml_type_size(ids->type), ne11 + n_as, nei0, nei1, nbi1 / ggml_type_size(ids->type), ne11, padded_n ); // NOLINT } @@ -6775,7 +6784,7 @@ static void ggml_vk_test_matmul(ggml_backend_vk_context * ctx, size_t m, size_t ctx, subctx, p, ggml_vk_subbuffer(d_X), ggml_vk_subbuffer(d_Y), ggml_vk_subbuffer(d_D), ggml_vk_subbuffer(ctx->prealloc_split_k), m, n, k, k, k, m, k*m, k*n, m*n, - split_k, batch, batch, batch, 1, 1 + split_k, batch, batch, batch, 1, 1, n ); } ggml_vk_ctx_end(subctx); @@ -7120,7 +7129,7 @@ static void ggml_vk_test_dequant_matmul(ggml_backend_vk_context * ctx, size_t m, ctx, subctx, p, ggml_vk_subbuffer(qx_buf), ggml_vk_subbuffer(y_buf), ggml_vk_subbuffer(d_buf), ggml_vk_subbuffer(ctx->prealloc_split_k), m, n, k, k, k, m, k*m, k*n, m*n, - split_k, batch, batch, batch, 1, 1 + split_k, batch, batch, batch, 1, 1, n ); } ggml_vk_ctx_end(subctx); diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/mul_mm_cm2.comp b/ggml/src/ggml-vulkan/vulkan-shaders/mul_mm_cm2.comp index 66dd2c860d82d..5b7a4efe2ca8e 100644 --- a/ggml/src/ggml-vulkan/vulkan-shaders/mul_mm_cm2.comp +++ b/ggml/src/ggml-vulkan/vulkan-shaders/mul_mm_cm2.comp @@ -48,6 +48,8 @@ layout (push_constant) uniform parameter uint broadcast2; uint broadcast3; #endif + // N dimension for the B matrix can be >= p.N + uint padded_N; } p; @@ -202,18 +204,19 @@ void main() { #endif // Use end_k rather than p.K as the dimension because that's what - // we need to bound check against when using split_k + // we need to bound check against when using split_k. + // Bounds check B against padded_N, but bounds check D against N. 
tensorLayoutA = setTensorLayoutDimensionNV(tensorLayoutA, p.M, end_k); - tensorLayoutB = setTensorLayoutDimensionNV(tensorLayoutB, p.N, end_k); + tensorLayoutB = setTensorLayoutDimensionNV(tensorLayoutB, p.padded_N, end_k); tensorLayoutD = setTensorLayoutDimensionNV(tensorLayoutD, p.N, p.M); tensorLayoutAClamp = setTensorLayoutDimensionNV(tensorLayoutAClamp, p.M, end_k); - tensorLayoutBClamp = setTensorLayoutDimensionNV(tensorLayoutBClamp, p.N, end_k); + tensorLayoutBClamp = setTensorLayoutDimensionNV(tensorLayoutBClamp, p.padded_N, end_k); tensorViewNV<2, false, 1, 0> tensorViewTranspose = createTensorViewNV(2, false, 1, 0); #if !defined(MUL_MAT_ID) // Detect a fast path where all loads are entirely in bounds and no clamping is required - if ((ir + 1) * BM <= p.M && (ic + 1) * BN <= p.N && (start_k % BK) == 0 && (end_k % BK) == 0 && + if ((ir + 1) * BM <= p.M && (ic + 1) * BN <= p.padded_N && (start_k % BK) == 0 && (end_k % BK) == 0 && #if QUANT_K == 1 (stride_a % 8) == 0 && #endif @@ -263,7 +266,7 @@ void main() { #ifdef MUL_MAT_ID bool unclampedB = true; #else - bool unclampedB = (ic + 1) * BN <= p.N && block_k + BK <= end_k && (block_k % 8) == 0; + bool unclampedB = (ic + 1) * BN <= p.padded_N && block_k + BK <= end_k && (block_k % 8) == 0; #endif if (unclampedA && unclampedB) { coopMatLoadTensorNV(mat_a, data_a, pos_a, sliceTensorLayoutNV(tensorLayoutA, ir * BM, BM, (block_k & ~7), BK) DECODEFUNCA); From f07690c930f74d82d4f108e567c7092544847f77 Mon Sep 17 00:00:00 2001 From: Jeff Bolz Date: Mon, 17 Mar 2025 04:43:35 -0500 Subject: [PATCH 066/398] vulkan: use fp32 in coopmat2 q4_k dequant function (#12309) --- .../ggml-vulkan/vulkan-shaders/dequant_funcs_cm2.comp | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/dequant_funcs_cm2.comp b/ggml/src/ggml-vulkan/vulkan-shaders/dequant_funcs_cm2.comp index 4ccbe613af2ce..8efe4653ffe75 100644 --- a/ggml/src/ggml-vulkan/vulkan-shaders/dequant_funcs_cm2.comp +++ b/ggml/src/ggml-vulkan/vulkan-shaders/dequant_funcs_cm2.comp @@ -178,7 +178,7 @@ float16_t dequantFuncQ4_K(const in decodeBufQ4_K bl, const in uint blockCoords[2 uvec4 v = bl128.block.q4k[0]; - const f16vec2 loadd = unpackFloat2x16(v.x); + const vec2 loadd = vec2(unpackFloat2x16(v.x)); uint32_t sc; uint32_t mbyte; @@ -199,15 +199,15 @@ float16_t dequantFuncQ4_K(const in decodeBufQ4_K bl, const in uint blockCoords[2 sc &= 0x3F; mbyte &= 0x3F; - const float16_t d = loadd.x * float16_t(sc); - const float16_t m = loadd.y * float16_t(mbyte); + const float d = loadd.x * float(sc); + const float m = loadd.y * float(mbyte); uint qs = uint32_t(bl16.block.qs[((idx & 0xC0) >> 2) + ((idx & 0x1E) >> 1)]); qs = (qs >> (b * 4 + 8 * (idx & 1))) & 0xF; - float16_t ret = d * float16_t(qs) - m; + float ret = d * float(qs) - m; - return ret; + return float16_t(ret); } layout(buffer_reference, std430, buffer_reference_align = 16) buffer decodeBufQ5_K { From cf2270e4d3685ac46f4a166d8718997ba7cbc45a Mon Sep 17 00:00:00 2001 From: Daniele Date: Mon, 17 Mar 2025 12:42:33 +0100 Subject: [PATCH 067/398] vulkan: subgroup size tuning (#12087) * vulkan: subgroup size test * Vulkan: Add device architecture enum and logic to recognize AMD generations * vulkan: use new architecture logic to specify subgroup size * Initial vulkan subgroup size tuning for RDNA3 * vulkan: commonize RDNA subgroup tuning * vulkan: override subgroup size if required_subgroup_size = 0 * vulkan: disable warp 32 for RDNA3 * vulkan: fine tuned RDNA1 subgroup sizes * vulkan: adjusted 
subgroup size map * vulkan: fixed RDNA2 subgroup map --------- Co-authored-by: 0cc4m --- ggml/src/ggml-vulkan/ggml-vulkan.cpp | 155 +++++++++++++++++++++++++-- 1 file changed, 145 insertions(+), 10 deletions(-) diff --git a/ggml/src/ggml-vulkan/ggml-vulkan.cpp b/ggml/src/ggml-vulkan/ggml-vulkan.cpp index a837b0dda4cbf..aa7281acbf916 100644 --- a/ggml/src/ggml-vulkan/ggml-vulkan.cpp +++ b/ggml/src/ggml-vulkan/ggml-vulkan.cpp @@ -150,6 +150,66 @@ static void ggml_vk_destroy_buffer(vk_buffer& buf); static constexpr uint32_t mul_mat_vec_max_cols = 8; +enum vk_device_architecture { + OTHER, + AMD_GCN, + AMD_RDNA1, + AMD_RDNA2, + AMD_RDNA3, +}; + +static vk_device_architecture get_device_architecture(const vk::PhysicalDevice& device) { + vk::PhysicalDeviceProperties props = device.getProperties(); + + if (props.vendorID == VK_VENDOR_ID_AMD) { + const std::vector ext_props = device.enumerateDeviceExtensionProperties(); + + bool amd_shader_core_properties = false; + bool integer_dot_product = false; + bool subgroup_size_control = false; + + for (const auto& properties : ext_props) { + if (strcmp("VK_AMD_shader_core_properties", properties.extensionName) == 0) { + amd_shader_core_properties = true; + } else if (strcmp("VK_KHR_shader_integer_dot_product", properties.extensionName) == 0) { + integer_dot_product = true; + } else if (strcmp("VK_EXT_subgroup_size_control", properties.extensionName) == 0) { + subgroup_size_control = true; + } + } + + if (!amd_shader_core_properties || !integer_dot_product || !subgroup_size_control) { + return vk_device_architecture::OTHER; + } + + vk::PhysicalDeviceProperties2 props2; + vk::PhysicalDeviceShaderCorePropertiesAMD shader_core_props_amd; + vk::PhysicalDeviceShaderIntegerDotProductPropertiesKHR integer_dot_props; + vk::PhysicalDeviceSubgroupSizeControlPropertiesEXT subgroup_size_control_props; + + props2.pNext = &shader_core_props_amd; + shader_core_props_amd.pNext = &integer_dot_props; + integer_dot_props.pNext = &subgroup_size_control_props; + + device.getProperties2(&props2); + + if (subgroup_size_control_props.maxSubgroupSize == 64 && subgroup_size_control_props.minSubgroupSize == 64) { + return vk_device_architecture::AMD_GCN; + } + if (subgroup_size_control_props.maxSubgroupSize == 64 && subgroup_size_control_props.minSubgroupSize == 32) { + // RDNA + if (shader_core_props_amd.wavefrontsPerSimd == 20) { + return vk_device_architecture::AMD_RDNA1; + } + if (integer_dot_props.integerDotProduct4x8BitPackedMixedSignednessAccelerated) { + return vk_device_architecture::AMD_RDNA3; + } + return vk_device_architecture::AMD_RDNA2; + } + } + return vk_device_architecture::OTHER; +} + struct vk_device_struct { std::mutex mutex; @@ -162,6 +222,7 @@ struct vk_device_struct { bool pipeline_robustness; vk::Device device; uint32_t vendor_id; + vk_device_architecture architecture; vk_queue compute_queue; vk_queue transfer_queue; bool single_queue; @@ -1448,6 +1509,73 @@ static bool ggml_vk_matmul_shmem_support(const vk_device& device, const std::vec return supported; } +struct GpuPipelineConfig { + // GPU architecture identifier. + // Example: vk_device_architecture::AMD_GCN + vk_device_architecture arch; + + // Mapping of pipeline names to their specific subgroup sizes. + // Example: {"soft_max_f32", 64} + std::unordered_map pipelines; + + // Default subgroup size for this GPU. + // Defaults to 0 if not explicitly provided. + uint32_t default_subgroup_size = 0; +}; + +// Pipeline configuration for RDNA1 GPUs. 
+static const std::unordered_map rdna1_pipelines = { + {"soft_max", 64}, {"im2col", 64}, + {"argmax", 64}, {"mul_mat_vec", 64}, + {"mul_mat_vec_f16", 32}, {"mul_mat_vec_f32_f16", 32} +}; + +// Pipeline configuration for RDNA2 GPUs. +static const std::unordered_map rdna2_pipelines = { + {"soft_max", 64}, {"im2col", 64}, +}; + +static constexpr uint32_t RDNA_DEFAULT_SUBGROUP_SIZE = 32; + +// Define configurations for different GPUs. +static std::vector gpu_pipeline_configs = { + { + vk_device_architecture::AMD_RDNA1, + { + rdna1_pipelines, + }, + RDNA_DEFAULT_SUBGROUP_SIZE + }, + { + vk_device_architecture::AMD_RDNA2, + { + rdna2_pipelines, + }, + RDNA_DEFAULT_SUBGROUP_SIZE + }, +}; + +static uint32_t get_subgroup_size(const std::string &pipeline_name, const vk_device_architecture &arch) { + for (const auto &config : gpu_pipeline_configs) { + if (config.arch == arch) { + auto pipIt = config.pipelines.find(pipeline_name); + if (pipIt != config.pipelines.end()) { + return pipIt->second; + } + std::vector> sorted_pipelines(config.pipelines.begin(), config.pipelines.end()); + std::sort(sorted_pipelines.begin(), sorted_pipelines.end(), + [](const auto &a, const auto &b) { return a.first.size() > b.first.size(); }); + for (const auto &entry : sorted_pipelines) { + if (pipeline_name.find(entry.first) != std::string::npos) { + return entry.second; + } + } + return config.default_subgroup_size; + } + } + return 0; // If no matching configuration is found +} + static void ggml_vk_load_shaders(vk_device& device) { VK_LOG_DEBUG("ggml_vk_load_shaders(" << device->name << ")"); @@ -1574,6 +1702,10 @@ static void ggml_vk_load_shaders(vk_device& device) { uint32_t parameter_count, uint32_t push_constant_size, std::array wg_denoms, const std::vector& specialization_constants, uint32_t align, bool disable_robustness = false, bool require_full_subgroups = false, uint32_t required_subgroup_size = 0) { + if (!require_full_subgroups && required_subgroup_size == 0) { + required_subgroup_size = get_subgroup_size(name, device->architecture); + } + if (!pipeline) { pipeline = std::make_shared(); pipeline->name = name; @@ -2250,7 +2382,7 @@ static void ggml_vk_load_shaders(vk_device& device) { device->need_compiles = false; } -static bool ggml_vk_khr_cooperative_matrix_support(const vk::PhysicalDeviceProperties& props, const vk::PhysicalDeviceDriverProperties& driver_props); +static bool ggml_vk_khr_cooperative_matrix_support(const vk::PhysicalDeviceProperties& props, const vk::PhysicalDeviceDriverProperties& driver_props, vk_device_architecture arch); static vk_device ggml_vk_get_device(size_t idx) { VK_LOG_DEBUG("ggml_vk_get_device(" << idx << ")"); @@ -2279,6 +2411,8 @@ static vk_device ggml_vk_get_device(size_t idx) { device->physical_device = physical_devices[dev_num]; const std::vector ext_props = device->physical_device.enumerateDeviceExtensionProperties(); + device->architecture = get_device_architecture(device->physical_device); + const char* GGML_VK_PREFER_HOST_MEMORY = getenv("GGML_VK_PREFER_HOST_MEMORY"); device->prefer_host_memory = GGML_VK_PREFER_HOST_MEMORY != nullptr; @@ -2291,7 +2425,6 @@ static vk_device ggml_vk_get_device(size_t idx) { bool coopmat2_support = false; device->coopmat_support = false; - // Check if maintenance4 is supported for (const auto& properties : ext_props) { if (strcmp("VK_KHR_maintenance4", properties.extensionName) == 0) { maintenance4_support = true; @@ -2404,7 +2537,7 @@ static vk_device ggml_vk_get_device(size_t idx) { device->fp16 = !force_disable_f16 && fp16_storage && 
fp16_compute; - if (!ggml_vk_khr_cooperative_matrix_support(device->properties, driver_props)) { + if (!ggml_vk_khr_cooperative_matrix_support(device->properties, driver_props, device->architecture)) { device->coopmat_support = false; } @@ -2782,7 +2915,10 @@ static void ggml_vk_print_gpu_info(size_t idx) { subgroup_props.pNext = &driver_props; physical_device.getProperties2(&props2); - const size_t subgroup_size = subgroup_props.subgroupSize; + vk_device_architecture arch = get_device_architecture(physical_device); + uint32_t default_subgroup_size = get_subgroup_size("", arch); + const size_t subgroup_size = (default_subgroup_size != 0) ? default_subgroup_size : subgroup_props.subgroupSize; + const bool uma = props2.properties.deviceType == vk::PhysicalDeviceType::eIntegratedGpu; bool fp16_storage = false; @@ -2808,7 +2944,9 @@ static void ggml_vk_print_gpu_info(size_t idx) { } } - if (!ggml_vk_khr_cooperative_matrix_support(props2.properties, driver_props)) { + const vk_device_architecture device_architecture = get_device_architecture(physical_device); + + if (!ggml_vk_khr_cooperative_matrix_support(props2.properties, driver_props, device_architecture)) { coopmat_support = false; } @@ -8843,7 +8981,7 @@ static bool ggml_vk_instance_portability_enumeration_ext_available(const std::ve UNUSED(instance_extensions); } -static bool ggml_vk_khr_cooperative_matrix_support(const vk::PhysicalDeviceProperties& props, const vk::PhysicalDeviceDriverProperties& driver_props) { +static bool ggml_vk_khr_cooperative_matrix_support(const vk::PhysicalDeviceProperties& props, const vk::PhysicalDeviceDriverProperties& driver_props, vk_device_architecture arch) { switch (props.vendorID) { case VK_VENDOR_ID_INTEL: // Intel drivers don't support coopmat properly yet @@ -8851,10 +8989,7 @@ static bool ggml_vk_khr_cooperative_matrix_support(const vk::PhysicalDevicePrope case VK_VENDOR_ID_AMD: if (driver_props.driverID == vk::DriverId::eAmdProprietary || driver_props.driverID == vk::DriverId::eAmdOpenSource) { // Workaround for AMD proprietary driver reporting support on all GPUs - const std::string name = props.deviceName; - return name.rfind("AMD Radeon RX 7", 0) == 0 || name.rfind("AMD Radeon(TM) RX 7", 0) == 0 || // RDNA 3 consumer GPUs - name.rfind("AMD Radeon PRO W7", 0) == 0 || name.rfind("AMD Radeon(TM) PRO W7", 0) == 0 || // RDNA 3 workstation GPUs - name.rfind("AMD Radeon 7", 0) == 0 || name.rfind("AMD Radeon(TM) 7", 0) == 0; // RDNA 3 APUs + return arch == vk_device_architecture::AMD_RDNA3; } return true; default: From 484a8ab513bbd740cc49f30280c1acf52cb4e7e9 Mon Sep 17 00:00:00 2001 From: Jeff Bolz Date: Mon, 17 Mar 2025 09:26:18 -0500 Subject: [PATCH 068/398] vulkan: Add N/2 and N/4 optimized paths in coopmat2 shader (#12312) --- ggml/src/ggml-vulkan/ggml-vulkan.cpp | 24 +++--- .../vulkan-shaders/mul_mm_cm2.comp | 79 ++++++++++++++----- 2 files changed, 72 insertions(+), 31 deletions(-) diff --git a/ggml/src/ggml-vulkan/ggml-vulkan.cpp b/ggml/src/ggml-vulkan/ggml-vulkan.cpp index aa7281acbf916..97398f071b80e 100644 --- a/ggml/src/ggml-vulkan/ggml-vulkan.cpp +++ b/ggml/src/ggml-vulkan/ggml-vulkan.cpp @@ -1597,33 +1597,33 @@ static void ggml_vk_load_shaders(vk_device& device) { uint32_t l_align, m_align, s_align; if (device->coopmat2) { // spec constants and tile sizes for non-quant matmul/matmul_id - l_warptile = { 256, 128, 256, 64 }; - m_warptile = { 256, 128, 128, 64 }; - s_warptile = { 128, 64, 64, 64 }; + l_warptile = { 256, 128, 256, 64, 1 }; + m_warptile = { 256, 128, 128, 64, 0 }; + s_warptile 
= { 128, 64, 64, 64, 0 }; l_wg_denoms = {128, 256, 1 }; m_wg_denoms = {128, 128, 1 }; s_wg_denoms = { 64, 64, 1 }; // spec constants and tile sizes for quant matmul (non-Qi_K) - l_warptile_mmq = { 256, 128, 256, 64 }; - m_warptile_mmq = { 256, 128, 128, 64 }; - s_warptile_mmq = { 256, 32, 64, 128 }; + l_warptile_mmq = { 256, 128, 256, 64, 1 }; + m_warptile_mmq = { 256, 128, 128, 64, 1 }; + s_warptile_mmq = { 256, 32, 64, 128, 0 }; l_mmq_wg_denoms = { 128, 256, 1 }; m_mmq_wg_denoms = { 128, 128, 1 }; s_mmq_wg_denoms = { 32, 64, 1 }; // spec constants and tile sizes for quant matmul (Qi_K) - l_warptile_mmq_k = { 256, 64, 128, 64 }; - m_warptile_mmq_k = { 256, 32, 64, 64 }; - s_warptile_mmq_k = { 256, 32, 32, 128 }; + l_warptile_mmq_k = { 256, 64, 128, 64, 1 }; + m_warptile_mmq_k = { 256, 32, 64, 64, 0 }; + s_warptile_mmq_k = { 256, 32, 32, 128, 0 }; l_mmq_wg_denoms_k = { 64, 128, 1 }; m_mmq_wg_denoms_k = { 32, 64, 1 }; s_mmq_wg_denoms_k = { 32, 32, 1 }; // spec constants and tile sizes for quant matmul_id - l_warptile_mmqid = { 256, 128, 64, 16 }; - m_warptile_mmqid = { 256, 128, 64, 16 }; - s_warptile_mmqid = { 256, 128, 64, 16 }; + l_warptile_mmqid = { 256, 128, 64, 16, 0 }; + m_warptile_mmqid = { 256, 128, 64, 16, 0 }; + s_warptile_mmqid = { 256, 128, 64, 16, 0 }; l_mmqid_wg_denoms = { 128, 64, 1 }; m_mmqid_wg_denoms = { 128, 64, 1 }; s_mmqid_wg_denoms = { 128, 64, 1 }; diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/mul_mm_cm2.comp b/ggml/src/ggml-vulkan/vulkan-shaders/mul_mm_cm2.comp index 5b7a4efe2ca8e..7649febb07193 100644 --- a/ggml/src/ggml-vulkan/vulkan-shaders/mul_mm_cm2.comp +++ b/ggml/src/ggml-vulkan/vulkan-shaders/mul_mm_cm2.comp @@ -23,6 +23,10 @@ layout (constant_id = 1) const uint BM = 64; layout (constant_id = 2) const uint BN = 64; layout (constant_id = 3) const uint BK = 16; // Assumed to be 32 if working with a quant +layout (constant_id = 4) const bool enable_smaller_matrices = false; +const uint BNover2 = enable_smaller_matrices ? (BN / 2) : BN; +const uint BNover4 = enable_smaller_matrices ? 
(BN / 4) : BN; + layout (push_constant) uniform parameter { uint M; @@ -168,15 +172,13 @@ void main() { const uint end_k = min(p.K, (ik + 1) * p.k_split); #endif - coopmat sum; - sum = coopmat(0.0); - #ifdef MUL_MAT_ID uint pos_a = (expert_idx * p.batch_stride_a) / QUANT_K; uint pos_b = 0; #else uint pos_a = (batch_idx_a * p.batch_stride_a) / QUANT_K; uint pos_b = batch_idx * p.batch_stride_b; + uint pos_d = batch_idx * p.batch_stride_d + ik * p.batch_stride_d * gl_NumWorkGroups.z; #endif uint stride_a = p.stride_a / QUANT_K; @@ -197,6 +199,7 @@ void main() { tensorLayoutNV<2> tensorLayoutB = createTensorLayoutNV(2); tensorLayoutNV<2, gl_CooperativeMatrixClampModeConstantNV> tensorLayoutBClamp = createTensorLayoutNV(2, gl_CooperativeMatrixClampModeConstantNV); tensorLayoutNV<2, gl_CooperativeMatrixClampModeConstantNV> tensorLayoutD = createTensorLayoutNV(2, gl_CooperativeMatrixClampModeConstantNV); + tensorLayoutD = setTensorLayoutStrideNV(tensorLayoutD, p.stride_d, 1); #if QUANT_K > 1 tensorLayoutA = setTensorLayoutBlockSizeNV(tensorLayoutA, 1, QUANT_K); @@ -232,16 +235,54 @@ void main() { tensorLayoutB = setTensorLayoutStrideNV(tensorLayoutB, stride_b, 1); uint k_iters = (end_k - start_k + BK - 1) / BK; + if (enable_smaller_matrices && ic * BN + BNover4 >= p.N) { + coopmat sum = coopmat(0.0); + for (uint block_k = start_k, i = 0; i < k_iters; block_k += BK, ++i) { - for (uint block_k = start_k, i = 0; i < k_iters; block_k += BK, ++i) { + coopmat mat_a; + coopmat mat_b; - coopmat mat_a; - coopmat mat_b; + coopMatLoadTensorNV(mat_a, data_a, pos_a, sliceTensorLayoutNV(tensorLayoutA, ir * BM, BM, block_k, BK) DECODEFUNCA); + coopMatLoadTensorNV(mat_b, data_b, pos_b, sliceTensorLayoutNV(tensorLayoutB, ic * BN, BNover4, block_k, BK), tensorViewTranspose); + + sum = coopMatMulAdd(mat_a, mat_b, sum); + } + coopmat mat_d = coopmat(sum); + + coopMatStoreTensorNV(mat_d, data_d, pos_d, sliceTensorLayoutNV(tensorLayoutD, ic * BN, BNover4, ir * BM, BM), tensorViewTranspose); + return; + } else if (enable_smaller_matrices && ic * BN + BNover2 >= p.N) { + coopmat sum = coopmat(0.0); + for (uint block_k = start_k, i = 0; i < k_iters; block_k += BK, ++i) { + + coopmat mat_a; + coopmat mat_b; + + coopMatLoadTensorNV(mat_a, data_a, pos_a, sliceTensorLayoutNV(tensorLayoutA, ir * BM, BM, block_k, BK) DECODEFUNCA); + coopMatLoadTensorNV(mat_b, data_b, pos_b, sliceTensorLayoutNV(tensorLayoutB, ic * BN, BNover2, block_k, BK), tensorViewTranspose); + + sum = coopMatMulAdd(mat_a, mat_b, sum); + } + coopmat mat_d = coopmat(sum); + + coopMatStoreTensorNV(mat_d, data_d, pos_d, sliceTensorLayoutNV(tensorLayoutD, ic * BN, BNover2, ir * BM, BM), tensorViewTranspose); + return; + } else { + coopmat sum = coopmat(0.0); + for (uint block_k = start_k, i = 0; i < k_iters; block_k += BK, ++i) { + + coopmat mat_a; + coopmat mat_b; - coopMatLoadTensorNV(mat_a, data_a, pos_a, sliceTensorLayoutNV(tensorLayoutA, ir * BM, BM, block_k, BK) DECODEFUNCA); - coopMatLoadTensorNV(mat_b, data_b, pos_b, sliceTensorLayoutNV(tensorLayoutB, ic * BN, BN, block_k, BK), tensorViewTranspose); + coopMatLoadTensorNV(mat_a, data_a, pos_a, sliceTensorLayoutNV(tensorLayoutA, ir * BM, BM, block_k, BK) DECODEFUNCA); + coopMatLoadTensorNV(mat_b, data_b, pos_b, sliceTensorLayoutNV(tensorLayoutB, ic * BN, BN, block_k, BK), tensorViewTranspose); + + sum = coopMatMulAdd(mat_a, mat_b, sum); + } + coopmat mat_d = coopmat(sum); - sum = coopMatMulAdd(mat_a, mat_b, sum); + coopMatStoreTensorNV(mat_d, data_d, pos_d, sliceTensorLayoutNV(tensorLayoutD, ic * BN, BN, 
ir * BM, BM), tensorViewTranspose); + return; } } else #endif // !defined(MUL_MAT_ID) @@ -254,6 +295,9 @@ void main() { tensorLayoutBClamp = setTensorLayoutStrideNV(tensorLayoutBClamp, stride_b, 1); + coopmat sum; + sum = coopmat(0.0); + [[dont_unroll]] for (uint block_k = start_k; block_k < end_k; block_k += BK) { @@ -296,19 +340,16 @@ void main() { sum = coopMatMulAdd(mat_a, mat_b, sum); } } - } - // Convert from ACC_TYPE to D_TYPE - coopmat mat_d; - mat_d = coopmat(sum); + // Convert from ACC_TYPE to D_TYPE + coopmat mat_d; + mat_d = coopmat(sum); #ifdef MUL_MAT_ID - // Call callback to store each element, remapping row through shared memory - coopMatPerElementNV(mat_d, mat_d, perElemOpD, ir, ic); + // Call callback to store each element, remapping row through shared memory + coopMatPerElementNV(mat_d, mat_d, perElemOpD, ir, ic); #else - tensorLayoutD = setTensorLayoutStrideNV(tensorLayoutD, p.stride_d, 1); - - uint pos_d = batch_idx * p.batch_stride_d + ik * p.batch_stride_d * gl_NumWorkGroups.z; - coopMatStoreTensorNV(mat_d, data_d, pos_d, sliceTensorLayoutNV(tensorLayoutD, ic * BN, BN, ir * BM, BM), tensorViewTranspose); + coopMatStoreTensorNV(mat_d, data_d, pos_d, sliceTensorLayoutNV(tensorLayoutD, ic * BN, BN, ir * BM, BM), tensorViewTranspose); #endif + } } From 01e8f2138b2e40902afe2983ecbf503a08d74b1d Mon Sep 17 00:00:00 2001 From: Guus Waals <_@guusw.nl> Date: Tue, 18 Mar 2025 00:35:43 +0800 Subject: [PATCH 069/398] ggml-vulkan: remove unused find_program(glslc) (#12416) It's already found by FindVulkan.cmake in the parent CMakeLists --- ggml/src/ggml-vulkan/vulkan-shaders/CMakeLists.txt | 4 ---- 1 file changed, 4 deletions(-) diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/CMakeLists.txt b/ggml/src/ggml-vulkan/vulkan-shaders/CMakeLists.txt index 074031087f45a..51c78b7d2293a 100644 --- a/ggml/src/ggml-vulkan/vulkan-shaders/CMakeLists.txt +++ b/ggml/src/ggml-vulkan/vulkan-shaders/CMakeLists.txt @@ -1,8 +1,4 @@ find_package (Threads REQUIRED) -find_program(GLSLC_EXECUTABLE glslc) -if(NOT GLSLC_EXECUTABLE) - message(FATAL_ERROR "glslc not found.") -endif() set(TARGET vulkan-shaders-gen) add_executable(${TARGET} vulkan-shaders-gen.cpp) From b1b132efcba216c873715c483809730bb253f4a1 Mon Sep 17 00:00:00 2001 From: Gaurav Garg <52341457+gaugarg-nv@users.noreply.github.com> Date: Mon, 17 Mar 2025 23:55:13 +0530 Subject: [PATCH 070/398] cuda : enable CUDA Graph on CUDA Toolkit < 12.x (#12394) * Enable CUDA Graph on CTK < 12.x `cudaGraphExecUpdate` API was changed on 12.x. For this reason CUDA graph support was disabled on older CUDA toolkit. This change enables CUDA support in CTK version < 12.x by using older API if CTK < 12.x. 
* Fix compilation errors with MUSA * Disable CUDA Graph for MUSA --- ggml/src/ggml-cuda/common.cuh | 2 +- ggml/src/ggml-cuda/ggml-cuda.cu | 12 +++++++----- ggml/src/ggml-cuda/vendors/hip.h | 2 +- ggml/src/ggml-cuda/vendors/musa.h | 3 ++- ggml/src/ggml-musa/CMakeLists.txt | 4 ---- 5 files changed, 11 insertions(+), 12 deletions(-) diff --git a/ggml/src/ggml-cuda/common.cuh b/ggml/src/ggml-cuda/common.cuh index 4d4ac47c034e1..e78205e5d53af 100644 --- a/ggml/src/ggml-cuda/common.cuh +++ b/ggml/src/ggml-cuda/common.cuh @@ -678,7 +678,7 @@ struct ggml_tensor_extra_gpu { }; -#if ((CUDART_VERSION >= 12000) && defined(GGML_CUDA_USE_GRAPHS)) || defined(GGML_HIP_GRAPHS) +#if (defined(GGML_CUDA_USE_GRAPHS) || defined(GGML_HIP_GRAPHS)) #define USE_CUDA_GRAPH #endif diff --git a/ggml/src/ggml-cuda/ggml-cuda.cu b/ggml/src/ggml-cuda/ggml-cuda.cu index 497de37be8210..9bba398ce6be1 100644 --- a/ggml/src/ggml-cuda/ggml-cuda.cu +++ b/ggml/src/ggml-cuda/ggml-cuda.cu @@ -2610,13 +2610,15 @@ static bool is_cuda_graph_update_required(ggml_backend_cuda_context * cuda_ctx, static void update_cuda_graph_executable(ggml_backend_cuda_context * cuda_ctx) { +#if CUDART_VERSION >= 12000 cudaGraphExecUpdateResultInfo result_info; -#ifdef __HIP_PLATFORM_AMD__ - hipGraphNode_t errorNode; - hipError_t stat = hipGraphExecUpdate(cuda_ctx->cuda_graph->instance, cuda_ctx->cuda_graph->graph, &errorNode, &result_info); -#else cudaError_t stat = cudaGraphExecUpdate(cuda_ctx->cuda_graph->instance, cuda_ctx->cuda_graph->graph, &result_info); -#endif +#else + cudaGraphNode_t errorNode; + cudaGraphExecUpdateResult result_info; + cudaError_t stat = cudaGraphExecUpdate(cuda_ctx->cuda_graph->instance, cuda_ctx->cuda_graph->graph, &errorNode, &result_info); +#endif // CUDART_VERSION >= 12000 + if (stat == cudaErrorGraphExecUpdateFailure) { #ifndef NDEBUG GGML_LOG_DEBUG("%s: CUDA graph update failed\n", __func__); diff --git a/ggml/src/ggml-cuda/vendors/hip.h b/ggml/src/ggml-cuda/vendors/hip.h index 81964611c6064..aace21e3a8b18 100644 --- a/ggml/src/ggml-cuda/vendors/hip.h +++ b/ggml/src/ggml-cuda/vendors/hip.h @@ -112,7 +112,7 @@ #define cudaGraphExecDestroy hipGraphExecDestroy #define cudaGraphLaunch hipGraphLaunch #define cudaErrorGraphExecUpdateFailure hipErrorGraphExecUpdateFailure -#define cudaGraphExecUpdateResultInfo hipGraphExecUpdateResult +#define cudaGraphExecUpdateResult hipGraphExecUpdateResult #define cudaGraphNodeType hipGraphNodeType #define cudaGraphNodeTypeKernel hipGraphNodeTypeKernel #define cudaGraphInstantiate hipGraphInstantiate diff --git a/ggml/src/ggml-cuda/vendors/musa.h b/ggml/src/ggml-cuda/vendors/musa.h index 6cc1b69ee3390..997f671431e01 100644 --- a/ggml/src/ggml-cuda/vendors/musa.h +++ b/ggml/src/ggml-cuda/vendors/musa.h @@ -119,7 +119,7 @@ #define cudaGraphExecDestroy musaGraphExecDestroy #define cudaGraphExec_t musaGraphExec_t #define cudaGraphExecUpdate musaGraphExecUpdate -#define cudaGraphExecUpdateResultInfo musaGraphExecUpdateResult +#define cudaGraphExecUpdateResult musaGraphExecUpdateResult #define cudaGraphGetNodes musaGraphGetNodes #define cudaGraphInstantiate musaGraphInstantiate #define cudaGraphKernelNodeGetParams musaGraphKernelNodeGetParams @@ -132,6 +132,7 @@ #define cudaGraph_t musaGraph_t #define cudaKernelNodeParams musaKernelNodeParams #define cudaStreamCaptureModeRelaxed musaStreamCaptureModeRelaxed +#define cudaStreamBeginCapture musaStreamBeginCapture #define cudaStreamEndCapture musaStreamEndCapture typedef mt_bfloat16 nv_bfloat16; diff --git a/ggml/src/ggml-musa/CMakeLists.txt 
b/ggml/src/ggml-musa/CMakeLists.txt index 166970ca6bfb8..92f05d5558c80 100644 --- a/ggml/src/ggml-musa/CMakeLists.txt +++ b/ggml/src/ggml-musa/CMakeLists.txt @@ -67,10 +67,6 @@ if (MUSAToolkit_FOUND) add_compile_definitions(GGML_USE_MUSA) add_compile_definitions(GGML_CUDA_PEER_MAX_BATCH_SIZE=${GGML_CUDA_PEER_MAX_BATCH_SIZE}) - if (GGML_CUDA_GRAPHS) - add_compile_definitions(GGML_CUDA_USE_GRAPHS) - endif() - if (GGML_CUDA_FORCE_MMQ) add_compile_definitions(GGML_CUDA_FORCE_MMQ) endif() From 60c902926c928f9c2cd6390ce411876f92feeaf3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sigbj=C3=B8rn=20Skj=C3=A6ret?= Date: Mon, 17 Mar 2025 21:14:32 +0100 Subject: [PATCH 071/398] docs : bring llama-cli conversation/template docs up-to-date (#12426) --- examples/main/README.md | 41 ++++++++++++++++++++++++++++++++++++----- 1 file changed, 36 insertions(+), 5 deletions(-) diff --git a/examples/main/README.md b/examples/main/README.md index f7c2497294ab5..e4b3590b5d15e 100644 --- a/examples/main/README.md +++ b/examples/main/README.md @@ -27,12 +27,24 @@ Once downloaded, place your model in the models folder in llama.cpp. ##### Input prompt (One-and-done) ```bash -./llama-cli -m models/gemma-1.1-7b-it.Q4_K_M.gguf --prompt "Once upon a time" +./llama-cli -m models/gemma-1.1-7b-it.Q4_K_M.gguf -no-cnv --prompt "Once upon a time" ``` ##### Conversation mode (Allow for continuous interaction with the model) ```bash -./llama-cli -m models/gemma-1.1-7b-it.Q4_K_M.gguf -cnv --chat-template gemma +./llama-cli -m models/gemma-1.1-7b-it.Q4_K_M.gguf --chat-template gemma +``` + +##### Conversation mode using built-in jinja chat template + +```bash +./llama-cli -m models/gemma-1.1-7b-it.Q4_K_M.gguf --jinja +``` + +##### One-and-done query using jinja with custom system prompt and a starting prompt + +```bash +./llama-cli -m models/gemma-1.1-7b-it.Q4_K_M.gguf --jinja --single-turn -sys "You are a helpful assistant" -p "Hello" ``` ##### Infinite text from a starting prompt (you can use `Ctrl-C` to stop it): @@ -44,12 +56,24 @@ Once downloaded, place your model in the models folder in llama.cpp. ##### Input prompt (One-and-done) ```powershell -./llama-cli.exe -m models\gemma-1.1-7b-it.Q4_K_M.gguf --prompt "Once upon a time" +./llama-cli.exe -m models\gemma-1.1-7b-it.Q4_K_M.gguf -no-cnv --prompt "Once upon a time" ``` ##### Conversation mode (Allow for continuous interaction with the model) ```powershell -./llama-cli.exe -m models\gemma-1.1-7b-it.Q4_K_M.gguf -cnv --chat-template gemma +./llama-cli.exe -m models\gemma-1.1-7b-it.Q4_K_M.gguf --chat-template gemma +``` + +##### Conversation mode using built-in jinja chat template + +```powershell +./llama-cli.exe -m models\gemma-1.1-7b-it.Q4_K_M.gguf --jinja +``` + +##### One-and-done query using jinja with custom system prompt and a starting prompt + +```powershell +./llama-cli.exe -m models\gemma-1.1-7b-it.Q4_K_M.gguf --jinja --single-turn -sys "You are a helpful assistant" -p "Hello" ``` #### Infinite text from a starting prompt (you can use `Ctrl-C` to stop it): @@ -77,6 +101,8 @@ The `llama-cli` program provides several ways to interact with the LLaMA models - `--prompt PROMPT`: Provide a prompt directly as a command-line option. - `--file FNAME`: Provide a file containing a prompt or multiple prompts. +- `--system-prompt PROMPT`: Provide a system prompt (will otherwise use the default one in the chat template (if provided)). +- `--system-prompt-file FNAME`: Provide a file containing a system prompt. 
- `--interactive-first`: Run the program in interactive mode and wait for input right away. (More on this below.) ## Interaction @@ -89,7 +115,10 @@ In interactive mode, users can participate in text generation by injecting their - `-i, --interactive`: Run the program in interactive mode, allowing users to engage in real-time conversations or provide specific instructions to the model. - `--interactive-first`: Run the program in interactive mode and immediately wait for user input before starting the text generation. -- `-cnv, --conversation`: Run the program in conversation mode (does not print special tokens and suffix/prefix, use default chat template) (default: false) +- `-cnv, --conversation`: Run the program in conversation mode (does not print special tokens and suffix/prefix, use default or provided chat template) (default: true if chat template found) +- `-no-cnv`: Disable conversation mode (default: false) +- `-st, --single-turn`: Only process a single conversation turn (user input) and then exit. +- `--jinja`: Enable jinja chat template parser, will use the model's built-in template or a user-provided one (default: false) - `--color`: Enable colorized output to differentiate visually distinguishing between prompts, user input, and generated text. By understanding and utilizing these interaction options, you can create engaging and dynamic experiences with the LLaMA models, tailoring the text generation process to your specific needs. @@ -125,6 +154,8 @@ When --in-prefix or --in-suffix options are enabled the chat template ( --chat-t Example usage: `--chat-template gemma` +`--chat-template-file FNAME`: Load a custom jinja chat template from an external file, useful if the model contains outdated or incompatible template, some examples can be found in models/templates. Up-to-date chat templates can be downloaded from Hugging Face using scripts/get_chat_template.py + ## Context Management During text generation, LLaMA models have a limited context size, which means they can only consider a certain number of tokens from the input and generated text. When the context fills up, the model resets internally, potentially losing some information from the beginning of the conversation or instructions. Context management options help maintain continuity and coherence in these situations. 
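As a concrete illustration of the template options documented above, a hypothetical invocation that combines conversation mode, the jinja parser, a template override from `models/templates`, and a custom system prompt (the model and template filenames are placeholders, not files shipped by this patch):

```bash
./llama-cli -m models/my-model.Q4_K_M.gguf --jinja \
    --chat-template-file models/templates/my-model.jinja \
    -sys "You are a helpful assistant"
```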
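The next patch (072/398) adds the RWKV v7 architecture along with two new operators, `GGML_OP_L2_NORM` and `GGML_OP_RWKV_WKV7`. As a reading aid for the CPU and GPU kernels it introduces, here is a minimal NumPy sketch of what the two operators compute for a single attention head; the function names, shapes and `eps` handling are illustrative only and are not part of the patch:

```python
import numpy as np

def l2_norm(x, eps):
    # ggml_l2_norm: scale each row to unit L2 norm, y = x / max(||x||_2, eps)
    n = np.sqrt((x * x).sum(axis=-1, keepdims=True))
    return x / np.maximum(n, eps)

def wkv7_head(r, w, k, v, a, b, state):
    # Naive single-head rwkv_wkv7, mirroring ggml_compute_forward_rwkv_wkv7_f32:
    #   r, w, k, v, a, b : (T, N) per-token vectors for one head
    #   state            : (N, N), rows = output index i, columns = key index j
    T, N = r.shape
    out = np.zeros((T, N), dtype=state.dtype)
    for t in range(T):
        sa = state @ a[t]                       # sa[i] = sum_j state[i,j] * a[t,j]
        state = state * w[t] + np.outer(v[t], k[t]) + np.outer(sa, b[t])
        out[t] = state @ r[t]                   # out[t,i] = sum_j state[i,j] * r[t,j]
    return out, state
```

Per token, each head carries an `N x N` state: `a` reads the previous state (the `sa` term), `b` writes the correction back, `w` applies a per-channel decay, and the output is the updated state read through `r`. The kernels append the final state after the `T * C` outputs in `dst` so it can be carried into the next chunk of the same sequence.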
From 7dfad387e3f6ac98d383ded2d175eb59736a3993 Mon Sep 17 00:00:00 2001 From: Molly Sophia Date: Tue, 18 Mar 2025 07:27:50 +0800 Subject: [PATCH 072/398] llama: Add support for RWKV v7 architecture (#12412) * ggml: Add op l2_norm Signed-off-by: Molly Sophia * ggml: Add op rwkv_wkv7 Signed-off-by: Molly Sophia * llama: Add support for RWKV7 and ARWKV7 models Signed-off-by: Molly Sophia * llama: fix inference with RWKV6Qwen2 Signed-off-by: Molly Sophia * llama: add more (a)rwkv7 variants in size Signed-off-by: Molly Sophia * Apply code-format changes Signed-off-by: Molly Sophia * fix MUSA build Signed-off-by: Molly Sophia * llama: fix shape error with rwkv using llama-parallel Signed-off-by: Molly Sophia --------- Signed-off-by: Molly Sophia --- convert_hf_to_gguf.py | 229 ++++++- ggml/include/ggml.h | 24 + ggml/src/ggml-cpu/ggml-cpu.c | 255 +++++++- ggml/src/ggml-cuda/ggml-cuda.cu | 10 +- ggml/src/ggml-cuda/norm.cu | 116 ++++ ggml/src/ggml-cuda/norm.cuh | 2 + ggml/src/ggml-cuda/wkv.cu | 199 ++++++ ggml/src/ggml-cuda/{wkv6.cuh => wkv.cuh} | 2 + ggml/src/ggml-cuda/wkv6.cu | 89 --- ggml/src/ggml-metal/ggml-metal-impl.h | 7 + ggml/src/ggml-metal/ggml-metal.m | 122 ++++ ggml/src/ggml-metal/ggml-metal.metal | 221 +++++++ ggml/src/ggml-sycl/backend.hpp | 2 +- ggml/src/ggml-sycl/ggml-sycl.cpp | 14 + ggml/src/ggml-sycl/norm.cpp | 108 ++++ ggml/src/ggml-sycl/norm.hpp | 6 + ggml/src/ggml-sycl/wkv.cpp | 305 +++++++++ ggml/src/ggml-sycl/wkv.hpp | 10 + ggml/src/ggml-sycl/wkv6.cpp | 143 ----- ggml/src/ggml-sycl/wkv6.hpp | 9 - ggml/src/ggml-vulkan/ggml-vulkan.cpp | 208 ++++--- .../ggml-vulkan/vulkan-shaders/l2_norm.comp | 41 ++ .../vulkan-shaders/vulkan-shaders-gen.cpp | 3 + ggml/src/ggml-vulkan/vulkan-shaders/wkv7.comp | 91 +++ ggml/src/ggml.c | 87 ++- gguf-py/gguf/constants.py | 126 +++- gguf-py/gguf/gguf_writer.py | 12 + gguf-py/gguf/tensor_mapping.py | 131 +++- src/llama-arch.cpp | 118 +++- src/llama-arch.h | 18 + src/llama-hparams.h | 4 + src/llama-model.cpp | 581 +++++++++++++++++- src/llama-model.h | 16 + src/llama-quant.cpp | 11 +- tests/test-backend-ops.cpp | 68 ++ 35 files changed, 2949 insertions(+), 439 deletions(-) create mode 100644 ggml/src/ggml-cuda/wkv.cu rename ggml/src/ggml-cuda/{wkv6.cuh => wkv.cuh} (62%) delete mode 100644 ggml/src/ggml-cuda/wkv6.cu create mode 100644 ggml/src/ggml-sycl/wkv.cpp create mode 100644 ggml/src/ggml-sycl/wkv.hpp delete mode 100644 ggml/src/ggml-sycl/wkv6.cpp delete mode 100644 ggml/src/ggml-sycl/wkv6.hpp create mode 100644 ggml/src/ggml-vulkan/vulkan-shaders/l2_norm.comp create mode 100644 ggml/src/ggml-vulkan/vulkan-shaders/wkv7.comp diff --git a/convert_hf_to_gguf.py b/convert_hf_to_gguf.py index b5d95bd5639f3..d13d57c54154a 100755 --- a/convert_hf_to_gguf.py +++ b/convert_hf_to_gguf.py @@ -908,6 +908,40 @@ def _set_vocab_llama_hf(self): special_vocab = gguf.SpecialVocab(self.dir_model, n_vocab=len(tokens)) special_vocab.add_to_gguf(self.gguf_writer) + def _set_vocab_rwkv_world(self): + assert (self.dir_model / "rwkv_vocab_v20230424.txt").is_file() + vocab_size = self.hparams.get("vocab_size", 65536) + + tokens: list[bytes] = [''.encode("utf-8")] + toktypes: list[int] = [gguf.TokenType.CONTROL] + + with open(self.dir_model / "rwkv_vocab_v20230424.txt", "r", encoding="utf-8") as f: + lines = f.readlines() + for line in lines: + parts = line.split(' ') + assert len(parts) >= 3 + token, token_len = ast.literal_eval(' '.join(parts[1:-1])), int(parts[-1]) + token = token.encode("utf-8") if isinstance(token, str) else token + assert isinstance(token, bytes) + 
assert len(token) == token_len + token_text: str = repr(token)[2:-1] # "b'\xff'" -> "\xff" + tokens.append(token_text.encode("utf-8")) + toktypes.append(gguf.TokenType.NORMAL) + remainder = vocab_size - len(tokens) + assert remainder >= 0 + for i in range(len(tokens), vocab_size): + tokens.append(f"[PAD{i}]".encode("utf-8")) + toktypes.append(gguf.TokenType.UNUSED) + + self.gguf_writer.add_tokenizer_model("rwkv") + self.gguf_writer.add_token_list(tokens) + self.gguf_writer.add_token_types(toktypes) + special_vocab = gguf.SpecialVocab(self.dir_model, load_merges=False) + special_vocab.chat_template = "rwkv-world" + # hack: Add '\n\n' as the EOT token to make it chat normally + special_vocab._set_special_token("eot", 261) + special_vocab.add_to_gguf(self.gguf_writer) + def _set_vocab_builtin(self, model_name: Literal["gpt-neox", "llama-spm"], vocab_size: int): tokenizer_path = Path(sys.path[0]) / "models" / f"ggml-vocab-{model_name}.gguf" logger.warning(f"Using tokenizer from '{os.path.relpath(tokenizer_path, os.getcwd())}'") @@ -3412,38 +3446,7 @@ class Rwkv6Model(Model): model_arch = gguf.MODEL_ARCH.RWKV6 def set_vocab(self): - assert (self.dir_model / "rwkv_vocab_v20230424.txt").is_file() - vocab_size = self.hparams.get("vocab_size", 65536) - - tokens: list[bytes] = [''.encode("utf-8")] - toktypes: list[int] = [gguf.TokenType.CONTROL] - - with open(self.dir_model / "rwkv_vocab_v20230424.txt", "r", encoding="utf-8") as f: - lines = f.readlines() - for line in lines: - parts = line.split(' ') - assert len(parts) >= 3 - token, token_len = ast.literal_eval(' '.join(parts[1:-1])), int(parts[-1]) - token = token.encode("utf-8") if isinstance(token, str) else token - assert isinstance(token, bytes) - assert len(token) == token_len - token_text: str = repr(token)[2:-1] # "b'\xff'" -> "\xff" - tokens.append(token_text.encode("utf-8")) - toktypes.append(gguf.TokenType.NORMAL) - remainder = vocab_size - len(tokens) - assert remainder >= 0 - for i in range(len(tokens), vocab_size): - tokens.append(f"[PAD{i}]".encode("utf-8")) - toktypes.append(gguf.TokenType.UNUSED) - - self.gguf_writer.add_tokenizer_model("rwkv") - self.gguf_writer.add_token_list(tokens) - self.gguf_writer.add_token_types(toktypes) - special_vocab = gguf.SpecialVocab(self.dir_model, load_merges=False) - special_vocab.chat_template = "rwkv-world" - # hack: Add '\n\n' as the EOT token to make it chat normally - special_vocab._set_special_token("eot", 261) - special_vocab.add_to_gguf(self.gguf_writer) + self._set_vocab_rwkv_world() def set_gguf_parameters(self): block_count = self.hparams["num_hidden_layers"] @@ -3565,6 +3568,168 @@ def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iter yield (new_name, data) +@Model.register("Rwkv7ForCausalLM", "RWKV7ForCausalLM") +class Rwkv7Model(Model): + model_arch = gguf.MODEL_ARCH.RWKV7 + + def set_vocab(self): + self._set_vocab_rwkv_world() + + def calc_lora_rank(self, hidden_size, exponent, multiplier): + return max(1, round(hidden_size ** exponent * multiplier / 32)) * 32 + + def set_gguf_parameters(self): + block_count = self.hparams["num_hidden_layers"] + try: + head_size = self.hparams["head_size"] + layer_norm_eps = self.hparams["layer_norm_epsilon"] + except KeyError: + head_size = self.hparams["head_dim"] + layer_norm_eps = self.hparams["norm_eps"] + hidden_size = self.hparams["hidden_size"] + intermediate_size = self.hparams["intermediate_size"] if self.hparams["intermediate_size"] is not None else (hidden_size * 4) + + # ICLR: In-Context-Learning-Rate + try: 
+ lora_rank_decay = self.hparams["lora_rank_decay"] if self.hparams["lora_rank_decay"] is not None else self.calc_lora_rank(hidden_size, 0.5, 1.8) + lora_rank_iclr = self.hparams["lora_rank_iclr"] if self.hparams["lora_rank_iclr"] is not None else self.calc_lora_rank(hidden_size, 0.5, 1.8) + lora_rank_value_residual_mix = self.hparams["lora_rank_value_residual_mix"] if self.hparams["lora_rank_value_residual_mix"] is not None else self.calc_lora_rank(hidden_size, 0.5, 1.3) + lora_rank_gate = self.hparams["lora_rank_gate"] if self.hparams["lora_rank_gate"] is not None else self.calc_lora_rank(hidden_size, 0.8, 0.6) + except KeyError: + lora_rank_decay = self.hparams["decay_low_rank_dim"] if self.hparams["decay_low_rank_dim"] is not None else self.calc_lora_rank(hidden_size, 0.5, 1.8) + lora_rank_iclr = self.hparams["a_low_rank_dim"] if self.hparams["a_low_rank_dim"] is not None else self.calc_lora_rank(hidden_size, 0.5, 1.8) + lora_rank_value_residual_mix = self.hparams["v_low_rank_dim"] if self.hparams["v_low_rank_dim"] is not None else self.calc_lora_rank(hidden_size, 0.5, 1.3) + lora_rank_gate = self.hparams["gate_low_rank_dim"] if self.hparams["gate_low_rank_dim"] is not None else self.calc_lora_rank(hidden_size, 0.8, 0.6) + + # RWKV isn't context limited + self.gguf_writer.add_context_length(1048576) + self.gguf_writer.add_embedding_length(hidden_size) + self.gguf_writer.add_block_count(block_count) + self.gguf_writer.add_layer_norm_eps(layer_norm_eps) + self.gguf_writer.add_wkv_head_size(head_size) + self.gguf_writer.add_decay_lora_rank(lora_rank_decay) + self.gguf_writer.add_iclr_lora_rank(lora_rank_iclr) + self.gguf_writer.add_value_residual_mix_lora_rank(lora_rank_value_residual_mix) + self.gguf_writer.add_gate_lora_rank(lora_rank_gate) + self.gguf_writer.add_feed_forward_length(intermediate_size) + self.gguf_writer.add_file_type(self.ftype) + + # required by llama.cpp, unused + self.gguf_writer.add_head_count(0) + + lerp_weights: dict[int, dict[str, Tensor]] = {} + lora_needs_transpose: bool = True + + def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: + # unify tensor names here to make life easier + name = name.replace("blocks", "layers").replace("ffn", "feed_forward") + name = name.replace("self_attn", "attention").replace("attn", "attention") + name = name.replace("time_mixer.", "") + # lora layer names in fla-hub's impl + if "_lora.lora" in name: + self.lora_needs_transpose = False + name = name.replace("_lora.lora.0.weight", "1.weight") + name = name.replace("_lora.lora.2.weight", "2.weight") + name = name.replace("_lora.lora.2.bias", "0.weight") + + name = name.replace("feed_forward_norm", "ln2") + name = name.replace("g_norm", "ln_x") + + if "attention.v" in name and "value" not in self.map_tensor_name(name) and bid == 0: + # some models have dummy v0/v1/v2 on first layer while others don't + # ignore them all since they are not used + return + + wkv_has_gate = self.hparams.get("wkv_has_gate", True) + lerp_list = ["r", "w", "k", "v", "a", "g"] if wkv_has_gate else ["r", "w", "k", "v", "a"] + + if bid is not None and "attention.x_" in name: + if "attention.x_x" in name: + # already concatenated + new_name = f"blk.{bid}.time_mix_lerp_fused.weight" + data = data_torch.reshape(len(lerp_list), 1, 1, -1) + yield (new_name, data) + else: + try: + self.lerp_weights[bid][name] = data_torch + except KeyError: + self.lerp_weights[bid] = {name: data_torch} + if all(f"model.layers.{bid}.attention.x_{i}" in 
self.lerp_weights[bid].keys() for i in lerp_list): + new_name = f"blk.{bid}.time_mix_lerp_fused.weight" + data = torch.stack([self.lerp_weights[bid][f"model.layers.{bid}.attention.x_{i}"] for i in lerp_list], dim=0) + yield (new_name, data) + return + else: + data_torch = data_torch.squeeze() + new_name = self.map_tensor_name(name) + + if not (new_name.endswith(".weight") or new_name.endswith(".bias")): + new_name += ".weight" + + if self.lora_needs_transpose and any( + new_name.endswith(t) for t in [ + "time_mix_w1.weight", "time_mix_w2.weight", + "time_mix_a1.weight", "time_mix_a2.weight", + "time_mix_v1.weight", "time_mix_v2.weight", + "time_mix_g1.weight", "time_mix_g2.weight", + ] + ): + data_torch = data_torch.transpose(0, 1) + + if 'r_k' in new_name: + data_torch = data_torch.flatten() + + if bid == 0 and "time_mix_a" in new_name: + # dummy v0/v1/v2 on first layer + # easist way to make llama happy + yield (new_name.replace("time_mix_a", "time_mix_v"), data_torch) + + yield (new_name, data_torch) + + +@Model.register("RwkvHybridForCausalLM") +class ARwkv7Model(Rwkv7Model): + model_arch = gguf.MODEL_ARCH.ARWKV7 + + def set_vocab(self): + try: + self._set_vocab_sentencepiece() + except FileNotFoundError: + self._set_vocab_gpt2() + + def set_gguf_parameters(self): + block_count = self.hparams["num_hidden_layers"] + hidden_size = self.hparams["hidden_size"] + head_size = self.hparams["head_size"] + rms_norm_eps = self.hparams["rms_norm_eps"] + intermediate_size = self.hparams["intermediate_size"] + wkv_has_gate = self.hparams["wkv_has_gate"] + assert self.hparams["wkv_version"] == 7 + + # ICLR: In-Context-Learning-Rate + lora_rank_decay = 64 + lora_rank_iclr = 64 + lora_rank_value_residual_mix = 32 + lora_rank_gate = 128 if wkv_has_gate else 0 + + # RWKV isn't context limited + self.gguf_writer.add_context_length(1048576) + self.gguf_writer.add_embedding_length(hidden_size) + self.gguf_writer.add_block_count(block_count) + self.gguf_writer.add_layer_norm_rms_eps(rms_norm_eps) + self.gguf_writer.add_wkv_head_size(head_size) + self.gguf_writer.add_decay_lora_rank(lora_rank_decay) + self.gguf_writer.add_iclr_lora_rank(lora_rank_iclr) + self.gguf_writer.add_value_residual_mix_lora_rank(lora_rank_value_residual_mix) + self.gguf_writer.add_gate_lora_rank(lora_rank_gate) + self.gguf_writer.add_feed_forward_length(intermediate_size) + self.gguf_writer.add_file_type(self.ftype) + self.gguf_writer.add_token_shift_count(1) + + # required by llama.cpp, unused + self.gguf_writer.add_head_count(0) + + @Model.register("MambaForCausalLM", "MambaLMHeadModel", "FalconMambaForCausalLM") class MambaModel(Model): model_arch = gguf.MODEL_ARCH.MAMBA diff --git a/ggml/include/ggml.h b/ggml/include/ggml.h index 2e5076d36a09f..cb3edb10d4702 100644 --- a/ggml/include/ggml.h +++ b/ggml/include/ggml.h @@ -454,6 +454,7 @@ extern "C" { GGML_OP_RMS_NORM, GGML_OP_RMS_NORM_BACK, GGML_OP_GROUP_NORM, + GGML_OP_L2_NORM, GGML_OP_MUL_MAT, GGML_OP_MUL_MAT_ID, @@ -502,6 +503,7 @@ extern "C" { GGML_OP_ADD_REL_POS, GGML_OP_RWKV_WKV6, GGML_OP_GATED_LINEAR_ATTN, + GGML_OP_RWKV_WKV7, GGML_OP_UNARY, @@ -1095,6 +1097,18 @@ extern "C" { int n_groups, float eps); + // l2 normalize along rows + // used in rwkv v7 + GGML_API struct ggml_tensor * ggml_l2_norm( + struct ggml_context * ctx, + struct ggml_tensor * a, + float eps); + + GGML_API struct ggml_tensor * ggml_l2_norm_inplace( + struct ggml_context * ctx, + struct ggml_tensor * a, + float eps); + // a - x // b - dy GGML_API struct ggml_tensor * ggml_rms_norm_back( @@ -1890,6 
+1904,16 @@ extern "C" { struct ggml_tensor * state, float scale); + GGML_API struct ggml_tensor * ggml_rwkv_wkv7( + struct ggml_context * ctx, + struct ggml_tensor * r, + struct ggml_tensor * w, + struct ggml_tensor * k, + struct ggml_tensor * v, + struct ggml_tensor * a, + struct ggml_tensor * b, + struct ggml_tensor * state); + // custom operators typedef void (*ggml_unary_op_f32_t) (const int, float *, const float *); diff --git a/ggml/src/ggml-cpu/ggml-cpu.c b/ggml/src/ggml-cpu/ggml-cpu.c index f2ab4c5d69582..75dc96b478655 100644 --- a/ggml/src/ggml-cpu/ggml-cpu.c +++ b/ggml/src/ggml-cpu/ggml-cpu.c @@ -8548,6 +8548,69 @@ static void ggml_compute_forward_group_norm( } } +// ggml_compute_forward_l2_norm + +static void ggml_compute_forward_l2_norm_f32( + const struct ggml_compute_params * params, + struct ggml_tensor * dst) { + + const struct ggml_tensor * src0 = dst->src[0]; + + GGML_ASSERT(ggml_are_same_shape(src0, dst)); + + GGML_ASSERT(src0->nb[0] == sizeof(float)); + + const int ith = params->ith; + const int nth = params->nth; + + GGML_TENSOR_UNARY_OP_LOCALS + + float eps; + memcpy(&eps, dst->op_params, sizeof(float)); + + GGML_ASSERT(eps >= 0.0f); + + // TODO: optimize + for (int64_t i03 = 0; i03 < ne03; i03++) { + for (int64_t i02 = 0; i02 < ne02; i02++) { + for (int64_t i01 = ith; i01 < ne01; i01 += nth) { + const float * x = (float *) ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03); + + ggml_float sum = 0.0; + for (int64_t i00 = 0; i00 < ne00; i00++) { + sum += (ggml_float)(x[i00] * x[i00]); + } + + float * y = (float *) ((char *) dst->data + i01*nb1 + i02*nb2 + i03*nb3); + + memcpy(y, x, ne00 * sizeof(float)); + + const float scale = 1.0f/fmaxf(sqrtf(sum), eps); + + ggml_vec_scale_f32(ne00, y, scale); + } + } + } +} + +static void ggml_compute_forward_l2_norm( + const struct ggml_compute_params * params, + struct ggml_tensor * dst) { + + const struct ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_l2_norm_f32(params, dst); + } break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + // ggml_compute_forward_mul_mat static void ggml_compute_forward_mul_mat_one_chunk( @@ -13604,6 +13667,184 @@ static void ggml_compute_forward_gla( } } +// ggml_compute_forward_rwkv_wkv7 + +static void ggml_compute_forward_rwkv_wkv7_f32( + const struct ggml_compute_params * params, + struct ggml_tensor * dst) { + const int64_t T = dst->src[1]->ne[2]; + const int64_t C = dst->ne[0]; + const int64_t HEADS = dst->src[1]->ne[1]; + const int64_t n_seqs = dst->src[6]->ne[1]; + const int64_t head_size = C / HEADS; + + float * dst_data = (float *) dst->data; + float * state = ((float *) dst->data) + C * T; + + const int ith = params->ith; + const int nth = params->nth; + + if (ith >= HEADS) { + return; + } + + const int h_start = (HEADS * ith) / nth; + const int h_end = ((HEADS * (ith + 1)) / nth < HEADS) ? 
+ (HEADS * (ith + 1)) / nth : HEADS; + + float * r = (float *) dst->src[0]->data; + float * w = (float *) dst->src[1]->data; + float * k = (float *) dst->src[2]->data; + float * v = (float *) dst->src[3]->data; + float * a = (float *) dst->src[4]->data; + float * b = (float *) dst->src[5]->data; + + int64_t t_stride = HEADS * head_size; // Same to C + + int64_t h_stride = C / HEADS; + GGML_ASSERT(C % HEADS == 0); // C must be divisible by HEADS + int64_t h_stride_2d = head_size * head_size; + + #if defined(GGML_SIMD) + for (int64_t t = 0; t < T; t++) { + int64_t t_offset = t * t_stride; + int64_t state_offset = head_size * C * (t / (T / n_seqs)); + float * state_cur = state + state_offset; + float * state_prev = t % (T / n_seqs) ? state_cur : (float*)dst->src[6]->data + state_offset; + + for (int64_t h = h_start; h < h_end; h++) { + int64_t h_offset = h * h_stride; + int64_t t_h_offset = t_offset + h_offset; + int64_t h_2d_offset = h * h_stride_2d; + + for (int64_t ii = 0; ii < head_size; ii++) { + int64_t t_h_i_offset = t_h_offset + ii; + int64_t h_2d_i_offset = h_2d_offset + ii * h_stride; + + GGML_F32_VEC v_vec = GGML_F32_VEC_SET1(v[t_h_i_offset]); + + float sa = 0; + { + GGML_F32_VEC sum[GGML_F32_ARR] = { GGML_F32_VEC_ZERO }; + GGML_F32_VEC ax[GGML_F32_ARR]; + GGML_F32_VEC ay[GGML_F32_ARR]; + for (int64_t j = 0; j < head_size; j += GGML_F32_STEP) { + for (int64_t kk = 0; kk < GGML_F32_ARR; kk++) { + ax[kk] = GGML_F32_VEC_LOAD(&a[t_h_offset + j + kk * GGML_F32_EPR]); + ay[kk] = GGML_F32_VEC_LOAD(&state_prev[h_2d_i_offset + j + kk * GGML_F32_EPR]); + sum[kk] = GGML_F32_VEC_FMA(sum[kk], ax[kk], ay[kk]); + } + } + GGML_F32_VEC_REDUCE(sa, sum); + } + + GGML_F32_VEC sa_vec = GGML_F32_VEC_SET1(sa); + + int64_t j = 0; + GGML_F32_VEC result_vec[GGML_F32_ARR] = { GGML_F32_VEC_ZERO }; + for (; j < head_size; j += GGML_F32_STEP) { + for (int64_t kk = 0; kk < GGML_F32_ARR; kk++) { + int64_t t_h_j_offset = t_h_offset + j + kk * GGML_F32_EPR; + int64_t h_2d_i_j_offset = h_2d_i_offset + j + kk * GGML_F32_EPR; + + GGML_F32_VEC r_vec = GGML_F32_VEC_LOAD(&r[t_h_j_offset]); + GGML_F32_VEC w_vec = GGML_F32_VEC_LOAD(&w[t_h_j_offset]); + GGML_F32_VEC k_vec = GGML_F32_VEC_LOAD(&k[t_h_j_offset]); + GGML_F32_VEC b_vec = GGML_F32_VEC_LOAD(&b[t_h_j_offset]); + + k_vec = GGML_F32_VEC_MUL(v_vec, k_vec); + + GGML_F32_VEC state_vec = GGML_F32_VEC_LOAD(&state_prev[h_2d_i_j_offset]); + // kv + s * decay + sa * b + state_vec = GGML_F32_VEC_FMA(k_vec, state_vec, w_vec); + state_vec = GGML_F32_VEC_FMA(state_vec, sa_vec, b_vec); + GGML_F32_VEC_STORE(&state_cur[h_2d_i_j_offset], state_vec); + + result_vec[kk] = GGML_F32_VEC_FMA(result_vec[kk], state_vec, r_vec); + } + } + GGML_F32_VEC_REDUCE(dst_data[t_h_i_offset], result_vec); + + // There shouldn't be left-overs though. + for (; j < head_size; j++) { + int64_t t_h_j_offset = t_h_offset + j; + int64_t h_2d_i_j_offset = h_2d_i_offset + j; + + float r_val = r[t_h_j_offset]; + float w_val = w[t_h_j_offset]; + float k_val = k[t_h_j_offset]; + float b_val = b[t_h_j_offset]; + float kv_val = v[t_h_i_offset] * k_val; + + float prev_state_val = state_prev[h_2d_i_j_offset]; + state_cur[h_2d_i_j_offset] = prev_state_val * w_val + kv_val + sa * b_val; + dst_data[t_h_i_offset] += state_cur[h_2d_i_j_offset] * r_val; + } + } + } + } + #else + for (int64_t t = 0; t < T; t++) { + int64_t t_offset = t * t_stride; + int64_t state_offset = head_size * C * (t / (T / n_seqs)); + float * state_cur = state + state_offset; + float * state_prev = t % (T / n_seqs) ? 
state_cur : (float*)dst->src[6]->data + state_offset; + + for (int64_t h = h_start; h < h_end; h++) { + int64_t h_offset = h * h_stride; + int64_t t_h_offset = t_offset + h_offset; + int64_t h_2d_offset = h * h_stride_2d; + + for (int64_t i = 0; i < head_size; i++) { + int64_t t_h_i_offset = t_h_offset + i; + int64_t h_2d_i_offset = h_2d_offset + i * h_stride; + + float v_val = v[t_h_i_offset]; + + float sa = 0, result = 0; + for (int64_t j = 0; j < head_size; j++) { + sa += a[t_h_offset + j] * state_prev[h_2d_i_offset + j]; + } + + for (int64_t j = 0; j < head_size; j++) { + int64_t t_h_j_offset = t_h_offset + j; + int64_t h_2d_i_j_offset = h_2d_i_offset + j; + + float r_val = r[t_h_j_offset]; + float w_val = w[t_h_j_offset]; + float k_val = k[t_h_j_offset]; + float b_val = b[t_h_j_offset]; + float kv_val = v_val * k_val; + float prev_state_val = state_prev[h_2d_i_j_offset]; + state_cur[h_2d_i_j_offset] = prev_state_val * w_val + kv_val + sa * b_val; + result += state_cur[h_2d_i_j_offset] * r_val; + } + dst_data[t_h_i_offset] = result; + } + } + } + #endif +} + + +static void ggml_compute_forward_rwkv_wkv7( + const struct ggml_compute_params * params, + struct ggml_tensor * dst) { + + const struct ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_rwkv_wkv7_f32(params, dst); + } break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + // ggml_compute_forward_map_unary static void ggml_compute_forward_map_unary_f32( @@ -14170,6 +14411,10 @@ static void ggml_compute_forward(struct ggml_compute_params * params, struct ggm { ggml_compute_forward_group_norm(params, tensor); } break; + case GGML_OP_L2_NORM: + { + ggml_compute_forward_l2_norm(params, tensor); + } break; case GGML_OP_MUL_MAT: { ggml_compute_forward_mul_mat(params, tensor); @@ -14357,6 +14602,10 @@ static void ggml_compute_forward(struct ggml_compute_params * params, struct ggm { ggml_compute_forward_gla(params, tensor); } break; + case GGML_OP_RWKV_WKV7: + { + ggml_compute_forward_rwkv_wkv7(params, tensor); + } break; case GGML_OP_MAP_UNARY: { ggml_unary_op_f32_t fun; @@ -14582,6 +14831,7 @@ static int ggml_get_n_tasks(struct ggml_tensor * node, int n_threads) { case GGML_OP_NORM: case GGML_OP_RMS_NORM: case GGML_OP_RMS_NORM_BACK: + case GGML_OP_L2_NORM: case GGML_OP_GROUP_NORM: case GGML_OP_CONCAT: case GGML_OP_MUL_MAT: @@ -14648,14 +14898,15 @@ static int ggml_get_n_tasks(struct ggml_tensor * node, int n_threads) { case GGML_OP_FLASH_ATTN_BACK: case GGML_OP_SSM_CONV: case GGML_OP_SSM_SCAN: + case GGML_OP_RWKV_WKV6: + case GGML_OP_GATED_LINEAR_ATTN: + case GGML_OP_RWKV_WKV7: { n_tasks = n_threads; } break; case GGML_OP_WIN_PART: case GGML_OP_WIN_UNPART: case GGML_OP_GET_REL_POS: - case GGML_OP_RWKV_WKV6: - case GGML_OP_GATED_LINEAR_ATTN: case GGML_OP_MAP_UNARY: case GGML_OP_MAP_BINARY: case GGML_OP_MAP_CUSTOM1_F32: diff --git a/ggml/src/ggml-cuda/ggml-cuda.cu b/ggml/src/ggml-cuda/ggml-cuda.cu index 9bba398ce6be1..8fb063822cfb7 100644 --- a/ggml/src/ggml-cuda/ggml-cuda.cu +++ b/ggml/src/ggml-cuda/ggml-cuda.cu @@ -36,7 +36,7 @@ #include "ggml-cuda/tsembd.cuh" #include "ggml-cuda/unary.cuh" #include "ggml-cuda/upscale.cuh" -#include "ggml-cuda/wkv6.cuh" +#include "ggml-cuda/wkv.cuh" #include "ggml-cuda/gla.cuh" #include "ggml.h" @@ -2196,6 +2196,9 @@ static bool ggml_cuda_compute_forward(ggml_backend_cuda_context & ctx, struct gg case GGML_OP_GROUP_NORM: ggml_cuda_op_group_norm(ctx, dst); break; + case GGML_OP_L2_NORM: + ggml_cuda_op_l2_norm(ctx, dst); + break; case 
GGML_OP_CONCAT: ggml_cuda_op_concat(ctx, dst); break; @@ -2304,6 +2307,9 @@ static bool ggml_cuda_compute_forward(ggml_backend_cuda_context & ctx, struct gg case GGML_OP_GATED_LINEAR_ATTN: ggml_cuda_op_gated_linear_attn(ctx, dst); break; + case GGML_OP_RWKV_WKV7: + ggml_cuda_op_rwkv_wkv7(ctx, dst); + break; case GGML_OP_CROSS_ENTROPY_LOSS_BACK: ggml_cuda_cross_entropy_loss_back(ctx, dst); break; @@ -3161,6 +3167,7 @@ static bool ggml_backend_cuda_device_supports_op(ggml_backend_dev_t dev, const g break; case GGML_OP_NORM: case GGML_OP_RMS_NORM: + case GGML_OP_L2_NORM: return true; case GGML_OP_RMS_NORM_BACK: return ggml_is_contiguous(op->src[0]) && op->ne[0] % WARP_SIZE == 0; @@ -3215,6 +3222,7 @@ static bool ggml_backend_cuda_device_supports_op(ggml_backend_dev_t dev, const g case GGML_OP_LEAKY_RELU: case GGML_OP_RWKV_WKV6: case GGML_OP_GATED_LINEAR_ATTN: + case GGML_OP_RWKV_WKV7: return true; case GGML_OP_FLASH_ATTN_EXT: { #ifndef FLASH_ATTN_AVAILABLE diff --git a/ggml/src/ggml-cuda/norm.cu b/ggml/src/ggml-cuda/norm.cu index f127616eddade..0020dbcec5fb5 100644 --- a/ggml/src/ggml-cuda/norm.cu +++ b/ggml/src/ggml-cuda/norm.cu @@ -201,6 +201,85 @@ static __global__ void rms_norm_back_f32( } } +// template +// static __global__ void l2_norm_f32(const float * x, float * dst, const int ncols, const float eps) { +// const int row = blockIdx.x*blockDim.y + threadIdx.y; +// const int tid = threadIdx.x; + +// float tmp = 0.0f; // partial sum for thread in warp + +// for (int col = tid; col < ncols; col += block_size) { +// const float xi = x[row*ncols + col]; +// tmp += xi * xi; +// } + +// // sum up partial sums +// tmp = warp_reduce_sum(tmp); +// if (block_size > WARP_SIZE) { +// __shared__ float s_sum[32]; +// int warp_id = threadIdx.x / WARP_SIZE; +// int lane_id = threadIdx.x % WARP_SIZE; +// if (lane_id == 0) { +// s_sum[warp_id] = tmp; +// } +// __syncthreads(); +// tmp = s_sum[lane_id]; +// tmp = warp_reduce_sum(tmp); +// } + +// // from https://pytorch.org/docs/stable/generated/torch.nn.functional.normalize.html +// const float scale = rsqrtf(fmaxf(tmp, eps * eps)); + +// for (int col = tid; col < ncols; col += block_size) { +// dst[row*ncols + col] = scale * x[row*ncols + col]; +// } +// } + +template +static __global__ void l2_norm_f32( + const float * x, float * dst, const int ncols, const int64_t stride_row, const int64_t stride_channel, + const int64_t stride_sample, const float eps) { + const int nrows = gridDim.x; + const int nchannels = gridDim.y; + + const int row = blockIdx.x; + const int channel = blockIdx.y; + const int sample = blockIdx.z; + const int tid = threadIdx.x; + + x += sample*stride_sample + channel*stride_channel + row*stride_row; + dst += ((sample*nchannels + channel)*nrows + row)*ncols; + + float tmp = 0.0f; // partial sum for thread in warp + + for (int col = tid; col < ncols; col += block_size) { + const float xi = x[col]; + tmp += xi * xi; + } + + // sum up partial sums + tmp = warp_reduce_sum(tmp); + if constexpr (block_size > WARP_SIZE) { + static_assert(block_size == 1024, "unexpected block_size"); + __shared__ float s_sum[32]; + const int warp_id = threadIdx.x / WARP_SIZE; + const int lane_id = threadIdx.x % WARP_SIZE; + if (lane_id == 0) { + s_sum[warp_id] = tmp; + } + __syncthreads(); + tmp = s_sum[lane_id]; + tmp = warp_reduce_sum(tmp); + } + + // from https://pytorch.org/docs/stable/generated/torch.nn.functional.normalize.html + const float scale = rsqrtf(fmaxf(tmp, eps * eps)); + + for (int col = tid; col < ncols; col += block_size) { + dst[col] = 
scale * x[col]; + } +} + static void norm_f32_cuda( const float * x, float * dst, const int ncols, const int nrows, const int nchannels, const int nsamples, const int64_t stride_row, const int64_t stride_channel, const int64_t stride_sample, const float eps, cudaStream_t stream) { @@ -248,6 +327,19 @@ static void rms_norm_back_f32_cuda(const float * grad, const float * xf, float * } } +static void l2_norm_f32_cuda( + const float * x, float * dst, const int ncols, const int nrows, const int nchannels, const int nsamples, + const int64_t stride_row, const int64_t stride_channel, const int64_t stride_sample, const float eps, cudaStream_t stream) { + const dim3 blocks_num(nrows, nchannels, nsamples); + if (ncols < 1024) { + const dim3 block_dims(WARP_SIZE, 1, 1); + l2_norm_f32<<>>(x, dst, ncols, stride_row, stride_channel, stride_sample, eps); + } else { + const dim3 block_dims(1024, 1, 1); + l2_norm_f32<1024><<>>(x, dst, ncols, stride_row, stride_channel, stride_sample, eps); + } +} + void ggml_cuda_op_norm(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; const float * src0_d = (const float *) src0->data; @@ -340,3 +432,27 @@ void ggml_cuda_op_rms_norm_back(ggml_backend_cuda_context & ctx, ggml_tensor * d rms_norm_back_f32_cuda(grad_d, src0f_d, dst_d, ne00, nrows, eps, stream); } + +void ggml_cuda_op_l2_norm(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const float * src0_d = (const float *) src0->data; + float * dst_d = (float *) dst->data; + cudaStream_t stream = ctx.stream(); + + GGML_ASSERT(src0->type == GGML_TYPE_F32); + GGML_ASSERT( dst->type == GGML_TYPE_F32); + + GGML_TENSOR_UNARY_OP_LOCALS; + + float eps; + memcpy(&eps, dst->op_params, sizeof(float)); + GGML_ASSERT(eps >= 0.0f); + + const size_t ts0 = ggml_type_size(src0->type); + GGML_ASSERT(nb00 == ts0); + const int64_t s01 = nb01 / ts0; + const int64_t s02 = nb02 / ts0; + const int64_t s03 = nb03 / ts0; + + l2_norm_f32_cuda(src0_d, dst_d, ne00, ne01, ne02, ne03, s01, s02, s03, eps, stream); +} diff --git a/ggml/src/ggml-cuda/norm.cuh b/ggml/src/ggml-cuda/norm.cuh index d63d34380b0a7..706a5660a680c 100644 --- a/ggml/src/ggml-cuda/norm.cuh +++ b/ggml/src/ggml-cuda/norm.cuh @@ -7,3 +7,5 @@ void ggml_cuda_op_group_norm(ggml_backend_cuda_context & ctx, ggml_tensor * dst) void ggml_cuda_op_rms_norm(ggml_backend_cuda_context & ctx, ggml_tensor * dst); void ggml_cuda_op_rms_norm_back(ggml_backend_cuda_context & ctx, ggml_tensor * dst); + +void ggml_cuda_op_l2_norm(ggml_backend_cuda_context & ctx, ggml_tensor * dst); diff --git a/ggml/src/ggml-cuda/wkv.cu b/ggml/src/ggml-cuda/wkv.cu new file mode 100644 index 0000000000000..d2fced705e095 --- /dev/null +++ b/ggml/src/ggml-cuda/wkv.cu @@ -0,0 +1,199 @@ +#include "common.cuh" +#include "wkv.cuh" + +template +static __global__ void rwkv_wkv_f32(const int B, const int T, const int C, const int H, const float * k, const float * v, const float * r, const float * tf, const float * td, const float * s, float * dst) { + const int tid = threadIdx.x; + const int bid = blockIdx.x; + + const int head_size = block_size; + const int batch_i = bid / H; + const int head_i = bid % H; + const int state_size = C * head_size; + const int n_seq_tokens = T / B; + + float state[head_size]; + __shared__ float _k[head_size], _r[head_size], _tf[head_size], _td[head_size]; + + #pragma unroll + for (int i = 0; i < head_size; i++) { + state[i] = s[batch_i * state_size + head_i * head_size * head_size + i * head_size + tid]; 
+ } + + __syncthreads(); + _tf[tid] = tf[head_i * head_size + tid]; + __syncthreads(); + + for (int t = batch_i * n_seq_tokens * C + head_i * head_size + tid; t < (batch_i + 1) * n_seq_tokens * C + head_i * head_size + tid; t += C) { + __syncthreads(); + _k[tid] = k[t]; + _r[tid] = r[t]; + _td[tid] = td[t]; + __syncthreads(); + + const float _v = v[t]; + float y = 0; + for (int j = 0; j < head_size; j += 4) { + const float4& k = (float4&)(_k[j]); + const float4& r = (float4&)(_r[j]); + const float4& tf = (float4&)(_tf[j]); + const float4& td = (float4&)(_td[j]); + float4& s = (float4&)(state[j]); + float4 kv; + + kv.x = k.x * _v; + kv.y = k.y * _v; + kv.z = k.z * _v; + kv.w = k.w * _v; + + y += r.x * (tf.x * kv.x + s.x); + y += r.y * (tf.y * kv.y + s.y); + y += r.z * (tf.z * kv.z + s.z); + y += r.w * (tf.w * kv.w + s.w); + + s.x = s.x * td.x + kv.x; + s.y = s.y * td.y + kv.y; + s.z = s.z * td.z + kv.z; + s.w = s.w * td.w + kv.w; + } + dst[t] = y; + } + + #pragma unroll + for (int i = 0; i < head_size; i++) { + dst[T * C + batch_i * state_size + head_i * head_size * head_size + i * head_size + tid] = state[i]; + } +} + +template +static __global__ void rwkv_wkv7_f32(const int B, const int T, const int C, const int H, const float * r, const float * w, const float * k, const float * v, const float * a, const float * b, const float * s, float * dst) { + const int tid = threadIdx.x; + const int bid = blockIdx.x; + + const int head_size = block_size; + const int batch_i = bid / H; + const int head_i = bid % H; + const int state_size = C * head_size; + const int n_seq_tokens = T / B; + + float state[head_size]; + __shared__ float _r[head_size], _w[head_size], _k[head_size], _a[head_size], _b[head_size]; + +#ifndef GGML_USE_MUSA + #pragma unroll +#endif + for (int i = 0; i < head_size; i++) { + state[i] = s[batch_i * state_size + head_i * head_size * head_size + tid * head_size + i]; + } + + for (int t = batch_i * n_seq_tokens * C + head_i * head_size + tid; t < (batch_i + 1) * n_seq_tokens * C + head_i * head_size + tid; t += C) { + __syncthreads(); + _r[tid] = r[t]; + _w[tid] = w[t]; + _k[tid] = k[t]; + _a[tid] = a[t]; + _b[tid] = b[t]; + __syncthreads(); + + float sa = 0; + #pragma unroll + for (int j = 0; j < head_size; j += 4) + { + const float4& a = (float4&)(_a[j]); + const float4& s = (float4&)(state[j]); + sa += a.x * s.x; + sa += a.y * s.y; + sa += a.z * s.z; + sa += a.w * s.w; + } + + const float _v = v[t]; + float y = 0; + for (int j = 0; j < head_size; j += 4) { + const float4& r = (float4&)(_r[j]); + const float4& w = (float4&)(_w[j]); + const float4& k = (float4&)(_k[j]); + const float4& b = (float4&)(_b[j]); + float4& s = (float4&)(state[j]); + float4 kv; + + kv.x = k.x * _v; + kv.y = k.y * _v; + kv.z = k.z * _v; + kv.w = k.w * _v; + + s.x = s.x * w.x + kv.x + sa * b.x; + s.y = s.y * w.y + kv.y + sa * b.y; + s.z = s.z * w.z + kv.z + sa * b.z; + s.w = s.w * w.w + kv.w + sa * b.w; + + y += s.x * r.x; + y += s.y * r.y; + y += s.z * r.z; + y += s.w * r.w; + } + dst[t] = y; + } + + #pragma unroll + for (int i = 0; i < head_size; i++) { + dst[T * C + batch_i * state_size + head_i * head_size * head_size + tid * head_size + i] = state[i]; + } +} + +void ggml_cuda_op_rwkv_wkv6(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { + const float * k_d = (const float *)dst->src[0]->data; + const float * v_d = (const float *)dst->src[1]->data; + const float * r_d = (const float *)dst->src[2]->data; + const float * tf_d = (const float *)dst->src[3]->data; + const float * td_d = (const float 
*)dst->src[4]->data; + const float * s_d = (const float *)dst->src[5]->data; + + const int64_t B = dst->src[5]->ne[1]; + const int64_t T = dst->src[0]->ne[2]; + const int64_t C = dst->ne[0]; + const int64_t H = dst->src[0]->ne[1]; + + float * dst_d = (float *)dst->data; + + cudaStream_t stream = ctx.stream(); + + GGML_ASSERT(dst->src[5]->type == GGML_TYPE_F32); + GGML_ASSERT(C % H == 0); + GGML_ASSERT(C / H == CUDA_WKV_BLOCK_SIZE || C / H == CUDA_WKV_BLOCK_SIZE * 2); + + if (C / H == CUDA_WKV_BLOCK_SIZE) { + rwkv_wkv_f32<<>>(B, T, C, H, k_d, v_d, r_d, tf_d, td_d, s_d, dst_d); + } else { + rwkv_wkv_f32<<>>(B, T, C, H, k_d, v_d, r_d, tf_d, td_d, s_d, dst_d); + } +} + +void ggml_cuda_op_rwkv_wkv7(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { + const float * r_d = (const float *)dst->src[0]->data; + const float * w_d = (const float *)dst->src[1]->data; + const float * k_d = (const float *)dst->src[2]->data; + const float * v_d = (const float *)dst->src[3]->data; + const float * a_d = (const float *)dst->src[4]->data; + const float * b_d = (const float *)dst->src[5]->data; + const float * s_d = (const float *)dst->src[6]->data; + + const int64_t B = dst->src[6]->ne[1]; + const int64_t T = dst->src[0]->ne[2]; + const int64_t C = dst->ne[0]; + const int64_t H = dst->src[0]->ne[1]; + + float * dst_d = (float *)dst->data; + + cudaStream_t stream = ctx.stream(); + + GGML_ASSERT(dst->src[6]->type == GGML_TYPE_F32); + GGML_ASSERT(C % H == 0); + GGML_ASSERT(C / H == CUDA_WKV_BLOCK_SIZE || C / H == CUDA_WKV_BLOCK_SIZE * 2); + + if (C / H == CUDA_WKV_BLOCK_SIZE) { + rwkv_wkv7_f32<<>>(B, T, C, H, r_d, w_d, k_d, v_d, a_d, b_d, s_d, dst_d); + } else { + rwkv_wkv7_f32<<>>(B, T, C, H, r_d, w_d, k_d, v_d, a_d, b_d, s_d, dst_d); + } +} diff --git a/ggml/src/ggml-cuda/wkv6.cuh b/ggml/src/ggml-cuda/wkv.cuh similarity index 62% rename from ggml/src/ggml-cuda/wkv6.cuh rename to ggml/src/ggml-cuda/wkv.cuh index a7124ee517c45..9623dd7f8c7a2 100644 --- a/ggml/src/ggml-cuda/wkv6.cuh +++ b/ggml/src/ggml-cuda/wkv.cuh @@ -3,3 +3,5 @@ #define CUDA_WKV_BLOCK_SIZE 64 void ggml_cuda_op_rwkv_wkv6(ggml_backend_cuda_context & ctx, ggml_tensor * dst); + +void ggml_cuda_op_rwkv_wkv7(ggml_backend_cuda_context & ctx, ggml_tensor * dst); diff --git a/ggml/src/ggml-cuda/wkv6.cu b/ggml/src/ggml-cuda/wkv6.cu deleted file mode 100644 index bbdafbee5818b..0000000000000 --- a/ggml/src/ggml-cuda/wkv6.cu +++ /dev/null @@ -1,89 +0,0 @@ -#include "common.cuh" -#include "wkv6.cuh" - -static __global__ void rwkv_wkv_f32(const int B, const int T, const int C, const int H, const float * k, const float * v, const float * r, const float * tf, const float * td, const float * s, float * dst) { - const int tid = threadIdx.x; - const int bid = blockIdx.x; - - const int head_size = CUDA_WKV_BLOCK_SIZE; - const int batch_i = bid / H; - const int head_i = bid % H; - const int state_size = C * head_size; - const int n_seq_tokens = T / B; - - float state[head_size]; - __shared__ float _k[head_size], _r[head_size], _tf[head_size], _td[head_size]; - - #pragma unroll - for (int i = 0; i < head_size; i++) { - state[i] = s[batch_i * state_size + head_i * head_size * head_size + i * head_size + tid]; - } - - __syncthreads(); - _tf[tid] = tf[head_i * head_size + tid]; - __syncthreads(); - - for (int t = batch_i * n_seq_tokens * C + head_i * head_size + tid; t < (batch_i + 1) * n_seq_tokens * C + head_i * head_size + tid; t += C) { - __syncthreads(); - _k[tid] = k[t]; - _r[tid] = r[t]; - _td[tid] = td[t]; - __syncthreads(); - - const float _v = v[t]; - float 
y = 0; - for (int j = 0; j < head_size; j += 4) { - const float4& k = (float4&)(_k[j]); - const float4& r = (float4&)(_r[j]); - const float4& tf = (float4&)(_tf[j]); - const float4& td = (float4&)(_td[j]); - float4& s = (float4&)(state[j]); - float4 kv; - - kv.x = k.x * _v; - kv.y = k.y * _v; - kv.z = k.z * _v; - kv.w = k.w * _v; - - y += r.x * (tf.x * kv.x + s.x); - y += r.y * (tf.y * kv.y + s.y); - y += r.z * (tf.z * kv.z + s.z); - y += r.w * (tf.w * kv.w + s.w); - - s.x = s.x * td.x + kv.x; - s.y = s.y * td.y + kv.y; - s.z = s.z * td.z + kv.z; - s.w = s.w * td.w + kv.w; - } - dst[t] = y; - } - - #pragma unroll - for (int i = 0; i < head_size; i++) { - dst[T * C + batch_i * state_size + head_i * head_size * head_size + i * head_size + tid] = state[i]; - } -} - -void ggml_cuda_op_rwkv_wkv6(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { - const float * k_d = (const float *)dst->src[0]->data; - const float * v_d = (const float *)dst->src[1]->data; - const float * r_d = (const float *)dst->src[2]->data; - const float * tf_d = (const float *)dst->src[3]->data; - const float * td_d = (const float *)dst->src[4]->data; - const float * s_d = (const float *)dst->src[5]->data; - - const int64_t B = dst->src[5]->ne[1]; - const int64_t T = dst->src[0]->ne[2]; - const int64_t C = dst->ne[0]; - const int64_t H = dst->src[0]->ne[1]; - - float * dst_d = (float *)dst->data; - - cudaStream_t stream = ctx.stream(); - - GGML_ASSERT(dst->src[5]->type == GGML_TYPE_F32); - GGML_ASSERT(C % H == 0); - GGML_ASSERT(C / H == CUDA_WKV_BLOCK_SIZE); // The current cuda kernel is designed for RWKV6, HEAD_SIZE == 64 - - rwkv_wkv_f32<<>>(B, T, C, H, k_d, v_d, r_d, tf_d, td_d, s_d, dst_d); -} diff --git a/ggml/src/ggml-metal/ggml-metal-impl.h b/ggml/src/ggml-metal/ggml-metal-impl.h index a58c474eb007e..1e954b4ceabd7 100644 --- a/ggml/src/ggml-metal/ggml-metal-impl.h +++ b/ggml/src/ggml-metal/ggml-metal-impl.h @@ -285,6 +285,13 @@ typedef struct { float eps; } ggml_metal_kargs_rms_norm; +typedef struct { + int32_t ne00; + int32_t ne00_4; + uint64_t nb01; + float eps; +} ggml_metal_kargs_l2_norm; + typedef struct { int64_t ne00; int64_t ne01; diff --git a/ggml/src/ggml-metal/ggml-metal.m b/ggml/src/ggml-metal/ggml-metal.m index e51a4169a23bf..af65e7d9f53d4 100644 --- a/ggml/src/ggml-metal/ggml-metal.m +++ b/ggml/src/ggml-metal/ggml-metal.m @@ -184,10 +184,13 @@ static void ggml_backend_metal_device_rel(struct ggml_backend_metal_device_conte GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ4_XS, GGML_METAL_KERNEL_TYPE_GET_ROWS_I32, GGML_METAL_KERNEL_TYPE_RMS_NORM, + GGML_METAL_KERNEL_TYPE_L2_NORM, GGML_METAL_KERNEL_TYPE_GROUP_NORM, GGML_METAL_KERNEL_TYPE_NORM, GGML_METAL_KERNEL_TYPE_SSM_CONV_F32, GGML_METAL_KERNEL_TYPE_SSM_SCAN_F32, + GGML_METAL_KERNEL_TYPE_RWKV_WKV6_F32, + GGML_METAL_KERNEL_TYPE_RWKV_WKV7_F32, GGML_METAL_KERNEL_TYPE_MUL_MV_F32_F32, GGML_METAL_KERNEL_TYPE_MUL_MV_F16_F32, GGML_METAL_KERNEL_TYPE_MUL_MV_F16_F32_1ROW, @@ -810,10 +813,13 @@ @implementation GGMLMetalClass GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ4_XS, get_rows_iq4_xs, true); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_I32, get_rows_i32, true); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_RMS_NORM, rms_norm, has_simdgroup_reduction); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_L2_NORM, l2_norm, has_simdgroup_reduction); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GROUP_NORM, group_norm, has_simdgroup_reduction); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_NORM, norm, true); 
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_SSM_CONV_F32, ssm_conv_f32, true); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_SSM_SCAN_F32, ssm_scan_f32, true); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_RWKV_WKV6_F32, rwkv_wkv6_f32, true); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_RWKV_WKV7_F32, rwkv_wkv7_f32, true); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_F32_F32, mul_mv_f32_f32, has_simdgroup_reduction); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_BF16_F32, mul_mv_bf16_f32, has_simdgroup_reduction && use_bfloat); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_BF16_F32_1ROW, mul_mv_bf16_f32_1row, has_simdgroup_reduction && use_bfloat); @@ -1251,6 +1257,7 @@ static bool ggml_metal_supports_op(const struct ggml_backend_metal_device_contex case GGML_OP_GROUP_NORM: return has_simdgroup_reduction && ggml_is_contiguous(op->src[0]); case GGML_OP_RMS_NORM: + case GGML_OP_L2_NORM: return has_simdgroup_reduction && (op->ne[0] % 4 == 0 && ggml_is_contiguous_1(op->src[0])); case GGML_OP_ARGMAX: return true; @@ -1288,6 +1295,8 @@ static bool ggml_metal_supports_op(const struct ggml_backend_metal_device_contex return has_simdgroup_mm; // TODO: over-restricted for vec-kernels case GGML_OP_SSM_CONV: case GGML_OP_SSM_SCAN: + case GGML_OP_RWKV_WKV6: + case GGML_OP_RWKV_WKV7: return true; case GGML_OP_MUL_MAT: case GGML_OP_MUL_MAT_ID: @@ -2216,6 +2225,83 @@ static void ggml_metal_encode_node( [encoder dispatchThreadgroups:MTLSizeMake(d_inner, n_seqs, 1) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)]; } break; + case GGML_OP_RWKV_WKV6: + { + const int64_t B = dst->src[5]->ne[1]; + const int64_t T = dst->src[0]->ne[2]; + const int64_t C = dst->ne[0]; + const int64_t H = dst->src[0]->ne[1]; + + GGML_ASSERT(dst->src[5]->type == GGML_TYPE_F32); + GGML_ASSERT(C % H == 0); + GGML_ASSERT(C / H == 64); + + size_t offs_src3 = 0; + size_t offs_src4 = 0; + size_t offs_src5 = 0; + + id id_src3 = dst->src[3] ? ggml_metal_get_buffer(dst->src[3], &offs_src3) : nil; + id id_src4 = dst->src[4] ? ggml_metal_get_buffer(dst->src[4], &offs_src4) : nil; + id id_src5 = dst->src[5] ? ggml_metal_get_buffer(dst->src[5], &offs_src5) : nil; + + id pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_RWKV_WKV6_F32].pipeline; + + [encoder setComputePipelineState:pipeline]; + [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0]; + [encoder setBuffer:id_src1 offset:offs_src1 atIndex:1]; + [encoder setBuffer:id_src2 offset:offs_src2 atIndex:2]; + [encoder setBuffer:id_src3 offset:offs_src3 atIndex:3]; + [encoder setBuffer:id_src4 offset:offs_src4 atIndex:4]; + [encoder setBuffer:id_src5 offset:offs_src5 atIndex:5]; + [encoder setBuffer:id_dst offset:offs_dst atIndex:6]; + + [encoder setBytes:&B length:sizeof(B) atIndex:7]; + [encoder setBytes:&T length:sizeof(T) atIndex:8]; + [encoder setBytes:&C length:sizeof(C) atIndex:9]; + [encoder setBytes:&H length:sizeof(H) atIndex:10]; + + [encoder dispatchThreadgroups:MTLSizeMake(B * H, 1, 1) threadsPerThreadgroup:MTLSizeMake(C/ H, 1, 1)]; + } break; + case GGML_OP_RWKV_WKV7: + { + const int64_t B = dst->src[6]->ne[1]; + const int64_t T = dst->src[0]->ne[2]; + const int64_t C = dst->ne[0]; + const int64_t H = dst->src[0]->ne[1]; + + GGML_ASSERT(dst->src[6]->type == GGML_TYPE_F32); + GGML_ASSERT(C % H == 0); + GGML_ASSERT(C / H == 64); + + size_t offs_src3 = 0; + size_t offs_src4 = 0; + size_t offs_src5 = 0; + size_t offs_src6 = 0; + + id id_src3 = dst->src[3] ? ggml_metal_get_buffer(dst->src[3], &offs_src3) : nil; + id id_src4 = dst->src[4] ? 
ggml_metal_get_buffer(dst->src[4], &offs_src4) : nil; + id id_src5 = dst->src[5] ? ggml_metal_get_buffer(dst->src[5], &offs_src5) : nil; + id id_src6 = dst->src[6] ? ggml_metal_get_buffer(dst->src[6], &offs_src6) : nil; + + id pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_RWKV_WKV7_F32].pipeline; + + [encoder setComputePipelineState:pipeline]; + [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0]; + [encoder setBuffer:id_src1 offset:offs_src1 atIndex:1]; + [encoder setBuffer:id_src2 offset:offs_src2 atIndex:2]; + [encoder setBuffer:id_src3 offset:offs_src3 atIndex:3]; + [encoder setBuffer:id_src4 offset:offs_src4 atIndex:4]; + [encoder setBuffer:id_src5 offset:offs_src5 atIndex:5]; + [encoder setBuffer:id_src6 offset:offs_src6 atIndex:6]; + [encoder setBuffer:id_dst offset:offs_dst atIndex:7]; + + [encoder setBytes:&B length:sizeof(B) atIndex:8]; + [encoder setBytes:&T length:sizeof(T) atIndex:9]; + [encoder setBytes:&C length:sizeof(C) atIndex:10]; + [encoder setBytes:&H length:sizeof(H) atIndex:11]; + + [encoder dispatchThreadgroups:MTLSizeMake(B * H, 1, 1) threadsPerThreadgroup:MTLSizeMake(C/ H, 1, 1)]; + } break; case GGML_OP_MUL_MAT: { GGML_ASSERT(ne00 == ne10); @@ -3122,6 +3208,42 @@ static void ggml_metal_encode_node( const int64_t nrows = ggml_nrows(src0); + [encoder dispatchThreadgroups:MTLSizeMake(nrows, 1, 1) threadsPerThreadgroup:MTLSizeMake(nth, 1, 1)]; + } break; + case GGML_OP_L2_NORM: + { + GGML_ASSERT(ne00 % 4 == 0); + GGML_ASSERT(ggml_is_contiguous_1(src0)); + + float eps; + memcpy(&eps, dst->op_params, sizeof(float)); + + id pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_L2_NORM].pipeline; + + int nth = 32; // SIMD width + + while (nth < ne00/4 && nth < (int) pipeline.maxTotalThreadsPerThreadgroup) { + nth *= 2; + } + + nth = MIN(nth, ne00/4); + + ggml_metal_kargs_l2_norm args = { + /*.ne00 =*/ ne00, + /*.ne00_4 =*/ ne00/4, + /*.nb01 =*/ nb01, + /*.eps =*/ eps, + }; + + [encoder setComputePipelineState:pipeline]; + [encoder setBytes:&args length:sizeof(args) atIndex:0]; + [encoder setBuffer:id_src0 offset:offs_src0 atIndex:1]; + [encoder setBuffer:id_dst offset:offs_dst atIndex:2]; + + [encoder setThreadgroupMemoryLength:32*sizeof(float) atIndex:0]; + + const int64_t nrows = ggml_nrows(src0); + [encoder dispatchThreadgroups:MTLSizeMake(nrows, 1, 1) threadsPerThreadgroup:MTLSizeMake(nth, 1, 1)]; } break; case GGML_OP_GROUP_NORM: diff --git a/ggml/src/ggml-metal/ggml-metal.metal b/ggml/src/ggml-metal/ggml-metal.metal index ad9d42a3eaa9e..3cef81b797197 100644 --- a/ggml/src/ggml-metal/ggml-metal.metal +++ b/ggml/src/ggml-metal/ggml-metal.metal @@ -1295,6 +1295,184 @@ kernel void kernel_ssm_scan_f32( } } +kernel void kernel_rwkv_wkv6_f32( + device const float * k, + device const float * v, + device const float * r, + device const float * tf, + device const float * td, + device const float * state_in, + device float * dst, + constant uint & B, + constant uint & T, + constant uint & C, + constant uint & H, + uint3 tgpig[[threadgroup_position_in_grid]], + uint3 tpitg[[thread_position_in_threadgroup]], + uint3 ntg[[threads_per_threadgroup]]) { + + const uint head_size = 64; // TODO: support head_size = 128 + const uint batch_id = tgpig.x / H; + const uint head_id = tgpig.x % H; + const uint tid = tpitg.x; + + if (batch_id >= B || head_id >= H) { + return; + } + + const uint state_size = C * head_size; + const uint n_seq_tokens = T / B; + + threadgroup float _k[head_size]; + threadgroup float _r[head_size]; + threadgroup float _tf[head_size]; + threadgroup float 
_td[head_size]; + + float state[head_size]; + + for (uint i = 0; i < head_size; i++) { + state[i] = state_in[batch_id * state_size + head_id * head_size * head_size + + i * head_size + tid]; + } + + threadgroup_barrier(mem_flags::mem_threadgroup); + _tf[tid] = tf[head_id * head_size + tid]; + threadgroup_barrier(mem_flags::mem_threadgroup); + + const uint start_t = batch_id * n_seq_tokens * C + head_id * head_size + tid; + const uint end_t = (batch_id + 1) * n_seq_tokens * C + head_id * head_size + tid; + + for (uint t = start_t; t < end_t; t += C) { + threadgroup_barrier(mem_flags::mem_threadgroup); + _k[tid] = k[t]; + _r[tid] = r[t]; + _td[tid] = td[t]; + threadgroup_barrier(mem_flags::mem_threadgroup); + + const float v_val = v[t]; + float y = 0.0; + + for (uint j = 0; j < head_size; j += 4) { + float4 k_vec = float4(_k[j], _k[j+1], _k[j+2], _k[j+3]); + float4 r_vec = float4(_r[j], _r[j+1], _r[j+2], _r[j+3]); + float4 tf_vec = float4(_tf[j], _tf[j+1], _tf[j+2], _tf[j+3]); + float4 td_vec = float4(_td[j], _td[j+1], _td[j+2], _td[j+3]); + float4 s_vec = float4(state[j], state[j+1], state[j+2], state[j+3]); + + float4 kv = k_vec * v_val; + + float4 temp = tf_vec * kv + s_vec; + y += dot(r_vec, temp); + + s_vec = s_vec * td_vec + kv; + state[j] = s_vec[0]; + state[j+1] = s_vec[1]; + state[j+2] = s_vec[2]; + state[j+3] = s_vec[3]; + } + + dst[t] = y; + } + + for (uint i = 0; i < head_size; i++) { + dst[T * C + batch_id * state_size + head_id * head_size * head_size + + i * head_size + tid] = state[i]; + } +} + +kernel void kernel_rwkv_wkv7_f32( + device const float * r, + device const float * w, + device const float * k, + device const float * v, + device const float * a, + device const float * b, + device const float * state_in, + device float * dst, + constant uint & B, + constant uint & T, + constant uint & C, + constant uint & H, + uint3 tgpig[[threadgroup_position_in_grid]], + uint3 tpitg[[thread_position_in_threadgroup]], + uint3 ntg[[threads_per_threadgroup]]) { + + const uint head_size = 64; // TODO: support head_size = 128 + const uint batch_id = tgpig.x / H; + const uint head_id = tgpig.x % H; + const uint tid = tpitg.x; + + if (batch_id >= B || head_id >= H) { + return; + } + + const uint state_size = C * head_size; + const uint n_seq_tokens = T / B; + + threadgroup float _r[head_size]; + threadgroup float _w[head_size]; + threadgroup float _k[head_size]; + threadgroup float _a[head_size]; + threadgroup float _b[head_size]; + + float state[head_size]; + + for (uint i = 0; i < head_size; i++) { + state[i] = state_in[batch_id * state_size + head_id * head_size * head_size + + tid * head_size + i]; + } + + const uint start_t = batch_id * n_seq_tokens * C + head_id * head_size + tid; + const uint end_t = (batch_id + 1) * n_seq_tokens * C + head_id * head_size + tid; + + for (uint t = start_t; t < end_t; t += C) { + threadgroup_barrier(mem_flags::mem_threadgroup); + _r[tid] = r[t]; + _w[tid] = w[t]; + _k[tid] = k[t]; + _a[tid] = a[t]; + _b[tid] = b[t]; + threadgroup_barrier(mem_flags::mem_threadgroup); + + const float v_val = v[t]; + float y = 0.0, sa = 0.0; + + float4 sa_vec(0.0); + + for (int j = 0; j < head_size; j += 4) { + float4 a_vec = float4(_a[j], _a[j+1], _a[j+2], _a[j+3]); + float4 s_vec = float4(state[j], state[j+1], state[j+2], state[j+3]); + sa_vec += a_vec * s_vec; + } + sa = sa_vec[0] + sa_vec[1] + sa_vec[2] + sa_vec[3]; + + for (uint j = 0; j < head_size; j += 4) { + float4 r_vec = float4(_r[j], _r[j+1], _r[j+2], _r[j+3]); + float4 w_vec = float4(_w[j], _w[j+1], 
_w[j+2], _w[j+3]); + float4 k_vec = float4(_k[j], _k[j+1], _k[j+2], _k[j+3]); + float4 b_vec = float4(_b[j], _b[j+1], _b[j+2], _b[j+3]); + float4 s_vec = float4(state[j], state[j+1], state[j+2], state[j+3]); + + float4 kv = k_vec * v_val; + + s_vec = s_vec * w_vec + kv + sa * b_vec; + y += dot(s_vec, r_vec); + + state[j] = s_vec[0]; + state[j+1] = s_vec[1]; + state[j+2] = s_vec[2]; + state[j+3] = s_vec[3]; + } + + dst[t] = y; + } + + for (uint i = 0; i < head_size; i++) { + dst[T * C + batch_id * state_size + head_id * head_size * head_size + + tid * head_size + i] = state[i]; + } +} + kernel void kernel_argmax( device const void * x, device int32_t * dst, @@ -1463,6 +1641,49 @@ kernel void kernel_rms_norm( } } +kernel void kernel_l2_norm( + constant ggml_metal_kargs_l2_norm & args, + device const char * src0, + device char * dst, + threadgroup float * shmem_f32 [[threadgroup(0)]], + uint tgpig[[threadgroup_position_in_grid]], + ushort tpitg[[thread_position_in_threadgroup]], + ushort sgitg[[simdgroup_index_in_threadgroup]], + ushort tiisg[[thread_index_in_simdgroup]], + ushort ntg[[threads_per_threadgroup]]) { + if (sgitg == 0) { + shmem_f32[tiisg] = 0.0f; + } + + device const float4 * x = (device const float4 *) (src0 + tgpig*args.nb01); + + float sumf = 0.0f; + + // parallel sum + for (int i00 = tpitg; i00 < args.ne00_4; i00 += ntg) { + sumf += dot(x[i00], x[i00]); + } + sumf = simd_sum(sumf); + + threadgroup_barrier(mem_flags::mem_threadgroup); + + if (tiisg == 0) { + shmem_f32[sgitg] = sumf; + } + + threadgroup_barrier(mem_flags::mem_threadgroup); + + sumf = shmem_f32[tiisg]; + sumf = simd_sum(sumf); + + const float scale = 1.0f/sqrt(max(sumf, args.eps)); + + device float4 * y = (device float4 *) dst + tgpig*args.ne00_4; + for (int i00 = tpitg; i00 < args.ne00_4; i00 += ntg) { + y[i00] = x[i00] * scale; + } +} + kernel void kernel_group_norm( device const float * src0, device float * dst, diff --git a/ggml/src/ggml-sycl/backend.hpp b/ggml/src/ggml-sycl/backend.hpp index 577ff51fde5a8..73d807cab0be9 100644 --- a/ggml/src/ggml-sycl/backend.hpp +++ b/ggml/src/ggml-sycl/backend.hpp @@ -26,7 +26,7 @@ #include "softmax.hpp" #include "tsembd.hpp" #include "im2col.hpp" -#include "wkv6.hpp" +#include "wkv.hpp" #include "outprod.hpp" #include "element_wise.hpp" #include "cpy.hpp" diff --git a/ggml/src/ggml-sycl/ggml-sycl.cpp b/ggml/src/ggml-sycl/ggml-sycl.cpp index 05984d8c5ac4e..477652ab283ee 100644 --- a/ggml/src/ggml-sycl/ggml-sycl.cpp +++ b/ggml/src/ggml-sycl/ggml-sycl.cpp @@ -2696,6 +2696,12 @@ static void ggml_sycl_rms_norm(ggml_backend_sycl_context & ctx, ggml_tensor * ds GGML_SYCL_DEBUG("call %s done\n", __func__); } +static void ggml_sycl_l2_norm(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { + GGML_SYCL_DEBUG("call %s\n", __func__); + ggml_sycl_op_flatten(ctx, dst->src[0], dst->src[1], dst, ggml_sycl_op_l2_norm); + GGML_SYCL_DEBUG("call %s done\n", __func__); +} + static void ggml_sycl_group_norm(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { GGML_SYCL_DEBUG("call %s\n", __func__); ggml_sycl_op_flatten(ctx, dst->src[0], dst->src[1], dst, ggml_sycl_op_group_norm); @@ -3410,6 +3416,9 @@ bool ggml_sycl_compute_forward(ggml_backend_sycl_context & ctx, struct ggml_tens case GGML_OP_RMS_NORM: ggml_sycl_rms_norm(ctx, dst); break; + case GGML_OP_L2_NORM: + ggml_sycl_l2_norm(ctx, dst); + break; case GGML_OP_MUL_MAT: if (dst->src[0]->ne[3] != dst->src[1]->ne[3]) { return false; @@ -3487,6 +3496,9 @@ bool ggml_sycl_compute_forward(ggml_backend_sycl_context & ctx, struct ggml_tens 
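        // Rough per-head sketch of the recurrence the new RWKV_WKV7 case below hands
        // off to (it mirrors the Metal/SYCL/Vulkan kernels added in this series; the
        // names are illustrative, not actual ggml symbols):
        //   sa    = dot(a_t, state);                     // mix of the current state
        //   state = state * w_t + k_t * v_t + sa * b_t;  // decay, write new kv, add correction
        //   y_t   = dot(r_t, state);                     // receptance readout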
case GGML_OP_RWKV_WKV6: ggml_sycl_op_rwkv_wkv6(ctx, dst); break; + case GGML_OP_RWKV_WKV7: + ggml_sycl_op_rwkv_wkv7(ctx, dst); + break; case GGML_OP_GATED_LINEAR_ATTN: ggml_sycl_op_gated_linear_attn(ctx, dst); break; @@ -4012,6 +4024,7 @@ static bool ggml_backend_sycl_device_supports_op(ggml_backend_dev_t dev, const g return (op->src[0]->type == GGML_TYPE_F32); case GGML_OP_NORM: case GGML_OP_RMS_NORM: + case GGML_OP_L2_NORM: case GGML_OP_GROUP_NORM: return ggml_is_contiguous(op->src[0]); case GGML_OP_SCALE: @@ -4045,6 +4058,7 @@ static bool ggml_backend_sycl_device_supports_op(ggml_backend_dev_t dev, const g case GGML_OP_LEAKY_RELU: case GGML_OP_TIMESTEP_EMBEDDING: case GGML_OP_RWKV_WKV6: + case GGML_OP_RWKV_WKV7: case GGML_OP_GATED_LINEAR_ATTN: return true; default: diff --git a/ggml/src/ggml-sycl/norm.cpp b/ggml/src/ggml-sycl/norm.cpp index 9cf2be15575d8..6439db21b2978 100644 --- a/ggml/src/ggml-sycl/norm.cpp +++ b/ggml/src/ggml-sycl/norm.cpp @@ -180,6 +180,50 @@ static void rms_norm_f32(const float* x, float* dst, const int ncols, const floa } } +static void l2_norm_f32(const float* x, float* dst, const int ncols, const float eps, + const sycl::nd_item<3>& item_ct1, float* s_sum, int block_size) { + const int row = item_ct1.get_group(2) * item_ct1.get_local_range(1) + + item_ct1.get_local_id(1); + const int tid = item_ct1.get_local_id(2); + const int nthreads = item_ct1.get_local_range(2); + const int nwarps = nthreads / WARP_SIZE; + float tmp = 0.0f; // partial sum for thread in warp + + for (int col = tid; col < ncols; col += block_size) { + const float xi = x[row * ncols + col]; + tmp += xi * xi; + } + + // sum up partial sums + tmp = warp_reduce_sum(tmp, item_ct1); + if (block_size > WARP_SIZE) { + + int warp_id = item_ct1.get_local_id(2) / WARP_SIZE; + int lane_id = item_ct1.get_local_id(2) % WARP_SIZE; + if (lane_id == 0) { + s_sum[warp_id] = tmp; + } + /* + DPCT1118:3: SYCL group functions and algorithms must be encountered in + converged control flow. You may need to adjust the code. 
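        At this point lane 0 of every warp has staged its partial sum of squares
        in s_sum; after the barrier below each thread gathers those per-warp
        partials, reduces them once more within its warp, and finally scales its
        columns by rsqrt(max(sum, eps*eps)).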
+ */ + item_ct1.barrier(sycl::access::fence_space::local_space); + size_t nreduce = nwarps / WARP_SIZE; + tmp = 0.f; + for (size_t i = 0; i < nreduce; i += 1) + { + tmp += s_sum[lane_id + i * WARP_SIZE]; + } + tmp = warp_reduce_sum(tmp, item_ct1); + } + + const float scale = sycl::rsqrt(sycl::max(tmp, eps * eps)); + + for (int col = tid; col < ncols; col += block_size) { + dst[row * ncols + col] = scale * x[row * ncols + col]; + } +} + static void norm_f32_sycl(const float* x, float* dst, const int ncols, const int nrows, const float eps, queue_ptr stream, int device) { @@ -311,6 +355,48 @@ static void rms_norm_f32_sycl(const float* x, float* dst, const int ncols, } } +static void l2_norm_f32_sycl(const float* x, float* dst, const int ncols, + const int nrows, const float eps, + queue_ptr stream, int device) { + GGML_ASSERT(ncols % WARP_SIZE == 0); + // printf("%s ncols=%d, nrows=%d, WARP_SIZE=%d\n", __func__, ncols, nrows, WARP_SIZE); + if (ncols < 1024) { + const sycl::range<3> block_dims(1, 1, WARP_SIZE); + stream->submit([&](sycl::handler& cgh) { + cgh.parallel_for( + sycl::nd_range<3>(sycl::range<3>(1, 1, nrows) * block_dims, + block_dims), + [=](sycl::nd_item<3> item_ct1) + [[intel::reqd_sub_group_size(WARP_SIZE)]] { + l2_norm_f32(x, dst, ncols, eps, item_ct1, + nullptr, WARP_SIZE); + }); + }); + } + else { + const int work_group_size = ggml_sycl_info().max_work_group_sizes[device]; + assert(work_group_size % (WARP_SIZE * WARP_SIZE) == 0); + const sycl::range<3> block_dims(1, 1, work_group_size); + /* + DPCT1049:19: The work-group size passed to the SYCL kernel may exceed + the limit. To get the device limit, query + info::device::max_work_group_size. Adjust the work-group size if needed. + */ + stream->submit([&](sycl::handler& cgh) { + sycl::local_accessor s_sum_acc_ct1(sycl::range<1>(work_group_size / WARP_SIZE), + cgh); + cgh.parallel_for( + sycl::nd_range<3>(sycl::range<3>(1, 1, nrows) * block_dims, + block_dims), + [=](sycl::nd_item<3> item_ct1) + [[intel::reqd_sub_group_size(WARP_SIZE)]] { + l2_norm_f32(x, dst, ncols, eps, item_ct1, + get_pointer(s_sum_acc_ct1), work_group_size); + }); + }); + } +} + void ggml_sycl_op_norm(ggml_backend_sycl_context& ctx, const ggml_tensor* src0, const ggml_tensor* src1, ggml_tensor* dst, const float* src0_dd, const float* src1_dd, float* dst_dd, @@ -376,3 +462,25 @@ void ggml_sycl_op_rms_norm(ggml_backend_sycl_context& ctx, const ggml_tensor* sr (void)dst; (void)src1_dd; } + +void ggml_sycl_op_l2_norm(ggml_backend_sycl_context& ctx, const ggml_tensor* src0, + const ggml_tensor* src1, ggml_tensor* dst, + const float* src0_dd, const float* src1_dd, + float* dst_dd, + const queue_ptr& main_stream) { + + GGML_ASSERT(src0->type == GGML_TYPE_F32); + GGML_ASSERT(dst->type == GGML_TYPE_F32); + + const int64_t ne00 = src0->ne[0]; + const int64_t nrows = ggml_nrows(src0); + + float eps; + memcpy(&eps, dst->op_params, sizeof(float)); + + l2_norm_f32_sycl(src0_dd, dst_dd, ne00, nrows, eps, main_stream, ctx.device); + + (void)src1; + (void)dst; + (void)src1_dd; +} diff --git a/ggml/src/ggml-sycl/norm.hpp b/ggml/src/ggml-sycl/norm.hpp index a9ad9156fa33e..11e91680cc496 100644 --- a/ggml/src/ggml-sycl/norm.hpp +++ b/ggml/src/ggml-sycl/norm.hpp @@ -32,4 +32,10 @@ void ggml_sycl_op_group_norm(ggml_backend_sycl_context& ctx, const ggml_tensor* float* dst_dd, const queue_ptr& main_stream); +void ggml_sycl_op_l2_norm(ggml_backend_sycl_context& ctx, const ggml_tensor* src0, + const ggml_tensor* src1, ggml_tensor* dst, + const float* src0_dd, const float* src1_dd, 
+ float* dst_dd, + const queue_ptr& main_stream); + #endif // GGML_SYCL_NORM_HPP diff --git a/ggml/src/ggml-sycl/wkv.cpp b/ggml/src/ggml-sycl/wkv.cpp new file mode 100644 index 0000000000000..540f6fbf5f0d9 --- /dev/null +++ b/ggml/src/ggml-sycl/wkv.cpp @@ -0,0 +1,305 @@ +#include +#include "wkv.hpp" + +constexpr int WKV_BLOCK_SIZE = 64; // Matching CUDA_WKV_BLOCK_SIZE + +// Helper function for the main kernel +template +static void rwkv_wkv6_f32_kernel( + const int B, const int T, const int C, const int H, + const float* k, const float* v, const float* r, + const float* tf, const float* td, const float* s, + float* dst, const sycl::nd_item<3>& item_ct1, float* shared_mem) { + + const int tid = item_ct1.get_local_id(2); + const int bid = item_ct1.get_group(2); + + const int head_size = block_size; + const int batch_i = bid / H; + const int head_i = bid % H; + const int state_size = C * head_size; + const int n_seq_tokens = T / B; + + // Set up shared memory pointers + float* _k = shared_mem; + float* _r = _k + head_size; + float* _tf = _r + head_size; + float* _td = _tf + head_size; + + // Local state array + float state[block_size]; + + // Load initial state + #pragma unroll + for (int i = 0; i < head_size; i++) { + state[i] = s[batch_i * state_size + head_i * head_size * head_size + i * head_size + tid]; + } + + // Sync threads before shared memory operations + item_ct1.barrier(sycl::access::fence_space::local_space); + + // Load time-mixing parameters + _tf[tid] = tf[head_i * head_size + tid]; + item_ct1.barrier(sycl::access::fence_space::local_space); + + // Main sequence processing loop + for (int t = batch_i * n_seq_tokens * C + head_i * head_size + tid; + t < (batch_i + 1) * n_seq_tokens * C + head_i * head_size + tid; + t += C) { + + item_ct1.barrier(sycl::access::fence_space::local_space); + + // Load current timestep data to shared memory + _k[tid] = k[t]; + _r[tid] = r[t]; + _td[tid] = td[t]; + + item_ct1.barrier(sycl::access::fence_space::local_space); + + const float _v = v[t]; + float y = 0; + + // Process in chunks of 4 for better vectorization + sycl::float4 k4, r4, tf4, td4, s4; + #pragma unroll + for (int j = 0; j < head_size; j += 4) { + // Load data in vec4 chunks + k4 = sycl::float4(_k[j], _k[j+1], _k[j+2], _k[j+3]); + r4 = sycl::float4(_r[j], _r[j+1], _r[j+2], _r[j+3]); + tf4 = sycl::float4(_tf[j], _tf[j+1], _tf[j+2], _tf[j+3]); + td4 = sycl::float4(_td[j], _td[j+1], _td[j+2], _td[j+3]); + s4 = sycl::float4(state[j], state[j+1], state[j+2], state[j+3]); + + // Compute key-value product + sycl::float4 kv4 = k4 * _v; + + // Accumulate weighted sum + y += sycl::dot(r4, tf4 * kv4 + s4); + + // Update state + s4 = s4 * td4 + kv4; + + // Store updated state + state[j] = s4.x(); + state[j+1] = s4.y(); + state[j+2] = s4.z(); + state[j+3] = s4.w(); + } + + dst[t] = y; + } + + // Save final state + #pragma unroll + for (int i = 0; i < head_size; i++) { + dst[T * C + batch_i * state_size + head_i * head_size * head_size + i * head_size + tid] = state[i]; + } +} + +template +static void rwkv_wkv7_f32_kernel( + const int B, const int T, const int C, const int H, + const float* r, const float* w, const float* k, const float* v, + const float* a, const float* b, const float* s, + float* dst, const sycl::nd_item<3>& item_ct1, float* shared_mem) { + + const int tid = item_ct1.get_local_id(2); + const int bid = item_ct1.get_group(2); + + const int head_size = block_size; + const int batch_i = bid / H; + const int head_i = bid % H; + const int state_size = C * head_size; + const int 
n_seq_tokens = T / B; + + float* _r = shared_mem; + float* _w = _r + head_size; + float* _k = _w + head_size; + float* _a = _k + head_size; + float* _b = _a + head_size; + + float state[block_size]; + + #pragma unroll + for (int i = 0; i < head_size; i++) { + state[i] = s[batch_i * state_size + head_i * head_size * head_size + tid * head_size + i]; + } + + for (int t = batch_i * n_seq_tokens * C + head_i * head_size + tid; + t < (batch_i + 1) * n_seq_tokens * C + head_i * head_size + tid; + t += C) { + + item_ct1.barrier(sycl::access::fence_space::local_space); + + _r[tid] = r[t]; + _w[tid] = w[t]; + _k[tid] = k[t]; + _a[tid] = a[t]; + _b[tid] = b[t]; + + item_ct1.barrier(sycl::access::fence_space::local_space); + + const float _v = v[t]; + float y = 0, sa = 0; + sycl::float4 a4, s4; + + #pragma unroll + for (int j = 0; j < head_size; j += 4) { + a4 = sycl::float4(_a[j], _a[j+1], _a[j+2], _a[j+3]); + s4 = sycl::float4(state[j], state[j+1], state[j+2], state[j+3]); + sa += sycl::dot(a4, s4); + } + + sycl::float4 r4, w4, k4, b4; + #pragma unroll + for (int j = 0; j < head_size; j += 4) { + r4 = sycl::float4(_r[j], _r[j+1], _r[j+2], _r[j+3]); + w4 = sycl::float4(_w[j], _w[j+1], _w[j+2], _w[j+3]); + k4 = sycl::float4(_k[j], _k[j+1], _k[j+2], _k[j+3]); + b4 = sycl::float4(_b[j], _b[j+1], _b[j+2], _b[j+3]); + s4 = sycl::float4(state[j], state[j+1], state[j+2], state[j+3]); + + sycl::float4 kv4 = k4 * _v; + + s4 = s4 * w4 + kv4 + sa * b4; + y += sycl::dot(r4, s4); + + state[j] = s4.x(); + state[j+1] = s4.y(); + state[j+2] = s4.z(); + state[j+3] = s4.w(); + } + + dst[t] = y; + } + + #pragma unroll + for (int i = 0; i < head_size; i++) { + dst[T * C + batch_i * state_size + head_i * head_size * head_size + tid * head_size + i] = state[i]; + } +} + +void ggml_sycl_op_rwkv_wkv6(ggml_backend_sycl_context& ctx, ggml_tensor* dst) { + + const ggml_tensor *src0 = dst->src[0]; + const ggml_tensor *src1 = dst->src[1]; + + const float* k_d = (const float*)dst->src[0]->data; + const float* v_d = (const float*)dst->src[1]->data; + const float* r_d = (const float*)dst->src[2]->data; + const float* tf_d = (const float*)dst->src[3]->data; + const float* td_d = (const float*)dst->src[4]->data; + const float* s_d = (const float*)dst->src[5]->data; + float* dst_d = (float*)dst->data; + + const int64_t B = dst->src[5]->ne[1]; + const int64_t T = dst->src[0]->ne[2]; + const int64_t C = dst->ne[0]; + const int64_t H = dst->src[0]->ne[1]; + + GGML_ASSERT(dst->src[5]->type == GGML_TYPE_F32); + GGML_ASSERT(C % H == 0); + GGML_ASSERT(C / H == WKV_BLOCK_SIZE || C / H == WKV_BLOCK_SIZE * 2); // The current sycl kernel is designed for RWKV6, HEAD_SIZE == 64 + + dpct::queue_ptr stream = ctx.stream(); + + // Calculate execution configuration + const size_t shared_mem_size = C / H * 4 * sizeof(float); // For k, r, tf, td + sycl::range<3> block_dims(1, 1, C / H); + sycl::range<3> grid_dims(1, 1, B * H); + + // Submit kernel + if (C / H == WKV_BLOCK_SIZE) { + stream->submit([&](sycl::handler& cgh) { + sycl::local_accessor shared_mem_acc(shared_mem_size, cgh); + + cgh.parallel_for( + sycl::nd_range<3>(grid_dims * block_dims, block_dims), + [=](sycl::nd_item<3> item_ct1) { + rwkv_wkv6_f32_kernel( + B, T, C, H, k_d, v_d, r_d, tf_d, td_d, s_d, dst_d, + item_ct1, (float*)shared_mem_acc.get_multi_ptr().get() + ); + }); + }); + } else { + stream->submit([&](sycl::handler& cgh) { + sycl::local_accessor shared_mem_acc(shared_mem_size, cgh); + + cgh.parallel_for( + sycl::nd_range<3>(grid_dims * block_dims, block_dims), + 
[=](sycl::nd_item<3> item_ct1) { + rwkv_wkv6_f32_kernel( + B, T, C, H, k_d, v_d, r_d, tf_d, td_d, s_d, dst_d, + item_ct1, (float*)shared_mem_acc.get_multi_ptr().get() + ); + }); + }); + } + + GGML_UNUSED(src0); + GGML_UNUSED(src1); +} + +void ggml_sycl_op_rwkv_wkv7(ggml_backend_sycl_context& ctx, ggml_tensor* dst) { + + const ggml_tensor *src0 = dst->src[0]; + const ggml_tensor *src1 = dst->src[1]; + + const float* r_d = (const float*)dst->src[0]->data; + const float* w_d = (const float*)dst->src[1]->data; + const float* k_d = (const float*)dst->src[2]->data; + const float* v_d = (const float*)dst->src[3]->data; + const float* a_d = (const float*)dst->src[4]->data; + const float* b_d = (const float*)dst->src[5]->data; + const float* s_d = (const float*)dst->src[6]->data; + float* dst_d = (float*)dst->data; + + const int64_t B = dst->src[6]->ne[1]; + const int64_t T = dst->src[0]->ne[2]; + const int64_t C = dst->ne[0]; + const int64_t H = dst->src[0]->ne[1]; + + GGML_ASSERT(dst->src[6]->type == GGML_TYPE_F32); + GGML_ASSERT(C % H == 0); + GGML_ASSERT(C / H == WKV_BLOCK_SIZE || C / H == WKV_BLOCK_SIZE * 2); + + dpct::queue_ptr stream = ctx.stream(); + + // Calculate execution configuration + const size_t shared_mem_size = C / H * 5 * sizeof(float); // For r, w, k, a, b + sycl::range<3> block_dims(1, 1, C / H); + sycl::range<3> grid_dims(1, 1, B * H); + + // Submit kernel + if (C / H == WKV_BLOCK_SIZE) { + stream->submit([&](sycl::handler& cgh) { + sycl::local_accessor shared_mem_acc(shared_mem_size, cgh); + + cgh.parallel_for( + sycl::nd_range<3>(grid_dims * block_dims, block_dims), + [=](sycl::nd_item<3> item_ct1) { + rwkv_wkv7_f32_kernel( + B, T, C, H, r_d, w_d, k_d, v_d, a_d, b_d, s_d, dst_d, + item_ct1, (float*)shared_mem_acc.get_multi_ptr().get() + ); + }); + }); + } else { + stream->submit([&](sycl::handler& cgh) { + sycl::local_accessor shared_mem_acc(shared_mem_size, cgh); + + cgh.parallel_for( + sycl::nd_range<3>(grid_dims * block_dims, block_dims), + [=](sycl::nd_item<3> item_ct1) { + rwkv_wkv7_f32_kernel( + B, T, C, H, r_d, w_d, k_d, v_d, a_d, b_d, s_d, dst_d, + item_ct1, (float*)shared_mem_acc.get_multi_ptr().get() + ); + }); + }); + } + + GGML_UNUSED(src0); + GGML_UNUSED(src1); +} diff --git a/ggml/src/ggml-sycl/wkv.hpp b/ggml/src/ggml-sycl/wkv.hpp new file mode 100644 index 0000000000000..9f34a1001fd68 --- /dev/null +++ b/ggml/src/ggml-sycl/wkv.hpp @@ -0,0 +1,10 @@ +#ifndef GGML_SYCL_WKV_HPP +#define GGML_SYCL_WKV_HPP + +#include "common.hpp" + +void ggml_sycl_op_rwkv_wkv6(ggml_backend_sycl_context & ctx, ggml_tensor * dst); + +void ggml_sycl_op_rwkv_wkv7(ggml_backend_sycl_context & ctx, ggml_tensor * dst); + +#endif // GGML_SYCL_WKV_HPP diff --git a/ggml/src/ggml-sycl/wkv6.cpp b/ggml/src/ggml-sycl/wkv6.cpp deleted file mode 100644 index b54c20964ed5d..0000000000000 --- a/ggml/src/ggml-sycl/wkv6.cpp +++ /dev/null @@ -1,143 +0,0 @@ -#include -#include "wkv6.hpp" - -constexpr int WKV_BLOCK_SIZE = 64; // Matching CUDA_WKV_BLOCK_SIZE - -// Helper function for the main kernel -static void rwkv_wkv_f32_kernel( - const int B, const int T, const int C, const int H, - const float* k, const float* v, const float* r, - const float* tf, const float* td, const float* s, - float* dst, const sycl::nd_item<3>& item_ct1, float* shared_mem) { - - const int tid = item_ct1.get_local_id(2); - const int bid = item_ct1.get_group(2); - - const int head_size = WKV_BLOCK_SIZE; - const int batch_i = bid / H; - const int head_i = bid % H; - const int state_size = C * head_size; - const int n_seq_tokens = 
T / B; - - // Set up shared memory pointers - float* _k = shared_mem; - float* _r = _k + head_size; - float* _tf = _r + head_size; - float* _td = _tf + head_size; - - // Local state array - float state[WKV_BLOCK_SIZE]; - - // Load initial state - #pragma unroll - for (int i = 0; i < head_size; i++) { - state[i] = s[batch_i * state_size + head_i * head_size * head_size + i * head_size + tid]; - } - - // Sync threads before shared memory operations - item_ct1.barrier(sycl::access::fence_space::local_space); - - // Load time-mixing parameters - _tf[tid] = tf[head_i * head_size + tid]; - item_ct1.barrier(sycl::access::fence_space::local_space); - - // Main sequence processing loop - for (int t = batch_i * n_seq_tokens * C + head_i * head_size + tid; - t < (batch_i + 1) * n_seq_tokens * C + head_i * head_size + tid; - t += C) { - - item_ct1.barrier(sycl::access::fence_space::local_space); - - // Load current timestep data to shared memory - _k[tid] = k[t]; - _r[tid] = r[t]; - _td[tid] = td[t]; - - item_ct1.barrier(sycl::access::fence_space::local_space); - - const float _v = v[t]; - float y = 0; - - // Process in chunks of 4 for better vectorization - sycl::float4 k4, r4, tf4, td4, s4; - #pragma unroll - for (int j = 0; j < head_size; j += 4) { - // Load data in vec4 chunks - k4 = sycl::float4(_k[j], _k[j+1], _k[j+2], _k[j+3]); - r4 = sycl::float4(_r[j], _r[j+1], _r[j+2], _r[j+3]); - tf4 = sycl::float4(_tf[j], _tf[j+1], _tf[j+2], _tf[j+3]); - td4 = sycl::float4(_td[j], _td[j+1], _td[j+2], _td[j+3]); - s4 = sycl::float4(state[j], state[j+1], state[j+2], state[j+3]); - - // Compute key-value product - sycl::float4 kv4 = k4 * _v; - - // Accumulate weighted sum - y += sycl::dot(r4, tf4 * kv4 + s4); - - // Update state - s4 = s4 * td4 + kv4; - - // Store updated state - state[j] = s4.x(); - state[j+1] = s4.y(); - state[j+2] = s4.z(); - state[j+3] = s4.w(); - } - - dst[t] = y; - } - - // Save final state - #pragma unroll - for (int i = 0; i < head_size; i++) { - dst[T * C + batch_i * state_size + head_i * head_size * head_size + i * head_size + tid] = state[i]; - } -} - -void ggml_sycl_op_rwkv_wkv6(ggml_backend_sycl_context& ctx, ggml_tensor* dst) { - - const ggml_tensor *src0 = dst->src[0]; - const ggml_tensor *src1 = dst->src[1]; - - const float* k_d = (const float*)dst->src[0]->data; - const float* v_d = (const float*)dst->src[1]->data; - const float* r_d = (const float*)dst->src[2]->data; - const float* tf_d = (const float*)dst->src[3]->data; - const float* td_d = (const float*)dst->src[4]->data; - const float* s_d = (const float*)dst->src[5]->data; - float* dst_d = (float*)dst->data; - - const int64_t B = dst->src[5]->ne[1]; - const int64_t T = dst->src[0]->ne[2]; - const int64_t C = dst->ne[0]; - const int64_t H = dst->src[0]->ne[1]; - - GGML_ASSERT(dst->src[5]->type == GGML_TYPE_F32); - GGML_ASSERT(C % H == 0); - GGML_ASSERT(C / H == WKV_BLOCK_SIZE); // The current sycl kernel is designed for RWKV6, HEAD_SIZE == 64 - - dpct::queue_ptr stream = ctx.stream(); - - // Calculate execution configuration - const size_t shared_mem_size = WKV_BLOCK_SIZE * 4 * sizeof(float); // For k, r, tf, td - sycl::range<3> block_dims(1, 1, C / H); - sycl::range<3> grid_dims(1, 1, B * H); - - // Submit kernel - stream->submit([&](sycl::handler& cgh) { - sycl::local_accessor shared_mem_acc(shared_mem_size, cgh); - - cgh.parallel_for( - sycl::nd_range<3>(grid_dims * block_dims, block_dims), - [=](sycl::nd_item<3> item_ct1) { - rwkv_wkv_f32_kernel( - B, T, C, H, k_d, v_d, r_d, tf_d, td_d, s_d, dst_d, - item_ct1, 
(float*)shared_mem_acc.get_multi_ptr().get() - ); - }); - }); - - GGML_UNUSED(src0); - GGML_UNUSED(src1); -} diff --git a/ggml/src/ggml-sycl/wkv6.hpp b/ggml/src/ggml-sycl/wkv6.hpp deleted file mode 100644 index 8c596a9972220..0000000000000 --- a/ggml/src/ggml-sycl/wkv6.hpp +++ /dev/null @@ -1,9 +0,0 @@ -#ifndef GGML_SYCL_WKV6_HPP -#define GGML_SYCL_WKV6_HPP - -#include "common.hpp" - -void ggml_sycl_op_rwkv_wkv6(ggml_backend_sycl_context & ctx, ggml_tensor * dst); - - -#endif // GGML_SYCL_WKV6_HPP diff --git a/ggml/src/ggml-vulkan/ggml-vulkan.cpp b/ggml/src/ggml-vulkan/ggml-vulkan.cpp index 97398f071b80e..c0ee5dadef78a 100644 --- a/ggml/src/ggml-vulkan/ggml-vulkan.cpp +++ b/ggml/src/ggml-vulkan/ggml-vulkan.cpp @@ -304,6 +304,7 @@ struct vk_device_struct { vk_pipeline pipeline_group_norm_f32; vk_pipeline pipeline_rms_norm_f32; vk_pipeline pipeline_rms_norm_back_f32; + vk_pipeline pipeline_l2_norm_f32; vk_pipeline pipeline_gelu_f32; vk_pipeline pipeline_gelu_quick_f32; vk_pipeline pipeline_silu_f32; @@ -328,6 +329,7 @@ struct vk_device_struct { vk_pipeline pipeline_timestep_embedding_f32; vk_pipeline pipeline_pool2d_f32; vk_pipeline pipeline_rwkv_wkv6_f32; + vk_pipeline pipeline_rwkv_wkv7_f32; vk_pipeline pipeline_opt_step_adamw_f32; // [2][2][2] is for {f16acc,f32acc}x{large,small_rows}x{unaligned, aligned} @@ -629,6 +631,13 @@ struct vk_op_rwkv_wkv6_push_constants { uint32_t H; }; +struct vk_op_rwkv_wkv7_push_constants { + uint32_t B; + uint32_t T; + uint32_t C; + uint32_t H; +}; + // Allow pre-recording command buffers struct vk_staging_memcpy { vk_staging_memcpy(void * _dst, const void * _src, size_t _n) : dst(_dst), src(_src), n(_n) {} @@ -2263,6 +2272,7 @@ static void ggml_vk_load_shaders(vk_device& device) { ggml_vk_create_pipeline(device, device->pipeline_group_norm_f32, "group_norm_f32", group_norm_f32_len, group_norm_f32_data, "main", 2, sizeof(vk_op_push_constants), {1, 1, 1}, {}, 1); ggml_vk_create_pipeline(device, device->pipeline_rms_norm_f32, "rms_norm_f32", rms_norm_f32_len, rms_norm_f32_data, "main", 2, sizeof(vk_op_push_constants), {1, 1, 1}, {}, 1); ggml_vk_create_pipeline(device, device->pipeline_rms_norm_back_f32, "rms_norm_back_f32", rms_norm_back_f32_len, rms_norm_back_f32_data, "main", 3, sizeof(vk_op_push_constants), {1, 1, 1}, {}, 1); + ggml_vk_create_pipeline(device, device->pipeline_l2_norm_f32, "l2_norm_f32", l2_norm_f32_len, l2_norm_f32_data, "main", 2, sizeof(vk_op_push_constants), {1, 1, 1}, {}, 1); ggml_vk_create_pipeline(device, device->pipeline_cpy_f32_f32, "cpy_f32_f32", cpy_f32_f32_len, cpy_f32_f32_data, "main", 2, sizeof(vk_op_unary_push_constants), {512, 1, 1}, {}, 1); ggml_vk_create_pipeline(device, device->pipeline_cpy_f32_f16, "cpy_f32_f16", cpy_f32_f16_len, cpy_f32_f16_data, "main", 2, sizeof(vk_op_unary_push_constants), {512, 1, 1}, {}, 1); @@ -2374,6 +2384,8 @@ static void ggml_vk_load_shaders(vk_device& device) { ggml_vk_create_pipeline(device, device->pipeline_rwkv_wkv6_f32, "rwkv_wkv6_f32", rwkv_wkv6_f32_len, rwkv_wkv6_f32_data, "main", 7, sizeof(vk_op_rwkv_wkv6_push_constants), {1, 1, 1}, {device->subgroup_size}, 1); + ggml_vk_create_pipeline(device, device->pipeline_rwkv_wkv7_f32, "rwkv_wkv7_f32", rwkv_wkv7_f32_len, rwkv_wkv7_f32_data, "main", 8, sizeof(vk_op_rwkv_wkv7_push_constants), {1, 1, 1}, {device->subgroup_size}, 1); + ggml_vk_create_pipeline(device, device->pipeline_opt_step_adamw_f32, "opt_step_adamw_f32", opt_step_adamw_f32_len, opt_step_adamw_f32_data, "main", 5, sizeof(vk_op_push_constants), {512, 1, 1}, {}, 1); for (auto &c : 
compiles) { @@ -5473,6 +5485,11 @@ static vk_pipeline ggml_vk_op_get_pipeline(ggml_backend_vk_context * ctx, const return ctx->device->pipeline_rms_norm_back_f32; } return nullptr; + case GGML_OP_L2_NORM: + if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) { + return ctx->device->pipeline_l2_norm_f32; + } + return nullptr; case GGML_OP_UNARY: switch (ggml_get_unary_op(dst)) { case GGML_UNARY_OP_SILU: @@ -5612,6 +5629,11 @@ static vk_pipeline ggml_vk_op_get_pipeline(ggml_backend_vk_context * ctx, const return ctx->device->pipeline_rwkv_wkv6_f32; } return nullptr; + case GGML_OP_RWKV_WKV7: + if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) { + return ctx->device->pipeline_rwkv_wkv7_f32; + } + return nullptr; case GGML_OP_OPT_STEP_ADAMW: if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) { return ctx->device->pipeline_opt_step_adamw_f32; @@ -5859,6 +5881,7 @@ static void ggml_vk_op_f32(ggml_backend_vk_context * ctx, vk_context& subctx, co case GGML_OP_NORM: case GGML_OP_RMS_NORM: case GGML_OP_RMS_NORM_BACK: + case GGML_OP_L2_NORM: case GGML_OP_SOFT_MAX: case GGML_OP_SOFT_MAX_BACK: case GGML_OP_SUM_ROWS: @@ -6108,23 +6131,17 @@ static void ggml_vk_div(ggml_backend_vk_context * ctx, vk_context& subctx, const }, dryrun); } -static void ggml_vk_op_f32_rwkv6(ggml_backend_vk_context * ctx, vk_context& subctx, ggml_tensor * dst, const vk_op_rwkv_wkv6_push_constants&& pc, bool dryrun = false) { - const ggml_tensor * k = dst->src[0]; - const ggml_tensor * v = dst->src[1]; - const ggml_tensor * r = dst->src[2]; - const ggml_tensor * tf = dst->src[3]; - const ggml_tensor * td = dst->src[4]; - const ggml_tensor * state = dst->src[5]; - - GGML_ASSERT(!ggml_is_quantized(k->type)); - GGML_ASSERT(!ggml_is_quantized(v->type)); - GGML_ASSERT(!ggml_is_quantized(r->type)); - GGML_ASSERT(!ggml_is_quantized(tf->type)); - GGML_ASSERT(!ggml_is_quantized(td->type)); - GGML_ASSERT(!ggml_is_quantized(state->type)); +static void ggml_vk_op_f32_wkv(ggml_backend_vk_context * ctx, vk_context& subctx, ggml_tensor * dst, const vk_op_rwkv_wkv6_push_constants&& pc, int version, bool dryrun = false) { + GGML_ASSERT(version == 6 || version == 7); + int num_srcs = version == 6 ? 
6 : 7; + + for (int i = 0; i < num_srcs; i++) { + GGML_ASSERT(!ggml_is_quantized(dst->src[i]->type)); + } + GGML_ASSERT(dst->buffer != nullptr); - vk_pipeline pipeline = ggml_vk_op_get_pipeline(ctx, k, v, r, dst, GGML_OP_RWKV_WKV6); + vk_pipeline pipeline = ggml_vk_op_get_pipeline(ctx, dst->src[0], dst->src[1], dst->src[2], dst, dst->op); GGML_ASSERT(pipeline != nullptr); if (dryrun) { @@ -6133,89 +6150,73 @@ static void ggml_vk_op_f32_rwkv6(ggml_backend_vk_context * ctx, vk_context& subc } ggml_backend_vk_buffer_context * dst_buf_ctx = (ggml_backend_vk_buffer_context *)dst->buffer->context; - ggml_backend_vk_buffer_context * k_buf_ctx = (ggml_backend_vk_buffer_context *)k->buffer->context; - ggml_backend_vk_buffer_context * v_buf_ctx = (ggml_backend_vk_buffer_context *)v->buffer->context; - ggml_backend_vk_buffer_context * r_buf_ctx = (ggml_backend_vk_buffer_context *)r->buffer->context; - ggml_backend_vk_buffer_context * tf_buf_ctx = (ggml_backend_vk_buffer_context *)tf->buffer->context; - ggml_backend_vk_buffer_context * td_buf_ctx = (ggml_backend_vk_buffer_context *)td->buffer->context; - ggml_backend_vk_buffer_context * state_buf_ctx = (ggml_backend_vk_buffer_context *)state->buffer->context; + ggml_backend_vk_buffer_context * src_buf_ctxs[7] = { nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr }; + for (int i = 0; i < num_srcs; i++) { + src_buf_ctxs[i] = (ggml_backend_vk_buffer_context *)dst->src[i]->buffer->context; + } ggml_vk_sync_buffers(subctx); - vk_buffer d_D = nullptr, d_K = nullptr, d_V = nullptr, d_R = nullptr, d_TF = nullptr, d_TD = nullptr, d_State = nullptr; - size_t k_offset = 0, v_offset = 0, r_offset = 0, tf_offset = 0, td_offset = 0, state_offset = 0, dst_offset = 0; - bool K_uma = false, V_uma = false, R_uma = false, TF_uma = false, TD_uma = false, STATE_uma = false, DST_uma = false; + vk_buffer d_D = nullptr, d_srcs[7] = { nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr }; + size_t dst_offset = 0, src_offsets[7] = { 0, 0, 0, 0, 0, 0, 0 }; + bool dst_uma = false, srcs_uma[7] = { false, false, false, false, false, false, false }; if (ctx->device->uma) { - ggml_vk_host_get(ctx->device, k->data, d_K, k_offset); - ggml_vk_host_get(ctx->device, v->data, d_V, v_offset); - ggml_vk_host_get(ctx->device, r->data, d_R, r_offset); - ggml_vk_host_get(ctx->device, tf->data, d_TF, tf_offset); - ggml_vk_host_get(ctx->device, td->data, d_TD, td_offset); - ggml_vk_host_get(ctx->device, state->data, d_State, state_offset); - ggml_vk_host_get(ctx->device, dst->data, d_D, dst_offset); + for (int i = 0; i < num_srcs; i++) { + ggml_vk_host_get(ctx->device, dst->src[i]->data, d_srcs[i], src_offsets[i]); + srcs_uma[i] = d_srcs[i] != nullptr; + } - K_uma = d_K != nullptr; - V_uma = d_V != nullptr; - R_uma = d_R != nullptr; - TF_uma = d_TF != nullptr; - TD_uma = d_TD != nullptr; - STATE_uma = d_State != nullptr; - DST_uma = d_D != nullptr; + ggml_vk_host_get(ctx->device, dst->data, d_D, dst_offset); + dst_uma = d_D != nullptr; } - if (!K_uma) { - d_K = k_buf_ctx->dev_buffer; - k_offset = vk_tensor_offset(k) + k->view_offs; - } - if (!V_uma) { - d_V = v_buf_ctx->dev_buffer; - v_offset = vk_tensor_offset(v) + v->view_offs; - } - if (!R_uma) { - d_R = r_buf_ctx->dev_buffer; - r_offset = vk_tensor_offset(r) + r->view_offs; - } - if (!TF_uma) { - d_TF = tf_buf_ctx->dev_buffer; - tf_offset = vk_tensor_offset(tf) + tf->view_offs; - } - if (!TD_uma) { - d_TD = td_buf_ctx->dev_buffer; - td_offset = vk_tensor_offset(td) + td->view_offs; - } - if (!STATE_uma) { - d_State = 
state_buf_ctx->dev_buffer; - state_offset = vk_tensor_offset(state) + state->view_offs; + uint64_t src_sizes[7] = { 0, 0, 0, 0, 0, 0, 0 }; + for (int i = 0; i < num_srcs; i++) { + src_sizes[i] = ggml_nbytes(dst->src[i]); + if (!srcs_uma[i]) { + d_srcs[i] = src_buf_ctxs[i]->dev_buffer; + src_offsets[i] = vk_tensor_offset(dst->src[i]) + dst->src[i]->view_offs; + } } - if (!DST_uma) { + + const uint64_t dst_size = ggml_nbytes(dst); + if (!dst_uma) { d_D = dst_buf_ctx->dev_buffer; dst_offset = vk_tensor_offset(dst) + dst->view_offs; } - const uint64_t k_size = ggml_nbytes(k); - const uint64_t v_size = ggml_nbytes(v); - const uint64_t r_size = ggml_nbytes(r); - const uint64_t tf_size = ggml_nbytes(tf); - const uint64_t td_size = ggml_nbytes(td); - const uint64_t state_size = ggml_nbytes(state); - const uint64_t dst_size = ggml_nbytes(dst); - std::array elements = { (uint32_t)(pc.B * pc.H), 1, 1 }; - ggml_vk_dispatch_pipeline(ctx, subctx, pipeline, { - vk_subbuffer{ d_K, k_offset, k_size }, - vk_subbuffer{ d_V, v_offset, v_size }, - vk_subbuffer{ d_R, r_offset, r_size }, - vk_subbuffer{ d_TF, tf_offset, tf_size }, - vk_subbuffer{ d_TD, td_offset, td_size }, - vk_subbuffer{ d_State, state_offset, state_size }, - vk_subbuffer{ d_D, dst_offset, dst_size } - }, sizeof(vk_op_rwkv_wkv6_push_constants), &pc, elements); + if (version == 6) { + ggml_vk_dispatch_pipeline(ctx, subctx, pipeline, { + vk_subbuffer{ d_srcs[0], src_offsets[0], src_sizes[0] }, + vk_subbuffer{ d_srcs[1], src_offsets[1], src_sizes[1] }, + vk_subbuffer{ d_srcs[2], src_offsets[2], src_sizes[2] }, + vk_subbuffer{ d_srcs[3], src_offsets[3], src_sizes[3] }, + vk_subbuffer{ d_srcs[4], src_offsets[4], src_sizes[4] }, + vk_subbuffer{ d_srcs[5], src_offsets[5], src_sizes[5] }, + vk_subbuffer{ d_D, dst_offset, dst_size } + }, sizeof(vk_op_rwkv_wkv6_push_constants), &pc, elements); + } else if (version == 7) { + ggml_vk_dispatch_pipeline(ctx, subctx, pipeline, { + vk_subbuffer{ d_srcs[0], src_offsets[0], src_sizes[0] }, + vk_subbuffer{ d_srcs[1], src_offsets[1], src_sizes[1] }, + vk_subbuffer{ d_srcs[2], src_offsets[2], src_sizes[2] }, + vk_subbuffer{ d_srcs[3], src_offsets[3], src_sizes[3] }, + vk_subbuffer{ d_srcs[4], src_offsets[4], src_sizes[4] }, + vk_subbuffer{ d_srcs[5], src_offsets[5], src_sizes[5] }, + vk_subbuffer{ d_srcs[6], src_offsets[6], src_sizes[6] }, + vk_subbuffer{ d_D, dst_offset, dst_size } + }, sizeof(vk_op_rwkv_wkv7_push_constants), &pc, elements); + } else { + // shouldn't happen + GGML_ASSERT(false); + } } static void ggml_vk_rwkv_wkv6(ggml_backend_vk_context * ctx, vk_context& subctx, ggml_tensor * dst, bool dryrun = false) { @@ -6224,7 +6225,26 @@ static void ggml_vk_rwkv_wkv6(ggml_backend_vk_context * ctx, vk_context& subctx, const size_t n_heads = dst->src[0]->ne[1]; const size_t n_seqs = dst->src[5]->ne[1]; - ggml_vk_op_f32_rwkv6( + ggml_vk_op_f32_wkv( + ctx, subctx, dst, + { + (uint32_t)n_seqs, + (uint32_t)seq_length, + (uint32_t)n_embed, + (uint32_t)n_heads, + }, + 6, + dryrun + ); +} + +static void ggml_vk_rwkv_wkv7(ggml_backend_vk_context * ctx, vk_context& subctx, ggml_tensor * dst, bool dryrun = false) { + const size_t seq_length = dst->src[0]->ne[2]; + const size_t n_embed = dst->ne[0]; + const size_t n_heads = dst->src[0]->ne[1]; + const size_t n_seqs = dst->src[6]->ne[1]; + + ggml_vk_op_f32_wkv( ctx, subctx, dst, { (uint32_t)n_seqs, @@ -6232,6 +6252,7 @@ static void ggml_vk_rwkv_wkv6(ggml_backend_vk_context * ctx, vk_context& subctx, (uint32_t)n_embed, (uint32_t)n_heads, }, + 7, dryrun ); } @@ -6533,6 
+6554,11 @@ static void ggml_vk_rms_norm_back(ggml_backend_vk_context * ctx, vk_context& sub ggml_vk_op_f32(ctx, subctx, src0, src1, nullptr, dst, GGML_OP_RMS_NORM_BACK, { (uint32_t)src0->ne[0], (uint32_t)src0->ne[1], op_params[0], 0.0f }, dryrun); } +static void ggml_vk_l2_norm(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, ggml_tensor * dst, bool dryrun = false) { + float * op_params = (float *)dst->op_params; + ggml_vk_op_f32(ctx, subctx, src0, nullptr, nullptr, dst, GGML_OP_L2_NORM, { (uint32_t)src0->ne[0], (uint32_t)src0->ne[1], op_params[0], 0.0f }, dryrun); +} + static void ggml_vk_unary(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, ggml_tensor * dst, bool dryrun = false) { ggml_vk_op_f32(ctx, subctx, src0, nullptr, nullptr, dst, GGML_OP_UNARY, { (uint32_t)ggml_nelements(src0), 0, 0.0f, 0.0f }, dryrun); } @@ -7528,6 +7554,7 @@ static bool ggml_vk_build_graph(ggml_backend_vk_context * ctx, ggml_tensor * nod case GGML_OP_GROUP_NORM: case GGML_OP_RMS_NORM: case GGML_OP_RMS_NORM_BACK: + case GGML_OP_L2_NORM: case GGML_OP_DIAG_MASK_INF: case GGML_OP_SOFT_MAX: case GGML_OP_SOFT_MAX_BACK: @@ -7544,6 +7571,7 @@ static bool ggml_vk_build_graph(ggml_backend_vk_context * ctx, ggml_tensor * nod case GGML_OP_TIMESTEP_EMBEDDING: case GGML_OP_POOL_2D: case GGML_OP_RWKV_WKV6: + case GGML_OP_RWKV_WKV7: case GGML_OP_LEAKY_RELU: case GGML_OP_FLASH_ATTN_EXT: case GGML_OP_OPT_STEP_ADAMW: @@ -7590,6 +7618,7 @@ static bool ggml_vk_build_graph(ggml_backend_vk_context * ctx, ggml_tensor * nod case GGML_OP_GROUP_NORM: case GGML_OP_RMS_NORM: case GGML_OP_RMS_NORM_BACK: + case GGML_OP_L2_NORM: case GGML_OP_UNARY: case GGML_OP_DIAG_MASK_INF: case GGML_OP_SOFT_MAX: @@ -7707,6 +7736,10 @@ static bool ggml_vk_build_graph(ggml_backend_vk_context * ctx, ggml_tensor * nod case GGML_OP_RMS_NORM_BACK: ggml_vk_rms_norm_back(ctx, compute_ctx, src0, src1, node, dryrun); + break; + case GGML_OP_L2_NORM: + ggml_vk_l2_norm(ctx, compute_ctx, src0, node, dryrun); + break; case GGML_OP_UNARY: switch (ggml_get_unary_op(node)) { @@ -7797,6 +7830,11 @@ static bool ggml_vk_build_graph(ggml_backend_vk_context * ctx, ggml_tensor * nod break; + case GGML_OP_RWKV_WKV7: + ggml_vk_rwkv_wkv7(ctx, compute_ctx, node, dryrun); + + break; + case GGML_OP_OPT_STEP_ADAMW: ggml_vk_opt_step_adamw(ctx, compute_ctx, node, dryrun); @@ -7870,6 +7908,7 @@ static bool ggml_vk_compute_forward(ggml_backend_vk_context * ctx, ggml_tensor * case GGML_OP_GROUP_NORM: case GGML_OP_RMS_NORM: case GGML_OP_RMS_NORM_BACK: + case GGML_OP_L2_NORM: case GGML_OP_DIAG_MASK_INF: case GGML_OP_SOFT_MAX: case GGML_OP_SOFT_MAX_BACK: @@ -7889,6 +7928,7 @@ static bool ggml_vk_compute_forward(ggml_backend_vk_context * ctx, ggml_tensor * case GGML_OP_TIMESTEP_EMBEDDING: case GGML_OP_POOL_2D: case GGML_OP_RWKV_WKV6: + case GGML_OP_RWKV_WKV7: case GGML_OP_LEAKY_RELU: case GGML_OP_REPEAT: case GGML_OP_REPEAT_BACK: @@ -8806,6 +8846,7 @@ static bool ggml_backend_vk_device_supports_op(ggml_backend_dev_t dev, const ggm case GGML_OP_NORM: case GGML_OP_GROUP_NORM: case GGML_OP_RMS_NORM: + case GGML_OP_L2_NORM: return ggml_is_contiguous(op->src[0]); case GGML_OP_ADD: case GGML_OP_SUB: @@ -8835,6 +8876,7 @@ static bool ggml_backend_vk_device_supports_op(ggml_backend_dev_t dev, const ggm case GGML_OP_TIMESTEP_EMBEDDING: case GGML_OP_POOL_2D: case GGML_OP_RWKV_WKV6: + case GGML_OP_RWKV_WKV7: case GGML_OP_LEAKY_RELU: case GGML_OP_OPT_STEP_ADAMW: return true; @@ -9219,6 +9261,9 @@ static void ggml_vk_check_results_0(ggml_tensor * 
tensor) { tensor_clone = ggml_rms_norm_back(ggml_ctx, src_clone[0], src_clone[1], eps); } else if (tensor->op == GGML_OP_SILU_BACK) { tensor_clone = ggml_silu_back(ggml_ctx, src_clone[0], src_clone[1]); + } else if (tensor->op == GGML_OP_L2_NORM) { + const float eps = ((float *) tensor->op_params)[0]; + tensor_clone = ggml_l2_norm(ggml_ctx, src_clone[0], eps); } else if (tensor->op == GGML_OP_SOFT_MAX) { if (src1 != nullptr) { tensor_clone = ggml_soft_max_ext(ggml_ctx, src_clone[0], src_clone[1], ((float *)tensor->op_params)[0], ((float *)tensor->op_params)[1]); @@ -9338,6 +9383,9 @@ static void ggml_vk_check_results_0(ggml_tensor * tensor) { } else if (tensor->op == GGML_OP_RWKV_WKV6) { tensor_clone = ggml_rwkv_wkv6(ggml_ctx, src_clone[0], src_clone[1], src_clone[2], src_clone[3], src_clone[4], src_clone[5]); + } else if (tensor->op == GGML_OP_RWKV_WKV7) { + tensor_clone = ggml_rwkv_wkv7(ggml_ctx, src_clone[0], src_clone[1], src_clone[2], src_clone[3], + src_clone[4], src_clone[5], src_clone[6]); } else if (tensor->op == GGML_OP_OPT_STEP_ADAMW) { src_clone[0]->flags = src0->flags; tensor_clone = ggml_opt_step_adamw(ggml_ctx, src_clone[0], src_clone[1], diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/l2_norm.comp b/ggml/src/ggml-vulkan/vulkan-shaders/l2_norm.comp new file mode 100644 index 0000000000000..deba8c3985629 --- /dev/null +++ b/ggml/src/ggml-vulkan/vulkan-shaders/l2_norm.comp @@ -0,0 +1,41 @@ +#version 450 + +#include "generic_head.comp" +#include "types.comp" + +#extension GL_EXT_control_flow_attributes : enable +#define BLOCK_SIZE 512 + +layout(local_size_x = BLOCK_SIZE, local_size_y = 1, local_size_z = 1) in; + +layout (binding = 0) readonly buffer X {A_TYPE data_a[];}; +layout (binding = 1) writeonly buffer D {D_TYPE data_d[];}; + +shared FLOAT_TYPE sum[BLOCK_SIZE]; + +void main() { + const uint row = gl_WorkGroupID.z * 262144 + gl_WorkGroupID.y * 512 + gl_WorkGroupID.x; + const uint tid = gl_LocalInvocationID.x; + + sum[tid] = FLOAT_TYPE(0.0f); // partial sum for thread in warp + + [[unroll]] for (uint col = tid; col < p.KX; col += BLOCK_SIZE) { + const FLOAT_TYPE xi = FLOAT_TYPE(data_a[row*p.KX + col]); + sum[tid] += xi * xi; + } + + // sum up partial sums and write back result + barrier(); + [[unroll]] for (int s = BLOCK_SIZE / 2; s > 0; s >>= 1) { + if (tid < s) { + sum[tid] += sum[tid + s]; + } + barrier(); + } + + const FLOAT_TYPE scale = inversesqrt(max(sum[0], FLOAT_TYPE(p.param1))); + + [[unroll]] for (uint col = tid; col < p.KX; col += BLOCK_SIZE) { + data_d[row*p.KX + col] = D_TYPE(scale * FLOAT_TYPE(data_a[row*p.KX + col])); + } +} diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/vulkan-shaders-gen.cpp b/ggml/src/ggml-vulkan/vulkan-shaders/vulkan-shaders-gen.cpp index ee1fec4e114aa..eb2ad63ff6bf0 100644 --- a/ggml/src/ggml-vulkan/vulkan-shaders/vulkan-shaders-gen.cpp +++ b/ggml/src/ggml-vulkan/vulkan-shaders/vulkan-shaders-gen.cpp @@ -434,6 +434,7 @@ void process_shaders() { string_to_spv("group_norm_f32", "group_norm.comp", merge_maps(base_dict, {{"A_TYPE", "float"}, {"D_TYPE", "float"}})); string_to_spv("rms_norm_f32", "rms_norm.comp", merge_maps(base_dict, {{"A_TYPE", "float"}, {"D_TYPE", "float"}})); string_to_spv("rms_norm_back_f32", "rms_norm_back.comp", merge_maps(base_dict, {{"A_TYPE", "float"}, {"B_TYPE", "float"}, {"D_TYPE", "float"}})); + string_to_spv("l2_norm_f32", "l2_norm.comp", merge_maps(base_dict, {{"A_TYPE", "float"}, {"D_TYPE", "float"}})); string_to_spv("cpy_f32_f32", "copy.comp", {{"A_TYPE", "float"}, {"D_TYPE", "float"}}); 
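    // The new l2_norm_f32 entry above follows the same pattern as rms_norm_f32:
    // the generated l2_norm_f32_len/l2_norm_f32_data arrays are what
    // ggml_vk_load_shaders() later wraps into device->pipeline_l2_norm_f32.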
string_to_spv("cpy_f32_f16", "copy.comp", {{"A_TYPE", "float"}, {"D_TYPE", "float16_t"}}); @@ -528,6 +529,8 @@ void process_shaders() { string_to_spv("rwkv_wkv6_f32", "wkv6.comp", merge_maps(base_dict, {{"A_TYPE", "float"}})); + string_to_spv("rwkv_wkv7_f32", "wkv7.comp", merge_maps(base_dict, {{"A_TYPE", "float"}})); + string_to_spv("opt_step_adamw_f32", "opt_step_adamw.comp", merge_maps(base_dict, {{"A_TYPE", "float"}})); for (auto &c : compiles) { diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/wkv7.comp b/ggml/src/ggml-vulkan/vulkan-shaders/wkv7.comp new file mode 100644 index 0000000000000..88c1c02b32b8c --- /dev/null +++ b/ggml/src/ggml-vulkan/vulkan-shaders/wkv7.comp @@ -0,0 +1,91 @@ +#version 450 + +#extension GL_EXT_control_flow_attributes : require + +#define BLOCK_SIZE 64 +layout(local_size_x = BLOCK_SIZE, local_size_y = 1, local_size_z = 1) in; + +layout(push_constant) uniform Parameters { + uint B; + uint T; + uint C; + uint H; +}; + +layout(binding = 0) readonly buffer RBuf { A_TYPE r[]; }; +layout(binding = 1) readonly buffer WBuf { A_TYPE w[]; }; +layout(binding = 2) readonly buffer KBuf { A_TYPE k[]; }; +layout(binding = 3) readonly buffer VBuf { A_TYPE v[]; }; +layout(binding = 4) readonly buffer ABuf { A_TYPE a[]; }; +layout(binding = 5) readonly buffer BBuf { A_TYPE b[]; }; +layout(binding = 6) readonly buffer StateBuf { A_TYPE state_in[]; }; +layout(binding = 7) buffer DstBuf { A_TYPE dst[]; }; + +shared A_TYPE _r[BLOCK_SIZE], _w[BLOCK_SIZE], _k[BLOCK_SIZE], _a[BLOCK_SIZE], _b[BLOCK_SIZE]; + +void main() { + const uint head_size = BLOCK_SIZE; + const uint batch_id = gl_WorkGroupID.x / H; + const uint head_id = gl_WorkGroupID.x % H; + const uint tid = gl_LocalInvocationID.x; + + const uint state_size = C * head_size; + const uint n_seq_tokens = T / B; + + if (batch_id >= B || head_id >= H) { + return; + } + + A_TYPE state[BLOCK_SIZE]; + [[unroll]] for (uint i = 0; i < head_size; i++) { + state[i] = state_in[batch_id * state_size + head_id * head_size * head_size + + tid * head_size + i]; + } + + const uint start_t = batch_id * n_seq_tokens * C + head_id * head_size + tid; + const uint end_t = (batch_id + 1) * n_seq_tokens * C + head_id * head_size + tid; + + for (uint t = start_t; t < end_t; t += C) { + barrier(); + _r[tid] = r[t]; + _w[tid] = w[t]; + _k[tid] = k[t]; + _a[tid] = a[t]; + _b[tid] = b[t]; + barrier(); + + A_TYPE sa = 0.0; + [[unroll]] for (uint j = 0; j < head_size; j += 4) { + vec4 s_vec = vec4(state[j], state[j+1], state[j+2], state[j+3]); + vec4 a_vec = vec4(_a[j], _a[j+1], _a[j+2], _a[j+3]); + sa += dot(s_vec, a_vec); + } + + const A_TYPE v_val = v[t]; + A_TYPE y = 0.0; + + [[unroll]] for (uint j = 0; j < head_size; j += 4) { + vec4 r_vec = vec4(_r[j], _r[j+1], _r[j+2], _r[j+3]); + vec4 w_vec = vec4(_w[j], _w[j+1], _w[j+2], _w[j+3]); + vec4 k_vec = vec4(_k[j], _k[j+1], _k[j+2], _k[j+3]); + vec4 b_vec = vec4(_b[j], _b[j+1], _b[j+2], _b[j+3]); + vec4 s_vec = vec4(state[j], state[j+1], state[j+2], state[j+3]); + + vec4 kv = k_vec * v_val; + s_vec = s_vec * w_vec + kv + sa * b_vec; + y += dot(r_vec, s_vec); + + state[j] = s_vec.x; + state[j+1] = s_vec.y; + state[j+2] = s_vec.z; + state[j+3] = s_vec.w; + } + + dst[t] = y; + } + + [[unroll]] for (uint i = 0; i < head_size; i++) { + dst[T * C + batch_id * state_size + head_id * head_size * head_size + + tid * head_size + i] = state[i]; + } +} diff --git a/ggml/src/ggml.c b/ggml/src/ggml.c index 89409bb0e42a5..2e081d5910c6e 100644 --- a/ggml/src/ggml.c +++ b/ggml/src/ggml.c @@ -929,6 +929,7 @@ static const 
char * GGML_OP_NAME[GGML_OP_COUNT] = { "RMS_NORM", "RMS_NORM_BACK", "GROUP_NORM", + "L2_NORM", "MUL_MAT", "MUL_MAT_ID", @@ -977,6 +978,7 @@ static const char * GGML_OP_NAME[GGML_OP_COUNT] = { "ADD_REL_POS", "RWKV_WKV6", "GATED_LINEAR_ATTN", + "RWKV_WKV7", "UNARY", @@ -996,7 +998,7 @@ static const char * GGML_OP_NAME[GGML_OP_COUNT] = { "OPT_STEP_ADAMW", }; -static_assert(GGML_OP_COUNT == 83, "GGML_OP_COUNT != 83"); +static_assert(GGML_OP_COUNT == 85, "GGML_OP_COUNT != 85"); static const char * GGML_OP_SYMBOL[GGML_OP_COUNT] = { "none", @@ -1026,6 +1028,7 @@ static const char * GGML_OP_SYMBOL[GGML_OP_COUNT] = { "rms_norm(x)", "rms_norm_back(x)", "group_norm(x)", + "l2_norm(x)", "X*Y", "X[i]*Y", @@ -1074,6 +1077,7 @@ static const char * GGML_OP_SYMBOL[GGML_OP_COUNT] = { "add_rel_pos(x)", "rwkv_wkv6(k, v, r, tf, td, s)", "gated_linear_attn(k, v, q, gate, s)", + "rwkv_wkv7(r, w, k, v, a, b, s)", "unary(x)", @@ -1093,7 +1097,7 @@ static const char * GGML_OP_SYMBOL[GGML_OP_COUNT] = { "adamw(x)", }; -static_assert(GGML_OP_COUNT == 83, "GGML_OP_COUNT != 83"); +static_assert(GGML_OP_COUNT == 85, "GGML_OP_COUNT != 85"); static_assert(GGML_OP_POOL_COUNT == 2, "GGML_OP_POOL_COUNT != 2"); @@ -2686,6 +2690,37 @@ struct ggml_tensor * ggml_group_norm_inplace( return ggml_group_norm_impl(ctx, a, n_groups, eps, true); } +// ggml_l2_norm + +static struct ggml_tensor * ggml_l2_norm_impl( + struct ggml_context * ctx, + struct ggml_tensor * a, + float eps, + bool inplace) { + struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a); + + ggml_set_op_params_f32(result, 0, eps); + + result->op = GGML_OP_L2_NORM; + result->src[0] = a; + + return result; +} + +struct ggml_tensor * ggml_l2_norm( + struct ggml_context * ctx, + struct ggml_tensor * a, + float eps) { + return ggml_l2_norm_impl(ctx, a, eps, false); +} + +struct ggml_tensor * ggml_l2_norm_inplace( + struct ggml_context * ctx, + struct ggml_tensor * a, + float eps) { + return ggml_l2_norm_impl(ctx, a, eps, true); +} + // ggml_mul_mat static inline bool ggml_can_mul_mat(const struct ggml_tensor * t0, const struct ggml_tensor * t1) { @@ -4720,6 +4755,54 @@ struct ggml_tensor * ggml_gated_linear_attn( return result; } +// ggml_rwkv_wkv7 + +struct ggml_tensor * ggml_rwkv_wkv7( + struct ggml_context * ctx, + struct ggml_tensor * r, + struct ggml_tensor * w, + struct ggml_tensor * k, + struct ggml_tensor * v, + struct ggml_tensor * a, + struct ggml_tensor * b, + struct ggml_tensor * state) { + GGML_ASSERT(ggml_is_contiguous(r)); + GGML_ASSERT(ggml_is_contiguous(w)); + GGML_ASSERT(ggml_is_contiguous(k)); + GGML_ASSERT(ggml_is_contiguous(v)); + GGML_ASSERT(ggml_is_contiguous(a)); + GGML_ASSERT(ggml_is_contiguous(b)); + GGML_ASSERT(ggml_is_contiguous(state)); + + const int64_t S = k->ne[0]; + const int64_t H = k->ne[1]; + const int64_t n_tokens = k->ne[2]; + const int64_t n_seqs = state->ne[1]; + { + GGML_ASSERT(w->ne[0] == S && w->ne[1] == H && w->ne[2] == n_tokens); + GGML_ASSERT(k->ne[0] == S && k->ne[1] == H && k->ne[2] == n_tokens); + GGML_ASSERT(v->ne[0] == S && v->ne[1] == H && v->ne[2] == n_tokens); + GGML_ASSERT(a->ne[0] == S && a->ne[1] == H && a->ne[2] == n_tokens); + GGML_ASSERT(b->ne[0] == S && b->ne[1] == H && b->ne[2] == n_tokens); + GGML_ASSERT(ggml_nelements(state) == S * S * H * n_seqs); + } + + // concat output and new_state + const int64_t ne[4] = { S * H, n_tokens + S * n_seqs, 1, 1 }; + struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, 4, ne); + + result->op = GGML_OP_RWKV_WKV7; + result->src[0] 
= r; + result->src[1] = w; + result->src[2] = k; + result->src[3] = v; + result->src[4] = a; + result->src[5] = b; + result->src[6] = state; + + return result; +} + // ggml_unary static struct ggml_tensor * ggml_unary_impl( diff --git a/gguf-py/gguf/constants.py b/gguf-py/gguf/constants.py index 19624eae04ece..cc48913d9789d 100644 --- a/gguf-py/gguf/constants.py +++ b/gguf-py/gguf/constants.py @@ -118,22 +118,26 @@ class LLM: TOKEN_SHIFT_COUNT = "{arch}.token_shift_count" class Attention: - HEAD_COUNT = "{arch}.attention.head_count" - HEAD_COUNT_KV = "{arch}.attention.head_count_kv" - MAX_ALIBI_BIAS = "{arch}.attention.max_alibi_bias" - CLAMP_KQV = "{arch}.attention.clamp_kqv" - KEY_LENGTH = "{arch}.attention.key_length" - VALUE_LENGTH = "{arch}.attention.value_length" - LAYERNORM_EPS = "{arch}.attention.layer_norm_epsilon" - LAYERNORM_RMS_EPS = "{arch}.attention.layer_norm_rms_epsilon" - GROUPNORM_EPS = "{arch}.attention.group_norm_epsilon" - GROUPNORM_GROUPS = "{arch}.attention.group_norm_groups" - CAUSAL = "{arch}.attention.causal" - Q_LORA_RANK = "{arch}.attention.q_lora_rank" - KV_LORA_RANK = "{arch}.attention.kv_lora_rank" - REL_BUCKETS_COUNT = "{arch}.attention.relative_buckets_count" - SLIDING_WINDOW = "{arch}.attention.sliding_window" - SCALE = "{arch}.attention.scale" + HEAD_COUNT = "{arch}.attention.head_count" + HEAD_COUNT_KV = "{arch}.attention.head_count_kv" + MAX_ALIBI_BIAS = "{arch}.attention.max_alibi_bias" + CLAMP_KQV = "{arch}.attention.clamp_kqv" + KEY_LENGTH = "{arch}.attention.key_length" + VALUE_LENGTH = "{arch}.attention.value_length" + LAYERNORM_EPS = "{arch}.attention.layer_norm_epsilon" + LAYERNORM_RMS_EPS = "{arch}.attention.layer_norm_rms_epsilon" + GROUPNORM_EPS = "{arch}.attention.group_norm_epsilon" + GROUPNORM_GROUPS = "{arch}.attention.group_norm_groups" + CAUSAL = "{arch}.attention.causal" + Q_LORA_RANK = "{arch}.attention.q_lora_rank" + KV_LORA_RANK = "{arch}.attention.kv_lora_rank" + DECAY_LORA_RANK = "{arch}.attention.decay_lora_rank" + ICLR_LORA_RANK = "{arch}.attention.iclr_lora_rank" + VALUE_RESIDUAL_MIX_LORA_RANK = "{arch}.attention.value_residual_mix_lora_rank" + GATE_LORA_RANK = "{arch}.attention.gate_lora_rank" + REL_BUCKETS_COUNT = "{arch}.attention.relative_buckets_count" + SLIDING_WINDOW = "{arch}.attention.sliding_window" + SCALE = "{arch}.attention.scale" class Rope: DIMENSION_COUNT = "{arch}.rope.dimension_count" @@ -257,6 +261,8 @@ class MODEL_ARCH(IntEnum): STARCODER2 = auto() RWKV6 = auto() RWKV6QWEN2 = auto() + RWKV7 = auto() + ARWKV7 = auto() MAMBA = auto() XVERSE = auto() COMMAND_R = auto() @@ -329,8 +335,20 @@ class MODEL_TENSOR(IntEnum): SSM_A = auto() SSM_D = auto() SSM_OUT = auto() + TIME_MIX_W0 = auto() TIME_MIX_W1 = auto() TIME_MIX_W2 = auto() + TIME_MIX_A0 = auto() + TIME_MIX_A1 = auto() + TIME_MIX_A2 = auto() + TIME_MIX_V0 = auto() + TIME_MIX_V1 = auto() + TIME_MIX_V2 = auto() + TIME_MIX_G1 = auto() + TIME_MIX_G2 = auto() + TIME_MIX_K_K = auto() + TIME_MIX_K_A = auto() + TIME_MIX_R_K = auto() TIME_MIX_LERP_X = auto() TIME_MIX_LERP_K = auto() TIME_MIX_LERP_V = auto() @@ -445,6 +463,8 @@ class MODEL_TENSOR(IntEnum): MODEL_ARCH.STARCODER2: "starcoder2", MODEL_ARCH.RWKV6: "rwkv6", MODEL_ARCH.RWKV6QWEN2: "rwkv6qwen2", + MODEL_ARCH.RWKV7: "rwkv7", + MODEL_ARCH.ARWKV7: "arwkv7", MODEL_ARCH.MAMBA: "mamba", MODEL_ARCH.XVERSE: "xverse", MODEL_ARCH.COMMAND_R: "command-r", @@ -517,8 +537,20 @@ class MODEL_TENSOR(IntEnum): MODEL_TENSOR.SSM_A: "blk.{bid}.ssm_a", MODEL_TENSOR.SSM_D: "blk.{bid}.ssm_d", MODEL_TENSOR.SSM_OUT: 
"blk.{bid}.ssm_out", + MODEL_TENSOR.TIME_MIX_W0: "blk.{bid}.time_mix_w0", MODEL_TENSOR.TIME_MIX_W1: "blk.{bid}.time_mix_w1", MODEL_TENSOR.TIME_MIX_W2: "blk.{bid}.time_mix_w2", + MODEL_TENSOR.TIME_MIX_A0: "blk.{bid}.time_mix_a0", + MODEL_TENSOR.TIME_MIX_A1: "blk.{bid}.time_mix_a1", + MODEL_TENSOR.TIME_MIX_A2: "blk.{bid}.time_mix_a2", + MODEL_TENSOR.TIME_MIX_V0: "blk.{bid}.time_mix_v0", + MODEL_TENSOR.TIME_MIX_V1: "blk.{bid}.time_mix_v1", + MODEL_TENSOR.TIME_MIX_V2: "blk.{bid}.time_mix_v2", + MODEL_TENSOR.TIME_MIX_G1: "blk.{bid}.time_mix_g1", + MODEL_TENSOR.TIME_MIX_G2: "blk.{bid}.time_mix_g2", + MODEL_TENSOR.TIME_MIX_K_K: "blk.{bid}.time_mix_k_k", + MODEL_TENSOR.TIME_MIX_K_A: "blk.{bid}.time_mix_k_a", + MODEL_TENSOR.TIME_MIX_R_K: "blk.{bid}.time_mix_r_k", MODEL_TENSOR.TIME_MIX_LERP_X: "blk.{bid}.time_mix_lerp_x", MODEL_TENSOR.TIME_MIX_LERP_K: "blk.{bid}.time_mix_lerp_k", MODEL_TENSOR.TIME_MIX_LERP_V: "blk.{bid}.time_mix_lerp_v", @@ -1172,6 +1204,68 @@ class MODEL_TENSOR(IntEnum): MODEL_TENSOR.FFN_DOWN, MODEL_TENSOR.FFN_UP, ], + MODEL_ARCH.RWKV7: [ + MODEL_TENSOR.TOKEN_EMBD, + MODEL_TENSOR.TOKEN_EMBD_NORM, + MODEL_TENSOR.OUTPUT_NORM, + MODEL_TENSOR.OUTPUT, + MODEL_TENSOR.ATTN_NORM, + MODEL_TENSOR.ATTN_NORM_2, + MODEL_TENSOR.TIME_MIX_LERP_FUSED, + MODEL_TENSOR.TIME_MIX_W0, + MODEL_TENSOR.TIME_MIX_W1, + MODEL_TENSOR.TIME_MIX_W2, + MODEL_TENSOR.TIME_MIX_A0, + MODEL_TENSOR.TIME_MIX_A1, + MODEL_TENSOR.TIME_MIX_A2, + MODEL_TENSOR.TIME_MIX_V0, + MODEL_TENSOR.TIME_MIX_V1, + MODEL_TENSOR.TIME_MIX_V2, + MODEL_TENSOR.TIME_MIX_G1, + MODEL_TENSOR.TIME_MIX_G2, + MODEL_TENSOR.TIME_MIX_K_K, + MODEL_TENSOR.TIME_MIX_K_A, + MODEL_TENSOR.TIME_MIX_R_K, + MODEL_TENSOR.TIME_MIX_KEY, + MODEL_TENSOR.TIME_MIX_VALUE, + MODEL_TENSOR.TIME_MIX_RECEPTANCE, + MODEL_TENSOR.TIME_MIX_LN, + MODEL_TENSOR.TIME_MIX_OUTPUT, + MODEL_TENSOR.CHANNEL_MIX_LERP_K, + MODEL_TENSOR.CHANNEL_MIX_KEY, + MODEL_TENSOR.CHANNEL_MIX_VALUE, + ], + MODEL_ARCH.ARWKV7: [ + MODEL_TENSOR.TOKEN_EMBD, + MODEL_TENSOR.TOKEN_EMBD_NORM, + MODEL_TENSOR.OUTPUT_NORM, + MODEL_TENSOR.OUTPUT, + MODEL_TENSOR.ATTN_NORM, + MODEL_TENSOR.TIME_MIX_LERP_FUSED, + MODEL_TENSOR.TIME_MIX_W0, + MODEL_TENSOR.TIME_MIX_W1, + MODEL_TENSOR.TIME_MIX_W2, + MODEL_TENSOR.TIME_MIX_A0, + MODEL_TENSOR.TIME_MIX_A1, + MODEL_TENSOR.TIME_MIX_A2, + MODEL_TENSOR.TIME_MIX_V0, + MODEL_TENSOR.TIME_MIX_V1, + MODEL_TENSOR.TIME_MIX_V2, + MODEL_TENSOR.TIME_MIX_G1, + MODEL_TENSOR.TIME_MIX_G2, + MODEL_TENSOR.TIME_MIX_K_K, + MODEL_TENSOR.TIME_MIX_K_A, + MODEL_TENSOR.TIME_MIX_R_K, + MODEL_TENSOR.TIME_MIX_KEY, + MODEL_TENSOR.TIME_MIX_VALUE, + MODEL_TENSOR.TIME_MIX_RECEPTANCE, + MODEL_TENSOR.TIME_MIX_LN, + MODEL_TENSOR.TIME_MIX_OUTPUT, + MODEL_TENSOR.FFN_NORM, + MODEL_TENSOR.FFN_GATE, + MODEL_TENSOR.FFN_DOWN, + MODEL_TENSOR.FFN_UP, + ], MODEL_ARCH.MAMBA: [ MODEL_TENSOR.TOKEN_EMBD, MODEL_TENSOR.OUTPUT_NORM, diff --git a/gguf-py/gguf/gguf_writer.py b/gguf-py/gguf/gguf_writer.py index 080d2b9dce5cb..af8b388dfaba5 100644 --- a/gguf-py/gguf/gguf_writer.py +++ b/gguf-py/gguf/gguf_writer.py @@ -767,6 +767,18 @@ def add_q_lora_rank(self, length: int) -> None: def add_kv_lora_rank(self, length: int) -> None: self.add_uint32(Keys.Attention.KV_LORA_RANK.format(arch=self.arch), length) + def add_decay_lora_rank(self, length: int) -> None: + self.add_uint32(Keys.Attention.DECAY_LORA_RANK.format(arch=self.arch), length) + + def add_iclr_lora_rank(self, length: int) -> None: + self.add_uint32(Keys.Attention.ICLR_LORA_RANK.format(arch=self.arch), length) + + def add_value_residual_mix_lora_rank(self, length: int) -> None: 
+ self.add_uint32(Keys.Attention.VALUE_RESIDUAL_MIX_LORA_RANK.format(arch=self.arch), length) + + def add_gate_lora_rank(self, length: int) -> None: + self.add_uint32(Keys.Attention.GATE_LORA_RANK.format(arch=self.arch), length) + def add_relative_attn_buckets_count(self, value: int) -> None: self.add_uint32(Keys.Attention.REL_BUCKETS_COUNT.format(arch=self.arch), value) diff --git a/gguf-py/gguf/tensor_mapping.py b/gguf-py/gguf/tensor_mapping.py index 617791e240b60..8d4a2b0320183 100644 --- a/gguf-py/gguf/tensor_mapping.py +++ b/gguf-py/gguf/tensor_mapping.py @@ -27,7 +27,8 @@ class TensorNameMap: "embedding.word_embeddings", # chatglm "transformer.token_embeddings", # openelm "shared", # t5 - "rwkv.embeddings", # rwkv + "rwkv.embeddings", # rwkv6 + "model.embeddings", # rwkv7 ), # Token type embeddings @@ -42,6 +43,9 @@ class TensorNameMap: "emb_ln", # nomic-bert "transformer.norm", # openelm "rwkv.blocks.0.pre_ln", # rwkv + "rwkv.blocks.0.pre_ln", # rwkv6 + "model.pre_ln", # rwkv7 + "model.layers.0.pre_norm", # rwkv7 "backbone.norm", # wavtokenizer ), @@ -81,7 +85,8 @@ class TensorNameMap: "encoder.final_layernorm", # chatglm "transformer.norm", # openelm "model.norm", # nemotron - "rwkv.ln_out", # rwkv + "rwkv.ln_out", # rwkv6 + "model.ln_out", # rwkv7 "backbone.final_layer_norm", # wavtokenizer ), @@ -122,14 +127,16 @@ class TensorNameMap: "transformer.blocks.{bid}.norm_attn_norm.norm_1", # dbrx "encoder.layers.{bid}.input_layernorm", # chatglm "transformer.layers.{bid}.attn_norm", # openelm - "rwkv.blocks.{bid}.ln1", # rwkv + "rwkv.blocks.{bid}.ln1", # rwkv6 + "model.layers.{bid}.ln1", # rwkv7 ), # Attention norm 2 MODEL_TENSOR.ATTN_NORM_2: ( "transformer.h.{bid}.ln_attn", # falcon40b "encoder.layer.{bid}.layer_norm_1", # jina-v2-code - "rwkv.blocks.{bid}.ln2", # rwkv + "rwkv.blocks.{bid}.ln2", # rwkv6 + "model.layers.{bid}.ln2", # rwkv7 ), # Attention query-key-value @@ -462,112 +469,174 @@ class TensorNameMap: "backbone.layers.{bid}.mixer.out_proj", ), + MODEL_TENSOR.TIME_MIX_W0: ( + "model.layers.{bid}.attention.w0", # rwkv7 + ), + MODEL_TENSOR.TIME_MIX_W1: ( - "rwkv.blocks.{bid}.attention.time_maa_w1", # rwkv v6 - "model.layers.{bid}.self_attn.time_maa_w1", # rwkv6qwen2 + "rwkv.blocks.{bid}.attention.time_maa_w1", # rwkv6 + "model.layers.{bid}.self_attn.time_maa_w1", # rwkv6qwen2 + "model.layers.{bid}.attention.w1", # rwkv7 ), MODEL_TENSOR.TIME_MIX_W2: ( - "rwkv.blocks.{bid}.attention.time_maa_w2", # rwkv v6 - "model.layers.{bid}.self_attn.time_maa_w2", # rwkv6qwen2 + "rwkv.blocks.{bid}.attention.time_maa_w2", # rwkv6 + "model.layers.{bid}.self_attn.time_maa_w2", # rwkv6qwen2 + "model.layers.{bid}.attention.w2", # rwkv7 + ), + + MODEL_TENSOR.TIME_MIX_A0: ( + "model.layers.{bid}.attention.a0", # rwkv7 + ), + + MODEL_TENSOR.TIME_MIX_A1: ( + "model.layers.{bid}.attention.a1", # rwkv7 + ), + + MODEL_TENSOR.TIME_MIX_A2: ( + "model.layers.{bid}.attention.a2", # rwkv7 + ), + + MODEL_TENSOR.TIME_MIX_V0: ( + "model.layers.{bid}.attention.v0", # rwkv7 + ), + + MODEL_TENSOR.TIME_MIX_V1: ( + "model.layers.{bid}.attention.v1", # rwkv7 + ), + + MODEL_TENSOR.TIME_MIX_V2: ( + "model.layers.{bid}.attention.v2", # rwkv7 + ), + + MODEL_TENSOR.TIME_MIX_G1: ( + "model.layers.{bid}.attention.g1", # rwkv7 + ), + + MODEL_TENSOR.TIME_MIX_G2: ( + "model.layers.{bid}.attention.g2", # rwkv7 + ), + + MODEL_TENSOR.TIME_MIX_K_K: ( + "model.layers.{bid}.attention.k_k", # rwkv7 + ), + + MODEL_TENSOR.TIME_MIX_K_A: ( + "model.layers.{bid}.attention.k_a", # rwkv7 + ), + + MODEL_TENSOR.TIME_MIX_R_K: ( + 
"model.layers.{bid}.attention.r_k", # rwkv7 ), MODEL_TENSOR.TIME_MIX_LERP_X: ( - "rwkv.blocks.{bid}.attention.time_maa_x", # rwkv v6 + "rwkv.blocks.{bid}.attention.time_maa_x", # rwkv6 "model.layers.{bid}.self_attn.time_maa_x", # rwkv6qwen2 ), MODEL_TENSOR.TIME_MIX_LERP_K: ( - "rwkv.blocks.{bid}.attention.time_maa_k", # rwkv v6 + "rwkv.blocks.{bid}.attention.time_maa_k", # rwkv6 "model.layers.{bid}.self_attn.time_maa_k", # rwkv6qwen2 ), MODEL_TENSOR.TIME_MIX_LERP_V: ( - "rwkv.blocks.{bid}.attention.time_maa_v", # rwkv v6 + "rwkv.blocks.{bid}.attention.time_maa_v", # rwkv6 "model.layers.{bid}.self_attn.time_maa_v", # rwkv6qwen2 ), MODEL_TENSOR.TIME_MIX_LERP_R: ( - "rwkv.blocks.{bid}.attention.time_maa_r", # rwkv v6 + "rwkv.blocks.{bid}.attention.time_maa_r", # rwkv6 "model.layers.{bid}.self_attn.time_maa_r", # rwkv6qwen2 ), MODEL_TENSOR.TIME_MIX_LERP_G: ( - "rwkv.blocks.{bid}.attention.time_maa_g", # rwkv v6 + "rwkv.blocks.{bid}.attention.time_maa_g", # rwkv6 "model.layers.{bid}.self_attn.time_maa_g", # rwkv6qwen2 ), MODEL_TENSOR.TIME_MIX_LERP_W: ( - "rwkv.blocks.{bid}.attention.time_maa_w", # rwkv v6 + "rwkv.blocks.{bid}.attention.time_maa_w", # rwkv6 "model.layers.{bid}.self_attn.time_maa_w", # rwkv6qwen2 ), MODEL_TENSOR.TIME_MIX_FIRST: ( - "rwkv.blocks.{bid}.attention.time_faaaa", # rwkv v6 + "rwkv.blocks.{bid}.attention.time_faaaa", # rwkv6 ), MODEL_TENSOR.TIME_MIX_DECAY: ( - "rwkv.blocks.{bid}.attention.time_decay", # rwkv v6 + "rwkv.blocks.{bid}.attention.time_decay", # rwkv6 "model.layers.{bid}.self_attn.time_decay", # rwkv6qwen2 ), MODEL_TENSOR.TIME_MIX_DECAY_W1: ( - "rwkv.blocks.{bid}.attention.time_decay_w1", # rwkv v6 + "rwkv.blocks.{bid}.attention.time_decay_w1", # rwkv6 "model.layers.{bid}.self_attn.time_decay_w1", # rwkv6qwen2 ), MODEL_TENSOR.TIME_MIX_DECAY_W2: ( - "rwkv.blocks.{bid}.attention.time_decay_w2", # rwkv v6 + "rwkv.blocks.{bid}.attention.time_decay_w2", # rwkv6 "model.layers.{bid}.self_attn.time_decay_w2", # rwkv6qwen2 ), MODEL_TENSOR.TIME_MIX_KEY: ( - "rwkv.blocks.{bid}.attention.key", # rwkv + "rwkv.blocks.{bid}.attention.key", # rwkv6 "model.layers.{bid}.self_attn.k_proj", # rwkv6qwen2 + "model.layers.{bid}.attention.key", # rwkv7 + "model.layers.{bid}.attention.k_proj", # rwkv7 ), MODEL_TENSOR.TIME_MIX_VALUE: ( - "rwkv.blocks.{bid}.attention.value", # rwkv + "rwkv.blocks.{bid}.attention.value", # rwkv6 "model.layers.{bid}.self_attn.v_proj", # rwkv6qwen2 + "model.layers.{bid}.attention.value", # rwkv7 + "model.layers.{bid}.attention.v_proj", # rwkv7 ), MODEL_TENSOR.TIME_MIX_RECEPTANCE: ( - "rwkv.blocks.{bid}.attention.receptance", # rwkv - "model.layers.{bid}.self_attn.q_proj", # rwkv6qwen2 + "rwkv.blocks.{bid}.attention.receptance", # rwkv6 + "model.layers.{bid}.self_attn.q_proj", # rwkv6qwen2 + "model.layers.{bid}.attention.receptance", # rwkv7 + "model.layers.{bid}.attention.r_proj", # rwkv7 ), MODEL_TENSOR.TIME_MIX_GATE: ( - "rwkv.blocks.{bid}.attention.gate", # rwkv - "model.layers.{bid}.self_attn.gate", # rwkv6qwen2 + "rwkv.blocks.{bid}.attention.gate", # rwkv6 + "model.layers.{bid}.self_attn.gate", # rwkv6qwen2 ), MODEL_TENSOR.TIME_MIX_LN: ( - "rwkv.blocks.{bid}.attention.ln_x", # rwkv + "rwkv.blocks.{bid}.attention.ln_x", # rwkv6 + "model.layers.{bid}.attention.ln_x" # rwkv7 ), MODEL_TENSOR.TIME_MIX_OUTPUT: ( - "rwkv.blocks.{bid}.attention.output", # rwkv + "rwkv.blocks.{bid}.attention.output", # rwkv6 "model.layers.{bid}.self_attn.o_proj", # rwkv6qwen2 + "model.layers.{bid}.attention.output", # rwkv7 + "model.layers.{bid}.attention.o_proj", # rwkv7 ), 
MODEL_TENSOR.CHANNEL_MIX_LERP_K: ( - "rwkv.blocks.{bid}.feed_forward.time_maa_k", # rwkv v6 + "rwkv.blocks.{bid}.feed_forward.time_maa_k", # rwkv6 + "model.layers.{bid}.feed_forward.x_k", # rwkv7 ), MODEL_TENSOR.CHANNEL_MIX_LERP_R: ( - "rwkv.blocks.{bid}.feed_forward.time_maa_r", # rwkv v6 + "rwkv.blocks.{bid}.feed_forward.time_maa_r", # rwkv6 ), MODEL_TENSOR.CHANNEL_MIX_KEY: ( - "rwkv.blocks.{bid}.feed_forward.key", # rwkv + "rwkv.blocks.{bid}.feed_forward.key", # rwkv6 + "model.layers.{bid}.feed_forward.key", # rwkv7 ), MODEL_TENSOR.CHANNEL_MIX_RECEPTANCE: ( - "rwkv.blocks.{bid}.feed_forward.receptance", # rwkv + "rwkv.blocks.{bid}.feed_forward.receptance", # rwkv6 ), MODEL_TENSOR.CHANNEL_MIX_VALUE: ( - "rwkv.blocks.{bid}.feed_forward.value", # rwkv + "rwkv.blocks.{bid}.feed_forward.value", # rwkv6 + "model.layers.{bid}.feed_forward.value", # rwkv7 ), MODEL_TENSOR.ATTN_Q_A: ( diff --git a/src/llama-arch.cpp b/src/llama-arch.cpp index 28f2bbc8f72bf..9debb56cc80d5 100644 --- a/src/llama-arch.cpp +++ b/src/llama-arch.cpp @@ -59,6 +59,8 @@ static const std::map LLM_ARCH_NAMES = { { LLM_ARCH_EXAONE, "exaone" }, { LLM_ARCH_RWKV6, "rwkv6" }, { LLM_ARCH_RWKV6QWEN2, "rwkv6qwen2" }, + { LLM_ARCH_RWKV7, "rwkv7" }, + { LLM_ARCH_ARWKV7, "arwkv7" }, { LLM_ARCH_GRANITE, "granite" }, { LLM_ARCH_GRANITE_MOE, "granitemoe" }, { LLM_ARCH_CHAMELEON, "chameleon" }, @@ -110,22 +112,26 @@ static const std::map LLM_KV_NAMES = { { LLM_KV_EMBEDDING_SCALE, "%s.embedding_scale" }, { LLM_KV_TOKEN_SHIFT_COUNT, "%s.token_shift_count" }, - { LLM_KV_ATTENTION_HEAD_COUNT, "%s.attention.head_count" }, - { LLM_KV_ATTENTION_HEAD_COUNT_KV, "%s.attention.head_count_kv" }, - { LLM_KV_ATTENTION_MAX_ALIBI_BIAS, "%s.attention.max_alibi_bias" }, - { LLM_KV_ATTENTION_CLAMP_KQV, "%s.attention.clamp_kqv" }, - { LLM_KV_ATTENTION_KEY_LENGTH, "%s.attention.key_length" }, - { LLM_KV_ATTENTION_VALUE_LENGTH, "%s.attention.value_length" }, - { LLM_KV_ATTENTION_LAYERNORM_EPS, "%s.attention.layer_norm_epsilon" }, - { LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, "%s.attention.layer_norm_rms_epsilon" }, - { LLM_KV_ATTENTION_GROUPNORM_EPS, "%s.attention.group_norm_epsilon" }, - { LLM_KV_ATTENTION_GROUPNORM_GROUPS, "%s.attention.group_norm_groups" }, - { LLM_KV_ATTENTION_CAUSAL, "%s.attention.causal" }, - { LLM_KV_ATTENTION_Q_LORA_RANK, "%s.attention.q_lora_rank" }, - { LLM_KV_ATTENTION_KV_LORA_RANK, "%s.attention.kv_lora_rank" }, - { LLM_KV_ATTENTION_RELATIVE_BUCKETS_COUNT, "%s.attention.relative_buckets_count" }, - { LLM_KV_ATTENTION_SLIDING_WINDOW, "%s.attention.sliding_window" }, - { LLM_KV_ATTENTION_SCALE, "%s.attention.scale" }, + { LLM_KV_ATTENTION_HEAD_COUNT, "%s.attention.head_count" }, + { LLM_KV_ATTENTION_HEAD_COUNT_KV, "%s.attention.head_count_kv" }, + { LLM_KV_ATTENTION_MAX_ALIBI_BIAS, "%s.attention.max_alibi_bias" }, + { LLM_KV_ATTENTION_CLAMP_KQV, "%s.attention.clamp_kqv" }, + { LLM_KV_ATTENTION_KEY_LENGTH, "%s.attention.key_length" }, + { LLM_KV_ATTENTION_VALUE_LENGTH, "%s.attention.value_length" }, + { LLM_KV_ATTENTION_LAYERNORM_EPS, "%s.attention.layer_norm_epsilon" }, + { LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, "%s.attention.layer_norm_rms_epsilon" }, + { LLM_KV_ATTENTION_GROUPNORM_EPS, "%s.attention.group_norm_epsilon" }, + { LLM_KV_ATTENTION_GROUPNORM_GROUPS, "%s.attention.group_norm_groups" }, + { LLM_KV_ATTENTION_CAUSAL, "%s.attention.causal" }, + { LLM_KV_ATTENTION_Q_LORA_RANK, "%s.attention.q_lora_rank" }, + { LLM_KV_ATTENTION_KV_LORA_RANK, "%s.attention.kv_lora_rank" }, + { LLM_KV_ATTENTION_DECAY_LORA_RANK, 
"%s.attention.decay_lora_rank" }, + { LLM_KV_ATTENTION_ICLR_LORA_RANK, "%s.attention.iclr_lora_rank" }, + { LLM_KV_ATTENTION_VALUE_RESIDUAL_MIX_LORA_RANK, "%s.attention.value_residual_mix_lora_rank" }, + { LLM_KV_ATTENTION_GATE_LORA_RANK, "%s.attention.gate_lora_rank" }, + { LLM_KV_ATTENTION_RELATIVE_BUCKETS_COUNT, "%s.attention.relative_buckets_count" }, + { LLM_KV_ATTENTION_SLIDING_WINDOW, "%s.attention.sliding_window" }, + { LLM_KV_ATTENTION_SCALE, "%s.attention.scale" }, { LLM_KV_ROPE_DIMENSION_COUNT, "%s.rope.dimension_count" }, { LLM_KV_ROPE_DIMENSION_SECTIONS, "%s.rope.dimension_sections" }, @@ -1238,6 +1244,74 @@ static const std::map> LLM_TENSOR_N { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" }, }, }, + { + LLM_ARCH_RWKV7, + { + { LLM_TENSOR_TOKEN_EMBD, "token_embd" }, + { LLM_TENSOR_TOKEN_EMBD_NORM, "token_embd_norm" }, + { LLM_TENSOR_OUTPUT_NORM, "output_norm" }, + { LLM_TENSOR_OUTPUT, "output" }, + { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" }, + { LLM_TENSOR_ATTN_NORM_2, "blk.%d.attn_norm_2" }, + { LLM_TENSOR_TIME_MIX_W0, "blk.%d.time_mix_w0" }, + { LLM_TENSOR_TIME_MIX_W1, "blk.%d.time_mix_w1" }, + { LLM_TENSOR_TIME_MIX_W2, "blk.%d.time_mix_w2" }, + { LLM_TENSOR_TIME_MIX_A0, "blk.%d.time_mix_a0" }, + { LLM_TENSOR_TIME_MIX_A1, "blk.%d.time_mix_a1" }, + { LLM_TENSOR_TIME_MIX_A2, "blk.%d.time_mix_a2" }, + { LLM_TENSOR_TIME_MIX_V0, "blk.%d.time_mix_v0" }, + { LLM_TENSOR_TIME_MIX_V1, "blk.%d.time_mix_v1" }, + { LLM_TENSOR_TIME_MIX_V2, "blk.%d.time_mix_v2" }, + { LLM_TENSOR_TIME_MIX_G1, "blk.%d.time_mix_g1" }, + { LLM_TENSOR_TIME_MIX_G2, "blk.%d.time_mix_g2" }, + { LLM_TENSOR_TIME_MIX_K_K, "blk.%d.time_mix_k_k" }, + { LLM_TENSOR_TIME_MIX_K_A, "blk.%d.time_mix_k_a" }, + { LLM_TENSOR_TIME_MIX_R_K, "blk.%d.time_mix_r_k" }, + { LLM_TENSOR_TIME_MIX_LERP_FUSED, "blk.%d.time_mix_lerp_fused" }, + { LLM_TENSOR_TIME_MIX_KEY, "blk.%d.time_mix_key" }, + { LLM_TENSOR_TIME_MIX_VALUE, "blk.%d.time_mix_value" }, + { LLM_TENSOR_TIME_MIX_RECEPTANCE, "blk.%d.time_mix_receptance" }, + { LLM_TENSOR_TIME_MIX_LN, "blk.%d.time_mix_ln" }, + { LLM_TENSOR_TIME_MIX_OUTPUT, "blk.%d.time_mix_output" }, + { LLM_TENSOR_CHANNEL_MIX_LERP_K, "blk.%d.channel_mix_lerp_k" }, + { LLM_TENSOR_CHANNEL_MIX_KEY, "blk.%d.channel_mix_key" }, + { LLM_TENSOR_CHANNEL_MIX_VALUE, "blk.%d.channel_mix_value" }, + }, + }, + { + LLM_ARCH_ARWKV7, + { + { LLM_TENSOR_TOKEN_EMBD, "token_embd" }, + { LLM_TENSOR_TOKEN_EMBD_NORM, "token_embd_norm" }, + { LLM_TENSOR_OUTPUT_NORM, "output_norm" }, + { LLM_TENSOR_OUTPUT, "output" }, + { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" }, + { LLM_TENSOR_TIME_MIX_W0, "blk.%d.time_mix_w0" }, + { LLM_TENSOR_TIME_MIX_W1, "blk.%d.time_mix_w1" }, + { LLM_TENSOR_TIME_MIX_W2, "blk.%d.time_mix_w2" }, + { LLM_TENSOR_TIME_MIX_A0, "blk.%d.time_mix_a0" }, + { LLM_TENSOR_TIME_MIX_A1, "blk.%d.time_mix_a1" }, + { LLM_TENSOR_TIME_MIX_A2, "blk.%d.time_mix_a2" }, + { LLM_TENSOR_TIME_MIX_V0, "blk.%d.time_mix_v0" }, + { LLM_TENSOR_TIME_MIX_V1, "blk.%d.time_mix_v1" }, + { LLM_TENSOR_TIME_MIX_V2, "blk.%d.time_mix_v2" }, + { LLM_TENSOR_TIME_MIX_G1, "blk.%d.time_mix_g1" }, + { LLM_TENSOR_TIME_MIX_G2, "blk.%d.time_mix_g2" }, + { LLM_TENSOR_TIME_MIX_K_K, "blk.%d.time_mix_k_k" }, + { LLM_TENSOR_TIME_MIX_K_A, "blk.%d.time_mix_k_a" }, + { LLM_TENSOR_TIME_MIX_R_K, "blk.%d.time_mix_r_k" }, + { LLM_TENSOR_TIME_MIX_LERP_FUSED, "blk.%d.time_mix_lerp_fused" }, + { LLM_TENSOR_TIME_MIX_KEY, "blk.%d.time_mix_key" }, + { LLM_TENSOR_TIME_MIX_VALUE, "blk.%d.time_mix_value" }, + { LLM_TENSOR_TIME_MIX_RECEPTANCE, "blk.%d.time_mix_receptance" }, + { 
LLM_TENSOR_TIME_MIX_LN, "blk.%d.time_mix_ln" }, + { LLM_TENSOR_TIME_MIX_OUTPUT, "blk.%d.time_mix_output" }, + { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" }, + { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" }, + { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" }, + { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" }, + }, + }, { LLM_ARCH_GRANITE, { @@ -1397,6 +1471,12 @@ static const std::map LLM_TENSOR_INFOS = { {LLM_TENSOR_SSM_OUT, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}}, {LLM_TENSOR_TIME_MIX_W1, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}}, {LLM_TENSOR_TIME_MIX_W2, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}}, + {LLM_TENSOR_TIME_MIX_A1, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}}, + {LLM_TENSOR_TIME_MIX_A2, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}}, + {LLM_TENSOR_TIME_MIX_V1, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}}, + {LLM_TENSOR_TIME_MIX_V2, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}}, + {LLM_TENSOR_TIME_MIX_G1, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}}, + {LLM_TENSOR_TIME_MIX_G2, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}}, {LLM_TENSOR_TIME_MIX_DECAY_W1, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}}, {LLM_TENSOR_TIME_MIX_DECAY_W2, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}}, {LLM_TENSOR_TIME_MIX_KEY, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}}, @@ -1415,6 +1495,9 @@ static const std::map LLM_TENSOR_INFOS = { {LLM_TENSOR_TIME_MIX_LN, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}}, {LLM_TENSOR_CHANNEL_MIX_LERP_K, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}}, {LLM_TENSOR_CHANNEL_MIX_LERP_R, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}}, + {LLM_TENSOR_TIME_MIX_K_K, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}}, + {LLM_TENSOR_TIME_MIX_K_A, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}}, + {LLM_TENSOR_TIME_MIX_R_K, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}}, {LLM_TENSOR_TIME_MIX_LERP_W, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_ADD}}, {LLM_TENSOR_TIME_MIX_LERP_K, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_ADD}}, {LLM_TENSOR_TIME_MIX_LERP_V, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_ADD}}, @@ -1422,6 +1505,9 @@ static const std::map LLM_TENSOR_INFOS = { {LLM_TENSOR_TIME_MIX_LERP_G, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_ADD}}, {LLM_TENSOR_TIME_MIX_LERP_FUSED, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_ADD}}, {LLM_TENSOR_TIME_MIX_DECAY, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_ADD}}, + {LLM_TENSOR_TIME_MIX_W0, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_ADD}}, + {LLM_TENSOR_TIME_MIX_A0, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_ADD}}, + {LLM_TENSOR_TIME_MIX_V0, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_ADD}}, {LLM_TENSOR_TIME_MIX_FIRST, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_RWKV_WKV6}}, {LLM_TENSOR_ATTN_NORM, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}}, {LLM_TENSOR_ATTN_NORM_2, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}}, diff --git a/src/llama-arch.h b/src/llama-arch.h index 2ec2e2362eba1..a28815d8a14c7 100644 --- a/src/llama-arch.h +++ b/src/llama-arch.h @@ -63,6 +63,8 @@ enum llm_arch { LLM_ARCH_EXAONE, LLM_ARCH_RWKV6, LLM_ARCH_RWKV6QWEN2, + LLM_ARCH_RWKV7, + LLM_ARCH_ARWKV7, LLM_ARCH_GRANITE, LLM_ARCH_GRANITE_MOE, LLM_ARCH_CHAMELEON, @@ -127,6 +129,10 @@ enum llm_kv { LLM_KV_ATTENTION_CAUSAL, LLM_KV_ATTENTION_Q_LORA_RANK, LLM_KV_ATTENTION_KV_LORA_RANK, + LLM_KV_ATTENTION_DECAY_LORA_RANK, + LLM_KV_ATTENTION_ICLR_LORA_RANK, + LLM_KV_ATTENTION_VALUE_RESIDUAL_MIX_LORA_RANK, + LLM_KV_ATTENTION_GATE_LORA_RANK, LLM_KV_ATTENTION_RELATIVE_BUCKETS_COUNT, LLM_KV_ATTENTION_SLIDING_WINDOW, LLM_KV_ATTENTION_SCALE, @@ -250,8 +256,20 @@ enum llm_tensor { LLM_TENSOR_SSM_A, LLM_TENSOR_SSM_D, LLM_TENSOR_SSM_OUT, + LLM_TENSOR_TIME_MIX_W0, 
LLM_TENSOR_TIME_MIX_W1, LLM_TENSOR_TIME_MIX_W2, + LLM_TENSOR_TIME_MIX_A0, + LLM_TENSOR_TIME_MIX_A1, + LLM_TENSOR_TIME_MIX_A2, + LLM_TENSOR_TIME_MIX_V0, + LLM_TENSOR_TIME_MIX_V1, + LLM_TENSOR_TIME_MIX_V2, + LLM_TENSOR_TIME_MIX_G1, + LLM_TENSOR_TIME_MIX_G2, + LLM_TENSOR_TIME_MIX_K_K, + LLM_TENSOR_TIME_MIX_K_A, + LLM_TENSOR_TIME_MIX_R_K, LLM_TENSOR_TIME_MIX_LERP_X, LLM_TENSOR_TIME_MIX_LERP_W, LLM_TENSOR_TIME_MIX_LERP_K, diff --git a/src/llama-hparams.h b/src/llama-hparams.h index dbb7abd317b6f..bb17ba86dc2fb 100644 --- a/src/llama-hparams.h +++ b/src/llama-hparams.h @@ -76,6 +76,10 @@ struct llama_hparams { uint32_t time_decay_extra_dim = 0; uint32_t wkv_head_size = 0; uint32_t token_shift_count = 2; + uint32_t n_lora_decay = 0; + uint32_t n_lora_iclr = 0; + uint32_t n_lora_value_res_mix = 0; + uint32_t n_lora_gate = 0; float rope_attn_factor = 1.0f; float rope_freq_base_train; diff --git a/src/llama-model.cpp b/src/llama-model.cpp index 4b288d8f66a33..c571aa69b671c 100644 --- a/src/llama-model.cpp +++ b/src/llama-model.cpp @@ -32,6 +32,7 @@ const char * llm_type_name(llm_type type) { case LLM_TYPE_109M: return "109M"; case LLM_TYPE_137M: return "137M"; case LLM_TYPE_160M: return "160M"; + case LLM_TYPE_190M: return "190M"; case LLM_TYPE_220M: return "220M"; case LLM_TYPE_250M: return "250M"; case LLM_TYPE_270M: return "270M"; @@ -48,6 +49,7 @@ const char * llm_type_name(llm_type type) { case LLM_TYPE_1_6B: return "1.6B"; case LLM_TYPE_2B: return "2B"; case LLM_TYPE_2_8B: return "2.8B"; + case LLM_TYPE_2_9B: return "2.9B"; case LLM_TYPE_3B: return "3B"; case LLM_TYPE_4B: return "4B"; case LLM_TYPE_6B: return "6B"; @@ -1250,6 +1252,36 @@ void llama_model::load_hparams(llama_model_loader & ml) { default: type = LLM_TYPE_UNKNOWN; } } break; + case LLM_ARCH_RWKV7: + case LLM_ARCH_ARWKV7: + { + ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps, false); + ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps, false); + ml.get_key(LLM_KV_WKV_HEAD_SIZE, hparams.wkv_head_size); + ml.get_key(LLM_KV_ATTENTION_DECAY_LORA_RANK, hparams.n_lora_decay); + ml.get_key(LLM_KV_ATTENTION_ICLR_LORA_RANK, hparams.n_lora_iclr); + ml.get_key(LLM_KV_ATTENTION_VALUE_RESIDUAL_MIX_LORA_RANK, hparams.n_lora_value_res_mix); + ml.get_key(LLM_KV_ATTENTION_GATE_LORA_RANK, hparams.n_lora_gate, false); + ml.get_key(LLM_KV_TOKEN_SHIFT_COUNT, hparams.token_shift_count, false); + + switch (hparams.n_layer) { + case 12: type = LLM_TYPE_190M; break; + case 24: + switch (hparams.n_embd) { + case 1024: type = LLM_TYPE_450M; break; + case 2048: type = LLM_TYPE_1_5B; break; + default: type = LLM_TYPE_UNKNOWN; + } break; + case 28: + switch (hparams.n_embd) { + case 1536: type = LLM_TYPE_1_5B; break; + case 3584: type = LLM_TYPE_7B; break; + default: type = LLM_TYPE_UNKNOWN; + } break; + case 32: type = LLM_TYPE_2_9B; break; // RWKV-7-World + default: type = LLM_TYPE_UNKNOWN; + } + } break; case LLM_ARCH_GRANITE: case LLM_ARCH_GRANITE_MOE: { @@ -3366,6 +3398,146 @@ bool llama_model::load_tensors(llama_model_loader & ml) { layer.ffn_up = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, 0); } } break; + case LLM_ARCH_RWKV7: + { + tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0); + + // Block 0, LN0 + tok_norm = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD_NORM, "weight"), {n_embd}, 0); + tok_norm_b = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD_NORM, "bias"), {n_embd}, 0); + + // output + output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0); + 
output_norm_b = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd}, 0); + output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, 0); + + const int n_lora_decay = hparams.n_lora_decay; + const int n_lora_iclr = hparams.n_lora_iclr; + const int n_lora_value_res_mix = hparams.n_lora_value_res_mix; + const int n_lora_gate = hparams.n_lora_gate; + const int attn_hidden_size = n_embd; + const int ffn_size = hparams.n_ff_arr[0]; + + for (int i = 0; i < n_layer; ++i) { + auto & layer = layers[i]; + + layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0); + layer.attn_norm_b = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "bias", i), {n_embd}, 0); + + layer.attn_norm_2 = create_tensor(tn(LLM_TENSOR_ATTN_NORM_2, "weight", i), {n_embd}, 0); + layer.attn_norm_2_b = create_tensor(tn(LLM_TENSOR_ATTN_NORM_2, "bias", i), {n_embd}, 0); + + layer.time_mix_w0 = create_tensor(tn(LLM_TENSOR_TIME_MIX_W0, "weight", i), {n_embd}, 0); + layer.time_mix_w1 = create_tensor(tn(LLM_TENSOR_TIME_MIX_W1, "weight", i), {n_embd, n_lora_decay}, 0); + layer.time_mix_w2 = create_tensor(tn(LLM_TENSOR_TIME_MIX_W2, "weight", i), {n_lora_decay, n_embd}, 0); + + layer.time_mix_a0 = create_tensor(tn(LLM_TENSOR_TIME_MIX_A0, "weight", i), {n_embd}, 0); + layer.time_mix_a1 = create_tensor(tn(LLM_TENSOR_TIME_MIX_A1, "weight", i), {n_embd, n_lora_iclr}, 0); + layer.time_mix_a2 = create_tensor(tn(LLM_TENSOR_TIME_MIX_A2, "weight", i), {n_lora_iclr, n_embd}, 0); + + if (i == 0) { + // actually not used + layer.time_mix_v0 = create_tensor(tn(LLM_TENSOR_TIME_MIX_V0, "weight", i), {n_embd}, 0); + layer.time_mix_v1 = create_tensor(tn(LLM_TENSOR_TIME_MIX_V1, "weight", i), {n_embd, n_lora_iclr}, 0); + layer.time_mix_v2 = create_tensor(tn(LLM_TENSOR_TIME_MIX_V2, "weight", i), {n_lora_iclr, n_embd}, 0); + } else { + layer.time_mix_v0 = create_tensor(tn(LLM_TENSOR_TIME_MIX_V0, "weight", i), {n_embd}, 0); + layer.time_mix_v1 = create_tensor(tn(LLM_TENSOR_TIME_MIX_V1, "weight", i), {n_embd, n_lora_value_res_mix}, 0); + layer.time_mix_v2 = create_tensor(tn(LLM_TENSOR_TIME_MIX_V2, "weight", i), {n_lora_value_res_mix, n_embd}, 0); + } + + layer.time_mix_g1 = create_tensor(tn(LLM_TENSOR_TIME_MIX_G1, "weight", i), {n_embd, n_lora_gate}, 0); + layer.time_mix_g2 = create_tensor(tn(LLM_TENSOR_TIME_MIX_G2, "weight", i), {n_lora_gate, n_embd}, 0); + + layer.time_mix_lerp_fused = create_tensor(tn(LLM_TENSOR_TIME_MIX_LERP_FUSED, "weight", i), {n_embd, 1, 1, 6}, 0); + + layer.time_mix_k_k = create_tensor(tn(LLM_TENSOR_TIME_MIX_K_K, "weight", i), {attn_hidden_size}, 0); + layer.time_mix_k_a = create_tensor(tn(LLM_TENSOR_TIME_MIX_K_A, "weight", i), {attn_hidden_size}, 0); + layer.time_mix_r_k = create_tensor(tn(LLM_TENSOR_TIME_MIX_R_K, "weight", i), {attn_hidden_size}, 0); + + layer.time_mix_key = create_tensor(tn(LLM_TENSOR_TIME_MIX_KEY, "weight", i), {attn_hidden_size, n_embd}, 0); + layer.time_mix_value = create_tensor(tn(LLM_TENSOR_TIME_MIX_VALUE, "weight", i), {attn_hidden_size, n_embd}, 0); + layer.time_mix_receptance = create_tensor(tn(LLM_TENSOR_TIME_MIX_RECEPTANCE, "weight", i), {attn_hidden_size, n_embd}, 0); + + layer.time_mix_ln = create_tensor(tn(LLM_TENSOR_TIME_MIX_LN, "weight", i), {n_embd}, 0); + layer.time_mix_ln_b = create_tensor(tn(LLM_TENSOR_TIME_MIX_LN, "bias", i), {n_embd}, 0); + layer.time_mix_output = create_tensor(tn(LLM_TENSOR_TIME_MIX_OUTPUT, "weight", i), {n_embd, attn_hidden_size}, 0); + + layer.channel_mix_lerp_k = create_tensor(tn(LLM_TENSOR_CHANNEL_MIX_LERP_K, "weight", i), {n_embd, 
1, 1}, 0); + + layer.channel_mix_key = create_tensor(tn(LLM_TENSOR_CHANNEL_MIX_KEY, "weight", i), {n_embd, ffn_size}, 0); + layer.channel_mix_value = create_tensor(tn(LLM_TENSOR_CHANNEL_MIX_VALUE, "weight", i), {ffn_size, n_embd}, 0); + } + + } break; + case LLM_ARCH_ARWKV7: + { + tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0); + + // output + output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0); + output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, 0); + + const int n_lora_decay = hparams.n_lora_decay; + const int n_lora_iclr = hparams.n_lora_iclr; + const int n_lora_value_res_mix = hparams.n_lora_value_res_mix; + const int n_lora_gate = hparams.n_lora_gate; + const int attn_hidden_size = n_embd; + + for (int i = 0; i < n_layer; ++i) { + auto & layer = layers[i]; + + layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0); + + layer.time_mix_w0 = create_tensor(tn(LLM_TENSOR_TIME_MIX_W0, "weight", i), {n_embd}, 0); + layer.time_mix_w1 = create_tensor(tn(LLM_TENSOR_TIME_MIX_W1, "weight", i), {n_embd, n_lora_decay}, 0); + layer.time_mix_w2 = create_tensor(tn(LLM_TENSOR_TIME_MIX_W2, "weight", i), {n_lora_decay, n_embd}, 0); + + layer.time_mix_a0 = create_tensor(tn(LLM_TENSOR_TIME_MIX_A0, "weight", i), {n_embd}, 0); + layer.time_mix_a1 = create_tensor(tn(LLM_TENSOR_TIME_MIX_A1, "weight", i), {n_embd, n_lora_iclr}, 0); + layer.time_mix_a2 = create_tensor(tn(LLM_TENSOR_TIME_MIX_A2, "weight", i), {n_lora_iclr, n_embd}, 0); + + if (i == 0) { + // actually not used + layer.time_mix_v0 = create_tensor(tn(LLM_TENSOR_TIME_MIX_V0, "weight", i), {n_embd}, 0); + layer.time_mix_v1 = create_tensor(tn(LLM_TENSOR_TIME_MIX_V1, "weight", i), {n_embd, n_lora_iclr}, 0); + layer.time_mix_v2 = create_tensor(tn(LLM_TENSOR_TIME_MIX_V2, "weight", i), {n_lora_iclr, n_embd}, 0); + } else { + layer.time_mix_v0 = create_tensor(tn(LLM_TENSOR_TIME_MIX_V0, "weight", i), {n_embd}, 0); + layer.time_mix_v1 = create_tensor(tn(LLM_TENSOR_TIME_MIX_V1, "weight", i), {n_embd, n_lora_value_res_mix}, 0); + layer.time_mix_v2 = create_tensor(tn(LLM_TENSOR_TIME_MIX_V2, "weight", i), {n_lora_value_res_mix, n_embd}, 0); + } + + layer.time_mix_g1 = create_tensor(tn(LLM_TENSOR_TIME_MIX_G1, "weight", i), {n_embd, n_lora_gate}, llama_model_loader::TENSOR_NOT_REQUIRED); + layer.time_mix_g2 = create_tensor(tn(LLM_TENSOR_TIME_MIX_G2, "weight", i), {n_lora_gate, n_embd}, llama_model_loader::TENSOR_NOT_REQUIRED); + + try { + layer.time_mix_lerp_fused = create_tensor(tn(LLM_TENSOR_TIME_MIX_LERP_FUSED, "weight", i), {n_embd, 1, 1, 6}, 0); + } catch(std::runtime_error & e) { + // ARWKV models may not have gate tensors + layer.time_mix_lerp_fused = create_tensor(tn(LLM_TENSOR_TIME_MIX_LERP_FUSED, "weight", i), {n_embd, 1, 1, 5}, 0); + } + + layer.time_mix_k_k = create_tensor(tn(LLM_TENSOR_TIME_MIX_K_K, "weight", i), {attn_hidden_size}, 0); + layer.time_mix_k_a = create_tensor(tn(LLM_TENSOR_TIME_MIX_K_A, "weight", i), {attn_hidden_size}, 0); + layer.time_mix_r_k = create_tensor(tn(LLM_TENSOR_TIME_MIX_R_K, "weight", i), {attn_hidden_size}, 0); + + layer.time_mix_key = create_tensor(tn(LLM_TENSOR_TIME_MIX_KEY, "weight", i), {attn_hidden_size, n_embd}, 0); + layer.time_mix_value = create_tensor(tn(LLM_TENSOR_TIME_MIX_VALUE, "weight", i), {attn_hidden_size, n_embd}, 0); + layer.time_mix_receptance = create_tensor(tn(LLM_TENSOR_TIME_MIX_RECEPTANCE, "weight", i), {attn_hidden_size, n_embd}, 0); + + layer.time_mix_ln = 
create_tensor(tn(LLM_TENSOR_TIME_MIX_LN, "weight", i), {n_embd}, llama_model_loader::TENSOR_NOT_REQUIRED); + layer.time_mix_ln_b = create_tensor(tn(LLM_TENSOR_TIME_MIX_LN, "bias", i), {n_embd}, llama_model_loader::TENSOR_NOT_REQUIRED); + layer.time_mix_output = create_tensor(tn(LLM_TENSOR_TIME_MIX_OUTPUT, "weight", i), {n_embd, attn_hidden_size}, 0); + + layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0); + + layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, 0); + layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd}, 0); + layer.ffn_up = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, 0); + } + + } break; case LLM_ARCH_CHAMELEON: { tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0); @@ -10212,6 +10384,7 @@ struct llm_build_rwkv6_base : public llm_graph_context { const auto n_tokens = ubatch.n_tokens; const auto n_seqs = ubatch.n_seqs; + const auto n_seq_tokens = ubatch.n_seq_tokens; const auto n_embd = hparams.n_embd; const auto head_size = hparams.wkv_head_size; const auto n_head = n_embd / head_size; @@ -10224,6 +10397,10 @@ struct llm_build_rwkv6_base : public llm_graph_context { bool is_qrwkv = layer.time_mix_first == nullptr; ggml_tensor * sx = ggml_sub(ctx0, x_prev, cur); + + sx = ggml_reshape_2d(ctx0, sx, n_embd, n_tokens); + cur = ggml_reshape_2d(ctx0, cur, n_embd, n_tokens); + ggml_tensor * xxx = ggml_add(ctx0, ggml_mul(ctx0, sx, layer.time_mix_lerp_x), cur); xxx = ggml_reshape_4d( @@ -10366,7 +10543,7 @@ struct llm_build_rwkv6_base : public llm_graph_context { cur = ggml_mul(ctx0, cur, g); cur = build_lora_mm(layer.time_mix_output, cur); - return cur; + return ggml_reshape_3d(ctx0, cur, n_embd, n_seq_tokens, n_seqs); } }; @@ -10389,6 +10566,7 @@ struct llm_build_rwkv6 : public llm_build_rwkv6_base { for (int il = 0; il < n_layer; ++il) { const llama_layer * layer = &model.layers[il]; + inpL = ggml_reshape_3d(ctx0, inpL, n_embd, n_seq_tokens, n_seqs); ggml_tensor * token_shift = build_rwkv_token_shift_load( gf, state_copy, state_mask, ubatch, il @@ -10422,9 +10600,6 @@ struct llm_build_rwkv6 : public llm_build_rwkv6_base { 1 ); - cur = build_rwkv6_channel_mix(layer, ffn_norm, x_prev, LLM_ARCH_RWKV6); - cur = ggml_add(ctx0, cur, ffn_inp); - token_shift = ggml_concat(ctx0, ggml_view_3d(ctx0, att_norm, n_embd, 1, n_seqs, att_norm->nb[1], att_norm->nb[2], (n_seq_tokens-1)*n_embd*ggml_element_size(att_norm)), ggml_view_3d(ctx0, ffn_norm, n_embd, 1, n_seqs, ffn_norm->nb[1], ffn_norm->nb[2], (n_seq_tokens-1)*n_embd*ggml_element_size(ffn_norm)), @@ -10432,6 +10607,18 @@ struct llm_build_rwkv6 : public llm_build_rwkv6_base { ); ggml_build_forward_expand(gf, build_rwkv_token_shift_store(token_shift, ubatch, il)); + if (il == n_layer - 1) { + // skip computing output for unused tokens + struct ggml_tensor * inp_out_ids = build_inp_out_ids(); + ffn_inp = ggml_get_rows(ctx0, ggml_reshape_2d(ctx0, ffn_inp, n_embd, n_tokens), inp_out_ids); + ffn_norm = ggml_get_rows(ctx0, ggml_reshape_2d(ctx0, ffn_norm, n_embd, n_tokens), inp_out_ids); + x_prev = ggml_get_rows(ctx0, ggml_reshape_2d(ctx0, x_prev, n_embd, n_tokens), inp_out_ids); + cur = ggml_get_rows(ctx0, ggml_reshape_2d(ctx0, cur, n_embd, n_tokens), inp_out_ids); + } + + cur = build_rwkv6_channel_mix(layer, ffn_norm, x_prev, LLM_ARCH_RWKV6); + cur = ggml_add(ctx0, cur, ffn_inp); + if (hparams.rescale_every_n_layers != 0 && (il + 1) % hparams.rescale_every_n_layers == 0) { cur = ggml_scale(ctx0, 
cur, 0.5F); } @@ -10444,12 +10631,6 @@ struct llm_build_rwkv6 : public llm_build_rwkv6_base { } cur = inpL; - - ggml_tensor * inp_out_ids = build_inp_out_ids(); - - cur = ggml_reshape_2d(ctx0, cur, n_embd, n_tokens); - cur = ggml_get_rows(ctx0, cur, inp_out_ids); - cur = build_norm(cur, model.output_norm, model.output_norm_b, LLM_NORM, -1); cb(cur, "result_norm", -1); @@ -10481,10 +10662,9 @@ struct llm_build_rwkv6qwen2 : public llm_build_rwkv6_base { const auto n_seq_tokens = ubatch.n_seq_tokens; const auto n_seqs = ubatch.n_seqs; - inpL = build_inp_embd(model.tok_embd); - for (int il = 0; il < n_layer; ++il) { const llama_layer * layer = &model.layers[il]; + inpL = ggml_reshape_3d(ctx0, inpL, n_embd, n_seq_tokens, n_seqs); ggml_tensor * token_shift = build_rwkv_token_shift_load( gf, state_copy, state_mask, ubatch, il @@ -10508,6 +10688,13 @@ struct llm_build_rwkv6qwen2 : public llm_build_rwkv6_base { ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpL); cb(ffn_inp, "ffn_inp", il); + if (il == n_layer - 1) { + // skip computing output for unused tokens + struct ggml_tensor * inp_out_ids = build_inp_out_ids(); + cur = ggml_get_rows(ctx0, ggml_reshape_2d(ctx0, cur, n_embd, n_tokens), inp_out_ids); + ffn_inp = ggml_get_rows(ctx0, ggml_reshape_2d(ctx0, ffn_inp, n_embd, n_tokens), inp_out_ids); + } + // feed-forward network cur = build_norm(ffn_inp, model.layers[il].ffn_norm, NULL, @@ -10532,10 +10719,358 @@ struct llm_build_rwkv6qwen2 : public llm_build_rwkv6_base { } cur = inpL; - ggml_tensor * inp_out_ids = build_inp_out_ids(); - cur = ggml_reshape_2d(ctx0, cur, n_embd, n_tokens); - cur = ggml_get_rows(ctx0, cur, inp_out_ids); + cur = build_norm(cur, model.output_norm, model.output_norm_b, LLM_NORM_RMS, -1); + + cb(cur, "result_norm", -1); + res->t_embd = cur; + + cur = build_lora_mm(model.output, cur); + + cb(cur, "result_output", -1); + res->t_logits = cur; + + ggml_build_forward_expand(gf, cur); + } +}; + +struct llm_build_rwkv7_base : public llm_graph_context { + const llama_model & model; + + llm_build_rwkv7_base(const llama_model & model, const llm_graph_params & params) : llm_graph_context(params), model(model) { + } + + ggml_tensor * build_rwkv7_channel_mix( + const llama_layer * layer, + ggml_tensor * cur, + ggml_tensor * x_prev, + llm_arch arch) const { + ggml_tensor * sx = ggml_sub(ctx0, x_prev, cur); + switch (arch) { + case LLM_ARCH_RWKV7: + { + ggml_tensor * xk = ggml_add(ctx0, ggml_mul(ctx0, sx, layer->channel_mix_lerp_k), cur); + + ggml_tensor * k = ggml_sqr( + ctx0, + ggml_relu( + ctx0, + build_lora_mm(layer->channel_mix_key, xk) + ) + ); + + cur = build_lora_mm(layer->channel_mix_value, k); + } break; + default: + GGML_ABORT("fatal error"); + } + + return cur; + } + + ggml_tensor * build_rwkv7_time_mix( + ggml_cgraph * gf, + ggml_tensor * cur, + ggml_tensor * x_prev, + ggml_tensor * state_copy, + ggml_tensor * state_mask, + ggml_tensor *& first_layer_value, + const llama_ubatch & ubatch, + int il) const { + const llama_kv_cache_unified * kv_self = static_cast(memory); + + const auto n_tokens = ubatch.n_tokens; + const auto n_seqs = ubatch.n_seqs; + const auto n_embd = hparams.n_embd; + const auto head_size = hparams.wkv_head_size; + const auto head_count = n_embd / head_size; + const auto n_seq_tokens = ubatch.n_seq_tokens; + + const auto kv_head = kv_self->head; + + const auto & layer = model.layers[il]; + + bool has_gating = layer.time_mix_g1 && layer.time_mix_g2; + + ggml_tensor * sx = ggml_sub(ctx0, x_prev, cur); + ggml_tensor * dummy = ggml_new_tensor_4d(ctx0, 
GGML_TYPE_F32, n_embd, n_seq_tokens, n_seqs, has_gating ? 6 : 5); + sx = ggml_repeat(ctx0, sx, dummy); + + ggml_tensor * xxx = ggml_add(ctx0, ggml_mul(ctx0, sx, layer.time_mix_lerp_fused), cur); + + ggml_tensor * xr = ggml_view_2d(ctx0, xxx, n_embd, n_tokens, xxx->nb[1], 0); + ggml_tensor * xw = ggml_view_2d(ctx0, xxx, n_embd, n_tokens, xxx->nb[1], n_embd * n_tokens * sizeof(float)); + ggml_tensor * xk = ggml_view_2d(ctx0, xxx, n_embd, n_tokens, xxx->nb[1], n_embd * n_tokens * 2 * sizeof(float)); + ggml_tensor * xv = ggml_view_2d(ctx0, xxx, n_embd, n_tokens, xxx->nb[1], n_embd * n_tokens * 3 * sizeof(float)); + ggml_tensor * xa = ggml_view_2d(ctx0, xxx, n_embd, n_tokens, xxx->nb[1], n_embd * n_tokens * 4 * sizeof(float)); + ggml_tensor * xg = has_gating ? ggml_view_2d(ctx0, xxx, n_embd, n_tokens, xxx->nb[1], n_embd * n_tokens * 5 * sizeof(float)) : nullptr; + + ggml_tensor * r = build_lora_mm(layer.time_mix_receptance, xr); + ggml_tensor * w = ggml_add( + ctx0, + ggml_mul_mat(ctx0, layer.time_mix_w2, ggml_tanh(ctx0, ggml_mul_mat(ctx0, layer.time_mix_w1, xw))), + layer.time_mix_w0 + ); + w = ggml_exp(ctx0, ggml_scale(ctx0, ggml_sigmoid(ctx0, w), -0.606531)); + + ggml_tensor * k = build_lora_mm(layer.time_mix_key, xk); + ggml_tensor * v = build_lora_mm(layer.time_mix_value, xv); + if (first_layer_value == nullptr) { + first_layer_value = v; + } else { + // Add the first layer value as a residual connection. + v = ggml_add(ctx0, v, + ggml_mul(ctx0, + ggml_sub(ctx0, first_layer_value, v), + ggml_sigmoid(ctx0, ggml_add(ctx0, + ggml_mul_mat(ctx0, layer.time_mix_v2, ggml_mul_mat(ctx0, layer.time_mix_v1, xv)), + layer.time_mix_v0 + ) + ) + ) + ); + } + + ggml_tensor * g = nullptr; + if (layer.time_mix_g1 && layer.time_mix_g2) { + g = ggml_mul_mat(ctx0, layer.time_mix_g2, ggml_sigmoid(ctx0, ggml_mul_mat(ctx0, layer.time_mix_g1, xg))); + } + + ggml_tensor * a = ggml_sigmoid(ctx0, + ggml_add( + ctx0, + ggml_mul_mat(ctx0, layer.time_mix_a2, ggml_mul_mat(ctx0, layer.time_mix_a1, xa)), + layer.time_mix_a0 + ) + ); + + ggml_tensor * kk = ggml_reshape_3d(ctx0, ggml_mul(ctx0, k, layer.time_mix_k_k), head_size, head_count, n_tokens); + kk = ggml_l2_norm(ctx0, kk, 1e-12); + ggml_tensor * ka = ggml_mul(ctx0, k, layer.time_mix_k_a); + k = ggml_add(ctx0, k, ggml_sub(ctx0, ggml_mul(ctx0, a, ka), ka)); + + r = ggml_reshape_3d(ctx0, r, head_size, head_count, n_tokens); + w = ggml_reshape_3d(ctx0, w, head_size, head_count, n_tokens); + k = ggml_reshape_3d(ctx0, k, head_size, head_count, n_tokens); + v = ggml_reshape_3d(ctx0, v, head_size, head_count, n_tokens); + a = ggml_reshape_3d(ctx0, a, head_size, head_count, n_tokens); + + ggml_tensor * wkv_state = build_copy_mask_state( + gf, kv_self->v_l[il], state_copy, state_mask, + hparams.n_embd_v_s(), n_seqs); + + ggml_tensor * wkv_output = ggml_rwkv_wkv7(ctx0, r, w, k, v, ggml_neg(ctx0, kk), ggml_mul(ctx0, kk, a), wkv_state); + cur = ggml_view_1d(ctx0, wkv_output, n_embd * n_tokens, 0); + wkv_state = ggml_view_1d(ctx0, wkv_output, n_embd * head_size * n_seqs, n_embd * n_tokens * sizeof(float)); + + ggml_build_forward_expand( + gf, + ggml_cpy( + ctx0, + wkv_state, + ggml_view_1d( + ctx0, + kv_self->v_l[il], + hparams.n_embd_v_s() * n_seqs, + hparams.n_embd_v_s() * kv_head * ggml_element_size(kv_self->v_l[il]) + ) + ) + ); + + if (layer.time_mix_ln && layer.time_mix_ln_b) { + // group norm with head_count groups + cur = ggml_reshape_3d(ctx0, cur, n_embd / head_count, head_count, n_tokens); + cur = ggml_norm(ctx0, cur, 64e-5f); + + // Convert back to regular vectors. 
+ cur = ggml_reshape_2d(ctx0, cur, n_embd, n_tokens); + cur = ggml_add(ctx0, ggml_mul(ctx0, cur, layer.time_mix_ln), layer.time_mix_ln_b); + } else { + cur = ggml_reshape_2d(ctx0, cur, n_embd, n_tokens); + } + + ggml_tensor * rk = ggml_sum_rows(ctx0, + ggml_mul(ctx0, ggml_mul(ctx0, k, r), ggml_reshape_2d(ctx0, layer.time_mix_r_k, head_size, head_count))); + cur = ggml_add(ctx0, cur, ggml_reshape_2d(ctx0, ggml_mul(ctx0, v, rk), n_embd, n_tokens)); + + if (has_gating) { + cur = ggml_mul(ctx0, cur, g); + } + cur = build_lora_mm(layer.time_mix_output, cur); + + return ggml_reshape_3d(ctx0, cur, n_embd, n_seq_tokens, n_seqs); + } +}; + +struct llm_build_rwkv7 : public llm_build_rwkv7_base { + llm_build_rwkv7(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_build_rwkv7_base(model, params) { + GGML_ASSERT(hparams.token_shift_count == 2); + + ggml_tensor * cur; + ggml_tensor * inpL; + ggml_tensor * v_first = nullptr; + + inpL = build_inp_embd(model.tok_embd); + inpL = build_norm(inpL, model.tok_norm, model.tok_norm_b, LLM_NORM, -1); + + ggml_tensor * state_copy = build_inp_s_copy(); + ggml_tensor * state_mask = build_inp_s_mask(); + + const auto n_embd = hparams.n_embd; + const auto n_seq_tokens = ubatch.n_seq_tokens; + const auto n_seqs = ubatch.n_seqs; + + for (int il = 0; il < n_layer; ++il) { + const llama_layer * layer = &model.layers[il]; + inpL = ggml_reshape_3d(ctx0, inpL, n_embd, n_seq_tokens, n_seqs); + + ggml_tensor * token_shift = build_rwkv_token_shift_load( + gf, state_copy, state_mask, ubatch, il + ); + + ggml_tensor * att_shift = ggml_view_3d(ctx0, token_shift, n_embd, 1, n_seqs, token_shift->nb[1], token_shift->nb[2], 0); + ggml_tensor * ffn_shift = ggml_view_3d(ctx0, token_shift, n_embd, 1, n_seqs, token_shift->nb[1], token_shift->nb[2], n_embd * ggml_element_size(token_shift)); + + ggml_tensor * att_norm = build_norm(inpL, layer->attn_norm, layer->attn_norm_b, LLM_NORM, il); + cb(att_norm, "attn_norm", il); + + ggml_tensor * x_prev = ggml_concat( + ctx0, + att_shift, + ggml_view_3d(ctx0, att_norm, n_embd, n_seq_tokens - 1, n_seqs, att_norm->nb[1], att_norm->nb[2], 0), + 1 + ); + + cur = build_rwkv7_time_mix(gf, att_norm, x_prev, state_copy, state_mask, v_first, ubatch, il); + + ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpL); + cb(ffn_inp, "ffn_inp", il); + + ggml_tensor * ffn_norm = build_norm(ffn_inp, layer->attn_norm_2, layer->attn_norm_2_b, LLM_NORM, il); + cb(ffn_norm, "ffn_norm", il); + + x_prev = ggml_concat( + ctx0, + ffn_shift, + ggml_view_3d(ctx0, ffn_norm, n_embd, n_seq_tokens - 1, n_seqs, ffn_norm->nb[1], ffn_norm->nb[2], 0), + 1 + ); + + token_shift = ggml_concat(ctx0, + ggml_view_3d(ctx0, att_norm, n_embd, 1, n_seqs, att_norm->nb[1], att_norm->nb[2], (n_seq_tokens-1)*n_embd*ggml_element_size(att_norm)), + ggml_view_3d(ctx0, ffn_norm, n_embd, 1, n_seqs, ffn_norm->nb[1], ffn_norm->nb[2], (n_seq_tokens-1)*n_embd*ggml_element_size(ffn_norm)), + 1 + ); + ggml_build_forward_expand(gf, build_rwkv_token_shift_store(token_shift, ubatch, il)); + + if (il == n_layer - 1) { + // skip computing output for unused tokens + struct ggml_tensor * inp_out_ids = build_inp_out_ids(); + ffn_inp = ggml_get_rows(ctx0, ggml_reshape_2d(ctx0, ffn_inp, n_embd, n_tokens), inp_out_ids); + ffn_norm = ggml_get_rows(ctx0, ggml_reshape_2d(ctx0, ffn_norm, n_embd, n_tokens), inp_out_ids); + x_prev = ggml_get_rows(ctx0, ggml_reshape_2d(ctx0, x_prev, n_embd, n_tokens), inp_out_ids); + } + + cur = build_rwkv7_channel_mix(layer, ffn_norm, x_prev, LLM_ARCH_RWKV7); + 
cur = ggml_add(ctx0, cur, ffn_inp); + + cur = build_cvec(cur, il); + cb(cur, "l_out", il); + + // input for next layer + inpL = cur; + } + + cur = inpL; + cur = build_norm(cur, model.output_norm, model.output_norm_b, LLM_NORM, -1); + + cb(cur, "result_norm", -1); + res->t_embd = cur; + + cur = build_lora_mm(model.output, cur); + + cb(cur, "result_output", -1); + res->t_logits = cur; + + ggml_build_forward_expand(gf, cur); + } +}; + + +struct llm_build_arwkv7 : public llm_build_rwkv7_base { + llm_build_arwkv7(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_build_rwkv7_base(model, params) { + GGML_ASSERT(n_embd == hparams.n_embd_k_s()); + + ggml_tensor * cur; + ggml_tensor * inpL; + ggml_tensor * v_first = nullptr; + + inpL = build_inp_embd(model.tok_embd); + + ggml_tensor * state_copy = build_inp_s_copy(); + ggml_tensor * state_mask = build_inp_s_mask(); + + const auto n_embd = hparams.n_embd; + const auto n_seq_tokens = ubatch.n_seq_tokens; + const auto n_seqs = ubatch.n_seqs; + + for (int il = 0; il < n_layer; ++il) { + const llama_layer * layer = &model.layers[il]; + inpL = ggml_reshape_3d(ctx0, inpL, n_embd, n_seq_tokens, n_seqs); + + ggml_tensor * token_shift = build_rwkv_token_shift_load( + gf, state_copy, state_mask, ubatch, il + ); + + ggml_tensor * att_norm = build_norm(inpL, layer->attn_norm, layer->attn_norm_b, LLM_NORM_RMS, il); + cb(att_norm, "attn_norm", il); + + ggml_tensor * x_prev = ggml_concat( + ctx0, + token_shift, + ggml_view_3d(ctx0, att_norm, n_embd, n_seq_tokens - 1, n_seqs, att_norm->nb[1], att_norm->nb[2], 0), + 1 + ); + + cur = build_rwkv7_time_mix(gf, att_norm, x_prev, state_copy, state_mask, v_first, ubatch, il); + + token_shift = ggml_view_3d(ctx0, att_norm, n_embd, 1, n_seqs, att_norm->nb[1], att_norm->nb[2], (n_seq_tokens-1)*n_embd*ggml_element_size(att_norm)); + ggml_build_forward_expand(gf, build_rwkv_token_shift_store(token_shift, ubatch, il)); + + ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpL); + cb(ffn_inp, "ffn_inp", il); + + if (il == n_layer - 1) { + // skip computing output for unused tokens + struct ggml_tensor * inp_out_ids = build_inp_out_ids(); + cur = ggml_get_rows(ctx0, ggml_reshape_2d(ctx0, cur, n_embd, n_tokens), inp_out_ids); + ffn_inp = ggml_get_rows(ctx0, ggml_reshape_2d(ctx0, ffn_inp, n_embd, n_tokens), inp_out_ids); + } + + // feed-forward network + cur = build_norm(ffn_inp, + model.layers[il].ffn_norm, NULL, + LLM_NORM_RMS, il); + cb(cur, "ffn_norm", il); + + cur = build_ffn(cur, + model.layers[il].ffn_up, NULL, NULL, + model.layers[il].ffn_gate, NULL, NULL, + model.layers[il].ffn_down, NULL, NULL, + NULL, + LLM_FFN_SILU, LLM_FFN_PAR, il); + cb(cur, "ffn_out", il); + + cur = ggml_add(ctx0, cur, ffn_inp); + + cur = build_cvec(cur, il); + cb(cur, "l_out", il); + + // input for next layer + inpL = cur; + } + + cur = inpL; cur = build_norm(cur, model.output_norm, model.output_norm_b, LLM_NORM_RMS, -1); cb(cur, "result_norm", -1); @@ -10883,9 +11418,11 @@ llama_memory_i * llama_model::create_memory() const { llama_memory_i * res; switch (arch) { + case LLM_ARCH_MAMBA: case LLM_ARCH_RWKV6: case LLM_ARCH_RWKV6QWEN2: - case LLM_ARCH_MAMBA: + case LLM_ARCH_RWKV7: + case LLM_ARCH_ARWKV7: { res = new llama_kv_cache_unified(hparams, { /*.get_rope_factors =*/ nullptr @@ -11132,6 +11669,14 @@ llm_graph_result_ptr llama_model::build_graph( { llm = std::make_unique(*this, params, gf); } break; + case LLM_ARCH_RWKV7: + { + llm = std::make_unique(*this, params, gf); + } break; + case LLM_ARCH_ARWKV7: + { + llm = 
std::make_unique(*this, params, gf); + } break; case LLM_ARCH_CHAMELEON: { llm = std::make_unique(*this, params, gf); @@ -11245,6 +11790,8 @@ llama_rope_type llama_model_rope_type(const llama_model * model) { case LLM_ARCH_JAIS: case LLM_ARCH_RWKV6: case LLM_ARCH_RWKV6QWEN2: + case LLM_ARCH_RWKV7: + case LLM_ARCH_ARWKV7: case LLM_ARCH_WAVTOKENIZER_DEC: return LLAMA_ROPE_TYPE_NONE; @@ -11399,6 +11946,8 @@ bool llama_model_is_recurrent(const llama_model * model) { case LLM_ARCH_MAMBA: return true; case LLM_ARCH_RWKV6: return true; case LLM_ARCH_RWKV6QWEN2: return true; + case LLM_ARCH_RWKV7: return true; + case LLM_ARCH_ARWKV7: return true; default: return false; } } diff --git a/src/llama-model.h b/src/llama-model.h index 55c26a92b02d2..a9da1215abbfd 100644 --- a/src/llama-model.h +++ b/src/llama-model.h @@ -29,6 +29,7 @@ enum llm_type { LLM_TYPE_109M, LLM_TYPE_137M, LLM_TYPE_160M, + LLM_TYPE_190M, LLM_TYPE_220M, LLM_TYPE_250M, LLM_TYPE_270M, @@ -45,6 +46,7 @@ enum llm_type { LLM_TYPE_1_6B, LLM_TYPE_2B, LLM_TYPE_2_8B, + LLM_TYPE_2_9B, LLM_TYPE_3B, LLM_TYPE_4B, LLM_TYPE_6B, @@ -260,6 +262,20 @@ struct llama_layer { struct ggml_tensor * time_mix_receptance_b = nullptr; struct ggml_tensor * time_mix_gate = nullptr; + // rwkv7 + struct ggml_tensor * time_mix_w0 = nullptr; + struct ggml_tensor * time_mix_a0 = nullptr; + struct ggml_tensor * time_mix_a1 = nullptr; + struct ggml_tensor * time_mix_a2 = nullptr; + struct ggml_tensor * time_mix_v0 = nullptr; + struct ggml_tensor * time_mix_v1 = nullptr; + struct ggml_tensor * time_mix_v2 = nullptr; + struct ggml_tensor * time_mix_g1 = nullptr; + struct ggml_tensor * time_mix_g2 = nullptr; + struct ggml_tensor * time_mix_k_k = nullptr; + struct ggml_tensor * time_mix_k_a = nullptr; + struct ggml_tensor * time_mix_r_k = nullptr; + struct ggml_tensor * time_mix_ln = nullptr; struct ggml_tensor * time_mix_ln_b = nullptr; struct ggml_tensor * time_mix_output = nullptr; diff --git a/src/llama-quant.cpp b/src/llama-quant.cpp index fb7982655a373..09eb570779ce5 100644 --- a/src/llama-quant.cpp +++ b/src/llama-quant.cpp @@ -756,10 +756,19 @@ static void llama_model_quantize_impl(const std::string & fname_inp, const std:: // NOTE: can't use LLM_TN here because the layer number is not known quantize &= name.find("ssm_conv1d.weight") == std::string::npos; - // do not quantize RWKV's time_mix_first tensors + // do not quantize RWKV's small yet 2D weights quantize &= name.find("time_mix_first.weight") == std::string::npos; + quantize &= name.find("time_mix_w0.weight") == std::string::npos; quantize &= name.find("time_mix_w1.weight") == std::string::npos; quantize &= name.find("time_mix_w2.weight") == std::string::npos; + quantize &= name.find("time_mix_v0.weight") == std::string::npos; + quantize &= name.find("time_mix_v1.weight") == std::string::npos; + quantize &= name.find("time_mix_v2.weight") == std::string::npos; + quantize &= name.find("time_mix_a0.weight") == std::string::npos; + quantize &= name.find("time_mix_a1.weight") == std::string::npos; + quantize &= name.find("time_mix_a2.weight") == std::string::npos; + quantize &= name.find("time_mix_g1.weight") == std::string::npos; + quantize &= name.find("time_mix_g2.weight") == std::string::npos; quantize &= name.find("time_mix_decay_w1.weight") == std::string::npos; quantize &= name.find("time_mix_decay_w2.weight") == std::string::npos; quantize &= name.find("time_mix_lerp_fused.weight") == std::string::npos; diff --git a/tests/test-backend-ops.cpp b/tests/test-backend-ops.cpp index 
c86ffb64e9e89..adb749bd5ec9a 100644 --- a/tests/test-backend-ops.cpp +++ b/tests/test-backend-ops.cpp @@ -1916,6 +1916,40 @@ struct test_gla : public test_case { } }; +// GGML_OP_RWKV_WKV7 +struct test_rwkv_wkv7 : public test_case { + const ggml_type type; + + const int64_t head_count; + const int64_t head_size; + const int64_t n_seq_tokens; + const int64_t n_seqs; + + std::string vars() override { + return VARS_TO_STR5(type, head_count, head_size, n_seq_tokens, n_seqs); + } + + test_rwkv_wkv7(ggml_type type = GGML_TYPE_F32, + int64_t head_count = 32, int64_t head_size = 64, int64_t n_seq_tokens = 32, int64_t n_seqs = 32) + : type(type), head_count(head_count), head_size(head_size), n_seq_tokens(n_seq_tokens), n_seqs(n_seqs) {} + + ggml_tensor * build_graph(ggml_context * ctx) override { + const int64_t n_tokens = n_seq_tokens * n_seqs; + ggml_tensor * r = ggml_new_tensor(ctx, type, 3, std::vector{ head_size, head_count, n_tokens }.data()); + ggml_tensor * w = ggml_new_tensor(ctx, type, 3, std::vector{ head_size, head_count, n_tokens }.data()); + ggml_tensor * k = ggml_new_tensor(ctx, type, 3, std::vector{ head_size, head_count, n_tokens }.data()); + ggml_tensor * v = ggml_new_tensor(ctx, type, 3, std::vector{ head_size, head_count, n_tokens }.data()); + ggml_tensor * a = ggml_new_tensor(ctx, type, 3, std::vector{ head_size, head_count, n_tokens }.data()); + ggml_tensor * b = ggml_new_tensor(ctx, type, 3, std::vector{ head_size, head_count, n_tokens }.data()); + // Outputs may become NaN with long seqlen without these normalization + a = ggml_l2_norm(ctx, a, 1e-7F); + b = ggml_l2_norm(ctx, b, 1e-7F); + ggml_tensor * s = ggml_new_tensor(ctx, type, 2, std::vector{ head_size * head_size * head_count, n_seqs }.data()); + ggml_tensor * out = ggml_rwkv_wkv7(ctx, r, w, k, v, a, b, s); + return out; + } +}; + // GGML_OP_MUL_MAT struct test_mul_mat : public test_case { const ggml_type type_a; @@ -2972,6 +3006,32 @@ struct test_group_norm : public test_case { } }; +// GGML_OP_L2_NORM +struct test_l2_norm : public test_case { + const ggml_type type; + const std::array ne; + const float eps; + + std::string vars() override { + return VARS_TO_STR2(type, ne); + } + + test_l2_norm(ggml_type type = GGML_TYPE_F32, + std::array ne = {64, 64, 320, 1}, + float eps = 1e-12f) + : type(type), ne(ne), eps(eps) {} + + ggml_tensor * build_graph(ggml_context * ctx) override { + ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne.data()); + ggml_set_name(a, "a"); + + ggml_tensor * out = ggml_l2_norm(ctx, a, eps); + ggml_set_name(out, "out"); + + return out; + } +}; + // GGML_OP_ACC struct test_acc : public test_case { const ggml_type type; @@ -4006,8 +4066,11 @@ static std::vector> make_test_cases_eval() { test_cases.emplace_back(new test_rms_norm(GGML_TYPE_F32, {64, 5, 4, 3}, v, eps)); } test_cases.emplace_back(new test_rms_norm_back(GGML_TYPE_F32, {64, 5, 4, 3}, eps)); + test_cases.emplace_back(new test_l2_norm (GGML_TYPE_F32, {64, 5, 4, 3}, eps)); } + test_cases.emplace_back(new test_l2_norm(GGML_TYPE_F32, {64, 5, 4, 3}, 1e-12f)); + test_cases.emplace_back(new test_ssm_conv(GGML_TYPE_F32, {4, 1536, 1, 1}, {4, 1536, 1, 1})); test_cases.emplace_back(new test_ssm_conv(GGML_TYPE_F32, {8, 1536, 1, 1}, {4, 1536, 1, 1})); test_cases.emplace_back(new test_ssm_conv(GGML_TYPE_F32, {4, 1536, 4, 1}, {4, 1536, 1, 1})); @@ -4019,6 +4082,11 @@ static std::vector> make_test_cases_eval() { test_cases.emplace_back(new test_rwkv_wkv6(GGML_TYPE_F32, 32, 64, 32, 4)); test_cases.emplace_back(new test_rwkv_wkv6(GGML_TYPE_F32, 32, 64, 
128, 4)); + test_cases.emplace_back(new test_rwkv_wkv7(GGML_TYPE_F32, 32, 64, 1, 1)); + test_cases.emplace_back(new test_rwkv_wkv7(GGML_TYPE_F32, 32, 64, 32, 1)); + test_cases.emplace_back(new test_rwkv_wkv7(GGML_TYPE_F32, 32, 64, 32, 4)); + test_cases.emplace_back(new test_rwkv_wkv7(GGML_TYPE_F32, 32, 64, 128, 4)); + test_cases.emplace_back(new test_gla(GGML_TYPE_F32, 32, 64, 1, 1)); test_cases.emplace_back(new test_gla(GGML_TYPE_F32, 32, 64, 32, 1)); test_cases.emplace_back(new test_gla(GGML_TYPE_F32, 32, 64, 32, 4)); From a53f7f7b8859f3e634415ab03e1e295b9861d7e6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20=C5=9Alusarczyk?= <112692748+lslusarczyk@users.noreply.github.com> Date: Tue, 18 Mar 2025 01:51:25 +0100 Subject: [PATCH 073/398] fixed compilation warnings in ggml-sycl (#12424) --- ggml/src/ggml-sycl/convert.cpp | 2 +- ggml/src/ggml-sycl/dmmv.cpp | 25 +++++---- ggml/src/ggml-sycl/element_wise.cpp | 80 ++++++++++++++--------------- ggml/src/ggml-sycl/getrows.cpp | 3 +- ggml/src/ggml-sycl/ggml-sycl.cpp | 43 ++++++++-------- ggml/src/ggml-sycl/mmq.cpp | 1 - ggml/src/ggml-sycl/mmvq.cpp | 39 +++++++------- ggml/src/ggml-sycl/norm.cpp | 12 ++--- ggml/src/ggml-sycl/softmax.cpp | 2 +- 9 files changed, 101 insertions(+), 106 deletions(-) diff --git a/ggml/src/ggml-sycl/convert.cpp b/ggml/src/ggml-sycl/convert.cpp index 86b200e07030f..76ac6a4dd1f7b 100644 --- a/ggml/src/ggml-sycl/convert.cpp +++ b/ggml/src/ggml-sycl/convert.cpp @@ -138,7 +138,7 @@ static void dequantize_row_q4_0_sycl_reorder(const void *vx, dst_t *y, const int stream->parallel_for(sycl::nd_range<3>(sycl::range<3>(1, 1, n_warp) * sycl::range<3>(1, 1, WARP_SIZE), sycl::range<3>(1, 1, WARP_SIZE)), - [=](sycl::nd_item<3> item_ct1) [[intel::reqd_sub_group_size(WARP_SIZE)]]{ + [=](sycl::nd_item<3> item_ct1) [[sycl::reqd_sub_group_size(WARP_SIZE)]]{ dequantize_block_q4_0_reorder(vx, y, k, item_ct1); }); diff --git a/ggml/src/ggml-sycl/dmmv.cpp b/ggml/src/ggml-sycl/dmmv.cpp index 99d3859de8979..04a85fa35ff2d 100644 --- a/ggml/src/ggml-sycl/dmmv.cpp +++ b/ggml/src/ggml-sycl/dmmv.cpp @@ -210,7 +210,7 @@ static void convert_mul_mat_vec_f16_sycl(const void *vx, const dfloat *y, stream->parallel_for( sycl::nd_range<3>(block_nums * block_dims, block_dims), - [=](sycl::nd_item<3> item_ct1) [[intel::reqd_sub_group_size(WARP_SIZE)]] { + [=](sycl::nd_item<3> item_ct1) [[sycl::reqd_sub_group_size(WARP_SIZE)]] { dequantize_mul_mat_vec<1, 1, convert_f16>(vx, y, dst, ncols, nrows, item_ct1); }); @@ -879,7 +879,7 @@ static void dequantize_mul_mat_vec_q4_0_sycl_reorder(const void *vx, const dfloa stream->parallel_for( sycl::nd_range<3>(block_nums * block_dims, block_dims), - [=](sycl::nd_item<3> item_ct1) [[intel::reqd_sub_group_size(WARP_SIZE)]] { + [=](sycl::nd_item<3> item_ct1) [[sycl::reqd_sub_group_size(WARP_SIZE)]] { dequantize_mul_mat_vec_reorder( vx, y, dst, ncols, nrows, item_ct1); }); @@ -902,7 +902,7 @@ static void dequantize_mul_mat_vec_q4_0_sycl(const void *vx, const dfloat *y, stream->parallel_for( sycl::nd_range<3>(block_nums * block_dims, block_dims), - [=](sycl::nd_item<3> item_ct1) [[intel::reqd_sub_group_size(WARP_SIZE)]] { + [=](sycl::nd_item<3> item_ct1) [[sycl::reqd_sub_group_size(WARP_SIZE)]] { dequantize_mul_mat_vec( vx, y, dst, ncols, nrows, item_ct1); }); @@ -923,7 +923,7 @@ static void dequantize_mul_mat_vec_q4_1_sycl(const void *vx, const dfloat *y, stream->parallel_for( sycl::nd_range<3>(block_nums * block_dims, block_dims), - [=](sycl::nd_item<3> item_ct1) [[intel::reqd_sub_group_size(WARP_SIZE)]] { + 
[=](sycl::nd_item<3> item_ct1) [[sycl::reqd_sub_group_size(WARP_SIZE)]] { dequantize_mul_mat_vec( vx, y, dst, ncols, nrows, item_ct1); }); @@ -944,7 +944,7 @@ static void dequantize_mul_mat_vec_q5_0_sycl(const void *vx, const dfloat *y, stream->parallel_for( sycl::nd_range<3>(block_nums * block_dims, block_dims), - [=](sycl::nd_item<3> item_ct1) [[intel::reqd_sub_group_size(WARP_SIZE)]] { + [=](sycl::nd_item<3> item_ct1) [[sycl::reqd_sub_group_size(WARP_SIZE)]] { dequantize_mul_mat_vec( vx, y, dst, ncols, nrows, item_ct1); }); @@ -965,7 +965,7 @@ static void dequantize_mul_mat_vec_q5_1_sycl(const void *vx, const dfloat *y, stream->parallel_for( sycl::nd_range<3>(block_nums * block_dims, block_dims), - [=](sycl::nd_item<3> item_ct1) [[intel::reqd_sub_group_size(WARP_SIZE)]] { + [=](sycl::nd_item<3> item_ct1) [[sycl::reqd_sub_group_size(WARP_SIZE)]] { dequantize_mul_mat_vec( vx, y, dst, ncols, nrows, item_ct1); }); @@ -986,7 +986,7 @@ static void dequantize_mul_mat_vec_q8_0_sycl(const void *vx, const dfloat *y, stream->parallel_for( sycl::nd_range<3>(block_nums * block_dims, block_dims), - [=](sycl::nd_item<3> item_ct1) [[intel::reqd_sub_group_size(WARP_SIZE)]] { + [=](sycl::nd_item<3> item_ct1) [[sycl::reqd_sub_group_size(WARP_SIZE)]] { dequantize_mul_mat_vec( vx, y, dst, ncols, nrows, item_ct1); }); @@ -1004,7 +1004,7 @@ static void dequantize_mul_mat_vec_q2_K_sycl(const void *vx, const float *y, const sycl::range<3> block_dims(1, ny, QK_WARP_SIZE); stream->parallel_for( sycl::nd_range<3>(block_nums * block_dims, block_dims), - [=](sycl::nd_item<3> item_ct1) [[intel::reqd_sub_group_size(QK_WARP_SIZE)]] { + [=](sycl::nd_item<3> item_ct1) [[sycl::reqd_sub_group_size(QK_WARP_SIZE)]] { dequantize_mul_mat_vec_q2_k(vx, y, dst, ncols, nrows, item_ct1); }); } @@ -1020,7 +1020,7 @@ static void dequantize_mul_mat_vec_q3_K_sycl(const void *vx, const float *y, const sycl::range<3> block_dims(1, ny, QK_WARP_SIZE); stream->parallel_for( sycl::nd_range<3>(block_nums * block_dims, block_dims), - [=](sycl::nd_item<3> item_ct1) [[intel::reqd_sub_group_size(QK_WARP_SIZE)]] { + [=](sycl::nd_item<3> item_ct1) [[sycl::reqd_sub_group_size(QK_WARP_SIZE)]] { dequantize_mul_mat_vec_q3_k(vx, y, dst, ncols, nrows, item_ct1); }); } @@ -1036,7 +1036,7 @@ static void dequantize_mul_mat_vec_q4_K_sycl(const void *vx, const float *y, const sycl::range<3> block_dims(1, ny, QK_WARP_SIZE); stream->parallel_for( sycl::nd_range<3>(block_nums * block_dims, block_dims), - [=](sycl::nd_item<3> item_ct1) [[intel::reqd_sub_group_size(QK_WARP_SIZE)]] { + [=](sycl::nd_item<3> item_ct1) [[sycl::reqd_sub_group_size(QK_WARP_SIZE)]] { dequantize_mul_mat_vec_q4_k(vx, y, dst, ncols, nrows, item_ct1); }); } @@ -1049,7 +1049,7 @@ static void dequantize_mul_mat_vec_q5_K_sycl(const void *vx, const float *y, const sycl::range<3> block_dims(1, 1, QK_WARP_SIZE); stream->parallel_for( sycl::nd_range<3>(sycl::range<3>(1, 1, nrows) * block_dims, block_dims), - [=](sycl::nd_item<3> item_ct1) [[intel::reqd_sub_group_size(QK_WARP_SIZE)]] { + [=](sycl::nd_item<3> item_ct1) [[sycl::reqd_sub_group_size(QK_WARP_SIZE)]] { dequantize_mul_mat_vec_q5_k(vx, y, dst, ncols, item_ct1); }); } @@ -1065,7 +1065,7 @@ static void dequantize_mul_mat_vec_q6_K_sycl(const void *vx, const float *y, const sycl::range<3> block_dims(1, ny, QK_WARP_SIZE); stream->parallel_for( sycl::nd_range<3>(block_nums * block_dims, block_dims), - [=](sycl::nd_item<3> item_ct1) [[intel::reqd_sub_group_size(QK_WARP_SIZE)]] { + [=](sycl::nd_item<3> item_ct1) 
[[sycl::reqd_sub_group_size(QK_WARP_SIZE)]] { dequantize_mul_mat_vec_q6_k(vx, y, dst, ncols, nrows, item_ct1); }); } @@ -1143,7 +1143,6 @@ void ggml_sycl_op_dequantize_mul_mat_vec( default: printf("ggml_sycl_op_dequantize_mul_mat_vec unsupported GGML_TYPE %d\n", src0->type); GGML_ABORT("fatal error"); - break; } GGML_UNUSED(src1); diff --git a/ggml/src/ggml-sycl/element_wise.cpp b/ggml/src/ggml-sycl/element_wise.cpp index 4bcd74376eaac..1e12cb220e4c1 100644 --- a/ggml/src/ggml-sycl/element_wise.cpp +++ b/ggml/src/ggml-sycl/element_wise.cpp @@ -1,7 +1,7 @@ #include "common.hpp" #include "element_wise.hpp" -void acc_f32(const float * x, const float * y, float * dst, const int ne, +static void acc_f32(const float * x, const float * y, float * dst, const int ne, const int ne10, const int ne11, const int ne12, const int nb1, const int nb2, int offset, const sycl::nd_item<3> &item_ct1) { const int i = item_ct1.get_local_range(2) * item_ct1.get_group(2) + @@ -20,7 +20,7 @@ void acc_f32(const float * x, const float * y, float * dst, const int ne, } } -void gelu_f32(const float * x, float * dst, const int k, +static void gelu_f32(const float * x, float * dst, const int k, const sycl::nd_item<3> &item_ct1) { const float GELU_COEF_A = 0.044715f; const float SQRT_2_OVER_PI = 0.79788456080286535587989211986876f; @@ -37,7 +37,7 @@ void gelu_f32(const float * x, float * dst, const int k, sycl::tanh(SQRT_2_OVER_PI * xi * (1.0f + GELU_COEF_A * xi * xi))); } -void silu_f32(const float * x, float * dst, const int k, +static void silu_f32(const float * x, float * dst, const int k, const sycl::nd_item<3> &item_ct1) { const int i = item_ct1.get_local_range(2) * item_ct1.get_group(2) + item_ct1.get_local_id(2); @@ -48,7 +48,7 @@ void silu_f32(const float * x, float * dst, const int k, dst[i] = x[i] / (1.0f + sycl::native::exp(-x[i])); } -void gelu_quick_f32(const float *x, float *dst, int k, +static void gelu_quick_f32(const float *x, float *dst, int k, const sycl::nd_item<3> &item_ct1) { const float GELU_QUICK_COEF = -1.702f; const int i = item_ct1.get_local_range(2) * item_ct1.get_group(2) + @@ -59,7 +59,7 @@ void gelu_quick_f32(const float *x, float *dst, int k, dst[i] = x[i] * (1.0f / (1.0f + sycl::native::exp(GELU_QUICK_COEF * x[i]))); } -void tanh_f32(const float *x, float *dst, int k, +static void tanh_f32(const float *x, float *dst, int k, const sycl::nd_item<3> &item_ct1) { const int i = item_ct1.get_local_range(2) * item_ct1.get_group(2) + item_ct1.get_local_id(2); @@ -69,7 +69,7 @@ void tanh_f32(const float *x, float *dst, int k, dst[i] = sycl::tanh((float)(x[i])); } -void relu_f32(const float * x, float * dst, const int k, +static void relu_f32(const float * x, float * dst, const int k, const sycl::nd_item<3> &item_ct1) { const int i = item_ct1.get_local_range(2) * item_ct1.get_group(2) + item_ct1.get_local_id(2); @@ -80,7 +80,7 @@ void relu_f32(const float * x, float * dst, const int k, dst[i] = sycl::fmax((float)(x[i]), (float)0); } -void sigmoid_f32(const float * x, float * dst, const int k, +static void sigmoid_f32(const float * x, float * dst, const int k, const sycl::nd_item<3> &item_ct1) { const int i = item_ct1.get_local_range(2) * item_ct1.get_group(2) + item_ct1.get_local_id(2); @@ -91,7 +91,7 @@ void sigmoid_f32(const float * x, float * dst, const int k, dst[i] = 1.0f / (1.0f + sycl::native::exp(-x[i])); } -void sqrt_f32(const float * x, float * dst, const int k, +static void sqrt_f32(const float * x, float * dst, const int k, const sycl::nd_item<3> &item_ct1) { const int i = 
item_ct1.get_local_range(2) * item_ct1.get_group(2) + item_ct1.get_local_id(2); @@ -102,7 +102,7 @@ void sqrt_f32(const float * x, float * dst, const int k, dst[i] = sycl::sqrt(x[i]); } -void sin_f32(const float * x, float * dst, const int k, +static void sin_f32(const float * x, float * dst, const int k, const sycl::nd_item<3> &item_ct1) { const int i = item_ct1.get_local_range(2) * item_ct1.get_group(2) + item_ct1.get_local_id(2); @@ -113,7 +113,7 @@ void sin_f32(const float * x, float * dst, const int k, dst[i] = sycl::sin(x[i]); } -void cos_f32(const float * x, float * dst, const int k, +static void cos_f32(const float * x, float * dst, const int k, const sycl::nd_item<3> &item_ct1) { const int i = item_ct1.get_local_range(2) * item_ct1.get_group(2) + item_ct1.get_local_id(2); @@ -124,7 +124,7 @@ void cos_f32(const float * x, float * dst, const int k, dst[i] = sycl::cos(x[i]); } -void hardsigmoid_f32(const float * x, float * dst, const int k, +static void hardsigmoid_f32(const float * x, float * dst, const int k, const sycl::nd_item<3> &item_ct1) { const int i = item_ct1.get_local_range(2) * item_ct1.get_group(2) + item_ct1.get_local_id(2); @@ -135,7 +135,7 @@ void hardsigmoid_f32(const float * x, float * dst, const int k, dst[i] = sycl::fmin(1.0f, sycl::fmax(0.0f, (x[i] + 3.0f) / 6.0f)); } -void hardswish_f32(const float * x, float * dst, const int k, +static void hardswish_f32(const float * x, float * dst, const int k, const sycl::nd_item<3> &item_ct1) { const int i = item_ct1.get_local_range(2) * item_ct1.get_group(2) + item_ct1.get_local_id(2); @@ -146,7 +146,7 @@ void hardswish_f32(const float * x, float * dst, const int k, dst[i] = x[i] * sycl::fmin(1.0f, sycl::fmax(0.0f, (x[i] + 3.0f) / 6.0f)); } -void exp_f32(const float * x, float * dst, const int k, +static void exp_f32(const float * x, float * dst, const int k, const sycl::nd_item<3> &item_ct1) { const int i = item_ct1.get_local_range(2) * item_ct1.get_group(2) + item_ct1.get_local_id(2); @@ -157,7 +157,7 @@ void exp_f32(const float * x, float * dst, const int k, dst[i] = sycl::exp(x[i]); } -void log_f32(const float * x, float * dst, const int k, +static void log_f32(const float * x, float * dst, const int k, const sycl::nd_item<3> &item_ct1) { const int i = item_ct1.get_local_range(2) * item_ct1.get_group(2) + item_ct1.get_local_id(2); @@ -173,7 +173,7 @@ void log_f32(const float * x, float * dst, const int k, } } -void neg_f32(const float * x, float * dst, const int k, +static void neg_f32(const float * x, float * dst, const int k, const sycl::nd_item<3> &item_ct1) { const int i = item_ct1.get_local_range(2) * item_ct1.get_group(2) + item_ct1.get_local_id(2); @@ -184,7 +184,7 @@ void neg_f32(const float * x, float * dst, const int k, dst[i] = -x[i]; } -void step_f32(const float * x, float * dst, const int k, +static void step_f32(const float * x, float * dst, const int k, const sycl::nd_item<3> &item_ct1) { const int i = item_ct1.get_local_range(2) * item_ct1.get_group(2) + item_ct1.get_local_id(2); @@ -195,7 +195,7 @@ void step_f32(const float * x, float * dst, const int k, dst[i] = x[i] > 0.0f; } -void leaky_relu_f32(const float *x, float *dst, const int k, const float negative_slope, +static void leaky_relu_f32(const float *x, float *dst, const int k, const float negative_slope, const sycl::nd_item<3> &item_ct1) { const int i = item_ct1.get_local_range(2) * item_ct1.get_group(2) + item_ct1.get_local_id(2); @@ -206,7 +206,7 @@ void leaky_relu_f32(const float *x, float *dst, const int k, const float negativ 
sycl::fmin((float)(x[i]), 0.0f) * negative_slope; } -void sqr_f32(const float * x, float * dst, const int k, +static void sqr_f32(const float * x, float * dst, const int k, const sycl::nd_item<3> &item_ct1) { const int i = item_ct1.get_local_range(2) * item_ct1.get_group(2) + item_ct1.get_local_id(2); @@ -217,7 +217,7 @@ void sqr_f32(const float * x, float * dst, const int k, dst[i] = x[i] * x[i]; } -void upscale_f32(const float *x, float *dst, const int nb00, const int nb01, +static void upscale_f32(const float *x, float *dst, const int nb00, const int nb01, const int nb02, const int nb03, const int ne10, const int ne11, const int ne12, const int ne13, const float sf0, const float sf1, const float sf2, const float sf3, const sycl::nd_item<1> &item_ct1) { @@ -240,7 +240,7 @@ void upscale_f32(const float *x, float *dst, const int nb00, const int nb01, dst[index] = *(const float *)((const char *)x + i03 * nb03 + i02 * nb02 + i01 * nb01 + i00 * nb00); } -void pad_f32(const float *x, float *dst, const int ne0, const int ne00, const int ne01, const int ne02, +static void pad_f32(const float *x, float *dst, const int ne0, const int ne00, const int ne01, const int ne02, const sycl::nd_item<3> &item_ct1) { int nidx = item_ct1.get_local_id(2) + item_ct1.get_group(2) * item_ct1.get_local_range(2); @@ -262,7 +262,7 @@ void pad_f32(const float *x, float *dst, const int ne0, const int ne00, const i -void acc_f32_sycl(const float *x, const float *y, float *dst, +static void acc_f32_sycl(const float *x, const float *y, float *dst, const int n_elements, const int ne10, const int ne11, const int ne12, const int nb1, const int nb2, const int offset, queue_ptr stream) { @@ -277,7 +277,7 @@ void acc_f32_sycl(const float *x, const float *y, float *dst, }); } -void gelu_f32_sycl(const float *x, float *dst, const int k, +static void gelu_f32_sycl(const float *x, float *dst, const int k, queue_ptr stream) { const int num_blocks = (k + SYCL_GELU_BLOCK_SIZE - 1) / SYCL_GELU_BLOCK_SIZE; stream->parallel_for( @@ -289,7 +289,7 @@ void gelu_f32_sycl(const float *x, float *dst, const int k, }); } -void silu_f32_sycl(const float *x, float *dst, const int k, +static void silu_f32_sycl(const float *x, float *dst, const int k, queue_ptr stream) { const int num_blocks = (k + SYCL_SILU_BLOCK_SIZE - 1) / SYCL_SILU_BLOCK_SIZE; stream->parallel_for( @@ -301,7 +301,7 @@ void silu_f32_sycl(const float *x, float *dst, const int k, }); } -void gelu_quick_f32_sycl(const float *x, float *dst, const int k, +static void gelu_quick_f32_sycl(const float *x, float *dst, const int k, queue_ptr stream) { const int num_blocks = (k + SYCL_GELU_BLOCK_SIZE - 1) / SYCL_GELU_BLOCK_SIZE; stream->parallel_for( @@ -313,7 +313,7 @@ void gelu_quick_f32_sycl(const float *x, float *dst, const int k, }); } -void tanh_f32_sycl(const float *x, float *dst, const int k, +static void tanh_f32_sycl(const float *x, float *dst, const int k, queue_ptr stream) { const int num_blocks = (k + SYCL_TANH_BLOCK_SIZE - 1) / SYCL_TANH_BLOCK_SIZE; stream->parallel_for( @@ -325,7 +325,7 @@ void tanh_f32_sycl(const float *x, float *dst, const int k, }); } -void relu_f32_sycl(const float *x, float *dst, const int k, +static void relu_f32_sycl(const float *x, float *dst, const int k, queue_ptr stream) { const int num_blocks = (k + SYCL_RELU_BLOCK_SIZE - 1) / SYCL_RELU_BLOCK_SIZE; stream->parallel_for( @@ -337,7 +337,7 @@ void relu_f32_sycl(const float *x, float *dst, const int k, }); } -void hardsigmoid_f32_sycl(const float *x, float *dst, const int k, +static void 
hardsigmoid_f32_sycl(const float *x, float *dst, const int k, queue_ptr stream) { const int num_blocks = (k + SYCL_HARDSIGMOID_BLOCK_SIZE - 1) / SYCL_HARDSIGMOID_BLOCK_SIZE; stream->parallel_for( @@ -349,7 +349,7 @@ void hardsigmoid_f32_sycl(const float *x, float *dst, const int k, }); } -void hardswish_f32_sycl(const float *x, float *dst, const int k, +static void hardswish_f32_sycl(const float *x, float *dst, const int k, queue_ptr stream) { const int num_blocks = (k + SYCL_HARDSWISH_BLOCK_SIZE - 1) / SYCL_HARDSWISH_BLOCK_SIZE; stream->parallel_for( @@ -361,7 +361,7 @@ void hardswish_f32_sycl(const float *x, float *dst, const int k, }); } -void exp_f32_sycl(const float *x, float *dst, const int k, +static void exp_f32_sycl(const float *x, float *dst, const int k, queue_ptr stream) { const int num_blocks = (k + SYCL_EXP_BLOCK_SIZE - 1) / SYCL_EXP_BLOCK_SIZE; stream->parallel_for( @@ -373,7 +373,7 @@ void exp_f32_sycl(const float *x, float *dst, const int k, }); } -void log_f32_sycl(const float *x, float *dst, const int k, +static void log_f32_sycl(const float *x, float *dst, const int k, queue_ptr stream) { const int num_blocks = (k + SYCL_EXP_BLOCK_SIZE - 1) / SYCL_EXP_BLOCK_SIZE; stream->parallel_for( @@ -385,7 +385,7 @@ void log_f32_sycl(const float *x, float *dst, const int k, }); } -void neg_f32_sycl(const float *x, float *dst, const int k, +static void neg_f32_sycl(const float *x, float *dst, const int k, queue_ptr stream) { const int num_blocks = (k + SYCL_NEG_BLOCK_SIZE - 1) / SYCL_NEG_BLOCK_SIZE; stream->parallel_for( @@ -397,7 +397,7 @@ void neg_f32_sycl(const float *x, float *dst, const int k, }); } -void step_f32_sycl(const float *x, float *dst, const int k, +static void step_f32_sycl(const float *x, float *dst, const int k, queue_ptr stream) { const int num_blocks = (k + SYCL_NEG_BLOCK_SIZE - 1) / SYCL_NEG_BLOCK_SIZE; stream->parallel_for( @@ -409,7 +409,7 @@ void step_f32_sycl(const float *x, float *dst, const int k, }); } -void sigmoid_f32_sycl(const float *x, float *dst, const int k, +static void sigmoid_f32_sycl(const float *x, float *dst, const int k, queue_ptr stream) { const int num_blocks = (k + SYCL_SIGMOID_BLOCK_SIZE - 1) / SYCL_SIGMOID_BLOCK_SIZE; stream->parallel_for( @@ -421,7 +421,7 @@ void sigmoid_f32_sycl(const float *x, float *dst, const int k, }); } -void sqrt_f32_sycl(const float *x, float *dst, const int k, +static void sqrt_f32_sycl(const float *x, float *dst, const int k, queue_ptr stream) { const int num_blocks = (k + SYCL_SQRT_BLOCK_SIZE - 1) / SYCL_SQRT_BLOCK_SIZE; stream->parallel_for( @@ -433,7 +433,7 @@ void sqrt_f32_sycl(const float *x, float *dst, const int k, }); } -void sin_f32_sycl(const float *x, float *dst, const int k, +static void sin_f32_sycl(const float *x, float *dst, const int k, queue_ptr stream) { const int num_blocks = (k + SYCL_SIN_BLOCK_SIZE - 1) / SYCL_SIN_BLOCK_SIZE; stream->parallel_for( @@ -445,7 +445,7 @@ void sin_f32_sycl(const float *x, float *dst, const int k, }); } -void cos_f32_sycl(const float *x, float *dst, const int k, +static void cos_f32_sycl(const float *x, float *dst, const int k, queue_ptr stream) { const int num_blocks = (k + SYCL_SIN_BLOCK_SIZE - 1) / SYCL_SIN_BLOCK_SIZE; stream->parallel_for( @@ -457,7 +457,7 @@ void cos_f32_sycl(const float *x, float *dst, const int k, }); } -void leaky_relu_f32_sycl(const float *x, float *dst, const int k, +static void leaky_relu_f32_sycl(const float *x, float *dst, const int k, const float negative_slope, queue_ptr stream) { const int num_blocks = (k + SYCL_RELU_BLOCK_SIZE 
- 1) / SYCL_RELU_BLOCK_SIZE; @@ -470,7 +470,7 @@ void leaky_relu_f32_sycl(const float *x, float *dst, const int k, }); } -void sqr_f32_sycl(const float *x, float *dst, const int k, +static void sqr_f32_sycl(const float *x, float *dst, const int k, queue_ptr stream) { const int num_blocks = (k + SYCL_SQR_BLOCK_SIZE - 1) / SYCL_SQR_BLOCK_SIZE; stream->parallel_for( @@ -482,7 +482,7 @@ void sqr_f32_sycl(const float *x, float *dst, const int k, }); } -void upscale_f32_sycl(const float *x, float *dst, const int nb00, const int nb01, +static void upscale_f32_sycl(const float *x, float *dst, const int nb00, const int nb01, const int nb02, const int nb03, const int ne10, const int ne11, const int ne12, const int ne13, const float sf0, const float sf1, const float sf2, const float sf3, queue_ptr stream) { @@ -496,7 +496,7 @@ void upscale_f32_sycl(const float *x, float *dst, const int nb00, const int nb01 }); } -void pad_f32_sycl(const float *x, float *dst, const int ne00, +static void pad_f32_sycl(const float *x, float *dst, const int ne00, const int ne01, const int ne02, const int ne0, const int ne1, const int ne2, queue_ptr stream) { int num_blocks = (ne0 + SYCL_PAD_BLOCK_SIZE - 1) / SYCL_PAD_BLOCK_SIZE; diff --git a/ggml/src/ggml-sycl/getrows.cpp b/ggml/src/ggml-sycl/getrows.cpp index 51c19f6b3b90c..b9cf8767cbab2 100644 --- a/ggml/src/ggml-sycl/getrows.cpp +++ b/ggml/src/ggml-sycl/getrows.cpp @@ -207,7 +207,7 @@ static void get_rows_sycl_reorder(ggml_backend_sycl_context & ctx, const ggml_te const size_t nrows = ne01; const sycl::half* src0_dq = (const sycl::half*)(src0_q + nrows * ncols / 2); stream->parallel_for(sycl::nd_range<3>(block_nums * block_dims, block_dims), - [=](sycl::nd_item<3> item_ct1) [[intel::reqd_sub_group_size(WARP_SIZE)]]{ + [=](sycl::nd_item<3> item_ct1) [[sycl::reqd_sub_group_size(WARP_SIZE)]]{ k_get_rows_reorder( src0_dd, src0_dq, src1_dd, dst_dd, ne00, ne12, s1, s2, s3, nb01, nb02, nb03, s10, s11, s12, item_ct1); @@ -302,7 +302,6 @@ void ggml_sycl_op_get_rows(ggml_backend_sycl_context & ctx, const ggml_tensor *s // TODO: k-quants GGML_LOG_ERROR("%s: unsupported type: %s\n", __func__, ggml_type_name(src0->type)); GGML_ABORT("fatal error"); - break; } } diff --git a/ggml/src/ggml-sycl/ggml-sycl.cpp b/ggml/src/ggml-sycl/ggml-sycl.cpp index 477652ab283ee..207c0b440a052 100644 --- a/ggml/src/ggml-sycl/ggml-sycl.cpp +++ b/ggml/src/ggml-sycl/ggml-sycl.cpp @@ -95,7 +95,7 @@ const ggml_sycl_device_info & ggml_sycl_info() { return info; } -void print_device_detail(int id, sycl::device &device, std::string device_type) { +static void print_device_detail(int id, sycl::device &device, std::string device_type) { dpct::device_info prop; SYCL_CHECK(CHECK_TRY_ERROR( @@ -118,7 +118,7 @@ void print_device_detail(int id, sycl::device &device, std::string device_type) global_mem_size, device.get_info().c_str()); } -void print_device_opt_feature(int device_count) { +static void print_device_opt_feature(int device_count) { GGML_LOG_INFO("SYCL Optimization Feature:\n"); GGML_LOG_INFO( "|ID| Device Type|Reorder|\n"); @@ -401,7 +401,7 @@ catch (sycl::exception const &exc) { std::exit(1); } -void dev2dev_memcpy(sycl::queue &q_dst, sycl::queue &q_src, void *ptr_dst, +static void dev2dev_memcpy(sycl::queue &q_dst, sycl::queue &q_src, void *ptr_dst, const void *ptr_src, size_t size) { char *host_buf = (char *)malloc(size); q_src.memcpy(host_buf, (const char *)ptr_src, size).wait(); @@ -620,7 +620,7 @@ ggml_backend_buffer_type_t ggml_backend_sycl_buffer_type(int device) { return 
&ggml_backend_sycl_buffer_types[device]; } -ggml_backend_buffer_type_t ggml_backend_sycl_buffer_type(ggml_backend_sycl_context * ctx) { +static ggml_backend_buffer_type_t ggml_backend_sycl_buffer_type(ggml_backend_sycl_context * ctx) { GGML_SYCL_DEBUG("[SYCL] call ggml_backend_sycl_buffer_type\n"); int device = ctx->device; @@ -1682,7 +1682,7 @@ static void quantize_row_q8_1_sycl(const float *x, void *vy, const int kx, stream->parallel_for( sycl::nd_range<3>(num_blocks * block_size, block_size), - [=](sycl::nd_item<3> item_ct1) [[intel::reqd_sub_group_size(WARP_SIZE)]] { + [=](sycl::nd_item<3> item_ct1) [[sycl::reqd_sub_group_size(WARP_SIZE)]] { quantize_q8_1(x, vy, kx, kx_padded, item_ct1); }); } @@ -1703,7 +1703,7 @@ static void ggml_mul_mat_p021_f16_f32_sycl(const void *vx, const float *y, stream->parallel_for( sycl::nd_range<3>(block_nums * block_dims, block_dims), - [=](sycl::nd_item<3> item_ct1) [[intel::reqd_sub_group_size(WARP_SIZE)]] { + [=](sycl::nd_item<3> item_ct1) [[sycl::reqd_sub_group_size(WARP_SIZE)]] { mul_mat_p021_f16_f32(vx, y, dst, ncols_x, nrows_x, nchannels_x, nchannels_y, item_ct1); }); @@ -1723,7 +1723,7 @@ static void ggml_mul_mat_vec_nc_f16_f32_sycl( stream->parallel_for( sycl::nd_range<3>(block_nums * block_dims, block_dims), - [=](sycl::nd_item<3> item_ct1) [[intel::reqd_sub_group_size(WARP_SIZE)]] { + [=](sycl::nd_item<3> item_ct1) [[sycl::reqd_sub_group_size(WARP_SIZE)]] { mul_mat_vec_nc_f16_f32(vx, y, dst, ncols_x, nrows_x, row_stride_x, channel_stride_x, nchannels_y / nchannels_x, item_ct1); @@ -1764,7 +1764,7 @@ static void sum_rows_f32_sycl(const float *x, float *dst, const int ncols, const sycl::range<3> block_nums(1, nrows, 1); stream->parallel_for(sycl::nd_range<3>(block_nums * block_dims, block_dims), [=](sycl::nd_item<3> item_ct1) - [[intel::reqd_sub_group_size(WARP_SIZE)]] { + [[sycl::reqd_sub_group_size(WARP_SIZE)]] { k_sum_rows_f32(x, dst, ncols, item_ct1); }); } @@ -2920,7 +2920,7 @@ inline bool ggml_sycl_supports_mmq(enum ggml_type type) { return false; } -bool ggml_sycl_supports_dmmv(enum ggml_type type) { +static bool ggml_sycl_supports_dmmv(enum ggml_type type) { switch (type) { case GGML_TYPE_Q4_0: case GGML_TYPE_Q4_1: @@ -3293,7 +3293,7 @@ static void ggml_sycl_argmax(ggml_backend_sycl_context & ctx, ggml_tensor * dst) } -void ggml_sycl_set_main_device(const int main_device) try { +static void ggml_sycl_set_main_device(const int main_device) try { if (dpct::get_current_device_id() == static_cast (main_device)) { return; } @@ -3314,7 +3314,7 @@ catch (sycl::exception const &exc) { std::exit(1); } -bool ggml_sycl_compute_forward(ggml_backend_sycl_context & ctx, struct ggml_tensor * dst) { +static bool ggml_sycl_compute_forward(ggml_backend_sycl_context & ctx, struct ggml_tensor * dst) { if (!g_sycl_loaded) return false; if (dst->src[0] != nullptr && ggml_backend_buffer_is_sycl_split(dst->src[0]->buffer)) { @@ -3638,7 +3638,7 @@ catch (sycl::exception const &exc) { std::exit(1); } -void reorder_qw(char *data_device, const int ncols, const int nrows, +static void reorder_qw(char *data_device, const int ncols, const int nrows, size_t size, size_t offset, dpct::queue_ptr stream) { auto tmp_buf = sycl::malloc_shared(size, *stream); SYCL_CHECK( @@ -3652,7 +3652,7 @@ void reorder_qw(char *data_device, const int ncols, const int nrows, stream->parallel_for( size / sizeof(block_q4_0), - [=](auto i) [[intel::reqd_sub_group_size(WARP_SIZE)]] { + [=](auto i) [[sycl::reqd_sub_group_size(WARP_SIZE)]] { const block_q4_0* x = (const block_q4_0*)tmp_buf; const 
int ib = i; @@ -3666,7 +3666,7 @@ void reorder_qw(char *data_device, const int ncols, const int nrows, sycl::free(tmp_buf, *stream); } -void reorder_qw(ggml_tensor * src0, dpct::queue_ptr stream) { +static void reorder_qw(ggml_tensor * src0, dpct::queue_ptr stream) { char*data_device = (char*)src0->data; size_t ncols = src0->ne[0]; size_t nrows = src0->ne[1]; @@ -3675,7 +3675,7 @@ void reorder_qw(ggml_tensor * src0, dpct::queue_ptr stream) { reorder_qw(data_device, ncols, nrows, size, 0, stream); } -void opt_for_reorder(ggml_tensor * dst, dpct::queue_ptr stream) { +static void opt_for_reorder(ggml_tensor * dst, dpct::queue_ptr stream) { ggml_tensor *src0 = dst->src[0]; ggml_tensor *src1 = dst->src[1]; @@ -3688,7 +3688,7 @@ void opt_for_reorder(ggml_tensor * dst, dpct::queue_ptr stream) { } } -void optimize_graph_once(ggml_cgraph * cgraph, ggml_backend_sycl_context * ctx) { +static void optimize_graph_once(ggml_cgraph * cgraph, ggml_backend_sycl_context * ctx) { dpct::queue_ptr stream = ctx->stream(); if (ctx->optimized_graph) { return; @@ -3878,7 +3878,7 @@ static bool ggml_backend_sycl_device_supports_op(ggml_backend_dev_t dev, const g return true; } return false; - } break; + } case GGML_OP_UNARY: switch (ggml_get_unary_op(op)) { case GGML_UNARY_OP_NEG: @@ -3896,7 +3896,6 @@ static bool ggml_backend_sycl_device_supports_op(ggml_backend_dev_t dev, const g default: return false; } - break; case GGML_OP_MUL_MAT: case GGML_OP_MUL_MAT_ID: { @@ -3927,7 +3926,7 @@ static bool ggml_backend_sycl_device_supports_op(ggml_backend_dev_t dev, const g return false; } return true; - } break; + } case GGML_OP_OUT_PROD: return op->type == GGML_TYPE_F32 && op->src[0]->type == GGML_TYPE_F32 && op->src[1]->type == GGML_TYPE_F32 && op->ne[2] == 1 && op->ne[3] == 1; case GGML_OP_GET_ROWS: @@ -3944,7 +3943,7 @@ static bool ggml_backend_sycl_device_supports_op(ggml_backend_dev_t dev, const g default: return false; } - } break; + } case GGML_OP_CPY: { ggml_type src0_type = op->src[0]->type; @@ -3995,12 +3994,12 @@ static bool ggml_backend_sycl_device_supports_op(ggml_backend_dev_t dev, const g return true; } return false; - } break; + } case GGML_OP_CONCAT: { ggml_type src0_type = op->src[0]->type; return src0_type != GGML_TYPE_I32 && src0_type != GGML_TYPE_I16; - } break; + } case GGML_OP_DUP: case GGML_OP_ARGMAX: case GGML_OP_NONE: diff --git a/ggml/src/ggml-sycl/mmq.cpp b/ggml/src/ggml-sycl/mmq.cpp index 8ea82c940c788..ffb272aa28378 100644 --- a/ggml/src/ggml-sycl/mmq.cpp +++ b/ggml/src/ggml-sycl/mmq.cpp @@ -3017,7 +3017,6 @@ void ggml_sycl_op_mul_mat_q( break; default: GGML_ABORT("fatal error"); - break; } GGML_UNUSED(src1); diff --git a/ggml/src/ggml-sycl/mmvq.cpp b/ggml/src/ggml-sycl/mmvq.cpp index a96286d710153..1b92ba2d6047e 100644 --- a/ggml/src/ggml-sycl/mmvq.cpp +++ b/ggml/src/ggml-sycl/mmvq.cpp @@ -495,7 +495,7 @@ static void mul_mat_vec_q4_0_q8_1_sycl(const void *vx, const void *vy, cgh.parallel_for( sycl::nd_range<3>(block_nums * block_dims, block_dims), [=](sycl::nd_item<3> item_ct1) - [[intel::reqd_sub_group_size(WARP_SIZE)]] { + [[sycl::reqd_sub_group_size(WARP_SIZE)]] { mul_mat_vec_q( vx, vy, dst, ncols, nrows, item_ct1); @@ -519,7 +519,7 @@ static void mul_mat_vec_q4_1_q8_1_sycl(const void *vx, const void *vy, cgh.parallel_for( sycl::nd_range<3>(block_nums * block_dims, block_dims), [=](sycl::nd_item<3> item_ct1) - [[intel::reqd_sub_group_size(WARP_SIZE)]] { + [[sycl::reqd_sub_group_size(WARP_SIZE)]] { mul_mat_vec_q( vx, vy, dst, ncols, nrows, item_ct1); @@ -543,7 +543,7 @@ static void 
mul_mat_vec_q5_0_q8_1_sycl(const void *vx, const void *vy, cgh.parallel_for( sycl::nd_range<3>(block_nums * block_dims, block_dims), [=](sycl::nd_item<3> item_ct1) - [[intel::reqd_sub_group_size(WARP_SIZE)]] { + [[sycl::reqd_sub_group_size(WARP_SIZE)]] { mul_mat_vec_q( vx, vy, dst, ncols, nrows, item_ct1); @@ -567,7 +567,7 @@ static void mul_mat_vec_q5_1_q8_1_sycl(const void *vx, const void *vy, cgh.parallel_for( sycl::nd_range<3>(block_nums * block_dims, block_dims), [=](sycl::nd_item<3> item_ct1) - [[intel::reqd_sub_group_size(WARP_SIZE)]] { + [[sycl::reqd_sub_group_size(WARP_SIZE)]] { mul_mat_vec_q( vx, vy, dst, ncols, nrows, item_ct1); @@ -591,7 +591,7 @@ static void mul_mat_vec_q8_0_q8_1_sycl(const void *vx, const void *vy, cgh.parallel_for( sycl::nd_range<3>(block_nums * block_dims, block_dims), [=](sycl::nd_item<3> item_ct1) - [[intel::reqd_sub_group_size(WARP_SIZE)]] { + [[sycl::reqd_sub_group_size(WARP_SIZE)]] { mul_mat_vec_q( vx, vy, dst, ncols, nrows, item_ct1); @@ -615,7 +615,7 @@ static void mul_mat_vec_q2_K_q8_1_sycl(const void *vx, const void *vy, cgh.parallel_for( sycl::nd_range<3>(block_nums * block_dims, block_dims), [=](sycl::nd_item<3> item_ct1) - [[intel::reqd_sub_group_size(WARP_SIZE)]] { + [[sycl::reqd_sub_group_size(WARP_SIZE)]] { mul_mat_vec_q( vx, vy, dst, ncols, nrows, item_ct1); @@ -639,7 +639,7 @@ static void mul_mat_vec_q3_K_q8_1_sycl(const void *vx, const void *vy, cgh.parallel_for( sycl::nd_range<3>(block_nums * block_dims, block_dims), [=](sycl::nd_item<3> item_ct1) - [[intel::reqd_sub_group_size(WARP_SIZE)]] { + [[sycl::reqd_sub_group_size(WARP_SIZE)]] { mul_mat_vec_q( vx, vy, dst, ncols, nrows, item_ct1); @@ -663,7 +663,7 @@ static void mul_mat_vec_q4_K_q8_1_sycl(const void *vx, const void *vy, cgh.parallel_for( sycl::nd_range<3>(block_nums * block_dims, block_dims), [=](sycl::nd_item<3> item_ct1) - [[intel::reqd_sub_group_size(WARP_SIZE)]] { + [[sycl::reqd_sub_group_size(WARP_SIZE)]] { mul_mat_vec_q( vx, vy, dst, ncols, nrows, item_ct1); @@ -687,7 +687,7 @@ static void mul_mat_vec_q5_K_q8_1_sycl(const void *vx, const void *vy, cgh.parallel_for( sycl::nd_range<3>(block_nums * block_dims, block_dims), [=](sycl::nd_item<3> item_ct1) - [[intel::reqd_sub_group_size(WARP_SIZE)]] { + [[sycl::reqd_sub_group_size(WARP_SIZE)]] { mul_mat_vec_q( vx, vy, dst, ncols, nrows, item_ct1); @@ -711,7 +711,7 @@ static void mul_mat_vec_q6_K_q8_1_sycl(const void *vx, const void *vy, cgh.parallel_for( sycl::nd_range<3>(block_nums * block_dims, block_dims), [=](sycl::nd_item<3> item_ct1) - [[intel::reqd_sub_group_size(WARP_SIZE)]] { + [[sycl::reqd_sub_group_size(WARP_SIZE)]] { mul_mat_vec_q( vx, vy, dst, ncols, nrows, item_ct1); @@ -734,7 +734,7 @@ static void mul_mat_vec_iq2_xxs_q8_1_sycl(const void *vx, const void *vy, cgh.parallel_for( sycl::nd_range<3>(block_nums * block_dims, block_dims), [=](sycl::nd_item<3> item_ct1) - [[intel::reqd_sub_group_size(WARP_SIZE)]] { + [[sycl::reqd_sub_group_size(WARP_SIZE)]] { mul_mat_vec_q_iq2_xxs_q8_1( vx, vy, dst, ncols, nrows, item_ct1); }); @@ -755,7 +755,7 @@ static void mul_mat_vec_iq2_xs_q8_1_sycl(const void *vx, const void *vy, cgh.parallel_for( sycl::nd_range<3>(block_nums * block_dims, block_dims), [=](sycl::nd_item<3> item_ct1) - [[intel::reqd_sub_group_size(WARP_SIZE)]] { + [[sycl::reqd_sub_group_size(WARP_SIZE)]] { mul_mat_vec_q_iq2_xs_q8_1( vx, vy, dst, ncols, nrows, item_ct1); }); @@ -777,7 +777,7 @@ static void mul_mat_vec_iq2_s_q8_1_sycl(const void *vx, const void *vy, cgh.parallel_for( sycl::nd_range<3>(block_nums * 
block_dims, block_dims), [=](sycl::nd_item<3> item_ct1) - [[intel::reqd_sub_group_size(WARP_SIZE)]] { + [[sycl::reqd_sub_group_size(WARP_SIZE)]] { mul_mat_vec_q_iq2_s_q8_1( vx, vy, dst, ncols, nrows, item_ct1); }); @@ -799,7 +799,7 @@ static void mul_mat_vec_iq3_xxs_q8_1_sycl(const void *vx, const void *vy, cgh.parallel_for( sycl::nd_range<3>(block_nums * block_dims, block_dims), [=](sycl::nd_item<3> item_ct1) - [[intel::reqd_sub_group_size(WARP_SIZE)]] { + [[sycl::reqd_sub_group_size(WARP_SIZE)]] { mul_mat_vec_q_iq3_xxs_q8_1( vx, vy, dst, ncols, nrows, item_ct1); }); @@ -821,7 +821,7 @@ static void mul_mat_vec_iq3_s_q8_1_sycl(const void *vx, const void *vy, cgh.parallel_for( sycl::nd_range<3>(block_nums * block_dims, block_dims), [=](sycl::nd_item<3> item_ct1) - [[intel::reqd_sub_group_size(WARP_SIZE)]] { + [[sycl::reqd_sub_group_size(WARP_SIZE)]] { mul_mat_vec_q_iq3_s_q8_1( vx, vy, dst, ncols, nrows, item_ct1); }); @@ -843,7 +843,7 @@ static void mul_mat_vec_iq1_s_q8_1_sycl(const void *vx, const void *vy, cgh.parallel_for( sycl::nd_range<3>(block_nums * block_dims, block_dims), [=](sycl::nd_item<3> item_ct1) - [[intel::reqd_sub_group_size(WARP_SIZE)]] { + [[sycl::reqd_sub_group_size(WARP_SIZE)]] { mul_mat_vec_q_iq1_s_q8_1( vx, vy, dst, ncols, nrows, item_ct1); }); @@ -864,7 +864,7 @@ static void mul_mat_vec_iq1_m_q8_1_sycl(const void *vx, const void *vy, cgh.parallel_for( sycl::nd_range<3>(block_nums * block_dims, block_dims), [=](sycl::nd_item<3> item_ct1) - [[intel::reqd_sub_group_size(WARP_SIZE)]] { + [[sycl::reqd_sub_group_size(WARP_SIZE)]] { mul_mat_vec_q_iq1_m_q8_1( vx, vy, dst, ncols, nrows, item_ct1); }); @@ -886,7 +886,7 @@ static void mul_mat_vec_iq4_nl_q8_1_sycl(const void *vx, const void *vy, cgh.parallel_for( sycl::nd_range<3>(block_nums * block_dims, block_dims), [=](sycl::nd_item<3> item_ct1) - [[intel::reqd_sub_group_size(WARP_SIZE)]] { + [[sycl::reqd_sub_group_size(WARP_SIZE)]] { mul_mat_vec_q_iq4_nl_q8_1( vx, vy, dst, ncols, nrows, item_ct1); }); @@ -908,7 +908,7 @@ static void mul_mat_vec_iq4_xs_q8_1_sycl(const void *vx, const void *vy, cgh.parallel_for( sycl::nd_range<3>(block_nums * block_dims, block_dims), [=](sycl::nd_item<3> item_ct1) - [[intel::reqd_sub_group_size(WARP_SIZE)]] { + [[sycl::reqd_sub_group_size(WARP_SIZE)]] { mul_mat_vec_q_iq4_xs_q8_1( vx, vy, dst, ncols, nrows, item_ct1); }); @@ -1003,7 +1003,6 @@ void ggml_sycl_op_mul_mat_vec_q( break; default: GGML_ABORT("fatal error"); - break; } } GGML_UNUSED(src1); diff --git a/ggml/src/ggml-sycl/norm.cpp b/ggml/src/ggml-sycl/norm.cpp index 6439db21b2978..d9678da8f042e 100644 --- a/ggml/src/ggml-sycl/norm.cpp +++ b/ggml/src/ggml-sycl/norm.cpp @@ -235,7 +235,7 @@ static void norm_f32_sycl(const float* x, float* dst, const int ncols, sycl::nd_range<3>(sycl::range<3>(1, 1, nrows) * block_dims, block_dims), [=](sycl::nd_item<3> item_ct1) - [[intel::reqd_sub_group_size(WARP_SIZE)]] { + [[sycl::reqd_sub_group_size(WARP_SIZE)]] { norm_f32(x, dst, ncols, eps, item_ct1, nullptr, WARP_SIZE); }); @@ -258,7 +258,7 @@ static void norm_f32_sycl(const float* x, float* dst, const int ncols, sycl::nd_range<3>(sycl::range<3>(1, 1, nrows) * block_dims, block_dims), [=](sycl::nd_item<3> item_ct1) - [[intel::reqd_sub_group_size(WARP_SIZE)]] { + [[sycl::reqd_sub_group_size(WARP_SIZE)]] { norm_f32(x, dst, ncols, eps, item_ct1, get_pointer(s_sum_acc_ct1), work_group_size); }); @@ -277,7 +277,7 @@ static void group_norm_f32_sycl(const float* x, float* dst, sycl::nd_range<3>(sycl::range<3>(1, 1, num_groups) * block_dims, block_dims), 
[=](sycl::nd_item<3> item_ct1) - [[intel::reqd_sub_group_size(WARP_SIZE)]] { + [[sycl::reqd_sub_group_size(WARP_SIZE)]] { group_norm_f32( x, dst, group_size, ne_elements, eps_ct4, item_ct1, nullptr, WARP_SIZE); @@ -304,7 +304,7 @@ static void group_norm_f32_sycl(const float* x, float* dst, sycl::nd_range<3>(sycl::range<3>(1, 1, num_groups) * block_dims, block_dims), [=](sycl::nd_item<3> item_ct1) - [[intel::reqd_sub_group_size(WARP_SIZE)]] { + [[sycl::reqd_sub_group_size(WARP_SIZE)]] { group_norm_f32(x, dst, group_size, ne_elements, eps_ct4, item_ct1, get_pointer(s_sum_acc_ct1), work_group_size); @@ -325,7 +325,7 @@ static void rms_norm_f32_sycl(const float* x, float* dst, const int ncols, sycl::nd_range<3>(sycl::range<3>(1, 1, nrows) * block_dims, block_dims), [=](sycl::nd_item<3> item_ct1) - [[intel::reqd_sub_group_size(WARP_SIZE)]] { + [[sycl::reqd_sub_group_size(WARP_SIZE)]] { rms_norm_f32(x, dst, ncols, eps, item_ct1, nullptr, WARP_SIZE); }); @@ -347,7 +347,7 @@ static void rms_norm_f32_sycl(const float* x, float* dst, const int ncols, sycl::nd_range<3>(sycl::range<3>(1, 1, nrows) * block_dims, block_dims), [=](sycl::nd_item<3> item_ct1) - [[intel::reqd_sub_group_size(WARP_SIZE)]] { + [[sycl::reqd_sub_group_size(WARP_SIZE)]] { rms_norm_f32(x, dst, ncols, eps, item_ct1, get_pointer(s_sum_acc_ct1), work_group_size); }); diff --git a/ggml/src/ggml-sycl/softmax.cpp b/ggml/src/ggml-sycl/softmax.cpp index eb20bd251e172..7563d9ceda654 100644 --- a/ggml/src/ggml-sycl/softmax.cpp +++ b/ggml/src/ggml-sycl/softmax.cpp @@ -132,7 +132,7 @@ static void soft_max_f32_submitter(const float * x, const T * mask, float * dst, cgh.parallel_for( sycl::nd_range<3>(block_nums * block_dims, block_dims), - [=](sycl::nd_item<3> item_ct1) [[intel::reqd_sub_group_size(WARP_SIZE)]] { + [=](sycl::nd_item<3> item_ct1) [[sycl::reqd_sub_group_size(WARP_SIZE)]] { soft_max_f32(x, mask, dst, ncols_par, nrows_y, scale, max_bias, m0, m1, n_head_log2, item_ct1, From fd123cfead49eb32e386e26b8ef7a6d41554dda5 Mon Sep 17 00:00:00 2001 From: 0cc4m Date: Tue, 18 Mar 2025 07:21:40 +0100 Subject: [PATCH 074/398] Vulkan: Default to 1GB allocations instead of 4GB to avoid fragmentation and driver issues (#12434) --- ggml/src/ggml-vulkan/ggml-vulkan.cpp | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/ggml/src/ggml-vulkan/ggml-vulkan.cpp b/ggml/src/ggml-vulkan/ggml-vulkan.cpp index c0ee5dadef78a..dd680aa522438 100644 --- a/ggml/src/ggml-vulkan/ggml-vulkan.cpp +++ b/ggml/src/ggml-vulkan/ggml-vulkan.cpp @@ -2524,13 +2524,9 @@ static vk_device ggml_vk_get_device(size_t idx) { if (GGML_VK_SUBALLOCATION_BLOCK_SIZE != nullptr) { device->suballocation_block_size = std::stoul(GGML_VK_SUBALLOCATION_BLOCK_SIZE); -#if defined(_WIN32) - } else if (device->vendor_id == VK_VENDOR_ID_NVIDIA) { + } else { // Limit batching of allocations to 1GB by default to avoid fragmentation issues device->suballocation_block_size = 1024*1024*1024; -#endif - } else { - device->suballocation_block_size = device->max_memory_allocation_size; } device->suballocation_block_size = std::min(device->suballocation_block_size, device->max_memory_allocation_size); From d9a14523bb9074eef42d1b43ae4a1e149f81b7e2 Mon Sep 17 00:00:00 2001 From: fj-y-saito <85871716+fj-y-saito@users.noreply.github.com> Date: Tue, 18 Mar 2025 17:14:39 +0900 Subject: [PATCH 075/398] ggml : add SVE support for q6_K_q8_K (#12361) --- ggml/src/ggml-cpu/ggml-cpu-quants.c | 151 +++++++++++++++++++++++++++- 1 file changed, 150 insertions(+), 1 deletion(-) diff --git 
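The new SVE kernel below selects its implementation from the run-time vector length (vector_length = ggml_cpu_get_sve_cnt()*8): one branch sized for 128-bit vectors with 16-byte predicates (SV_VL16) and one shared branch for 256/512-bit vectors with 32-byte predicates (SV_VL32). A minimal sketch of that dispatch (not part of the patch), assuming ggml_cpu_get_sve_cnt() corresponds to svcntb() and the code is built with an AArch64 toolchain that enables SVE (e.g. -march=armv8-a+sve):

    #include <arm_sve.h>
    #include <cstdio>

    int main() {
        // svcntb() is the number of bytes per SVE vector, so *8 gives the width in bits
        const int vector_length = int(svcntb()) * 8;
        switch (vector_length) {
            case 128:
                std::printf("128-bit vectors: take the 16-byte-predicate path\n");
                break;
            case 256:
            case 512:
                std::printf("%d-bit vectors: take the shared 32-byte-predicate path\n", vector_length);
                break;
            default:
                std::printf("unsupported SVE vector length: %d\n", vector_length);
                break;
        }
        return 0;
    }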
a/ggml/src/ggml-cpu/ggml-cpu-quants.c b/ggml/src/ggml-cpu/ggml-cpu-quants.c index 8c7dbd1ccb5fe..4e0ae057244c9 100644 --- a/ggml/src/ggml-cpu/ggml-cpu-quants.c +++ b/ggml/src/ggml-cpu/ggml-cpu-quants.c @@ -8158,7 +8158,156 @@ void ggml_vec_dot_q6_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const voi const int nb = n / QK_K; -#ifdef __ARM_NEON +#ifdef __ARM_FEATURE_SVE + const int vector_length = ggml_cpu_get_sve_cnt()*8; + float sum = 0; + svuint8_t m4b = svdup_n_u8(0xf); + svint32_t vzero = svdup_n_s32(0); + svuint8_t mone = svdup_n_u8(0x30); + svint8_t q6bytes_1, q6bytes_2, q6bytes_3, q6bytes_4; + svuint8_t q6h_1, q6h_2, q6h_3, q6h_4; + + for (int i = 0; i < nb; ++i) { + const float d_all = GGML_FP16_TO_FP32(x[i].d); + + const uint8_t * GGML_RESTRICT q6 = x[i].ql; + const uint8_t * GGML_RESTRICT qh = x[i].qh; + const int8_t * GGML_RESTRICT q8 = y[i].qs; + + const int8_t * GGML_RESTRICT scale = x[i].scales; + + const svbool_t pg16_8 = svptrue_pat_b16(SV_VL8); + const svint16_t q8sums_1 = svld1_s16(pg16_8, y[i].bsums); + const svint16_t q8sums_2 = svld1_s16(pg16_8, y[i].bsums + 8); + const svint16_t q6scales_1 = svunpklo_s16(svld1_s8(svptrue_pat_b8(SV_VL8), scale)); + const svint16_t q6scales_2 = svunpklo_s16(svld1_s8(svptrue_pat_b8(SV_VL8), scale + 8)); + const svint64_t prod = svdup_n_s64(0); + int32_t isum_mins = svaddv_s64(svptrue_b64(), svadd_s64_x(svptrue_b64(), svdot_s64(prod, q8sums_1, q6scales_1), + svdot_s64(prod, q8sums_2, q6scales_2))); + int32_t isum = 0; + + switch (vector_length) { + case 128: + { + const svbool_t pg32_4 = svptrue_pat_b32(SV_VL4); + const svbool_t pg8_16 = svptrue_pat_b8(SV_VL16); + svint32_t isum_tmp = svdup_n_s32(0); + for (int j = 0; j < QK_K/128; ++j) { + svuint8_t qhbits_1 = svld1_u8(pg8_16, qh); + svuint8_t qhbits_2 = svld1_u8(pg8_16, qh+16); + qh += 32; + svuint8_t q6bits_1 = svld1_u8(pg8_16, q6); + svuint8_t q6bits_2 = svld1_u8(pg8_16, q6+16); + svuint8_t q6bits_3 = svld1_u8(pg8_16, q6+32); + svuint8_t q6bits_4 = svld1_u8(pg8_16, q6+48); + q6 += 64; + svint8_t q8bytes_1 = svld1_s8(pg8_16, q8); + svint8_t q8bytes_2 = svld1_s8(pg8_16, q8+16); + svint8_t q8bytes_3 = svld1_s8(pg8_16, q8+32); + svint8_t q8bytes_4 = svld1_s8(pg8_16, q8+48); + q8 += 64; + + q6h_1 = svand_u8_x(pg16_8, mone, svlsl_n_u8_x(pg16_8, qhbits_1, 4)); + q6h_2 = svand_u8_x(pg16_8, mone, svlsl_n_u8_x(pg16_8, qhbits_2, 4)); + q6h_3 = svand_u8_x(pg16_8, mone, svlsl_n_u8_x(pg16_8, qhbits_1, 2)); + q6h_4 = svand_u8_x(pg16_8, mone, svlsl_n_u8_x(pg16_8, qhbits_2, 2)); + q6bytes_1 = svreinterpret_s8_u8(svorr_u8_x(pg8_16, svand_u8_x(pg8_16, q6bits_1, m4b), q6h_1)); + q6bytes_2 = svreinterpret_s8_u8(svorr_u8_x(pg8_16, svand_u8_x(pg8_16, q6bits_2, m4b), q6h_2)); + q6bytes_3 = svreinterpret_s8_u8(svorr_u8_x(pg8_16, svand_u8_x(pg8_16, q6bits_3, m4b), q6h_3)); + q6bytes_4 = svreinterpret_s8_u8(svorr_u8_x(pg8_16, svand_u8_x(pg8_16, q6bits_4, m4b), q6h_4)); + isum_tmp = svmla_n_s32_x(pg32_4, isum_tmp, svdot_s32(vzero, q6bytes_1, q8bytes_1), scale[0]); + isum_tmp = svmla_n_s32_x(pg32_4, isum_tmp, svdot_s32(vzero, q6bytes_2, q8bytes_2), scale[1]); + isum_tmp = svmla_n_s32_x(pg32_4, isum_tmp, svdot_s32(vzero, q6bytes_3, q8bytes_3), scale[2]); + isum_tmp = svmla_n_s32_x(pg32_4, isum_tmp, svdot_s32(vzero, q6bytes_4, q8bytes_4), scale[3]); + + scale += 4; + q8bytes_1 = svld1_s8(pg8_16, q8); + q8bytes_2 = svld1_s8(pg8_16, q8+16); + q8bytes_3 = svld1_s8(pg8_16, q8+32); + q8bytes_4 = svld1_s8(pg8_16, q8+48); + q8 += 64; + + q6h_1 = svand_u8_x(pg16_8, mone, qhbits_1); + q6h_2 = svand_u8_x(pg16_8, mone, 
qhbits_2); + q6h_3 = svand_u8_x(pg16_8, mone, svlsr_n_u8_x(pg16_8, qhbits_1, 2)); + q6h_4 = svand_u8_x(pg16_8, mone, svlsr_n_u8_x(pg16_8, qhbits_2, 2)); + q6bytes_1 = svreinterpret_s8_u8(svorr_u8_x(pg8_16, svlsr_n_u8_x(pg8_16, q6bits_1, 4), q6h_1)); + q6bytes_2 = svreinterpret_s8_u8(svorr_u8_x(pg8_16, svlsr_n_u8_x(pg8_16, q6bits_2, 4), q6h_2)); + q6bytes_3 = svreinterpret_s8_u8(svorr_u8_x(pg8_16, svlsr_n_u8_x(pg8_16, q6bits_3, 4), q6h_3)); + q6bytes_4 = svreinterpret_s8_u8(svorr_u8_x(pg8_16, svlsr_n_u8_x(pg8_16, q6bits_4, 4), q6h_4)); + isum_tmp = svmla_n_s32_x(pg32_4, isum_tmp, svdot_s32(vzero, q6bytes_1, q8bytes_1), scale[0]); + isum_tmp = svmla_n_s32_x(pg32_4, isum_tmp, svdot_s32(vzero, q6bytes_2, q8bytes_2), scale[1]); + isum_tmp = svmla_n_s32_x(pg32_4, isum_tmp, svdot_s32(vzero, q6bytes_3, q8bytes_3), scale[2]); + isum_tmp = svmla_n_s32_x(pg32_4, isum_tmp, svdot_s32(vzero, q6bytes_4, q8bytes_4), scale[3]); + scale += 4; + } + isum += svaddv_s32(pg32_4, isum_tmp); + sum += d_all * y[i].d * (isum - 32 * isum_mins); + } + break; + case 256: + case 512: + { + const svbool_t pg8_2 = svptrue_pat_b8(SV_VL2); + const svbool_t pg32_8 = svptrue_pat_b32(SV_VL8); + const svbool_t pg8_32 = svptrue_pat_b8(SV_VL32); + svint32_t isum_tmp = svdup_n_s32(0); + for (int j = 0; j < QK_K/128; j++) { + svuint8_t qhbits_1 = svld1_u8(pg8_32, qh); + qh += 32; + svuint8_t q6bits_1 = svld1_u8(pg8_32, q6); + svuint8_t q6bits_2 = svld1_u8(pg8_32, q6+32); + q6 += 64; + svint8_t q8bytes_1 = svld1_s8(pg8_32, q8); + svint8_t q8bytes_2 = svld1_s8(pg8_32, q8+32); + svint8_t q8bytes_3 = svld1_s8(pg8_32, q8+64); + svint8_t q8bytes_4 = svld1_s8(pg8_32, q8+96); + q8 += 128; + q6h_1 = svand_u8_x(pg8_32, mone, svlsl_n_u8_x(pg8_32, qhbits_1, 4)); + q6h_2 = svand_u8_x(pg8_32, mone, svlsl_n_u8_x(pg8_32, qhbits_1, 2)); + q6h_3 = svand_u8_x(pg8_32, mone, qhbits_1); + q6h_4 = svand_u8_x(pg8_32, mone, svlsr_n_u8_x(pg8_32, qhbits_1, 2)); + q6bytes_1 = svreinterpret_s8_u8(svorr_u8_x(pg8_32, svand_u8_x(pg8_32, q6bits_1, m4b), q6h_1)); + q6bytes_2 = svreinterpret_s8_u8(svorr_u8_x(pg8_32, svand_u8_x(pg8_32, q6bits_2, m4b), q6h_2)); + q6bytes_3 = svreinterpret_s8_u8(svorr_u8_x(pg8_32, svlsr_n_u8_x(pg8_32, q6bits_1, 4), q6h_3)); + q6bytes_4 = svreinterpret_s8_u8(svorr_u8_x(pg8_32, svlsr_n_u8_x(pg8_32, q6bits_2, 4), q6h_4)); + + svint8_t scale_lane_1_tmp = svld1_s8(pg8_2, scale); + scale_lane_1_tmp= svzip1_s8(scale_lane_1_tmp, scale_lane_1_tmp); + scale_lane_1_tmp= svzip1_s8(scale_lane_1_tmp, scale_lane_1_tmp); + svint8_t scale_lane_2_tmp = svld1_s8(pg8_2, scale+2); + scale_lane_2_tmp = svzip1_s8(scale_lane_2_tmp, scale_lane_2_tmp); + scale_lane_2_tmp = svzip1_s8(scale_lane_2_tmp, scale_lane_2_tmp); + svint8_t scale_lane_3_tmp = svld1_s8(pg8_2, scale+4); + scale_lane_3_tmp = svzip1_s8(scale_lane_3_tmp, scale_lane_3_tmp); + scale_lane_3_tmp = svzip1_s8(scale_lane_3_tmp, scale_lane_3_tmp); + svint8_t scale_lane_4_tmp = svld1_s8(pg8_2, scale+6); + scale_lane_4_tmp = svzip1_s8(scale_lane_4_tmp, scale_lane_4_tmp); + scale_lane_4_tmp = svzip1_s8(scale_lane_4_tmp, scale_lane_4_tmp); + svint32_t scale_lane_1 = svunpklo_s32(svunpklo_s16(scale_lane_1_tmp)); + svint32_t scale_lane_2 = svunpklo_s32(svunpklo_s16(scale_lane_2_tmp)); + svint32_t scale_lane_3 = svunpklo_s32(svunpklo_s16(scale_lane_3_tmp)); + svint32_t scale_lane_4 = svunpklo_s32(svunpklo_s16(scale_lane_4_tmp)); + + isum_tmp = svmla_s32_x(pg32_8, isum_tmp, svdot_s32(vzero, q6bytes_1, q8bytes_1), scale_lane_1); + isum_tmp = svmla_s32_x(pg32_8, isum_tmp, svdot_s32(vzero, q6bytes_2, 
q8bytes_2), scale_lane_2); + isum_tmp = svmla_s32_x(pg32_8, isum_tmp, svdot_s32(vzero, q6bytes_3, q8bytes_3), scale_lane_3); + isum_tmp = svmla_s32_x(pg32_8, isum_tmp, svdot_s32(vzero, q6bytes_4, q8bytes_4), scale_lane_4); + scale += 8; + } + isum += svaddv_s32(pg32_8, isum_tmp); + sum += d_all * y[i].d * (isum - 32 * isum_mins); + } + break; + default: + assert(false && "Unsupported vector length"); + break; + } + } + + *s = sum; + +#elif __ARM_NEON float sum = 0; const uint8x16_t m4b = vdupq_n_u8(0xF); From eba92d64c3f6d86de2e6b4dd3a540d2805a22b0c Mon Sep 17 00:00:00 2001 From: Prajwal B Mehendarkar Date: Tue, 18 Mar 2025 15:07:33 +0530 Subject: [PATCH 076/398] cmake : fix PowerPC build (#12241) Closes #12240 --- ggml/src/ggml-cpu/CMakeLists.txt | 22 +++++++++++++++------- 1 file changed, 15 insertions(+), 7 deletions(-) diff --git a/ggml/src/ggml-cpu/CMakeLists.txt b/ggml/src/ggml-cpu/CMakeLists.txt index d6c4a9c2992d7..6aa078a93ea8e 100644 --- a/ggml/src/ggml-cpu/CMakeLists.txt +++ b/ggml/src/ggml-cpu/CMakeLists.txt @@ -287,17 +287,25 @@ function(ggml_add_cpu_backend_variant_impl tag_name) endif() endif() endif() - elseif (${CMAKE_SYSTEM_PROCESSOR} MATCHES "ppc64") + elseif ("${CMAKE_SYSTEM_PROCESSOR} " STREQUAL "ppc64le " OR "${CMAKE_SYSTEM_PROCESSOR} " STREQUAL "powerpc ") message(STATUS "PowerPC detected") - execute_process(COMMAND bash -c "grep POWER /proc/cpuinfo | head -n 1" OUTPUT_VARIABLE POWER_M) - if (${POWER_M} MATCHES "POWER10") - list(APPEND ARCH_FLAGS -mcpu=power10) - elseif (${POWER_M} MATCHES "POWER9") - list(APPEND ARCH_FLAGS -mcpu=power9) + if(${CMAKE_SYSTEM_PROCESSOR} MATCHES "ppc64") + file(READ "/proc/cpuinfo" POWER10_M) + elseif(${CMAKE_SYSTEM_PROCESSOR} MATCHES "powerpc") + execute_process(COMMAND bash -c "prtconf |grep 'Implementation' | head -n 1" OUTPUT_VARIABLE POWER10_M) + endif() + + string(REGEX MATCHALL "POWER *([0-9]+)" MATCHED_STRING "${POWER10_M}") + string(REGEX REPLACE "POWER *([0-9]+)" "\\1" EXTRACTED_NUMBER "${MATCHED_STRING}") + + if (EXTRACTED_NUMBER GREATER_EQUAL 10) + list(APPEND ARCH_FLAGS -mcpu=power10 -mpowerpc64) + elseif (EXTRACTED_NUMBER EQUAL 9) + list(APPEND ARCH_FLAGS -mcpu=power9 -mpowerpc64) elseif (${CMAKE_SYSTEM_PROCESSOR} MATCHES "ppc64le") list(APPEND ARCH_FLAGS -mcpu=powerpc64le -mtune=native) else() - list(APPEND ARCH_FLAGS -mcpu=powerpc64 -mtune=native) + list(APPEND ARCH_FLAGS -mcpu=native -mtune=native -mpowerpc64) endif() elseif (${CMAKE_SYSTEM_PROCESSOR} MATCHES "loongarch64") message(STATUS "loongarch64 detected") From 810e0af3f50379682dd46b7967c4aadf3f8286f6 Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Tue, 18 Mar 2025 12:05:42 +0200 Subject: [PATCH 077/398] server : fix warmup draft cache type (#12446) ggml-ci --- examples/server/server.cpp | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/examples/server/server.cpp b/examples/server/server.cpp index 71e053b202cd2..c2f1afeca450d 100644 --- a/examples/server/server.cpp +++ b/examples/server/server.cpp @@ -1872,6 +1872,10 @@ struct server_context { params_dft.n_gpu_layers = params_base.speculative.n_gpu_layers; params_dft.n_parallel = 1; + // force F16 KV cache for the draft model for extra performance + params_dft.cache_type_k = GGML_TYPE_F16; + params_dft.cache_type_v = GGML_TYPE_F16; + llama_init_dft = common_init_from_params(params_dft); model_dft = llama_init_dft.model.get(); @@ -1892,10 +1896,6 @@ struct server_context { cparams_dft = common_context_params_to_llama(params_dft); cparams_dft.n_batch = n_ctx_dft; - // force F16 KV cache 
for the draft model for extra performance - cparams_dft.type_k = GGML_TYPE_F16; - cparams_dft.type_v = GGML_TYPE_F16; - // the context is not needed - we will create one for each slot llama_init_dft.context.reset(); } From 35cae5ba05a5292dc3108636a71ec59fa2f80ab7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20=C5=9Alusarczyk?= <112692748+lslusarczyk@users.noreply.github.com> Date: Tue, 18 Mar 2025 11:16:31 +0100 Subject: [PATCH 078/398] SYCL: using graphs is configurable by environment variable and compile option (#12371) * alberto changes * enable sycl graphs by env variable * fixed compilation warnings in ggml-sycl.cpp * renamed graph variables * fix markdown in docs/backend/SYCL.md Co-authored-by: Romain Biessy * fix markdown in docs/backend/SYCL.md again * compiling graphs by default, renamed graph_enable to graph_disable --------- Co-authored-by: Romain Biessy --- docs/backend/SYCL.md | 4 ++- ggml/CMakeLists.txt | 1 + ggml/src/ggml-sycl/CMakeLists.txt | 3 ++ ggml/src/ggml-sycl/common.hpp | 5 ++++ ggml/src/ggml-sycl/ggml-sycl.cpp | 47 +++++++++++++++++++++++++++++-- 5 files changed, 56 insertions(+), 4 deletions(-) diff --git a/docs/backend/SYCL.md b/docs/backend/SYCL.md index 5da439e94e092..184cd419554f8 100644 --- a/docs/backend/SYCL.md +++ b/docs/backend/SYCL.md @@ -660,8 +660,9 @@ use 1 SYCL GPUs: [0] with Max compute units:512 |--------------------|---------------------------------------|---------------------------------------------| | GGML_SYCL | ON (mandatory) | Enable build with SYCL code path.
FP32 path - recommended for better perforemance than FP16 on quantized model| | GGML_SYCL_TARGET | INTEL *(default)* \| NVIDIA \| AMD | Set the SYCL target device type. | -| GGML_SYCL_DEVICE_ARCH | Optional (except for AMD) | Set the SYCL device architecture, optional except for AMD. Setting the device architecture can improve the performance. See the table [--offload-arch](https://github.com/intel/llvm/blob/sycl/sycl/doc/design/OffloadDesign.md#--offload-arch) for a list of valid architectures. | +| GGML_SYCL_DEVICE_ARCH | Optional (except for AMD) | Set the SYCL device architecture, optional except for AMD. Setting the device architecture can improve the performance. See the table [--offload-arch](https://github.com/intel/llvm/blob/sycl/sycl/doc/design/OffloadDesign.md#--offload-arch) for a list of valid architectures. | | GGML_SYCL_F16 | OFF *(default)* \|ON *(optional)* | Enable FP16 build with SYCL code path. | +| GGML_SYCL_GRAPH | ON *(default)* \|OFF *(Optional)* | Enable build with [SYCL Graph extension](https://github.com/intel/llvm/blob/sycl/sycl/doc/extensions/experimental/sycl_ext_oneapi_graph.asciidoc). | | CMAKE_C_COMPILER | `icx` *(Linux)*, `icx/cl` *(Windows)* | Set `icx` compiler for SYCL code path. | | CMAKE_CXX_COMPILER | `icpx` *(Linux)*, `icx` *(Windows)* | Set `icpx/icx` compiler for SYCL code path. | @@ -671,6 +672,7 @@ use 1 SYCL GPUs: [0] with Max compute units:512 |-------------------|------------------|---------------------------------------------------------------------------------------------------------------------------| | GGML_SYCL_DEBUG | 0 (default) or 1 | Enable log function by macro: GGML_SYCL_DEBUG | | GGML_SYCL_DISABLE_OPT | 0 (default) or 1 | Disable optimize features based on Intel GPU type, to compare the performance increase | +| GGML_SYCL_DISABLE_GRAPH | 0 or 1 (default) | Disable running computations through SYCL Graphs feature. Disabled by default because graph performance isn't yet better than non-graph performance. | | ZES_ENABLE_SYSMAN | 0 (default) or 1 | Support to get free memory of GPU by sycl::aspect::ext_intel_free_memory.
Recommended to use when --split-mode = layer | diff --git a/ggml/CMakeLists.txt b/ggml/CMakeLists.txt index 9a4ee4992d0c7..740f9f69cf2ed 100644 --- a/ggml/CMakeLists.txt +++ b/ggml/CMakeLists.txt @@ -186,6 +186,7 @@ option(GGML_OPENMP "ggml: use OpenMP" option(GGML_RPC "ggml: use RPC" OFF) option(GGML_SYCL "ggml: use SYCL" OFF) option(GGML_SYCL_F16 "ggml: use 16 bit floats for sycl calculations" OFF) +option(GGML_SYCL_GRAPH "ggml: enable graphs in the SYCL backend" ON) set (GGML_SYCL_TARGET "INTEL" CACHE STRING "ggml: sycl target device") set (GGML_SYCL_DEVICE_ARCH "" CACHE STRING diff --git a/ggml/src/ggml-sycl/CMakeLists.txt b/ggml/src/ggml-sycl/CMakeLists.txt index 3ad044432a27d..271413ca414bf 100644 --- a/ggml/src/ggml-sycl/CMakeLists.txt +++ b/ggml/src/ggml-sycl/CMakeLists.txt @@ -66,6 +66,9 @@ if (WIN32) find_package(MKL REQUIRED) target_link_libraries(ggml-sycl PRIVATE IntelSYCL::SYCL_CXX MKL::MKL MKL::MKL_SYCL) else() + if (GGML_SYCL_GRAPH) + add_compile_definitions(GGML_SYCL_GRAPH) + endif() if (GGML_SYCL_TARGET STREQUAL "INTEL") target_link_libraries(ggml-sycl PRIVATE sycl OpenCL mkl_core pthread m dl mkl_sycl_blas mkl_intel_ilp64 mkl_tbb_thread) elseif (GGML_SYCL_TARGET STREQUAL "NVIDIA") diff --git a/ggml/src/ggml-sycl/common.hpp b/ggml/src/ggml-sycl/common.hpp index fdd07d9cafa11..7cc5e14f9ab22 100644 --- a/ggml/src/ggml-sycl/common.hpp +++ b/ggml/src/ggml-sycl/common.hpp @@ -301,6 +301,7 @@ inline optimize_feature check_gpu_optimize_feature(syclex::architecture &arch) { return opt; } +namespace sycl_ex = sycl::ext::oneapi::experimental; struct ggml_backend_sycl_context { int device; std::string name; @@ -392,6 +393,10 @@ struct ggml_backend_sycl_context { return pool(device); } +#ifdef GGML_SYCL_GRAPH + std::unique_ptr> exec_graph = nullptr; +#endif + ggml_sycl_pool & host_pool(int device) { if (host_pools[device] == nullptr) { host_pools[device] = new_pool_for_host(stream(device, 0), device); diff --git a/ggml/src/ggml-sycl/ggml-sycl.cpp b/ggml/src/ggml-sycl/ggml-sycl.cpp index 207c0b440a052..360e3f166c218 100644 --- a/ggml/src/ggml-sycl/ggml-sycl.cpp +++ b/ggml/src/ggml-sycl/ggml-sycl.cpp @@ -46,6 +46,7 @@ static bool g_sycl_loaded = false; int g_ggml_sycl_debug = 0; int g_ggml_sycl_disable_optimize = 0; +int g_ggml_sycl_disable_graph = 0; static ggml_sycl_device_info ggml_sycl_init() { ggml_sycl_device_info info = {}; @@ -191,10 +192,12 @@ static void ggml_check_sycl() try { if (!initialized) { g_ggml_sycl_debug = get_sycl_env("GGML_SYCL_DEBUG", 0); g_ggml_sycl_disable_optimize= get_sycl_env("GGML_SYCL_DISABLE_OPT", 0); + g_ggml_sycl_disable_graph = get_sycl_env("GGML_SYCL_DISABLE_GRAPH", 1); GGML_SYCL_DEBUG("[SYCL] call ggml_check_sycl\n"); GGML_LOG_INFO("Running with Environment Variables:\n"); GGML_LOG_INFO(" GGML_SYCL_DEBUG: %d\n", g_ggml_sycl_debug); GGML_LOG_INFO(" GGML_SYCL_DISABLE_OPT: %d\n", g_ggml_sycl_disable_optimize); + GGML_LOG_INFO(" GGML_SYCL_DISABLE_GRAPH: %d\n", g_ggml_sycl_disable_graph); GGML_LOG_INFO("Build with Macros:\n"); #if defined(GGML_SYCL_FORCE_MMQ) GGML_LOG_INFO(" GGML_SYCL_FORCE_MMQ: yes\n"); @@ -3699,10 +3702,9 @@ static void optimize_graph_once(ggml_cgraph * cgraph, ggml_backend_sycl_context if (ctx->opt_feature.reorder) opt_for_reorder(cgraph->nodes[i], stream); } } -static ggml_status ggml_backend_sycl_graph_compute(ggml_backend_t backend, ggml_cgraph * cgraph) { - ggml_backend_sycl_context * sycl_ctx = (ggml_backend_sycl_context *)backend->context; - ggml_sycl_set_main_device(sycl_ctx->device); +static void 
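The hunk that follows splits the eager path into ggml_backend_sycl_graph_compute_impl() and, when the backend is built with GGML_SYCL_GRAPH and the user sets GGML_SYCL_DISABLE_GRAPH=0, wraps it in the sycl_ext_oneapi_graph record/finalize/replay flow. A minimal standalone sketch of that flow (not part of the patch), assuming a DPC++ compiler that exposes the extension through <sycl/sycl.hpp> and a device reporting sycl::aspect::ext_oneapi_graph:

    #include <sycl/sycl.hpp>

    namespace sycl_ex = sycl::ext::oneapi::experimental;

    int main() {
        sycl::queue q;
        if (!q.get_device().has(sycl::aspect::ext_oneapi_graph)) {
            return 0; // no graph support on this device: fall back to eager submission
        }
        float * x = sycl::malloc_shared<float>(1024, q);

        // record the submissions once into a modifiable graph
        sycl_ex::command_graph<sycl_ex::graph_state::modifiable> graph(q);
        graph.begin_recording(q);
        q.parallel_for(sycl::range<1>(1024), [=](sycl::id<1> i) { x[i[0]] = 2.0f * float(i[0]); });
        graph.end_recording();

        // finalize to an executable graph that can later be updated in place
        auto exec = graph.finalize({sycl_ex::property::graph::updatable{}});

        // replay the recorded workload without resubmitting individual kernels
        q.ext_oneapi_graph(exec);
        q.ext_oneapi_graph(exec);
        q.wait();

        sycl::free(x, q);
        return 0;
    }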
ggml_backend_sycl_graph_compute_impl(ggml_backend_sycl_context * sycl_ctx, ggml_cgraph * cgraph) { + ggml_sycl_set_main_device(sycl_ctx->device); if (!g_ggml_sycl_disable_optimize) optimize_graph_once(cgraph, sycl_ctx); for (int i = 0; i < cgraph->n_nodes; i++) { @@ -3724,7 +3726,46 @@ static ggml_status ggml_backend_sycl_graph_compute(ggml_backend_t backend, ggml_ } GGML_ASSERT(ok); } +} + +static ggml_status ggml_backend_sycl_graph_compute(ggml_backend_t backend, ggml_cgraph * cgraph) { + auto * sycl_ctx = static_cast(backend->context); + +#ifdef GGML_SYCL_GRAPH + if (!g_ggml_sycl_disable_graph) { + if (!sycl_ctx->exec_graph && !dpct::get_device(sycl_ctx->device).has(sycl::aspect::ext_oneapi_graph)) { + GGML_SYCL_DEBUG("[SYCL-GRAPH] can not use graphs on device:%d\n", sycl_ctx->device); + ggml_backend_sycl_graph_compute_impl(sycl_ctx, cgraph); + return GGML_STATUS_SUCCESS; + } + + sycl_ex::command_graph model_sycl_graph(*(sycl_ctx->stream())); + model_sycl_graph.begin_recording(*(sycl_ctx->stream())); + ggml_backend_sycl_graph_compute_impl(sycl_ctx, cgraph); + model_sycl_graph.end_recording(); + if (!sycl_ctx->exec_graph) { + auto exec_graph = model_sycl_graph.finalize({sycl_ex::property::graph::updatable{}}); + sycl_ctx->exec_graph = std::make_unique< + sycl_ex::command_graph>(exec_graph); + } else { + try { + sycl_ctx->exec_graph->update(model_sycl_graph); + GGML_SYCL_DEBUG("[SYCL-GRAPH] update success\n"); + } catch (sycl::exception const & e) { + GGML_SYCL_DEBUG("[SYCL-GRAPH] Exception when updating graph, %s\n", e.what()); + auto exec_graph = model_sycl_graph.finalize({sycl_ex::property::graph::updatable{}}); + sycl_ctx->exec_graph = std::make_unique< + sycl_ex::command_graph>(exec_graph); + } + } + + sycl_ctx->stream()->ext_oneapi_graph(*(sycl_ctx->exec_graph)); + } else +#endif + { + ggml_backend_sycl_graph_compute_impl(sycl_ctx, cgraph); + } return GGML_STATUS_SUCCESS; } From 8551c44d840a7db50adb958ccaf464dc3ded82e7 Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Tue, 18 Mar 2025 13:05:49 +0200 Subject: [PATCH 079/398] context : always use non-causal attention for encoder graphs (#12447) * context : always use non-causal attention for encoder graphs ggml-ci * context : move the change to llama_context::encode() ggml-ci --- src/llama-context.cpp | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/src/llama-context.cpp b/src/llama-context.cpp index abb7e526f6171..42332acf1e39d 100644 --- a/src/llama-context.cpp +++ b/src/llama-context.cpp @@ -1057,6 +1057,13 @@ int llama_context::encode(llama_batch & inp_batch) { ggml_backend_sched_reset(sched.get()); ggml_backend_sched_set_eval_callback(sched.get(), cparams.cb_eval, cparams.cb_eval_user_data); + const auto causal_attn_org = cparams.causal_attn; + + // always use non-causal attention for encoder graphs + // TODO: this is a tmp solution until we have a proper way to support enc-dec models + // ref: https://github.com/ggml-org/llama.cpp/pull/12181#issuecomment-2730451223 + cparams.causal_attn = false; + auto * gf = graph_init(); auto res = graph_build(ctx_compute.get(), gf, ubatch, LLM_GRAPH_TYPE_ENCODER); @@ -1064,6 +1071,8 @@ int llama_context::encode(llama_batch & inp_batch) { res->set_inputs(&ubatch); + cparams.causal_attn = causal_attn_org; + const auto compute_status = graph_compute(gf, n_tokens > 1); switch (compute_status) { case GGML_STATUS_SUCCESS: From 99aa304fb900654ec338749f64e62895b9a88afd Mon Sep 17 00:00:00 2001 From: Xuan-Son Nguyen Date: Tue, 18 Mar 2025 17:24:33 +0100 Subject: [PATCH 080/398] llama : 
add support for EXAONE tied word embeddings (#12451) --- src/llama-model.cpp | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/src/llama-model.cpp b/src/llama-model.cpp index c571aa69b671c..9171585bd9d91 100644 --- a/src/llama-model.cpp +++ b/src/llama-model.cpp @@ -3264,7 +3264,12 @@ bool llama_model::load_tensors(llama_model_loader & ml) { // output output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0); - output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, 0); + output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED); + + // if output is NULL, init from the input tok embed + if (output == NULL) { + output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED); + } for (int i = 0; i < n_layer; ++i) { auto & layer = layers[i]; From c6af2161b200284d55633cf184a07406ca89908e Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Tue, 18 Mar 2025 19:35:11 +0200 Subject: [PATCH 081/398] speculative : fix seg fault in certain cases (#12454) --- examples/speculative/speculative.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/examples/speculative/speculative.cpp b/examples/speculative/speculative.cpp index bfddc67e034fb..627d01bbcb5ad 100644 --- a/examples/speculative/speculative.cpp +++ b/examples/speculative/speculative.cpp @@ -331,11 +331,11 @@ int main(int argc, char ** argv) { } active_seqs.erase(s); - for(int i = 0; i < n_seq_dft; i++) { + for (int i = 0; i < n_seq_dft; i++) { if (i == s) { continue; } - if (drafts[i].tokens[i_dft] == drafts[s].tokens[i_dft]) { + if (drafts[i].active && drafts[i].tokens[i_dft] == drafts[s].tokens[i_dft]) { // synchronize active status for sequences with the same drafted token drafts[i].active = drafts[i].active && accept; if (!drafts[i].active) { From 29fff308c704c1c752cdb5153361e545e2bac09d Mon Sep 17 00:00:00 2001 From: Xuan-Son Nguyen Date: Tue, 18 Mar 2025 19:16:19 +0100 Subject: [PATCH 082/398] llama : support converting Mistral Small text-only (#12450) --- convert_hf_to_gguf.py | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/convert_hf_to_gguf.py b/convert_hf_to_gguf.py index d13d57c54154a..7a2ef4c7e38ce 100755 --- a/convert_hf_to_gguf.py +++ b/convert_hf_to_gguf.py @@ -1747,6 +1747,25 @@ def prepare_tensors(self): raise ValueError(f"Unprocessed experts: {experts}") +@Model.register("Mistral3ForConditionalGeneration") +class Mistral3Model(LlamaModel): + model_arch = gguf.MODEL_ARCH.LLAMA + + # we need to merge the text_config into the root level of hparams + def __init__(self, *args, **kwargs): + hparams = Model.load_hparams(kwargs["dir_model"]) + if "text_config" in hparams: + hparams = {**hparams, **hparams["text_config"]} + kwargs["hparams"] = hparams + super().__init__(*args, **kwargs) + + def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None): + name = name.replace("language_model.", "") + if "multi_modal_projector" in name or "vision_tower" in name: + return [] + return super().modify_tensors(data_torch, name, bid) + + @Model.register("DeciLMForCausalLM") class DeciModel(Model): model_arch = gguf.MODEL_ARCH.DECI From bb115d2bf7ed2cdd7dccd7ae74cc9cfe4b0adb71 Mon Sep 17 00:00:00 2001 From: R0CKSTAR Date: Wed, 19 Mar 2025 02:28:26 +0800 Subject: [PATCH 083/398] musa: override warp_size of musa device to 32 (#12445) Signed-off-by: Xiaodong Ye --- ggml/src/ggml-cuda/ggml-cuda.cu | 2 ++ 1 file changed, 2 insertions(+) diff --git 
a/ggml/src/ggml-cuda/ggml-cuda.cu b/ggml/src/ggml-cuda/ggml-cuda.cu index 8fb063822cfb7..5cb56df9a81ae 100644 --- a/ggml/src/ggml-cuda/ggml-cuda.cu +++ b/ggml/src/ggml-cuda/ggml-cuda.cu @@ -262,6 +262,8 @@ static ggml_cuda_device_info ggml_cuda_init() { id, prop.name, prop.gcnArchName, info.devices[id].cc & 0xffff, device_vmm ? "yes" : "no", prop.warpSize); #elif defined(GGML_USE_MUSA) + // FIXME: Ensure compatibility with varying warp sizes across different MUSA archs. + info.devices[id].warp_size = 32; // TODO: refine the .cc to reflect MUSA's actual CC capabilities info.devices[id].smpbo = prop.sharedMemPerBlockOptin; info.devices[id].cc = 100*prop.major + 10*prop.minor; From 75422e8bc42646005be0754f7aa438b97a5e777e Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Tue, 18 Mar 2025 21:35:19 +0200 Subject: [PATCH 084/398] graph : normalize Q, K, V shapes + sync cross attention (#12449) * graph : normalize Q, K, V shapes and add comments ggml-ci * context : synchronize before getting cross attention data * model : fix command-r attention norm check --- src/llama-context.cpp | 2 + src/llama-graph.cpp | 2 +- src/llama-graph.h | 24 +- src/llama-model.cpp | 622 ++++++++++++++++++++++++++---------------- 4 files changed, 403 insertions(+), 247 deletions(-) diff --git a/src/llama-context.cpp b/src/llama-context.cpp index 42332acf1e39d..664703a896701 100644 --- a/src/llama-context.cpp +++ b/src/llama-context.cpp @@ -1143,6 +1143,8 @@ int llama_context::encode(llama_batch & inp_batch) { if (model.arch == LLM_ARCH_T5 && t_embd) { //cross.t_embd = t_embd; + synchronize(); + cross.n_embd = t_embd->ne[0]; cross.n_enc = t_embd->ne[1]; cross.v_embd.resize(cross.n_embd*cross.n_enc); diff --git a/src/llama-graph.cpp b/src/llama-graph.cpp index 4e90873397ca4..0bd40174438cc 100644 --- a/src/llama-graph.cpp +++ b/src/llama-graph.cpp @@ -1378,7 +1378,7 @@ ggml_tensor * llm_graph_context::build_attn( // note: storing RoPE-ed version of K in the KV cache ggml_build_forward_expand(gf, ggml_cpy(ctx0, k_cur, k_cache_view)); - assert(v_cur->ne[0] == n_embd_v_gqa && v_cur->ne[1] == n_tokens); + v_cur = ggml_reshape_2d(ctx0, v_cur, n_embd_v_gqa, n_tokens); ggml_tensor * v_cache_view = nullptr; diff --git a/src/llama-graph.h b/src/llama-graph.h index c4328e6f9e627..bdf19ed015e35 100644 --- a/src/llama-graph.h +++ b/src/llama-graph.h @@ -487,9 +487,9 @@ struct llm_graph_context { ggml_tensor * build_attn_mha( ggml_cgraph * gf, - ggml_tensor * q, - ggml_tensor * k, - ggml_tensor * v, + ggml_tensor * q, // [n_embd_head_q, n_tokens, n_head_q] + ggml_tensor * k, // [n_embd_head_k, n_tokens, n_head_k] + ggml_tensor * v, // [n_embd_head_v, n_tokens, n_head_v] (v_trans == false) ggml_tensor * kq_b, ggml_tensor * kq_mask, bool v_trans, @@ -502,9 +502,9 @@ struct llm_graph_context { ggml_cgraph * gf, ggml_tensor * wo, ggml_tensor * wo_b, - ggml_tensor * q_cur, - ggml_tensor * k_cur, - ggml_tensor * v_cur, + ggml_tensor * q_cur, // [n_embd_head_q, n_head_q, n_tokens] + ggml_tensor * k_cur, // [n_embd_head_k, n_head_k, n_tokens] + ggml_tensor * v_cur, // [n_embd_head_v, n_head_v, n_tokens] ggml_tensor * kq_b, float kq_scale, int il) const; @@ -516,9 +516,9 @@ struct llm_graph_context { ggml_cgraph * gf, ggml_tensor * wo, ggml_tensor * wo_b, - ggml_tensor * q_cur, - ggml_tensor * k_cur, - ggml_tensor * v_cur, + ggml_tensor * q_cur, // [n_embd_head_q, n_head_q, n_tokens] + ggml_tensor * k_cur, // [n_embd_head_k, n_head_k, n_tokens] + ggml_tensor * v_cur, // [n_embd_head_v, n_head_v, n_tokens] ggml_tensor * kq_b, float kq_scale, 
int il) const; @@ -530,9 +530,9 @@ struct llm_graph_context { ggml_cgraph * gf, ggml_tensor * wo, ggml_tensor * wo_b, - ggml_tensor * q_cur, - ggml_tensor * k_cur, - ggml_tensor * v_cur, + ggml_tensor * q_cur, // [n_embd_head_q, n_head_q, n_tokens] + ggml_tensor * k_cur, // [n_embd_head_k, n_head_k, n_tokens] + ggml_tensor * v_cur, // [n_embd_head_v, n_head_v, n_tokens] ggml_tensor * kq_b, float kq_scale, int il) const; diff --git a/src/llama-model.cpp b/src/llama-model.cpp index 9171585bd9d91..d286176c1ff83 100644 --- a/src/llama-model.cpp +++ b/src/llama-model.cpp @@ -4093,19 +4093,25 @@ struct llm_build_llama : public llm_graph_context { cb(Vcur, "Vcur", il); } + Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens); + Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens); + Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens); + Qcur = ggml_rope_ext( - ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos, rope_factors, + ctx0, Qcur, inp_pos, rope_factors, n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow ); - cb(Qcur, "Qcur", il); Kcur = ggml_rope_ext( - ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos, rope_factors, + ctx0, Kcur, inp_pos, rope_factors, n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow ); + + cb(Qcur, "Qcur", il); cb(Kcur, "Kcur", il); + cb(Vcur, "Vcur", il); cur = build_attn(inp_attn, gf, model.layers[il].wo, model.layers[il].bo, @@ -4267,19 +4273,25 @@ struct llm_build_deci : public llm_graph_context { cb(Vcur, "Vcur", il); } + Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens); + Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens); + Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens); + Qcur = ggml_rope_ext( - ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos, rope_factors, + ctx0, Qcur, inp_pos, rope_factors, n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow ); - cb(Qcur, "Qcur", il); Kcur = ggml_rope_ext( - ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos, rope_factors, + ctx0, Kcur, inp_pos, rope_factors, n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow ); + + cb(Qcur, "Qcur", il); cb(Kcur, "Kcur", il); + cb(Vcur, "Vcur", il); cur = build_attn(inp_attn, gf, model.layers[il].wo, model.layers[il].bo, @@ -4396,28 +4408,32 @@ struct llm_build_baichuan : public llm_graph_context { ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur); cb(Vcur, "Vcur", il); + Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens); + Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens); + Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens); + switch (model.type) { case LLM_TYPE_7B: Qcur = ggml_rope_ext( - ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos, nullptr, + ctx0, Qcur, inp_pos, nullptr, n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow ); Kcur = ggml_rope_ext( - ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos, nullptr, + ctx0, Kcur, inp_pos, nullptr, n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow ); break; case LLM_TYPE_13B: - Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd/n_head, n_head, 
n_tokens); - Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd/n_head, n_head, n_tokens); break; default: GGML_ABORT("fatal error"); } + cb(Qcur, "Qcur", il); cb(Kcur, "Kcur", il); + cb(Vcur, "Vcur", il); cur = build_attn(inp_attn, gf, model.layers[il].wo, NULL, @@ -4514,19 +4530,25 @@ struct llm_build_xverse : public llm_graph_context { ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur); cb(Vcur, "Vcur", il); + Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens); + Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens); + Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens); + Qcur = ggml_rope_ext( - ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos, nullptr, + ctx0, Qcur, inp_pos, nullptr, n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow ); - cb(Qcur, "Qcur", il); Kcur = ggml_rope_ext( - ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos, nullptr, + ctx0, Kcur, inp_pos, nullptr, n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow ); + + cb(Qcur, "Qcur", il); cb(Kcur, "Kcur", il); + cb(Vcur, "Vcur", il); cur = build_attn(inp_attn, gf, model.layers[il].wo, NULL, @@ -4632,25 +4654,26 @@ struct llm_build_falcon : public llm_graph_context { ggml_tensor * Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd))); ggml_tensor * Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd + n_embd_gqa))); - cb(Qcur, "Qcur", il); - cb(Kcur, "Kcur", il); - cb(Vcur, "Vcur", il); - Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens); Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens); + Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens); // using mode = 2 for neox mode Qcur = ggml_rope_ext( - ctx0, Qcur, inp_pos, nullptr, n_rot, rope_type, n_ctx_orig, - freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow + ctx0, Qcur, inp_pos, nullptr, + n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, + ext_factor, attn_factor, beta_fast, beta_slow ); - cb(Qcur, "Qcur", il); Kcur = ggml_rope_ext( - ctx0, Kcur, inp_pos, nullptr, n_rot, rope_type, n_ctx_orig, - freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow + ctx0, Kcur, inp_pos, nullptr, + n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, + ext_factor, attn_factor, beta_fast, beta_slow ); + + cb(Qcur, "Qcur", il); cb(Kcur, "Kcur", il); + cb(Vcur, "Vcur", il); cur = build_attn(inp_attn, gf, model.layers[il].wo, NULL, @@ -4762,19 +4785,25 @@ struct llm_build_grok : public llm_graph_context { cb(Vcur, "Vcur", il); } + Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens); + Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens); + Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens); + Qcur = ggml_rope_ext( - ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos, nullptr, + ctx0, Qcur, inp_pos, nullptr, n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow ); - cb(Qcur, "Qcur", il); Kcur = ggml_rope_ext( - ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos, nullptr, + ctx0, Kcur, inp_pos, nullptr, n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow ); + + cb(Qcur, "Qcur", il); cb(Kcur, "Kcur", il); + cb(Vcur, "Vcur", 
il); cur = build_attn(inp_attn, gf, model.layers[il].wo, model.layers[il].bo, @@ -4907,23 +4936,25 @@ struct llm_build_dbrx : public llm_graph_context { Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd))); Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd + n_embd_gqa))); - cb(Qcur, "Qcur", il); - cb(Kcur, "Kcur", il); - cb(Vcur, "Vcur", il); + Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens); + Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens); + Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens); Qcur = ggml_rope_ext( - ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos, nullptr, + ctx0, Qcur, inp_pos, nullptr, n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow ); - cb(Qcur, "Qcur", il); Kcur = ggml_rope_ext( - ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos, nullptr, + ctx0, Kcur, inp_pos, nullptr, n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow ); + + cb(Qcur, "Qcur", il); cb(Kcur, "Kcur", il); + cb(Vcur, "Vcur", il); cur = build_attn(inp_attn, gf, model.layers[il].wo, NULL, @@ -5031,12 +5062,14 @@ struct llm_build_starcoder : public llm_graph_context { ggml_tensor * Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd))); ggml_tensor * Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd + n_embd_gqa))); + Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens); + Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens); + Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens); + cb(Qcur, "Qcur", il); cb(Kcur, "Kcur", il); cb(Vcur, "Vcur", il); - Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens); - cur = build_attn(inp_attn, gf, model.layers[il].wo, model.layers[il].bo, Qcur, Kcur, Vcur, nullptr, 1.0f/sqrtf(float(n_embd_head)), il); @@ -5128,11 +5161,13 @@ struct llm_build_refact : public llm_graph_context { ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur); cb(Vcur, "Vcur", il); + Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens); Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens); - cb(Kcur, "Kcur", il); + Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens); - Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens); cb(Qcur, "Qcur", il); + cb(Kcur, "Kcur", il); + cb(Vcur, "Vcur", il); cur = build_attn(inp_attn, gf, model.layers[il].wo, NULL, @@ -5267,21 +5302,21 @@ struct llm_build_bert : public llm_graph_context { Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd))); Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd + n_embd_gqa))); + Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens); + Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens); + Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens); + Qcur = ggml_rope_ext( - ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos, nullptr, + ctx0, Qcur, inp_pos, nullptr, n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow ); - cb(Qcur, "Qcur", il); Kcur = ggml_rope_ext( - ctx0, 
ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos, nullptr, + ctx0, Kcur, inp_pos, nullptr, n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow ); - cb(Kcur, "Kcur", il); - - Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens); } cb(Qcur, "Qcur", il); @@ -5397,12 +5432,14 @@ struct llm_build_bloom : public llm_graph_context { ggml_tensor * Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd))); ggml_tensor * Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd + n_embd_gqa))); + Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens); + Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens); + Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens); + cb(Qcur, "Qcur", il); cb(Kcur, "Kcur", il); cb(Vcur, "Vcur", il); - Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens); - cur = build_attn(inp_attn, gf, model.layers[il].wo, model.layers[il].bo, Qcur, Kcur, Vcur, nullptr, 1.0f/sqrtf(float(n_embd_head)), il); @@ -5534,20 +5571,19 @@ struct llm_build_mpt : public llm_graph_context { model.layers[il].attn_k_norm_b, LLM_NORM, il); cb(Kcur, "Kcur", il); + } - Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens); - Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens); + Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens); + Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens); + Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens); - cur = build_attn(inp_attn, gf, - model.layers[il].wo, model.layers[il].bo, - Qcur, Kcur, Vcur, nullptr, 1.0f/sqrtf(float(n_embd_head)), il); - } else { - Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens); + cb(Qcur, "Qcur", il); + cb(Kcur, "Kcur", il); + cb(Vcur, "Vcur", il); - cur = build_attn(inp_attn, gf, - model.layers[il].wo, model.layers[il].bo, - Qcur, Kcur, Vcur, nullptr, 1.0f/sqrtf(float(n_embd_head)), il); - } + cur = build_attn(inp_attn, gf, + model.layers[il].wo, model.layers[il].bo, + Qcur, Kcur, Vcur, nullptr, 1.0f/sqrtf(float(n_embd_head)), il); } if (il == n_layer - 1) { @@ -5656,9 +5692,8 @@ struct llm_build_stablelm : public llm_graph_context { } Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens); - cb(Qcur, "Qcur", il); Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens); - cb(Kcur, "Kcur", il); + Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens); if (model.layers[il].attn_q_norm) { Qcur = build_norm(Qcur, @@ -5667,6 +5702,7 @@ struct llm_build_stablelm : public llm_graph_context { LLM_NORM, il); cb(Qcur, "Qcur", il); } + if (model.layers[il].attn_k_norm) { Kcur = build_norm(Kcur, model.layers[il].attn_k_norm, @@ -5675,20 +5711,21 @@ struct llm_build_stablelm : public llm_graph_context { cb(Kcur, "Kcur", il); } - Qcur = ggml_rope_ext( ctx0, Qcur, inp_pos, nullptr, n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow ); - cb(Qcur, "Qcur", il); Kcur = ggml_rope_ext( ctx0, Kcur, inp_pos, nullptr, n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow ); + + cb(Qcur, "Qcur", il); cb(Kcur, "Kcur", il); + cb(Vcur, "Vcur", il); cur = build_attn(inp_attn, gf, model.layers[il].wo, NULL, @@ -5792,25 +5829,26 @@ struct llm_build_qwen : public llm_graph_context { ggml_tensor * Kcur = 
ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd))); ggml_tensor * Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 2*sizeof(float)*(n_embd))); - cb(Qcur, "Qcur", il); - cb(Kcur, "Kcur", il); - cb(Vcur, "Vcur", il); - Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens); Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens); + Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens); // using mode = 2 for neox mode Qcur = ggml_rope_ext( - ctx0, Qcur, inp_pos, nullptr, n_rot, rope_type, n_ctx_orig, - freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow + ctx0, Qcur, inp_pos, nullptr, + n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, + ext_factor, attn_factor, beta_fast, beta_slow ); - cb(Qcur, "Qcur", il); Kcur = ggml_rope_ext( - ctx0, Kcur, inp_pos, nullptr, n_rot, rope_type, n_ctx_orig, - freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow + ctx0, Kcur, inp_pos, nullptr, + n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, + ext_factor, attn_factor, beta_fast, beta_slow ); + + cb(Qcur, "Qcur", il); cb(Kcur, "Kcur", il); + cb(Vcur, "Vcur", il); cur = build_attn(inp_attn, gf, model.layers[il].wo, NULL, @@ -5901,33 +5939,36 @@ struct llm_build_qwen2 : public llm_graph_context { { // compute Q and K and RoPE them ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur); - cb(Qcur, "Qcur", il); Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq); cb(Qcur, "Qcur", il); ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur); - cb(Kcur, "Kcur", il); Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk); cb(Kcur, "Kcur", il); ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur); - cb(Vcur, "Vcur", il); Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv); cb(Vcur, "Vcur", il); + Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens); + Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens); + Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens); + Qcur = ggml_rope_ext( - ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos, nullptr, + ctx0, Qcur, inp_pos, nullptr, n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow ); - cb(Qcur, "Qcur", il); Kcur = ggml_rope_ext( - ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos, nullptr, + ctx0, Kcur, inp_pos, nullptr, n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow ); + + cb(Qcur, "Qcur", il); cb(Kcur, "Kcur", il); + cb(Vcur, "Vcur", il); cur = build_attn(inp_attn, gf, model.layers[il].wo, model.layers[il].bo, @@ -6019,35 +6060,36 @@ struct llm_build_qwen2vl : public llm_graph_context { { // compute Q and K and RoPE them ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur); - cb(Qcur, "Qcur", il); Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq); cb(Qcur, "Qcur", il); ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur); - cb(Kcur, "Kcur", il); Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk); cb(Kcur, "Kcur", il); ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur); - cb(Vcur, "Vcur", il); Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv); cb(Vcur, "Vcur", il); + Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens); + Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens); + Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens); + Qcur = ggml_rope_multi( 
- ctx0, - ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos, nullptr, + ctx0, Qcur, inp_pos, nullptr, n_rot, sections, rope_type, n_ctx_orig, freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow ); - cb(Qcur, "Qcur", il); Kcur = ggml_rope_multi( - ctx0, - ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos, nullptr, + ctx0, Kcur, inp_pos, nullptr, n_rot, sections, rope_type, n_ctx_orig, freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow ); + + cb(Qcur, "Qcur", il); cb(Kcur, "Kcur", il); + cb(Vcur, "Vcur", il); cur = build_attn(inp_attn, gf, model.layers[il].wo, model.layers[il].bo, @@ -6136,33 +6178,36 @@ struct llm_build_qwen2moe : public llm_graph_context { { // compute Q and K and RoPE them ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur); - cb(Qcur, "Qcur", il); Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq); cb(Qcur, "Qcur", il); ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur); - cb(Kcur, "Kcur", il); Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk); cb(Kcur, "Kcur", il); ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur); - cb(Vcur, "Vcur", il); Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv); cb(Vcur, "Vcur", il); + Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens); + Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens); + Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens); + Qcur = ggml_rope_ext( - ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos, nullptr, + ctx0, Qcur, inp_pos, nullptr, n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow ); - cb(Qcur, "Qcur", il); Kcur = ggml_rope_ext( - ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos, nullptr, + ctx0, Kcur, inp_pos, nullptr, n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow ); + + cb(Qcur, "Qcur", il); cb(Kcur, "Kcur", il); + cb(Vcur, "Vcur", il); cur = build_attn(inp_attn, gf, model.layers[il].wo, model.layers[il].bo, @@ -6307,23 +6352,27 @@ struct llm_build_phi2 : public llm_graph_context { Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens); Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens); + Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens); Qcur = ggml_rope_ext( - ctx0, Qcur, inp_pos, nullptr, n_rot, rope_type, n_ctx_orig, - freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow + ctx0, Qcur, inp_pos, nullptr, + n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, + ext_factor, attn_factor, beta_fast, beta_slow + ); + + Kcur = ggml_rope_ext( + ctx0, Kcur, inp_pos, nullptr, + n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, + ext_factor, attn_factor, beta_fast, beta_slow ); + cb(Qcur, "Qcur", il); + cb(Kcur, "Kcur", il); + cb(Vcur, "Vcur", il); // with phi2, we scale the Q to avoid precision issues // ref: https://github.com/ml-explore/mlx-examples/blob/08e862336ade809bc37d1035f94b359e7d1a5152/phi2/phi2.py#L64-L66 Qcur = ggml_scale(ctx0, Qcur, 1.0f/sqrtf(float(n_embd_head))); - cb(Qcur, "Qcur", il); - - Kcur = ggml_rope_ext( - ctx0, Kcur, inp_pos, nullptr, n_rot, rope_type, n_ctx_orig, - freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow - ); - cb(Kcur, "Kcur", il); cur = build_attn(inp_attn, gf, model.layers[il].wo, model.layers[il].bo, @@ -6433,21 +6482,26 @@ struct llm_build_phi3 : public llm_graph_context { Qcur = 
ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens); Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens); + Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens); Qcur = ggml_rope_ext( - ctx0, Qcur, inp_pos, rope_factors, n_rot, rope_type, n_ctx_orig, - freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow + ctx0, Qcur, inp_pos, rope_factors, + n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, + ext_factor, attn_factor, beta_fast, beta_slow ); - cb(Qcur, "Qcur", il); - - Qcur = ggml_scale(ctx0, Qcur, 1.0f / sqrtf(float(n_embd_head))); - cb(Qcur, "Qcur", il); Kcur = ggml_rope_ext( - ctx0, Kcur, inp_pos, rope_factors, n_rot, rope_type, n_ctx_orig, - freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow + ctx0, Kcur, inp_pos, rope_factors, + n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, + ext_factor, attn_factor, beta_fast, beta_slow ); + + cb(Qcur, "Qcur", il); cb(Kcur, "Kcur", il); + cb(Vcur, "Vcur", il); + + Qcur = ggml_scale(ctx0, Qcur, 1.0f / sqrtf(float(n_embd_head))); + cb(Qcur, "Qcur", il); cur = build_attn(inp_attn, gf, model.layers[il].wo, model.layers[il].bo, @@ -6564,17 +6618,25 @@ struct llm_build_plamo : public llm_graph_context { ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur); cb(Vcur, "Vcur", il); + Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens); + Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens); + Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens); + Qcur = ggml_rope_ext( - ctx0, ggml_reshape_3d(ctx0, Qcur, n_rot, n_head, n_tokens), inp_pos, nullptr, + ctx0, Qcur, inp_pos, nullptr, n_embd_head, rope_type, n_ctx_orig, freq_base, freq_scale, - ext_factor, attn_factor, beta_fast, beta_slow); - cb(Qcur, "Qcur", il); + ext_factor, attn_factor, beta_fast, beta_slow + ); Kcur = ggml_rope_ext( - ctx0, ggml_reshape_3d(ctx0, Kcur, n_rot, n_head_kv, n_tokens), inp_pos, nullptr, + ctx0, Kcur, inp_pos, nullptr, n_embd_head, rope_type, n_ctx_orig, freq_base, freq_scale, - ext_factor, attn_factor, beta_fast, beta_slow); + ext_factor, attn_factor, beta_fast, beta_slow + ); + + cb(Qcur, "Qcur", il); cb(Kcur, "Kcur", il); + cb(Vcur, "Vcur", il); cur = build_attn(inp_attn, gf, model.layers[il].wo, NULL, @@ -6679,7 +6741,9 @@ struct llm_build_gpt2 : public llm_graph_context { cb(Kcur, "Kcur", il); cb(Vcur, "Vcur", il); - Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens); + Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens); + Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens); + Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens); cur = build_attn(inp_attn, gf, model.layers[il].wo, model.layers[il].bo, @@ -6773,27 +6837,29 @@ struct llm_build_codeshell : public llm_graph_context { cur = ggml_add(ctx0, cur, model.layers[il].bqkv); cb(cur, "bqkv", il); - ggml_tensor * tmpq = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd))); - ggml_tensor * tmpk = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd))); + ggml_tensor * Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd))); + ggml_tensor * Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd))); ggml_tensor * Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd + n_embd_gqa))); - cb(tmpq, 
"tmpq", il); - cb(tmpk, "tmpk", il); - cb(Vcur, "Vcur", il); + Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens); + Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens); + Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens); - ggml_tensor * Qcur = ggml_rope_ext( - ctx0, ggml_reshape_3d(ctx0, tmpq, n_embd_head, n_head, n_tokens), inp_pos, nullptr, + Qcur = ggml_rope_ext( + ctx0, Qcur, inp_pos, nullptr, n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow ); - cb(Qcur, "Qcur", il); - ggml_tensor * Kcur = ggml_rope_ext( - ctx0, ggml_reshape_3d(ctx0, tmpk, n_embd_head, n_head_kv, n_tokens), inp_pos, nullptr, + Kcur = ggml_rope_ext( + ctx0, Kcur, inp_pos, nullptr, n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow ); + + cb(Qcur, "Qcur", il); cb(Kcur, "Kcur", il); + cb(Vcur, "Vcur", il); cur = build_attn(inp_attn, gf, model.layers[il].wo, model.layers[il].bo, @@ -6904,19 +6970,25 @@ struct llm_build_orion : public llm_graph_context { // cb(Vcur, "Vcur", il); // } + Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens); + Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens); + Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens); + Qcur = ggml_rope_ext( - ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos, nullptr, + ctx0, Qcur, inp_pos, nullptr, n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow ); - cb(Qcur, "Qcur", il); Kcur = ggml_rope_ext( - ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos, nullptr, + ctx0, Kcur, inp_pos, nullptr, n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow ); + + cb(Qcur, "Qcur", il); cb(Kcur, "Kcur", il); + cb(Vcur, "Vcur", il); cur = build_attn(inp_attn, gf, model.layers[il].wo, NULL, @@ -7025,19 +7097,25 @@ struct llm_build_internlm2 : public llm_graph_context { cb(Vcur, "Vcur", il); } + Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens); + Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens); + Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens); + Qcur = ggml_rope_ext( - ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos, nullptr, + ctx0, Qcur, inp_pos, nullptr, n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow ); - cb(Qcur, "Qcur", il); Kcur = ggml_rope_ext( - ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos, nullptr, + ctx0, Kcur, inp_pos, nullptr, n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow ); + + cb(Qcur, "Qcur", il); cb(Kcur, "Kcur", il); + cb(Vcur, "Vcur", il); cur = build_attn(inp_attn, gf, model.layers[il].wo, model.layers[il].bo, @@ -7311,7 +7389,7 @@ struct llm_build_minicpm3 : public llm_graph_context { struct llm_build_gemma : public llm_graph_context { llm_build_gemma(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) { - const int64_t n_embd_head_k = hparams.n_embd_head_k; + const int64_t n_embd_head = hparams.n_embd_head_v; ggml_tensor * cur; ggml_tensor * inpL; @@ -7345,20 +7423,26 @@ struct llm_build_gemma : public llm_graph_context { ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur); cb(Vcur, "Vcur", il); + Qcur = ggml_reshape_3d(ctx0, Qcur, 
n_embd_head, n_head, n_tokens); + Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens); + Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens); + Qcur = ggml_rope_ext( - ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head_k, n_head, n_tokens), inp_pos, nullptr, + ctx0, Qcur, inp_pos, nullptr, n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow); - cb(Qcur, "Qcur", il); - - Qcur = ggml_scale(ctx0, Qcur, 1.0f / sqrtf(float(n_embd_head_k))); - cb(Qcur, "Qcur_scaled", il); Kcur = ggml_rope_ext( - ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head_k, n_head_kv, n_tokens), inp_pos, nullptr, + ctx0, Kcur, inp_pos, nullptr, n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow); + + cb(Qcur, "Qcur", il); cb(Kcur, "Kcur", il); + cb(Vcur, "Vcur", il); + + Qcur = ggml_scale(ctx0, Qcur, 1.0f / sqrtf(float(n_embd_head))); + cb(Qcur, "Qcur_scaled", il); cur = build_attn(inp_attn, gf, model.layers[il].wo, NULL, @@ -7421,7 +7505,7 @@ struct llm_build_gemma : public llm_graph_context { struct llm_build_gemma2 : public llm_graph_context { llm_build_gemma2(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) { - const int64_t n_embd_head_k = hparams.n_embd_head_k; + const int64_t n_embd_head = hparams.n_embd_head_k; ggml_tensor * cur; ggml_tensor * inpL; @@ -7455,27 +7539,33 @@ struct llm_build_gemma2 : public llm_graph_context { ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur); cb(Vcur, "Vcur", il); + Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens); + Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens); + Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens); + Qcur = ggml_rope_ext( - ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head_k, n_head, n_tokens), inp_pos, nullptr, + ctx0, Qcur, inp_pos, nullptr, + n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, + ext_factor, attn_factor, beta_fast, beta_slow); + + Kcur = ggml_rope_ext( + ctx0, Kcur, inp_pos, nullptr, n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow); + cb(Qcur, "Qcur", il); + cb(Kcur, "Kcur", il); + cb(Vcur, "Vcur", il); // ref: https://github.com/google/gemma_pytorch/commit/03e657582d17cb5a8617ebf333c1c16f3694670e switch (model.type) { case LLM_TYPE_2B: - case LLM_TYPE_9B: Qcur = ggml_scale(ctx0, Qcur, 1.0f / sqrtf(float(n_embd_head_k))); break; - case LLM_TYPE_27B: Qcur = ggml_scale(ctx0, Qcur, 1.0f / sqrtf(float(n_embd / n_head))); break; + case LLM_TYPE_9B: + case LLM_TYPE_27B: Qcur = ggml_scale(ctx0, Qcur, 1.0f / sqrtf(float(n_embd_head))); break; default: GGML_ABORT("fatal error"); }; cb(Qcur, "Qcur_scaled", il); - Kcur = ggml_rope_ext( - ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head_k, n_head_kv, n_tokens), inp_pos, nullptr, - n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, - ext_factor, attn_factor, beta_fast, beta_slow); - cb(Kcur, "Kcur", il); - cur = build_attn(inp_attn, gf, model.layers[il].wo, NULL, Qcur, Kcur, Vcur, nullptr, 1.0f, il); @@ -7552,7 +7642,7 @@ struct llm_build_gemma2 : public llm_graph_context { struct llm_build_gemma3 : public llm_graph_context { llm_build_gemma3(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) { - const int64_t n_embd_head_k = hparams.n_embd_head_k; + const int64_t n_embd_head = hparams.n_embd_head_k; ggml_tensor * cur; ggml_tensor * inpL; @@ -7593,7 +7683,10 @@ 
struct llm_build_gemma3 : public llm_graph_context { ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur); cb(Vcur, "Vcur", il); - Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head_k, n_head, n_tokens); + Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens); + Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens); + Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens); + Qcur = build_norm(Qcur, model.layers[il].attn_q_norm, NULL, LLM_NORM_RMS, il); cb(Qcur, "Qcur_normed", il); @@ -7601,9 +7694,7 @@ struct llm_build_gemma3 : public llm_graph_context { ctx0, Qcur, inp_pos, nullptr, n_rot, rope_type, n_ctx_orig, freq_base_l, freq_scale_l, ext_factor, attn_factor, beta_fast, beta_slow); - cb(Qcur, "Qcur", il); - Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head_k, n_head_kv, n_tokens); Kcur = build_norm(Kcur, model.layers[il].attn_k_norm, NULL, LLM_NORM_RMS, il); cb(Kcur, "Kcur_normed", il); @@ -7611,7 +7702,10 @@ struct llm_build_gemma3 : public llm_graph_context { ctx0, Kcur, inp_pos, nullptr, n_rot, rope_type, n_ctx_orig, freq_base_l, freq_scale_l, ext_factor, attn_factor, beta_fast, beta_slow); + + cb(Qcur, "Qcur", il); cb(Kcur, "Kcur", il); + cb(Vcur, "Vcur", il); cur = build_attn(inp_attn, gf, model.layers[il].wo, NULL, @@ -7733,19 +7827,25 @@ struct llm_build_starcoder2 : public llm_graph_context { cb(Vcur, "Vcur", il); } + Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens); + Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens); + Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens); + Qcur = ggml_rope_ext( - ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos, nullptr, + ctx0, Qcur, inp_pos, nullptr, n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow ); - cb(Qcur, "Qcur", il); Kcur = ggml_rope_ext( - ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos, nullptr, + ctx0, Kcur, inp_pos, nullptr, n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow ); + + cb(Qcur, "Qcur", il); cb(Kcur, "Kcur", il); + cb(Vcur, "Vcur", il); cur = build_attn(inp_attn, gf, model.layers[il].wo, model.layers[il].bo, @@ -8046,24 +8146,25 @@ struct llm_build_command_r : public llm_graph_context { cb(Vcur, "Vcur", il); } - if (model.layers[il].attn_q_norm) { - Qcur = ggml_view_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens, - ggml_element_size(Qcur) * n_embd_head, - ggml_element_size(Qcur) * n_embd_head * n_head, - 0); - cb(Qcur, "Qcur", il); - Kcur = ggml_view_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens, - ggml_element_size(Kcur) * n_embd_head, - ggml_element_size(Kcur) * n_embd_head * n_head_kv, - 0); - cb(Kcur, "Kcur", il); + Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens); + Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens); + Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens); + if (model.layers[il].attn_q_norm) { Qcur = build_norm(Qcur, model.layers[il].attn_q_norm, NULL, LLM_NORM, il); cb(Qcur, "Qcur", il); + } + Qcur = ggml_rope_ext( + ctx0, Qcur, inp_pos, nullptr, + n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, + ext_factor, attn_factor, beta_fast, beta_slow + ); + + if (model.layers[il].attn_k_norm) { Kcur = build_norm(Kcur, model.layers[il].attn_k_norm, NULL, @@ -8071,19 +8172,15 @@ struct llm_build_command_r : public llm_graph_context { cb(Kcur, "Kcur", il); } - Qcur = ggml_rope_ext( - ctx0, 
ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos, nullptr, - n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, - ext_factor, attn_factor, beta_fast, beta_slow - ); - cb(Qcur, "Qcur", il); - Kcur = ggml_rope_ext( - ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos, nullptr, + ctx0, Kcur, inp_pos, nullptr, n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow ); + + cb(Qcur, "Qcur", il); cb(Kcur, "Kcur", il); + cb(Vcur, "Vcur", il); cur = build_attn(inp_attn, gf, model.layers[il].wo, model.layers[il].bo, @@ -8198,25 +8295,28 @@ struct llm_build_cohere2 : public llm_graph_context { cb(Vcur, "Vcur", il); } - if (is_swa) { - Qcur = ggml_rope_ext(ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos, rope_factors, - n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, ext_factor, attn_factor, - beta_fast, beta_slow); - cb(Qcur, "Qcur", il); + Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens); + Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens); + Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens); - Kcur = ggml_rope_ext(ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos, - rope_factors, n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, ext_factor, - attn_factor, beta_fast, beta_slow); - cb(Kcur, "Kcur", il); - } else { - // For non-sliding layers, just reshape without applying RoPE - Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens); - cb(Qcur, "Qcur", il); + if (is_swa) { + Qcur = ggml_rope_ext( + ctx0, Qcur, inp_pos, rope_factors, + n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, + ext_factor, attn_factor, beta_fast, beta_slow + ); - Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens); - cb(Kcur, "Kcur", il); + Kcur = ggml_rope_ext( + ctx0, Kcur, inp_pos, rope_factors, + n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, + ext_factor, attn_factor, beta_fast, beta_slow + ); } + cb(Qcur, "Qcur", il); + cb(Kcur, "Kcur", il); + cb(Vcur, "Vcur", il); + cur = build_attn(inp_attn, gf, model.layers[il].wo, model.layers[il].bo, Qcur, Kcur, Vcur, nullptr, 1.0f/sqrtf(float(n_embd_head)), il); @@ -8328,19 +8428,25 @@ struct llm_build_olmo : public llm_graph_context { cb(Vcur, "Vcur", il); } + Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens); + Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens); + Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens); + Qcur = ggml_rope_ext( - ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos, nullptr, + ctx0, Qcur, inp_pos, nullptr, n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow ); - cb(Qcur, "Qcur", il); Kcur = ggml_rope_ext( - ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos, nullptr, + ctx0, Kcur, inp_pos, nullptr, n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow ); + + cb(Qcur, "Qcur", il); cb(Kcur, "Kcur", il); + cb(Vcur, "Vcur", il); cur = build_attn(inp_attn, gf, model.layers[il].wo, nullptr, @@ -8442,22 +8548,25 @@ struct llm_build_olmo2 : public llm_graph_context { LLM_NORM_RMS, il); cb(Kcur, "Kcur_normed", il); - Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens); + Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens); Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens); + Vcur = 
ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens); Qcur = ggml_rope_ext( ctx0, Qcur, inp_pos, nullptr, n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow ); - cb(Qcur, "Qcur_rope", il); Kcur = ggml_rope_ext( ctx0, Kcur, inp_pos, nullptr, n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow ); - cb(Kcur, "Kcur_rope", il); + + cb(Qcur, "Qcur", il); + cb(Kcur, "Kcur", il); + cb(Vcur, "Vcur", il); cur = build_attn(inp_attn, gf, model.layers[il].wo, NULL, @@ -8572,22 +8681,25 @@ struct llm_build_olmoe : public llm_graph_context { LLM_NORM_RMS, il); cb(Kcur, "Kcur_normed", il); - Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens); + Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens); Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens); + Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens); Qcur = ggml_rope_ext( ctx0, Qcur, inp_pos, nullptr, n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow ); - cb(Qcur, "Qcur_rope", il); Kcur = ggml_rope_ext( ctx0, Kcur, inp_pos, nullptr, n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow ); - cb(Kcur, "Kcur_rope", il); + + cb(Qcur, "Qcur", il); + cb(Kcur, "Kcur", il); + cb(Vcur, "Vcur", il); cur = build_attn(inp_attn, gf, model.layers[il].wo, NULL, @@ -8687,7 +8799,7 @@ struct llm_build_openelm : public llm_graph_context { cur = ggml_reshape_3d(ctx0, cur, n_embd_head_k, n_head_qkv, n_tokens); - ggml_tensor * Qcur = ggml_cont(ctx0, ggml_view_3d(ctx0, cur, n_embd_head, n_head, n_tokens, cur->nb[1], cur->nb[2], 0)); + ggml_tensor * Qcur = ggml_cont(ctx0, ggml_view_3d(ctx0, cur, n_embd_head, n_head, n_tokens, cur->nb[1], cur->nb[2], 0)); cb(Qcur, "Qcur", il); ggml_tensor * Kcur = ggml_cont(ctx0, ggml_view_3d(ctx0, cur, n_embd_head, n_head_kv, n_tokens, cur->nb[1], cur->nb[2], cur->nb[1]*n_head)); @@ -8707,18 +8819,19 @@ struct llm_build_openelm : public llm_graph_context { cb(Kcur, "Kcur", il); Qcur = ggml_rope_ext( - ctx0, Qcur, inp_pos, NULL, n_rot, rope_type, n_ctx_orig, - freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow + ctx0, Qcur, inp_pos, NULL, + n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, + ext_factor, attn_factor, beta_fast, beta_slow ); - cb(Qcur, "Qcur", il); Kcur = ggml_rope_ext( - ctx0, Kcur, inp_pos, NULL, n_rot, rope_type, n_ctx_orig, - freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow + ctx0, Kcur, inp_pos, NULL, + n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, + ext_factor, attn_factor, beta_fast, beta_slow ); - cb(Kcur, "Kcur", il); - Vcur = ggml_reshape_2d(ctx0, Vcur, n_embd_head * n_head_kv, n_tokens); + cb(Qcur, "Qcur", il); + cb(Kcur, "Kcur", il); cb(Qcur, "Vcur", il); cur = build_attn(inp_attn, gf, @@ -8815,23 +8928,25 @@ struct llm_build_gptneox : public llm_graph_context { ggml_tensor * Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd))); ggml_tensor * Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd + n_embd_gqa))); - cb(Qcur, "Qcur", il); - cb(Kcur, "Kcur", il); - cb(Vcur, "Vcur", il); + Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens); + Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens); + Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens); Qcur = 
ggml_rope_ext( - ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos, nullptr, + ctx0, Qcur, inp_pos, nullptr, n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow ); - cb(Qcur, "Qcur", il); Kcur = ggml_rope_ext( - ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos, nullptr, + ctx0, Kcur, inp_pos, nullptr, n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow ); + + cb(Qcur, "Qcur", il); cb(Kcur, "Kcur", il); + cb(Vcur, "Vcur", il); cur = build_attn(inp_attn, gf, model.layers[il].wo, model.layers[il].bo, @@ -8963,19 +9078,25 @@ struct llm_build_arctic : public llm_graph_context { ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur); cb(Vcur, "Vcur", il); + Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens); + Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens); + Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens); + Qcur = ggml_rope_ext( - ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos, nullptr, + ctx0, Qcur, inp_pos, nullptr, n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow ); - cb(Qcur, "Qcur", il); Kcur = ggml_rope_ext( - ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos, nullptr, + ctx0, Kcur, inp_pos, nullptr, n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow ); + + cb(Qcur, "Qcur", il); cb(Kcur, "Kcur", il); + cb(Vcur, "Vcur", il); cur = build_attn(inp_attn, gf, model.layers[il].wo, NULL, @@ -9112,19 +9233,25 @@ struct llm_build_deepseek : public llm_graph_context { cb(Vcur, "Vcur", il); } + Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens); + Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens); + Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens); + Qcur = ggml_rope_ext( - ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos, rope_factors, + ctx0, Qcur, inp_pos, rope_factors, n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow ); - cb(Qcur, "Qcur", il); Kcur = ggml_rope_ext( - ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos, rope_factors, + ctx0, Kcur, inp_pos, rope_factors, n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow ); + + cb(Qcur, "Qcur", il); cb(Kcur, "Kcur", il); + cb(Vcur, "Vcur", il); cur = build_attn(inp_attn, gf, model.layers[il].wo, model.layers[il].bo, @@ -9502,19 +9629,25 @@ struct llm_build_bitnet : public llm_graph_context { cb(Vcur, "Vcur", il); } + Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens); + Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens); + Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens); + Qcur = ggml_rope_ext( - ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos, nullptr, + ctx0, Qcur, inp_pos, nullptr, n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow ); - cb(Qcur, "Qcur", il); Kcur = ggml_rope_ext( - ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos, nullptr, + ctx0, Kcur, inp_pos, nullptr, n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow ); + + cb(Qcur, "Qcur", il); cb(Kcur, "Kcur", il); + cb(Vcur, "Vcur", 
il); cur = build_attn(inp_attn, gf, NULL, NULL, @@ -9906,7 +10039,9 @@ struct llm_build_jais : public llm_graph_context { cb(Kcur, "Kcur", il); cb(Vcur, "Vcur", il); - Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens); + Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens); + Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens); + Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens); cur = build_attn(inp_attn, gf, model.layers[il].wo, model.layers[il].bo, @@ -10019,29 +10154,30 @@ struct llm_build_chatglm : public llm_graph_context { Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd + n_embd_gqa))); } - cb(Qcur, "Qcur", il); - cb(Kcur, "Kcur", il); - cb(Vcur, "Vcur", il); + Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens); + Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens); + Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens); //printf("freq_base: %f freq_scale: %f ext_factor: %f attn_factor: %f\n", freq_base, freq_scale, ext_factor, attn_factor); Qcur = ggml_rope_ext( - ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos, nullptr, + ctx0, Qcur, inp_pos, nullptr, n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow ); - cb(Qcur, "Qcur_rope", il); Kcur = ggml_rope_ext( - ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos, nullptr, + ctx0, Kcur, inp_pos, nullptr, n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow ); - cb(Kcur, "Kcur_rope", il); + + cb(Qcur, "Qcur", il); + cb(Kcur, "Kcur", il); + cb(Vcur, "Vcur", il); cur = build_attn(inp_attn, gf, model.layers[il].wo, NULL, Qcur, Kcur, Vcur, nullptr, 1.0f/sqrtf(float(n_embd_head)), il); - } if (il == n_layer - 1) { @@ -10145,19 +10281,25 @@ struct llm_build_nemotron : public llm_graph_context { cb(Vcur, "Vcur", il); } + Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens); + Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens); + Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens); + Qcur = ggml_rope_ext( - ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos, nullptr, + ctx0, Qcur, inp_pos, nullptr, n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow ); - cb(Qcur, "Qcur", il); Kcur = ggml_rope_ext( - ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos, nullptr, + ctx0, Kcur, inp_pos, nullptr, n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow ); + + cb(Qcur, "Qcur", il); cb(Kcur, "Kcur", il); + cb(Vcur, "Vcur", il); cur = build_attn(inp_attn, gf, model.layers[il].wo, model.layers[il].bo, @@ -10270,19 +10412,25 @@ struct llm_build_exaone : public llm_graph_context { cb(Vcur, "Vcur", il); } + Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens); + Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens); + Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens); + Qcur = ggml_rope_ext( - ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos, rope_factors, + ctx0, Qcur, inp_pos, rope_factors, n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow ); - cb(Qcur, "Qcur", il); Kcur = ggml_rope_ext( - ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, 
n_tokens), inp_pos, rope_factors, + ctx0, Kcur, inp_pos, rope_factors, n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow ); + + cb(Qcur, "Qcur", il); cb(Kcur, "Kcur", il); + cb(Vcur, "Vcur", il); cur = build_attn(inp_attn, gf, model.layers[il].wo, model.layers[il].bo, @@ -11166,19 +11314,25 @@ struct llm_build_chameleon : public llm_graph_context { cb(Kcur, "Kcur", il); } + Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens); + Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens); + Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens); + Qcur = ggml_rope_ext( - ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos, nullptr, + ctx0, Qcur, inp_pos, nullptr, n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow ); - cb(Qcur, "Qcur", il); Kcur = ggml_rope_ext( - ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos, nullptr, + ctx0, Kcur, inp_pos, nullptr, n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow ); + + cb(Qcur, "Qcur", il); cb(Kcur, "Kcur", il); + cb(Vcur, "Vcur", il); cur = build_attn(inp_attn, gf, model.layers[il].wo, nullptr, From d84635b1b085d54d6a21924e6171688d6e3dfb46 Mon Sep 17 00:00:00 2001 From: lhez Date: Tue, 18 Mar 2025 12:54:55 -0700 Subject: [PATCH 085/398] opencl: improve profiling (#12442) * opencl: more profiling timing * opencl: generate trace for profiling * opencl: reduce profiling overhead * Populate profiling timing info at the end rather than after each kernel run * opencl: fix for chrome tracing --- ggml/src/ggml-opencl/ggml-opencl.cpp | 117 ++++++++++++++++++++++----- 1 file changed, 95 insertions(+), 22 deletions(-) diff --git a/ggml/src/ggml-opencl/ggml-opencl.cpp b/ggml/src/ggml-opencl/ggml-opencl.cpp index 14d9934fb1b73..efaf7f4790797 100644 --- a/ggml/src/ggml-opencl/ggml-opencl.cpp +++ b/ggml/src/ggml-opencl/ggml-opencl.cpp @@ -297,8 +297,27 @@ static int ggml_backend_opencl_n_devices = 0; struct ProfilingInfo { std::string op_name; std::string kernel_name; - // Kernel execution time in nanoseconds. - cl_ulong duration_ns; + + cl_kernel kernel; + cl_event evt; + + cl_ulong cmd_queued; + cl_ulong cmd_submit; + cl_ulong cmd_start; + cl_ulong cmd_end; + cl_ulong overhead_start; + cl_ulong overhead_end; + // For the times below, see spec for clGetEventProfilingInfo + // The time kernel spent in cmd queue - SUBMIT - QUEUED + cl_ulong cmd_queued_duration_ns; + // The time kernel spent for submission - START - SUBMIT + cl_ulong cmd_submit_duration_ns; + // Kernel execution time in nanoseconds - END - START + cl_ulong cmd_duration_ns; + // The time for the kernel to complete - COMPLETE - END + cl_ulong cmd_complete_duration_ns; + // Total time to finish the kernel - COMPELTE - QUEUED + cl_ulong cmd_total_duration_ns; // Global and local work sizes. 
size_t global_size[3]; size_t local_size[3]; @@ -903,12 +922,56 @@ static void ggml_cl2_free(void) { return; } + // Populate profiling info + for (ProfilingInfo & info : g_profiling_info) { + cl_ulong cmd_queued; + cl_ulong cmd_submit; + cl_ulong cmd_start; + cl_ulong cmd_end; + cl_ulong cmd_complete; + + CL_CHECK(clWaitForEvents(1, &info.evt)); + CL_CHECK(clGetEventProfilingInfo( + info.evt, CL_PROFILING_COMMAND_QUEUED, sizeof(cl_ulong), &cmd_queued, NULL)); + CL_CHECK(clGetEventProfilingInfo( + info.evt, CL_PROFILING_COMMAND_SUBMIT, sizeof(cl_ulong), &cmd_submit, NULL)); + CL_CHECK(clGetEventProfilingInfo( + info.evt, CL_PROFILING_COMMAND_START, sizeof(cl_ulong), &cmd_start, NULL)); + CL_CHECK(clGetEventProfilingInfo( + info.evt, CL_PROFILING_COMMAND_END, sizeof(cl_ulong), &cmd_end, NULL)); + CL_CHECK(clGetEventProfilingInfo( + info.evt, CL_PROFILING_COMMAND_COMPLETE, sizeof(cl_ulong), &cmd_complete, NULL)); + CL_CHECK(clReleaseEvent(info.evt)); + + char kernel_name[512]; + CL_CHECK(clGetKernelInfo(info.kernel, CL_KERNEL_FUNCTION_NAME, + sizeof(kernel_name), kernel_name, NULL)); + info.kernel_name = kernel_name; + + info.cmd_queued = cmd_queued; + info.cmd_submit = cmd_submit; + info.cmd_start = cmd_start; + info.cmd_end = cmd_end; + + info.cmd_queued_duration_ns = cmd_submit - cmd_queued; + info.cmd_submit_duration_ns = cmd_start - cmd_submit; + info.cmd_duration_ns = cmd_end - cmd_start; + info.cmd_complete_duration_ns = cmd_complete - cmd_end; + info.cmd_total_duration_ns = cmd_complete - cmd_queued; + } + + // Dump a csv float total_kernel_time = 0; - fprintf(fperf, "op name, kernel name, duration (ms), global size, local size, output size\n"); + fprintf(fperf, "op name, kernel name, queued duration (ms), submit duration(ms), exec duration (ms), complete duration (ms), total duration (ms), global size, local size, output size\n"); for (const ProfilingInfo & info : g_profiling_info) { - total_kernel_time += info.duration_ns/1.e6f; - fprintf(fperf, "%s,%s,%f,%zux%zux%zu,%zux%zux%zu,%zux%zux%zux%zu\n", - info.op_name.c_str(), info.kernel_name.c_str(), info.duration_ns/1.e6f, + total_kernel_time += info.cmd_duration_ns/1.e6f; + fprintf(fperf, "%s,%s,%f,%f,%f,%f,%f,%zux%zux%zu,%zux%zux%zu,%zux%zux%zux%zu\n", + info.op_name.c_str(), info.kernel_name.c_str(), + info.cmd_queued_duration_ns/1.e6f, + info.cmd_submit_duration_ns/1.e6f, + info.cmd_duration_ns/1.e6f, + info.cmd_complete_duration_ns/1.e6f, + info.cmd_total_duration_ns/1.e6f, info.global_size[0], info.global_size[1], info.global_size[2], info.local_size[0], info.local_size[2], info.local_size[2], info.output_size[0], info.output_size[1], info.output_size[2], info.output_size[3]); @@ -916,6 +979,27 @@ static void ggml_cl2_free(void) { fclose(fperf); GGML_LOG_INFO("ggml_opencl: total kernel time: %f\n", total_kernel_time); + + // Dump a simple chrome trace + FILE* ftrace = fopen("cl_trace.json", "w"); + if (!ftrace) { + GGML_LOG_ERROR("Failed to open cl_trace.json\n"); + return; + } + + fprintf(ftrace, "[\n"); + for (const ProfilingInfo & info : g_profiling_info) { + fprintf(ftrace, "{\"name\": \"%s\", \"cat\": \"OpenCL\", \"ph\": \"B\", \"ts\": %lu, \"pid\": \"\", \"tid\": \"Host\"},\n", + info.kernel_name.c_str(), info.cmd_queued/1000); + fprintf(ftrace, "{\"name\": \"%s\", \"cat\": \"OpenCL\", \"ph\": \"E\", \"ts\": %lu, \"pid\": \"\", \"tid\": \"Host\"},\n", + info.kernel_name.c_str(), info.cmd_submit/1000); + + fprintf(ftrace, "{\"name\": \"%s\", \"cat\": \"OpenCL\", \"ph\": \"B\", \"ts\": %lu, \"pid\": \"\", \"tid\": 
\"Device\"},\n", + info.kernel_name.c_str(), info.cmd_start/1000); + fprintf(ftrace, "{\"name\": \"%s\", \"cat\": \"OpenCL\", \"ph\": \"E\", \"ts\": %lu, \"pid\": \"\", \"tid\": \"Device\"},\n", + info.kernel_name.c_str(), info.cmd_end/1000); + } + fclose(ftrace); #endif } @@ -2062,25 +2146,14 @@ static void dump_tensor(ggml_backend_t backend, const struct ggml_tensor * tenso // Profiling utility //------------------------------------------------------------------------------ #ifdef GGML_OPENCL_PROFILING -void populateProfilingInfo( +static void populateProfilingInfo( ProfilingInfo& info, cl_event evt, cl_kernel kernel, size_t global_size[3], size_t local_size[3], const ggml_tensor * tensor) { - cl_ulong start; - cl_ulong end; - CL_CHECK(clWaitForEvents(1, &evt)); - CL_CHECK(clGetEventProfilingInfo( - evt, CL_PROFILING_COMMAND_START, sizeof(cl_ulong), &start, NULL)); - CL_CHECK(clGetEventProfilingInfo( - evt, CL_PROFILING_COMMAND_END, sizeof(cl_ulong), &end, NULL)); - - char kernel_name[512]; - CL_CHECK(clGetKernelInfo(kernel, CL_KERNEL_FUNCTION_NAME, - sizeof(kernel_name), kernel_name, NULL)); - - info.duration_ns = end - start; - info.op_name = tensor->name; - info.kernel_name = kernel_name; + info.op_name = tensor->name; + info.kernel = kernel; + info.evt = evt; + info.local_size[0] = local_size[0]; info.local_size[1] = local_size[1]; info.local_size[2] = local_size[2]; From c446b2edd2a9fe2772a1a18923c3e54a6749c364 Mon Sep 17 00:00:00 2001 From: Jeff Bolz Date: Wed, 19 Mar 2025 02:26:26 -0500 Subject: [PATCH 086/398] vulkan: Submit once enough matmul work has been recorded (#12406) I've been seeing significantly worse performance for tg with flash attention enabled vs disabled, and it seems to be related to the submit heuristic. Change the heuristic to check how many bytes worth of weight matrix are used and flush every 100MB, and ramp up after the first few submits. This seems to resolve the issue, and also increases perf for non-FA a bit. --- ggml/src/ggml-vulkan/ggml-vulkan.cpp | 32 ++++++++++++++++++---------- 1 file changed, 21 insertions(+), 11 deletions(-) diff --git a/ggml/src/ggml-vulkan/ggml-vulkan.cpp b/ggml/src/ggml-vulkan/ggml-vulkan.cpp index dd680aa522438..d450fe9a2f2f6 100644 --- a/ggml/src/ggml-vulkan/ggml-vulkan.cpp +++ b/ggml/src/ggml-vulkan/ggml-vulkan.cpp @@ -8436,8 +8436,12 @@ static ggml_status ggml_backend_vk_graph_compute(ggml_backend_t backend, ggml_cg VK_LOG_DEBUG("ggml_backend_vk_graph_compute(" << cgraph->n_nodes << " nodes)"); ggml_backend_vk_context * ctx = (ggml_backend_vk_context *)backend->context; + uint64_t total_mat_mul_bytes = 0; for (int i = 0; i < cgraph->n_nodes; i++) { ggml_vk_build_graph(ctx, cgraph->nodes[i], i, nullptr, 0, true, false, false); + if (cgraph->nodes[i]->op == GGML_OP_MUL_MAT || cgraph->nodes[i]->op == GGML_OP_MUL_MAT_ID) { + total_mat_mul_bytes += ggml_nbytes(cgraph->nodes[i]->src[0]); + } } if (ctx->device->need_compiles) { ggml_vk_load_shaders(ctx->device); @@ -8458,17 +8462,27 @@ static ggml_status ggml_backend_vk_graph_compute(ggml_backend_t backend, ggml_cg bool first_node_in_batch = true; // true if next node will be first node in a batch int submit_node_idx = 0; // index to first node in a batch - // Submit work every nodes_per_submit nodes to overlap CPU cmdbuffer generation with GPU execution. - // Start with a smaller count to get work submitted right away, and increase it after each submit. 
- int nodes_per_submit = 20; + // Submit after enough work has accumulated, to overlap CPU cmdbuffer generation with GPU execution. + // Estimate the amount of matmul work by looking at the weight matrix size, and submit every 100MB + // (and scaled down based on model size, so smaller models submit earlier). + // Also submit at least every 100 nodes, in case there are workloads without as much matmul. + int nodes_per_submit = 100; int submitted_nodes = 0; int submit_count = 0; + uint64_t mul_mat_bytes = 0; + uint64_t mul_mat_bytes_per_submit = std::min(uint64_t(100*1000*1000), total_mat_mul_bytes / 40u); for (int i = 0; i < cgraph->n_nodes; i++) { if (first_node_in_batch) { submit_node_idx = i; } - bool submit = (submitted_nodes >= nodes_per_submit) || (i == last_node); + if (cgraph->nodes[i]->op == GGML_OP_MUL_MAT || cgraph->nodes[i]->op == GGML_OP_MUL_MAT_ID) { + mul_mat_bytes += ggml_nbytes(cgraph->nodes[i]->src[0]); + } + + bool submit = (submitted_nodes >= nodes_per_submit) || + (mul_mat_bytes >= mul_mat_bytes_per_submit) || + (i == last_node); bool enqueued = ggml_vk_build_graph(ctx, cgraph->nodes[i], i, cgraph->nodes[submit_node_idx], submit_node_idx, false, i == last_node, submit); @@ -8485,13 +8499,9 @@ static ggml_status ggml_backend_vk_graph_compute(ggml_backend_t backend, ggml_cg if (submit) { first_node_in_batch = true; submitted_nodes = 0; - switch (submit_count) { - case 0: - nodes_per_submit = 50; - break; - default: - nodes_per_submit = 100; - break; + mul_mat_bytes = 0; + if (submit_count < 3) { + mul_mat_bytes_per_submit *= 2; } submit_count++; } From a686171ea71ed8cb8a324850d146cb65a001e141 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sigbj=C3=B8rn=20Skj=C3=A6ret?= Date: Wed, 19 Mar 2025 08:58:13 +0100 Subject: [PATCH 087/398] convert : Support chat_template.json (#12460) --- gguf-py/gguf/vocab.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/gguf-py/gguf/vocab.py b/gguf-py/gguf/vocab.py index 2ef7d14ab15c0..cca0979862a71 100644 --- a/gguf-py/gguf/vocab.py +++ b/gguf-py/gguf/vocab.py @@ -154,7 +154,12 @@ def _try_load_from_tokenizer_json(self, path: Path) -> bool: return True with open(tokenizer_config_file, encoding = 'utf-8') as f: tokenizer_config = json.load(f) - chat_template = tokenizer_config.get('chat_template') + chat_template_alt = None + chat_template_file = path / 'chat_template.json' + if chat_template_file.is_file(): + with open(chat_template_file, encoding = 'utf-8') as f: + chat_template_alt = json.load(f).get('chat_template') + chat_template = tokenizer_config.get('chat_template', chat_template_alt) if chat_template is None or isinstance(chat_template, (str, list)): self.chat_template = chat_template else: From 108e53c2f1b57b79c8bcf9773fba5a82e2f1eeb5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sigbj=C3=B8rn=20Skj=C3=A6ret?= Date: Wed, 19 Mar 2025 09:08:49 +0100 Subject: [PATCH 088/398] llama : add support for GPT2, Bloom and CodeShell tied word embeddings (#12456) * Add support for GPT2, Bloom and CodeShell tied word embeddings * Deduplicate tied word embeddings weights * Workaround for incorrect weight map It appears transformer.wte.weight is in the weight map even though the weights are not there, remove it if output weights are encountered first. 
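Dropping the duplicated copy is safe because the loader now treats the output matrix as optional and falls back to the token embedding when it is absent. A condensed, illustrative sketch of that pattern (names here are made up; the real code is in the llama-model.cpp hunks below):

    // Sketch only: the output projection becomes optional, and when it is
    // missing the token embedding matrix is reused instead of being written
    // to the GGUF file twice.
    struct ggml_tensor;                        // opaque, as in ggml

    struct tied_embd_example {
        ggml_tensor * tok_embd = nullptr;      // always present
        ggml_tensor * output   = nullptr;      // null when the weights are tied

        ggml_tensor * effective_output() const {
            return output != nullptr ? output : tok_embd;   // tied word embeddings
        }
    };
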
* check++ * fatfingers-- --- convert_hf_to_gguf.py | 37 ++++++++++++++++--------------------- src/llama-model.cpp | 21 ++++++++++++++++++--- 2 files changed, 34 insertions(+), 24 deletions(-) diff --git a/convert_hf_to_gguf.py b/convert_hf_to_gguf.py index 7a2ef4c7e38ce..7574218e241d4 100755 --- a/convert_hf_to_gguf.py +++ b/convert_hf_to_gguf.py @@ -180,7 +180,8 @@ def get_tensors(self) -> Iterator[tuple[str, Tensor]]: extra = sorted(tensor_names_from_parts.difference(self.tensor_names)) missing_files = sorted(set(weight_map[n] for n in missing if n in weight_map)) if len(extra) == 0 and len(missing_files) > 0: - raise ValueError(f"Missing or incomplete model files: {missing_files}") + raise ValueError(f"Missing or incomplete model files: {missing_files}\n" + f"Missing tensors: {missing}") else: raise ValueError("Mismatch between weight map and model parts for tensor names:\n" f"Missing tensors: {missing}\n" @@ -1099,13 +1100,6 @@ def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iter tensors.append((self.map_tensor_name(name), data_torch)) - if name == "word_embeddings.weight": - assert self.tensor_names is not None - - # TODO: tie them at runtime, don't duplicate in the model file - if all(s not in self.tensor_names for s in ("lm_head.weight", "output.weight")): - tensors.append((self.format_tensor_name(gguf.MODEL_TENSOR.OUTPUT), data_torch)) - return tensors @@ -2423,10 +2417,6 @@ def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iter tensors.append((new_name, data_torch)) - # note: GPT2 output is tied to (same as) wte in original model - if new_name == self.format_tensor_name(gguf.MODEL_TENSOR.TOKEN_EMBD): - tensors.append((self.format_tensor_name(gguf.MODEL_TENSOR.OUTPUT), data_torch)) - return tensors @@ -2756,21 +2746,26 @@ def set_gguf_parameters(self): self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.LINEAR) self.gguf_writer.add_rope_scaling_factor(1.0) + _has_tok_embd = False + def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: del bid # unused - new_name = self.map_tensor_name(name) - - tensors: list[tuple[str, Tensor]] = [(new_name, data_torch)] + output_name = self.format_tensor_name(gguf.MODEL_TENSOR.OUTPUT) + tok_embd_name = self.format_tensor_name(gguf.MODEL_TENSOR.TOKEN_EMBD) - if new_name == self.format_tensor_name(gguf.MODEL_TENSOR.TOKEN_EMBD): - assert self.tensor_names is not None + new_name = self.map_tensor_name(name) - if all(s not in self.tensor_names for s in ("lm_head.weight", "output.weight")): - # copy tok_embd.weight to output.weight - tensors.append((self.format_tensor_name(gguf.MODEL_TENSOR.OUTPUT), data_torch)) + # assuming token_embd.weight is seen before output.weight + if not self._has_tok_embd and new_name == self.format_tensor_name(gguf.MODEL_TENSOR.OUTPUT): + # even though the tensor file(s) does not contain the word embeddings they are still in the weight map + if self.tensor_names and "transformer.wte.weight" in self.tensor_names: + logger.debug(f"{tok_embd_name} not found before {output_name}, assuming they are tied") + self.tensor_names.remove("transformer.wte.weight") + elif new_name == tok_embd_name: + self._has_tok_embd = True - return tensors + return [(new_name, data_torch)] @Model.register("InternLM2ForCausalLM") diff --git a/src/llama-model.cpp b/src/llama-model.cpp index d286176c1ff83..17af8cc30b0cb 100644 --- a/src/llama-model.cpp +++ b/src/llama-model.cpp @@ -2020,7 +2020,12 @@ bool 
llama_model::load_tensors(llama_model_loader & ml) { // output output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0); output_norm_b = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd}, 0); - output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, 0); + output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED); + + // if output is NULL, init from the input tok embed + if (output == NULL) { + output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED); + } for (int i = 0; i < n_layer; ++i) { auto & layer = layers[i]; @@ -2381,7 +2386,12 @@ bool llama_model::load_tensors(llama_model_loader & ml) { // output output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0); output_norm_b = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd}, 0); - output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, 0); + output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED); + + // if output is NULL, init from the input tok embed + if (output == NULL) { + output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED); + } for (int i = 0; i < n_layer; ++i) { auto & layer = layers[i]; @@ -2407,7 +2417,12 @@ bool llama_model::load_tensors(llama_model_loader & ml) { } break; case LLM_ARCH_CODESHELL: { - tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0); + tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED); + + // if tok embd is NULL, init from output + if (tok_embd == NULL) { + tok_embd = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED); + } // output output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0); From 0fd8487b142b2b92565bc95b39ddc440955a237c Mon Sep 17 00:00:00 2001 From: Guus Waals <_@guusw.nl> Date: Wed, 19 Mar 2025 10:15:23 +0000 Subject: [PATCH 089/398] Fix visionOS build and add CI (#12415) * ci: add visionOS build workflow Add a new GitHub Actions workflow for building on visionOS with CMake and Xcode. 
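The ggml change described in the next bullet defines _DARWIN_C_SOURCE so the BSD u_xxx typedefs come from the SDK headers again. A minimal illustration of why that removes the need for the old -Du_int=... flag hacks (assumes a Darwin SDK; not taken from this patch):

    // The xcframework build passes -D_XOPEN_SOURCE=700, which hides the BSD
    // typedefs in <sys/types.h>; also defining _DARWIN_C_SOURCE (as the ggml
    // CMake change below does) makes them visible again.
    #define _XOPEN_SOURCE 700
    #define _DARWIN_C_SOURCE
    #include <sys/types.h>

    int main(void) {
        u_int   a = 1;
        u_char  b = 2;
        u_short c = 3;
        return (a + b + c) == 6 ? 0 : 1;   // compiles only because the typedefs are visible
    }
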
* ggml: Define _DARWIN_C_SOURCE for visionOS to fix missing u_xxx typedefs * ci: remove define hacks for u_xxx system types --------- Co-authored-by: Giovanni Petrantoni <7008900+sinkingsugar@users.noreply.github.com> --- .github/workflows/build.yml | 29 +++++++++++++++++++++++++++++ build-xcframework.sh | 8 ++++---- ggml/src/CMakeLists.txt | 4 ++++ 3 files changed, 37 insertions(+), 4 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 03cde0a48436f..7db85528659d3 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -676,6 +676,35 @@ jobs: -DCMAKE_XCODE_ATTRIBUTE_DEVELOPMENT_TEAM=ggml cmake --build build --config Release -j $(sysctl -n hw.logicalcpu) -- CODE_SIGNING_ALLOWED=NO + macOS-latest-cmake-visionos: + runs-on: macos-latest + + steps: + - name: Clone + id: checkout + uses: actions/checkout@v4 + + - name: Dependencies + id: depends + continue-on-error: true + run: | + brew update + + - name: Build + id: cmake_build + run: | + sysctl -a + cmake -B build -G Xcode \ + -DGGML_METAL_USE_BF16=ON \ + -DGGML_METAL_EMBED_LIBRARY=ON \ + -DLLAMA_BUILD_EXAMPLES=OFF \ + -DLLAMA_BUILD_TESTS=OFF \ + -DLLAMA_BUILD_SERVER=OFF \ + -DCMAKE_SYSTEM_NAME=visionOS \ + -DCMAKE_OSX_DEPLOYMENT_TARGET=1.0 \ + -DCMAKE_XCODE_ATTRIBUTE_DEVELOPMENT_TEAM=ggml + cmake --build build --config Release -j $(sysctl -n hw.logicalcpu) -- CODE_SIGNING_ALLOWED=NO + macOS-latest-swift: runs-on: macos-latest diff --git a/build-xcframework.sh b/build-xcframework.sh index 37833dc4eabcb..2ce3939c43d6c 100755 --- a/build-xcframework.sh +++ b/build-xcframework.sh @@ -432,8 +432,8 @@ cmake -B build-visionos -G Xcode \ -DCMAKE_SYSTEM_NAME=visionOS \ -DCMAKE_OSX_SYSROOT=xros \ -DCMAKE_XCODE_ATTRIBUTE_SUPPORTED_PLATFORMS=xros \ - -DCMAKE_C_FLAGS="-D_XOPEN_SOURCE=700 -Du_int=unsigned\ int -Du_char=unsigned\ char -Du_short=unsigned\ short ${COMMON_C_FLAGS}" \ - -DCMAKE_CXX_FLAGS="-D_XOPEN_SOURCE=700 -Du_int=unsigned\ int -Du_char=unsigned\ char -Du_short=unsigned\ short ${COMMON_CXX_FLAGS}" \ + -DCMAKE_C_FLAGS="-D_XOPEN_SOURCE=700 ${COMMON_C_FLAGS}" \ + -DCMAKE_CXX_FLAGS="-D_XOPEN_SOURCE=700 ${COMMON_CXX_FLAGS}" \ -S . cmake --build build-visionos --config Release -- -quiet @@ -445,8 +445,8 @@ cmake -B build-visionos-sim -G Xcode \ -DCMAKE_SYSTEM_NAME=visionOS \ -DCMAKE_OSX_SYSROOT=xrsimulator \ -DCMAKE_XCODE_ATTRIBUTE_SUPPORTED_PLATFORMS=xrsimulator \ - -DCMAKE_C_FLAGS="-D_XOPEN_SOURCE=700 -Du_int=unsigned\ int -Du_char=unsigned\ char -Du_short=unsigned\ short ${COMMON_C_FLAGS}" \ - -DCMAKE_CXX_FLAGS="-D_XOPEN_SOURCE=700 -Du_int=unsigned\ int -Du_char=unsigned\ char -Du_short=unsigned\ short ${COMMON_CXX_FLAGS}" \ + -DCMAKE_C_FLAGS="-D_XOPEN_SOURCE=700 ${COMMON_C_FLAGS}" \ + -DCMAKE_CXX_FLAGS="-D_XOPEN_SOURCE=700 ${COMMON_CXX_FLAGS}" \ -S . 
cmake --build build-visionos-sim --config Release -- -quiet diff --git a/ggml/src/CMakeLists.txt b/ggml/src/CMakeLists.txt index a797e2b187fbe..c1c7498694beb 100644 --- a/ggml/src/CMakeLists.txt +++ b/ggml/src/CMakeLists.txt @@ -325,6 +325,10 @@ if (CMAKE_SYSTEM_NAME MATCHES "Android") target_link_libraries(ggml-base PRIVATE dl) endif() +if(CMAKE_SYSTEM_NAME MATCHES "visionOS") + target_compile_definitions(ggml-base PUBLIC _DARWIN_C_SOURCE) +endif() + if (BUILD_SHARED_LIBS) foreach (target ggml-base ggml) set_target_properties(${target} PROPERTIES POSITION_INDEPENDENT_CODE ON) From a9b59288e222f39fc0311dc66944ed5a86c815fa Mon Sep 17 00:00:00 2001 From: Jeff Bolz Date: Wed, 19 Mar 2025 13:56:23 -0500 Subject: [PATCH 090/398] vulkan: optimize iq1 coopmat2 dequant functions (#12427) --- .../vulkan-shaders/dequant_funcs_cm2.comp | 18 ++++++++++++------ ggml/src/ggml-vulkan/vulkan-shaders/types.comp | 7 +++++++ 2 files changed, 19 insertions(+), 6 deletions(-) diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/dequant_funcs_cm2.comp b/ggml/src/ggml-vulkan/vulkan-shaders/dequant_funcs_cm2.comp index 8efe4653ffe75..b3fad35e21d4e 100644 --- a/ggml/src/ggml-vulkan/vulkan-shaders/dequant_funcs_cm2.comp +++ b/ggml/src/ggml-vulkan/vulkan-shaders/dequant_funcs_cm2.comp @@ -311,8 +311,8 @@ float16_t dequantFuncIQ1_S(const in decodeBufIQ1_S bl, const in uint blockCoords const float16_t d = bl.block.d; const uint idx = coordInBlock[1]; - const uint ib32 = idx / 32; - const uint ib8 = idx / 8; + const uint ib32 = (idx & 0xE0) >> 5; + const uint ib8 = (idx & 0xF8) >> 3; const uint qh = bl.block.qh[ib32]; const uint qs = bl.block.qs[ib8]; @@ -330,14 +330,20 @@ layout(buffer_reference, std430, buffer_reference_align = 2) buffer decodeBufIQ1 block_iq1_m block; }; +layout(buffer_reference, std430, buffer_reference_align = 8) buffer decodeBufIQ1_M_packed64 { + block_iq1_m_packed64 block; +}; + float16_t dequantFuncIQ1_M(const in decodeBufIQ1_M bl, const in uint blockCoords[2], const in uint coordInBlock[2]) { - const u16vec4 scales = u16vec4(bl.block.scales[0], bl.block.scales[1], bl.block.scales[2], bl.block.scales[3]) >> 12; - const float16_t d = uint16BitsToHalf(scales.x | (scales.y << 4) | (scales.z << 8) | (scales.w << 12)); + decodeBufIQ1_M_packed64 bl64 = decodeBufIQ1_M_packed64(bl); const uint idx = coordInBlock[1]; - const uint ib8 = idx / 8; - const uint ib16 = idx / 16; + uvec2 scales = unpack32(bl64.block.scales); + const float16_t d = uint16BitsToHalf(uint16_t(((scales.x & 0xF000) >> 12) | ((scales.x & 0xF0000000) >> 24) | ((scales.y & 0xF000) >> 4) | ((scales.y & 0xF0000000) >> 16))); + + const uint ib8 = (idx & 0xF8) >> 3; + const uint ib16 = (idx & 0xF0) >> 4; const int i8 = int(idx % 8); const uint sc = bl.block.scales[ib8 / 8]; const uint qs = bl.block.qs[ib8]; diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/types.comp b/ggml/src/ggml-vulkan/vulkan-shaders/types.comp index f01179326e7fc..789776816b75a 100644 --- a/ggml/src/ggml-vulkan/vulkan-shaders/types.comp +++ b/ggml/src/ggml-vulkan/vulkan-shaders/types.comp @@ -2,6 +2,7 @@ #if !defined(GGML_TYPES_COMP) #define GGML_TYPES_COMP +#extension GL_EXT_shader_explicit_arithmetic_types_int64 : require #extension GL_EXT_shader_explicit_arithmetic_types_int32 : require #extension GL_EXT_shader_explicit_arithmetic_types_int16 : require #extension GL_EXT_shader_explicit_arithmetic_types_int8 : require @@ -312,6 +313,12 @@ struct block_iq1_m { uint16_t scales[QUANT_K_IQ1_M/64]; }; +struct block_iq1_m_packed64 { + uint64_t qs[QUANT_K_IQ1_M/8/8]; + 
uint64_t qh[QUANT_K_IQ1_M/16/8]; + uint64_t scales; +}; + #if defined(DATA_A_IQ1_S) #define QUANT_K QUANT_K_IQ1_S #define QUANT_R QUANT_R_IQ1_S From 517b5ddbf002b91fd6d6daf5d8db8c88a0173039 Mon Sep 17 00:00:00 2001 From: Gaurav Garg <52341457+gaugarg-nv@users.noreply.github.com> Date: Thu, 20 Mar 2025 01:22:06 +0530 Subject: [PATCH 091/398] CUDA: Improve flash decoding kernel GPU occupancy for BS=1 case (#12183) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - Find out active blocks per SM using cudaOccupancyMaxActiveBlocksPerMultiprocessor API. Use this value to determine the optimal parallel_blocks value. - Prefer vector flash attention kernels over MMA kernel for BS=1 Fixes Issue: #12182 --------- Co-authored-by: Johannes Gäßler --- ggml/src/ggml-cuda/fattn-common.cuh | 88 +++++++++++++++++++--------- ggml/src/ggml-cuda/fattn-mma-f16.cuh | 3 +- ggml/src/ggml-cuda/fattn-tile-f16.cu | 63 ++++++++------------ ggml/src/ggml-cuda/fattn-tile-f32.cu | 63 ++++++++------------ ggml/src/ggml-cuda/fattn-vec-f16.cuh | 73 +++++++++-------------- ggml/src/ggml-cuda/fattn-vec-f32.cuh | 73 +++++++++-------------- ggml/src/ggml-cuda/fattn-wmma-f16.cu | 65 +++++--------------- ggml/src/ggml-cuda/fattn.cu | 18 +++--- ggml/src/ggml-cuda/ggml-cuda.cu | 3 + ggml/src/ggml-cuda/vendors/hip.h | 1 + ggml/src/ggml-cuda/vendors/musa.h | 1 + tests/test-backend-ops.cpp | 29 ++++++--- 12 files changed, 214 insertions(+), 266 deletions(-) diff --git a/ggml/src/ggml-cuda/fattn-common.cuh b/ggml/src/ggml-cuda/fattn-common.cuh index 4067fd41bc247..1c2a2a138f9b3 100644 --- a/ggml/src/ggml-cuda/fattn-common.cuh +++ b/ggml/src/ggml-cuda/fattn-common.cuh @@ -606,48 +606,47 @@ static __global__ void flash_attn_stream_k_fixup( *dst = dst_val / rowsum; } -template // D == head size +template // D == head size #if !(defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__)) __launch_bounds__(D, 1) #endif // !(defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__)) static __global__ void flash_attn_combine_results( const float * __restrict__ VKQ_parts, const float2 * __restrict__ VKQ_meta, - float * __restrict__ dst) { - VKQ_parts += parallel_blocks*D * gridDim.y*blockIdx.x; - VKQ_meta += parallel_blocks * gridDim.y*blockIdx.x; - dst += D * gridDim.y*blockIdx.x; + float * __restrict__ dst, + const int parallel_blocks) { + VKQ_parts += parallel_blocks*D * gridDim.z*blockIdx.x; + VKQ_meta += parallel_blocks * gridDim.z*blockIdx.x; + dst += D * gridDim.z*blockIdx.x; const int tid = threadIdx.x; __builtin_assume(tid < D); - __shared__ float2 meta[parallel_blocks]; + extern __shared__ float2 meta[]; if (tid < 2*parallel_blocks) { - ((float *) meta)[threadIdx.x] = ((const float *)VKQ_meta) [blockIdx.y*(2*parallel_blocks) + tid]; + ((float *) meta)[threadIdx.x] = ((const float *)VKQ_meta) [blockIdx.z*(2*parallel_blocks) + tid]; } __syncthreads(); float kqmax = meta[0].x; -#pragma unroll for (int l = 1; l < parallel_blocks; ++l) { kqmax = max(kqmax, meta[l].x); } float VKQ_numerator = 0.0f; float VKQ_denominator = 0.0f; -#pragma unroll for (int l = 0; l < parallel_blocks; ++l) { const float diff = meta[l].x - kqmax; const float KQ_max_scale = expf(diff); const uint32_t ftz_mask = 0xFFFFFFFF * (diff > SOFTMAX_FTZ_THRESHOLD); *((uint32_t *) &KQ_max_scale) &= ftz_mask; - VKQ_numerator += KQ_max_scale * VKQ_parts[l*gridDim.y*D + blockIdx.y*D + tid]; + VKQ_numerator += KQ_max_scale * VKQ_parts[l*gridDim.z*D + blockIdx.z*D + tid]; VKQ_denominator += KQ_max_scale * meta[l].y; } - dst[blockIdx.y*D + tid] = 
VKQ_numerator / VKQ_denominator; + dst[blockIdx.z*D + tid] = VKQ_numerator / VKQ_denominator; } static void on_no_fattn_vec_case(const int D) { @@ -671,12 +670,10 @@ static void on_no_fattn_vec_case(const int D) { } } -// parallel_blocks == 0 is stream-k decomposition -template +template void launch_fattn( - ggml_backend_cuda_context & ctx, ggml_tensor * dst, fattn_kernel_t fattn_kernel, - const int nwarps, const size_t nbytes_shared, const bool need_f16_K, const bool need_f16_V, - const int warp_size = WARP_SIZE + ggml_backend_cuda_context & ctx, ggml_tensor * dst, fattn_kernel_t fattn_kernel, const int nwarps, const size_t nbytes_shared, + const int KQ_row_granularity, const bool need_f16_K, const bool need_f16_V, const bool stream_k, const int warp_size = WARP_SIZE ) { constexpr int ncols = ncols1 * ncols2; @@ -748,12 +745,14 @@ void launch_fattn( nb23 = nb23*bs*sizeof(half)/ts; } + int parallel_blocks = 1; + const int ntiles_x = ((Q->ne[1] + ncols1 - 1) / ncols1); const int ntiles_total = ntiles_x * (Q->ne[2] / ncols2) * Q->ne[3]; const dim3 block_dim(warp_size, nwarps, 1); dim3 blocks_num; - if (parallel_blocks == 0) { + if (stream_k) { // For short contexts it can be faster to have the SMs work on whole tiles because this lets us skip the fixup. const int max_blocks = 2*nsm; const int tiles_nwaves = (ntiles_total + max_blocks - 1) / max_blocks; @@ -769,9 +768,43 @@ void launch_fattn( dst_tmp_meta.alloc(blocks_num.x*ncols * (2*2 + D) * sizeof(float)); } else { - blocks_num.x = parallel_blocks*ntiles_x; - blocks_num.y = Q->ne[2]; - blocks_num.z = Q->ne[3]; + GGML_ASSERT(K->ne[1] % KQ_row_granularity == 0); + const int ntiles_KQ = K->ne[1] / KQ_row_granularity; // Max. number of parallel blocks limited by tensor size. + + int max_blocks_per_sm = 1; // Max. number of active blocks limited by occupancy. + CUDA_CHECK(cudaOccupancyMaxActiveBlocksPerMultiprocessor(&max_blocks_per_sm, fattn_kernel, block_dim.x * block_dim.y * block_dim.z, nbytes_shared)); + + // parallel_blocks should be at least large enough to achieve max. occupancy for a single wave: + parallel_blocks = std::max((nsm * max_blocks_per_sm) / ntiles_total, 1); + + // parallel_blocks must not be larger than what the tensor size allows: + parallel_blocks = std::min(parallel_blocks, ntiles_KQ); + + // If ntiles_total % blocks_per_wave != 0 then some efficiency is lost due to tail effects. + // Test whether parallel_blocks can be set to a higher value for better efficiency. + const int blocks_per_wave = nsm * max_blocks_per_sm; + int nwaves_best = 0; + int efficiency_percent_best = 0; + for (int parallel_blocks_test = parallel_blocks; parallel_blocks_test <= ntiles_KQ; ++parallel_blocks_test) { + const int nblocks_total = ntiles_total * parallel_blocks_test; + const int nwaves = (nblocks_total + blocks_per_wave - 1) / blocks_per_wave; + const int efficiency_percent = 100 * nblocks_total / (nwaves*blocks_per_wave); + + // Stop trying configurations with more waves if we already have good efficiency to avoid excessive overhead. + if (efficiency_percent_best >= 90 && nwaves > nwaves_best) { + break; + } + + if (efficiency_percent > efficiency_percent_best) { + nwaves_best = nwaves; + efficiency_percent_best = efficiency_percent; + parallel_blocks = parallel_blocks_test; + } + } + + blocks_num.x = ntiles_x; + blocks_num.y = parallel_blocks; + blocks_num.z = Q->ne[2]*Q->ne[3]; if (parallel_blocks > 1) { dst_tmp.alloc(parallel_blocks*ggml_nelements(KQV)); @@ -803,7 +836,7 @@ void launch_fattn( K_data, V_data, mask ? 
((const char *) mask->data) : nullptr, - (parallel_blocks) > 1 ? dst_tmp.ptr : (float *) KQV->data, dst_tmp_meta.ptr, + !stream_k && parallel_blocks > 1 ? dst_tmp.ptr : (float *) KQV->data, dst_tmp_meta.ptr, scale, max_bias, m0, m1, n_head_log2, logit_softcap, Q->ne[0], Q->ne[1], Q->ne[2], Q->ne[3], K->ne[0], K->ne[1], K->ne[2], K->ne[3], @@ -815,7 +848,7 @@ void launch_fattn( ); CUDA_CHECK(cudaGetLastError()); - if constexpr (parallel_blocks == 0) { + if (stream_k) { if (ntiles_total % blocks_num.x != 0) { // Fixup is only needed if the SMs work on fractional tiles. const dim3 block_dim_combine(D, 1, 1); const dim3 blocks_num_combine = {blocks_num.x, ncols1, ncols2}; @@ -824,13 +857,14 @@ void launch_fattn( <<>> ((float *) KQV->data, dst_tmp_meta.ptr, Q->ne[1], Q->ne[2], K->ne[1]); } - } else if constexpr (parallel_blocks > 1) { + } else if (parallel_blocks > 1) { const dim3 block_dim_combine(D, 1, 1); - const dim3 blocks_num_combine(Q->ne[1], blocks_num.y, blocks_num.z); + const dim3 blocks_num_combine(Q->ne[1], 1, blocks_num.z); + const size_t nbytes_shared_combine = parallel_blocks*sizeof(float2); - flash_attn_combine_results - <<>> - (dst_tmp.ptr, dst_tmp_meta.ptr, (float *) KQV->data); + flash_attn_combine_results + <<>> + (dst_tmp.ptr, dst_tmp_meta.ptr, (float *) KQV->data, parallel_blocks); } CUDA_CHECK(cudaGetLastError()); } diff --git a/ggml/src/ggml-cuda/fattn-mma-f16.cuh b/ggml/src/ggml-cuda/fattn-mma-f16.cuh index 718ee5402dccd..024032f6221bc 100644 --- a/ggml/src/ggml-cuda/fattn-mma-f16.cuh +++ b/ggml/src/ggml-cuda/fattn-mma-f16.cuh @@ -970,7 +970,8 @@ void ggml_cuda_flash_attn_ext_mma_f16_case(ggml_backend_cuda_context & ctx, ggml fattn_kernel = flash_attn_ext_f16; } - launch_fattn(ctx, dst, fattn_kernel, nwarps, nbytes_shared_total, true, true); + launch_fattn + (ctx, dst, fattn_kernel, nwarps, nbytes_shared_total, FATTN_KQ_STRIDE, true, true, true); } diff --git a/ggml/src/ggml-cuda/fattn-tile-f16.cu b/ggml/src/ggml-cuda/fattn-tile-f16.cu index ef3569fab2789..77455d8e4f1a2 100644 --- a/ggml/src/ggml-cuda/fattn-tile-f16.cu +++ b/ggml/src/ggml-cuda/fattn-tile-f16.cu @@ -4,7 +4,7 @@ #define FATTN_KQ_STRIDE_TILE_F16 64 -template // D == head size +template // D == head size #if !(defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__)) __launch_bounds__(nwarps*WARP_SIZE, 1) #endif // !(defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__)) @@ -58,18 +58,17 @@ static __global__ void flash_attn_tile_ext_f16( //In this kernel Q, K, V are matrices while i, j, k are matrix indices. - const int ic0 = (blockIdx.x / parallel_blocks) * ncols; // Index of the Q/QKV column to work on. - const int ip = blockIdx.x % parallel_blocks; // Index in group of blocks running for the same column in parallel. + const int ic0 = blockIdx.x * ncols; // Index of the Q/QKV column to work on. const int gqa_ratio = ne02 / ne12; // With grouped query attention there are > 1 Q matrices per K, V matrix. 
- const float2 * Q_f2 = (const float2 *) (Q + nb02* blockIdx.y + nb01*ic0); - const half2 * K_h2 = (const half2 *) (K + nb12*(blockIdx.y / gqa_ratio)); - const half2 * V_h2 = (const half2 *) (V + nb12*(blockIdx.y / gqa_ratio)); // K and V have same shape + const float2 * Q_f2 = (const float2 *) (Q + nb02* blockIdx.z + nb01*ic0); + const half2 * K_h2 = (const half2 *) (K + nb12*(blockIdx.z / gqa_ratio)); + const half2 * V_h2 = (const half2 *) (V + nb12*(blockIdx.z / gqa_ratio)); // K and V have same shape const half * maskh = (const half *) mask + ne11*ic0; const int stride_KV2 = nb11 / sizeof(half2); - const float slopef = get_alibi_slope(max_bias, blockIdx.y, n_head_log2, m0, m1); + const float slopef = get_alibi_slope(max_bias, blockIdx.z, n_head_log2, m0, m1); const half slopeh = __float2half(slopef); static_assert(D % (2*WARP_SIZE) == 0, "D not divisible by 2*WARP_SIZE == 64."); @@ -105,8 +104,7 @@ static __global__ void flash_attn_tile_ext_f16( __syncthreads(); - const int k_start = parallel_blocks == 1 ? 0 : ip*FATTN_KQ_STRIDE_TILE_F16; - for (int k_VKQ_0 = k_start; k_VKQ_0 < ne11; k_VKQ_0 += parallel_blocks*FATTN_KQ_STRIDE_TILE_F16) { + for (int k_VKQ_0 = blockIdx.y*FATTN_KQ_STRIDE_TILE_F16; k_VKQ_0 < ne11; k_VKQ_0 += gridDim.y*FATTN_KQ_STRIDE_TILE_F16) { // Calculate KQ tile and keep track of new maximum KQ values: half kqmax_new[ncols/nwarps]; @@ -271,16 +269,16 @@ static __global__ void flash_attn_tile_ext_f16( const int i0 = i00 + 2*threadIdx.x; half2 dst_val = VKQ[j_VKQ_0/nwarps][i0/(2*WARP_SIZE)]; - if (parallel_blocks == 1) { + if (gridDim.y == 1) { dst_val /= __half2half2(kqsum_j); } - const int j_dst = (ic0 + j_VKQ)*parallel_blocks + ip; - dst[j_dst*D*gridDim.y + D*blockIdx.y + i0 + 0] = __low2float(dst_val); - dst[j_dst*D*gridDim.y + D*blockIdx.y + i0 + 1] = __high2float(dst_val); + const int j_dst = (ic0 + j_VKQ)*gridDim.y + blockIdx.y; + dst[j_dst*D*gridDim.z + D*blockIdx.z + i0 + 0] = __low2float(dst_val); + dst[j_dst*D*gridDim.z + D*blockIdx.z + i0 + 1] = __high2float(dst_val); } - if (parallel_blocks != 1 && threadIdx.x == 0) { - dst_meta[(ic0 + j_VKQ)*gridDim.y*parallel_blocks + blockIdx.y*parallel_blocks + ip] = make_float2(kqmax[j_VKQ_0/nwarps], kqsum_j); + if (gridDim.y != 1 && threadIdx.x == 0) { + dst_meta[((ic0 + j_VKQ)*gridDim.z + blockIdx.z) * gridDim.y + blockIdx.y] = make_float2(kqmax[j_VKQ_0/nwarps], kqsum_j); } } #else @@ -288,7 +286,7 @@ static __global__ void flash_attn_tile_ext_f16( #endif // defined(FLASH_ATTN_AVAILABLE) && defined(FP16_AVAILABLE) } -template +template void launch_fattn_tile_f16_64_128(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { const ggml_tensor * Q = dst->src[0]; switch (Q->ne[0]) { @@ -296,15 +294,17 @@ void launch_fattn_tile_f16_64_128(ggml_backend_cuda_context & ctx, ggml_tensor * constexpr int D = 64; constexpr int nwarps = 8; constexpr size_t nbytes_shared = 0; - fattn_kernel_t fattn_kernel = flash_attn_tile_ext_f16; - launch_fattn(ctx, dst, fattn_kernel, nwarps, nbytes_shared, true, true); + fattn_kernel_t fattn_kernel = flash_attn_tile_ext_f16; + launch_fattn + (ctx, dst, fattn_kernel, nwarps, nbytes_shared, FATTN_KQ_STRIDE_TILE_F16, true, true, false); } break; case 128: { constexpr int D = 128; constexpr int nwarps = 8; constexpr size_t nbytes_shared = 0; - fattn_kernel_t fattn_kernel = flash_attn_tile_ext_f16; - launch_fattn(ctx, dst, fattn_kernel, nwarps, nbytes_shared, true, true); + fattn_kernel_t fattn_kernel = flash_attn_tile_ext_f16; + launch_fattn + (ctx, dst, fattn_kernel, nwarps, nbytes_shared, 
FATTN_KQ_STRIDE_TILE_F16, true, true, false); } break; default: { GGML_ABORT("FlashAttention without tensor cores only supports head sizes 64 and 128."); @@ -324,37 +324,22 @@ void ggml_cuda_flash_attn_ext_tile_f16(ggml_backend_cuda_context & ctx, ggml_ten if (Q->ne[1] <= 16) { constexpr int cols_per_block = 16; - constexpr int parallel_blocks = 4; if (logit_softcap == 0.0f) { constexpr bool use_logit_softcap = false; - launch_fattn_tile_f16_64_128(ctx, dst); + launch_fattn_tile_f16_64_128(ctx, dst); } else { constexpr bool use_logit_softcap = true; - launch_fattn_tile_f16_64_128(ctx, dst); - } - return; - } - - if (Q->ne[1] <= 32) { - constexpr int cols_per_block = 32; - constexpr int parallel_blocks = 4; - if (logit_softcap == 0.0f) { - constexpr bool use_logit_softcap = false; - launch_fattn_tile_f16_64_128(ctx, dst); - } else { - constexpr bool use_logit_softcap = true; - launch_fattn_tile_f16_64_128(ctx, dst); + launch_fattn_tile_f16_64_128(ctx, dst); } return; } constexpr int cols_per_block = 32; - constexpr int parallel_blocks = 1; if (logit_softcap == 0.0f) { constexpr bool use_logit_softcap = false; - launch_fattn_tile_f16_64_128(ctx, dst); + launch_fattn_tile_f16_64_128(ctx, dst); } else { constexpr bool use_logit_softcap = true; - launch_fattn_tile_f16_64_128(ctx, dst); + launch_fattn_tile_f16_64_128(ctx, dst); } } diff --git a/ggml/src/ggml-cuda/fattn-tile-f32.cu b/ggml/src/ggml-cuda/fattn-tile-f32.cu index 04b69c83be048..85fea4404d07c 100644 --- a/ggml/src/ggml-cuda/fattn-tile-f32.cu +++ b/ggml/src/ggml-cuda/fattn-tile-f32.cu @@ -4,7 +4,7 @@ #define FATTN_KQ_STRIDE_TILE_F32 32 -template // D == head size +template // D == head size #if !(defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__)) __launch_bounds__(nwarps*WARP_SIZE, 1) #endif // !(defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__)) @@ -58,18 +58,17 @@ static __global__ void flash_attn_tile_ext_f32( // In this kernel Q, K, V are matrices while i, j, k are matrix indices. - const int ic0 = (blockIdx.x / parallel_blocks) * ncols; // Index of the Q/QKV column to work on. - const int ip = blockIdx.x % parallel_blocks; // Index in group of blocks running for the same column in parallel. + const int ic0 = blockIdx.x * ncols; // Index of the Q/QKV column to work on. const int gqa_ratio = ne02 / ne12; // With grouped query attention there are > 1 Q matrices per K, V matrix. - const float2 * Q_f2 = (const float2 *) (Q + nb02* blockIdx.y + nb01*ic0); - const half2 * K_h2 = (const half2 *) (K + nb12*(blockIdx.y / gqa_ratio)); - const half2 * V_h2 = (const half2 *) (V + nb12*(blockIdx.y / gqa_ratio)); // K and V have same shape + const float2 * Q_f2 = (const float2 *) (Q + nb02* blockIdx.z + nb01*ic0); + const half2 * K_h2 = (const half2 *) (K + nb12*(blockIdx.z / gqa_ratio)); + const half2 * V_h2 = (const half2 *) (V + nb12*(blockIdx.z / gqa_ratio)); // K and V have same shape const half * maskh = (const half *) mask + ne11*ic0; const int stride_KV2 = nb11 / sizeof(half2); - const float slope = get_alibi_slope(max_bias, blockIdx.y, n_head_log2, m0, m1); + const float slope = get_alibi_slope(max_bias, blockIdx.z, n_head_log2, m0, m1); static_assert(D % (2*WARP_SIZE) == 0, "D not divisible by 2*WARP_SIZE == 64."); @@ -103,8 +102,7 @@ static __global__ void flash_attn_tile_ext_f32( __syncthreads(); - const int k_start = parallel_blocks == 1 ? 
0 : ip*FATTN_KQ_STRIDE_TILE_F32; - for (int k_VKQ_0 = k_start; k_VKQ_0 < ne11; k_VKQ_0 += parallel_blocks*FATTN_KQ_STRIDE_TILE_F32) { + for (int k_VKQ_0 = blockIdx.y*FATTN_KQ_STRIDE_TILE_F32; k_VKQ_0 < ne11; k_VKQ_0 += gridDim.y*FATTN_KQ_STRIDE_TILE_F32) { // Calculate KQ tile and keep track of new maximum KQ values: float kqmax_new[ncols/nwarps]; @@ -269,17 +267,17 @@ static __global__ void flash_attn_tile_ext_f32( const int i0 = i00 + 2*threadIdx.x; float2 dst_val = VKQ[j_VKQ_0/nwarps][i0/(2*WARP_SIZE)]; - if (parallel_blocks == 1) { + if (gridDim.y == 1) { dst_val.x /= kqsum_j; dst_val.y /= kqsum_j; } - const int j_dst = (ic0 + j_VKQ)*parallel_blocks + ip; - dst[j_dst*D*gridDim.y + D*blockIdx.y + i0 + 0] = dst_val.x; - dst[j_dst*D*gridDim.y + D*blockIdx.y + i0 + 1] = dst_val.y; + const int j_dst = (ic0 + j_VKQ)*gridDim.y + blockIdx.y; + dst[j_dst*D*gridDim.z + D*blockIdx.z + i0 + 0] = dst_val.x; + dst[j_dst*D*gridDim.z + D*blockIdx.z + i0 + 1] = dst_val.y; } - if (parallel_blocks != 1 && threadIdx.x == 0) { - dst_meta[(ic0 + j_VKQ)*gridDim.y*parallel_blocks + blockIdx.y*parallel_blocks + ip] = make_float2(kqmax[j_VKQ_0/nwarps], kqsum_j); + if (gridDim.y != 1 && threadIdx.x == 0) { + dst_meta[((ic0 + j_VKQ)*gridDim.z + blockIdx.z) * gridDim.y + blockIdx.y] = make_float2(kqmax[j_VKQ_0/nwarps], kqsum_j); } } #else @@ -287,7 +285,7 @@ static __global__ void flash_attn_tile_ext_f32( #endif // FLASH_ATTN_AVAILABLE } -template +template void launch_fattn_tile_f32_64_128(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { const ggml_tensor * Q = dst->src[0]; switch (Q->ne[0]) { @@ -295,15 +293,17 @@ void launch_fattn_tile_f32_64_128(ggml_backend_cuda_context & ctx, ggml_tensor * constexpr int D = 64; constexpr int nwarps = 8; constexpr size_t nbytes_shared = 0; - fattn_kernel_t fattn_kernel = flash_attn_tile_ext_f32; - launch_fattn(ctx, dst, fattn_kernel, nwarps, nbytes_shared, true, true); + fattn_kernel_t fattn_kernel = flash_attn_tile_ext_f32; + launch_fattn + (ctx, dst, fattn_kernel, nwarps, nbytes_shared, FATTN_KQ_STRIDE_TILE_F32, true, true, false); } break; case 128: { constexpr int D = 128; constexpr int nwarps = 8; constexpr size_t nbytes_shared = 0; - fattn_kernel_t fattn_kernel = flash_attn_tile_ext_f32; - launch_fattn(ctx, dst, fattn_kernel, nwarps, nbytes_shared, true, true); + fattn_kernel_t fattn_kernel = flash_attn_tile_ext_f32; + launch_fattn + (ctx, dst, fattn_kernel, nwarps, nbytes_shared, FATTN_KQ_STRIDE_TILE_F32, true, true, false); } break; default: { GGML_ABORT("FlashAttention without tensor cores only supports head sizes 64 and 128."); @@ -320,37 +320,22 @@ void ggml_cuda_flash_attn_ext_tile_f32(ggml_backend_cuda_context & ctx, ggml_ten if (Q->ne[1] <= 16) { constexpr int cols_per_block = 16; - constexpr int parallel_blocks = 4; if (logit_softcap == 0.0f) { constexpr bool use_logit_softcap = false; - launch_fattn_tile_f32_64_128(ctx, dst); + launch_fattn_tile_f32_64_128(ctx, dst); } else { constexpr bool use_logit_softcap = true; - launch_fattn_tile_f32_64_128(ctx, dst); - } - return; - } - - if (Q->ne[1] <= 32) { - constexpr int cols_per_block = 32; - constexpr int parallel_blocks = 4; - if (logit_softcap == 0.0f) { - constexpr bool use_logit_softcap = false; - launch_fattn_tile_f32_64_128(ctx, dst); - } else { - constexpr bool use_logit_softcap = true; - launch_fattn_tile_f32_64_128(ctx, dst); + launch_fattn_tile_f32_64_128(ctx, dst); } return; } constexpr int cols_per_block = 32; - constexpr int parallel_blocks = 1; if (logit_softcap == 0.0f) { constexpr bool 
use_logit_softcap = false; - launch_fattn_tile_f32_64_128(ctx, dst); + launch_fattn_tile_f32_64_128(ctx, dst); } else { constexpr bool use_logit_softcap = true; - launch_fattn_tile_f32_64_128(ctx, dst); + launch_fattn_tile_f32_64_128(ctx, dst); } } diff --git a/ggml/src/ggml-cuda/fattn-vec-f16.cuh b/ggml/src/ggml-cuda/fattn-vec-f16.cuh index b7686c1ec3d47..32c52ebe33e9c 100644 --- a/ggml/src/ggml-cuda/fattn-vec-f16.cuh +++ b/ggml/src/ggml-cuda/fattn-vec-f16.cuh @@ -1,7 +1,7 @@ #include "common.cuh" #include "fattn-common.cuh" -template // D == head size +template // D == head size #if !(defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__)) __launch_bounds__(D, 1) #endif // !(defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__)) @@ -55,17 +55,16 @@ static __global__ void flash_attn_vec_ext_f16( constexpr bool Q_q8_1 = type_K != GGML_TYPE_F16; constexpr dequantize_1_f16_t dequantize_1_v = get_dequantize_1_f16(type_V); - const int ic0 = (blockIdx.x / parallel_blocks) * ncols; // Index of the Q/QKV column to work on. - const int ip = blockIdx.x % parallel_blocks; // Index in group of blocks running for the same column in parallel. + const int ic0 = blockIdx.x * ncols; // Index of the Q/QKV column to work on. const int gqa_ratio = ne02 / ne12; // With grouped query attention there are > 1 Q matrices per K, V matrix. - Q += nb02* blockIdx.y + nb01*ic0; - K += nb12*(blockIdx.y / gqa_ratio); - V += nb22*(blockIdx.y / gqa_ratio); + Q += nb02* blockIdx.z + nb01*ic0; + K += nb12*(blockIdx.z / gqa_ratio); + V += nb22*(blockIdx.z / gqa_ratio); const half * maskh = (const half *) mask + ne11*ic0; - const float slopef = get_alibi_slope(max_bias, blockIdx.y, n_head_log2, m0, m1); + const float slopef = get_alibi_slope(max_bias, blockIdx.z, n_head_log2, m0, m1); const half slopeh = __float2half(slopef); static_assert(D % (2*WARP_SIZE) == 0, "D not divisible by 2*WARP_SIZE == 64."); @@ -172,8 +171,7 @@ static __global__ void flash_attn_vec_ext_f16( half2 VKQ[ncols] = {{0.0f, 0.0f}}; - const int k_start = parallel_blocks == 1 ? 
0 : ip*D; - for (int k_VKQ_0 = k_start; k_VKQ_0 < ne11; k_VKQ_0 += parallel_blocks*D) { + for (int k_VKQ_0 = blockIdx.y*D; k_VKQ_0 < ne11; k_VKQ_0 += gridDim.y*D) { // Calculate KQ tile and keep track of new maximum KQ values: // For unknown reasons using a half array of size 1 for kqmax_new causes a performance regression, @@ -283,29 +281,29 @@ static __global__ void flash_attn_vec_ext_f16( kqsum[j_VKQ] = warp_reduce_sum((float)kqsum[j_VKQ]); half dst_val = (__low2half(VKQ[j_VKQ]) + __high2half(VKQ[j_VKQ])); - if (parallel_blocks == 1) { + if (gridDim.y == 1) { dst_val /= kqsum[j_VKQ]; } - const int j_dst = (ic0 + j_VKQ)*parallel_blocks + ip; - dst[j_dst*D*gridDim.y + D*blockIdx.y + tid] = dst_val; + const int j_dst = (ic0 + j_VKQ)*gridDim.y + blockIdx.y; + dst[j_dst*D*gridDim.z + D*blockIdx.z + tid] = dst_val; } - if (parallel_blocks != 1 && tid < ncols && (ncols <= 2 || ic0 + tid < ne01)) { - dst_meta[(ic0 + tid)*gridDim.y*parallel_blocks + blockIdx.y*parallel_blocks + ip] = make_float2(kqmax[tid], kqsum[tid]); + if (gridDim.y != 1 && tid < ncols && (ncols <= 2 || ic0 + tid < ne01)) { + dst_meta[((ic0 + tid)*gridDim.z + blockIdx.z) * gridDim.y + blockIdx.y] = make_float2(kqmax[tid], kqsum[tid]); } #else NO_DEVICE_CODE; #endif // defined(FLASH_ATTN_AVAILABLE) && defined(FP16_AVAILABLE) } -template +template void ggml_cuda_flash_attn_ext_vec_f16_case_impl(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { constexpr int nwarps = D/WARP_SIZE; - fattn_kernel_t fattn_kernel = flash_attn_vec_ext_f16; + fattn_kernel_t fattn_kernel = flash_attn_vec_ext_f16; constexpr bool need_f16_K = D != 128; constexpr bool need_f16_V = D != 128 && D != 64; constexpr size_t nbytes_shared = 0; - launch_fattn(ctx, dst, fattn_kernel, nwarps, nbytes_shared, need_f16_K, need_f16_V); + launch_fattn(ctx, dst, fattn_kernel, nwarps, nbytes_shared, D, need_f16_K, need_f16_V, false); } template @@ -325,65 +323,48 @@ void ggml_cuda_flash_attn_ext_vec_f16_case(ggml_backend_cuda_context & ctx, ggml memcpy(&logit_softcap, (const float *) KQV->op_params + 2, sizeof(float)); if (Q->ne[1] == 1) { - constexpr int cols_per_block = 1; - constexpr int parallel_blocks = 4; + constexpr int cols_per_block = 1; if (logit_softcap == 0.0f) { constexpr bool use_logit_softcap = false; - ggml_cuda_flash_attn_ext_vec_f16_case_impl(ctx, dst); + ggml_cuda_flash_attn_ext_vec_f16_case_impl(ctx, dst); } else { constexpr bool use_logit_softcap = true; - ggml_cuda_flash_attn_ext_vec_f16_case_impl(ctx, dst); + ggml_cuda_flash_attn_ext_vec_f16_case_impl(ctx, dst); } return; } if (Q->ne[1] == 2) { - constexpr int cols_per_block = 2; - constexpr int parallel_blocks = 4; + constexpr int cols_per_block = 2; if (logit_softcap == 0.0f) { constexpr bool use_logit_softcap = false; - ggml_cuda_flash_attn_ext_vec_f16_case_impl(ctx, dst); + ggml_cuda_flash_attn_ext_vec_f16_case_impl(ctx, dst); } else { constexpr bool use_logit_softcap = true; - ggml_cuda_flash_attn_ext_vec_f16_case_impl(ctx, dst); + ggml_cuda_flash_attn_ext_vec_f16_case_impl(ctx, dst); } return; } if (Q->ne[1] <= 4) { - constexpr int cols_per_block = 4; - constexpr int parallel_blocks = 4; + constexpr int cols_per_block = 4; if (logit_softcap == 0.0f) { constexpr bool use_logit_softcap = false; - ggml_cuda_flash_attn_ext_vec_f16_case_impl(ctx, dst); + ggml_cuda_flash_attn_ext_vec_f16_case_impl(ctx, dst); } else { constexpr bool use_logit_softcap = true; - ggml_cuda_flash_attn_ext_vec_f16_case_impl(ctx, dst); + ggml_cuda_flash_attn_ext_vec_f16_case_impl(ctx, dst); } return; } - if (Q->ne[1] 
<= 8) { - constexpr int cols_per_block = 8; - constexpr int parallel_blocks = 4; - if (logit_softcap == 0.0f) { - constexpr bool use_logit_softcap = false; - ggml_cuda_flash_attn_ext_vec_f16_case_impl(ctx, dst); - } else { - constexpr bool use_logit_softcap = true; - ggml_cuda_flash_attn_ext_vec_f16_case_impl(ctx, dst); - } - return; - } - - constexpr int cols_per_block = 8; - constexpr int parallel_blocks = 1; + constexpr int cols_per_block = 8; if (logit_softcap == 0.0f) { constexpr bool use_logit_softcap = false; - ggml_cuda_flash_attn_ext_vec_f16_case_impl(ctx, dst); + ggml_cuda_flash_attn_ext_vec_f16_case_impl(ctx, dst); } else { constexpr bool use_logit_softcap = true; - ggml_cuda_flash_attn_ext_vec_f16_case_impl(ctx, dst); + ggml_cuda_flash_attn_ext_vec_f16_case_impl(ctx, dst); } } diff --git a/ggml/src/ggml-cuda/fattn-vec-f32.cuh b/ggml/src/ggml-cuda/fattn-vec-f32.cuh index c1d2dd8d19f4d..336c136d19d7d 100644 --- a/ggml/src/ggml-cuda/fattn-vec-f32.cuh +++ b/ggml/src/ggml-cuda/fattn-vec-f32.cuh @@ -1,7 +1,7 @@ #include "common.cuh" #include "fattn-common.cuh" -template // D == head size +template // D == head size #if !(defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__)) __launch_bounds__(D, 1) #endif // !(defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__)) @@ -55,16 +55,15 @@ static __global__ void flash_attn_vec_ext_f32( constexpr bool Q_q8_1 = type_K != GGML_TYPE_F16; constexpr dequantize_1_f32_t dequantize_1_v = get_dequantize_1_f32(type_V); - const int ic0 = (blockIdx.x / parallel_blocks) * ncols; // Index of the Q/QKV column to work on. - const int ip = blockIdx.x % parallel_blocks; // Index in group of blocks running for the same column in parallel. + const int ic0 = blockIdx.x * ncols; // Index of the Q/QKV column to work on. const int gqa_ratio = ne02 / ne12; // With grouped query attention there are > 1 Q matrices per K, V matrix. - Q += nb02* blockIdx.y + nb01*ic0; - K += nb12*(blockIdx.y / gqa_ratio); - V += nb22*(blockIdx.y / gqa_ratio); // K and V have same shape + Q += nb02* blockIdx.z + nb01*ic0; + K += nb12*(blockIdx.z / gqa_ratio); + V += nb22*(blockIdx.z / gqa_ratio); // K and V have same shape const half * maskh = (const half *) mask + ne11*ic0; - const float slope = get_alibi_slope(max_bias, blockIdx.y, n_head_log2, m0, m1); + const float slope = get_alibi_slope(max_bias, blockIdx.z, n_head_log2, m0, m1); static_assert(D % (2*WARP_SIZE) == 0, "D not divisible by 2*WARP_SIZE == 64."); constexpr int nwarps = D / WARP_SIZE; @@ -167,8 +166,7 @@ static __global__ void flash_attn_vec_ext_f32( float VKQ[ncols] = {0.0f}; - const int k_start = parallel_blocks == 1 ? 
0 : ip*D; - for (int k_VKQ_0 = k_start; k_VKQ_0 < ne11; k_VKQ_0 += parallel_blocks*D) { + for (int k_VKQ_0 = blockIdx.y*D; k_VKQ_0 < ne11; k_VKQ_0 += gridDim.y*D) { // Calculate KQ tile and keep track of new maximum KQ values: float kqmax_new_arr[ncols]; @@ -268,29 +266,29 @@ static __global__ void flash_attn_vec_ext_f32( kqsum[j_VKQ] = warp_reduce_sum(kqsum[j_VKQ]); float dst_val = VKQ[j_VKQ]; - if (parallel_blocks == 1) { + if (gridDim.y == 1) { dst_val /= kqsum[j_VKQ]; } - const int j_dst = (ic0 + j_VKQ)*parallel_blocks + ip; - dst[j_dst*D*gridDim.y + D*blockIdx.y + tid] = dst_val; + const int j_dst = (ic0 + j_VKQ)*gridDim.y + blockIdx.y; + dst[j_dst*D*gridDim.z + D*blockIdx.z + tid] = dst_val; } - if (parallel_blocks != 1 && tid < ncols && (ncols <= 2 || ic0 + tid < ne01)) { - dst_meta[(ic0 + tid)*gridDim.y*parallel_blocks + blockIdx.y*parallel_blocks + ip] = make_float2(kqmax[tid], kqsum[tid]); + if (gridDim.y != 1 && tid < ncols && (ncols <= 2 || ic0 + tid < ne01)) { + dst_meta[((ic0 + tid)*gridDim.z + blockIdx.z) * gridDim.y + blockIdx.y] = make_float2(kqmax[tid], kqsum[tid]); } #else NO_DEVICE_CODE; #endif // FLASH_ATTN_AVAILABLE } -template +template void ggml_cuda_flash_attn_ext_vec_f32_case_impl(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { constexpr int nwarps = D/WARP_SIZE; - fattn_kernel_t fattn_kernel = flash_attn_vec_ext_f32; + fattn_kernel_t fattn_kernel = flash_attn_vec_ext_f32; constexpr bool need_f16_K = D != 128; constexpr bool need_f16_V = D != 128 && D != 64; constexpr size_t nbytes_shared = 0; - launch_fattn(ctx, dst, fattn_kernel, nwarps, nbytes_shared, need_f16_K, need_f16_V); + launch_fattn(ctx, dst, fattn_kernel, nwarps, nbytes_shared, D, need_f16_K, need_f16_V, false); } template @@ -307,65 +305,48 @@ void ggml_cuda_flash_attn_ext_vec_f32_case(ggml_backend_cuda_context & ctx, ggml memcpy(&logit_softcap, (const float *) KQV->op_params + 2, sizeof(float)); if (Q->ne[1] == 1) { - constexpr int cols_per_block = 1; - constexpr int parallel_blocks = 4; + constexpr int cols_per_block = 1; if (logit_softcap == 0.0f) { constexpr bool use_logit_softcap = false; - ggml_cuda_flash_attn_ext_vec_f32_case_impl(ctx, dst); + ggml_cuda_flash_attn_ext_vec_f32_case_impl(ctx, dst); } else { constexpr bool use_logit_softcap = true; - ggml_cuda_flash_attn_ext_vec_f32_case_impl(ctx, dst); + ggml_cuda_flash_attn_ext_vec_f32_case_impl(ctx, dst); } return; } if (Q->ne[1] == 2) { - constexpr int cols_per_block = 2; - constexpr int parallel_blocks = 4; + constexpr int cols_per_block = 2; if (logit_softcap == 0.0f) { constexpr bool use_logit_softcap = false; - ggml_cuda_flash_attn_ext_vec_f32_case_impl(ctx, dst); + ggml_cuda_flash_attn_ext_vec_f32_case_impl(ctx, dst); } else { constexpr bool use_logit_softcap = true; - ggml_cuda_flash_attn_ext_vec_f32_case_impl(ctx, dst); + ggml_cuda_flash_attn_ext_vec_f32_case_impl(ctx, dst); } return; } if (Q->ne[1] <= 4) { - constexpr int cols_per_block = 4; - constexpr int parallel_blocks = 4; + constexpr int cols_per_block = 4; if (logit_softcap == 0.0f) { constexpr bool use_logit_softcap = false; - ggml_cuda_flash_attn_ext_vec_f32_case_impl(ctx, dst); + ggml_cuda_flash_attn_ext_vec_f32_case_impl(ctx, dst); } else { constexpr bool use_logit_softcap = true; - ggml_cuda_flash_attn_ext_vec_f32_case_impl(ctx, dst); + ggml_cuda_flash_attn_ext_vec_f32_case_impl(ctx, dst); } return; } - if (Q->ne[1] <= 8) { - constexpr int cols_per_block = 8; - constexpr int parallel_blocks = 4; - if (logit_softcap == 0.0f) { - constexpr bool use_logit_softcap = 
false; - ggml_cuda_flash_attn_ext_vec_f32_case_impl(ctx, dst); - } else { - constexpr bool use_logit_softcap = true; - ggml_cuda_flash_attn_ext_vec_f32_case_impl(ctx, dst); - } - return; - } - - constexpr int cols_per_block = 8; - constexpr int parallel_blocks = 1; + constexpr int cols_per_block = 8; if (logit_softcap == 0.0f) { constexpr bool use_logit_softcap = false; - ggml_cuda_flash_attn_ext_vec_f32_case_impl(ctx, dst); + ggml_cuda_flash_attn_ext_vec_f32_case_impl(ctx, dst); } else { constexpr bool use_logit_softcap = true; - ggml_cuda_flash_attn_ext_vec_f32_case_impl(ctx, dst); + ggml_cuda_flash_attn_ext_vec_f32_case_impl(ctx, dst); } } diff --git a/ggml/src/ggml-cuda/fattn-wmma-f16.cu b/ggml/src/ggml-cuda/fattn-wmma-f16.cu index dab1d5cbcace4..5c214ea3109d2 100644 --- a/ggml/src/ggml-cuda/fattn-wmma-f16.cu +++ b/ggml/src/ggml-cuda/fattn-wmma-f16.cu @@ -18,7 +18,7 @@ namespace wmma = rocwmma; #endif // FP16_MMA_AVAILABLE // D == head size, VKQ_stride == num VKQ rows calculated in parallel: -template +template __launch_bounds__(nwarps*ggml_cuda_get_physical_warp_size(), 1) static __global__ void flash_attn_ext_f16( const char * __restrict__ Q, @@ -67,8 +67,7 @@ static __global__ void flash_attn_ext_f16( constexpr int warp_size = ggml_cuda_get_physical_warp_size(); - const int ic0 = ncols*(blockIdx.x / parallel_blocks); // Index of the first Q/QKV column to work on. - const int ip = blockIdx.x % parallel_blocks; // Index in group of blocks running for the same column in parallel. + const int ic0 = ncols*blockIdx.x; // Index of the first Q/QKV column to work on. static_assert(D <= FATTN_KQ_STRIDE, "D must be <= FATTN_KQ_STRIDE."); static_assert(ncols == 8 || ncols % 16 == 0, "ncols must be 8 or a multiple of 16."); @@ -91,16 +90,16 @@ static __global__ void flash_attn_ext_f16( constexpr int kqar = sizeof(KQ_acc_t)/sizeof(half); const int gqa_ratio = ne02 / ne12; // With grouped query attention there are > 1 Q matrices per K, V matrix. 
- const float * Q_f = (const float *) (Q + nb02* blockIdx.y + nb01*ic0); - const half * K_h = (const half *) (K + nb12*(blockIdx.y / gqa_ratio)); - const half * V_h = (const half *) (V + nb12*(blockIdx.y / gqa_ratio)); // K and V have same shape + const float * Q_f = (const float *) (Q + nb02* blockIdx.z + nb01*ic0); + const half * K_h = (const half *) (K + nb12*(blockIdx.z / gqa_ratio)); + const half * V_h = (const half *) (V + nb12*(blockIdx.z / gqa_ratio)); // K and V have same shape const half * maskh = (const half *) mask + (nb31/sizeof(half))* ic0; const half2 * mask2 = (const half2 *) mask + (nb31/sizeof(half))*(ic0/2); const int stride_Q = nb01 / sizeof(float); const int stride_KV = nb11 / sizeof(half); - const float slopef = get_alibi_slope(max_bias, blockIdx.y, n_head_log2, m0, m1); + const float slopef = get_alibi_slope(max_bias, blockIdx.z, n_head_log2, m0, m1); const half slopeh = __float2half(slopef); const half2 slope2 = make_half2(slopef, slopef); @@ -176,7 +175,7 @@ static __global__ void flash_attn_ext_f16( __syncthreads(); // Iterate over ne11 == previous tokens: - for (int k_VKQ_0 = ip*FATTN_KQ_STRIDE; k_VKQ_0 < ne11; k_VKQ_0 += parallel_blocks*FATTN_KQ_STRIDE) { + for (int k_VKQ_0 = blockIdx.y*FATTN_KQ_STRIDE; k_VKQ_0 < ne11; k_VKQ_0 += gridDim.y*FATTN_KQ_STRIDE) { // Calculate tile of KQ: #pragma unroll for (int i_KQ_0 = 0; i_KQ_0 < FATTN_KQ_STRIDE; i_KQ_0 += KQ_stride_tc) { @@ -395,7 +394,7 @@ static __global__ void flash_attn_ext_f16( if (ic0 + j_VKQ >= ne01) { return; } - const int j_dst = (ic0 + j_VKQ)*parallel_blocks + ip; + const int j_dst = (ic0 + j_VKQ)*gridDim.y + blockIdx.y; float KQ_rowsum_j; if (std::is_same::value) { @@ -411,13 +410,13 @@ static __global__ void flash_attn_ext_f16( break; } float dst_val = VKQ[j_VKQ*D_padded + i]; - if (parallel_blocks == 1) { + if (gridDim.y == 1) { dst_val /= KQ_rowsum_j; } - dst[j_dst*gridDim.y*D + blockIdx.y*D + i] = dst_val; + dst[j_dst*gridDim.z*D + blockIdx.z*D + i] = dst_val; } - if (parallel_blocks == 1 || threadIdx.x != 0) { + if (gridDim.y == 1 || threadIdx.x != 0) { continue; } @@ -428,7 +427,7 @@ static __global__ void flash_attn_ext_f16( dst_meta_val.x = __low2float(KQ_max_h2[j0/nwarps]); } dst_meta_val.y = KQ_rowsum_j; - dst_meta[(ic0 + j_VKQ)*gridDim.y*parallel_blocks + blockIdx.y*parallel_blocks + ip] = dst_meta_val; + dst_meta[((ic0 + j_VKQ)*gridDim.z + blockIdx.z) * gridDim.y + blockIdx.y] = dst_meta_val; } #else NO_DEVICE_CODE; @@ -462,60 +461,26 @@ static_assert(get_VKQ_stride( 80, 4, 16) == 16, "Test failed."); template void ggml_cuda_flash_attn_ext_wmma_f16_case(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { const ggml_tensor * KQV = dst; - const ggml_tensor * Q = dst->src[0]; constexpr int nwarps = 4; constexpr int frag_m = cols_per_block == 8 && D % 32 == 0 ? 
32 : 16; - const int blocks_num_pb1 = ((Q->ne[1] + cols_per_block - 1) / cols_per_block)*Q->ne[2]*Q->ne[3]; - const int nsm = ggml_cuda_info().devices[ggml_cuda_get_device()].nsm; const int warp_size = ggml_cuda_info().devices[ggml_cuda_get_device()].warp_size; float logit_softcap; memcpy(&logit_softcap, (const float *) KQV->op_params + 2, sizeof(float)); - if (4*blocks_num_pb1 < 2*nsm) { - constexpr int parallel_blocks = 4; - fattn_kernel_t fattn_kernel; - if (logit_softcap == 0.0f) { - constexpr bool use_logit_softcap = false; - fattn_kernel = flash_attn_ext_f16< - D, cols_per_block, nwarps, get_VKQ_stride(D, nwarps, frag_m), parallel_blocks, KQ_acc_t, use_logit_softcap>; - } else { - constexpr bool use_logit_softcap = true; - fattn_kernel = flash_attn_ext_f16< - D, cols_per_block, nwarps, get_VKQ_stride(D, nwarps, frag_m), parallel_blocks, KQ_acc_t, use_logit_softcap>; - } - launch_fattn(ctx, dst, fattn_kernel, nwarps, 0, true, true, warp_size); - return; - } - if (2*blocks_num_pb1 < 2*nsm) { - constexpr int parallel_blocks = 2; - fattn_kernel_t fattn_kernel; - if (logit_softcap == 0.0f) { - constexpr bool use_logit_softcap = false; - fattn_kernel = flash_attn_ext_f16< - D, cols_per_block, nwarps, get_VKQ_stride(D, nwarps, frag_m), parallel_blocks, KQ_acc_t, use_logit_softcap>; - } else { - constexpr bool use_logit_softcap = true; - fattn_kernel = flash_attn_ext_f16< - D, cols_per_block, nwarps, get_VKQ_stride(D, nwarps, frag_m), parallel_blocks, KQ_acc_t, use_logit_softcap>; - } - launch_fattn(ctx, dst, fattn_kernel, nwarps, 0, true, true, warp_size); - return; - } - constexpr int parallel_blocks = 1; fattn_kernel_t fattn_kernel; if (logit_softcap == 0.0f) { constexpr bool use_logit_softcap = false; fattn_kernel = flash_attn_ext_f16< - D, cols_per_block, nwarps, get_VKQ_stride(D, nwarps, frag_m), parallel_blocks, KQ_acc_t, use_logit_softcap>; + D, cols_per_block, nwarps, get_VKQ_stride(D, nwarps, frag_m), KQ_acc_t, use_logit_softcap>; } else { constexpr bool use_logit_softcap = true; fattn_kernel = flash_attn_ext_f16< - D, cols_per_block, nwarps, get_VKQ_stride(D, nwarps, frag_m), parallel_blocks, KQ_acc_t, use_logit_softcap>; + D, cols_per_block, nwarps, get_VKQ_stride(D, nwarps, frag_m), KQ_acc_t, use_logit_softcap>; } - launch_fattn(ctx, dst, fattn_kernel, nwarps, 0, true, true, warp_size); + launch_fattn(ctx, dst, fattn_kernel, nwarps, 0, FATTN_KQ_STRIDE, true, true, false, warp_size); } void ggml_cuda_flash_attn_ext_wmma_f16(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { diff --git a/ggml/src/ggml-cuda/fattn.cu b/ggml/src/ggml-cuda/fattn.cu index 2e72fc8fd380b..973541893ec21 100644 --- a/ggml/src/ggml-cuda/fattn.cu +++ b/ggml/src/ggml-cuda/fattn.cu @@ -281,13 +281,13 @@ void ggml_cuda_flash_attn_ext(ggml_backend_cuda_context & ctx, ggml_tensor * dst if (!fp16_mma_available(cc)) { if (prec == GGML_PREC_DEFAULT) { - if (Q->ne[1] <= 8) { + if (Q->ne[1] <= 8 || Q->ne[0] == 256) { ggml_cuda_flash_attn_ext_vec_f16(ctx, dst); } else { ggml_cuda_flash_attn_ext_tile_f16(ctx, dst); } } else { - if (Q->ne[1] <= 8) { + if (Q->ne[1] <= 8 || Q->ne[0] == 256) { ggml_cuda_flash_attn_ext_vec_f32(ctx, dst); } else { ggml_cuda_flash_attn_ext_tile_f32(ctx, dst); @@ -296,17 +296,17 @@ void ggml_cuda_flash_attn_ext(ggml_backend_cuda_context & ctx, ggml_tensor * dst return; } - const int gqa_ratio = Q->ne[2] / K->ne[2]; - const bool mma_fast_for_bs1 = fp16_mma_available(cc) && gqa_ratio % 2 == 0 && - K->type == GGML_TYPE_F16 && V->type == GGML_TYPE_F16 && mask; - if (Q->ne[1] == 1 && Q->ne[0] % 
(2*warp_size) == 0 && !mma_fast_for_bs1) { + const bool gqa_opt_applies = ((Q->ne[2] / K->ne[2]) % 2 == 0) && mask; // The mma-based kernels have GQA-specific optimizations + const bool mma_needs_data_conversion = K->type != GGML_TYPE_F16 || V->type != GGML_TYPE_F16; + const bool mma_faster_for_bs1 = new_mma_available(cc) && gqa_opt_applies && cc < GGML_CUDA_CC_ADA_LOVELACE && !mma_needs_data_conversion; + const bool can_use_vector_kernel = (Q->ne[0] % (2*warp_size) == 0) && (prec == GGML_PREC_DEFAULT || Q->ne[0] <= 128); + if (Q->ne[1] == 1 && can_use_vector_kernel && !mma_faster_for_bs1) { if (prec == GGML_PREC_DEFAULT) { ggml_cuda_flash_attn_ext_vec_f16(ctx, dst); - return; - } else if(Q->ne[0] <= 128) { + } else { ggml_cuda_flash_attn_ext_vec_f32(ctx, dst); - return; } + return; } // The MMA implementation needs Turing or newer, use the old WMMA code for Volta: diff --git a/ggml/src/ggml-cuda/ggml-cuda.cu b/ggml/src/ggml-cuda/ggml-cuda.cu index 5cb56df9a81ae..b783310ef7ba7 100644 --- a/ggml/src/ggml-cuda/ggml-cuda.cu +++ b/ggml/src/ggml-cuda/ggml-cuda.cu @@ -3230,6 +3230,9 @@ static bool ggml_backend_cuda_device_supports_op(ggml_backend_dev_t dev, const g #ifndef FLASH_ATTN_AVAILABLE return false; #endif // FLASH_ATTN_AVAILABLE + if (op->src[0]->ne[3] != 1) { + return false; + } if (op->src[1]->type == GGML_TYPE_BF16 || op->src[2]->type == GGML_TYPE_BF16) { return false; } diff --git a/ggml/src/ggml-cuda/vendors/hip.h b/ggml/src/ggml-cuda/vendors/hip.h index aace21e3a8b18..a4c717a321cfb 100644 --- a/ggml/src/ggml-cuda/vendors/hip.h +++ b/ggml/src/ggml-cuda/vendors/hip.h @@ -129,6 +129,7 @@ #define cudaGraph_t hipGraph_t #define cudaStream_t hipStream_t #define cudaSuccess hipSuccess +#define cudaOccupancyMaxActiveBlocksPerMultiprocessor hipOccupancyMaxActiveBlocksPerMultiprocessor #define __trap() do { abort(); __builtin_unreachable(); } while(0) #define CUBLAS_STATUS_SUCCESS HIPBLAS_STATUS_SUCCESS #define CUBLAS_STATUS_NOT_INITIALIZED HIPBLAS_STATUS_NOT_INITIALIZED diff --git a/ggml/src/ggml-cuda/vendors/musa.h b/ggml/src/ggml-cuda/vendors/musa.h index 997f671431e01..f2d55796e7874 100644 --- a/ggml/src/ggml-cuda/vendors/musa.h +++ b/ggml/src/ggml-cuda/vendors/musa.h @@ -134,5 +134,6 @@ #define cudaStreamCaptureModeRelaxed musaStreamCaptureModeRelaxed #define cudaStreamBeginCapture musaStreamBeginCapture #define cudaStreamEndCapture musaStreamEndCapture +#define cudaOccupancyMaxActiveBlocksPerMultiprocessor musaOccupancyMaxActiveBlocksPerMultiprocessor typedef mt_bfloat16 nv_bfloat16; diff --git a/tests/test-backend-ops.cpp b/tests/test-backend-ops.cpp index adb749bd5ec9a..d48cd21723155 100644 --- a/tests/test-backend-ops.cpp +++ b/tests/test-backend-ops.cpp @@ -259,6 +259,10 @@ static std::string var_to_str(ggml_type type) { return ggml_type_name(type); } +static std::string var_to_str(ggml_prec prec) { + return prec == GGML_PREC_F32 ? 
"f32" : "def"; +} + static std::string var_to_str(ggml_op_pool pool) { switch (pool) { case GGML_OP_POOL_AVG: return "avg"; @@ -3206,11 +3210,12 @@ struct test_flash_attn_ext : public test_case { const float max_bias; // ALiBi const float logit_softcap; // Gemma 2 + const ggml_prec prec; const ggml_type type_KV; std::array permute; std::string vars() override { - return VARS_TO_STR10(hs, nh, nr, kv, nb, mask, max_bias, logit_softcap, type_KV, permute); + return VARS_TO_STR11(hs, nh, nr, kv, nb, mask, max_bias, logit_softcap, prec, type_KV, permute); } double max_nmse_err() override { @@ -3225,9 +3230,9 @@ struct test_flash_attn_ext : public test_case { } test_flash_attn_ext(int64_t hs = 128, int64_t nh = 32, int64_t nr = 1, int64_t kv = 96, int64_t nb = 8, - bool mask = true, float max_bias = 0.0f, float logit_softcap = 0.0f, ggml_type type_KV = GGML_TYPE_F16, - std::array permute = {0, 1, 2, 3}) - : hs(hs), nh(nh), nr(nr), kv(kv), nb(nb), mask(mask), max_bias(max_bias), logit_softcap(logit_softcap), type_KV(type_KV), permute(permute) {} + bool mask = true, float max_bias = 0.0f, float logit_softcap = 0.0f, ggml_prec prec = GGML_PREC_F32, + ggml_type type_KV = GGML_TYPE_F16, std::array permute = {0, 1, 2, 3}) + : hs(hs), nh(nh), nr(nr), kv(kv), nb(nb), mask(mask), max_bias(max_bias), logit_softcap(logit_softcap), prec(prec), type_KV(type_KV), permute(permute) {} ggml_tensor * build_graph(ggml_context * ctx) override { const int64_t hs_padded = GGML_PAD(hs, ggml_blck_size(type_KV)); @@ -3261,6 +3266,7 @@ struct test_flash_attn_ext : public test_case { } ggml_tensor * out = ggml_flash_attn_ext(ctx, q, k, v, m, 1.0f/sqrtf(hs), max_bias, logit_softcap); + ggml_flash_attn_ext_set_prec(out, prec); ggml_set_name(out, "out"); return out; @@ -4376,11 +4382,16 @@ static std::vector> make_test_cases_eval() { for (int kv : { 512, 1024, }) { if (nr != 1 && kv != 512) continue; for (int nb : { 1, 3, 32, 35, }) { - for (ggml_type type_KV : {GGML_TYPE_F16, GGML_TYPE_BF16, GGML_TYPE_Q8_0, GGML_TYPE_Q4_0}) { - test_cases.emplace_back(new test_flash_attn_ext(hs, nh, nr, kv, nb, mask, max_bias, logit_softcap, type_KV)); - // run fewer test cases permuted - if (mask == true && max_bias == 0.0f && logit_softcap == 0 && kv == 512) { - test_cases.emplace_back(new test_flash_attn_ext(hs, nh, nr, kv, nb, mask, max_bias, logit_softcap, type_KV, {0, 2, 1, 3})); + for (ggml_prec prec : {GGML_PREC_F32, GGML_PREC_DEFAULT}) { + if (hs != 128 && prec == GGML_PREC_DEFAULT) continue; + for (ggml_type type_KV : {GGML_TYPE_F16, GGML_TYPE_BF16, GGML_TYPE_Q8_0, GGML_TYPE_Q4_0}) { + test_cases.emplace_back(new test_flash_attn_ext( + hs, nh, nr, kv, nb, mask, max_bias, logit_softcap, prec, type_KV)); + // run fewer test cases permuted + if (mask == true && max_bias == 0.0f && logit_softcap == 0 && kv == 512) { + test_cases.emplace_back(new test_flash_attn_ext( + hs, nh, nr, kv, nb, mask, max_bias, logit_softcap, prec, type_KV, {0, 2, 1, 3})); + } } } } From 568013d0cd3d5add37c376b3d5e959809b711fc7 Mon Sep 17 00:00:00 2001 From: fairydreaming <166155368+fairydreaming@users.noreply.github.com> Date: Wed, 19 Mar 2025 21:01:57 +0100 Subject: [PATCH 092/398] context : clear sets containing encoder output sequence ids before storing new values (#12470) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Stanisław Szymczyk --- src/llama-context.cpp | 1 + 1 file changed, 1 insertion(+) diff --git a/src/llama-context.cpp b/src/llama-context.cpp index 664703a896701..5bec63e2e79ff 100644 
--- a/src/llama-context.cpp +++ b/src/llama-context.cpp @@ -1153,6 +1153,7 @@ int llama_context::encode(llama_batch & inp_batch) { // remember the sequence ids used during the encoding - needed for cross attention later cross.seq_ids_enc.resize(n_tokens); for (int32_t i = 0; i < n_tokens; i++) { + cross.seq_ids_enc[i].clear(); for (int s = 0; s < ubatch.n_seq_id[i]; s++) { llama_seq_id seq_id = ubatch.seq_id[i][s]; cross.seq_ids_enc[i].insert(seq_id); From 732b5fbf5e7f9cf069942f0c5850ee959ef321ba Mon Sep 17 00:00:00 2001 From: Bartowski Date: Thu, 20 Mar 2025 02:36:37 -0400 Subject: [PATCH 093/398] convert : avoid calls to tokenizer.added_tokens_decoder (#12473) tokenizer.added_tokens_decoder returns a fresh dict every time relatively slowly (~0.04s on average) which results in massive slowdowns when we have a huge number of added tokens --- convert_hf_to_gguf.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/convert_hf_to_gguf.py b/convert_hf_to_gguf.py index 7574218e241d4..d21edce16b71e 100755 --- a/convert_hf_to_gguf.py +++ b/convert_hf_to_gguf.py @@ -529,6 +529,8 @@ def get_vocab_base(self) -> tuple[list[str], list[int], str]: reverse_vocab = {id_: encoded_tok for encoded_tok, id_ in tokenizer.vocab.items()} added_vocab = tokenizer.get_added_vocab() + added_tokens_decoder = tokenizer.added_tokens_decoder + for i in range(vocab_size): if i not in reverse_vocab: tokens.append(f"[PAD{i}]") @@ -538,13 +540,13 @@ def get_vocab_base(self) -> tuple[list[str], list[int], str]: if token in added_vocab: # The tokenizer in llama.cpp assumes the CONTROL and USER_DEFINED tokens are pre-normalized. # To avoid unexpected issues - we make sure to normalize non-normalized tokens - if not tokenizer.added_tokens_decoder[i].normalized: + if not added_tokens_decoder[i].normalized: previous_token = token token = tokenizer.decode(tokenizer.encode(token, add_special_tokens=False)) if previous_token != token: logger.info(f"{repr(previous_token)} is encoded and decoded back to {repr(token)} using AutoTokenizer") - if tokenizer.added_tokens_decoder[i].special or self.does_token_look_special(token): + if added_tokens_decoder[i].special or self.does_token_look_special(token): toktypes.append(gguf.TokenType.CONTROL) else: # NOTE: this was added for Gemma. 
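Note: the speedup in the patch above comes from hoisting the added_tokens_decoder property access out of the per-token loop, since each access to that property rebuilds its dict (~0.04s per call adds up over a large vocabulary). A minimal sketch of the pattern, assuming a Hugging Face-style tokenizer object; the function name and variables here are illustrative and are not part of the patch:

    def collect_added_token_flags(tokenizer, vocab_size):
        # Cache the property result once; every attribute access rebuilds the dict.
        added_tokens_decoder = tokenizer.added_tokens_decoder
        flagged_ids = []
        for i in range(vocab_size):
            info = added_tokens_decoder.get(i)  # AddedToken or None
            if info is not None and (info.special or not info.normalized):
                flagged_ids.append(i)  # special / non-normalized added tokens
        return flagged_ids

The diff applies the same one-line hoist inside get_vocab_base(), keeping the per-token checks unchanged.
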
From 3d82dbcbce2c677fc35fbf99574ccd107d95a1f8 Mon Sep 17 00:00:00 2001 From: Srihari-mcw <96763064+Srihari-mcw@users.noreply.github.com> Date: Thu, 20 Mar 2025 17:05:34 +0530 Subject: [PATCH 094/398] ggml : block interleaving support for Q4_K quantization for x86 AVX2 architecture (#12332) * Add block interleaving support for Q4_K quantization * Remove whitespaces and fix CI/CD issues * Update pointer of bsums from int16_t to const int16_t * Add vector version of quantize_q8_K_4x8 function * Update code formatting based on review comments --- ggml/src/ggml-cpu/ggml-cpu-aarch64.cpp | 1505 +++++++++++++++++++++++- 1 file changed, 1493 insertions(+), 12 deletions(-) diff --git a/ggml/src/ggml-cpu/ggml-cpu-aarch64.cpp b/ggml/src/ggml-cpu/ggml-cpu-aarch64.cpp index c24fd56e20886..e852c8253b44e 100644 --- a/ggml/src/ggml-cpu/ggml-cpu-aarch64.cpp +++ b/ggml/src/ggml-cpu/ggml-cpu-aarch64.cpp @@ -45,6 +45,24 @@ using block_q4_0x8 = block<4, 8>; using block_q8_0x4 = block<8, 4>; using block_q8_0x8 = block<8, 8>; + +struct block_q4_Kx8 { + ggml_half d[8]; // super-block scale for quantized scales + ggml_half dmin[8]; // super-block scale for quantized mins + uint8_t scales[96]; // scales and mins, quantized with 6 bits + uint8_t qs[1024]; // 4--bit quants +}; + +static_assert(sizeof(block_q4_Kx8) == sizeof(ggml_half) * 16 + K_SCALE_SIZE * 8 + QK_K * 4, "wrong q4_K block size/padding"); + +struct block_q8_Kx4 { + float d[4]; // delta + int8_t qs[QK_K * 4]; // quants + int16_t bsums[QK_K / 4]; // sum of quants in groups of 16 +}; + +static_assert(sizeof(block_q8_Kx4) == sizeof(float) * 4 + QK_K * 4 + (QK_K / 4) * sizeof(int16_t), "wrong q8_K block size/padding"); + struct block_iq4_nlx4 { ggml_half d[4]; // deltas for 4 iq4_nl blocks uint8_t qs[QK4_NL * 2]; // nibbles / quants for 4 iq4_nl blocks @@ -60,6 +78,13 @@ static_assert(sizeof(block_iq4_nlx4) == 4 * sizeof(ggml_half) + QK4_NL * 2, "wro #define UNUSED GGML_UNUSED +static inline int nearest_int(float fval) { + assert(fabsf(fval) <= 4194303.f); + float val = fval + 12582912.f; + int i; memcpy(&i, &val, sizeof(int)); + return (i & 0x007fffff) - 0x00400000; +} + // Functions to create the interleaved data layout formats // interleave 4 block_q4_0s in blocks of blck_size_interleave @@ -534,6 +559,270 @@ static void quantize_q8_0_4x8(const float * GGML_RESTRICT x, void * GGML_RESTRIC #endif } +static void quantize_q8_K_4x8(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t k) { + assert(QK_K == 256); + assert(k % QK_K == 0); + const int nb = k / QK_K; + + block_q8_Kx4 * GGML_RESTRICT y = (block_q8_Kx4 *) vy; + +#if defined(__AVX2__) + float iscale[4]; + __m256 srcv[4][32]; + __m256 iscale_vec[4]; + + for (int i = 0; i < nb; i++) { + for (int row_iter = 0; row_iter < 4; row_iter++) { + // Load elements into 4 AVX vectors + __m256 v0 = _mm256_loadu_ps( x + row_iter * k + i * 256 ); + __m256 v1 = _mm256_loadu_ps( x + row_iter * k + i * 256 + 8 ); + __m256 v2 = _mm256_loadu_ps( x + row_iter * k + i * 256 + 16 ); + __m256 v3 = _mm256_loadu_ps( x + row_iter * k + i * 256 + 24 ); + + // Compute max(abs(e)) for the block + const __m256 signBit = _mm256_set1_ps( -0.0f ); + __m256 abs0 = _mm256_andnot_ps( signBit, v0 ); + __m256 abs1 = _mm256_andnot_ps( signBit, v1 ); + __m256 abs2 = _mm256_andnot_ps( signBit, v2 ); + __m256 abs3 = _mm256_andnot_ps( signBit, v3 ); + + __m256 maxAbs = _mm256_max_ps( abs0, abs1 ); + maxAbs = _mm256_max_ps( maxAbs, abs2 ); + maxAbs = _mm256_max_ps( maxAbs, abs3 ); + + __m256 mask0 = _mm256_cmp_ps( maxAbs, v0, 
_CMP_EQ_OQ ); + __m256 mask1 = _mm256_cmp_ps( maxAbs, v1, _CMP_EQ_OQ ); + __m256 mask2 = _mm256_cmp_ps( maxAbs, v2, _CMP_EQ_OQ ); + __m256 mask3 = _mm256_cmp_ps( maxAbs, v3, _CMP_EQ_OQ ); + + __m256 maskAbs = _mm256_or_ps(_mm256_or_ps(mask0, mask1),_mm256_or_ps(mask2, mask3)); + + srcv[row_iter][0] = v0; + srcv[row_iter][1] = v1; + srcv[row_iter][2] = v2; + srcv[row_iter][3] = v3; + + for (int sb = 1; sb < 8; sb++) { + // Temporarily stores absolute quant values + __m256 tempAbs = maxAbs; + + // Load elements into 4 AVX vectors + __m256 v0 = _mm256_loadu_ps( x + row_iter * k + i * 256 + sb * 32); + __m256 v1 = _mm256_loadu_ps( x + row_iter * k + i * 256 + sb * 32 + 8 ); + __m256 v2 = _mm256_loadu_ps( x + row_iter * k + i * 256 + sb * 32 + 16 ); + __m256 v3 = _mm256_loadu_ps( x + row_iter * k + i * 256 + sb * 32 + 24 ); + + // Compute max(abs(e)) for the block + __m256 abs0 = _mm256_andnot_ps( signBit, v0 ); + __m256 abs1 = _mm256_andnot_ps( signBit, v1 ); + __m256 abs2 = _mm256_andnot_ps( signBit, v2 ); + __m256 abs3 = _mm256_andnot_ps( signBit, v3 ); + + maxAbs = _mm256_max_ps( maxAbs, abs0 ); + maxAbs = _mm256_max_ps( maxAbs, abs1 ); + maxAbs = _mm256_max_ps( maxAbs, abs2 ); + maxAbs = _mm256_max_ps( maxAbs, abs3 ); + + __m256 mask_prev = _mm256_cmp_ps( tempAbs, maxAbs, _CMP_EQ_OQ ); + maskAbs = _mm256_and_ps( maskAbs, mask_prev ); + + mask0 = _mm256_cmp_ps( maxAbs, v0, _CMP_EQ_OQ ); + mask1 = _mm256_cmp_ps( maxAbs, v1, _CMP_EQ_OQ ); + mask2 = _mm256_cmp_ps( maxAbs, v2, _CMP_EQ_OQ ); + mask3 = _mm256_cmp_ps( maxAbs, v3, _CMP_EQ_OQ ); + + __m256 mask_curr = _mm256_or_ps(_mm256_or_ps(mask0, mask1),_mm256_or_ps(mask2, mask3)); + maskAbs = _mm256_or_ps(maskAbs, mask_curr); + + srcv[row_iter][sb * 4] = v0; + srcv[row_iter][sb * 4 + 1] = v1; + srcv[row_iter][sb * 4 + 2] = v2; + srcv[row_iter][sb * 4 + 3] = v3; + } + + __m128 max4 = _mm_max_ps( _mm256_extractf128_ps( maxAbs, 1 ), _mm256_castps256_ps128( maxAbs ) ); + max4 = _mm_max_ps( max4, _mm_movehl_ps( max4, max4 ) ); + max4 = _mm_max_ss( max4, _mm_movehdup_ps( max4 ) ); + const float maxScalar = _mm_cvtss_f32( max4 ); + + __m256 maxScalarVec = _mm256_set1_ps(maxScalar); + + __m256 mask_next = _mm256_cmp_ps( maxScalarVec, maxAbs, _CMP_EQ_OQ ); + __m256 finalMask = _mm256_and_ps(maskAbs, mask_next); + + const int mask = _mm256_movemask_ps(finalMask); + iscale[row_iter] = ( maxScalar != 0.0f ) ? 127.f / maxScalar : 0.0f; + + if(mask) { + iscale[row_iter] = ( maxScalar != 0.0f ) ? -127.f / maxScalar: 0.0f; + } + + y[i].d[row_iter] = maxScalar ? 
1/iscale[row_iter] : 0; + iscale_vec[row_iter] = _mm256_set1_ps(iscale[row_iter]); + } + + __m256i quants_interleaved[32]; + for (int j = 0; j < 32; j++) { + // Apply the multiplier + __m256 v0 = _mm256_mul_ps(srcv[0][j], iscale_vec[0]); + __m256 v1 = _mm256_mul_ps(srcv[1][j], iscale_vec[1]); + __m256 v2 = _mm256_mul_ps(srcv[2][j], iscale_vec[2]); + __m256 v3 = _mm256_mul_ps(srcv[3][j], iscale_vec[3]); + + // Round to nearest integer + v0 = _mm256_round_ps( v0, _MM_ROUND_NEAREST ); + v1 = _mm256_round_ps( v1, _MM_ROUND_NEAREST ); + v2 = _mm256_round_ps( v2, _MM_ROUND_NEAREST ); + v3 = _mm256_round_ps( v3, _MM_ROUND_NEAREST ); + + // Convert floats to integers + __m256i i0 = _mm256_cvtps_epi32( v0 ); + __m256i i1 = _mm256_cvtps_epi32( v1 ); + __m256i i2 = _mm256_cvtps_epi32( v2 ); + __m256i i3 = _mm256_cvtps_epi32( v3 ); + + // Convert int32 to int16 + i0 = _mm256_packs_epi32( i0, i1 ); + i2 = _mm256_packs_epi32( i2, i3 ); + // Convert int16 to int8 + i0 = _mm256_packs_epi16( i0, i2 ); + + // Permute and store the quantized weights in the required order after the pack instruction + const __m256i perm = _mm256_setr_epi32( 0, 4, 1, 5, 2, 6, 3, 7 ); + i0 = _mm256_permutevar8x32_epi32( i0, perm ); + + _mm256_storeu_si256((__m256i *)(y[i].qs + 32 * j), i0); + quants_interleaved[j] = i0; + } + + // Masks to shuffle the quants of corresonding sub blocks for rearraning quants for vectorized bsums computation + __m256i shuffle_mask_sb2 = _mm256_castsi128_si256(_mm_setr_epi8(0, 1, 0, 1, 4, 5, 6, 7, 8, 9, 8, 9, 12, 13, 14, 15)); + shuffle_mask_sb2 = _mm256_permute2f128_si256(shuffle_mask_sb2, shuffle_mask_sb2, 0); + __m256i shuffle_mask_sb3 = _mm256_castsi128_si256(_mm_setr_epi8(0, 1, 2, 3, 0, 1, 6, 7, 8, 9, 10, 11, 8, 9, 14, 15)); + shuffle_mask_sb3 = _mm256_permute2f128_si256(shuffle_mask_sb3, shuffle_mask_sb3, 0); + __m256i shuffle_mask_sb4 = _mm256_castsi128_si256(_mm_setr_epi8(0, 1, 2, 3, 4, 5, 0, 1, 8, 9, 10, 11, 12, 13, 8, 9)); + shuffle_mask_sb4 = _mm256_permute2f128_si256(shuffle_mask_sb4, shuffle_mask_sb4, 0); + + for (int k = 0; k < 4; k++) { + // Quants from four different sub blocks are taken + __m256i q0 = quants_interleaved[k * 8 + 0]; + __m256i q1 = quants_interleaved[k * 8 + 1]; + __m256i q2 = quants_interleaved[k * 8 + 2]; + __m256i q3 = quants_interleaved[k * 8 + 3]; + __m256i q4 = quants_interleaved[k * 8 + 4]; + __m256i q5 = quants_interleaved[k * 8 + 5]; + __m256i q6 = quants_interleaved[k * 8 + 6]; + __m256i q7 = quants_interleaved[k * 8 + 7]; + + + // The below code block has the first half of different sub blocks shuffled and blended so as to process 2 values from each sub block at a time + __m256i sb2_h1_shuffled = _mm256_shuffle_epi8(q2, shuffle_mask_sb2); + __m256i sb_h1_interleaved = _mm256_blend_epi16(q0, sb2_h1_shuffled, 34); + __m256i sb3_h1_shuffled = _mm256_shuffle_epi8(q4, shuffle_mask_sb3); + sb_h1_interleaved = _mm256_blend_epi16(sb_h1_interleaved, sb3_h1_shuffled, 68); + __m256i sb4_h1_shuffled = _mm256_shuffle_epi8(q6, shuffle_mask_sb4); + sb_h1_interleaved = _mm256_blend_epi16(sb_h1_interleaved, sb4_h1_shuffled, 136); + + __m256i one = _mm256_set1_epi8(1); + __m256i bsums_r1 = _mm256_maddubs_epi16(one, sb_h1_interleaved); + + for (int l = 0; l < 3; l++) { + // Quants value shifted to process next two values from each sub block + q0 = _mm256_srli_epi64(q0, 16); + q2 = _mm256_srli_epi64(q2, 16); + q4 = _mm256_srli_epi64(q4, 16); + q6 = _mm256_srli_epi64(q6, 16); + + sb2_h1_shuffled = _mm256_shuffle_epi8(q2, shuffle_mask_sb2); + sb_h1_interleaved = 
_mm256_blend_epi16(q0, sb2_h1_shuffled, 34); + sb3_h1_shuffled = _mm256_shuffle_epi8(q4, shuffle_mask_sb3); + sb_h1_interleaved = _mm256_blend_epi16(sb_h1_interleaved, sb3_h1_shuffled, 68); + sb4_h1_shuffled = _mm256_shuffle_epi8(q6, shuffle_mask_sb4); + sb_h1_interleaved = _mm256_blend_epi16(sb_h1_interleaved, sb4_h1_shuffled, 136); + + bsums_r1 = _mm256_add_epi16(bsums_r1, _mm256_maddubs_epi16(one, sb_h1_interleaved)); + } + + // The below code block has the second half of different sub blocks shuffled and blended so as to process 2 values from each sub block at a time + __m256i sb2_h2_shuffled = _mm256_shuffle_epi8(q3, shuffle_mask_sb2); + __m256i sb_h2_interleaved = _mm256_blend_epi16(q1, sb2_h2_shuffled, 34); + __m256i sb3_h2_shuffled = _mm256_shuffle_epi8(q5, shuffle_mask_sb3); + sb_h2_interleaved = _mm256_blend_epi16(sb_h2_interleaved, sb3_h2_shuffled, 68); + __m256i sb4_h2_shuffled = _mm256_shuffle_epi8(q7, shuffle_mask_sb4); + sb_h2_interleaved = _mm256_blend_epi16(sb_h2_interleaved, sb4_h2_shuffled, 136); + + __m256i bsums_r2 = _mm256_maddubs_epi16(one, sb_h2_interleaved); + + for (int l = 0; l < 3; l++) { + // Quants value shifted to process next two values from each sub block + q1 = _mm256_srli_epi64(q1, 16); + q3 = _mm256_srli_epi64(q3, 16); + q5 = _mm256_srli_epi64(q5, 16); + q7 = _mm256_srli_epi64(q7, 16); + + sb2_h2_shuffled = _mm256_shuffle_epi8(q3, shuffle_mask_sb2); + sb_h2_interleaved = _mm256_blend_epi16(q1, sb2_h2_shuffled, 34); + sb3_h2_shuffled = _mm256_shuffle_epi8(q5, shuffle_mask_sb3); + sb_h2_interleaved = _mm256_blend_epi16(sb_h2_interleaved, sb3_h2_shuffled, 68); + sb4_h2_shuffled = _mm256_shuffle_epi8(q7, shuffle_mask_sb4); + sb_h2_interleaved = _mm256_blend_epi16(sb_h2_interleaved, sb4_h2_shuffled, 136); + + bsums_r2 = _mm256_add_epi16(bsums_r2, _mm256_maddubs_epi16(one, sb_h2_interleaved)); + } + + // Overall bsums in interleaved fashion computed by adding results of both halves + __m256i bsums_r = _mm256_add_epi16(bsums_r1, bsums_r2); + _mm256_storeu_si256((__m256i *)(y[i].bsums + 16 * k), bsums_r); + } + } + +#else + + // scalar + const int blck_size_interleave = 8; + float srcv[4][QK_K]; + float iscale[4]; + + for (int i = 0; i < nb; i++) { + for (int row_iter = 0; row_iter < 4; row_iter++) { + float amax = 0.0f; // absolute max + float max = 0; + + for (int j = 0; j < QK_K; j++) { + srcv[row_iter][j] = x[row_iter * k + i * QK_K + j]; + // Update the maximum value of the corresponding super block + if(amax < fabsf(srcv[row_iter][j])) { + amax = fabsf(srcv[row_iter][j]); + max = srcv[row_iter][j]; + } + } + + iscale[row_iter] = amax ? -127.f/max : 0; + + y[i].d[row_iter] = amax ? 
1/iscale[row_iter] : 0; + } + + for (int j = 0; j < QK_K / 4; j++) { + y[i].bsums[j] = 0; + } + + // Quants values are interleaved in sequence of eight bytes from corresponding super blocks + // Bsums values are interleaved in sequence of four bsums from each super block taken for interleaving + // i.e first four bsums from the first super block, followed by first four bsums from second super block and so on + for (int j = 0; j < QK_K * 4; j++) { + int src_offset = (j / (4 * blck_size_interleave)) * blck_size_interleave; + int src_id = (j % (4 * blck_size_interleave)) / blck_size_interleave; + src_offset += (j % blck_size_interleave); + int index = (((j & 31) >> 3) << 2) + ((j >> 8) << 4) + ((j >> 6) & 3); + + float x0 = srcv[src_id][src_offset] * iscale[src_id]; + y[i].qs[j] = nearest_int(x0); + y[i].bsums[index] += y[i].qs[j]; + } + } +#endif +} + static void quantize_mat_q8_0(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t nrow, int64_t n_per_row, int64_t blck_size_interleave) { assert(nrow == 4); UNUSED(nrow); @@ -546,6 +835,16 @@ static void quantize_mat_q8_0(const float * GGML_RESTRICT x, void * GGML_RESTRIC } } +static void quantize_mat_q8_K(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t nrow, int64_t n_per_row, int64_t blck_size_interleave) { + assert(nrow == 4); + UNUSED(nrow); + if (blck_size_interleave == 8) { + quantize_q8_K_4x8(x, vy, n_per_row); + } else { + assert(false); + } +} + static void ggml_gemv_q4_0_4x4_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) { const int qk = QK8_0; const int nb = n / qk; @@ -994,6 +1293,281 @@ static void ggml_gemv_q4_0_8x8_q8_0(int n, float * GGML_RESTRICT s, size_t bs, c } } +static void ggml_gemv_q4_K_8x8_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) { + const int qk = QK_K; + const int nb = n / qk; + const int ncols_interleaved = 8; + const int blocklen = 8; + static const uint32_t kmask1 = 0x3f3f3f3f; + static const uint32_t kmask2 = 0x0f0f0f0f; + static const uint32_t kmask3 = 0x03030303; + + assert (n % qk == 0); + assert (nc % ncols_interleaved == 0); + + UNUSED(s); + UNUSED(bs); + UNUSED(vx); + UNUSED(vy); + UNUSED(nr); + UNUSED(nc); + UNUSED(nb); + UNUSED(ncols_interleaved); + UNUSED(blocklen); + +#if defined(__AVX2__) + // Lookup table to convert signed nibbles to signed bytes + __m256i signextendlut = _mm256_castsi128_si256(_mm_set_epi8(-1, -2, -3, -4, -5, -6, -7, -8, 7, 6, 5, 4, 3, 2, 1, 0)); + signextendlut = _mm256_permute2f128_si256(signextendlut, signextendlut, 0); + // Shuffle masks to rearrange delta and scale values to multiply with appropriate scales + __m128i deltamask = _mm_set_epi8(15, 14, 7, 6, 13, 12, 5, 4, 11, 10, 3, 2, 9, 8, 1, 0); + __m128i scalemask = _mm_set_epi8(7, 7, 3, 3, 6, 6, 2, 2, 5, 5, 1, 1, 4, 4, 0, 0); + // Permute mask used for easier vector processing at later stages + __m256i finalpermutemask = _mm256_set_epi32(7, 5, 3, 1, 6, 4, 2, 0); + + // Mask to extract nibbles from bytes + const __m256i m4b = _mm256_set1_epi8(0x0F); + + int64_t b_nb = n / QK_K; + + const block_q4_Kx8 * b_ptr_start = (const block_q4_Kx8 *)vx; + const block_q8_K * a_ptr_start = (const block_q8_K *)vy; + + // Process Q8_K blocks one by one + for (int64_t y = 0; y < nr; y++) { + + // Pointers to LHS blocks of block_q8_K format + const block_q8_K * a_ptr = a_ptr_start + (y * nb); + + // Take group of eight interleaved block_q4_K structures at 
each pass of the loop and perform dot product operation + for (int64_t x = 0; x < nc / 8; x++) { + + // Pointers to RHS blocks + const block_q4_Kx8 * b_ptr = b_ptr_start + (x * b_nb); + + // Master FP accumulators + __m256 acc_row = _mm256_setzero_ps(); + __m256 acc_min_rows = _mm256_setzero_ps(); + + for (int64_t b = 0; b < nb; b++) { + + // Load and convert to FP32 scale from block_q8_K + const __m256 row_scale_f32 = _mm256_set1_ps((a_ptr[b].d)); + + // Load the scale values for the 8 blocks interleaved in block_q4_Kx8 + // col_scale_f32 rearranged so as to multiply with appropriate quants + const __m256 col_scale_f32 = GGML_F32Cx8_REARRANGE_LOAD(b_ptr[b].d, deltamask); + const __m256 col_dmin_f32 = GGML_F32Cx8_LOAD(b_ptr[b].dmin); + + __m256i iacc_b = _mm256_setzero_si256(); + __m256i iacc_min_b = _mm256_setzero_si256(); + + const __m256i q8sums = _mm256_loadu_si256((const __m256i * )(a_ptr[b].bsums)); + __m256i q8s = _mm256_castsi128_si256(_mm_hadd_epi16(_mm256_castsi256_si128(q8sums), _mm256_extracti128_si256(q8sums, 1))); + q8s = _mm256_permute2f128_si256(q8s, q8s, 0); + + // Processes two sub blocks from each Q4_K in each iteration + for (int sb = 0; sb < QK_K / 64; sb++) { + + // Load the eight block_q4_K for two sub blocks quantized values interleaved with each other in chunks of eight - B0,B1 ....B6,B7 + const __m256i rhs_raw_vec_0123_0 = _mm256_loadu_si256((const __m256i * )(b_ptr[b].qs + sb * 256)); + const __m256i rhs_raw_vec_4567_0 = _mm256_loadu_si256((const __m256i * )(b_ptr[b].qs + 32 + sb * 256)); + const __m256i rhs_raw_vec_0123_1 = _mm256_loadu_si256((const __m256i * )(b_ptr[b].qs + 64 + sb * 256)); + const __m256i rhs_raw_vec_4567_1 = _mm256_loadu_si256((const __m256i * )(b_ptr[b].qs + 96 + sb * 256)); + const __m256i rhs_raw_vec_0123_2 = _mm256_loadu_si256((const __m256i * )(b_ptr[b].qs + 128 + sb * 256)); + const __m256i rhs_raw_vec_4567_2 = _mm256_loadu_si256((const __m256i * )(b_ptr[b].qs + 160 + sb * 256)); + const __m256i rhs_raw_vec_0123_3 = _mm256_loadu_si256((const __m256i * )(b_ptr[b].qs + 192 + sb * 256)); + const __m256i rhs_raw_vec_4567_3 = _mm256_loadu_si256((const __m256i * )(b_ptr[b].qs + 224 + sb * 256)); + + // 4-bit -> 8-bit + // Values of the first sub block of eight block_q4_K structures for the sb loop + const __m256i rhs_vec_0123_00 = _mm256_and_si256(rhs_raw_vec_0123_0, m4b); + const __m256i rhs_vec_4567_00 = _mm256_and_si256(rhs_raw_vec_4567_0, m4b); + const __m256i rhs_vec_0123_01 = _mm256_and_si256(rhs_raw_vec_0123_1, m4b); + const __m256i rhs_vec_4567_01 = _mm256_and_si256(rhs_raw_vec_4567_1, m4b); + const __m256i rhs_vec_0123_02 = _mm256_and_si256(rhs_raw_vec_0123_2, m4b); + const __m256i rhs_vec_4567_02 = _mm256_and_si256(rhs_raw_vec_4567_2, m4b); + const __m256i rhs_vec_0123_03 = _mm256_and_si256(rhs_raw_vec_0123_3, m4b); + const __m256i rhs_vec_4567_03 = _mm256_and_si256(rhs_raw_vec_4567_3, m4b); + + // Values of the second sub block of eight block_q4_K structures when sb = 1 + const __m256i rhs_vec_0123_10 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_vec_0123_0, 4), m4b); + const __m256i rhs_vec_4567_10 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_vec_4567_0, 4), m4b); + const __m256i rhs_vec_0123_11 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_vec_0123_1, 4), m4b); + const __m256i rhs_vec_4567_11 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_vec_4567_1, 4), m4b); + const __m256i rhs_vec_0123_12 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_vec_0123_2, 4), m4b); + const __m256i rhs_vec_4567_12 = 
_mm256_and_si256(_mm256_srli_epi16(rhs_raw_vec_4567_2, 4), m4b); + const __m256i rhs_vec_0123_13 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_vec_0123_3, 4), m4b); + const __m256i rhs_vec_4567_13 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_vec_4567_3, 4), m4b); + + uint32_t utmp_0[4], utmp_1[4]; + + // Scales and Mins of corresponding sub blocks from different Q8_K structures are stored together + // The below block is for eg to extract first sub block's scales and mins from different Q4_K structures for the sb loop + memcpy(utmp_0, b_ptr[b].scales + 24 * sb, 12); + utmp_0[3] = ((utmp_0[2] >> 4) & kmask2) | (((utmp_0[1] >> 6) & kmask3) << 4); + const uint32_t uaux_0 = utmp_0[1] & kmask1; + utmp_0[1] = (utmp_0[2] & kmask2) | (((utmp_0[0] >> 6) & kmask3) << 4); + utmp_0[2] = uaux_0; + utmp_0[0] &= kmask1; + + // The below block is for eg to extract second sub block's scales and mins from different Q4_K structures for the sb loop + memcpy(utmp_1, b_ptr[b].scales + 12 + sb * 24, 12); + utmp_1[3] = ((utmp_1[2] >> 4) & kmask2) | (((utmp_1[1] >> 6) & kmask3) << 4); + const uint32_t uaux_1 = utmp_1[1] & kmask1; + utmp_1[1] = (utmp_1[2] & kmask2) | (((utmp_1[0] >> 6) & kmask3) << 4); + utmp_1[2] = uaux_1; + utmp_1[0] &= kmask1; + + // Scales of first sub block in the sb loop + const __m128i mins_and_scales_0 = _mm_set_epi32(utmp_0[3], utmp_0[2], utmp_0[1], utmp_0[0]); + __m128i scales_rearrange_0 = _mm_shuffle_epi8(mins_and_scales_0, scalemask); + __m256i scales_0 = _mm256_cvtepu8_epi16(scales_rearrange_0); + + // Scales of second sub block in the sb loop + __m128i mins_and_scales_1 = _mm_set_epi32(utmp_1[3], utmp_1[2], utmp_1[1], utmp_1[0]); + __m128i scales_rearrange_1 = _mm_shuffle_epi8(mins_and_scales_1, scalemask); + __m256i scales_1 = _mm256_cvtepu8_epi16(scales_rearrange_1); + + // Mins of first and second sub block of Q4_K block are arranged side by side + __m256i mins_01 = _mm256_cvtepu8_epi16(_mm_unpacklo_epi8(_mm_shuffle_epi32(mins_and_scales_0, 78), _mm_shuffle_epi32(mins_and_scales_1, 78))); + + // Load the two sub block values corresponding to sb in block_q8_K in batches of 16 bytes and replicate the same across 256 bit vector + __m256i lhs_vec_00 = _mm256_castsi128_si256(_mm_loadu_si128((const __m128i *)(a_ptr[b].qs + sb * 64))); + __m256i lhs_vec_01 = _mm256_castsi128_si256(_mm_loadu_si128((const __m128i *)(a_ptr[b].qs + 16 + sb * 64))); + __m256i lhs_vec_10 = _mm256_castsi128_si256(_mm_loadu_si128((const __m128i *)(a_ptr[b].qs + 32 + sb * 64))); + __m256i lhs_vec_11 = _mm256_castsi128_si256(_mm_loadu_si128((const __m128i *)(a_ptr[b].qs + 48 + sb * 64))); + + lhs_vec_00 = _mm256_permute2f128_si256(lhs_vec_00, lhs_vec_00, 0); + lhs_vec_01 = _mm256_permute2f128_si256(lhs_vec_01, lhs_vec_01, 0); + lhs_vec_10 = _mm256_permute2f128_si256(lhs_vec_10, lhs_vec_10, 0); + lhs_vec_11 = _mm256_permute2f128_si256(lhs_vec_11, lhs_vec_11, 0); + + // Dot product done within 32 bit lanes and accumulated in the same vector + // First done for first sub block and thenn for second sub block in each sb + // B0(0-3) B4(0-3) B1(0-3) B5(0-3) B2(0-3) B6(0-3) B3(0-3) B7(0-3) with A0(0-3) + // B0(4-7) B4(4-7) B1(4-7) B5(4-7) B2(4-7) B6(4-7) B3(4-7) B7(4-7) with A0(4-7) + // ........................................................................... 
+ // B0(28-31) B4(28-31) B1(28-31) B5(28-31) B2(28-31) B6(28-31) B3(28-31) B7(28-31) with A0(28-31) + + + __m256i iacc_0 = _mm256_setzero_si256(); + __m256i iacc_1 = _mm256_setzero_si256(); + + iacc_0 = _mm256_add_epi16(iacc_0, _mm256_maddubs_epi16(_mm256_blend_epi32(rhs_vec_0123_00 ,_mm256_shuffle_epi32(rhs_vec_4567_00, 177), 170), _mm256_shuffle_epi32(lhs_vec_00, 0))); + iacc_0 = _mm256_add_epi16(iacc_0, _mm256_maddubs_epi16(_mm256_blend_epi32(_mm256_shuffle_epi32(rhs_vec_0123_00, 177) ,rhs_vec_4567_00, 170), _mm256_shuffle_epi32(lhs_vec_00, 85))); + + iacc_0 = _mm256_add_epi16(iacc_0, _mm256_maddubs_epi16(_mm256_blend_epi32(rhs_vec_0123_01 ,_mm256_shuffle_epi32(rhs_vec_4567_01, 177), 170), _mm256_shuffle_epi32(lhs_vec_00, 170))); + iacc_0 = _mm256_add_epi16(iacc_0, _mm256_maddubs_epi16(_mm256_blend_epi32(_mm256_shuffle_epi32(rhs_vec_0123_01, 177) ,rhs_vec_4567_01, 170), _mm256_shuffle_epi32(lhs_vec_00, 255))); + + iacc_0 = _mm256_add_epi16(iacc_0, _mm256_maddubs_epi16(_mm256_blend_epi32(rhs_vec_0123_02 ,_mm256_shuffle_epi32(rhs_vec_4567_02, 177), 170), _mm256_shuffle_epi32(lhs_vec_01, 0))); + iacc_0 = _mm256_add_epi16(iacc_0, _mm256_maddubs_epi16(_mm256_blend_epi32(_mm256_shuffle_epi32(rhs_vec_0123_02, 177) ,rhs_vec_4567_02, 170), _mm256_shuffle_epi32(lhs_vec_01, 85))); + + iacc_0 = _mm256_add_epi16(iacc_0, _mm256_maddubs_epi16(_mm256_blend_epi32(rhs_vec_0123_03 ,_mm256_shuffle_epi32(rhs_vec_4567_03, 177), 170), _mm256_shuffle_epi32(lhs_vec_01, 170))); + iacc_0 = _mm256_add_epi16(iacc_0, _mm256_maddubs_epi16(_mm256_blend_epi32(_mm256_shuffle_epi32(rhs_vec_0123_03, 177) ,rhs_vec_4567_03, 170), _mm256_shuffle_epi32(lhs_vec_01, 255))); + + iacc_0 = _mm256_madd_epi16(iacc_0, scales_0); + + iacc_1 = _mm256_add_epi16(iacc_1, _mm256_maddubs_epi16(_mm256_blend_epi32(rhs_vec_0123_10 ,_mm256_shuffle_epi32(rhs_vec_4567_10, 177), 170), _mm256_shuffle_epi32(lhs_vec_10, 0))); + iacc_1 = _mm256_add_epi16(iacc_1, _mm256_maddubs_epi16(_mm256_blend_epi32(_mm256_shuffle_epi32(rhs_vec_0123_10, 177) ,rhs_vec_4567_10, 170), _mm256_shuffle_epi32(lhs_vec_10, 85))); + + iacc_1 = _mm256_add_epi16(iacc_1, _mm256_maddubs_epi16(_mm256_blend_epi32(rhs_vec_0123_11 ,_mm256_shuffle_epi32(rhs_vec_4567_11, 177), 170), _mm256_shuffle_epi32(lhs_vec_10, 170))); + iacc_1 = _mm256_add_epi16(iacc_1, _mm256_maddubs_epi16(_mm256_blend_epi32(_mm256_shuffle_epi32(rhs_vec_0123_11, 177) ,rhs_vec_4567_11, 170), _mm256_shuffle_epi32(lhs_vec_10, 255))); + + iacc_1 = _mm256_add_epi16(iacc_1, _mm256_maddubs_epi16(_mm256_blend_epi32(rhs_vec_0123_12 ,_mm256_shuffle_epi32(rhs_vec_4567_12, 177), 170), _mm256_shuffle_epi32(lhs_vec_11, 0))); + iacc_1 = _mm256_add_epi16(iacc_1, _mm256_maddubs_epi16(_mm256_blend_epi32(_mm256_shuffle_epi32(rhs_vec_0123_12, 177) ,rhs_vec_4567_12, 170), _mm256_shuffle_epi32(lhs_vec_11, 85))); + + iacc_1 = _mm256_add_epi16(iacc_1, _mm256_maddubs_epi16(_mm256_blend_epi32(rhs_vec_0123_13 ,_mm256_shuffle_epi32(rhs_vec_4567_13, 177), 170), _mm256_shuffle_epi32(lhs_vec_11, 170))); + iacc_1 = _mm256_add_epi16(iacc_1, _mm256_maddubs_epi16(_mm256_blend_epi32(_mm256_shuffle_epi32(rhs_vec_0123_13, 177) ,rhs_vec_4567_13, 170), _mm256_shuffle_epi32(lhs_vec_11, 255))); + + iacc_1 = _mm256_madd_epi16(iacc_1, scales_1); + + // Accumulate the iacc value for one sb + __m256i iacc_sb = _mm256_add_epi32(iacc_0, iacc_1); + + // Broadcast the bsums of the two sub blocks of the iteration of Q8_K across the vector + // Multiply-Add with corresponding mins of Q4_Kx8 with bsums + __m256i q8s_sb = _mm256_shuffle_epi32(q8s, 0); + __m256i 
iacc_min_sb = _mm256_madd_epi16(q8s_sb, mins_01); + q8s = _mm256_bsrli_epi128(q8s, 4); + + // Accumulate for the complete block + iacc_b = _mm256_add_epi32(iacc_b, iacc_sb); + iacc_min_b = _mm256_add_epi32(iacc_min_b, iacc_min_sb); + } + + // Multiply-Add with scale values for the complete super block + acc_row = _mm256_fmadd_ps(_mm256_cvtepi32_ps(iacc_b), _mm256_mul_ps(col_scale_f32, row_scale_f32), acc_row); + acc_min_rows = _mm256_fmadd_ps(_mm256_cvtepi32_ps(iacc_min_b), _mm256_mul_ps(col_dmin_f32, row_scale_f32), acc_min_rows); + + } + + // Accumulated output values permuted so as to be stored in appropriate order post accumulation + acc_row = _mm256_permutevar8x32_ps(acc_row, finalpermutemask); + _mm256_storeu_ps(s + (y * nr + x * 8), _mm256_sub_ps(acc_row, acc_min_rows)); + } + } + +#else + + float sumf[8]; + float sum_minf[8]; + uint32_t utmp[32]; + int sumi1; + int sumi2; + int sumi; + + const block_q8_K * a_ptr = (const block_q8_K *) vy; + for (int x = 0; x < nc / ncols_interleaved; x++) { + const block_q4_Kx8 * b_ptr = (const block_q4_Kx8 *) vx + (x * nb); + + for (int j = 0; j < ncols_interleaved; j++) { + sumf[j] = 0.0; + sum_minf[j] = 0.0; + } + for (int l = 0; l < nb; l++) { + for (int sb = 0; sb < 8; sb++) { + memcpy(utmp + sb * 4, b_ptr[l].scales + sb * 12, 12); + utmp[sb * 4 + 3] = ((utmp[sb * 4 + 2] >> 4) & kmask2) | (((utmp[sb * 4 + 1] >> 6) & kmask3) << 4); + const uint32_t uaux_0 = utmp[sb * 4 + 1] & kmask1; + utmp[sb * 4 + 1] = (utmp[sb * 4 + 2] & kmask2) | (((utmp[sb * 4 + 0] >> 6) & kmask3) << 4); + utmp[sb * 4 + 2] = uaux_0; + utmp[sb * 4 + 0] &= kmask1; + } + for (int k = 0; k < (qk / (2 * blocklen)); k++) { + uint8_t *scales_0 = (uint8_t*) utmp + (k / 4) * 32; + uint8_t *scales_1 = (uint8_t*) utmp + (k / 4) * 32 + 16; + for (int j = 0; j < ncols_interleaved; j++) { + sumi1 = 0; + sumi2 = 0; + sumi = 0; + for (int i = 0; i < blocklen; ++i) { + const int v0 = (int8_t) (b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] & 0xF); + const int v1 = (int8_t) (b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] >> 4); + sumi1 = (v0 * a_ptr[l].qs[(k >> 2) * 64 + (k % 4) * blocklen + i]); + sumi2 = (v1 * a_ptr[l].qs[(k >> 2) * 64 + (k % 4) * blocklen + i + 32]); + sumi1 = sumi1 * scales_0[j]; + sumi2 = sumi2 * scales_1[j]; + sumi += sumi1 + sumi2; + } + sumf[j] += sumi * GGML_FP16_TO_FP32(b_ptr[l].d[j]) * a_ptr[l].d; + } + } + for (int sb = 0; sb < 8; sb++) { + uint8_t *mins = (uint8_t*) utmp + 8 + sb * 16; + for (int j = 0; j < ncols_interleaved; j++) { + sum_minf[j] += mins[j] * (a_ptr[l].bsums[sb * 2] + a_ptr[l].bsums[sb * 2 + 1]) * GGML_FP16_TO_FP32(b_ptr[l].dmin[j]) * a_ptr[l].d; + } + } + } + for (int j = 0; j < ncols_interleaved; j++) { + s[x * ncols_interleaved + j] = sumf[j] - sum_minf[j]; + } + } +#endif +} + + static void ggml_gemv_iq4_nl_4x4_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) { const int qk = QK8_0; const int nb = n / qk; @@ -3480,6 +4054,781 @@ static void ggml_gemm_q4_0_8x8_q8_0(int n, float * GGML_RESTRICT s, size_t bs, c } } +static void ggml_gemm_q4_K_8x8_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) { + const int qk = QK_K; + const int nb = n / qk; + const int ncols_interleaved = 8; + const int blocklen = 8; + static const uint32_t kmask1 = 0x3f3f3f3f; + static const uint32_t kmask2 = 0x0f0f0f0f; + static const uint32_t kmask3 = 0x03030303; + + assert (n % qk == 
0); + assert (nr % 4 == 0); + assert (nc % ncols_interleaved == 0); + + UNUSED(s); + UNUSED(bs); + UNUSED(vx); + UNUSED(vy); + UNUSED(nr); + UNUSED(nc); + UNUSED(nb); + UNUSED(ncols_interleaved); + UNUSED(blocklen); + +#if defined(__AVX2__) + const block_q4_Kx8 * b_ptr_start = (const block_q4_Kx8 * ) vx; + const block_q8_Kx4 * a_ptr_start = (const block_q8_Kx4 * ) vy; + int64_t b_nb = n / QK_K; + int64_t y = 0; + + // Mask to mask out nibbles from packed bytes + const __m256i m4b = _mm256_set1_epi8(0x0F); + // Permute mask used for easier vector processing at later stages + __m256i requiredOrder = _mm256_set_epi32(3, 2, 1, 0, 7, 6, 5, 4); + + int anr = nr - nr % 16;; // Used to align nr with boundary of 16 + // Take group of four block_q8_Kx4 structures at each pass of the loop and perform dot product operation + for (; y < anr / 4; y += 4) { + + const block_q8_Kx4 * a_ptrs[4]; + + a_ptrs[0] = a_ptr_start + (y * nb); + for (int i = 0; i < 3; ++i) { + a_ptrs[i + 1] = a_ptrs[i] + nb; + } + + // Take group of eight block_q4_kx8 structures at each pass of the loop and perform dot product operation + for (int64_t x = 0; x < nc / 8; x++) { + + const block_q4_Kx8 * b_ptr = b_ptr_start + (x * b_nb); + + // Master FP accumulators + __m256 acc_rows[16]; + for (int i = 0; i < 16; i++) { + acc_rows[i] = _mm256_setzero_ps(); + } + + __m256 acc_min_rows[16]; + for (int i = 0; i < 16; i++) { + acc_min_rows[i] = _mm256_setzero_ps(); + } + + // For super block + for (int64_t b = 0; b < nb; b++) { + + // Scale values - Load the eight scale values of block_q4_kx8 + const __m256 col_scale_f32 = GGML_F32Cx8_LOAD(b_ptr[b].d); + + // dmin values - Load the eight dmin values of block_q4_kx8 + const __m256 col_dmin_f32 = GGML_F32Cx8_LOAD(b_ptr[b].dmin); + + // Loop to iterate over the eight sub blocks of a super block - two sub blocks are processed per iteration + for (int sb = 0; sb < QK_K / 64; sb++) { + + // Load the eight block_q4_K for two sub blocks quantized values interleaved with each other in chunks of eight bytes - B0,B1 ....B6,B7 + const __m256i rhs_raw_mat_0123_0 = _mm256_loadu_si256((const __m256i * )(b_ptr[b].qs + sb * 256)); + const __m256i rhs_raw_mat_4567_0 = _mm256_loadu_si256((const __m256i * )(b_ptr[b].qs + 32 + sb * 256)); + const __m256i rhs_raw_mat_0123_1 = _mm256_loadu_si256((const __m256i * )(b_ptr[b].qs + 64 + sb * 256)); + const __m256i rhs_raw_mat_4567_1 = _mm256_loadu_si256((const __m256i * )(b_ptr[b].qs + 96 + sb * 256)); + const __m256i rhs_raw_mat_0123_2 = _mm256_loadu_si256((const __m256i * )(b_ptr[b].qs + 128 + sb * 256)); + const __m256i rhs_raw_mat_4567_2 = _mm256_loadu_si256((const __m256i * )(b_ptr[b].qs + 160 + sb * 256)); + const __m256i rhs_raw_mat_0123_3 = _mm256_loadu_si256((const __m256i * )(b_ptr[b].qs + 192 + sb * 256)); + const __m256i rhs_raw_mat_4567_3 = _mm256_loadu_si256((const __m256i * )(b_ptr[b].qs + 224 + sb * 256)); + + // Save the values in the following vectors in the formats B0B1B4B5, B2B3B6B7 for further processing and storing of values + const __m256i rhs_raw_mat_0145_0 = _mm256_blend_epi32(rhs_raw_mat_0123_0, _mm256_permutevar8x32_epi32(rhs_raw_mat_4567_0, requiredOrder), 240); + const __m256i rhs_raw_mat_2367_0 = _mm256_blend_epi32(_mm256_permutevar8x32_epi32(rhs_raw_mat_0123_0, requiredOrder), rhs_raw_mat_4567_0, 240); + const __m256i rhs_raw_mat_0145_1 = _mm256_blend_epi32(rhs_raw_mat_0123_1, _mm256_permutevar8x32_epi32(rhs_raw_mat_4567_1, requiredOrder), 240); + const __m256i rhs_raw_mat_2367_1 = 
_mm256_blend_epi32(_mm256_permutevar8x32_epi32(rhs_raw_mat_0123_1, requiredOrder), rhs_raw_mat_4567_1, 240); + const __m256i rhs_raw_mat_0145_2 = _mm256_blend_epi32(rhs_raw_mat_0123_2, _mm256_permutevar8x32_epi32(rhs_raw_mat_4567_2, requiredOrder), 240); + const __m256i rhs_raw_mat_2367_2 = _mm256_blend_epi32(_mm256_permutevar8x32_epi32(rhs_raw_mat_0123_2, requiredOrder), rhs_raw_mat_4567_2, 240); + const __m256i rhs_raw_mat_0145_3 = _mm256_blend_epi32(rhs_raw_mat_0123_3, _mm256_permutevar8x32_epi32(rhs_raw_mat_4567_3, requiredOrder), 240); + const __m256i rhs_raw_mat_2367_3 = _mm256_blend_epi32(_mm256_permutevar8x32_epi32(rhs_raw_mat_0123_3, requiredOrder), rhs_raw_mat_4567_3, 240); + + // 4-bit -> 8-bit + // First sub block of the two sub blocks processed in the iteration + const __m256i rhs_mat_0145_00 = _mm256_and_si256(rhs_raw_mat_0145_0, m4b); //B00(0-7) B01(0-7) B04(0-7) B05(0-7) + const __m256i rhs_mat_2367_00 = _mm256_and_si256(rhs_raw_mat_2367_0, m4b); //B02(0-7) B03(0-7) B06(0-7) B07(0-7) + + const __m256i rhs_mat_0145_01 = _mm256_and_si256(rhs_raw_mat_0145_1, m4b); //B00(8-15) B01(8-15) B04(8-15) B05(8-15) + const __m256i rhs_mat_2367_01 = _mm256_and_si256(rhs_raw_mat_2367_1, m4b); //B02(8-15) B03(8-15) B06(8-15) B07(8-15) + + const __m256i rhs_mat_0145_02 = _mm256_and_si256(rhs_raw_mat_0145_2, m4b); //B00(16-23) B01(16-23) B04(16-23) B05(16-23) + const __m256i rhs_mat_2367_02 = _mm256_and_si256(rhs_raw_mat_2367_2, m4b); //B02(16-23) B03(16-23) B06(16-23) B07(16-23) + + const __m256i rhs_mat_0145_03 = _mm256_and_si256(rhs_raw_mat_0145_3, m4b); //B00(24-31) B01(24-31) B04(24-31) B05(24-31) + const __m256i rhs_mat_2367_03 = _mm256_and_si256(rhs_raw_mat_2367_3, m4b); //B02(24-31) B03(24-31) B06(24-31) B07(24-31) + + // Second sub block of the two sub blocks processed in the iteration + const __m256i rhs_mat_0145_10 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_0145_0, 4), m4b); //B10(0-7) B11(0-7) B14(0-7) B15(0-7) + const __m256i rhs_mat_2367_10 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_2367_0, 4), m4b); //B12(0-7) B13(0-7) B16(0-7) B17(0-7) + + const __m256i rhs_mat_0145_11 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_0145_1, 4), m4b); //B10(8-15) B11(8-15) B14(8-15) B15(8-15) + const __m256i rhs_mat_2367_11 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_2367_1, 4), m4b); //B12(8-15) B13(8-15) B16(8-15) B17(8-15) + + const __m256i rhs_mat_0145_12 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_0145_2, 4), m4b); //B10(16-23) B11(16-23) B14(16-23) B15(16-23) + const __m256i rhs_mat_2367_12 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_2367_2, 4), m4b); //B12(16-23) B13(16-23) B16(16-23) B17(16-23) + + const __m256i rhs_mat_0145_13 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_0145_3, 4), m4b); //B10(24-31) B11(24-31) B14(24-31) B15(24-31) + const __m256i rhs_mat_2367_13 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_2367_3, 4), m4b); //B12(24-31) B13(24-31) B16(24-31) B17(24-31) + + // Shuffle pattern one - right side input + const __m256i rhs_mat_0145_00_sp1 = _mm256_shuffle_epi32(rhs_mat_0145_00, 136); //B00(0-3) B01(0-3) B00(0-3) B01(0-3) B04(0-3) B05(0-3) B04(0-3) B05(0-3) + const __m256i rhs_mat_2367_00_sp1 = _mm256_shuffle_epi32(rhs_mat_2367_00, 136); //B02(0-3) B03(0-3) B02(0-3) B03(0-3) B06(0-3) B07(0-3) B06(0-3) B07(0-3) + + const __m256i rhs_mat_0145_01_sp1 = _mm256_shuffle_epi32(rhs_mat_0145_01, 136); //B00(8-11) B01(8-11) B00(8-11) B01(8-11) B04(8-11) B05(8-11) B04(8-11) B05(8-11) + const __m256i rhs_mat_2367_01_sp1 = 
_mm256_shuffle_epi32(rhs_mat_2367_01, 136); //B02(8-11) B03(8-11) B02(8-11) B03(8-11) B06(8-11) B07(8-11) B06(8-11) B07(8-11) + + const __m256i rhs_mat_0145_02_sp1 = _mm256_shuffle_epi32(rhs_mat_0145_02, 136); //B00(16-19) B01(16-19) B00(16-19) B01(16-19) B04(16-19) B05(16-19) B04(16-19) B05(16-19) + const __m256i rhs_mat_2367_02_sp1 = _mm256_shuffle_epi32(rhs_mat_2367_02, 136); //B02(16-19) B03(16-19) B02(16-19) B03(16-19) B06(16-19) B07(16-19) B06(16-19) B07(16-19) + + const __m256i rhs_mat_0145_03_sp1 = _mm256_shuffle_epi32(rhs_mat_0145_03, 136); //B00(24-27) B01(24-27) B00(24-27) B01(24-27) B04(24-27) B05(24-27) B04(24-27) B05(24-27) + const __m256i rhs_mat_2367_03_sp1 = _mm256_shuffle_epi32(rhs_mat_2367_03, 136); //B02(24-27) B03(24-27) B02(24-27) B03(24-27) B06(24-27) B07(24-27) B06(24-27) B07(24-27) + + const __m256i rhs_mat_0145_10_sp1 = _mm256_shuffle_epi32(rhs_mat_0145_10, 136); //B10(0-3) B11(0-3) B10(0-3) B11(0-3) B14(0-3) B15(0-3) B14(0-3) B15(0-3) + const __m256i rhs_mat_2367_10_sp1 = _mm256_shuffle_epi32(rhs_mat_2367_10, 136); //B12(0-3) B13(0-3) B12(0-3) B13(0-3) B16(0-3) B17(0-3) B16(0-3) B17(0-3) + + const __m256i rhs_mat_0145_11_sp1 = _mm256_shuffle_epi32(rhs_mat_0145_11, 136); //B10(8-11) B11(8-11) B10(8-11) B11(8-11) B14(8-11) B15(8-11) B14(8-11) B15(8-11) + const __m256i rhs_mat_2367_11_sp1 = _mm256_shuffle_epi32(rhs_mat_2367_11, 136); //B12(8-11) B13(8-11) B12(8-11) B13(8-11) B16(8-11) B17(8-11) B16(8-11) B17(8-11) + + const __m256i rhs_mat_0145_12_sp1 = _mm256_shuffle_epi32(rhs_mat_0145_12, 136); //B10(16-19) B11(16-19) B10(16-19) B11(16-19) B14(16-19) B15(16-19) B14(16-19) B15(16-19) + const __m256i rhs_mat_2367_12_sp1 = _mm256_shuffle_epi32(rhs_mat_2367_12, 136); //B12(16-19) B13(16-19) B12(16-19) B13(16-19) B16(16-19) B17(16-19) B16(16-19) B17(16-19) + + const __m256i rhs_mat_0145_13_sp1 = _mm256_shuffle_epi32(rhs_mat_0145_13, 136); //B10(24-27) B11(24-27) B10(24-27) B11(24-27) B14(24-27) B15(24-27) B14(24-27) B15(24-27) + const __m256i rhs_mat_2367_13_sp1 = _mm256_shuffle_epi32(rhs_mat_2367_13, 136); //B12(24-27) B13(24-27) B12(24-27) B13(24-27) B16(24-27) B17(24-27) B16(24-27) B17(24-27) + + + // Shuffle pattern two - right side input + const __m256i rhs_mat_0145_00_sp2 = _mm256_shuffle_epi32(rhs_mat_0145_00, 221); //B00(4-7) B01(4-7) B00(4-7) B01(4-7) B04(4-7) B05(4-7) B04(4-7) B05(4-7) + const __m256i rhs_mat_2367_00_sp2 = _mm256_shuffle_epi32(rhs_mat_2367_00, 221); //B02(4-7) B03(4-7) B02(4-7) B03(4-7) B06(4-7) B07(4-7) B06(4-7) B07(4-7) + + const __m256i rhs_mat_0145_01_sp2 = _mm256_shuffle_epi32(rhs_mat_0145_01, 221); //B00(12-15) B01(12-15) B00(12-15) B01(12-15) B04(12-15) B05(12-15) B04(12-15) B05(12-15) + const __m256i rhs_mat_2367_01_sp2 = _mm256_shuffle_epi32(rhs_mat_2367_01, 221); //B02(12-15) B03(12-15) B02(12-15) B03(12-15) B06(12-15) B07(12-15) B06(12-15) B07(12-15) + + const __m256i rhs_mat_0145_02_sp2 = _mm256_shuffle_epi32(rhs_mat_0145_02, 221); //B00(20-23) B01(20-23) B00(20-23) B01(20-23) B04(20-23) B05(20-23) B04(20-23) B05(20-23) + const __m256i rhs_mat_2367_02_sp2 = _mm256_shuffle_epi32(rhs_mat_2367_02, 221); //B02(20-23) B03(20-23) B02(20-23) B03(20-23) B06(20-23) B07(20-23) B06(20-23) B07(20-23) + + const __m256i rhs_mat_0145_03_sp2 = _mm256_shuffle_epi32(rhs_mat_0145_03, 221); //B00(28-31) B01(28-31) B00(28-31) B01(28-31) B04(28-31) B05(28-31) B04(28-31) B05(28-31) + const __m256i rhs_mat_2367_03_sp2 = _mm256_shuffle_epi32(rhs_mat_2367_03, 221); //B02(28-31) B03(28-31) B02(28-31) B03(28-31) B06(28-31) B07(28-31) B06(28-31) B07(28-31) + 
+ const __m256i rhs_mat_0145_10_sp2 = _mm256_shuffle_epi32(rhs_mat_0145_10, 221); //B10(4-7) B11(4-7) B10(4-7) B11(4-7) B14(4-7) B15(4-7) B14(4-7) B15(4-7) + const __m256i rhs_mat_2367_10_sp2 = _mm256_shuffle_epi32(rhs_mat_2367_10, 221); //B12(4-7) B13(4-7) B12(4-7) B13(4-7) B16(4-7) B17(4-7) B16(4-7) B17(4-7) + + const __m256i rhs_mat_0145_11_sp2 = _mm256_shuffle_epi32(rhs_mat_0145_11, 221); //B10(12-15) B11(12-15) B10(12-15) B11(12-15) B14(12-15) B15(12-15) B14(12-15) B15(12-15) + const __m256i rhs_mat_2367_11_sp2 = _mm256_shuffle_epi32(rhs_mat_2367_11, 221); //B12(12-15) B13(12-15) B12(12-15) B13(12-15) B16(12-15) B17(12-15) B16(12-15) B17(12-15) + + const __m256i rhs_mat_0145_12_sp2 = _mm256_shuffle_epi32(rhs_mat_0145_12, 221); //B10(20-23) B11(20-23) B10(20-23) B11(20-23) B14(20-23) B15(20-23) B14(20-23) B15(20-23) + const __m256i rhs_mat_2367_12_sp2 = _mm256_shuffle_epi32(rhs_mat_2367_12, 221); //B12(20-23) B13(20-23) B12(20-23) B13(20-23) B16(20-23) B17(20-23) B16(20-23) B17(20-23) + + const __m256i rhs_mat_0145_13_sp2 = _mm256_shuffle_epi32(rhs_mat_0145_13, 221); //B10(28-31) B11(28-31) B10(28-31) B11(28-31) B14(28-31) B15(28-31) B14(28-31) B15(28-31) + const __m256i rhs_mat_2367_13_sp2 = _mm256_shuffle_epi32(rhs_mat_2367_13, 221); //B12(28-31) B13(28-31) B12(28-31) B13(28-31) B16(28-31) B17(28-31) B16(28-31) B17(28-31) + + uint32_t utmp_0[4], utmp_1[4]; + + // Scales and Mins of corresponding sub blocks from different Q4_K structures are stored together + // The below block is for eg to extract first sub block's scales and mins from different Q4_K structures for the sb loop + memcpy(utmp_0, b_ptr[b].scales + 24 * sb, 12); + utmp_0[3] = ((utmp_0[2] >> 4) & kmask2) | (((utmp_0[1] >> 6) & kmask3) << 4); + const uint32_t uaux_0 = utmp_0[1] & kmask1; + utmp_0[1] = (utmp_0[2] & kmask2) | (((utmp_0[0] >> 6) & kmask3) << 4); + utmp_0[2] = uaux_0; + utmp_0[0] &= kmask1; + + // The below block is for eg to extract second sub block's scales and mins from different Q4_K structures for the sb loop + memcpy(utmp_1, b_ptr[b].scales + 12 + sb * 24, 12); + utmp_1[3] = ((utmp_1[2] >> 4) & kmask2) | (((utmp_1[1] >> 6) & kmask3) << 4); + const uint32_t uaux_1 = utmp_1[1] & kmask1; + utmp_1[1] = (utmp_1[2] & kmask2) | (((utmp_1[0] >> 6) & kmask3) << 4); + utmp_1[2] = uaux_1; + utmp_1[0] &= kmask1; + + // Scales of first sub block in the sb loop + const __m128i mins_and_scales_0 = _mm_set_epi32(utmp_0[3], utmp_0[2], utmp_0[1], utmp_0[0]); + const __m256i scales_0 = _mm256_cvtepu8_epi16(_mm_unpacklo_epi8(mins_and_scales_0, mins_and_scales_0)); + + // Scales of second sub block in the sb loop + const __m128i mins_and_scales_1 = _mm_set_epi32(utmp_1[3], utmp_1[2], utmp_1[1], utmp_1[0]); + const __m256i scales_1 = _mm256_cvtepu8_epi16(_mm_unpacklo_epi8(mins_and_scales_1, mins_and_scales_1)); + + // Mins of first and second sub block of Q4_K block are arranged side by side + const __m256i mins_01 = _mm256_cvtepu8_epi16(_mm_unpacklo_epi8(_mm_shuffle_epi32(mins_and_scales_0, 78), _mm_shuffle_epi32(mins_and_scales_1, 78))); + + const __m256i scale_0145_0 = _mm256_shuffle_epi32(scales_0, 68); + const __m256i scale_2367_0 = _mm256_shuffle_epi32(scales_0, 238); + + const __m256i scale_0145_1 = _mm256_shuffle_epi32(scales_1, 68); + const __m256i scale_2367_1 = _mm256_shuffle_epi32(scales_1, 238); + + for (int rp = 0; rp < 4; rp++) { + + // Load the four block_q8_k quantized values interleaved with each other in chunks of eight bytes - A0,A1,A2,A3 + // Loaded as set of 128 bit vectors and repeated into a 256 bit 
vector + __m256i lhs_mat_0123_00 = _mm256_loadu_si256((const __m256i * )((a_ptrs[rp][b].qs + 256 * sb))); + __m256i lhs_mat_01_00 = _mm256_permute2f128_si256(lhs_mat_0123_00, lhs_mat_0123_00, 0); + __m256i lhs_mat_23_00 = _mm256_permute2f128_si256(lhs_mat_0123_00, lhs_mat_0123_00, 17); + __m256i lhs_mat_0123_01 = _mm256_loadu_si256((const __m256i * )((a_ptrs[rp][b].qs + 32 + 256 * sb))); + __m256i lhs_mat_01_01 = _mm256_permute2f128_si256(lhs_mat_0123_01, lhs_mat_0123_01, 0); + __m256i lhs_mat_23_01 = _mm256_permute2f128_si256(lhs_mat_0123_01, lhs_mat_0123_01, 17); + __m256i lhs_mat_0123_02 = _mm256_loadu_si256((const __m256i * )((a_ptrs[rp][b].qs + 64 + 256 * sb))); + __m256i lhs_mat_01_02 = _mm256_permute2f128_si256(lhs_mat_0123_02, lhs_mat_0123_02, 0); + __m256i lhs_mat_23_02 = _mm256_permute2f128_si256(lhs_mat_0123_02, lhs_mat_0123_02, 17); + __m256i lhs_mat_0123_03 = _mm256_loadu_si256((const __m256i * )((a_ptrs[rp][b].qs + 96 + 256 * sb))); + __m256i lhs_mat_01_03 = _mm256_permute2f128_si256(lhs_mat_0123_03, lhs_mat_0123_03, 0); + __m256i lhs_mat_23_03 = _mm256_permute2f128_si256(lhs_mat_0123_03, lhs_mat_0123_03, 17); + __m256i lhs_mat_0123_10 = _mm256_loadu_si256((const __m256i * )((a_ptrs[rp][b].qs + 128 + 256 * sb))); + __m256i lhs_mat_01_10 = _mm256_permute2f128_si256(lhs_mat_0123_10, lhs_mat_0123_10, 0); + __m256i lhs_mat_23_10 = _mm256_permute2f128_si256(lhs_mat_0123_10, lhs_mat_0123_10, 17); + __m256i lhs_mat_0123_11 = _mm256_loadu_si256((const __m256i * )((a_ptrs[rp][b].qs + 160 + 256 * sb))); + __m256i lhs_mat_01_11 = _mm256_permute2f128_si256(lhs_mat_0123_11, lhs_mat_0123_11, 0); + __m256i lhs_mat_23_11 = _mm256_permute2f128_si256(lhs_mat_0123_11, lhs_mat_0123_11, 17); + __m256i lhs_mat_0123_12 = _mm256_loadu_si256((const __m256i * )((a_ptrs[rp][b].qs + 192 + 256 * sb))); + __m256i lhs_mat_01_12 = _mm256_permute2f128_si256(lhs_mat_0123_12, lhs_mat_0123_12, 0); + __m256i lhs_mat_23_12 = _mm256_permute2f128_si256(lhs_mat_0123_12, lhs_mat_0123_12, 17); + __m256i lhs_mat_0123_13 = _mm256_loadu_si256((const __m256i * )((a_ptrs[rp][b].qs + 224 + 256 * sb))); + __m256i lhs_mat_01_13 = _mm256_permute2f128_si256(lhs_mat_0123_13, lhs_mat_0123_13, 0); + __m256i lhs_mat_23_13 = _mm256_permute2f128_si256(lhs_mat_0123_13, lhs_mat_0123_13, 17); + + // Bsums are loaded - four bsums are loaded (for two sub blocks) for the different Q8_K blocks + __m256i lhs_bsums_0123_01 = _mm256_loadu_si256((const __m256i * )((a_ptrs[rp][b].bsums + 16 * sb))); + __m256i lhs_bsums_hsum_0123_01 = _mm256_castsi128_si256(_mm_hadd_epi16(_mm256_castsi256_si128(lhs_bsums_0123_01), _mm256_extractf128_si256(lhs_bsums_0123_01, 1))); + lhs_bsums_hsum_0123_01 = _mm256_permute2x128_si256(lhs_bsums_hsum_0123_01, lhs_bsums_hsum_0123_01, 0); + + // Shuffle pattern one - left side input + const __m256i lhs_mat_01_00_sp1 = _mm256_shuffle_epi32(lhs_mat_01_00, 160); //A00(0-3) A00(0-3) A01(0-3) A01(0-3) A00(0-3) A00(0-3) A01(0-3) A01(0-3) + const __m256i lhs_mat_23_00_sp1 = _mm256_shuffle_epi32(lhs_mat_23_00, 160); //A02(0-3) A03(0-3) A02(0-3) A03(0-3) A02(0-3) A03(0-3) A02(0-3) A03(0-3) + + const __m256i lhs_mat_01_01_sp1 = _mm256_shuffle_epi32(lhs_mat_01_01, 160); //A00(8-11) A00(8-11) A01(8-11) A01(8-11) A00(8-11) A00(8-11) A01(8-11) A01(8-11) + const __m256i lhs_mat_23_01_sp1 = _mm256_shuffle_epi32(lhs_mat_23_01, 160); //A02(8-11) A03(8-11) A02(8-11) A03(8-11) A02(8-11) A03(8-11) A02(8-11) A03(8-11) + + const __m256i lhs_mat_01_02_sp1 = _mm256_shuffle_epi32(lhs_mat_01_02, 160); //A00(16-19) A00(16-19) A01(16-19) 
A01(16-19) A00(16-19) A00(16-19) A01(16-19) A01(16-19) + const __m256i lhs_mat_23_02_sp1 = _mm256_shuffle_epi32(lhs_mat_23_02, 160); //A02(16-19) A03(16-19) A02(16-19) A03(16-19) A02(16-19) A03(16-19) A02(16-19) A03(16-19) + + const __m256i lhs_mat_01_03_sp1 = _mm256_shuffle_epi32(lhs_mat_01_03, 160); //A00(24-27) A00(24-27) A01(24-27) A01(24-27) A00(24-27) A00(24-27) A01(24-27) A01(24-27) + const __m256i lhs_mat_23_03_sp1 = _mm256_shuffle_epi32(lhs_mat_23_03, 160); //A02(24-27) A03(24-27) A02(24-27) A03(24-27) A02(24-27) A03(24-27) A02(24-27) A03(24-27) + + const __m256i lhs_mat_01_10_sp1 = _mm256_shuffle_epi32(lhs_mat_01_10, 160); //A10(0-3) A10(0-3) A11(0-3) A11(0-3) A10(0-3) A10(0-3) A11(0-3) A11(0-3) + const __m256i lhs_mat_23_10_sp1 = _mm256_shuffle_epi32(lhs_mat_23_10, 160); //A12(0-3) A13(0-3) A12(0-3) A13(0-3) A12(0-3) A13(0-3) A12(0-3) A13(0-3) + + const __m256i lhs_mat_01_11_sp1 = _mm256_shuffle_epi32(lhs_mat_01_11, 160); //A10(8-11) A10(8-11) A11(8-11) A11(8-11) A10(8-11) A10(8-11) A11(8-11) A11(8-11) + const __m256i lhs_mat_23_11_sp1 = _mm256_shuffle_epi32(lhs_mat_23_11, 160); //A12(8-11) A13(8-11) A12(8-11) A13(8-11) A12(8-11) A13(8-11) A12(8-11) A13(8-11) + + const __m256i lhs_mat_01_12_sp1 = _mm256_shuffle_epi32(lhs_mat_01_12, 160); //A10(16-19) A10(16-19) A11(16-19) A11(16-19) A10(16-19) A10(16-19) A11(16-19) A11(16-19) + const __m256i lhs_mat_23_12_sp1 = _mm256_shuffle_epi32(lhs_mat_23_12, 160); //A12(16-19) A13(16-19) A12(16-19) A13(16-19) A12(16-19) A13(16-19) A12(16-19) A13(16-19) + + const __m256i lhs_mat_01_13_sp1 = _mm256_shuffle_epi32(lhs_mat_01_13, 160); //A10(24-27) A10(24-27) A11(24-27) A11(24-27) A10(24-27) A10(24-27) A11(24-27) A11(24-27) + const __m256i lhs_mat_23_13_sp1 = _mm256_shuffle_epi32(lhs_mat_23_13, 160); //A12(24-27) A13(24-27) A12(24-27) A13(24-27) A12(24-27) A13(24-27) A12(24-27) A13(24-27) + + // Shuffle pattern two- left side input + const __m256i lhs_mat_01_00_sp2 = _mm256_shuffle_epi32(lhs_mat_01_00, 245); //A00(4-7) A00(4-7) A01(4-7) A01(4-7) A00(4-7) A00(4-7) A01(4-7) A01(4-7) + const __m256i lhs_mat_23_00_sp2 = _mm256_shuffle_epi32(lhs_mat_23_00, 245); //A02(4-7) A03(4-7) A02(4-7) A03(4-7) A02(4-7) A03(4-7) A02(4-7) A03(4-7) + + const __m256i lhs_mat_01_01_sp2 = _mm256_shuffle_epi32(lhs_mat_01_01, 245); //A00(12-15) A00(12-15) A01(12-15) A01(12-15) A00(12-15) A00(12-15) A01(12-15) A01(12-15) + const __m256i lhs_mat_23_01_sp2 = _mm256_shuffle_epi32(lhs_mat_23_01, 245); //A02(12-15) A03(12-15) A02(12-15) A03(12-15) A02(12-15) A03(12-15) A02(12-15) A03(12-15) + + const __m256i lhs_mat_01_02_sp2 = _mm256_shuffle_epi32(lhs_mat_01_02, 245); //A00(20-23) A00(20-23) A01(20-23) A01(20-23) A00(20-23) A00(20-23) A01(20-23) A01(20-23) + const __m256i lhs_mat_23_02_sp2 = _mm256_shuffle_epi32(lhs_mat_23_02, 245); //A02(20-23) A03(20-23) A02(20-23) A03(20-23) A02(20-23) A03(20-23) A02(20-23) A03(20-23) + + const __m256i lhs_mat_01_03_sp2 = _mm256_shuffle_epi32(lhs_mat_01_03, 245); //A00(28-31) A00(28-31) A01(28-31) A01(28-31) A00(28-31) A00(28-31) A01(28-31) A01(28-31) + const __m256i lhs_mat_23_03_sp2 = _mm256_shuffle_epi32(lhs_mat_23_03, 245); //A02(28-31) A03(28-31) A02(28-31) A03(28-31) A02(28-31) A03(28-31) A02(28-31) A03(28-31) + + const __m256i lhs_mat_01_10_sp2 = _mm256_shuffle_epi32(lhs_mat_01_10, 245); //A10(4-7) A10(4-7) A11(4-7) A11(4-7) A10(4-7) A10(4-7) A11(4-7) A11(4-7) + const __m256i lhs_mat_23_10_sp2 = _mm256_shuffle_epi32(lhs_mat_23_10, 245); //A12(4-7) A13(4-7) A12(4-7) A13(4-7) A12(4-7) A13(4-7) A12(4-7) A13(4-7) + + const __m256i 
lhs_mat_01_11_sp2 = _mm256_shuffle_epi32(lhs_mat_01_11, 245); //A10(12-15) A10(12-15) A11(12-15) A11(12-15) A10(12-15) A10(12-15) A11(12-15) A11(12-15) + const __m256i lhs_mat_23_11_sp2 = _mm256_shuffle_epi32(lhs_mat_23_11, 245); //A12(12-15) A13(12-15) A12(12-15) A13(12-15) A12(12-15) A13(12-15) A12(12-15) A13(12-15) + + const __m256i lhs_mat_01_12_sp2 = _mm256_shuffle_epi32(lhs_mat_01_12, 245); //A10(20-23) A10(20-23) A11(20-23) A11(20-23) A10(20-23) A10(20-23) A11(20-23) A11(20-23) + const __m256i lhs_mat_23_12_sp2 = _mm256_shuffle_epi32(lhs_mat_23_12, 245); //A12(20-23) A13(20-23) A12(20-23) A13(20-23) A12(20-23) A13(20-23) A12(20-23) A13(20-23) + + const __m256i lhs_mat_01_13_sp2 = _mm256_shuffle_epi32(lhs_mat_01_13, 245); //A10(28-31) A10(28-31) A11(28-31) A11(28-31) A10(28-31) A10(28-31) A11(28-31) A11(28-31) + const __m256i lhs_mat_23_13_sp2 = _mm256_shuffle_epi32(lhs_mat_23_13, 245); //A12(28-31) A13(28-31) A12(28-31) A13(28-31) A12(28-31) A13(28-31) A12(28-31) A13(28-31) + + // The values arranged in shuffle patterns are operated with dot product operation within 32 bit lane i.e corresponding bytes and multiplied and added into 32 bit integers within 32 bit lane + __m256i iacc_mat_00_0_sp1 = _mm256_add_epi16(_mm256_add_epi16(_mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_03_sp1, lhs_mat_01_03_sp1), _mm256_maddubs_epi16(rhs_mat_0145_02_sp1, lhs_mat_01_02_sp1)), _mm256_maddubs_epi16(rhs_mat_0145_01_sp1, lhs_mat_01_01_sp1)), _mm256_maddubs_epi16(rhs_mat_0145_00_sp1, lhs_mat_01_00_sp1)); + __m256i iacc_mat_01_0_sp1 = _mm256_add_epi16(_mm256_add_epi16(_mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_03_sp1, lhs_mat_01_03_sp1), _mm256_maddubs_epi16(rhs_mat_2367_02_sp1, lhs_mat_01_02_sp1)), _mm256_maddubs_epi16(rhs_mat_2367_01_sp1, lhs_mat_01_01_sp1)), _mm256_maddubs_epi16(rhs_mat_2367_00_sp1, lhs_mat_01_00_sp1)); + __m256i iacc_mat_10_0_sp1 = _mm256_add_epi16(_mm256_add_epi16(_mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_03_sp1, lhs_mat_23_03_sp1), _mm256_maddubs_epi16(rhs_mat_0145_02_sp1, lhs_mat_23_02_sp1)), _mm256_maddubs_epi16(rhs_mat_0145_01_sp1, lhs_mat_23_01_sp1)), _mm256_maddubs_epi16(rhs_mat_0145_00_sp1, lhs_mat_23_00_sp1)); + __m256i iacc_mat_11_0_sp1 = _mm256_add_epi16(_mm256_add_epi16(_mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_03_sp1, lhs_mat_23_03_sp1), _mm256_maddubs_epi16(rhs_mat_2367_02_sp1, lhs_mat_23_02_sp1)), _mm256_maddubs_epi16(rhs_mat_2367_01_sp1, lhs_mat_23_01_sp1)), _mm256_maddubs_epi16(rhs_mat_2367_00_sp1, lhs_mat_23_00_sp1)); + __m256i iacc_mat_00_1_sp1 = _mm256_add_epi16(_mm256_add_epi16(_mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_13_sp1, lhs_mat_01_13_sp1), _mm256_maddubs_epi16(rhs_mat_0145_12_sp1, lhs_mat_01_12_sp1)), _mm256_maddubs_epi16(rhs_mat_0145_11_sp1, lhs_mat_01_11_sp1)), _mm256_maddubs_epi16(rhs_mat_0145_10_sp1, lhs_mat_01_10_sp1)); + __m256i iacc_mat_01_1_sp1 = _mm256_add_epi16(_mm256_add_epi16(_mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_13_sp1, lhs_mat_01_13_sp1), _mm256_maddubs_epi16(rhs_mat_2367_12_sp1, lhs_mat_01_12_sp1)), _mm256_maddubs_epi16(rhs_mat_2367_11_sp1, lhs_mat_01_11_sp1)), _mm256_maddubs_epi16(rhs_mat_2367_10_sp1, lhs_mat_01_10_sp1)); + __m256i iacc_mat_10_1_sp1 = _mm256_add_epi16(_mm256_add_epi16(_mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_13_sp1, lhs_mat_23_13_sp1), _mm256_maddubs_epi16(rhs_mat_0145_12_sp1, lhs_mat_23_12_sp1)), _mm256_maddubs_epi16(rhs_mat_0145_11_sp1, lhs_mat_23_11_sp1)), _mm256_maddubs_epi16(rhs_mat_0145_10_sp1, lhs_mat_23_10_sp1)); + __m256i iacc_mat_11_1_sp1 = 
_mm256_add_epi16(_mm256_add_epi16(_mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_13_sp1, lhs_mat_23_13_sp1), _mm256_maddubs_epi16(rhs_mat_2367_12_sp1, lhs_mat_23_12_sp1)), _mm256_maddubs_epi16(rhs_mat_2367_11_sp1, lhs_mat_23_11_sp1)), _mm256_maddubs_epi16(rhs_mat_2367_10_sp1, lhs_mat_23_10_sp1)); + + __m256i iacc_mat_00_0_sp2 = _mm256_add_epi16(_mm256_add_epi16(_mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_03_sp2, lhs_mat_01_03_sp2), _mm256_maddubs_epi16(rhs_mat_0145_02_sp2, lhs_mat_01_02_sp2)), _mm256_maddubs_epi16(rhs_mat_0145_01_sp2, lhs_mat_01_01_sp2)), _mm256_maddubs_epi16(rhs_mat_0145_00_sp2, lhs_mat_01_00_sp2)); + __m256i iacc_mat_01_0_sp2 = _mm256_add_epi16(_mm256_add_epi16(_mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_03_sp2, lhs_mat_01_03_sp2), _mm256_maddubs_epi16(rhs_mat_2367_02_sp2, lhs_mat_01_02_sp2)), _mm256_maddubs_epi16(rhs_mat_2367_01_sp2, lhs_mat_01_01_sp2)), _mm256_maddubs_epi16(rhs_mat_2367_00_sp2, lhs_mat_01_00_sp2)); + __m256i iacc_mat_10_0_sp2 = _mm256_add_epi16(_mm256_add_epi16(_mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_03_sp2, lhs_mat_23_03_sp2), _mm256_maddubs_epi16(rhs_mat_0145_02_sp2, lhs_mat_23_02_sp2)), _mm256_maddubs_epi16(rhs_mat_0145_01_sp2, lhs_mat_23_01_sp2)), _mm256_maddubs_epi16(rhs_mat_0145_00_sp2, lhs_mat_23_00_sp2)); + __m256i iacc_mat_11_0_sp2 = _mm256_add_epi16(_mm256_add_epi16(_mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_03_sp2, lhs_mat_23_03_sp2), _mm256_maddubs_epi16(rhs_mat_2367_02_sp2, lhs_mat_23_02_sp2)), _mm256_maddubs_epi16(rhs_mat_2367_01_sp2, lhs_mat_23_01_sp2)), _mm256_maddubs_epi16(rhs_mat_2367_00_sp2, lhs_mat_23_00_sp2)); + __m256i iacc_mat_00_1_sp2 = _mm256_add_epi16(_mm256_add_epi16(_mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_13_sp2, lhs_mat_01_13_sp2), _mm256_maddubs_epi16(rhs_mat_0145_12_sp2, lhs_mat_01_12_sp2)), _mm256_maddubs_epi16(rhs_mat_0145_11_sp2, lhs_mat_01_11_sp2)), _mm256_maddubs_epi16(rhs_mat_0145_10_sp2, lhs_mat_01_10_sp2)); + __m256i iacc_mat_01_1_sp2 = _mm256_add_epi16(_mm256_add_epi16(_mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_13_sp2, lhs_mat_01_13_sp2), _mm256_maddubs_epi16(rhs_mat_2367_12_sp2, lhs_mat_01_12_sp2)), _mm256_maddubs_epi16(rhs_mat_2367_11_sp2, lhs_mat_01_11_sp2)), _mm256_maddubs_epi16(rhs_mat_2367_10_sp2, lhs_mat_01_10_sp2)); + __m256i iacc_mat_10_1_sp2 = _mm256_add_epi16(_mm256_add_epi16(_mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_13_sp2, lhs_mat_23_13_sp2), _mm256_maddubs_epi16(rhs_mat_0145_12_sp2, lhs_mat_23_12_sp2)), _mm256_maddubs_epi16(rhs_mat_0145_11_sp2, lhs_mat_23_11_sp2)), _mm256_maddubs_epi16(rhs_mat_0145_10_sp2, lhs_mat_23_10_sp2)); + __m256i iacc_mat_11_1_sp2 = _mm256_add_epi16(_mm256_add_epi16(_mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_13_sp2, lhs_mat_23_13_sp2), _mm256_maddubs_epi16(rhs_mat_2367_12_sp2, lhs_mat_23_12_sp2)), _mm256_maddubs_epi16(rhs_mat_2367_11_sp2, lhs_mat_23_11_sp2)), _mm256_maddubs_epi16(rhs_mat_2367_10_sp2, lhs_mat_23_10_sp2)); + + // Output of both shuffle patterns are added in order to sum dot product outputs of all 32 values in block + __m256i iacc_mat_00_0 = _mm256_add_epi16(iacc_mat_00_0_sp1, iacc_mat_00_0_sp2); + __m256i iacc_mat_01_0 = _mm256_add_epi16(iacc_mat_01_0_sp1, iacc_mat_01_0_sp2); + __m256i iacc_mat_10_0 = _mm256_add_epi16(iacc_mat_10_0_sp1, iacc_mat_10_0_sp2); + __m256i iacc_mat_11_0 = _mm256_add_epi16(iacc_mat_11_0_sp1, iacc_mat_11_0_sp2); + + __m256i iacc_mat_00_1 = _mm256_add_epi16(iacc_mat_00_1_sp1, iacc_mat_00_1_sp2); + __m256i iacc_mat_01_1 = _mm256_add_epi16(iacc_mat_01_1_sp1, 
iacc_mat_01_1_sp2); + __m256i iacc_mat_10_1 = _mm256_add_epi16(iacc_mat_10_1_sp1, iacc_mat_10_1_sp2); + __m256i iacc_mat_11_1 = _mm256_add_epi16(iacc_mat_11_1_sp1, iacc_mat_11_1_sp2); + + // Output of both shuffle patterns are added in order to sum dot product outputs of all 32 values in block + iacc_mat_00_0 = _mm256_madd_epi16(iacc_mat_00_0, scale_0145_0); + iacc_mat_01_0 = _mm256_madd_epi16(iacc_mat_01_0, scale_2367_0); + iacc_mat_10_0 = _mm256_madd_epi16(iacc_mat_10_0, scale_0145_0); + iacc_mat_11_0 = _mm256_madd_epi16(iacc_mat_11_0, scale_2367_0); + + iacc_mat_00_1 = _mm256_madd_epi16(iacc_mat_00_1, scale_0145_1); + iacc_mat_01_1 = _mm256_madd_epi16(iacc_mat_01_1, scale_2367_1); + iacc_mat_10_1 = _mm256_madd_epi16(iacc_mat_10_1, scale_0145_1); + iacc_mat_11_1 = _mm256_madd_epi16(iacc_mat_11_1, scale_2367_1); + + // Straighten out to make 4 row vectors (4 for each sub block which are accumulated together in the next step) + __m256i iacc_row_0_0 = _mm256_blend_epi32(iacc_mat_00_0, _mm256_shuffle_epi32(iacc_mat_01_0, 78), 204); + __m256i iacc_row_1_0 = _mm256_blend_epi32(_mm256_shuffle_epi32(iacc_mat_00_0, 78), iacc_mat_01_0, 204); + __m256i iacc_row_2_0 = _mm256_blend_epi32(iacc_mat_10_0, _mm256_shuffle_epi32(iacc_mat_11_0, 78), 204); + __m256i iacc_row_3_0 = _mm256_blend_epi32(_mm256_shuffle_epi32(iacc_mat_10_0, 78), iacc_mat_11_0, 204); + __m256i iacc_row_0_1 = _mm256_blend_epi32(iacc_mat_00_1, _mm256_shuffle_epi32(iacc_mat_01_1, 78), 204); + __m256i iacc_row_1_1 = _mm256_blend_epi32(_mm256_shuffle_epi32(iacc_mat_00_1, 78), iacc_mat_01_1, 204); + __m256i iacc_row_2_1 = _mm256_blend_epi32(iacc_mat_10_1, _mm256_shuffle_epi32(iacc_mat_11_1, 78), 204); + __m256i iacc_row_3_1 = _mm256_blend_epi32(_mm256_shuffle_epi32(iacc_mat_10_1, 78), iacc_mat_11_1, 204); + + __m256i iacc_row_0 = _mm256_add_epi32(iacc_row_0_0, iacc_row_0_1); + __m256i iacc_row_1 = _mm256_add_epi32(iacc_row_1_0, iacc_row_1_1); + __m256i iacc_row_2 = _mm256_add_epi32(iacc_row_2_0, iacc_row_2_1); + __m256i iacc_row_3 = _mm256_add_epi32(iacc_row_3_0, iacc_row_3_1); + + // Load the scale(d) values for all the 4 Q8_k blocks and repeat it across lanes + const __m128 row_scale_f32_sse = _mm_load_ps(a_ptrs[rp][b].d); + const __m256 row_scale_f32 = _mm256_set_m128(row_scale_f32_sse, row_scale_f32_sse);//GGML_F32Cx8_REPEAT_LOAD(a_ptrs[rp][b].d, loadMask); + + // Multiply with appropiate scales and accumulate (for both d and dmin) below + acc_rows[rp * 4] = _mm256_fmadd_ps(_mm256_cvtepi32_ps(iacc_row_0), _mm256_mul_ps(col_scale_f32, _mm256_shuffle_ps(row_scale_f32, row_scale_f32, 0)), acc_rows[rp * 4]); + acc_rows[rp * 4 + 1] = _mm256_fmadd_ps(_mm256_cvtepi32_ps(iacc_row_1), _mm256_mul_ps(col_scale_f32, _mm256_shuffle_ps(row_scale_f32, row_scale_f32, 85)), acc_rows[rp * 4 + 1]); + acc_rows[rp * 4 + 2] = _mm256_fmadd_ps(_mm256_cvtepi32_ps(iacc_row_2), _mm256_mul_ps(col_scale_f32, _mm256_shuffle_ps(row_scale_f32, row_scale_f32, 170)), acc_rows[rp * 4 + 2]); + acc_rows[rp * 4 + 3] = _mm256_fmadd_ps(_mm256_cvtepi32_ps(iacc_row_3), _mm256_mul_ps(col_scale_f32, _mm256_shuffle_ps(row_scale_f32, row_scale_f32, 255)), acc_rows[rp * 4 + 3]); + + __m256i iacc_row_min_0 = _mm256_madd_epi16(_mm256_shuffle_epi32(lhs_bsums_hsum_0123_01, 0), mins_01); + __m256i iacc_row_min_1 = _mm256_madd_epi16(_mm256_shuffle_epi32(lhs_bsums_hsum_0123_01, 85), mins_01); + __m256i iacc_row_min_2 = _mm256_madd_epi16(_mm256_shuffle_epi32(lhs_bsums_hsum_0123_01, 170), mins_01); + __m256i iacc_row_min_3 = _mm256_madd_epi16(_mm256_shuffle_epi32(lhs_bsums_hsum_0123_01, 
255), mins_01); + + acc_min_rows[rp * 4] = _mm256_fmadd_ps(_mm256_cvtepi32_ps(iacc_row_min_0), _mm256_mul_ps(col_dmin_f32, _mm256_shuffle_ps(row_scale_f32, row_scale_f32, 0)), acc_min_rows[rp * 4]); + acc_min_rows[rp * 4 + 1] = _mm256_fmadd_ps(_mm256_cvtepi32_ps(iacc_row_min_1), _mm256_mul_ps(col_dmin_f32, _mm256_shuffle_ps(row_scale_f32, row_scale_f32, 85)), acc_min_rows[rp * 4 + 1]); + acc_min_rows[rp * 4 + 2] = _mm256_fmadd_ps(_mm256_cvtepi32_ps(iacc_row_min_2), _mm256_mul_ps(col_dmin_f32, _mm256_shuffle_ps(row_scale_f32, row_scale_f32, 170)), acc_min_rows[rp * 4 + 2]); + acc_min_rows[rp * 4 + 3] = _mm256_fmadd_ps(_mm256_cvtepi32_ps(iacc_row_min_3), _mm256_mul_ps(col_dmin_f32, _mm256_shuffle_ps(row_scale_f32, row_scale_f32, 255)), acc_min_rows[rp * 4 + 3]); + + } + } + } + // Store the accumulated values + for (int i = 0; i < 16; i++) { + _mm256_storeu_ps((float * )(s + ((y * 4 + i) * bs + x * 8)), _mm256_sub_ps(acc_rows[i], acc_min_rows[i])); + } + } + } + for (; y < nr / 4; y++) { + + const block_q8_Kx4 * a_ptr = a_ptr_start + (y * nb); + + for (int64_t x = 0; x < nc / 8; x++) { + + const block_q4_Kx8 * b_ptr = b_ptr_start + (x * b_nb); + + // Master FP accumulators + __m256 acc_rows[4]; + for (int i = 0; i < 4; i++) { + acc_rows[i] = _mm256_setzero_ps(); + } + + __m256 acc_min_rows[4]; + for (int i = 0; i < 4; i++) { + acc_min_rows[i] = _mm256_setzero_ps(); + } + + for (int64_t b = 0; b < nb; b++) { + + // Scale values - Load the eight scale values of block_q4_Kx8 + const __m256 col_scale_f32 = GGML_F32Cx8_LOAD(b_ptr[b].d); + + // dmin values - Load the eight dmin values of block_q4_Kx8 + const __m256 col_dmin_f32 = GGML_F32Cx8_LOAD(b_ptr[b].dmin); + + // Loop to iterate over the eight sub blocks of a super block - two sub blocks are processed per iteration + for (int sb = 0; sb < QK_K / 64; sb++) { + + // Load the eight block_q4_k for two sub blocks quantized values interleaved with each other in chunks of eight bytes - B0,B1 ....B6,B7 + const __m256i rhs_raw_mat_0123_0 = _mm256_loadu_si256((const __m256i * )(b_ptr[b].qs + sb * 256)); + const __m256i rhs_raw_mat_4567_0 = _mm256_loadu_si256((const __m256i * )(b_ptr[b].qs + 32 + sb * 256)); + const __m256i rhs_raw_mat_0123_1 = _mm256_loadu_si256((const __m256i * )(b_ptr[b].qs + 64 + sb * 256)); + const __m256i rhs_raw_mat_4567_1 = _mm256_loadu_si256((const __m256i * )(b_ptr[b].qs + 96 + sb * 256)); + const __m256i rhs_raw_mat_0123_2 = _mm256_loadu_si256((const __m256i * )(b_ptr[b].qs + 128 + sb * 256)); + const __m256i rhs_raw_mat_4567_2 = _mm256_loadu_si256((const __m256i * )(b_ptr[b].qs + 160 + sb * 256)); + const __m256i rhs_raw_mat_0123_3 = _mm256_loadu_si256((const __m256i * )(b_ptr[b].qs + 192 + sb * 256)); + const __m256i rhs_raw_mat_4567_3 = _mm256_loadu_si256((const __m256i * )(b_ptr[b].qs + 224 + sb * 256)); + + // Save the values in the following vectors in the formats B0B1B4B5, B2B3B6B7 for further processing and storing of values + const __m256i rhs_raw_mat_0145_0 = _mm256_blend_epi32(rhs_raw_mat_0123_0, _mm256_permutevar8x32_epi32(rhs_raw_mat_4567_0, requiredOrder), 240); + const __m256i rhs_raw_mat_2367_0 = _mm256_blend_epi32(_mm256_permutevar8x32_epi32(rhs_raw_mat_0123_0, requiredOrder), rhs_raw_mat_4567_0, 240); + const __m256i rhs_raw_mat_0145_1 = _mm256_blend_epi32(rhs_raw_mat_0123_1, _mm256_permutevar8x32_epi32(rhs_raw_mat_4567_1, requiredOrder), 240); + const __m256i rhs_raw_mat_2367_1 = _mm256_blend_epi32(_mm256_permutevar8x32_epi32(rhs_raw_mat_0123_1, requiredOrder), rhs_raw_mat_4567_1, 240); + const __m256i 
rhs_raw_mat_0145_2 = _mm256_blend_epi32(rhs_raw_mat_0123_2, _mm256_permutevar8x32_epi32(rhs_raw_mat_4567_2, requiredOrder), 240); + const __m256i rhs_raw_mat_2367_2 = _mm256_blend_epi32(_mm256_permutevar8x32_epi32(rhs_raw_mat_0123_2, requiredOrder), rhs_raw_mat_4567_2, 240); + const __m256i rhs_raw_mat_0145_3 = _mm256_blend_epi32(rhs_raw_mat_0123_3, _mm256_permutevar8x32_epi32(rhs_raw_mat_4567_3, requiredOrder), 240); + const __m256i rhs_raw_mat_2367_3 = _mm256_blend_epi32(_mm256_permutevar8x32_epi32(rhs_raw_mat_0123_3, requiredOrder), rhs_raw_mat_4567_3, 240); + + // 4-bit -> 8-bit + // First sub block of the two sub blocks processed in the iteration + const __m256i rhs_mat_0145_00 = _mm256_and_si256(rhs_raw_mat_0145_0, m4b); //B00(0-7) B01(0-7) B04(0-7) B05(0-7) + const __m256i rhs_mat_2367_00 = _mm256_and_si256(rhs_raw_mat_2367_0, m4b); //B02(0-7) B03(0-7) B06(0-7) B07(0-7) + + const __m256i rhs_mat_0145_01 = _mm256_and_si256(rhs_raw_mat_0145_1, m4b); //B00(8-15) B01(8-15) B04(8-15) B05(8-15) + const __m256i rhs_mat_2367_01 = _mm256_and_si256(rhs_raw_mat_2367_1, m4b); //B02(8-15) B03(8-15) B06(8-15) B07(8-15) + + const __m256i rhs_mat_0145_02 = _mm256_and_si256(rhs_raw_mat_0145_2, m4b); //B00(16-23) B01(16-23) B04(16-23) B05(16-23) + const __m256i rhs_mat_2367_02 = _mm256_and_si256(rhs_raw_mat_2367_2, m4b); //B02(16-23) B03(16-23) B06(16-23) B07(16-23) + + const __m256i rhs_mat_0145_03 = _mm256_and_si256(rhs_raw_mat_0145_3, m4b); //B00(24-31) B01(24-31) B04(24-31) B05(24-31) + const __m256i rhs_mat_2367_03 = _mm256_and_si256(rhs_raw_mat_2367_3, m4b); //B02(24-31) B03(24-31) B06(24-31) B07(24-31) + + // Second sub block of the two sub blocks processed in the iteration + const __m256i rhs_mat_0145_10 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_0145_0, 4), m4b); //B10(0-7) B11(0-7) B14(0-7) B15(0-7) + const __m256i rhs_mat_2367_10 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_2367_0, 4), m4b); //B12(0-7) B13(0-7) B16(0-7) B17(0-7) + + const __m256i rhs_mat_0145_11 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_0145_1, 4), m4b); //B10(8-15) B11(8-15) B14(8-15) B15(8-15) + const __m256i rhs_mat_2367_11 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_2367_1, 4), m4b); //B12(8-15) B13(8-15) B16(8-15) B17(8-15) + + const __m256i rhs_mat_0145_12 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_0145_2, 4), m4b); //B10(16-23) B11(16-23) B14(16-23) B15(16-23) + const __m256i rhs_mat_2367_12 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_2367_2, 4), m4b); //B12(16-23) B13(16-23) B16(16-23) B17(16-23) + + const __m256i rhs_mat_0145_13 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_0145_3, 4), m4b); //B10(24-31) B11(24-31) B14(24-31) B15(24-31) + const __m256i rhs_mat_2367_13 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_2367_3, 4), m4b); //B12(24-31) B13(24-31) B16(24-31) B17(24-31) + + // Shuffle pattern one - right side input + const __m256i rhs_mat_0145_00_sp1 = _mm256_shuffle_epi32(rhs_mat_0145_00, 136); //B00(0-3) B01(0-3) B00(0-3) B01(0-3) B04(0-3) B05(0-3) B04(0-3) B05(0-3) + const __m256i rhs_mat_2367_00_sp1 = _mm256_shuffle_epi32(rhs_mat_2367_00, 136); //B02(0-3) B03(0-3) B02(0-3) B03(0-3) B06(0-3) B07(0-3) B06(0-3) B07(0-3) + + const __m256i rhs_mat_0145_01_sp1 = _mm256_shuffle_epi32(rhs_mat_0145_01, 136); //B00(8-11) B01(8-11) B00(8-11) B01(8-11) B04(8-11) B05(8-11) B04(8-11) B05(8-11) + const __m256i rhs_mat_2367_01_sp1 = _mm256_shuffle_epi32(rhs_mat_2367_01, 136); //B02(8-11) B03(8-11) B02(8-11) B03(8-11) B06(8-11) B07(8-11) B06(8-11) B07(8-11) + + const __m256i 
rhs_mat_0145_02_sp1 = _mm256_shuffle_epi32(rhs_mat_0145_02, 136); //B00(16-19) B01(16-19) B00(16-19) B01(16-19) B04(16-19) B05(16-19) B04(16-19) B05(16-19) + const __m256i rhs_mat_2367_02_sp1 = _mm256_shuffle_epi32(rhs_mat_2367_02, 136); //B02(16-19) B03(16-19) B02(16-19) B03(16-19) B06(16-19) B07(16-19) B06(16-19) B07(16-19) + + const __m256i rhs_mat_0145_03_sp1 = _mm256_shuffle_epi32(rhs_mat_0145_03, 136); //B00(24-27) B01(24-27) B00(24-27) B01(24-27) B04(24-27) B05(24-27) B04(24-27) B05(24-27) + const __m256i rhs_mat_2367_03_sp1 = _mm256_shuffle_epi32(rhs_mat_2367_03, 136); //B02(24-27) B03(24-27) B02(24-27) B03(24-27) B06(24-27) B07(24-27) B06(24-27) B07(24-27) + + const __m256i rhs_mat_0145_10_sp1 = _mm256_shuffle_epi32(rhs_mat_0145_10, 136); //B10(0-3) B11(0-3) B10(0-3) B11(0-3) B14(0-3) B15(0-3) B14(0-3) B15(0-3) + const __m256i rhs_mat_2367_10_sp1 = _mm256_shuffle_epi32(rhs_mat_2367_10, 136); //B12(0-3) B13(0-3) B12(0-3) B13(0-3) B16(0-3) B17(0-3) B16(0-3) B17(0-3) + + const __m256i rhs_mat_0145_11_sp1 = _mm256_shuffle_epi32(rhs_mat_0145_11, 136); //B10(8-11) B11(8-11) B10(8-11) B11(8-11) B14(8-11) B15(8-11) B14(8-11) B15(8-11) + const __m256i rhs_mat_2367_11_sp1 = _mm256_shuffle_epi32(rhs_mat_2367_11, 136); //B12(8-11) B13(8-11) B12(8-11) B13(8-11) B16(8-11) B17(8-11) B16(8-11) B17(8-11) + + const __m256i rhs_mat_0145_12_sp1 = _mm256_shuffle_epi32(rhs_mat_0145_12, 136); //B10(16-19) B11(16-19) B10(16-19) B11(16-19) B14(16-19) B15(16-19) B14(16-19) B15(16-19) + const __m256i rhs_mat_2367_12_sp1 = _mm256_shuffle_epi32(rhs_mat_2367_12, 136); //B12(16-19) B13(16-19) B12(16-19) B13(16-19) B16(16-19) B17(16-19) B16(16-19) B17(16-19) + + const __m256i rhs_mat_0145_13_sp1 = _mm256_shuffle_epi32(rhs_mat_0145_13, 136); //B10(24-27) B11(24-27) B10(24-27) B11(24-27) B14(24-27) B15(24-27) B14(24-27) B15(24-27) + const __m256i rhs_mat_2367_13_sp1 = _mm256_shuffle_epi32(rhs_mat_2367_13, 136); //B12(24-27) B13(24-27) B12(24-27) B13(24-27) B16(24-27) B17(24-27) B16(24-27) B17(24-27) + + // Shuffle pattern two - right side input + const __m256i rhs_mat_0145_00_sp2 = _mm256_shuffle_epi32(rhs_mat_0145_00, 221); //B00(4-7) B01(4-7) B00(4-7) B01(4-7) B04(4-7) B05(4-7) B04(4-7) B05(4-7) + const __m256i rhs_mat_2367_00_sp2 = _mm256_shuffle_epi32(rhs_mat_2367_00, 221); //B02(4-7) B03(4-7) B02(4-7) B03(4-7) B06(4-7) B07(4-7) B06(4-7) B07(4-7) + + const __m256i rhs_mat_0145_01_sp2 = _mm256_shuffle_epi32(rhs_mat_0145_01, 221); //B00(12-15) B01(12-15) B00(12-15) B01(12-15) B04(12-15) B05(12-15) B04(12-15) B05(12-15) + const __m256i rhs_mat_2367_01_sp2 = _mm256_shuffle_epi32(rhs_mat_2367_01, 221); //B02(12-15) B03(12-15) B02(12-15) B03(12-15) B06(12-15) B07(12-15) B06(12-15) B07(12-15) + + const __m256i rhs_mat_0145_02_sp2 = _mm256_shuffle_epi32(rhs_mat_0145_02, 221); //B00(20-23) B01(20-23) B00(20-23) B01(20-23) B04(20-23) B05(20-23) B04(20-23) B05(20-23) + const __m256i rhs_mat_2367_02_sp2 = _mm256_shuffle_epi32(rhs_mat_2367_02, 221); //B02(20-23) B03(20-23) B02(20-23) B03(20-23) B06(20-23) B07(20-23) B06(20-23) B07(20-23) + + const __m256i rhs_mat_0145_03_sp2 = _mm256_shuffle_epi32(rhs_mat_0145_03, 221); //B00(28-31) B01(28-31) B00(28-31) B01(28-31) B04(28-31) B05(28-31) B04(28-31) B05(28-31) + const __m256i rhs_mat_2367_03_sp2 = _mm256_shuffle_epi32(rhs_mat_2367_03, 221); //B02(28-31) B03(28-31) B02(28-31) B03(28-31) B06(28-31) B07(28-31) B06(28-31) B07(28-31) + + const __m256i rhs_mat_0145_10_sp2 = _mm256_shuffle_epi32(rhs_mat_0145_10, 221); //B10(4-7) B11(4-7) B10(4-7) B11(4-7) B14(4-7) B15(4-7) B14(4-7) 
B15(4-7) + const __m256i rhs_mat_2367_10_sp2 = _mm256_shuffle_epi32(rhs_mat_2367_10, 221); //B12(4-7) B13(4-7) B12(4-7) B13(4-7) B16(4-7) B17(4-7) B16(4-7) B17(4-7) + + const __m256i rhs_mat_0145_11_sp2 = _mm256_shuffle_epi32(rhs_mat_0145_11, 221); //B10(12-15) B11(12-15) B10(12-15) B11(12-15) B14(12-15) B15(12-15) B14(12-15) B15(12-15) + const __m256i rhs_mat_2367_11_sp2 = _mm256_shuffle_epi32(rhs_mat_2367_11, 221); //B12(12-15) B13(12-15) B12(12-15) B13(12-15) B16(12-15) B17(12-15) B16(12-15) B17(12-15) + + const __m256i rhs_mat_0145_12_sp2 = _mm256_shuffle_epi32(rhs_mat_0145_12, 221); //B10(20-23) B11(20-23) B10(20-23) B11(20-23) B14(20-23) B15(20-23) B14(20-23) B15(20-23) + const __m256i rhs_mat_2367_12_sp2 = _mm256_shuffle_epi32(rhs_mat_2367_12, 221); //B12(20-23) B13(20-23) B12(20-23) B13(20-23) B16(20-23) B17(20-23) B16(20-23) B17(20-23) + + const __m256i rhs_mat_0145_13_sp2 = _mm256_shuffle_epi32(rhs_mat_0145_13, 221); //B10(28-31) B11(28-31) B10(28-31) B11(28-31) B14(28-31) B15(28-31) B14(28-31) B15(28-31) + const __m256i rhs_mat_2367_13_sp2 = _mm256_shuffle_epi32(rhs_mat_2367_13, 221); //B12(28-31) B13(28-31) B12(28-31) B13(28-31) B16(28-31) B17(28-31) B16(28-31) B17(28-31) + + uint32_t utmp_0[4], utmp_1[4]; + + // Scales and Mins of corresponding sub blocks from different Q4_K structures are stored together + // The below block is for eg to extract first sub block's scales and mins from different Q4_K structures for the sb loop + memcpy(utmp_0, b_ptr[b].scales + 24 * sb, 12); + utmp_0[3] = ((utmp_0[2] >> 4) & kmask2) | (((utmp_0[1] >> 6) & kmask3) << 4); + const uint32_t uaux_0 = utmp_0[1] & kmask1; + utmp_0[1] = (utmp_0[2] & kmask2) | (((utmp_0[0] >> 6) & kmask3) << 4); + utmp_0[2] = uaux_0; + utmp_0[0] &= kmask1; + + // The below block is for eg to extract second sub block's scales and mins from different Q4_K structures when sb = 1 + memcpy(utmp_1, b_ptr[b].scales + 12 + sb * 24, 12); + utmp_1[3] = ((utmp_1[2] >> 4) & kmask2) | (((utmp_1[1] >> 6) & kmask3) << 4); + const uint32_t uaux_1 = utmp_1[1] & kmask1; + utmp_1[1] = (utmp_1[2] & kmask2) | (((utmp_1[0] >> 6) & kmask3) << 4); + utmp_1[2] = uaux_1; + utmp_1[0] &= kmask1; + + // Scales of first sub block in the sb loop + const __m128i mins_and_scales_0 = _mm_set_epi32(utmp_0[3], utmp_0[2], utmp_0[1], utmp_0[0]); + const __m256i scales_0 = _mm256_cvtepu8_epi16(_mm_unpacklo_epi8(mins_and_scales_0, mins_and_scales_0)); + + // Scales of second sub block in the sb loop + const __m128i mins_and_scales_1 = _mm_set_epi32(utmp_1[3], utmp_1[2], utmp_1[1], utmp_1[0]); + const __m256i scales_1 = _mm256_cvtepu8_epi16(_mm_unpacklo_epi8(mins_and_scales_1, mins_and_scales_1)); + + // Mins of first and second sub block of Q4_K block are arranged side by side + const __m256i mins_01 = _mm256_cvtepu8_epi16(_mm_unpacklo_epi8(_mm_shuffle_epi32(mins_and_scales_0, 78), _mm_shuffle_epi32(mins_and_scales_1, 78))); + + const __m256i scale_0145_0 = _mm256_shuffle_epi32(scales_0, 68); + const __m256i scale_2367_0 = _mm256_shuffle_epi32(scales_0, 238); + + const __m256i scale_0145_1 = _mm256_shuffle_epi32(scales_1, 68); + const __m256i scale_2367_1 = _mm256_shuffle_epi32(scales_1, 238); + + // Load the four block_q8_k quantized values interleaved with each other in chunks of eight bytes - A0,A1,A2,A3 + // Loaded as set of 128 bit vectors and repeated into a 256 bit vector + __m256i lhs_mat_0123_00 = _mm256_loadu_si256((const __m256i * )((a_ptr[b].qs + 256 * sb))); + __m256i lhs_mat_01_00 = _mm256_permute2f128_si256(lhs_mat_0123_00, lhs_mat_0123_00, 0); 
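+                    // _mm256_permute2f128_si256(x, x, 0) duplicates the low 128 bits of x into both halves of the
+                    // result; the control value 17 (0x11) used on the next line duplicates the high 128 bits instead.
+                    // Because block_q8_Kx4 interleaves the four rows in 8-byte chunks, this yields the row pairs:
+                    // lhs_mat_0123_00 = A00(0-7) A01(0-7) | A02(0-7) A03(0-7)
+                    // lhs_mat_01_00   = A00(0-7) A01(0-7) | A00(0-7) A01(0-7)   (control 0)
+                    // lhs_mat_23_00   = A02(0-7) A03(0-7) | A02(0-7) A03(0-7)   (control 17)
+                    // The same split is applied to every 32-byte chunk loaded below.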
+ __m256i lhs_mat_23_00 = _mm256_permute2f128_si256(lhs_mat_0123_00, lhs_mat_0123_00, 17); + __m256i lhs_mat_0123_01 = _mm256_loadu_si256((const __m256i * )((a_ptr[b].qs + 32 + 256 * sb))); + __m256i lhs_mat_01_01 = _mm256_permute2f128_si256(lhs_mat_0123_01, lhs_mat_0123_01, 0); + __m256i lhs_mat_23_01 = _mm256_permute2f128_si256(lhs_mat_0123_01, lhs_mat_0123_01, 17); + __m256i lhs_mat_0123_02 = _mm256_loadu_si256((const __m256i * )((a_ptr[b].qs + 64 + 256 * sb))); + __m256i lhs_mat_01_02 = _mm256_permute2f128_si256(lhs_mat_0123_02, lhs_mat_0123_02, 0); + __m256i lhs_mat_23_02 = _mm256_permute2f128_si256(lhs_mat_0123_02, lhs_mat_0123_02, 17); + __m256i lhs_mat_0123_03 = _mm256_loadu_si256((const __m256i * )((a_ptr[b].qs + 96 + 256 * sb))); + __m256i lhs_mat_01_03 = _mm256_permute2f128_si256(lhs_mat_0123_03, lhs_mat_0123_03, 0); + __m256i lhs_mat_23_03 = _mm256_permute2f128_si256(lhs_mat_0123_03, lhs_mat_0123_03, 17); + __m256i lhs_mat_0123_10 = _mm256_loadu_si256((const __m256i * )((a_ptr[b].qs + 128 + 256 * sb))); + __m256i lhs_mat_01_10 = _mm256_permute2f128_si256(lhs_mat_0123_10, lhs_mat_0123_10, 0); + __m256i lhs_mat_23_10 = _mm256_permute2f128_si256(lhs_mat_0123_10, lhs_mat_0123_10, 17); + __m256i lhs_mat_0123_11 = _mm256_loadu_si256((const __m256i * )((a_ptr[b].qs + 160 + 256 * sb))); + __m256i lhs_mat_01_11 = _mm256_permute2f128_si256(lhs_mat_0123_11, lhs_mat_0123_11, 0); + __m256i lhs_mat_23_11 = _mm256_permute2f128_si256(lhs_mat_0123_11, lhs_mat_0123_11, 17); + __m256i lhs_mat_0123_12 = _mm256_loadu_si256((const __m256i * )((a_ptr[b].qs + 192 + 256 * sb))); + __m256i lhs_mat_01_12 = _mm256_permute2f128_si256(lhs_mat_0123_12, lhs_mat_0123_12, 0); + __m256i lhs_mat_23_12 = _mm256_permute2f128_si256(lhs_mat_0123_12, lhs_mat_0123_12, 17); + __m256i lhs_mat_0123_13 = _mm256_loadu_si256((const __m256i * )((a_ptr[b].qs + 224 + 256 * sb))); + __m256i lhs_mat_01_13 = _mm256_permute2f128_si256(lhs_mat_0123_13, lhs_mat_0123_13, 0); + __m256i lhs_mat_23_13 = _mm256_permute2f128_si256(lhs_mat_0123_13, lhs_mat_0123_13, 17); + + // Bsums are loaded - four bsums are loaded (for two sub blocks) for the different Q8_K blocks + __m256i lhs_bsums_0123_01 = _mm256_loadu_si256((const __m256i * )((a_ptr[b].bsums + 16 * sb))); + __m256i lhs_bsums_hsum_0123_01 = _mm256_castsi128_si256(_mm_hadd_epi16(_mm256_castsi256_si128(lhs_bsums_0123_01), _mm256_extractf128_si256(lhs_bsums_0123_01, 1))); + lhs_bsums_hsum_0123_01 = _mm256_permute2x128_si256(lhs_bsums_hsum_0123_01, lhs_bsums_hsum_0123_01, 0); + + // Shuffle pattern one - left side input + const __m256i lhs_mat_01_00_sp1 = _mm256_shuffle_epi32(lhs_mat_01_00, 160); //A00(0-3) A00(0-3) A01(0-3) A01(0-3) A00(0-3) A00(0-3) A01(0-3) A01(0-3) + const __m256i lhs_mat_23_00_sp1 = _mm256_shuffle_epi32(lhs_mat_23_00, 160); //A02(0-3) A03(0-3) A02(0-3) A03(0-3) A02(0-3) A03(0-3) A02(0-3) A03(0-3) + + const __m256i lhs_mat_01_01_sp1 = _mm256_shuffle_epi32(lhs_mat_01_01, 160); //A00(8-11) A00(8-11) A01(8-11) A01(8-11) A00(8-11) A00(8-11) A01(8-11) A01(8-11) + const __m256i lhs_mat_23_01_sp1 = _mm256_shuffle_epi32(lhs_mat_23_01, 160); //A02(8-11) A03(8-11) A02(8-11) A03(8-11) A02(8-11) A03(8-11) A02(8-11) A03(8-11) + + const __m256i lhs_mat_01_02_sp1 = _mm256_shuffle_epi32(lhs_mat_01_02, 160); //A00(16-19) A00(16-19) A01(16-19) A01(16-19) A00(16-19) A00(16-19) A01(16-19) A01(16-19) + const __m256i lhs_mat_23_02_sp1 = _mm256_shuffle_epi32(lhs_mat_23_02, 160); //A02(16-19) A03(16-19) A02(16-19) A03(16-19) A02(16-19) A03(16-19) A02(16-19) A03(16-19) + + const __m256i 
lhs_mat_01_03_sp1 = _mm256_shuffle_epi32(lhs_mat_01_03, 160); //A00(24-27) A00(24-27) A01(24-27) A01(24-27) A00(24-27) A00(24-27) A01(24-27) A01(24-27) + const __m256i lhs_mat_23_03_sp1 = _mm256_shuffle_epi32(lhs_mat_23_03, 160); //A02(24-27) A03(24-27) A02(24-27) A03(24-27) A02(24-27) A03(24-27) A02(24-27) A03(24-27) + + const __m256i lhs_mat_01_10_sp1 = _mm256_shuffle_epi32(lhs_mat_01_10, 160); //A10(0-3) A10(0-3) A11(0-3) A11(0-3) A10(0-3) A10(0-3) A11(0-3) A11(0-3) + const __m256i lhs_mat_23_10_sp1 = _mm256_shuffle_epi32(lhs_mat_23_10, 160); //A12(0-3) A13(0-3) A12(0-3) A13(0-3) A12(0-3) A13(0-3) A12(0-3) A13(0-3) + + const __m256i lhs_mat_01_11_sp1 = _mm256_shuffle_epi32(lhs_mat_01_11, 160); //A10(8-11) A10(8-11) A11(8-11) A11(8-11) A10(8-11) A10(8-11) A11(8-11) A11(8-11) + const __m256i lhs_mat_23_11_sp1 = _mm256_shuffle_epi32(lhs_mat_23_11, 160); //A12(8-11) A13(8-11) A12(8-11) A13(8-11) A12(8-11) A13(8-11) A12(8-11) A13(8-11) + + const __m256i lhs_mat_01_12_sp1 = _mm256_shuffle_epi32(lhs_mat_01_12, 160); //A10(16-19) A10(16-19) A11(16-19) A11(16-19) A10(16-19) A10(16-19) A11(16-19) A11(16-19) + const __m256i lhs_mat_23_12_sp1 = _mm256_shuffle_epi32(lhs_mat_23_12, 160); //A12(16-19) A13(16-19) A12(16-19) A13(16-19) A12(16-19) A13(16-19) A12(16-19) A13(16-19) + + const __m256i lhs_mat_01_13_sp1 = _mm256_shuffle_epi32(lhs_mat_01_13, 160); //A10(24-27) A10(24-27) A11(24-27) A11(24-27) A10(24-27) A10(24-27) A11(24-27) A11(24-27) + const __m256i lhs_mat_23_13_sp1 = _mm256_shuffle_epi32(lhs_mat_23_13, 160); //A12(24-27) A13(24-27) A12(24-27) A13(24-27) A12(24-27) A13(24-27) A12(24-27) A13(24-27) + + // Shuffle pattern two- left side input + const __m256i lhs_mat_01_00_sp2 = _mm256_shuffle_epi32(lhs_mat_01_00, 245); //A00(4-7) A00(4-7) A01(4-7) A01(4-7) A00(4-7) A00(4-7) A01(4-7) A01(4-7) + const __m256i lhs_mat_23_00_sp2 = _mm256_shuffle_epi32(lhs_mat_23_00, 245); //A02(4-7) A03(4-7) A02(4-7) A03(4-7) A02(4-7) A03(4-7) A02(4-7) A03(4-7) + + const __m256i lhs_mat_01_01_sp2 = _mm256_shuffle_epi32(lhs_mat_01_01, 245); //A00(12-15) A00(12-15) A01(12-15) A01(12-15) A00(12-15) A00(12-15) A01(12-15) A01(12-15) + const __m256i lhs_mat_23_01_sp2 = _mm256_shuffle_epi32(lhs_mat_23_01, 245); //A02(12-15) A03(12-15) A02(12-15) A03(12-15) A02(12-15) A03(12-15) A02(12-15) A03(12-15) + + const __m256i lhs_mat_01_02_sp2 = _mm256_shuffle_epi32(lhs_mat_01_02, 245); //A00(20-23) A00(20-23) A01(20-23) A01(20-23) A00(20-23) A00(20-23) A01(20-23) A01(20-23) + const __m256i lhs_mat_23_02_sp2 = _mm256_shuffle_epi32(lhs_mat_23_02, 245); //A02(20-23) A03(20-23) A02(20-23) A03(20-23) A02(20-23) A03(20-23) A02(20-23) A03(20-23) + + const __m256i lhs_mat_01_03_sp2 = _mm256_shuffle_epi32(lhs_mat_01_03, 245); //A00(28-31) A00(28-31) A01(28-31) A01(28-31) A00(28-31) A00(28-31) A01(28-31) A01(28-31) + const __m256i lhs_mat_23_03_sp2 = _mm256_shuffle_epi32(lhs_mat_23_03, 245); //A02(28-31) A03(28-31) A02(28-31) A03(28-31) A02(28-31) A03(28-31) A02(28-31) A03(28-31) + + const __m256i lhs_mat_01_10_sp2 = _mm256_shuffle_epi32(lhs_mat_01_10, 245); //A10(4-7) A10(4-7) A11(4-7) A11(4-7) A10(4-7) A10(4-7) A11(4-7) A11(4-7) + const __m256i lhs_mat_23_10_sp2 = _mm256_shuffle_epi32(lhs_mat_23_10, 245); //A12(4-7) A13(4-7) A12(4-7) A13(4-7) A12(4-7) A13(4-7) A12(4-7) A13(4-7) + + const __m256i lhs_mat_01_11_sp2 = _mm256_shuffle_epi32(lhs_mat_01_11, 245); //A10(12-15) A10(12-15) A11(12-15) A11(12-15) A10(12-15) A10(12-15) A11(12-15) A11(12-15) + const __m256i lhs_mat_23_11_sp2 = _mm256_shuffle_epi32(lhs_mat_23_11, 245); //A12(12-15) 
A13(12-15) A12(12-15) A13(12-15) A12(12-15) A13(12-15) A12(12-15) A13(12-15) + + const __m256i lhs_mat_01_12_sp2 = _mm256_shuffle_epi32(lhs_mat_01_12, 245); //A10(20-23) A10(20-23) A11(20-23) A11(20-23) A10(20-23) A10(20-23) A11(20-23) A11(20-23) + const __m256i lhs_mat_23_12_sp2 = _mm256_shuffle_epi32(lhs_mat_23_12, 245); //A12(20-23) A13(20-23) A12(20-23) A13(20-23) A12(20-23) A13(20-23) A12(20-23) A13(20-23) + + const __m256i lhs_mat_01_13_sp2 = _mm256_shuffle_epi32(lhs_mat_01_13, 245); //A10(28-31) A10(28-31) A11(28-31) A11(28-31) A10(28-31) A10(28-31) A11(28-31) A11(28-31) + const __m256i lhs_mat_23_13_sp2 = _mm256_shuffle_epi32(lhs_mat_23_13, 245); //A12(28-31) A13(28-31) A12(28-31) A13(28-31) A12(28-31) A13(28-31) A12(28-31) A13(28-31) + + // The values arranged in shuffle patterns are operated with dot product operation within 32 bit lane i.e corresponding bytes and multiplied and added into 32 bit integers within 32 bit lane + __m256i iacc_mat_00_0_sp1 = _mm256_add_epi16(_mm256_add_epi16(_mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_03_sp1, lhs_mat_01_03_sp1), _mm256_maddubs_epi16(rhs_mat_0145_02_sp1, lhs_mat_01_02_sp1)), _mm256_maddubs_epi16(rhs_mat_0145_01_sp1, lhs_mat_01_01_sp1)), _mm256_maddubs_epi16(rhs_mat_0145_00_sp1, lhs_mat_01_00_sp1)); + __m256i iacc_mat_01_0_sp1 = _mm256_add_epi16(_mm256_add_epi16(_mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_03_sp1, lhs_mat_01_03_sp1), _mm256_maddubs_epi16(rhs_mat_2367_02_sp1, lhs_mat_01_02_sp1)), _mm256_maddubs_epi16(rhs_mat_2367_01_sp1, lhs_mat_01_01_sp1)), _mm256_maddubs_epi16(rhs_mat_2367_00_sp1, lhs_mat_01_00_sp1)); + __m256i iacc_mat_10_0_sp1 = _mm256_add_epi16(_mm256_add_epi16(_mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_03_sp1, lhs_mat_23_03_sp1), _mm256_maddubs_epi16(rhs_mat_0145_02_sp1, lhs_mat_23_02_sp1)), _mm256_maddubs_epi16(rhs_mat_0145_01_sp1, lhs_mat_23_01_sp1)), _mm256_maddubs_epi16(rhs_mat_0145_00_sp1, lhs_mat_23_00_sp1)); + __m256i iacc_mat_11_0_sp1 = _mm256_add_epi16(_mm256_add_epi16(_mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_03_sp1, lhs_mat_23_03_sp1), _mm256_maddubs_epi16(rhs_mat_2367_02_sp1, lhs_mat_23_02_sp1)), _mm256_maddubs_epi16(rhs_mat_2367_01_sp1, lhs_mat_23_01_sp1)), _mm256_maddubs_epi16(rhs_mat_2367_00_sp1, lhs_mat_23_00_sp1)); + __m256i iacc_mat_00_1_sp1 = _mm256_add_epi16(_mm256_add_epi16(_mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_13_sp1, lhs_mat_01_13_sp1), _mm256_maddubs_epi16(rhs_mat_0145_12_sp1, lhs_mat_01_12_sp1)), _mm256_maddubs_epi16(rhs_mat_0145_11_sp1, lhs_mat_01_11_sp1)), _mm256_maddubs_epi16(rhs_mat_0145_10_sp1, lhs_mat_01_10_sp1)); + __m256i iacc_mat_01_1_sp1 = _mm256_add_epi16(_mm256_add_epi16(_mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_13_sp1, lhs_mat_01_13_sp1), _mm256_maddubs_epi16(rhs_mat_2367_12_sp1, lhs_mat_01_12_sp1)), _mm256_maddubs_epi16(rhs_mat_2367_11_sp1, lhs_mat_01_11_sp1)), _mm256_maddubs_epi16(rhs_mat_2367_10_sp1, lhs_mat_01_10_sp1)); + __m256i iacc_mat_10_1_sp1 = _mm256_add_epi16(_mm256_add_epi16(_mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_13_sp1, lhs_mat_23_13_sp1), _mm256_maddubs_epi16(rhs_mat_0145_12_sp1, lhs_mat_23_12_sp1)), _mm256_maddubs_epi16(rhs_mat_0145_11_sp1, lhs_mat_23_11_sp1)), _mm256_maddubs_epi16(rhs_mat_0145_10_sp1, lhs_mat_23_10_sp1)); + __m256i iacc_mat_11_1_sp1 = _mm256_add_epi16(_mm256_add_epi16(_mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_13_sp1, lhs_mat_23_13_sp1), _mm256_maddubs_epi16(rhs_mat_2367_12_sp1, lhs_mat_23_12_sp1)), _mm256_maddubs_epi16(rhs_mat_2367_11_sp1, lhs_mat_23_11_sp1)), 
_mm256_maddubs_epi16(rhs_mat_2367_10_sp1, lhs_mat_23_10_sp1)); + + __m256i iacc_mat_00_0_sp2 = _mm256_add_epi16(_mm256_add_epi16(_mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_03_sp2, lhs_mat_01_03_sp2), _mm256_maddubs_epi16(rhs_mat_0145_02_sp2, lhs_mat_01_02_sp2)), _mm256_maddubs_epi16(rhs_mat_0145_01_sp2, lhs_mat_01_01_sp2)), _mm256_maddubs_epi16(rhs_mat_0145_00_sp2, lhs_mat_01_00_sp2)); + __m256i iacc_mat_01_0_sp2 = _mm256_add_epi16(_mm256_add_epi16(_mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_03_sp2, lhs_mat_01_03_sp2), _mm256_maddubs_epi16(rhs_mat_2367_02_sp2, lhs_mat_01_02_sp2)), _mm256_maddubs_epi16(rhs_mat_2367_01_sp2, lhs_mat_01_01_sp2)), _mm256_maddubs_epi16(rhs_mat_2367_00_sp2, lhs_mat_01_00_sp2)); + __m256i iacc_mat_10_0_sp2 = _mm256_add_epi16(_mm256_add_epi16(_mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_03_sp2, lhs_mat_23_03_sp2), _mm256_maddubs_epi16(rhs_mat_0145_02_sp2, lhs_mat_23_02_sp2)), _mm256_maddubs_epi16(rhs_mat_0145_01_sp2, lhs_mat_23_01_sp2)), _mm256_maddubs_epi16(rhs_mat_0145_00_sp2, lhs_mat_23_00_sp2)); + __m256i iacc_mat_11_0_sp2 = _mm256_add_epi16(_mm256_add_epi16(_mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_03_sp2, lhs_mat_23_03_sp2), _mm256_maddubs_epi16(rhs_mat_2367_02_sp2, lhs_mat_23_02_sp2)), _mm256_maddubs_epi16(rhs_mat_2367_01_sp2, lhs_mat_23_01_sp2)), _mm256_maddubs_epi16(rhs_mat_2367_00_sp2, lhs_mat_23_00_sp2)); + __m256i iacc_mat_00_1_sp2 = _mm256_add_epi16(_mm256_add_epi16(_mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_13_sp2, lhs_mat_01_13_sp2), _mm256_maddubs_epi16(rhs_mat_0145_12_sp2, lhs_mat_01_12_sp2)), _mm256_maddubs_epi16(rhs_mat_0145_11_sp2, lhs_mat_01_11_sp2)), _mm256_maddubs_epi16(rhs_mat_0145_10_sp2, lhs_mat_01_10_sp2)); + __m256i iacc_mat_01_1_sp2 = _mm256_add_epi16(_mm256_add_epi16(_mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_13_sp2, lhs_mat_01_13_sp2), _mm256_maddubs_epi16(rhs_mat_2367_12_sp2, lhs_mat_01_12_sp2)), _mm256_maddubs_epi16(rhs_mat_2367_11_sp2, lhs_mat_01_11_sp2)), _mm256_maddubs_epi16(rhs_mat_2367_10_sp2, lhs_mat_01_10_sp2)); + __m256i iacc_mat_10_1_sp2 = _mm256_add_epi16(_mm256_add_epi16(_mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_13_sp2, lhs_mat_23_13_sp2), _mm256_maddubs_epi16(rhs_mat_0145_12_sp2, lhs_mat_23_12_sp2)), _mm256_maddubs_epi16(rhs_mat_0145_11_sp2, lhs_mat_23_11_sp2)), _mm256_maddubs_epi16(rhs_mat_0145_10_sp2, lhs_mat_23_10_sp2)); + __m256i iacc_mat_11_1_sp2 = _mm256_add_epi16(_mm256_add_epi16(_mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_13_sp2, lhs_mat_23_13_sp2), _mm256_maddubs_epi16(rhs_mat_2367_12_sp2, lhs_mat_23_12_sp2)), _mm256_maddubs_epi16(rhs_mat_2367_11_sp2, lhs_mat_23_11_sp2)), _mm256_maddubs_epi16(rhs_mat_2367_10_sp2, lhs_mat_23_10_sp2)); + + // Output of both shuffle patterns are added in order to sum dot product outputs of all 32 values in block + __m256i iacc_mat_00_0 = _mm256_add_epi16(iacc_mat_00_0_sp1, iacc_mat_00_0_sp2); + __m256i iacc_mat_01_0 = _mm256_add_epi16(iacc_mat_01_0_sp1, iacc_mat_01_0_sp2); + __m256i iacc_mat_10_0 = _mm256_add_epi16(iacc_mat_10_0_sp1, iacc_mat_10_0_sp2); + __m256i iacc_mat_11_0 = _mm256_add_epi16(iacc_mat_11_0_sp1, iacc_mat_11_0_sp2); + + __m256i iacc_mat_00_1 = _mm256_add_epi16(iacc_mat_00_1_sp1, iacc_mat_00_1_sp2); + __m256i iacc_mat_01_1 = _mm256_add_epi16(iacc_mat_01_1_sp1, iacc_mat_01_1_sp2); + __m256i iacc_mat_10_1 = _mm256_add_epi16(iacc_mat_10_1_sp1, iacc_mat_10_1_sp2); + __m256i iacc_mat_11_1 = _mm256_add_epi16(iacc_mat_11_1_sp1, iacc_mat_11_1_sp2); + + // Output of both shuffle patterns are added in order to sum 
dot product outputs of all 32 values in block + iacc_mat_00_0 = _mm256_madd_epi16(iacc_mat_00_0, scale_0145_0); + iacc_mat_01_0 = _mm256_madd_epi16(iacc_mat_01_0, scale_2367_0); + iacc_mat_10_0 = _mm256_madd_epi16(iacc_mat_10_0, scale_0145_0); + iacc_mat_11_0 = _mm256_madd_epi16(iacc_mat_11_0, scale_2367_0); + + iacc_mat_00_1 = _mm256_madd_epi16(iacc_mat_00_1, scale_0145_1); + iacc_mat_01_1 = _mm256_madd_epi16(iacc_mat_01_1, scale_2367_1); + iacc_mat_10_1 = _mm256_madd_epi16(iacc_mat_10_1, scale_0145_1); + iacc_mat_11_1 = _mm256_madd_epi16(iacc_mat_11_1, scale_2367_1); + + // Straighten out to make 4 row vectors (4 for each sub block which are accumulated together in the next step) + __m256i iacc_row_0_0 = _mm256_blend_epi32(iacc_mat_00_0, _mm256_shuffle_epi32(iacc_mat_01_0, 78), 204); + __m256i iacc_row_1_0 = _mm256_blend_epi32(_mm256_shuffle_epi32(iacc_mat_00_0, 78), iacc_mat_01_0, 204); + __m256i iacc_row_2_0 = _mm256_blend_epi32(iacc_mat_10_0, _mm256_shuffle_epi32(iacc_mat_11_0, 78), 204); + __m256i iacc_row_3_0 = _mm256_blend_epi32(_mm256_shuffle_epi32(iacc_mat_10_0, 78), iacc_mat_11_0, 204); + __m256i iacc_row_0_1 = _mm256_blend_epi32(iacc_mat_00_1, _mm256_shuffle_epi32(iacc_mat_01_1, 78), 204); + __m256i iacc_row_1_1 = _mm256_blend_epi32(_mm256_shuffle_epi32(iacc_mat_00_1, 78), iacc_mat_01_1, 204); + __m256i iacc_row_2_1 = _mm256_blend_epi32(iacc_mat_10_1, _mm256_shuffle_epi32(iacc_mat_11_1, 78), 204); + __m256i iacc_row_3_1 = _mm256_blend_epi32(_mm256_shuffle_epi32(iacc_mat_10_1, 78), iacc_mat_11_1, 204); + + __m256i iacc_row_0 = _mm256_add_epi32(iacc_row_0_0, iacc_row_0_1); + __m256i iacc_row_1 = _mm256_add_epi32(iacc_row_1_0, iacc_row_1_1); + __m256i iacc_row_2 = _mm256_add_epi32(iacc_row_2_0, iacc_row_2_1); + __m256i iacc_row_3 = _mm256_add_epi32(iacc_row_3_0, iacc_row_3_1); + + // Load the scale(d) values for all the 4 Q8_k blocks and repeat it across lanes + const __m128 row_scale_f32_sse = _mm_load_ps(a_ptr[b].d); + const __m256 row_scale_f32 = _mm256_set_m128(row_scale_f32_sse, row_scale_f32_sse); //GGML_F32Cx8_REPEAT_LOAD(a_ptrs[rp][b].d, loadMask); + + // Multiply with appropiate scales and accumulate (for both d and dmin) below + acc_rows[0] = _mm256_fmadd_ps(_mm256_cvtepi32_ps(iacc_row_0), _mm256_mul_ps(col_scale_f32, _mm256_shuffle_ps(row_scale_f32, row_scale_f32, 0)), acc_rows[0]); + acc_rows[1] = _mm256_fmadd_ps(_mm256_cvtepi32_ps(iacc_row_1), _mm256_mul_ps(col_scale_f32, _mm256_shuffle_ps(row_scale_f32, row_scale_f32, 85)), acc_rows[1]); + acc_rows[2] = _mm256_fmadd_ps(_mm256_cvtepi32_ps(iacc_row_2), _mm256_mul_ps(col_scale_f32, _mm256_shuffle_ps(row_scale_f32, row_scale_f32, 170)), acc_rows[2]); + acc_rows[3] = _mm256_fmadd_ps(_mm256_cvtepi32_ps(iacc_row_3), _mm256_mul_ps(col_scale_f32, _mm256_shuffle_ps(row_scale_f32, row_scale_f32, 255)), acc_rows[3]); + + __m256i iacc_row_min_0 = _mm256_madd_epi16(_mm256_shuffle_epi32(lhs_bsums_hsum_0123_01, 0), mins_01); + __m256i iacc_row_min_1 = _mm256_madd_epi16(_mm256_shuffle_epi32(lhs_bsums_hsum_0123_01, 85), mins_01); + __m256i iacc_row_min_2 = _mm256_madd_epi16(_mm256_shuffle_epi32(lhs_bsums_hsum_0123_01, 170), mins_01); + __m256i iacc_row_min_3 = _mm256_madd_epi16(_mm256_shuffle_epi32(lhs_bsums_hsum_0123_01, 255), mins_01); + + acc_min_rows[0] = _mm256_fmadd_ps(_mm256_cvtepi32_ps(iacc_row_min_0), _mm256_mul_ps(col_dmin_f32, _mm256_shuffle_ps(row_scale_f32, row_scale_f32, 0)), acc_min_rows[0]); + acc_min_rows[1] = _mm256_fmadd_ps(_mm256_cvtepi32_ps(iacc_row_min_1), _mm256_mul_ps(col_dmin_f32, 
_mm256_shuffle_ps(row_scale_f32, row_scale_f32, 85)), acc_min_rows[1]); + acc_min_rows[2] = _mm256_fmadd_ps(_mm256_cvtepi32_ps(iacc_row_min_2), _mm256_mul_ps(col_dmin_f32, _mm256_shuffle_ps(row_scale_f32, row_scale_f32, 170)), acc_min_rows[2]); + acc_min_rows[3] = _mm256_fmadd_ps(_mm256_cvtepi32_ps(iacc_row_min_3), _mm256_mul_ps(col_dmin_f32, _mm256_shuffle_ps(row_scale_f32, row_scale_f32, 255)), acc_min_rows[3]); + } + } + + // Store the accumulated values + for (int i = 0; i < 4; i++) { + _mm256_storeu_ps((float * )(s + ((y * 4 + i) * bs + x * 8)), _mm256_sub_ps(acc_rows[i], acc_min_rows[i])); + } + } + } + +#else + + float sumf[4][8]; + float sum_minf[4][8]; + uint32_t utmp[32]; + int sumi1; + int sumi2; + int sumi; + + for (int y = 0; y < nr / 4; y++) { + const block_q8_Kx4 * a_ptr = (const block_q8_Kx4 *) vy + (y * nb); + for (int x = 0; x < nc / ncols_interleaved; x++) { + const block_q4_Kx8 * b_ptr = (const block_q4_Kx8 *) vx + (x * nb); + for (int m = 0; m < 4; m++) { + for (int j = 0; j < ncols_interleaved; j++) { + sumf[m][j] = 0.0; + sum_minf[m][j] = 0.0; + } + } + for (int l = 0; l < nb; l++) { + for (int sb = 0; sb < 8; sb++) { + memcpy(utmp + sb * 4, b_ptr[l].scales + sb * 12, 12); + utmp[sb * 4 + 3] = ((utmp[sb * 4 + 2] >> 4) & kmask2) | (((utmp[sb * 4 + 1] >> 6) & kmask3) << 4); + const uint32_t uaux_0 = utmp[sb * 4 + 1] & kmask1; + utmp[sb * 4 + 1] = (utmp[sb * 4 + 2] & kmask2) | (((utmp[sb * 4 + 0] >> 6) & kmask3) << 4); + utmp[sb * 4 + 2] = uaux_0; + utmp[sb * 4 + 0] &= kmask1; + } + for (int k = 0; k < (qk / (2 * blocklen)); k++) { + uint8_t *scales_0 = (uint8_t*) utmp + (k / 4) * 32; + uint8_t *scales_1 = (uint8_t*) utmp + (k / 4) * 32 + 16; + for (int m = 0; m < 4; m++) { + for (int j = 0; j < ncols_interleaved; j++) { + sumi1 = 0; + sumi2 = 0; + sumi = 0; + for (int i = 0; i < blocklen; ++i) { + const int v0 = (int8_t) (b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] & 0xF); + const int v1 = (int8_t) (b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] >> 4); + sumi1 = (v0 * a_ptr[l].qs[(k >> 2) * 256 + (k % 4) * 4 * blocklen + m * blocklen + i]); + sumi2 = (v1 * a_ptr[l].qs[(k >> 2) * 256 + (k % 4) * 4 * blocklen + m * blocklen + i + 128]); + sumi1 = sumi1 * scales_0[j]; + sumi2 = sumi2 * scales_1[j]; + sumi += sumi1 + sumi2; + } + sumf[m][j] += sumi * GGML_FP16_TO_FP32(b_ptr[l].d[j]) * a_ptr[l].d[m]; + } + } + } + for (int sb = 0; sb < 8; sb++) { + uint8_t *mins = (uint8_t*) utmp + 8 + sb * 16; + for(int m = 0; m < 4; m++) { + const int16_t *bsums = a_ptr[l].bsums + (sb * 8) + (m * 4) - ((sb % 2) * 6); + for(int j = 0; j < ncols_interleaved; j++) { + sum_minf[m][j] += mins[j] * (bsums[0] + bsums[1]) * GGML_FP16_TO_FP32(b_ptr[l].dmin[j]) * a_ptr[l].d[m]; + } + } + } + } + for (int m = 0; m < 4; m++) { + for (int j = 0; j < ncols_interleaved; j++) { + s[(y * 4 + m) * bs + x * ncols_interleaved + j] = sumf[m][j] - sum_minf[m][j]; + } + } + } + } +#endif +} + static void ggml_gemm_iq4_nl_4x4_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) { const int qk = QK8_0; const int nb = n / qk; @@ -3660,6 +5009,82 @@ static block_q4_0x8 make_block_q4_0x8(block_q4_0 * in, unsigned int blck_size_in return out; } +static block_q4_Kx8 make_block_q4_Kx8(block_q4_K * in, unsigned int blck_size_interleave) { + block_q4_Kx8 out; + //Delta(scale) and dmin values of the eight Q4_K structures are copied onto the output interleaved structure + for (int i = 0; i < 8; i++) { + out.d[i] 
= in[i].GGML_COMMON_AGGR_U.GGML_COMMON_AGGR_S.d; + } + + for (int i = 0; i < 8; i++) { + out.dmin[i] = in[i].GGML_COMMON_AGGR_U.GGML_COMMON_AGGR_S.dmin; + } + + const int end = QK_K * 4 / blck_size_interleave; + + // Interleave Q4_K quants by taking 8 bytes at a time + for (int i = 0; i < end; ++i) { + int src_id = i % 8; + int src_offset = (i / 8) * blck_size_interleave; + int dst_offset = i * blck_size_interleave; + + uint64_t elems; + memcpy(&elems, &in[src_id].qs[src_offset], sizeof(uint64_t)); + memcpy(&out.qs[dst_offset], &elems, sizeof(uint64_t)); + } + + // The below logic is designed so as to unpack and rearrange scales and mins values in Q4_K + // Currently the Q4_K structure has 8 scales and 8 mins packed in 12 bytes ( 6 bits for each value) + // The output Q4_Kx8 structure has 96 bytes + // Every 12 byte is packed such that it contains scales and mins for corresponding sub blocks from Q4_K structure + // For eg - First 12 bytes contains 8 scales and 8 mins - each of first sub block from different Q4_K structures + uint8_t s[8], m[8]; + + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 8; j++) { + s[j] = in[j].scales[i] & 63; + m[j] = in[j].scales[i + 4] & 63; + } + + out.scales[i * 12] = (s[0] & 63) + ((s[4] & 48) << 2); + out.scales[i * 12 + 1] = (s[1] & 63) + ((s[5] & 48) << 2); + out.scales[i * 12 + 2] = (s[2] & 63) + ((s[6] & 48) << 2); + out.scales[i * 12 + 3] = (s[3] & 63) + ((s[7] & 48) << 2); + out.scales[i * 12 + 4] = (m[0] & 63) + ((m[4] & 48) << 2); + out.scales[i * 12 + 5] = (m[1] & 63) + ((m[5] & 48) << 2); + out.scales[i * 12 + 6] = (m[2] & 63) + ((m[6] & 48) << 2); + out.scales[i * 12 + 7] = (m[3] & 63) + ((m[7] & 48) << 2); + out.scales[i * 12 + 8] = (s[4] & 15) + ((m[4] & 15) << 4); + out.scales[i * 12 + 9] = (s[5] & 15) + ((m[5] & 15) << 4); + out.scales[i * 12 + 10] = (s[6] & 15) + ((m[6] & 15) << 4); + out.scales[i * 12 + 11] = (s[7] & 15) + ((m[7] & 15) << 4); + + } + + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 8; j++) { + s[j] = ((in[j].scales[i] & 192) >> 2) | (in[j].scales[i+8] & 15); + m[j] = ((in[j].scales[i + 4] & 192) >> 2) | ((in[j].scales[i+8] & 240) >> 4); + } + + out.scales[i * 12 + 48] = (s[0] & 63) + ((s[4] & 48) << 2); + out.scales[i * 12 + 49] = (s[1] & 63) + ((s[5] & 48) << 2); + out.scales[i * 12 + 50] = (s[2] & 63) + ((s[6] & 48) << 2); + out.scales[i * 12 + 51] = (s[3] & 63) + ((s[7] & 48) << 2); + out.scales[i * 12 + 52] = (m[0] & 63) + ((m[4] & 48) << 2); + out.scales[i * 12 + 53] = (m[1] & 63) + ((m[5] & 48) << 2); + out.scales[i * 12 + 54] = (m[2] & 63) + ((m[6] & 48) << 2); + out.scales[i * 12 + 55] = (m[3] & 63) + ((m[7] & 48) << 2); + out.scales[i * 12 + 56] = (s[4] & 15) + ((m[4] & 15) << 4); + out.scales[i * 12 + 57] = (s[5] & 15) + ((m[5] & 15) << 4); + out.scales[i * 12 + 58] = (s[6] & 15) + ((m[6] & 15) << 4); + out.scales[i * 12 + 59] = (s[7] & 15) + ((m[7] & 15) << 4); + + } + + return out; +} + static int repack_q4_0_to_q4_0_4_bl(struct ggml_tensor * t, int interleave_block, const void * GGML_RESTRICT data, size_t data_size) { GGML_ASSERT(t->type == GGML_TYPE_Q4_0); GGML_ASSERT(interleave_block == 4 || interleave_block == 8); @@ -3690,6 +5115,36 @@ static int repack_q4_0_to_q4_0_4_bl(struct ggml_tensor * t, int interleave_block GGML_UNUSED(data_size); } +static int repack_q4_K_to_q4_K_8_bl(struct ggml_tensor * t, int interleave_block, const void * GGML_RESTRICT data, size_t data_size) { + GGML_ASSERT(t->type == GGML_TYPE_Q4_K); + GGML_ASSERT(interleave_block == 8); + constexpr int nrows_interleaved = 8; + + 
block_q4_Kx8 * dst = (block_q4_Kx8*)t->data; + const block_q4_K * src = (const block_q4_K*) data; + block_q4_K dst_tmp[8]; + int nrow = ggml_nrows(t); + int nblocks = t->ne[0] / QK_K; + + GGML_ASSERT(data_size == nrow * nblocks * sizeof(block_q4_K)); + + if (t->ne[1] % nrows_interleaved != 0 || t->ne[0] % 8 != 0) { + return -1; + } + + for (int b = 0; b < nrow; b += nrows_interleaved) { + for (int64_t x = 0; x < nblocks; x++) { + for (int i = 0; i < nrows_interleaved; i++ ) { + dst_tmp[i] = src[x + i * nblocks]; + } + *dst++ = make_block_q4_Kx8(dst_tmp, interleave_block); + } + src += nrows_interleaved * nblocks; + } + return 0; + + GGML_UNUSED(data_size); +} static int repack_q4_0_to_q4_0_8_bl(struct ggml_tensor * t, int interleave_block, const void * GGML_RESTRICT data, size_t data_size) { GGML_ASSERT(t->type == GGML_TYPE_Q4_0); @@ -3807,6 +5262,10 @@ template <> int repack(struct ggml_tensor * t, const void * da return repack_q4_0_to_q4_0_8_bl(t, 8, data, data_size); } +template <> int repack(struct ggml_tensor * t, const void * data, size_t data_size) { + return repack_q4_K_to_q4_K_8_bl(t, 8, data, data_size); +} + template <> int repack(struct ggml_tensor * t, const void * data, size_t data_size) { return repack_iq4_nl_to_iq4_nl_4_bl(t, 4, data, data_size); } @@ -3832,6 +5291,10 @@ template <> void gemv(int n, float * s, size_t bs, const void ggml_gemv_q4_0_8x8_q8_0(n, s, bs, vx, vy, nr, nc); } +template <> void gemv(int n, float * s, size_t bs, const void * vx, const void * vy, int nr, int nc) { + ggml_gemv_q4_K_8x8_q8_K(n, s, bs, vx, vy, nr, nc); +} + template <> void gemv(int n, float * s, size_t bs, const void * vx, const void * vy, int nr, int nc) { ggml_gemv_iq4_nl_4x4_q8_0(n, s, bs, vx, vy, nr, nc); @@ -3853,6 +5316,10 @@ template <> void gemm(int n, float * s, size_t bs, const void ggml_gemm_q4_0_8x8_q8_0(n, s, bs, vx, vy, nr, nc); } +template <> void gemm(int n, float * s, size_t bs, const void * vx, const void * vy, int nr, int nc) { + ggml_gemm_q4_K_8x8_q8_K(n, s, bs, vx, vy, nr, nc); +} + template <> void gemm(int n, float * s, size_t bs, const void * vx, const void * vy, int nr, int nc) { ggml_gemm_iq4_nl_4x4_q8_0(n, s, bs, vx, vy, nr, nc); @@ -3863,16 +5330,16 @@ class tensor_traits_base : public ggml::cpu::tensor_traits { virtual int repack(struct ggml_tensor * t, const void * data, size_t data_size) = 0; }; -template class tensor_traits : public tensor_traits_base { +template class tensor_traits : public tensor_traits_base { bool work_size(int /* n_threads */, const struct ggml_tensor * op, size_t & size) override { // not realy a GGML_TYPE_Q8_0 but same size. switch (op->op) { case GGML_OP_MUL_MAT: - size = ggml_row_size(GGML_TYPE_Q8_0, ggml_nelements(op->src[1])); + size = ggml_row_size(PARAM_TYPE, ggml_nelements(op->src[1])); return true; case GGML_OP_MUL_MAT_ID: - size = ggml_row_size(GGML_TYPE_Q8_0, ggml_nelements(op->src[1])); + size = ggml_row_size(PARAM_TYPE, ggml_nelements(op->src[1])); size = GGML_PAD(size, sizeof(int64_t)); // + padding for next bloc. 
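+                // The extra int64 slots added below make room for the expert-routing bookkeeping used by the
+                // MUL_MAT_ID path (a per-expert row count plus row/expert index pairs), kept in the same scratch
+                // buffer as the quantized src1 data: op->src[0]->ne[2] is the number of experts and
+                // op->src[1]->ne[2] the number of rows being routed.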
size += sizeof(int64_t) * (1+op->src[0]->ne[2]) * op->src[1]->ne[2]; return true; @@ -3925,16 +5392,23 @@ template class tensor_ // GGML_ASSERT(ggml_n_dims(op->src[1]) == 2); char * wdata = static_cast(params->wdata); - const size_t nbw1 = ggml_row_size(GGML_TYPE_Q8_0, ne10); + const size_t nbw1 = ggml_row_size(PARAM_TYPE, ne10); assert(params->wsize >= nbw1 * ne11); - const ggml_from_float_t from_float = ggml_get_type_traits_cpu(GGML_TYPE_Q8_0)->from_float; + const ggml_from_float_t from_float = ggml_get_type_traits_cpu(PARAM_TYPE)->from_float; int64_t i11_processed = 0; - for (int64_t i11 = ith * 4; i11 < ne11 - ne11 % 4; i11 += nth * 4) { - quantize_mat_q8_0((float *) ((char *) src1->data + i11 * nb11), (void *) (wdata + i11 * nbw1), 4, ne10, + if(PARAM_TYPE == GGML_TYPE_Q8_K) { + for (int64_t i11 = ith * 4; i11 < ne11 - ne11 % 4; i11 += nth * 4) { + quantize_mat_q8_K((float *) ((char *) src1->data + i11 * nb11), (void *) (wdata + i11 * nbw1), 4, ne10, INTER_SIZE); + } + } else { + for (int64_t i11 = ith * 4; i11 < ne11 - ne11 % 4; i11 += nth * 4) { + quantize_mat_q8_0((float *) ((char *) src1->data + i11 * nb11), (void *) (wdata + i11 * nbw1), 4, ne10, + INTER_SIZE); + } } i11_processed = ne11 - ne11 % 4; for (int64_t i11 = i11_processed + ith; i11 < ne11; i11 += nth) { @@ -3944,7 +5418,7 @@ template class tensor_ ggml_barrier(params->threadpool); const void * src1_wdata = params->wdata; - const size_t src1_col_stride = ggml_row_size(GGML_TYPE_Q8_0, ne10); + const size_t src1_col_stride = ggml_row_size(PARAM_TYPE, ne10); int64_t src0_start = (ith * ne01) / nth; int64_t src0_end = ((ith + 1) * ne01) / nth; src0_start = (src0_start % NB_COLS) ? src0_start + NB_COLS - (src0_start % NB_COLS) : src0_start; @@ -4098,12 +5572,13 @@ template class tensor_ }; // instance for Q4 -static const tensor_traits q4_0_4x4_q8_0; -static const tensor_traits q4_0_4x8_q8_0; -static const tensor_traits q4_0_8x8_q8_0; +static const tensor_traits q4_0_4x4_q8_0; +static const tensor_traits q4_0_4x8_q8_0; +static const tensor_traits q4_0_8x8_q8_0; +static const tensor_traits q4_K_8x8_q8_K; // instance for IQ4 -static const tensor_traits iq4_nl_4x4_q8_0; +static const tensor_traits iq4_nl_4x4_q8_0; } // namespace ggml::cpu::aarch64 @@ -4124,6 +5599,12 @@ static const ggml::cpu::tensor_traits * ggml_aarch64_get_optimal_repack_type(con return &ggml::cpu::aarch64::q4_0_4x4_q8_0; } } + } else if (cur->type == GGML_TYPE_Q4_K) { + if (ggml_cpu_has_avx2()) { + if (cur->ne[1] % 8 == 0) { + return &ggml::cpu::aarch64::q4_K_8x8_q8_K; + } + } } else if (cur->type == GGML_TYPE_IQ4_NL) { if (ggml_cpu_has_neon() && ggml_cpu_has_dotprod()) { if (cur->ne[1] % 4 == 0) { From dbb3a4739e53226346c772dd72666e68b78dc583 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sigbj=C3=B8rn=20Skj=C3=A6ret?= Date: Thu, 20 Mar 2025 12:49:59 +0100 Subject: [PATCH 095/398] llama : make Qwen2MoE QKV bias optional (#12477) --- src/llama-model.cpp | 21 +++++++++++++++------ 1 file changed, 15 insertions(+), 6 deletions(-) diff --git a/src/llama-model.cpp b/src/llama-model.cpp index 17af8cc30b0cb..cd7e0a0c4dbf8 100644 --- a/src/llama-model.cpp +++ b/src/llama-model.cpp @@ -2210,9 +2210,9 @@ bool llama_model::load_tensors(llama_model_loader & ml) { layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0); // optional bias tensors - layer.bq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "bias", i), {n_embd}, 0); - layer.bk = create_tensor(tn(LLM_TENSOR_ATTN_K, "bias", i), {n_embd_gqa}, 0); - layer.bv = create_tensor(tn(LLM_TENSOR_ATTN_V, 
"bias", i), {n_embd_gqa}, 0); + layer.bq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "bias", i), {n_embd}, TENSOR_NOT_REQUIRED); + layer.bk = create_tensor(tn(LLM_TENSOR_ATTN_K, "bias", i), {n_embd_gqa}, TENSOR_NOT_REQUIRED); + layer.bv = create_tensor(tn(LLM_TENSOR_ATTN_V, "bias", i), {n_embd_gqa}, TENSOR_NOT_REQUIRED); layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0); @@ -6193,16 +6193,25 @@ struct llm_build_qwen2moe : public llm_graph_context { { // compute Q and K and RoPE them ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur); - Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq); cb(Qcur, "Qcur", il); + if (model.layers[il].bq) { + Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq); + cb(Qcur, "Qcur", il); + } ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur); - Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk); cb(Kcur, "Kcur", il); + if (model.layers[il].bk) { + Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk); + cb(Kcur, "Kcur", il); + } ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur); - Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv); cb(Vcur, "Vcur", il); + if (model.layers[il].bv) { + Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv); + cb(Vcur, "Vcur", il); + } Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens); Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens); From e04643063b3d240b8c0fdba98677dff6ba346784 Mon Sep 17 00:00:00 2001 From: Woof Dog <197125663+woof-dog@users.noreply.github.com> Date: Thu, 20 Mar 2025 14:57:43 +0000 Subject: [PATCH 096/398] webui : Prevent rerendering on textarea input (#12299) * webui: Make textarea uncontrolled to eliminate devastating lag * Update index.html.gz * use signal-style implementation * rm console log * no duplicated savedInitValue set --------- Co-authored-by: Xuan Son Nguyen --- examples/server/public/index.html.gz | Bin 1260534 -> 1260624 bytes .../webui/src/components/ChatScreen.tsx | 78 ++++++++++++------ .../server/webui/src/utils/llama-vscode.ts | 12 ++- 3 files changed, 58 insertions(+), 32 deletions(-) diff --git a/examples/server/public/index.html.gz b/examples/server/public/index.html.gz index c7a3c426b623c707bf17b900886abb3a3a57c8d6..d0e6da8e4a1e0646ad83f40bceea6462506ffc8a 100644 GIT binary patch delta 1176841 zcmV(+K;6Ig_e{|EOpqiGvXf4HDLYOj8&G2+_@ELB6$+F{^mFzBxcTKI%YB&usEJML!NNKBoCdN85U5bCn6p2EQ3<} zsgtDA;b9~l2@PT+VI=rK#xuzAId#VDjyr=9ONKlnj^MXkI5Ncw-QW0)bMe7-GCq(| znmAE{@nu=5GyT5I(h3P=fCfpgxas2LY+v>-0wnvAEPpVMp=yz4St_Dol)RfV;runGoW6UMBaz-apJx-^7a|oN_ZMAA zJ`AbHi6n_n-^td9WghqE^S{QPyT=k(PIo==G3S0#=JXCj{B{LrA#9zoGZE30N!VFD z4qcxly`%*@$K!q=TSG2A?)&6pwA@_JYK?^($3dE8(vdVw2l<#MvNaH#N#2h+ggiHnk~`OL z2_AonGTDu8J%8C~$X1q5V1s7%x2G3;B6ys#(Di-6WiFELZ7Mv~P6|i6XzH(=i`hAk z`9P*(Cyu@U39VfU9@AP?SO3%ZBQN#43BPxaa>=0gCpQ@vQ!c!ezI!a&h@p+tk8ZuV zh5L|FcQA_L5C-7-WyTCL#;`U#JWfNN`B_V{;V~QzN`Ku?AC5n|jYbAzhj6t=jYi~U z{=#pisu!La6?rTg3)b2s%WtIMJXmWl{H~dIC-G7<^)Y>1bk#&TX)8)3ABsrM8VxTl zL)1#~N!mgnCAc_E<7lArR-Y0<(^Ti*CGnz|1z6F?=+;YHd}8(reABgd*Gvw~RQHz4 z!N^NnBf)P;>@OC+rvSh&QwmF*-a5lLynBHTe`V1;1tx_f- zqHJ?=m1MdOVIZ(U#W>b(R*OB9uytPi{qKKs{_gx& z91VDqap#P)frJcX% zPB@t(~R07;tBhh8!+s-E!#UNeEZxh&x9gKAULgHqD_ryf~qX zPapRB$LEv61Sfx??7kn<4sH%k&u8PCG;YC>chz)o1=n1k$Q7wDw=4!Axgzmau%Rv+ ztw_$Khz9a{oQB->NxG_bk;0gT>yvnO@-zxDIa`s8_wxg^K3LVcRNONWUJHKf`XpS{ zzdW13dbmEBtVka7aq9YHwjwEJ-)F8*h9)VJfOVy!T!()L@31%yhm_JJkK_4VAEU!H z>ZW)k+#VzZ-XR?}8i$v|tNHx!(*3XhG8MX4#FU}bHMP6F!vMu6LL&# z$guAnk}-cLyZ$3=U|7>hMh^)F^a|23S7E_#GkUDPbL^;~yYu4J)pg-$vtb4VTPU7@5MxTvM~xYPI~pk?f9V4q!T1 
zNyr!gLz`?gkfpJj6$Vb$kf2nPq=hd@l2$M~kOv?2Aj(!KrGns?+3U9WT);(R63GG@ z!`d!_8ij}-|@L4Ofz?gtd;CjALjDNy_o&C**RGQTu- z{bsA_H=CsG1^wn<1}7HmFn3E zD*>i0bq$VtI;)T7%Rs=E{Ea}6Sls>=g;m69qmgpr+qcEy1>%k|Z8SC<=pJt5F@HX@ zkq;iM9`hLBLo>#a*2s{4`s(z#(#w3M$yECI3WC!X8T@N5K&dk+j20j?e|Y&8v2mN z%YYLBto+=Y(a?>@jUj6&J z)%txso34%9roZFBQ|i6X;eRw0SBdYOC4_$Fdb|E1MzG5Tln!I+&EtN`EowQQD#u+j3$> zVS#6;w-`H#vk&=Pcc^y($Uxjui7SL)`Rqdu(RZc|KXY*` znm&TJ4mjxzxIn7O7k`Upeqj^;xfYh5yK2+Q%A=St^>xC*w1d^VYFA?;yZi7pE>Ab=N5rzC2f8*O>lw5HUE~DtSt^ni9 zK64oq^BI5Rq6@_js>ALYB%Ta15z!(20rv;80EqNK%Pf=>cz>UTQR&Jdc22qXB-+ZK z%@3qE?t9)21`R~B-SPa*YUO--P79e=S+Apn*!cE$VFM_xdJYwXPrxkV*N{L(!KQ7`AyVaHF+H@sw;$sxA2 zhLup;WF)oJ+r+N(&ns@%=6fp!TSiRmQm@F<-&*4;JVeP z(T@uQxN%=Q9tN=Dm&QW?(u!Vl;t!lrq0jd|JFS1kiGRDK*yd_8ROl|JuX1{qf)epr z51aRS-+P~Tf*^>Ntlj>7LEq<{B@ml6&OkuFHzPq9h~DQ2%mATc51(kO~_{nG*0Z-$Dpox2*%Z%MAhu?6;u0wr&tk=K%m5VgjuLO+&+7#OP zecpjCEq^R8YI5?jPY5X0O4Dsa)+m`nG z+S-Lqr4LefF>*BizE*k3Q6rI`h5868bh^{KHRT^~ts8zHt(PaD2%YeCo^G3}g7_uS zHoqmTcFta`DX3K!YiL_gkK9rHLqv4r{}Ac!)_;$KCioAHfxDDm;Nu5lN9T=tMf_sCC{zFP@>?Jmlh`A7F#vizvzD0q&bD9O2kgN9!J4~ic0bp&(L>`_W*S9GLlnIzRAaycWW^l91L`9_e zgPL_&)=-!st=0!RIK=?rkEOXmTU{frZGW_5>b%)cxn{2JQo|bIe_42AP{pMc^m^fC zvU5d~xgUCOGuq2Ba9?eY3qufbAwTUAe@e#eOFTf^@dd~VSl=&9%U!B=fk=1tZMTR* zQlEFBKj)ECQ;>hjOCNW^4VCnU&(?Tm&Mx_Z&{P1V}F{* zddni78k5o;gr=xA-7m7gp%UtM&j%XE(G-d)=mYMUgGfPuTdAPE_2MzQTjWW4-rO`> zt=FFx;UbM*cXFQPp1#=GWITOy2N1_rtMS@xs^!bS66 znszFd|AOj42qP0*<5tb)2m14mAA{k=wU6-PpWpnB2XDd4p)8tH%iZy&TYshwFFKZq zx93JO@a6lr^htawxFtSzy_imD06+zCw&IC!{SZ5eyP=U6c~1vG67IYa_{o4^*;H!^ z?D>I0AqOH&G}IM9#mnW27Yhh;FDOFpA2M)MiyVu1GqTu(s*@|-IZ?j&iOf3fums4b z+`tGsZ4^Ai7#kfR>}(?58h_dzuMwf)Y&5r`%0uBV13H7*Xf7TRPvVxvgnBQm;J9Zh z9?T~5UR=FFyYEd)dKe_GtN&ge@AnW({D2wCS}k?g!Q0UZ>3NO8e&5865pEB_^)E?U z6csq8LWTTl7Vg4$I`{kCiTnBHuKqiC$+Llnq8TUTKr{QMDS7%LP=AUODp_Zy6i_8H z@UR?+=!7YDF&Brwn@#32Mq0QqCv+sXjaHTpnH!^~feV9V4S>C+AjK@G?P*E00GI8u zzieDagHCW8-gRK3871q}*g3S4H!P~j8-nGHNN3S2D#}JjCe+Qm=Bt?J3t)U{Ogi`* z2QB_$Usvin4Hm#Z0Dm^!*&JiR4QfH`;#EP4Vi;umOcE^4-3Nf~6pcs;8be zWg}wYGYmbw`r5u+D)b37r8Q*-HOqS>YoaB*-{)Y-umCAA*?*2dk-xue5=vk`h<&6-hy;bTBAx;}=hQA-h@6uO1a%H|8FQjD?td)!9vRaR8PM*H3 zz$18#UWjXOL4W8Bl;TM0l%Nb;aG5l(z9e*AbW$z`rwf(%_2DjEEVw@( z8PK4YYpVx+bH!)Zd=8N&*Br=aen`9_oek?3ojIWwi|4M>46P?uST8X=*%g=WRWQB% z4Xt*H?}oQpKSdteK&R0Bt5s5z$Ne?$2~)2(hIJI4@vb?GQVYf;V6HnZX8tvdS|T=; znwv{rbbn|CEx7@f;|qS2^_t_-Mk`a(kPA}bbg4{EBt3u++;Zwd7%nj8zp{y~cGHy+ zE04&Or??kH31V1{teJoYhJUsvhtA?+2@#stCt$3o8|d z9k@Cml}vS1CSr*wAVzL7OSoAc#F4c;D3t&e+uDh4!Jj%0$-`y^7=Hv%kBD6v0p6vs z>VF&6?I9Bi->->h>1;Fx0Js@K|JPZ!0dYz33x*=x1rR%2uj)n>EHKt+wcqpltb2M! 
ztge=*K~y3_sYJvCR&Aubvg&IkX6(`lEYpR32a)R(BxuyR8WpMLh}l~dZrQeM|bkHSY5u+iiRr#onm07uyrMCo^dd%EN4&DSaVH$d+k$*+` zr~%{nS=L^Ays+|j(11NH`3hhri{%pjiDm=GCJulCc0D19XT^NK4{+s1v04ywIo6w$ zSuv+01~+5IRFyMK7}yHB-JWwl; zV|n5dgEZMyrw;4nAH0JgqU0Ct^^EQf$v zSK4hebL4f;zHc_26KVHVO_p$|Vz zOhi6KAkZ*ztEp;svvL-7~;|33iD2zC4UE)HPgMA#hpBjAos3HE$F@WX05->_kXniXp`4>KuAOC zF_VoEkba=5`JaExyv|?9AM;nQp^ECA?I!U9?HpJ^PKNy+H@mO@Xvtz?LSO&!7yF?7 zN823Ug5ba$Ns0$K&X^Z1oXA69!NxJ40o`>^=1XcJg`7V zB7;_6^+D2OMw0waIJPw)Z34g#5h>(?g8q$-q++L9sq(bhkqcr1I7 ze&I{NH#?&9#A~;SfHyacysa%67Z3|Jy*Pnscd}S7txs62Pk8*0Z%-5xCuE?W6)4yW z^zT4+59;&{ZhuEDFj(QlGA(a=;fDgF)r3$Hj%-;3@T`cGboxc^E1t<_QZeMQPk*U*>>6gh56zn_6nwOB;D`6B%oY}9fDK>8HXqu$f< z2F7&eH{oCN0I_J&W-qVU0W*KMypoqr|08(tIAzGHN@5PomhCF!ODo>hzD#9mtoIh#uv+$}S676&njXgTkF|7q!; zQFxyp(me6c&MSAset7VomOePiOb`FGaP`KU9M=|xWejJ9(lj6F^~sDlC0rVX_=qFx zpFzeu`7wDS=&Z^b=4C?!Y(4f^Sk{h=IQ#%vqWN(2Y}&nt0XTS)Yn)I zu`=PiILJlF+7rYSt^M$zqsjQ!+^=;XvuxPssE+j=BsPn!onrfVN>p_zYnFp)Cixg&SstIpJ`zN3u*v#y92h{l!;D}U`` zac51TYk*cxJpG+`dffm@K()U~VLzR*@2UHylxBYS&{O!t13vV<4=U>BK{oM|D6)Gv zSOfgi%8K`=0rNZ$8Td@r?YNS4JMJtofBLEC;i9v8Hbu9*nNHZ*dC+gBO+Pq4J?l4D z5YXF$=<=9NfX1RJ&ZSLSFhsC~F#bc>rAdED5Qq_n^E`lI2^Ymat`NWCIrYL>JZA;z zEZL_G&Ao*zkh>1tV(&#;ggsL(2;yLngD)oDP3x zqd9Br_Iu^!YG`xhe*XzESzrh@NvTPH=Lcv>S3bCdhqZ9YwYOObL$SayApY6OEFdu0 zf$>;mv9n^{YF%uwe6+~GV*S;7Yb#@(_bO$enfNv8)aOJpvMKFov0MV~ujbaD7V_5e z&6)pv&fu`NmrSWJ9}2_Ua=*yRH4uMgUaSN)m=JEE6ldg(jZ!CL7&a3TjQ?-85r9WYM8EnUF*Orn@48waM2kJFnB z?r(TlBw)q5#F`~HaxfSXfU_0sG)8gy#B)Ipx>&7<>&KAeMMQyklLaX|FoJ&#j^|)5 za*7ipP;njzd$=%c(*b+^pV9K>5C4zXf4s(xv;6xSv$TQbX3%PRBQ^l)s{tF#zME4h zelcrC%NzgoYi;o#(dKJo>>m&?n?O}$Ey$s$bvP(wzpHx&8)L3D3-Sz%^IcenMH>Kg zKH`}dBEUQ1nE-l|-spU%T4a9&E*)cI+QB@ZGE64H6<`JOz`+Oh5`I2Z4<TPQiMsMzlC zhs@w7MV8Mfd&{Hs9Yn(O<$fxP0l1VrqFFBqEbJuFcQ#uj3GExk%KkNP_gbmp`fz*fgMBjpM+@FKVFC!X?<8xm!V&n86yA6AzkNsgUEjG(hb-miXUdt)^f%H`tA)<^1kh)& zwN3f!Gi1>n!>WIxx~ncigghl8(M(SS!BCNVL<4K8FT3LG1BB965Hw;xYBHrjSd=7q zxOBSLgOns!+#xhU+FXb_Xc}%bAyCXnAomFB*+Tp~1;*Bph)D_^XJ7y_WT%89#P5PO zcZ^LRyn9~fCj!$}%IOSl*TyZdSZ$G`S1y%N=vCkZ5JP{6k=5gchcAZTA&~%t9Y&s7 z*aG7CG7X^5)$Xh~o&k5B7;xA5c>u6hV^j|{5Dt3r=J!z;`tmo*B>pw$>m^?z6>749 z6prAk6JEc2ezM-gH#|LgSY#Pr0$jmhFFevY5oH(AYIU+$F15c9h?g?7%0}AAyIueb z)rovq6`X%3NX5E92`o%{O<)+%Gk5~SNIuj)1Gx;^l{WzHFNC`6G+XlFYa}sISOW6B zWQG#B93mBW9&dk$Qn7}F$XMt+pqL8>T#z0BcOg`&QjmBSuer}adRQ0-64bv#-eD21 zW8x1)xQ=BQq<>EY5AY7~>0hG#{x0(n)+TUH0&Rb=Gq|BaP6(B0*$sfliO7(<@Jyv8 zl^aDF`ypvMe_elBC&gM)ZQ{I+JD0zleg8lHJMf)5olLvqK#*)3%4s@K7y=}u?$1X4hp)&wC?eXz+h1+ z`RIR;ndAcSG`Nyx0j)#@{k6Gqz>CCh)cC<*Nzh(9(2t+>+hIF+d>?IM@{ODIKrt5G zer@<+W@qCa8?B+X;tK%Y`N$gQ*n|4^OBBI>YJs`ThffrjtB!+U(Pu47bm>c zGOCo4sLHsE`8k~001+&I{-g#L&N0my$T=*svNM%>v%NhXS5L_z$Xt3ZkdKnifE;d)@aI2WGDO_{sYm=J zUuDhu8XO3XBJayuu0I(kU}wlUz)!%~WbHVH==#xzv6Jl;4JsocKMOVy_VCG(%&@R+ zK+LFR60z^F_VHqSJ@Tdy%s6oEH3NUNiT9`eRQs={HTZEcU|`Sh-+EKhSK^$YI7UV!>jD6Q1!VYC9$3~Z?9Pq{& zZ=cBxmYYb`%~vAQLO|tDAN5a>Wg%ueFR)^nH8=?nn|6rvL2Yb6~=~p%T1w%!a}iB#PCHh8^7-ec6BKLqeaNRMO*) z(}OY#G{|pNq#%hIv$1LhDpEgYn&|DU|D8IL=^#I@Abws@F=iI`Vn6JtK;W<=j!;Z7^e{ zCN&>{;>3E=+$=xD8NNVec5s+4>Z6kHZ5oBc0px1sRv89)?+~|b#7xCBOc0D?HN@dS zMeUEjQ$qETYy{O_3!G78yi4C#Fs~k@{-m&F(>5d4&f32HnN@vSdc|~uqtJrmoineFCx}D^Tj{#EeijT z09tm;N-7k;(J{?tkR)HSs!H?Szd@z3257G;fy)%O8eo5{!lV(6p^EL?C&?FtV_jc+``&k_k4VG>rh?EEpiBs9pp0VorpiLdT)#R0>Yecx4(Sf(W3| zl_^eu)wWbJ`hWh_?~x+`UN!hP6)p&2|21zgyA@9c%T*S(C+uiY3&miDcpFD638Wq} zK2W4u^do;>*><`_P%i0T!abp+sK%o>iBeA0n-9;dZ{72I-mz#>`^G$icB*^cQLL$$ zm@8Gr-1CU{pERNBY))N<-zr_Mk8Nb><3fE(`7E`Pr#b2oY!|tpiGYEQGiD-{v@`x6 zqHhnn!Nqj*Cf362s*WO<(ab)Kh8MEt4Zx{~!q_nRDT22;q|n|_iaeHhGl;hpM}$yOPrL(usO#_>&8n;rJmA#x#%xHO 
zTJ_5&ui2b{5-W*k)es%aR>6u!oZc>O@44TMVrW;0tU1&wN}7nk#ly=CbUOPS6~W&3 zn7)4ym=vF+!$|=Cv-F#AqDdxqsc zE@h~EGy(yf1e${s>0%!pSpw||JmXpvfN!DO+wpTf<9qG)PL7fRXWk{>?^8}@J26DI z9DX5}FDl5_+DqALncv<8-X@4t)i9Db$Rp34mO!5XkMwo>?)cmXZ-~UhX}Na6HBo=` zC3i&J6?bGDKUA%l?jF0+T^BY1p7}8ITX6o;l0<%#60xeBzS}6H5tvFrl3{vN7%)m` zhowx9=v_4?rWu-)0H4yEoI6jpS|j~1nwt=(gfjzBc(akn@z`p4(hgPh3+^cm4i!xM zfVSHsE3!~KTh%N2fsg}ELJK^Ds^5Q)Rn<@Ec&E?UtzMwn)#z{)cJ4r+#DfmXN22GC zlBR*eR=?YLfk;RY2+7(B@DVB>h0%DsO4i(DdzsiSoC$0=O{t}6IkGZN*`+hF(8xDS zoul$b=$sMpM+eADb*SW82<*b;A_Y`^%>b1#YKAxD3iN?wL8 z^;WCV2qhE=aTVe|{`FxM-A0&FQk;Q|e+qJemB1x5%AgOSijBCcoNJM7nOJ4Lq#_+k z9U67Brmy!Xq6M*3Nk)vf-SNEXt6$JV@(9fy^0zS-sPYma$p6+|d1cYP!C$76=(y+VAh2oe@a=$2D z+0JDIP`y?3$j8vICm87RHSe5^5!aQTWEdOd8<5IRTw=abVr6KxAsV8Fhcc(16%;Mz za{vITi84O#WCXh8Z5*l?R-WOW%j_<3*QLgb*mXI9V6E1$LIyH6zO#Rh;;#hruG9t$ z#nnznjsa!1wogxLc8g=n{Sk3o-8FF#D|=*O#~BkiN1su(fpves_rwo5P{c+W`8@;( zg()TQ5q{cv3io@@Jpr&#fCWSxILts7fM3%41IoZySV^zduX0X~{%kCC`ME!?sChGN zy9_Q#BY<4N{%f~Cd$WJ?c63ZCz2G#_0Z3`Yvvb&=?11DOz=g>64M@;y{kc+_so{1D z+&@UAE&;>Hz6}&ZS9T=<`(T!?f#zs=w*h4fNW4)x?)R6Kf0~! z;tWKWyvHXRLfd$sD@A>mBf0qRYwN`CYr(1miCB2w8*mwLW&?dzH^;yL@il2d8=w z*Su}|yFcXErd#d`x&TbrC1D6)d*t31EMPiWVsuC!A;pU3&TXVB%Jt48-IpG!> zN;}ukcItl&_0}Hz{-hQyfeGJh8&qo~ATlw@*ga>HjrSAagj=nT8%#e&`jLXSv!{c< z;Uf8(HdWL^7ys0fIzadAY=BFu_@S3nSUb03QM?ep)S`$!so8k$Nh(GGcHbzldlZ84 zhB2vwg5$|?0JmC=NM+9GUjSD&q?FT(fDj8{$7p|)9z<y>zHztv>rDn0wb$bkk^1~Q!f@_h)Ad*-avm4^MihFg|?*Ovg20n zRvS-5(b6ugtNN#lw*!i=5@%0du#kFQz+xg+9&I??@4di6G{~rGd5ZWVT+N(7vtw!-H2;et{rSk6YlO7hs@i#z>@ubN9;Q?gd&tV+J(1BE^K314raIn zM7(6kwoZVmremt^xm9-cBS;X=WNS+r>71fskw2s<#X7zaSoQq-aST!!n8+J zH-teXc!hcc7>$pd^dHOEe@i{RNtEo(sKpR)OMIwMWdsq5X@G>yy z_+_t0O+}u0+e=>IK+Vw&E8|uwjbnzux2i;X*fP&lY^N)x+f6*|i*Kqn>Pvr`!ub~E z>Xo+ty3awu`pvoyp08{5ozeqr6a}G-f9pM0K(&S)bB!i=4K@K|TF=mlGZywkfR&X{ zcaD4bCijLAXEnq)FO2%V-jr5ae{LqL) zU5dWzT%YnLon@5fTZZu4QD1*5En&*}4$--OX5;XFb}hLeu_f+JW(1Zl0K6d3Y)YVu z3t-^Ys|jO`Mr;nnRlPoltNPGX;+K*+Sx0DFYl7h#(7YpNM|67TW0KgQa?=@V;DKDz z2v!=)o3vUwYN;zYHSI^#wbL`h*;sBGYJEAiK3m6q|M?F&_8q5x{yTq8o$oktzr*QC zYV{GdI>;I(BRlNbi7l4)4y3xgc;xH+LzLxw%~P+5h<*oRfwo*a>bXV847vqyH`wR1 zXeTp%dJ`4`{ppeD_Q{dQZn!rB+9~=y|DLGWoF6yeu+zT-v*Ana^Dni);|Q<`A6)ad zxZJaX9xRXw@<4bak;Z=&+Vowr&eH^9AxI~S?$(PHov@nNb*qU4Z<>r&EtPMKKAfu{ ze-(XRd?WOnj_X41-EDTp32H$G5U#BG5G;Tq8hk%-l;zRw=plQ4Eth#I7#kcA+A-(^ z#-B8W1S_^xM{8Coe9t{Eu@sEU0s1LF*Qs-RDSWOfKxG2;FvWk`Eah{eaQnS2KiA2( zds}#Fn_=eCM-D0(k(LWus#~k&-2ja{hH37;5wq{Cy*TaU`m46I-E&PT4$&!2ll~)3 z+z?G{;RL#%TYjSfFZlVTZi)-L0To_E=N^WZyhQAXX~}DT{#ieM4Ua=Je#W!~K)jeK zOR(FNtri#^K*fK-#ShKGvZ7J;Lz?XP+S{Rt05gN~L>^K6_jjN}k7V_*CuAxAeDC;Z zP8Kgi{0IC1Bf}0h;s^p@mVa1fWraOBn3h#lP?r6ZRRJIFF{`icdQZR*=-{|KpLmTk zfTE+|@plJOdo!XPUV5|4V8ikLu5&-3gNj#B^Qb8~Qb>O;%5UH|gBp+ANKt7k1esv6 zG;p9@)j7><=QP}*1Q7gIto`T>Z@7Y1JK&{(e?%l3`Zr~0LFPJWsqgKWRRkTC+yJAJ z8=&F+*X}tRxtmFZ$AL~#+WuM0C&Ko8Af)dNO5y`|eE4d27EV^4dy3Fq$?i8LPu=sW zZG;hGu=IZl$Y|zG*_(~mr6aNNtgXcKC6z$OL>bCMp6cKF?c!iw*L;>-L9xmvEt|5i zj%s&u38%wy0OHiZGl%nj!hyNtyb99>~`67S&QWIzTlGcOXAigz(E5Rhv4kX+F z0om^xA2*&fF+-X$J&}Y#d!!Oxc|eq`X0jnFqUtYHStaNEnM6-~rky#6*UghPnZ9m5no?%z&Ts zo-ls`fwKY3GFZ#L`XQ`i9i4)dfJ;HV;*lRMQCe_P?t*75s34|atVwx)iEF9Yh>9#O za!)ERNOLD%Q7(y}H=ev|TkP}~;2|-P#tYt`T|j85APt%Ovu7&gHSzsmd=>BL+cosh zUU$h~ty+2wT`zR+n<|r5>z9|+Q?}$IpZ9-k=#Z}cv+ei7wxHMojSESfGCv%Uz0AxG z(A_z@p@#7PY!22+&VeCo4FA8M8=QHoHG~Wa(z7;_KYVY<%-kwTI@cL0y)IGcL192T z0}2j!tafH!p_K29rCOig^Ip51a^hMjHbS-TmCq{Ijd~+1@T`det70`R)65t`%u#=d z8lxaA;V%)94$bx?zh5;3l{%U~q?XR*t4)%?=*xyr`QQB!MRg6_X>{BlYkGU%8#CN4 zd1yG^@4c6-`0Po_0AB??DnIPg;Xsp{&!Q$jFCM5Pt^pdeSqniWr7k={NQrrCRFM-X 
zA=nr!M;llw#uPH50MpS!ErFJ4Vx@n%^2Cb_Q4p=+{csoD0BD1DL?K@tAJX4kmxd&t z___u5TgTZYch~mzd3n?9$Nn!oT){AcT!Z4KgERvK5%V&OqEIotIDF4wx<`865a?jf zJ7+FRZnAygUh{9!BVTKFnJB}YvWt_4B#V%DM)1uDdZDn)`bB_12+3!VuxEe27yQvk zdgih!)YFo`6WrrKfum|r-1GH`V%&iGfSa>P1dq7jFVcLM<{a2NE#7fM5Ds+V<=vu< zCABkOft~aKxi4L&YN#`oDrr@KCMQW6inc969 zz&9f`1$&+b(zJA|<=HQ=nIeB6E2hOjt@;GmGy0)N{4!e_TE)`aTDMwm7~$hRXmFUpHY8znu&y~IIeV2 z(u`L~BVW+(pLMPBOZI#_$?{0l`E)1#Yi>}t4c6ao1;p_Nx;ZSe(7LbC1GDsTbtkJH zV>2=P6Zt+M3-lX}0`MPDl?G!4?5k_O1_lQ!*=s{*0^LO3I<1MCL(;}WWnK!}s@f72 zvt!J@Mq&jTg@OWA5W9b|xYha^VV;az$bNK2EIBE{A2?{MpxuD4%i+eUl?Gu)P%fg|BS5ZIr(sI%qS=Wp??C@ani$^584kg7J}@1- zr)gm~XD{x1h5NCsR$HWS`Pg<0z zGs`cAJxr+~h40{))DLYc+mO|@s3Nzma)|3lZGB;{x{%;(M>P; z|73LtriXC`52OjGS-*C~-+5KJbhEhq!q?09Z`qN&=_oo%4@KFiBpxeINV81o!TUw_ zmh-ikD9;K*d$?N(D#Q{3TCF?jn$;J9P0f9Yle47qmim7hFSqMO96=D{R#JLBU!YOX z4l1N^$-`*PFVp0Kr}@@VwP{n(YBgyc?#*3qGAULxDP}oElbrxw-!zLMRlJk6{;ZtS z9*7U@Z?@cV)jo1^LYY*3m9zt3J3jyJ8zX&^Bqt#+j9h6NFN^f@rRiepO-|29@AP!i zBcs{m^lX1_UN@V}jjwP7sZ>2eXED8oi5Sceq#mf}Xu*N?!S?MHH7}@b87H3!J~%NZ z^axGb@KqlO3Hky!S%N+Od zxF=&H0Tr}>=IniQ9oYylR3oM1*Od>f=j{e&f-EK+`g z%)*%IN;_*%;f34?`#r2&d_hKxFGSu<*gQ5^GcB5}R@h)a@E3|xJ8BC&68=fE9g^ON z{lE_*lPLN-Wau7Wv|1y^pm^M73`)Ds&5f08N$);6kTIStPO{C-sr^1+c0Kv!;?4Wt ze)(~7fv*>TyVHk7ifv{`lx2A5=;Md$Ch3WxahO25&WgiI_9_lCE19zzI-H9U_M$Hk zJ=1J+-frT?T^i7VKenct9aFahrg?$fGLsHmeB_8z5K4;BL$Rhh40K``C`7^<*9S0! zyAQ+<@dE{osF6E>w}Sgos1P16Ak+vR)c&%-enZ05+Tzl^ zvhQWnI;pzV`uH(d6X6p)i@cIz+4cw^a!yZ=mD~4fS!6$Q!9AREP)$#kH%&B#2@S5Q zPM)EE6;bW6j}VVqJ&EH$ChHC{xcnecpaB2;8(7zRI@r9``YA%DaaNmJTQ~xGKsz2u z{(%%a8WEdY-Usk!ly+{w_>8X=QNH(5QF=taC!$FBfIx(Q&21V)tq$WOX;Qcs;l;xQ z*dt)G1NL9l1$h2YZ(#l(?%s8~ZDUy&{69}Y;dr=# zs6mms*#-^s*pd}Hkz_|wlqfVKgFur+1fm%L6iJIbZ#HWlXBmKX zBU$Y1-bLZv8TZrfO&|3RFrw>bGVV*lWw6_=mLVGb7FXIpGaf1Rp01oP8W`Y(N&^8z zAPD|v%p>l-uzg;EN3l)R<~{j?Sip!|jweY$rytuUS=ex-zcs{a+{`ZL8%n?m4+YDi zDnZ>ER(Uuwa5nB= z_?jPh`>{Ng!)eZc(8Y}(DQ;{Q2iD8RvRR@WX`x>BS4nxnN`z$Sm9csSQ^TuYGHTI^ zNRqPqc*2KQDER?HN6--VgK^&AmSIXqH{i|J3=0bjGKM?=otl@#G2YvPln;G8mlk$F zijuU3C(y?gn5ObZ;O*%-94lePs!i$F1V%qQ4?2;Q82!8g_1~|TfN%j6e+~FIOZ_zF zY`ZmKlW7uw4vi&_;TEZwl_$bfJb6Il)m|4ZOZQ|&%BZ!v>he$QM`FkZyN?z3A$bZ0 zr>lDuH`g?iVv}iET8=I@40Ks8*ZCVz8vLbR2h&pyQtdV55so^l=Cq|_8zZ-kzwnlG zMt)f?f3{8Yk7&JFb$yzuXU>Z}o8!=xj>Dgj;@};DrY1i<*;o9ibduSB6vlzrFunv^ z_JeT!>R+|FaqtfXG!b7y@n?^~NB!bwO5%w|Cu2|>5U>B?Ui&sI=KM(_C(LjIn^81L zhwYBjAzCYv9%hTVFYKvgmwIslEPrR71&oI!P3l?}36dep;$k%Bicf{|UEM6;r}8#G{PAq&o}Hf-4#!DYDLpla-Z} zne($FDCEky46;pgDUBe@$$T%Cb20aSF_!Yr{98EB&BZE*NdnKhdT0B&*MF&mF_D+X zoDX=AMg_g@Uj-~{BIelZzR5_D^@y}H=yiWgF@dy6=yk7CP6%`#jWfUfS@rY-d%UOI z5}Pt=r?=Xu?|w<6&cqt?CattS+qrMcwJED%ap!~VCnB!7F>3^~hQf|msVXnAi z_U*R{YsdLUtkp~Nt@ak}KZrdOR6Yz(*PMt*C)c2f!G)MyC&TpgT%i`fjc7)Xk^t@r z?LSnT^5RQ0Bj#6O(6&Fw8hQR%P-lFYghsmg8`c~t*)Nj03&#^7Tn_S(ra%k)exZ0u z1uN^ZnQwtmE!vUgjemprV`=dfDF=Zy0}upAE<61~`5f?~SV45x7Lz9 z=0{9T!n%u{5*{>P{M<*lz)J#jd=mpli8O98!KZd z6@zIG;+8pvU^P>RUi@tM73*w}w8)#C4c*`#>|~8=gCrK+rU8(dYW+oRZ6rr50pwR& zGnQdbTeo$m)pS(PVFCP17yfHzmrh!Ws#e8m{`wACQf8Vp)?{zLkeQ*B`uGlLuFKL@ zn{Q6ZtTbD~Lx0E54w=Vmngh9RTCU`2GvT}hwTx%=`c|&QXTpQks&XGwF6L3sRzKsN z99R9wsr|0$7=ft~I5uN0%)t95zd2z1B*qoPkh+#{l?;-~=_M%gSlYL>f^(%`r+Sz% zDAte6o{1L+Am9RhY3TydR5TMcYSl#Z>LJKFKZ1%JzigODroRr40T=SH21#OXO z9owwVx(o-a+I$&=rGa7Ml~fIjCq4s9hHC12O%&l7<18UhGGEB|E*IlIOI}!0 zt6sO5Fm#ttP`%|*9sbkMah~;`|7oNor%m6!NDN_;K802f|+0;zqbB(SpW- zC!B{1hJSxdBuC@T01eGryHB#-BbS|!&=4$LpbOqg8~nivj%H_xDXvMhxMsOb8e{hP zof)3d{#{&@K;!(~MgdP^k{u(jA@va&Fy$5??_w6jSrpZmd2aXx2Y&4q*VJiq-PpbV zBYK=CZz0(qAP}PHD~WL_jA1-$9Omh zhE%*8D4nmt#pJ3DH$OpE@GGDo1FsaE9Ds8*Qr0;VEqRrpo-g^uFRifK#Qp>2)r;Jw 
zp!5Lb4!GQSO>s#o`M0;;*6U}+w8HZ<7=$SfMX5iZH$B^glX%HrgizxzQk!39zUR{Z zwtvvNe+^zA7t~OL_uqHkQ04S0&K&OzU5VfWTt#ImxOxCzG7o(0yeTE}S(SXmAl@|q z<0$MidIMo%oIm{`RWsA;l6q+V44vWn3rgN{sQ?zw#{jjhaC9CqKMXf*_tc38_E1=u z7Bsl6o-W(yJlI(-e_k$a^fm?7#(#!4?tkP}wk}(T`MJUy4EYPqL^Z5xyIZ7N(W-bO zPKr2ZJkZncY8DlR=7q4|wp_klF3W9syIfv1gau&n!J7dM(Z&VTi{_kyCs1ZsaJ~hV zj%$!fY{2K)irm6}^H}_NMYy}DAog!#O}NejttG``?t(3b30z-y+`An*^ctY)3V(@V zEcVB%)!YtZ`O@i0>W~M{#cguN0E-}qL2+A%%)6kaAoY1x1m*hqx98k=#b)}Uka}pK z=ppV{tg<&`9WN|CP0O0EuSCzC0>!(e=~dLteJ10 znLZM${W8_9OB{k$_vEUb8!VF;Nq@FLxeKt*8|F5+K5rCE3ny*IcsOY^!YLbU))#R4 zn8g`_F`2ChSNl0if;{&g_pJb97%6p=6^Vx_%S>Ko6Cyl*b>(;iz@hti;=N@g$QNn2 zsQ{xr(pT^p?koqJ86?M4)a87|c>NZ0>=_d3u@uK@CS{PK5v>);C&A3@mw&eE&2O8G zhRnzq+@t9h+^QDbuD9UUY{9K;!Ijy9EA;v^%|OIrj`AZW!{q34sn}{oKtOe3x^F7^ zd@&y|O9rge!XR#}&9Du#<}?aH`^L-VA>DU~nq>LW5Ov#Q#~ZKYe&qc@Nkse8;bUF+ zkb+&rqJ##Q7EM6>vwdVlAHyCC)=c;z$6y*Xj`G|S?MdM5;5DCb@p6P9{$Oq97} zZ=Q>cC4vvE4nTVZcj8iYumJ-Y zoT`$wxAUj0R10X!@!|+nhejJpT*EP{DiwQTa9(W8h_I?ql1d_iGbR|-RRob z7OXL0d2z<;<=rMvbua zLdv9ch(3DnFnZetSFUjgQ zVG4FGAF)}vbAMmyS-oz5ch|5Iha69BhnxrYLy#n1*>f!Vjl~&K?z3Z-@{2cr(=M|$ zSS|;q9OZQ-tTZ$$@XJM71alFyH4v{Hw|+0W{JRHsy~c>j8V1|ZpdnBgjQUvkng=NF zpk6oXR`gE$OuOJ^Ao+OZ!pI*8|+_iX4c%fQ@-nY23~TyZ*xqQ_Wq z$c536E~8_1PrsB@rhwED9R%=1^fo3)U17_7osu!lhTMKz?)}C1y>R~io@Plf@!m{V zc&?B+SAPzHKP&sU&~eCbA*dy?tjLiAeY@xXy=OaZK-Qx6+rW>e_S-#&u$C{xPq45! z@>&l0CG>|0>j%lzD9*dA1@Fqr0ROiEsz3fVXz;yj%lCf?#rK*+sZUokR*T}nfTG%{ z)UBzp0ap#KB`gq{r67RMgE6fIlZjV*@}MeM)qjFCsy%s7wX8mg{ybc4!r88phfSasvGLYBf1VGEALrHtGP!Mo=DyT}9Sq_ZNo{JQg((6&yK zV1FEhlgV$iKaJrXNGczn+AT*tA2rL)J*yr^qz0~ic(38+LM(0>$?GGvF8|n$qaLXN zbD9^ycw((2QjJsp0S_3pAuV1d$&p!+UjG4bt+l`Q)JscAKT!(c5Su5sh09f=fJVW% z>c8Xghlw^Nh2y_08fN2NS_;B?l`VRoQh&2WHm$U1t-=}HPTE!sw*&O=X^`YLMHziw z*iNUz--f8-#parCDpwAuC{oX$qkFXy+8eJIr!*LxvxvTnpo=7R9{BjiaG}mQi?d*Y z8zGMfVHr_}HF8?mVIz)6=pgPTK0~Q{!5_^g&I3S0d^(_VM?81bP*eIctk>sQ>R{!g?5oca?u{CUC`;kGY)wdAf`1W zk>kG%b;YjP2olRN=R*GKl=40m{xMv{q-Zeve=W8jo_w`W!<9RDYHdG0ic|2sej-qe4_~ynIm2-4^*AL2s5w;T_5j9@lKg^PAPae`$MSt)06VJ)k zbAI426-52DcgHnAfJ%1Qc*1HGpPcq1laJ~`)K1Am35^V-Q}_gW8LLfrBFZSpgP!0< zqrn<7NFZx(^(c-t8VxGZq$p#_${|V1P8Ip%I~A;te|hE~84>v!a#L&9sh$#UVmsmH zfu%=ldtPKN=+u8n?Nl7#8h^Yjr=5y7?OjTWo9xQIT(Dc&H=YbV|P;$ z_ZbF$ZZS@?2i~tHxkMOmfE$gBFPnYk{8+d%T6BY)^B^Ncr zqIy@*F_t=rhZa~VdRVDvQGfw6MgR|1DAExHQ|F;nOkOF5V0~PY5`PaCHrU^Vc3q9= znCgvsb5jaPge?IDU<>Qz89`4M&3FU6Q_9VUs#JitxrscMfamHq$6Y0y2bKZI+~#(T z<6#DZ`78GB__gJfXkQJDjq5EBm%CYvQ}C;@{I|Fm@kw+or(PHKyT3L+h12Qa#5iqf z#fSUr`IlE4Y`KNsG=JKj^6i@A4(u~=c`IX@eM?q;N^(B|b4Q_{G=jp0{T7B$+D6`8 zF1PaKax3Ho0vndFq5bLdE}l%_^jaZ`@ru7(d-5QQSV_v7b8Z|RqOSyN;pcKo>U1nS zWh$*>wZE{hZtRw#sL0{qCLJl>bAr8W#F4w(etb)ws4e-C{eO7!efx3F2{g_Q%+$hf zwihoFW$hF!!t(YF{iM6-wI8SCL(!rFHGFk5RnvyhYte`W*zoO4yrjEuTRmj>WSbx= zQAY7Q-JaJ?x{Ga01sg-Ly-i9peJ0k;_s#Tba~Zg^mgcht4ya0N4Ro#475*oOY^DR{ zc`;!uh&awz)Nxai>aCV`e#NhU z$*?tw~Uf5KD`df5eav9W+5i=ND7k+6hNNvE%WHhW&^z-@c=M zT)sk$f$Rihu-9jT@2J=TNjD)m_~HS*AAHzn^T%u5kbgvFIE+wtLKYjKLozQzd9UR@ z<&f?jIoepuIXN!Fw^~k{hu;hapsk8z?1-lqi}glK(vYu?og>T=GOA47H`;vBUss9p}<1HoK zo1S+G{(q})H9F0x&tp~dgsR|-c5ixQ2G7>WsV;Q!Z!7e~zwuvgZG`{{C?7zqhTKw# zdcRWg0jT?leN);0X4EfFDKg7nS{gUbx>N8YeoI!S6vC+20L*q-YFx3t4F}qdi|WRK zetVEA)En*FeOAF5_s z^?>_(w`X5~!OR)FL`GeVs}We|D&D6T^zVAz{?R|)vww7ypoaPnip>2;@*<c}h$d6fqctEY z*yQ#O3x^L3PGoQbmu@_WA23~Djzgr70WvQ_=t*DPK|*8&gh`4~1~U01k_i+5#C|j= zZh;NCO(D7jkkJ#G1_>{}3?Y+@<}?kdnSZ1TBLmbkO2dVjY7I!DY;lWjd?pEKBEhJY zAeaDDBxDqPBT(2!AZ}I(8KY!N9 zKaqzqEk-QztPk%muPid6K?Jz_U>)(*ks%H4QSF)t)@4B&$y=Y+9d13$JpP8dFEAlBI=E_Tt9)U&9fjn1b#2-DXoV zh6Bl=yly~SK*y~@-E8|;>JN%C1Aj0mgt)W}{*_A35)Np;rgtfur8WM(M{-ZY!oS$F 
zUnAGbg21e4%nF0WTnPIAU=~!DbXl0*XK$63BVs_mvb2 z3~siQ0!?7!4%w%Q5S|y?Nujv^IhHC=unn4Rn^6yolIi>hPUvn^Z;lU6@PC_#k8y55 z$2lzKC=Lr6$>b_%M9EA#ymE-x0LU%KJA?+xk2uB0wp&vDIFzC5e3sj0@2XyQOvrvXZ32a*!D%SIBs9egnC% zqW9&T2jB~5p2OUr^+M)$LR}{_$#2BMX?89nB&M6MbIBjS- z*o{Nx!17DB`tQbH`P`oGH$G(SK8|Q64A6TIasDHS@AwnY6LKHZS)2}!MnT~vWWwKq zNp{r>Ne(^dLC^xq3x75-@?XmfLy_cF8i`m6dQM@G-Um4-P84Yl zkP3q5iAuHc%6}W!aB^L!q35X=4oU5Wv)XVJ#5a-ch=r(1*nfaM6!N_)iY)F=3u;@S z_TmaTVBwIjCJkA_G8Z2oQj6I3!XNH;i(X}OT6C>a!XD5`6jeGa0?Ea(y@U!$2^DhD z-_TENJFggT#shgHjvOR~B*F=R1r>sbt$Z9?_>bjo&*C&VA*hIx7lp&V47JRf$!MXA@I=ga8&T`L7XI> zB{XG^U@4W`5!d>$n}7)#Tr{uQ+_RdsX03HtgY4DjfOyOz4^ZmG6!ryrPg52}7KuFT zY`1YxYwZS&gT_G(3#l~@+{VGE)ihzvTI=ne0e^5Y(YWdE|F6zzaoa;gbZ!kxhJYME zh27nGQX8Q9VR5MX1uh$>V7-eh%%c&+mT$)B3 z@UK5D3OE?SqiU4ZdQGu=%a{rVj@_0;q~9Me{GUNSIJ#3eVny!W)2skNz<1#yAl`F8 zynnr*1wS~&ty;_7gI@&!*j}wUZn_8m&U7Ud%57_!l*fO=G`fO|s*PznHAzYht?1Lp z?X!99v)gU;YBTp>w>EKiYxyXMK+$oX=)@LzVCfN1OSmej3{qejH`Ug$Aj+1B_E>_^ zPDFTNT|yQdeXU^bCl!kA;s1}Wse&;Oe}B}(uiIxu!NwL53`9J!JWKK_>vOJ9yo|}Z zp|EGh=yI!^HUD3P%|jYwUqRpQI{rSZ?Khqt><4>|-TlKF|I5i%vv%0nf4*BQ$KZc1 z#+)5M&f2qJ7vpwS-d1fNb2e+`7@&S4#?tdtT1-!$AJ$rpz5Tt=Z5-@Abz6<6&wt&$ zM)UAMejY&f#BDU6?>F{Z&x6LZr+e^El;3LX9vn0db`!U;`+V1Jv<^eJ)i~UD8_%Ba zx_gcNy}ics=B~TjIM}bjpH|Xn9XyA`)uI{)yEQB>aiJ!tF~nA=mesIjwPp>QmB>_; zf@)?#twX3P#OBnXVU6d_Rt?%Enty|T6VdpCYX54ge{ujqXid#%jrio_1gYE&2YkSAyw*#Dat^Uf z#F}R~X3!L9o^BncJbb#Z5pcaM7u&?!mvHoIh0rF^IK(m+o5Y)kBi7=`wO*R;=b>6; z&q7zW%f)tyFFt6l#<9)C?SJwn%9%P0U1%CK5z2rjV)OQSBNK^dtyi1I?fL#e?OEgb zo`FOmPNJ)3))iyNTd0w8LwSIgwf)B4)2FyJU9mQyX}pclv_n3jypf4Qx6L>rI{Vqe zz37WP-nZ;WcsyR)jlP*x(*ard+Frm5`Yyihe}q|^1PLt)y8GQ6T7R?FH~>dLxW9N+ zV$y5P(LtGQZyekoGy9dt_cfZkW z9n|(e<6zylb{qT84?d5ZHU4$o+WY*kaGR3R-1`6 zGLdfOkYD%k_1(gM`K2buDR>u^Qjsi(VrB^s+9=m@<&c)v1&<2VwRi2idPuAwivyQS z4!i|7*XJ!rIHF+T20OeUQWe4*`Y(r#0-q@~M3Q=)fy~ZXk%c38Z^?sea~MCGa>Mfj zc$y}}hoB-^5+8!gp)P+T$p*y{?GmIa9(+VmN2t3P${yhEq$}Z|Ub$8jZ)NUlZMwLm zf3W^U;TyD0K#Se}s-8)XdL})!y#8*SWWf+*Po}nS1zX~1P0qs+2<8JVelUEWv-rB$14V5iOB5B_Z8mG;ICV$vV45T~+2q<};qKQab9cS*-zmnE>#R?U z84S+2@9x*eg}Z4~M!S__O-YZL!78DiaeVsUR`eHS8JoHNAafHs__qiFTPOuQSQED0 zyDd`!2m9iEYp;L09iO$DwP)*lG;ICJF0chP_EIxF1}0FI@&d*y2P`rejs z$z{s2ageOH3e;hJ-?v%$LV>}p$Xa{Voh;;9eR8;g+X{bUAn|t+R3tS;HU)!p)JU5P zGaoAgB|2Z=(?KdF);506!4G5*#>K+jZ^FukR@BDxFDzKpFoQcfbb-*%*es{nKXR?u zYRRp$w0lll=JSGXkEA|#dt#$|QT}Qx_EYWag@9KDzey5*i$B_GZzXN#Ax1YI@R;6D z#YH+KX-a>Mh}=&bIeLd(n+@pX!2^*rh{{$g=z+i7DGL>E9+I#$i{EX&!ph_%h(rxa9N z{H<2UvOKG1IkM5>y8dR*-}$rq{M{jvux zfN+08R=5Ca<0+rMY#J6UvkQ`vSXgSC2!B+*FoVihxu23)x?RjCK}h}BS*^xX!x){) z_^@i1B18#QFfRrzcB>BD?N@ONp6sn~JuvxRvAw;`UMM%o(m68dV-8b0CA7E zIgh(P{II=E`(kPSKJd#%1~&U17~d?ef}zx0vCddDP2jr3A(`qD*p0V;A2@A}t9PRsx5~2# zZAnZRe;1s%R}WIjCs({A0CR-HkTkL{m@Zpe5f!r}er`yG74WC8aI;{=*6^!o8oRa`JS} zX>M-24=a*N+7csSiQzq%B4jbm!Qx)g=LUpKIEfuu4)e{3ZrmdY>O|a)fYyKT2VVo0 zjcuw)K)zf8VRHy0Vr{{LvC8^@S7#NFHnA6jmPfL_q`>-XH}3ffNxSLPj-8$#^Up+A z55QHK@&N+Krih`u-1n!s{eUpfnkR93XAz@2dhg?rp0!;FUgN;2&`P35$a|Qd#_8Rw zu$Y1(6s{%?$-)Z|QEJHBj1GT{U?mn*kc{9}T$G|g$yb^gC%5IkjFU%uGER(s?0vZ( z-Rp#Ra34p9$W8l%gSsj0XNIQVTp21;Vl~eId<)+A4I?tYAeEA!l}cT zlOWYW6u#x&L}cejUR@P55lpQ{uNN}SF}_I7jWkjXLbTyHV-qfyl37r{S1J&yd~fEH z0A#5$-$?=r%rei9S&Ul3{(()V6P^7n9{y##a2utgf2NDe5zC5jS{Q9T1!+Si zn3%mgMc3eBcuv7KoJxGgq9Bp=oxxLt(>SFiz$FC>ifGbUfZt~^O$)ACuS$A9OVyBn z0L~TOUnq}60A%^Q6il{=hJ46qp6kH?Z#L*BMTHZ3UqYh``uBeneJ9BjETMcZ(Vn=h z{DRF&=yJhkC3Lx9GZS5S_jIY3w785vQx(lJqY2mCe~K!LhvT#qK4-I1_zH|ObAK+9 z3c^}U`{wEgQBisE2VqDjML2>76|f#L=!&qA-Ees{ep&>I7=W~C0J0qr5y61nb7_%_ z-&M*{*@L{UCFFm|7O}*`6pEh(f^sodRNexf()=(1USVyDM%PgvZloq zkQXS@6L|cTPR7{Nuj>MOaU*DWf2s)Omh-uEYII(@h}!8!*!+l 
zqF{knglNdnX^seSyHh7%)NnvE{D{R|MsbFX7Xd*JA%##I0!BB=g5gj$m!_fGxe3Z< zex{4JvFv{!zW(`;#o+B|AwYnK>g$z(6%c-7mIr_{{y0d5&)GE)NZ>_I3WRW@O+1Bs zJTytAAoK|(T)Ya&NAYlk5XGs$m*xtj@D{Qc#pDgHr3u0 zn&H^u<-i#eu2V>y7Idh`AeQ;_vtWYB3p#!$D=B~I7*;1%T+s0a9mvnikWHvekK;5+ z$z7a^oQd8Q64fX8nyL*86g|Zk3p7?F;3n>*APJ{q;~F5t0QX+kcf70$O{)KjC-1_P?TCDg}Qj(X(IE5g!3HB09#Tan~%Fjw^%s8~5a06ZM#?>NSs6lB6COd*;Gn45YDWHJn399QMb zsP0K74;Ci{$$-nrXvU{Z#wXCQOr9<<8QcKHn?tS~H(`G)2R|`O zPJ=Nz499PAynt_1rnz9O0OQoiHI<;CHa!dqnk!CJ$7%K<9|fvPaZt%M7m_DTj8raL zJE2K}M{3~F@+=h5S$NF!x8{3tgN4BhLoB4A6A4;KTvDn=PbevjIV!X`hg-2%xL7E5 z4!+K&dBJz&lY#&V3Pc@TF8Y60D(d5|Cr zM{wW;7Jn`|t$q;R4N>Vygok(^E~Ud+#3j4@s}u$LdS^j4RVHV^+MCoW}zrXmZ6QbGK> z9DyrA{QNEBd@sK?lPj1FVu?h;n1xua_u>O8)J@H%iCO~qI8c5A(T4c|+%I6^ie;Zq z$KVV?V52&s7i^*nMLd7h1265l^S~>OaQ7PF?$uD@5fwcG;G0-UZurSF%T!mpT!}rCWu^5=snk@@GJh2Q)wuZYkkVU#Y4rNv1wj+2GZuNq*0N1*(TgMoBt;HwBUS&S$*K!`|DWFTRX z(L5xh7KthnzB%|BWOS)4@ScZw)NcaYFlHvuZI~=3Bk_M25I$WvwFp8E*lebOz^m8~Rr?B7)&h!?+QeQ>)zk3#bxQ}?&(ApAO#&3FpS{Z86a@e0z4G} z#kWBnS}^0+)qE1c7W~txH89MnH7$rHsU4DdnFQ z7nL9+l_4sB!y+Cf2LO@JJNd!GV(9=r6y#m1U>rLT`koItf*BuoL&{>YbQI(x-9|2N zu~;f}rM1unVfbkovKNb`cXYv*vmt`vV6efJTS^qefm0kWl)PAszicf_Uj&p!uyI=J zau^jq77(i>h7r+frldT^Qtn`^k_ZOi5RiQ^WHB;-N@12Em634@hqQb~MuF7>%M{ee z2r&;@V51`Lje7WqA)JpHLiuT4M$8~&8NwZlrHp@CTE+vwRKk>`$6~4WG-F771e`l* zk&+(-=7mT(%>`u2DS#w6lG;c)kO1L6V~B$Q%qHl<*D}RI02kj@Sv+L16qIx{o?f5@ zIE3SWm!*V62*S%ZB`M+{WKxv8$YLo!c34^p1Tg%v3|Wu>iarRD5CY)~VPe$eh)J;! zfcU3nNQV%LpOzL90T90|LsW!7{ItpvBMa0f$CTSOx3ugC;Nq2nk`q}hovKwp*U!;=yFHJ9;-*J1U81P1UJU6oZc9J zz`V%V9X$^65jwZPl@Bu32UAY-I3g*jI2j&hlVj6~6rrDa&)rW)0!EtCelO2ZMp z+yLeB00TVYn1(D9$3={c6o4l%rWxaZjagQA@A6!y0IodT!Ex^Wz_YlDo<-ngUS&}z zkTJaqW=yS4XH4rfjZwA~M=_+pfZLCwSVeIU$|^47D|nj51sLIbGsdVQcPFq!#uf-d zCZnKFi#YVGdpA%ai`>5r(jkt-y*uKc7P)^HQ8vti$%to&bPvr{k#>xq^JM&^kYBVj2N|R#ON?jKNRgG!jZMN@63_Dxf}$k+LDJCZIrCXcl~q z$5So~><4MYCAvZkYl?gn=tLS$)u0ciG_Vh%cm73{zC%9_P|78xdts1-T4b)JMV$;TELUw7Lnuc%(rP2R1fd>c-m*kgO+R=73(!-Txd04x$CkCcunfRKqP^_xI`%eH4sYV(L`8;AxCTc8s* z+{M32h$aJ@MQ1$YV*)ZAdm+_#hVV#RdH~j%MG|ZbI==wTQ~>vXC{P80%P3)KxRksI zO-e8_#^F)zAtjH4a!nyzIeY_QrBY<tMcULPJWvGnz zgpxb_)55>I_;(Ng?&DvYr8GGUiY%VPhQho`G%O2$5QS2bDKr$Iam+y}eU-v-1n#DoLIW*GG=gVwDQGBCaKKw*5;??} zVGj6B?^WXaIhLS||PP%vo%F7yydv8W83s~a;Z3YCF!CaEN0q9va?Xr`Qw--ktTFENOr z?`frQ!jBhHwX3B0C1s(HYJzQ4^HQ7Qf3yD9|6f(GzFQlMOA*bWpxRREG72jy}?7|MRZK>V`G1Tfj^#+{=Ao;fSy;eRxMIuS`{pv z)iw)dQGafu_*KN{hJt9){MPcO*!({)?$itvV^g&mEZ+UsF@VHn`$e!VU&(=^FG+9tuv@_Ie)bL%Mu_d^0^>4NN4Ve>?; zC7YD^#&dIN>15O*N#AM5qO|Qq=5E5<<6kEe`Sy4_5t&=giY7T-D|Dax#v8SGYZ8YW zN67s|RTlIK>pSgS*Rh2ytk>-vez$*fu!<*IlCM_GzZ;qF?wMUSH^Tbb+z4c^S{zXc zlYYrAiKxC+>7sZcp7;F+sVvMj9N!&pRsa-_#w`3je)Rh8`0KZK$6vd=JN@$A-RW1a z?#_&xyXt*c$-n?NP9Q>7+0=lD%R+pyZWtZl5%6glwg))$LZ{_d>j=HX+jW1#5&;L} zw`Eu)5MUf{<8eKIn*Y0anSZhG-|gGo+kJAmPtNzrt9_F8dxSsmb;zW-7M(#fXxkdL z1|}sVQziRS(h!vLdRjDcxUaFLnWF6Dptrp4mfb&B?Vhvk z?2sJnMzufYy&rZ+h)vjDe$2O@>h74^}PA9y7zoc0y1*X@gK|4N!! za(Drl3VZ;*6IQ|^r+w=G*9Gm&lYZ+@phlkne2POU;OG?P3>R=+H65qksWP~! 
z2Ifkm+7)fcTWU0|l7MqShn!OzJZG&8x`+|n{t`0m{)}?#OEiCt*6ZX^{KVoPi;q=8 zOO}oBEajgY9+S*5)Bf;e{%y;%wlDDS>9*z7tnC@aNx1@hTtUuKA02LiS5ykJR8=zo zp^5$leDPNLr|>PBcuuyC8#!bv1ae$a-T9DKCX>!-XrGI^dBg?vuV}Z~lcRkC4nxVg ze=at?3pL&4frWpeeh8_Z8k^9aSs$9unL)M7Ww+P9qW+ETw4Ez(BDU2+KYVCu?n=8_ zo*8Yw(U<{Pf;enXw)}IwwJw(nBiwQv=K<;z-Q8^ujo2#+=5ZEf$Z|O;0k&I?({{GE zuP7g(ZGV>xecw#twxs{u+O=)HNh5HKD3*0?9OpS-abkaD!68@F52+oii3N9hwnMhS$e-C>$%M(4*8buBL$@}h zc7kr)R15C3SmMY=G*O%mB}2y%PpU3x=g7WTkM@Qu)#s3cO~JG}l&M z1e}4c8c3JIs{t2(oU4Y0WC-n4n-FeULxUhjVy7Cao$4lO5A{xUlho@sN#pi59#2?S zh(qRbsmNBW>W%C5xN&<+bN>Fk6MGNRRzkd5wKv-tyGe+p^H|cn7nac?%}tNDY2rufY(N%%#4Q=N!w$12WI-awTYz0FKS2XoNq5ro z7Y-a^l9iecoU=jShpCdj%UXUkwFiBNjQakfC3~@4E?VUMzW;9Dwm?nMA#X+);M%vw zu)W(ituShTcwwiKgNpqWjx2H`GQYdDhg+=P#$!&%m1#6U1%SM4*6S*Xpzx(c9QIN= z%j8=Y&nx#WnMmGfsq%QZTpowLbXy(y?Qv_YD`RO+`X;0JW@J-vQvV*t1p8F4r?Nd< zpjU7u2&0JKb3K4tRG}N)(v~d2o~KeagHz*ji5xqBes}J{uBD82JTXNg8BCr^<-jYD zl*%|h7(Duq0Y+Q8T?mmA+i%$vtg1nF2X|-*;e{BR+r4Q>6AFP;*d}Kij%E@>)5yrx zFPYy|Q*9tZhk7kxIr1)3*7%Tv*kq8Co8VK%_5fML>g6V1N$T_IbcBUW%Oc2t=RWtd z3KsHz)NrNsx-nd7C-EPUEPCn67;s1>2E39)&K80}#s-?gxz|m5L?lu(xbhNo)tjzV z;|tyR4)Y6dD`|vqXH>5jcwpXY66P1udo2;MKKOc18t_i+T~x1cC4w2!>qr+XaX9j0 zZN=8JoOXm(ayzmZKlW@{jvp&2MC1^iLO>&b;1X6qLz4#fv0hIas_UhIuH683jc;ul zw;iGNJ}eL@qC|UfVh?>)nVuE_>WE*CjGhR!xrtF(li7mhl4A&GcKMi!v8_aLaujw# z@#&edgP;f!zXXXB!CZ?e^BE5!>4RdBc$ry{-Tgh6T?hBjEw5$qNIn`Os@yceH=C7z zi3mPha@xwd#01Nku0$3=c<18`N-X5~B*G$rM%fbFvqYlxD7&_mw+9eh!eM9?qB)b&8GM=!qN=Jtk0#aKw*mIvQYjZd|y)bjbeZ|0@rGPpJawl zAc=3ddz0pxzUGh8nQom9yA%2bsG39nZ)9*bHot&9es z&`yEF4&c^Ix!0AD&KksaB)OJIy7Z`5}vDD{4(!vfy?lO~ZR`OG24CdkDQg(Rzq|5=#i<+YHlmP2y?mFO?DBJ__&JS4ht z--j*BR(U`|75ZU=Yjc+l(PeJ{k->y){j_IC4vGUD@&VeAlLVZ9qu-9~oP?D&vx)8K z9)UG#fwl!nNKQ(^JiQMU4Y&~k-w^QIpkM@npLHae&;wsegYL8q;bM&QNsNr@OJk@V z1K%V9&MKSYxN44AweZb%wNPDBtk;bSXTjJm<#yVS*L?Xc_=eouzYV=qdFVfdDjMSn zyh{=3nt*g?d|b$X8p8cM(c^Y)2$a*0+ASD57j-*kA-dTlE|n%$w8C>P1=8^!1c^kK z6yoH&Wt@D|E#KFL!^F!y1w2$VQWl|i3f=mVfjZ*;xv6iq6|B`MyaEsy?aDVKIS1?V zhHY*&6cWufHy7Ishc1+bPkg*NZNxeL+PNy!>7-tv#HF2oiOutQ_+i#}ULLHpBI|?t zWNl%kwnW!PB9QCE`o>STlsxd$!8vzE5OpGPTyR?^FMw|?(3lf7QW^(5z0>KY3wmfe zib39!FqHc;;Hrv>wS5Lx=(5>Xy?#03*7wbHggIIX-~uXP#EkE<9&RX!NY%|r*z=P% z-OL2k>p41q0R)hxcKpzkwe+jRyA%7Nmv-}>AFe<=5bFk|oIhDEZSmzltekezrJMN{ zs5;DZt0SI?Vk`KwPi&-R8td0%d$vBdFs5R8){xcJhL>Pu`ZOK!$7cPaJ0Yx*1C&g* zTh2|p*NE1 ztGBNZGURpM_2Dj(bVW6U^~Bh8>7$2MJRRVvRWVSVyx-N$?k$%(;KiE0k+f%QJMY&? zW05T$Mt!>=v9k(M$ANK~3*B&%)tJjj&V9LLwsjPwDJyCdFh*bdgx&g7Q!TKFArLoz zxGaK_GA|)q#^uTJ5Nguaqavb})8>K+E@<`XnjD9qWYIs~-?siE#KyVUIBuNGHJX;M zR_ojY@+wlNjkR+n2H)jcY+Q}4qN3-HbE)12U)#A-mIGEaSNE)oJ~{0>4#6EVG3Ms8 zI{hjs)K==mzHpIy3R+$GNFT2~ixsDTYRI@L-#1pnGQ96d%_V@grTZe1x0N#@R;5Ux zAGU1PO?pm8RQoQXX%P=%cubFk0%BL*=0;&$JNI*$`;5Z+G!EU;3T3V@)I${YprP`u zEjy{#Lq)D+@ebi)r6BRPS}QMH#eU97eGCmQoO*q0=TEM)DK#V??LhHZ>x)%?yTl8q z+KtVTZx`~gKa%c`w0w#%r(=e@tmm;6()4dSf|jMV$5N(N)Q>s|vL$$I;y7rG3xROdnmE>Dtod&31J~xhtCO@>Z)+ zvWT&Lyz7@eRw0I`h}>ND)=~3+LvRx)y2n86dp_-^Jn8FVjh_Hx%h3c0*5{H;? 
zOw%c-!UDu_r=WZpbotr7#~&gY<@KZ?>8^DT&#|o@noRCm!4y4!TR<@2CLu;Ey6`a` zs`e3!yQ6^*L`A%c2*CK9EN(?U_e;JguPPke=RS$mo-QfpJ9rBIDuH`g;j_-_tVMeK z8n%-4Z81(ZW`hWCt0@tG=QWUb&_K}>|Jsyyz6LD7FAIqbUfXL*A&HWZnQWz?G$7nv z3QYtnNVr-6aN9vr=*&Su?^`7Y~m5vLN1am88793*3PY;DR$dTd}Fray~Li zhgG+>>9*Cj+VfT;PE(q_y*fMf3w2R62SxXJAztPw$C#)Lcf=z4s!+=NZVRZw!Si|@ z1n5>y$Fa3sroQhNovoHAm9n`yN~SxCxUEOwv$3nlYQ6H8%qa&$`QvWXSuW?2x1MA_(eB0 z4s{7Sfy|+-#CecKFp@(q2Haq`=cvmw>INHq+F8U`C0*a)HsWjye*ACz!nN61`pd~i;hq)AMv;jfhkFya`7JodY^D9KvW z7ti?uXtkDq%k*VyuU=1I?Cs+B?jZzPhfN3_9PBRA8J|NVbi9zUhwT7bohW;RONy1&I6Jp=D_xw?N1 zwEYPx^Q)*oYs(sFxZ`^PbF<#~%;Yrlub7P&@5rKmI%F;&A0rKW(&w=MaR2ec_D9!! z?ss0ffBfnGS8vB4T|b3pW4HL>7-X_Irm#h{cAqNe$^$I&W3=Otzx%q%lfWH7m2U-X z%iZzZ|LnS-9xAoXijB3=7w)d1U7bGGq$Y3C-mWp)H>Dy$`$)9gZOPQ4_2VYA4tU%P zI(##KpV-#NkCTTFt0nwgt}J3rEyp3Fgm*GG8-B&UptUG5Q#Cbx368=kKRTHBA_zPj z@r`8`=4BVOAteV*CnWT0!c3B2n#4)b@_ zSulCg+I{BibhrNFN!@Mx9rush!2SGR@>sNgsc(q`1St0(iPJ^P6`JrBs?UsTO>HMH z4i5KroArA5^5Af<^}JpWd8W=|6~Pq@^IMLW`?zfR@$Ru!wR61AKsrUmwiM&m2DV)2 zuNUu*UT1BsAaAyeK?w?cUNed{6X}eKUu@; zc(8YHA2Tpfp!NLWqAYEIIdeY(o#BY&rHb+mL&H0#E7Kh|8~lYKoE5+w*c-=x%q%uR zJTVgG3Qw+0Jo&M|B5zs-;5QR-ek>slIi zt2GIO9e}4lG>qa0wA<*{U0gsHMP8021SW>eOR0uNszTD)B>24O!ae=WcJL0l z=f_)mph)h^Nd9Dj9F@v{oR`Yn)M~kM-n7UaIV!jN&d3cbL|ooU_ z&(|7xY?sI-Uvj8U^0ju)3j3r)dQTj(u`~y{L=!&+K?zGl`NScA=SEYH?GqB{`xh!7 zefTq!Mxek-Bb#nQNPac#v z)7^7;#7FKZx#6vUIbS2a=Z})!^T$bVJh1Pm^Kj!=6Ke{5NBynl3M$1Vf9;2~ef%DfcDyJ1S7%S zHo-j>MoFlf5bCUCk^YGx@3x%>i8@_CzaAi&+%44uiLdT|tVS<>t7;S>ulH3JRSE$y zYl$%=kLBRbzZ%eN<2nrv6Y!in1+Uyrhquq$H`ZlG30yMF6CCDV2&i}bruW#vp@=D3$wdC91UaaE?KAN^8{~L>c z+0wJ}l^R~QI=h~fNDh(O8I`QaT!4(6tM+Bf9uhZl$Ysl(!z;90T!%+5TOBlak`C2W zdKMjn4ZJv4AmL$ih>tR>Rjx?*PnEXbPAq;7%JOUF;ekt+#m|stImQs^Mw*4M2wN}K zDAFu{E;kzlfKc?*sK+lSH0$zp7Bca>J@SocP_Nq)|9xyvNVh9iw9HD19_jMsXn=Z- zL)LOIk(P&YGiP3kO0Wc3z_4i-8z_35f)zaQDcj(7$fZzcy9LCsD7OG>NlLYZW&=tA z#_Rt^#P8S(f0t|+pLBhEQawK2!fz6kzQ=BVi==M6y+NQk#5^0H~+?h3X8ISv&0 zmx=ASkX5^sh}&e+8coJS=8E#rBU`De;tZ`k=Y05Mkz8=2g!w@(539*nL}ZB{Y1V(e z2V7!Sk(GfWoA3LE;Vn~4)|MJ>l0V;6>H?Y=Ib`T>EtX5r4BM^O*>aic4K{Mf+}~n< z%O&Hni&CuCDV}gT4<}I&@$T<-hTagg!IsOdIbfpL7mNgRZw`h!mdh=$FM-lPL+`%D zr$Rnp-1b4IT#$VHzU@y31Dc^Nnr&Zh^ndi#Mn5eW-s$U|{TFj*|D|8jM$%$T);2_I zck7}xRj_eflQ51EB@x!07Eo1PAQ)H)`ahln8rBm1)$hN#YxE|W? zr1l-Ck>8cXa5^)j+TU9)@%KRd9xj)2UTEyMNNC?V)gr=55eLsI4S@4r%5{t4}aJ(n|^$DFgURs)gg;5ZntRd7~PBg5}^x=kSS_oYE;%Ev&DI?yD zgE+DNn}&E^>H~z3os=kKqR!!-q|HY@kMGRAVS9}L-Z|t*X+1*mf;bGP{=+25K&lU_ zE{ZPvWYUG7zWBx?F)t|+r-R3THC7hQ+lk{bJ1L3i6NA=El$v1)@W)FbxoG8+egDA} z_%SqFGa(3p0N=Y-!*-u-XU%#W$8ftuZsrk-0_k}2Ud$Np6p}0 z?b3^;^bn*8>-|?6PJi#q4LnqiE%Gp!nE0KU_?;bo+j=!btL1VHoTCWYfNzPO(n=b( zd6mEI!`ikVs}v0mKUTwkqiHurbCfa($lLWHya1!({D`MUF*Zjr-i&5kM$>%snrrxl z;nj7P+k~&=$7+%DmiT*c zFF()7!X7#$?x5W2XU~?)A?mj@zoGeW={$&N7>|SGd%IB0`VC!wI3&6McOSTiZ|m|? 
zlKa05sy&7vT38b*giNB_+<&~yqEK~&@_m1v)$3d5BRj4NsOApvWWz7F@}I`I$CKb9 zl)FxD51o3QZEwFwOa^R->>jjEF4WuBW`2YnuEYi6(vP{7c>c1rwze7SzbHC{WN$Lg{`3a&2R zI*`_rH@UYhat>E)P>Z=h$|bx@2h6J8L~Z# zck|B|fJ1(_Hsp>6&3k@Wuk#6(^JvWQymWv3c)x}c1vZW<_z3#$Q9UY;>e0V#R7Gos z-!=kAr3>YM>kX_PBLOAHPHFWVmr{ON@#*>z#itANx=$CEfGI^|DV>&i++@3j%jvY# zp3^Uk-i8%YoyvsGVu^?aH{#sN>zymwt}beSKF@LvuwdL<;(K z^+kFA6f9pBd5x^q@)p>>O;K7 z!qy6Zeu=v5Eg_5Mdh){+Ne(x2L{pQQ%D;UmCL2ydB~&Gwj^hW8Jt=j0BCA?_3qSS$ zviElDZ5z$H;PI@hps=#J(2Gr4RAfm84RcwNohVi$MY5eZG(VXFniN6^U=TLT|E{io{;RHfYf-N+)|{mvrhJxK*hi^X{%$vO zQQA1MM8>)|6FIOUq?_gL1eQy=@W*>Jqak!+!+IIG6Jkk|%cUd7PByHU&?IZUoPgli zya7%smrH~GP=Ueb_kV`XALYYYL9X2(duhKvk(Mrc{=Z|WI{|4@trvw!r2wQaO!Z3~Nggq}ZivRN|5 zw}_E=g5&MREn`&JGK?aTex6PCNl_NHX^hfF1FrWl>!R+akt#;vUW1j#mGg;rRv>{- z_H68L;lU(k!`#oiy;iH0gB7HIfIJ5GV1BQ^0@NA-Y6doF`72-L60@VBGe{kD4R(rN z+63CEfL0g;0n5O4Gi3AH7pIXo97H}H)cC}%@#l5`mjbt|ki>mLoek%U7Tx79i#fU~ zpqKYOoxN8oshKy5nVW4SM2&yBS}*+3X05nUubm@f`O*C}OQ)a*ZDc`zth`mGjE1#O zfNpoKX_P!KZ0K=I78wyQe^s51w?HH;kt8aSgg%9CVCD%<^O#G#QfmlW8w_&yPt|V} zO;*~_+O#Q40Bfm0aBu+F%BS~90G{785~9(B$(lB#bln(eqx*XOU8b3pMu_z;+quH= zUBd}$%ci>LoIeDLRC=!%3!@zMuDqEr6T>Bdbf22n2qMv z0IPHY#o7hv>LO3T@>C3~*GnVm`#!8>FSkXUks6{1Dz}%WQ$bmabF$>S^_*dMDlt1{ zfW;;2IB?_Q(tJwAI9yh}#?|CqW-1+TcMR-GL_ZJ)5U@8G;07Ik)AHR&BBpOb69hFN zaNM*23w~RNEMO};OMfc}$}@rUnpS>ZXw7Zj6a*CLkc2}7pf%J2vDOkolX^X>HbAJ? zMWSY#*oW0DKgrphy0o~Vk40$H7-hu;uaz%D#b;qJUUTj}j0edW%8DEQ4%wJV$Y&iw z6tYxx;w>X_hm?wcQ3i_80}**0WhV)Gb`$U$=fhLcU2uyLHyUX95?Six1bc6Yjq7#S z#!bxW-JZLkfc?e?flfg8LK!=A@+ln+K5ns}s8f0;7E0%t7`d3xZ0%Jl+eBlM6?EZU zFi_^Drh1^u`HTw9>S*f6x+huj1|)SU@G{kbsv=V}*sNE7=m-b?Ld=pBRA&PP`w+YF9Z?x$#e<$ymJtWdVX{=4=WQgbj zU(qG20Hch%?vsij-mw6cp8LnL?{io|1dm_(ftU-&y?LJ=_vVdupU_iq`U3enp|Hg~ zrKgQceZn_TcTVXJy(CEHQ16_`zDV;>(R|W7YJ{BbeP8@+_vul?7eAXKw%Jw+%u+{i z&wr5esy_^Qf7Q+2iZ0g3jAy1S>km_sfm9(A2WYH?abwU49o30X%M4FCW4dAlHqF8x+mx(;|{UQRZs4%DfuOki3i$P}1Kl z+Vt!41m3_`5y1j406-}$gQmuTYxZYk!Rx#Hr$oOMe{$cV$X6(5DuKDO6-7nI&eJ8r zTYvh&tzQ4zgvm_nDY#Yb0pn(x(7d2Q_)Y=D>%*xA^bMEGjc_Fk{VZzuJ0H21`V|co zytSAs(nFf+pRqZlmug63&=>G7)gWC8krn*kZKKxsC1@yJ8nQ5>f=vGmzE)Y_%)PW# zFbZ8VeQ|%`R{vb9#&)a5)va2uxe$PE$t5Y$6g2SRjZ@Ro=nf=!vC}>B z#+LF#afn4pVsu_uW(k*sj)@1YHB}!}30vT)(ryuqZUBuTkM~IY$}B2lFBdN-^*R=} zU1-jt(V%CX47=@OT(c?TD%ftj$aZB?9ecJVf4)ts-EG~)N>ZH>4~Sz-(vQ|8mD4lR zN~HlP177ax?SB*!0#(yOIDbLaa#~xmSTbUiA~p2ZI%mjkj=Zuc+DX)Vk}6W++#8eSGH5h_sOva>fwn{i^O)t*4gIZ8PynH0&Q5c0RI~vxnxHD)kqszX z5H%X;+ZRneGGxJ5Z-L?ldyL=8CHCMhifjW1WTK$6b!W5m811nouL(>EH&Dp9>*EJG&GvnPROa zW9t7Ll{LsW9G(&RfZt);6x_p8Z46oJ!K=0V_`9h)F~S8(GdVntut-Jt%Fa8c|lih zTxm{N+RgMHTNn(~o)$Yh7f6LlxP==QA0k8haij53V2)oP#xKQNb+Yza{vCHSVvXPX zD36&_3aJ4d zZylLS%hA_bZxzce*ifx8$Ks}zYQ)QhK)h{5^;hF+T;Ix_`4 zTA2xyV7@i=PKx?C#vp*e8lNPR=FY3Aj2vSpO`4Fj41+NLo;TaXK|n}4fT2yAyQ1Xg z&bqP*FFb0rAy;?D+a)xCmyFT@9e)z=5eOd9UJJAln-COn8Rx}!>! 
zgjmYLTobBm#`(421{@2q?gWK($GW%__zDho3tJhgs;ve^LPNJ|Jq64S)PF9JcZe6g zfuy2!oSb$GCTb!1anJ8LgD?cIC+H<2j{;oH2Y}zA9ZSK!}?`K8RwdDvSnkI>H~gOGJObU)AZyeM~2GEHlp3Ev=>dsS9_ zKoO4hD7KgPsdsE_FsFMfdcNkM{?EM?Jy{cXVQx&v6*{*3buVu=0dWhJREwBpwt}o` zDCuaWE)J2E7d8@0EUeUUIS7+ZEmrzo-e_-%SIQU79lB$$*QXoqCQ>|HDQ7y=6W%?v z4yA_`rw-L|S_%{ThJSkY9}+q;kFL7BLs(EO>BS)mj3In?zYre7_3w;F?iDkopc-u& z`gs?`JDs|Rq>!*0VEmBKW7q%=t+TS-(B0>yI(I-SdWa&KQrvS2GQFefR;2!Jxw&@r zxfQ7jJmE?;IV!e*e1Xbc_;5xrtjBG#bTL20W2az<;yryJp@|vC1=}$$yRZ z$50aZ$WeO_X9@lvWYhsbDCvqK9ZX zbx4T!L|?(NSLLhlArly!#WyZxa_llGMV|D9d#>dfk7ATaYSsk8Zc-fle54l!0 zvc;hg%;NtSg^EJ|kOpf=^bt+m%ti#o{)DCC5YTPRcz?C(aC=C2TMCPYX^?lu*7i`h zzA(Ah_FPQ*E)GDU)-$u#acR_zg@Y3X#-YH?pB|eISEUJXRoy*K8=Ul&V^A*Wy3D8b zIzr49vL?CM0JnL3k@W%@zr*8q3-RB{{gX!*x6uo+or&n;mh*w3fc%i)rPufOI1b*v zDM~1v0)J3};(U+ZF|Y!~g|JAfb-8C^-Lx8UT`)j3k0-pclRYpSy~D~Zse{8$I5Zj0`ar4KPQ}BL5Z&)ek4Kwyvjcqkrw|Umg zO>>l!GE?yi%)K9!*o2Pm!ISr>ine|zQu)QVkAG6;;<2eLrop;dRxa_o4MV+-d+(~a z_dBD|y8=gAg`kk&&=js-{28sBzi5CN3(Lm8Vuh?7-fk3brca)n>mG=-%zSju{Td39 z!=aX4=65j{v8h|(Tk}@<_L1vT<$LKx^#!SDCMUy67o>{|{u zK%>XApH_5dxwHyW?LVz<|EYEH*hHPugc&fS__x)?7tl%8^!BaQtJq9eHCa9uhxKEL zIq{xSl`&ve&cLiobtYi4H=puyDR61Pc3UU-r?udW$z@576Q0*ENOuhJg?^1o#eWu8 zGdSs9h#T64z6+M>{bUT)cw_Iz<=%a&=-h?49u_N~S66;zll`j13dObZ+GMV)jV-=H z)D)}yYqox7Y>WSGJpvc<&L~(NZe16pJL3gNq%IJoMYZgDW6PiucF6m}vD|KDTB=TH z&R+_)(KAxDV8*Iv-mC!mvW_0QuYc9_G3m07Yd7@qvdqFy;e6!4!dhY1Vx9!8^MV~L zfi5rXE5SwOiPA1->4Q}lBoUa>y>Hvk!35buLM2R^V(M=sIL46=yx z-|Jg>{)<5s<_$cr7~W34-!mQ>els2#-tDdEhdsj^-p9Qa{l*OVZf`|@H-7_uHcyk^ z8*dECVkt44^X6qAFOe@E2f)frIOKv55XY&<1n32NDR;ow$qGJ&2cuFbblLD@SyfVZ z*W2ktrcVHH<(#fo1;@>Xcd~k271+;x-;f;{?z34bHE3V$=zSzRM9m`1IX6&?5SQ!d9%tB0Yge8PRs-3;OZ zU`>H-C0G<{>pY#3JVYAr1RIf-M-c3>{6tzdghFwE6-_!-sa;Mt&xx>+mK&0|=+1-( zGaF5WI}@2m6@{4VoIRnO%*ej5vX?!g#Eq!2Q{@#UGoQUyD|PwuwSQ_uXsI?#K!Je< z;Cdra4nX2Yn?e=e5+b8?{snjKA;Q1XwFs2DOS9&;+`WiVII2@X*_8JJ|FyNI#l)&s zmY0Fo-%>H5@O~1ZB9xj%fi>0MZ2~MV=0U!z%^TQR0WaI8lWo?sJO0r=xap-u&Y5Nx&gQyYR-hJS5_A}*z}wA)6ecHu~9 z7sdo2fnp1dC@vt~?6$iAe&eC0C_Ip>ifld6icqa3x;|2AB~~Qi8ln{uQbqjR9b&aT ztL|oq-CV2%T}rH&5026w_|L<;hQR{L7T9>kMQs>h8F7?gUw=A`3Qr-MTMbbri zl-$3{FU1IXgnu4cb`nn)vj&xV+HkOOrW#>h|2D`aLLOC3{ko;@YqYgZb=6AkR<6jZ zT{C{IYU;EK@P4`hbv!mlsF*BW?>t8Fz3Qs9d7^j!E3;$H@53fE0PgxBt=H3a9)B6q3WNT%D=|}=Y=~UgVQZ0A5qVQtZM0!6B!L$d& zAL|-f8SWpc1@5Ti?uyWg_L1_!VBuPEgEB>p=L(MF6PUtj!!PLmmZ7XB(!Y4T{8Eg> zSAPa$Gs|UKr5PGhs$Oq2vX|}cZqe_ZS7=oBat9P=){wosX-LMx+?>0yGp2vB-4kt< zwb6!LW!!9+S4y>}f&O|%Qz}5CdrP4(@fN0E?|nL_1>ZPh;}BdZ9j&_tF~VYntDrPO|_s{r8uw=S#9rbD9AO)*PcZjXs!B>zODENAl|$ex4$0{r&Gb)_sQ{YU~Mkw1i{8pHU z!EC?!9SRLltrw`$gO7d&s=~i~0GEv40SOe*bTGN)(DR9ST3T0By{@c!651x0sow!C z3fsQi4Wv4Ff!VH?-`@cme}5$;!-R9gqVzU0;ta76457GtvEH-5hBV6LYfT!>Nh7}^ ziOopXi^(R_5j&b={*WAqB)4rMR6GWVF%Hl@6D=GkGZ(6&Y6RWi=9mwo;s0PL{WV`{|S= z02j&|FAo|?L6Z9y+VpsH9%+df=;6LXxdgG`r(Kt<=u<}wF}0Z0t1?ZN;g3f6_I*d&&Td9^t@R_rDcT7+kexq-Nyzi57Lwn{8{5{h@^ zr@aLi*C5&{xKNc))bp34qDsx!ANA&_PqU?gT9-@xTfW^ye=3BiD&=z9HWA4`8Dn0{ ztx*JQfBx#;Y|GO{qmjN;N;4f2YsWQ7FYVXsN#RKy$%I>c3T`(*k^o7TOEck?Oo-Mv zTfQHoPntyUpnlRz4WBfL-i~)@+Kij%gC+)7G;zrlO_yO;yu|4B-M)>(uHeqK0W~TU zB)+dg%Fc*ve@{CAxzR3n-qeN?31C84_{kBG0z0{PXFzifrH5^c4Zz)rpY~#80;nh* z!rq|fHljIa{zP_TVrpY&%{D;*!43grwXE6fpn?!rHG(>o))4GB%^ia83!ate;O^3sbCqs9{|;FtjfUDPBzp0_|FFB`{XC_+-{Rlr z`1b(+f4+df&$sb!8~&?ze`{2c)#zi=p3tgf8O^psAgCE{}%s$`>pqBkMiWZJKhgN zD8BdW4L#iJ(-&>;*F7kucfCh{-|f>E-+Hfaq0%3QP-y_LXZ!vGEVN$lhanyA_gD0d z3fbMEFP`^T^h$;FSM++{7b7;DGWJg<(f0W(^~E%uuxY+*ocFWx7erRUg}-#(=ZM0?^u$1qkRfTAMb|$m#P2Qr&>A78}Cy-qrKTa zg?-nX@2}|9;_fDmTY1VK+usjH^x+SXmwL{RX?PzqXGM(t{HX9oWKXPSTTY;DK4X%4 
zJ;4dHT(;$$!7%Ua=U;y>_c7`D10emSPKe5k)et*meMTs4J zd?ZwCpWZ3Ox#7L#4=V#k)!`pg(g*zw(sCs z?~b8z+YWVXw&(pmZ;fgGBbIubnV@9OOxKKa(@d?q@0sMAOSbYOL7P9B-?S5ghNitM z#;=P06%(9FKe?9059j|Y#<$W)e{2*lv*PLMn)x96;nC&>d(Dg;77Donr|3K#jjU}6 zyxh2UkiSh-rM~Z9F}Oopd9L-Tq2w4*x(v z40LKVQ0NGBLbwP{+J=7xN0#Qd-5swD^aFDc-0H)0Y!7<>PezR zGJId2F**U2RvtVv*d{Itgt^^MI$g*Dq|iSPGq=SaDAbEAAOI zk%pf*(as2jvn2C-a9J0l8_}5bapUBUHkfjpmJ%Ahg6pjGumS$8I%3xk^rqiMKUdPv zO+>t`B^*tzf8tl$01zRpn^IZlHjOpRQD(5YC3#S$RFOP3r&<Y8f55z6Pl-^IVx7!)YBE*GhJ*V!_o^YYVzNs8e;8#AAhQA@W{PZ4>$fyYT*q{m z^4btrN51y3nt5+`-QM*+>(hej&=g65^}Z#P6-4MNyP)a5L7 zKi&|2KTF+UpHO&`fO~|vjiv7GKH(R#)Sc{;RRvJEsnlx;G4-4F(LOLG_&GOb$B)=G z9R4kIe=pQA{3#3bzqbFZ_j&ts{yF(P?LRXdom}pVV=nXCGzYX44l-V;-H8Tkl8T{Z zy$MC1VRYQ#9^>*x1&4L@_0uIDI1EHfMFw}UshcgALj2YWF$lnS>B!#}O#(@vdAi*O zan#=Po&6W}`l!*MiIU!pUdEdJqoMn{-HHcUf6kAEBRcV4w_9w^0(8vcPhhXa0^LGqyK|Mpu*&@kp&?S+TjvGCEWYQUn{ ze`U{h$$#!rS@RC*utwwMqNCc|CT0n3T}5-3Q0BJ_vor#xVSalx0(WC8?PeZ2X!I{v zUpbMrqkJ30=Ym_4>W-ij>o>!0xTZF@yJ&ql_)I@vcpe~GuIY!VLw=AQ4<<9+kKr=%XRL=q+1MkCZB z?|u8+@Ly}jwn3DPxKuma92@H3K+dHPyWoG13!ve?L}=V??|JQQwQ`-^x(Qh><+S}} z*MC^GZU@=bs!D*bDArzy4ZIU;{LerzJ?zyxqi7sfB3_L z6lu-G30aCOejlD5bVQ=mR5f4xRyk6}7o)C(7 zxjzA3t{21Eo)(4r*TlfgQ7!LkM0RBBmhN+hoLw{uXK!i%g5zJ0A zUH~k1UhICKn*m^_4ONEXKb9G%GgM{72&L}e#cn<&bOVyUm(fap2UfUYoaALxj*=I< zZ>EG6Fwni&{Vg{_rMO*&3zQ}=c0Xb_P3$LVytGEQ5}4galUy%$ze`}df4pI_mr-Nj zrEZKDyBCT6Bg0l z?x-+rKG~Bn1b0+w28Qk>E6l%7eENMdCmT?0(^MO|a|2+{KlzKSzzly8_?7nM`%I~D5PvBES9-7`k$T7?-eC}hCSew6bP8feeT5H7Ht zUw7S{%1gjqILPuh)=hvY{ECNFrK6v@z`8hLQXOrInBl=~6o*rmz{8O!W1Rb7l^u+| z-U;h-jfgA$-G}?ee=r((x!4i|zFbTwzq#_I2wpuFoOhEIJzw znVz%WyS!giL6l|`%Vl-EVX&afuyh2^h5z_>!vDCGs3tzdpq%qv?x*gY(OE*@<=km2 zZ)>rcI&a=nb@e;H2m{kuLhV+PdVQAk?65vhYYVR)iVrkAe@jWNbw$TH(UlN9*bC7Z zj*)a+xox!7O3h(=MxNWZpK1`Q$&$S~GsUESZlod6`DsSoIpBl9j=9}R_JNJZ< z@1h>K(7H_e{@FfZjr3@ProioXzxAn+(s9)nCcVABj&94$x!Plm4H0FO{h_i zWk>ocfAObNr8cuw(NslKJ4*BF6B8K2y*{bnp)y@8D*(b(epbl9Tc48U@@V?#oUcb+#I zbeuODz@;1cQ8LAJssMOI0*8v$dRuLwKU8Wfe=l0No`(fe%#F^^Fm^^`WkdNh;B;DU zDLqo2(vzvbwY^fHQga3e07Qxr0c>vbk`$pb1_;yHxx--1f|(rzAIEUihseCslLMsaG-Ysn&b4RZ#EFS}M1 ze`f5N{OuE~kix+}GDyYFc)}`1$-LjZC>p}(oaeo5rNTJpkIePDd&0WbaLpOTl*Dt-bNkp}$BWnf{C1ko zY9L#EGo7YW*Et#_Nt)Nt?y2_0X^5&Cf6kYh>ok7JBrwpN86LLgjN2YG9OBiY{D1w= z|0}CODd~hI4(W_@H3B%p`OyMquW7lcH9>COB6TeLM8Q6Aj%~5|1f20CqqosC%TLW6 z52lQvkOJcgQo5&PmE!~lluewv)9*+=@`cB6d__G;gerQ$_6eA_y)78`(e{^^e{!sz z{pO-!{7~=0y!9WMQ_#|Qkc|8}t5C7|vnAfKd|;J$W(^O#ZXDs=8r02}OHLJjCviJn|_Jx1p6$GHjdWlaQ2v~U&QPba79U}r5nU!NYwo$_-5i)4j|As{Ggv%8z`@4QKUhO_u?Nek+E_{ZjGN0CDgt&}!5fj`)IFv(cx zSgM)-pC3T!+7=J*ZT^7pI)gIPb#w<-@Lv9Y+L% z%}=CGx4rGP_ss6De`&uE1bgSXe|8HqQ9KsCdwe+FK-Zpe<#^2g6YkDheqypY1~ z-uaqSElid*v^RK|{Vnx?r{FkGJ*V+#jlbkdP$2|grk`a~^riEBlN9s7q1>Tx9p`31 zaE<1_gpHMBWdFWYWt4!h_XrEA$dFHq84M5%s;8ajIO2sDf2+pTg#Wys#&(f!N=0gY zo$5~iUh1^c<-rS!x4kCgZ)FN2OrLqTDwmD^petzcT`8%6buaan4&XK8K8{T>V5!^O zwRAaibziQgh8)olCp%QWgNya2_<|wi%iKnc3!UdoihI{oc9B$9#%0%4rLbVkLSfEU zS>#VqvHyk|N>+82UrQtt+FEPUUzgJ3ov&&ve<~-nfE-N?*HLN!4B#Urvn+I#0hf5N zWJ00b?8$CHe_E+a=VkvBIjBq9_8}4JVi6JX{<)8rlJx;Ef7j*Xj}-h_5MNfxx$TD# zD2u8xOugea?==@Yv+f4L^BTFcDRwrc&VyPQ4RK@4YbZjmxnG<{E(@VGvyj{;!9%8#@*_$#$!c@)Q@E@Lw*!J~AfUj=^+I_xPZcZ0!rj)w$f%6C6@cy*_- z&ssx)nT9w7(sUXrjY1DUrfdk`4L5kz?mA7U z5qR)!1XDyJhhMTG;4BZO(cqY6!8E#I;o-vTIkI_8?iQ%O+B_S@kYogArRJ1S_Ghxb zKiU0c{S*tU*0o8I|4dx6cTeU0TUeX$jQ@8}fsdjIfG((*dJv;#YcgWf^gjDOzl%$4 zf1HBH5Kd3A);_Uj15v}?PFhgymv7dtFA%aB6(mC8#EM&~k zEb-e*TlWdKKdh>%=d3iGV%kfqF+vN~e`HeZ5P7MkLShyjt*ll5y8`n)VWoyj;S(hKnH*6d-!=>ZfeT^*3L^xT`Z1%ZVsi za=d=RM0X==qNa}=KjI>%#>O1^xj0Qle{$##frG^$);93+OSL{hu_p#2`&qrsf6Kj1 
zL~nDux6#vkn>^CnsMy=ce7LJ#luS+%GhRQj%NKTdU#~gDTvn4Ow7+zovewn)tGn>a z|9h;BTbjk&|4ke{SjW#BR{o7#{cCZYWmhy?|2MGeU>&1w=*Gse{;7nxs+Q%{Pr5&V zg`M}qE?=g>6c|+WwfS}45;7amf5S}Em<{ zPIu%lJa@F=kiO|!L?g8|jKK1?`1lOQ>PFzEZKUR`tO5#YE7p)?a-Z9c6M|Ks&rLda zlypncOd@23faYW?Dbm=wWaWcw+yu#4GXl|Be}zaUh6E^h85Jc!K>ncLe{s|0GT8F{ z?d5Wzlo(|DF^qR%3kQHGbN)4#M?#-4jNmV`n4^&(@F2Jf2b zav4@MHT>G9K7JsLBWuVEp=0*7htVJQwUgK_#Im&Is18_LgR(71I+x2W?MIf`N+^*U zRsa@gG@_Rpax(XS*maX$f7DmTw#GZj54)zcMpmUR&fRWF8{<`5=Vk|m^=w7;TF{YD znAk8i_qm>-?L0i7x#^sGC5TOJCG)sJ%`B5*$H6^Rytjy%BIKmJ;V)eQ$ z;56g(1R{1O9burQ7iJhi1XW`O3UIZosS-NU_2ufZ%_naO*?QMWe`CjU?&I~VIEt8} zHL|6R8&ZDo#M{v1Zx;k4)m0bl#tQPYudZeK)DBAV{@>ios8GM!C?h|Sh$u7_`(vcp zdu=;&cz=3K+}MlyoEuatwSLuhA54-{1s#G`{zbE{O%1q7!^L9_NU(Yu_;0Mb+kHYq z|9saB%PZ8qNGGV*e_>V6N@{We3Zlc6*@Z_BayjgnkF`1dGZScVP&^7H-9+_B=*TkN=j8BG<|9U=x?^ICyKs+*Myi~^&$`m^%qtNN7u--oP;@Wsi>f+fu=L@z`(wVAAFU;De{7O=Zr_AJ`g^1`Q}_fJ zX{qao9)X7_lf)2&&~-4rM{;}@8^(9~*x*jq4DO;hxF$k&`qafE5C5)(gI9Ig_FvHR zHsB6xjgA9zrxw>MMrh(2S3z+YNI239p2xS9?4<1qTxB<{kW?-l#hjSnW8N4Y#moX6P>h6F8MT^G-8oFaFl0e>HkJVKs@`t zf7xCx;s1HNL%^3^IV~=5)di&h_EK3?+HBIQ)ba$CzKXg`fn}uxUU;>6(ZQp)`S`$9 z7i0|i`kn3msmyu%_0K|U*fKcqAM~4-_jaG+VaEHge~-ul-Z%R~(a-yRU)%kEzfXr+ zoGh>qe%q%)k-~eokGA{X$9-yx2fgn^i1#x<{YqsK@6$e7>+5vC@Br`EeQmq{6aVe8 z1H(W+KcH~F@R9>*5kEbkxYT-)_;JMI*h|I7WjgZ)2OPaBJ1CgW-yhICy^Fo0fG!67 zzk#H1e-x-*ADf{D1l>{Kk_SOrwgc9`i*0|K{*=K3z_!0bf4ud-rI)wI@t$ek2R z`0-Y^3m(Uh0m~6iL!yP3x1xpzfxi&YVLTXRUbym=!zG^el8xTXCvHGP$j*yDzcm{O zf2n>POxCrMKk2w3q0rD`OHWEgU9o%(!3JSiNw0iI_6FT21@w+(8KPp8dUM?F1%21( zN@9%QVKf3$Fij_ihd4S3SDer(byB=z9ipj}V-x>Fl;j8E3PyxA70pC8Sb9E2Y$la7 z9s~kJu<}cJJH;`@oZy%kT-p}<8TWZle>9NAik`N8=D!LL)R-~iZWH=T3>a(=628qF z9<1P~^e|?N%)32U2_s+nF@x+{=RrJa;uf~0C+2dw#pH{}Vw>iDb(T9h|lB^^SrQ3pFgM?*fFM`VM`Cp4eL5#ZcZvqX;HY;3qqD;b#U!AQupD!m2KEttHVz z4_0WRZC>H!PN@0ZC$MeR^0byo#m}142uWvw0%f!8`$WJws%e?JJ+=e4!<7yQ=mJC3 z0v-*a6WslZT0}yizv6=KyfjC}^d=srG}i>UW|XHip{d&oDF7PJ`@8|-e-6;=(~++m zW~_&Ez8)?FaG*Qa3*ty`AlUxGXcU$+tlAuszFL+Jg}9oJ2RD`#A3Slf5Io$S4|y4I z6wT{(Cr^L4IzIz|=uEOy41z2Sc8QchKHAya10WUP_H4ZmT;RdpyiW)L*($dp2P)8* zhxd!*(7Pd40S!30Ixls)f4nO$#I86p_7N=-W)gdt2d`(zCU8GT;8oYAEX95O}w1O zX_hU#1U41DFd?w5^q_R$Zn^n#nGm|Ql~cJpMFcK>(Te8V>IjIKf3Eja79PcE!p!Bd zD-M5oz3w_tGcaDhTsl}ievs~I+r8jgjl?UItg7~jg{8+DJl7o&3F+Giq4onyI&fy>!!wRW1A8Jm+1s9?XHy4U9xo6!| z7uMDLZ`W7?p@>W=fA0*?$RnO&xx70-v3gaSQ?7I|I>4)QF@=bAq&iRL;H|bY*?Ug+ z<8WW(e095ld21r0E4^jL{Zj$q|Df2-E`8VSk4nejH;XpCR_ zXC3lzWIP@ot$lC82Scp#R5Dd!uutM8Zb-*OqHCY}Cc5cqm000)Md;BZ(`uc5SguvV z<_&i&X2Q6n5(Zp{c^p&=L#FUV6CZbumD{sCK)cXLqA zDFN=Ke-lhn0?K)V$So|a$=1*s&q!= zi+i-FDNyIBKekblt3f1NtyU@pwYr?xuv38^_ToO5PI5!JRJ&{JXm^d;fY_H%1Ogd< zmhVILvO;-n+%nN^kj@Xfnw%6Ieabte@!DfLX+kDg=U$SgGHPqf9Ur)WKW1VPB-B`l z#RGdclCMXp!kRZ#Lgxp{9LYo^7@Pet6p~7;3HO=B1l`?8^zP{nY@BZn5E}g!01cxZ z-^s?&#L*o(zEziW?le>GbGbP;RE^1hmUX0!{DdZcPQ295)dR=k9-bUBKk7EyUb_>x zOhQcd`ouK`B5*@Gp$q*W7OHweLy#JJ4=ZCZ1|~%=S9)o<Lx$$7#LE2oZe%9 zx@UE%PcX>cOVT-e1&E7hFH5OA zdcW6ky)!&89QlEJ3LPgdp+~BJ8`9H6=S+_05jY^%6A3=Ovqq+5ik4B>l{sNb>$Dnv zW)#?ab3j?&6B|W$y5sejVQBNmePM>sm}Xx!OQdgCn)x}X<`ECPRKQy^JnIpPb1 z3YM+Op%ua=y@`QsF!C2&t(v7?v_s-qfGf4k4e1CTQ^k^y-Gc7B*a+$OkitbW zUoNlV=A#7Z>Q}6OW$0)$H~>2_5Yr`L%%&M!K^_Ki>`kb8$nwq(sI6`4Mbx}+cqz5d z`Cd$|%Lf1y%No3lrU~5j#U;s0C?2=G_XiY?4c?Cj>fVpzs166hy|MOrtBsnawjD}0 zOOHp6Ual&0cOx&Qb9wiFS**YNbVex8oRj5pgaXew4iuq5$MoY&M+(iFNgcKOp>+{~YX`+~-!7w0_ z+6&GRfqSQQhe0h2ZW!^;w){y5-I!CJmx*lKnSW$z5_Qh%^|MkvXS$v<^}1cGraLmO zK;B5{dZ@*9WV*qB5-WqoY=B1qts6ACaV^xPvjfxS89kXWI4Qx!#Z+2GNQzt7ol$(k zKaWKJP*lQ$w9vyi4j7kRf{z3xswL_?5SiHEnQt+Qr7#4B$m`$oqjh(cY_&=%?!a-; 
zq&YEE8fm_b&W7y9p!-_2F{kR_m)bh$YEGr&oeAAd$iXQLkck44F6-&V%Wi$cz_wlS#c8aVZj zW}u05oEGVuR1UnYB~FhPVc74twF2@s(2BZCkpEhvPBZ8d$WtjGN`wt0G63K+y2AY1)0nAYxx^g~;JZ{X)}t&4{4hql~_?}wcaZNJ@t z#7yE-ql<1JMe%Hwms7|GF`D?5w-g%5@Da<6vbg2wBu}R_)k2a<(R-6M5=Jp+ZqX*)EC|NkBc9gq3c$=mp@JOjc zz=xWD@u^9rZslv@sS{-v^5cBy;3i)vdTUXyZ;kAh&2d7`K`5bhTLk&$ubg{lxl~P` z6EM8dlz{7Q0xw&Mkcle)>`oV>F@ip@1i+h6CkiBc=#b-Hee;>JXeEq$ok8E;B63B% zNskxP_jM9DjN8qC^RL|kf=WlCu3TLcNmp7J91R4}IehOCEr1 z%1Dh|j4{?n;Iq+d!5(1lG=!f_2$$=?|yA^Aj zye)4T+B3klhD23a!{o-YW6Ua$cZ!pLeeSC<0R_*(!nRyi=g*0(5Y8!AsC=Oo!S{m^ zmkk4=Je~3g1N!+uLF6%zjOG5r^gi(tdY^%baHlNoONzo!Wu2_y)aA>s#Y&Z0 zAtfO%NOZwDWKHrIMWA!10Z;zumXKoowosteY8i3?)@w9kVW|f#@@=MB$t$sd2i&vqMU#iLi8P-Zm=TBehzsOuNigXSK{y+y|S7 zOjgjzr;CSBYQ=NuE^1QXla3*Ooxyu`y`G4UF0?`dn9D7fF3RBYTR6>25hs3HJ1h@E zMNluSMM;(qk^s^o!MZ63cw$g7H2v6QFcJZINR~?%eW&I!h?q_sGV+k}fyZz; zH-n7fvT<=h!)S^>|9JpdKlt+W83+?+{f4!0!gp=A7ytVM1 z&eSW1WoCCDI}?+K#7`U2$*Ay>@)GV`F?y$kNX3rP%k8e?c-lKN*LH`7Nkv1);lL%( zfxy=w)t$f6piDO?!vIV`v%dz>e*opMRpsPXP|$3WZh+Y%KP8?E_NX-SGre2DH6!Xq ze&$6YouVSOda5rm2V&E}L8HtV3rU3*Cj+`zO7u8JoD9uoQbC5qAVUO(n29|q%cTp> zU-dcc<#v~qfy#IHyUsJmb6SoE1JlN{VP&FcdMLN4b%DYTYL?7(4-!s4f3TcrwrT8d z(~S2iH#`Fc1xtpUOXGC+GH`{St5KG!6{`eCAcKKUAf8anwAW2vZg*2Z_hMnS8T)PG zrJM<1ztSp%FSnP=1pdT)Zp54h;2$<=iXTy?xpP!q9I_{hF;jx^dd zBOa_}W&kXu&9vF>e~3DHF?`b2>+!2*yBjwe9!${+u^qB{;q<*}A9XGU`E6$hmw@3j^0t$thE+HxOxjs}$` z6w$!sf{%LLnFuejDqIG@@4-DzQgIG7u*wb!^e`m8j4Lf)f1bkXpq!auL$$s^;n?y1 zmerieAiu3Rd%&?}F=2v+pnNe%)IkhlKU|&~l>PD_4S;HNq)G?hW_{<)<>mFs*~izX zC&$;XFTcBf|LNk5L-Q5xZnDoH0zm&1{pV2`{DBR`s(QW zH$P&CQmWmM71wu&LjlU%hu|w9+d)j1aV_vk5X4xT1Vq_;w#%NSMJ5T*;qr z8z8HN1UtALN51dxZ!eedzrDR_2d%;8(8Z)Yk1>kx5+2~~*kaW4I_q*E3a zfH*+19}bI;d$Au5dr`lGibe3)!qtl81x9@G#SL$Ly}0kg9;&F_1)~T^*Z1HrS#jkV zs58YvNjTJxrzsKcs(NYPPkL#etVS%a$&qr4KtfpT4-no^PzeStm0rcm2me)O7MGKU z+o78qtU&0n7@+G22HYx`j|t}x0Ca-WFEmpGJ++lGd37UIEP42*Etd}x0y2Lh@_pee z2VyGJW>J1y`_lTt<7;gdALr2|28iK2bI5A*P|MlF?i!rP zgwqu=7(_MZXwUr6YAHxS%mr^X@E485euG7VP|lIKMj68k%6#>@RUXP-1nWVo{`~CV z)d(tC!;IysX@bk7oFd}mDbs(W;hGf)(hUa@qHUWGQRads;?!O+9OMJM#HXt$8KzIk zDsOcnuT(5t*~zS}YdkGvR{cXvHhhgxK@G=q8a$AuEjViEI6(XmO>Wx@wUWxED`m8m z+x){d67+4)@ysC=v1&|pAYy2vTY20Kr>^qJNm_h^bai4=C^;NzkWYU&W2!k7fD5Wg z)ucild^M%;M2%ZuEsR3kOLoSovF7RPi9IY&f$hs)GCXMJ~IDO;<9 z74H0%4{oU+^;qA08dQJ$kqQ}WOx25v)&vkQ1DX9Y5QAPoyrMrJl(pK#`R4SthN$IX zR{QS2I_i*z?0&+gt`nq_g`Dqj#)@L5eC1-`1xHOcg9dp%2yWR_A5Ma*oWm(&|73V; z1tEY4OaI9dD;dl)^pC90LX&cp3U423g{O=aG~BhX4yU(Or)7Vu#TDKLtawm;$y@gs zR=3tEC~P>TOme00@+Vg0=b{!nc*RH4)t+BPc4L9-Sb+OdK2~6b%>&X)KzJYZ zi^EbZg+o&HH zn5K6TlCfjF-@t>6k`VK*qg6c8C?K5(RaHV77|N^gj?N$Om&>iua=A5^TnPG*9jOpM zqDOvY$bH6iPCCCHfZpF+?6ybVTwJ33BMMea@V4AB)hj^(6|3oH?t*x67oMoSMD}e2 z%7TIIg`opBQQWXx=AA`1fKkl4V8rc3vP$5L6?T7?kD*3`+G^9(8bwKvnA28daVV-} z!9+|;&3)TOun#|=5&E9j9ElZGQ8eOxqp81Xz-7aLOH`o_3_BmIl&cED+J);~vToOt z^{CYzR#(_9?qPMYHw&QyVj$^SW@I6UewM;WBaO5ovlXdw(uGOmW$++W@S|S!SbmSd zvo(J%4Z#DcI0!n{+1?z4i77sv6k0n0vz%gcCq{Flp<7h_l6%zxh&t-~lMdHZ7Y~yF z`SWLcQany)QhUsGCh`3!N-_uaZb0Efd#J%6vfi%Ftg3c%pVy{RggZ!tz;ZHW8Ov#H zb{hq^;8YJ_@uD`U-9=eG7&A{K%HV%!6TvpRs0HaA6IXZ9zAv>w64tWxj^)uE z%S5DX>KBUj!w}VO2U#s)QGUy&4yb%gTA~Pm-IE#cekMPBegM+}MndzwKLDz6)Dx~0 zGSOUH8ccCcS-hMS4~HWkw7NR5;;)`Cpo1EKeSwr(^keO)hFk?rs;$tuabg`AK(l{% z#u@ebnPx&B=Ke^$jz&Xw?)&h_DG+3O6JW-L!OIKd83-_9#*pBs?=L!1jXvtl`@MF* zlia#zq+U;M-7&YY;$%LfV*&tt^S#x2C8VV7QsKf{d zQQ)!)u6DpW>3t42JJ=2_65jot<;#Ed)v94+)KH#V(wEgBj#YV}(^TUNWle0e>9qAZ zIhwLT4mWAYuOe#0Xqx4;cC)}z;7%LmUhN)ycBw=z`GOBHY=B5xI^s*qZ0qk*9d`?r z*Ob~H6KK?%U(gf>gFzpyjWmhOI=)vxiC9nN-5$vR#z5IEObAd;Zq+_;FJymsTNiyX z%RLJWo3&pKxS<*pJJ_+FYkqm!{awgoL|#ObJY 
[base85-encoded GIT binary patch data omitted — binary file contents not reproducible as text]
zy}touk1B*4?}3dVj^17VJmUj~{*N2)y^Y0=3>yVF{<(=LKJUUbH`(~J!nQQR)5{<# z{8apvYOFQARic5x)ra?HVrA&K5{A%J?!5w*wzh2oysDc~H(o`w&5UCEaxj8z->~L_a9*fcgKav??I5BHQq^FR zz{d#6+aNK!IzCjx_Z`A|$+v6Qx@SIwtZ}SE;ln@r7h-R_h|X+wjXu2e&JToN4^Q&X ze+>bE$BjvYux)6R@5O@HxVRiTu`f**ldbK2+e7H)1r!2WxkiBFrJUGWVD$p>ox$Av z1=5S2dGX9&pPV)Vt!D%EkzeQRUh==z7aTCeuGEM;GW6(`%O4@n_`{q0hc^v`N5N&f zl^tInn6@vUC?u}!Vo)M$uGcOQ;9=p;uE>G+7`s>H^d;zqhI-LPmz@u@jlpUUbBwlB z#v^)`=!)bZq~v-7<+o2F)r$VU>t_S+JK_=Un+&x&=Z@#wS!5xRN>IgaRJ_41Z=M~4u3?pWYzcE;y&3StGrHna=0htZ|h4D;x}C~?BF68 ze_DMwTUC4OzHnPLU471Z-Na~B9dmh;Jv#-Vn<=oZ1LUO+Z7r=t-+YC?-K*0nJnsuf znfD)3qQieOU0G$7qw18%Fn#%68V1-+@2~k5F7|N)O@#g8lHf%b6{b7c_qfE%2%mz& z_s~{18%GLWbP4c=zW-4ji7OT-DGReBp>igbInQcRh?r;B-Z?T#rDrQjsvbwHh;tz9 zDXRQXZ39MS3`t{st!c+=a*m`@UxW9>6-Cc=lzktsAtV$6Lo=b!%*{|<29&oFia9o1 zM;UwYMwL}mRJf^`sTEoN_2v9QflnfMKp`+}O6Mzd^0e_5Zs4_Uc-1BT+}1q);fAQ@ zPyPX9bmLfa@cLsNheDn=$Sn}_X67gee(}G`QlkAPxSZe=i~O`^$tZa&_NQWjC(_6q z>^L8SrlPj>5|v3jhkHLYZ$JP^(riUoG?6Cm0E5BnH6NgbP2_vX2kn15+9~B`b$ET! z-&<0Jz4X#g#!Kb6p*E9zi)$vzsg*wTQgl%;+JTsGe8=>K**tKUHCeK_G8%L8eKXD< zv4e<@d_b73NxM-_Qa%q5#^$b9I+2(-Gg0Ftdr{WRY+zVAPy0xE$YsZ&+~wHdi2pr zPt|szdEU&yjxOW9VNYrcvV&M${rtMd4tJy2Y=-}z`%PKN|0#zn#QQyknjD7$d)2lHS#{XF>%8HBNY3UYcG`iW+i;Q^lVnD<=zYlud|P8sKui&%pS`3nfAxYPQ_+c*6L zRlH7&H_fe*bOfNCz8IcI(#^J>$BBGYNz3g~A3(+gipvc7}wpBB;wtR(Dylo{?Plw*8W)bxG;G z{bNFd#ek}$`t@5$yg7VF*w}mLST*Ha?*p9d6D6v~XY;M!s!-_Uop_zyY-(9YVU?N9 zQ|F$=&$K^cy$BW;u89j;`GsCB(XWD3ofjT}TNE>gV2UBoA?Y73dR6koamI;MhiwB7 zY=Pj|BQM34+lya2$jIeoNMw%1!K>A;Kcme`1DgdlV-jT`pL4r0aHf<92E8sy6_r`` zme(tPE+pp0&I;-w~9%=I+3TQ(pJWU*2ML?IV`5}raFrFumG zyZgh%uAFC2GP&2o`~3chBW-&+adJ$;!#bYU_)&A~!#czqhIFXHdfG0W&Iwmt6v+w2 zT7eA-Q5u{WHue)rY#=v@gGf%&PT>v+uyM1>?`)(BrVC|<1Rh)5iFA%JXRv`y%UqJ3 z$1(0a>byI_FN3^|d+Z0*<7t5hTA0A0c`%(K+TBLDQ{ER;3GzXlglz^(fh{mqr8dF6HEureim(~6_%1E7%JZY)riy- zY2((sK?pr_)I%F8 zi5Ew74`h3Yo+GeQPhx_5WCgEb+1GppuPugol-TLkKkJ=7w`paX475KZAM4hcSW~~3 zZZgRoKE$mV<~wkOM&yuDzty%U_DQ=UBNW!|>V2;O-osc@2y?#V_uc@$^vq&=jAW>^ z6E@kQZmEBGTF5T&-0uwbTKZ+TVIJjGgN3nrP8}yUMj4bPNazQ83&>HPJ|_0z-duYB zmO+U!e+%TXlqGQQh#nl{{VDw-s=Tz|Z%e@WW4+vAKc)^mp1%WPlb$12WS+;H^jN?Wd$=ZqZJ0^a4mHLF%6jaa$Xn6840@so_! 
zCjsi};QmaYyC_<+keqgwepS{)wfTp7`T?ZSpp3q@Z(K{SOcrKD7ZhK^$j1`}E7?%yJm8gsIHmj`0 z_&CV{?wiO=wA^dSn;%Xfx79r9E4i~VQiK+N)RaAX}sWk-&?D;8z6 zRxL_qoZ5i20z<<}9eqE@tiPs(ftgdwL5JBK!Z)s(jL?#h%skEeQ+QpaS zNTqwP5R(SyKu>+G+Dw%Lfb^!k@;*4vAQ6s{&GY;HqEcHo#+&%Ac2a47x@%BDF$W7+ zxmo-x46P2T&|?@2l-a@QT|5jCucc15clF&J@OmtJb-2Lz}gEb3+mw( zq!XR55=Q!wB**GWRZ|a}%G`{pU8n2)h|VVBGIX$YQ4$}O5$EF@cZ&xPJVFcI}k6l%MI9z4TOBf%v7HGl90EL;xv3E1|3E9FZ|C z%@dmfHag;7aFo%2 zG#%dPw#R=@jFBS}!HE^APXtoqN$2S;$r^F?*ZPSG2FCx$^)`t^lK#ty%0&S);g?)d4mW+tzT2`J(nWn#Vn2ys zL&1QjxRC@?v0wE^YF^78J4A3ZnLvt0(s66o-x*35G?KHiW&b>qG2%TD#wkubrF;L- zIQ;$4KPqW+T9aG$bvu>KN%eth&-O76IPRj93U~fPW~4-Y2@a2RoQN@%r#q3npf_$G z9DPnY!7!B;dJ4pjE)iC~$&paxk?(EC;rq}I%k@Eqc5|-4AzWDSM$GzD*4L$q<(R>& zr|NQRV?_H5E8E7JKsD`^j|l4ojH1m5q_eWaa{ue@^D;7f9gV>z_};qc`7b&j_|Jzl zjgFmwAQ?KFZ@yn#?5X*FM>v-&&el8|1V7o-96VE}Z=`6Mus`uzK2l3uEkSW6Pq$Kf zFg?qsYS#w|pJnX`HKCRvSTqfQgk|5_eR#1GW=Gi4$8^yUcz8p*>eQ`078i7S`t!;w zPa1>6{svm*OHdk^R4htPoN)k>R$G;Og4`lr3L|rNQrEa=8Z@2z*_0iSeR5;N=X^V3 z!_|1+TVlhVLUEPiA}z>p5Zeul-cY#KnY*?bQ?ZpC_66w-9^)jc+MlR-!ln!r`EP2G z7&cgY?#WVZwlKJ}W}U2T#y{#$&!@jW_TXJ!Br#mN847$2-qO)#K!*W}BGBm3!;2pV z^!yMa_rvVM%mA_MgJ)F797q8w!%e| z&kMKAeXmye_tK$cm@tgM3W*7p^I|b9oHH_0+qGXyQB=v3*tbKi0hG!!VfZD}YoxLF z0|WS_Y<|1wT^jG z4^GLSG?`;a5rj;Lvp|RldapcMXC$wd@z*DXsUr_##?)fUw1Dit) zjZ}8JVhdaj!)}m;Q@(cm%&#%`SA0r|RD}L1T^toS0^=J%=YRff=?%87TMn3eKN?6U zOi?k?U;P7VV1xC4@?6IfI?er#EIyt(2^=Y)vIzdj&MFo-D_@qkpcHl67c%Z3Y z>J*+v*nbm8A2X~#8-s++cBUxa+RYv+#7}2m*8H-wv9a!yYHZ}5&$xTR!ekRCw~GdDQMd*Eai12rCs3$DfM>&34kJE}e#j3+E@nZ#Nh3 zuQ|Xq+8IeE$HO!KTj}c5d1=Bvan5tGqgY#U4#&mgE-|E_d!s#mMd-}6g}E%DQ=(an z?#*EmWv5|98V>zW&R1c1YKs48obc^ejYJ|N8q}L-ClCDir1L&xWkCSKwkxr@r?6b! zom~a-yT`dJnk^n}ezQP24KkQiiN|;5cQ`-*62^`FyYHa8kI-y3%>Vd_RN?;tZ9tO0 zPGld$$E|yvPXdudfmWmTb7`&5T_Tlpfq!(oIPI`W*#E?EeBk<8{RMb)B0djrm?_Y( zNJU{{JR|7x8Ifh8IRAJ!AG5Pbn6YyAJsq$|I)i6fm<0-cCH?&@)Ur-ha3PwN;*1(8 zEr&$Ze-78Sw$@+VcCNOX>zj2uS>wExQ^!zADdoXPxv*rn>9!zPP^6fv4+wb~42uq3|X|K1C(Scg)&FeST1zOr# zZ{FBwA>N>fmqR+&T1|zwGw22!FMde8;Dj-vf0S}6w=OQ!_qLN0)RY7@dBHZ)vUy{B zN45}Z6Yx0TZ8LPptDt}$Clhes2p#PC8JqBEu@D(&)-1*R5sn}SF@`){U6o%A;Te%T zPc9}p5zazkGvk*8S#blcH*Pcb}}YD_s&jy z^vX^m2Zvq+)dE+mNyK)nskOQ)el*U!No-q3N#3g}Jg-%^EeOneg$F~q0&YjSlW9nZ zA;F7-8YuoDA>6eDc_iR&&mo~21nt;eUFE~9?NTImQAh%*Q;8EghHKhXl{RD0^d@pYqu|TeErUT8cc$;^UAAzIGOBMhXwff=1(|e8D;Zs9XAPKXBoYB; zi59=Zr-|A@FD?>2v@-@h4XD41pKh#2JRMWZB6GM;!zN~K<&;v+)2^=C-7%T>DC^h} zNgRiam7{&>ZT4td(L7J_HiqfzGn@(4NlxdW$^AjY>?WMIU+t%`*2_+y>5i(3IF1K2k`p< z5AJdi_87)NEX3ub(bfq(Hi>#_VMm?p`)c2~xLDa2@*hqBG~n4OI5a7EO3l2Q1E^;I z8Sx*O_zx~?+oBMsQ2244f5uFDa)xsac!02O!QnLWhU`9!)q;c_3VxCudK37QJBiIy ze1{dvqg|-q7Z;45nh<@Ie?WqEO7V2Sbtt1Z z)hfcMHttsY#Ey&BKMj$}t zjGZ<~NSh>~O(LQae^!Ph6g*<42O@0C@H;nS9yujd^ouY&H^FUpNK;9)5jp2(!Z{Tq z%~@nBq7TVrEFY4YT-SoUF{$8$FiHrlw3DVvm8?+tl^)0)dQ(8l1d=Q} zzTcSGFax?h5-XfZX(+>si()`k5x3cltsa-6i93pSJV_=2f1BJ+vMd?B#)A{R#_U9O zZDRCAizEvYbuR4%!7O|07 zC=pFaN?Yx8f5!-+C@!IEJa!_MjPQq6HlY5R9e2~UmPp3Fuj@%TquUXU>OKkam_O(H zxiw^y*uuEm!NQT5P>EcR=dv|!NH`)l!Ps8oc4gOa^B>h6bDBf!dznWDwAmh1tlNQ7 z=IQHp&}+}v=&X?mc9AiZel*;y5M7iv{tE>rKpostH7=9f&g((I&w- zqG83ve-FDU?|5GgKX$wjqWHXt?I_>TB4S*5Ngy|(#UiTHbrJjw*1I)^I4BuNrFu?# zEDvZV1Aj%i`e6?2@_;5}MiVlm3E88~_Fhi#o+S8)hTT0*@R2!Ronf6G@hO|nuYG#N z$tTD3sBs1kZP-KvQs;4+4)bIlu$sqnI{?~`e@S;t_IpGGYdLh<2Qrp*G6ve$qP5}L zzC6Z`L`*aA4Y;~$9~gnoAd%`2IXw4CSSo;#(F_=e%yH8=aLBMu_vLyN6f6c*MW-H+ zA>4@6c+JQV^#&fpQF#)DDGbWoX?v_RC^HdUi$Na`cAD7jkEqzddk*mnc)E{zPTMbI ze_9(Lsv{kKErk+7pkjUe#Sbl~=d>e6yBZ9adU}i;L0F$+y3cx!xHMK5>p4AM@6f0h zD2;O7Jz_b3yS}CQymB|`HhZT{e)&2z*Z39>*wQOZmE zJWq*_O8{BM0kpfVfDqh^o6wBUsz^0TUM^4i*6OOdYKED$8&OLWs<=9&%+pdu2?-rh 
zOUZtC@^6V5!wAfXk{dP)(R=e!~=N}>FYkA>2{QBUf{=~~LQ0Oispg$SZJInfZH1C%u$wQ8sr)&+7{ z&Nqlnxgt(4vFAnMVB(EWL%&cG^ix{VKI*CTvPIREXskcEw8nxmQ1(z||5SI3Sv1qT zc9g^kE@rJ{g+67mApJQ#f5LpXe`|p?Drsz9Z;{6J)@F}z`(-#QTW;6Ne^{jP;?{N2 z*nIIt4Q5@3S+_{5v3Y%+G`6=8$i$SRYBe^Rn>j8MnU3wn8;EQ{`V|VGE|;bDXnzi@ zddeC9ab$B10DN~6yPNjNIhV$}a1C(?ZjLTL&l0Ntww|iD&BW2M*YdpE>MG#|NH2Q$WA?7lIPfAH7A#2bJ|SJ$5tU^Ue56!RY)hH2*d_~ZU0a3}bA7gRPu zhoqU8g?{JB!0mTP`eXn+s@tEFY%3KXIov?|!L8fsC^LJ`3 zh##r+O+Eb`Po;0^>8Blf4w4-Ae3Xpcdoh7}Tt6Zs2nhC$M@ASgf9zhoAOL~D04KmJ zzyJfn0iI+m{xHOgfPS>VvIqzmlCk*1Vh!Uv-a@3EFUSakm^}-v7}snmxf~-TAV&DY z67Z#=!Os$!V#Nww1(F3B7dpAJD`m&Yep%Y=ym2~xW31|QPR|!4YQn!M{F}#Kn`97m zPTnE-Z8amYH)5$9e-m(hm%1T2_2M966E`8J*(ka_1uOrF8}T)g|4tW_5uYj~)6uQT zAa%or_#)n906Qln$xp%`YO>&!_|{~=Lm9d&D`BIQn!`anViN{Z@_aZfT`d=WD@EAu zO0$>;blx9BUwBbYubu*Npu@Ny&Um%_i-7l-FbkhBo~C}~e?7Gz0d2N}T+~pn2hQod z8+U_VkNRz3`Xl(S2@&sM-&|d_Q~o1$w5WhjA{ZZSZ1fR`3nLINC~HqL+Z@MkPYmR_ z9Fpcj5^6(u%EFDcbDB(}pcb{bi&Kmb@GV0K-1s3F^1f0vEw3=8W zjMrz4c#J;y3IAf~z8)DsA#OP?O9?O$7BGjLcj$Q#rsK$)yAQL*{wtjahqvCqAcqgL z#=|?WKP1Sqz|YR!@hcBxLNOWtU63boV?b#--wBXFf4&!p7V!N*(1m+~Y?Ig+6tQAs zkTlEf!NR}Rg0L#lX;FL3Usl6ETJ+xtF6}5;$SNcYEVeUX7Gf@s@lAnXE?i*|%p zuSTO02sMyM@g2~D^;x5E=)u{(q9SodLh=x!^rxu3n9~eIOmj7T((oNHr{5jZxN+(| z3F86qe@OP6%Mct_1I*^!@SYniC|KYq?0KgQUbZ9dhjK z>LrD?#ywtAV$>n~psm;|mvI*4JWc}U4i*H&e+#67$l%mIa>z0K-E+tx-Ctcj-rZ}c z;PlGuVRzi=?)SRKJ@-_8@bL0s-#(OMD^@fxMfM>Xm+Fre&T>58v6Px>tg`}SB)!Po}TFp!R> ze;sh1q->Ji>St`SYy7h3j%#O)Y0Slz_S_*rq_Y_BA_D5W(dw#RW6aJ_GRYBh7?^F^ zEObEjtkps70lhh+H_tPvDH_yUq)BGHvV~)(&H}qH8Qac+jOUd*vbnM^E{ud|UPw4; z0>+$d7Q;rxpX2RjeCQYCq*EA7X`MEne}fVNRM}E066PBS803f%{&< zlWC<9Ue|)`fJMMqu!+@^>gB9);-##^w=N;c)Z(YV?b)$}48c-{#U)u%*tb_Qe|Tk{l77w>W&_ZjuaA?}jr3FURB20@`ix(VO+h03X56m=~uB($Y6>ne>% zf)ci?G?M+D6!qp{K6f|CYW(=GCH>ko9>&R8EY#;<-Nk9=23#7q`>M~ya6(ka7YB5w z)guh$bUaB2tyQmvr8{M;^gp@sf0cjmm50#a1^@zn+={|M%*CM+7@(D6My_uz`%{GO z4Rk{JcBsiKTdS)Klw0zE$~=BJfl3-uy#ZnP30x5lLP*(!>ooCjaY)h*4FPD_ZS}N8 z2`C+Y@`t$iOQ(JptdyEwuTm~k#Hipjvnf0UDTFD|*V zE1)Yat=hE~5bxIw^5y2cy1G)zQ`=<#b^#TQWdCS79Bp(RXRvhPNML2WFwa-YXbu-7DS}2dHFFlc@wL{*F?5uIN(MTqPYpoYwym1Y# z$NZmrhgROkd?Sdt1&&#-bN}RR=4XxZB*_v0FWBQW;5KO&3X#|WmcZzMxY(23xaYL> zZqwJ1IAK9N%T1v)e=u$53Td+)?ij-%_pT84E+elS_B6KT)LUIGWY=3JK)y#Mw1}yZ zB_ODZV zyvcAnR%Y0A_A58$(ITF*LtvU=5^%WhL$S*O;{HLdUJ;Qa_X7>CeU&#Ib{H?}i+ zGgBk&G~Q0a*tUQx6?>g$?samXW3$ZMrywEAHwP(0H+Ki9g#)NEjy?xOp~7$|;;LH% zSR}P)ol0s^e?o8Wzru#!NJY4eWH)J}xwW;mwYr+^;OF*kp`7*c7r$b;0NL8!-bTRI z_V)D(xDOC+eSLi$;nvsJH!9%1Ea0wRzb@deU%ycWC#zmV(l{`FZ&i``@o)UG00#mE zG*DedYYq%l3Sc0;4E!5E(15RBzafFIU%y^SH3v4+fBAJqO`3eG6#34xOrspS{b?LY z`4wA<22Eat&k~EgCaUK@9fxEUGufnX*V-AuY`tmK&5XLWb4}#9S9Y-e8cs&*O5Loe ze#N?W^UAV?Eo(A=x>n6XyRSn=&!M5vd+upSWKX)M9_c`}{Kr{`#O!QebfeNOjphq- z7+&&#e<)TDh+^{rQQ`I1ih44zQeh3BIjgJOo>O~)u)3=M5~lK-WSd*cXYd#Z^Cesv z!g1s~rr3PP1jpGSYvgmvS&+9+t7XKxOc}(wC$1Yr=<0m1e`t_8eYYCXZiV{aU5c9{)b!zoay)O$ zGgB*jL??7P8VOwjycDk`Zm_X>m#`9ET&PD8!jMCCH(ZwW2R3c}1w}OxVet`5N!K7B zO<)gNZmGelh7$2c=84KQZ7vKbFz--s<sb79Th+X{900 zgvY0LL{gGCyC$5p@ZjfKE+IPZ!*vp-Sn*YaoC%}Qf)@n^4lvg~j(S_TfdfB9U6OL-Y85~n4AG})*EVp4$6o_&-X6c-y5 zs|FF#AoUIe(7ZmW5=EBvGzkqrZ`9NcJi^u0#HbDu%&xMN!r?EPORy!Rm_0GFxxjK^ zO1Ol0E7QeWLTSY*^rdDiD3X?%3A~{}POf;#mzGk3!g#2z#e zTo?%_7QV(c*Vs*1Vz$waiwh(~zH$zX1u-s;|JZDVJ-&?oahd-75nO6+JL&RmXHL_i zDgC^-Lg#9Q{*S-6_`_mt2^oD8f0{pI%aIa3v+09k6$4&{e(A;(Dl*j5Nv94()Q#kp zllQ=4AtaOmNJRrF`2$5?vW&5P`_z72O8J}x3F0zim#aRiIYNB*SY`sI8tjuS9NgHRsbGA9le;DsHQ#eNcShjo@W2JIp*eQmauv6*=69+c}z*~p8t z&`&MWO$o^4#}&kKP68eW!>aceNA!GBr~ z$#lybJA~I>j;1rcv+O0^tOM#VuJI2W$ut{Jm7|DJ&Wt}C@KOo9e|XI4=iU$t1-Sx4 
zzE9kPVhOybkSn9=yPnT8G0yEU7BJ)X*w|NL};6rCYIvolYv+UH%T<~NI z-`vHm%iWH`PwqO`1wXpe{F58J%3u>N#-HHf(29w(Vgi)R<43p{GJ+Q^rOvA1xUYHF z`uJ-r#<~3r$6QrL0ILEwQWfPY9r}JFh8866Qhn^SK zSXY<~BKbYif4Fhu2H9F)C)VtqH{+)4y~2v_qH#`D31R%+D=pe*s;00>JWD;ZOX_Gf zHn(3aRZ<$qVzG@zceN&XQ}A2?WRfE@zaX68e8kz<#LC5 zldzcCFs+oLv$$c2hKZj`$H?5!p1%;^OUzszAK zVX0Jce^rv|Ljw9?axh6o5?cIHe^c>BugouX!QgP>og`0K^-ShjC*DM& zd9m5*Rq$6Dp7b=C=ySC&opKlrYd$#(NF|`6Fh|O>N|*~s7vy2*`5tWu(T3YvkfV}o z11TN}ZE2mo$+k#9vqt2duttI?pop-W0VjJuUS@)1WK%zkvV zvk93Sn`A~UPQa$+(eecBHLakyk_-Jue%2h6}nBb zzVRa2Xg0~lc9U$bH+jYoG8*ffq_Khjf4@x{o9jFasNQp2M7-Su~awOlE&K$C@-j^P!_oY^=+59z2 zdtf>&y(RVeJJLb*8`3z5nYECd{do$nH08w|A8~S@2K)T+#VCxqa~-!SD;zFzf5E^p z<^Ce}I^Cd0x(CE#Bw{^!NFLFMU2CxoaEovtW-~crWXKhdKFdY4Z_vZ;Mz4LuXxBOo z1GbiVA@CYQ@M4g=H04Fs9R%~DKHnGCxCuJZ63}oROXsYH&Nj3Sj2noNpV1N<{_t7M zL#~OR%Mpjck~)fS$iA?bOeW0Fe`;JV^2*2?uoNDy){;qWIu5*y)qHTWdCDfNmL)ae z2=)J{Rs7!<>OQMmH6{4Aa(liGqs?}Q?f9(SVNyHu7L$|epxXie5ob)zthZP`wKkie z`O)Uqqa|}IVwIC?$N+uas_{(-UMW9hv^cqrI8cvt&zO1#g)>5*k`GxvfB8HnNqV$s zKP)@dc;t{{6uUiS%4Li(u~8zWZmm6XY|L)G0&Z%8rIQ>Yk^t#~hIZMvRjM3QWg?ItmQRTVjezv}=bf6A#%guYb43R?5E8gR9b?sALg(7@65fidm&+SfkcJ(=}< zkibVTD0)DcjE!3b z<9i=`GJmVXUyTgklJ4 z`o!d9IXRpJ$zy?)e?)FS;DPV0i;K_PDH?4dh~w-sw0(fa+&wkD*uc_kGcXWPZy={* z(3kGoA%)T491^iSAI=Vth|v&}Fo(t@p`W{BGjx07$mGy*1%isQ^0g3)!T4J|v)r3_ zqcIEcwIzQb5jMh>IjzA z(e~-dz#co?bLIkR`C2cds(>bo1w}gOgmz` za%S|iUPH`&JA~IrGr8cniwOH%cNpdtoRJab{i`cn&T%JR1palS1t}-ai0Kc@?ON%R z!5VmP0f~d|e-+F1Z#H-3-r#$YZ^H&rTdQ&R9_v?qrw*}SbN^PZNM5*Tv%Bam!1+<` z_KobdjUEMuDZBO%8m#$3}I-84jD>cnN6}shr+iwJjfi>=}f0`A}7M!wcaBTry&#))_Tba{C z5C7IaT3xkg1=msnO@A6imO}reo8Za4&g46T3kwb)f9YC6kt;k*{0GqnKlJN7{*uM?%l(^g zn#viKq)T1ieDnTtpo(NEg%Uu9JS#7lOH4E(a=~}O6-P_C;&9C=I&E?zIG9{3_d3Y` zu?C#{VBH8|Ilu@uPr=O~7Zg3(Gq0!5u6bQS;y9Wfvhu%Z>fBKcQh8@P)MmZvNt9F)5hG{K{qd9+;#bz0g zZ2{Yhh$rBXs-X@q9p05zKE5l>UuJ+`MFDL-%BI>9ht^bo=4lm+G?xQwV1;F>#D34T z5YKT`+?dFo;7)cRC{At)Th%7skFma1LhPxe&{KtjNk1+^oh9Tc31#}xFWR!(e`u|{ z;H&$xQyv@!6W!6RtDKBqcH)PZvWZpfKvKmG#DWE^GL|yg0V$&IMm_#I!<y%a$53 zH#QJpp{=6XEu8xK0-BkNXI5Asf4Qz2e;yaIE7nv&n}%tIHY!glf+=l^q(oQamQuy4 zEvl))aU+F?cCdN%SwnY$Xya~|1arbKyxdhaGgLcGvhLtsH?wvvFuwEytDTq^s;F+Q zp-g=2sfgQc*5Fk0a06wEPEeRoO%aq+B&C;DChcX5J9Pj~&&|ufHw#m||JNH^S5>I1 z@nw)}8su7|)oQhE;U48?e`9?MI63V&C`~vXVrz{i(ltGoWoWXI!KFk>ah^_XozpPC z^rR_z?Z5PVNeFj;;*P6M4PRsiRhg;C9Egi1S48~BQ)ZlNbN$|0{oU#nqyhPH>X5>@ zh)(BbYf=vl;?~lcsIS551F>DSXd8tw9Mu+)i%kR|#c)Qj(m}#;!p4s^T!q&S!CFj!8rkYFgy!!+$yvo$7nwt zL9zrv0tkpf5_k&QwPVf4Lj$=XYBd|{AlM>C=ETe_G!T6sF2i>Tvexx7GVW)Q$5hJG z(AnbiS5U@|!^gt}RkppkwY6QsC*Gjfo3KH*@JU;|$XP|if3LHOGM<=}u%KT|A#Gui z`WB8d@rU$(iNAzHN%0HmuHaTQXQ@qtQE{I4iX~g2->xvtwMJGw6kNlWgS^YDF?f15 z=cHTK86-~OPBxzxt{qS2Vv*lzYK%Xt@@dL+tCn4+{F$3v9$CQ z?iy{gNRlv4@v*_#G*rl3ksFT>k03&A}E6LmW^h zKg~RUD5oLpyD7`^OFjH}u7*|2tyT{rFT(DWw!;<9K&eW1Q+lUqCytv^JcR|9C&=Z# zSfNJ}!7#)8PElNR=FQWWIT@YlxHCP0)Uh{VarR0oe^XinwSoL=ItJ^46a?^`D{+g& zXmc673JV&9LJsMOkPa?j@ByV83JHPitzFRdxe-aHC*{0Iyn=q#p-+({u!MQ)a0kz} zxbnF$9-ZhD0e(J%pze7lHLu~>W^}XXEWhy-&TixN4ZUAV&jgJM^ZUEYdZv zn2; z?RT`hcrB#&J6+b30bD{6z@^lWu9mF5dr(hPz*p9RG@^kQc0BM|7q&Baf{BG?*Ggo0 z=emM#FCfw?v=mNoL@0L~-cX@fHTqbdNU%{YAYMcX@$GP6acLe89kirov^f*);E-yg ze^Y0xQ7?{n0#}d7<}kZvTHN3T^IU3?$jpe3H7tFh~$AClz*= zn#mgVmk^A-HAYHhs2yYXDxj~Km>2oI;jok2s&sqYXpS`o(u_%3v-4Q#ha z9eMsFNnxV^Qv8V!M!oswghd`~1eN96e{JVnI9L{1j5{-aq(PHbT8P7Pw9+gNN_86; zrIxl28VB1^E=9N{*QyLQig+O|cAQQ}Ondh)vRMpKZnJKdJN(UGQa6=fV+?Fm^^{v# z@jz8zya>I5e*rgPjhM}{hslt|P9E$OEJYLA4SOnlrXDpj_duP&s_^O2z;;Bme*o#N zt{tE|!j)SCJB4S?j*o05?`58X))b+1kWPX+Z3zeHj|2!t7e)K7Ta$@5x7X37Cn(6D 
zVRiKuWZrE(E9i=C(Tj%F2;+dw?t{3H$ad9Xss~dt~f5f9Es0(joyJiZKlPHt(`w&vDG5ycee6W4h+mtooSS zlgileSyJntu^)_IyRZpkMjxYm~5x*Pp4ij;DYOT!5N<66E2=C=cH>^9c7K!cTu z@7%6i6a%aklQSVaM;L|z?-&H1Nw{`*lAPW3#;MDr8}x{uM5((Ee>&( zfqv54te;HSF^I$SmWXE5e_Ns-93kk>o+8%QMdvcevJmNq)dF<1kBL!$o-?4P+|v)%INTa2t${MIYm&yd5IGc# zofOPS;)J}yuPh%{#UyWdOXW!$}JyhMA~f8-}4G(kC#=bqB5 zHF;|-ELX<&BYtfC@x`xUV#{C?fyFPe=zn3z41 zb26u`NPVuUx@>;R8uVG(mtG7npO}?1PnI>OUR<7f;AC3J)%knwib-^-pBOQVwEAGl z>wT#BG)*Q0XTm4Af6dD9Oj>UeZbr++7x}>qI<^q6g%PzZ5t=2b2y*c_vlEDOk()-g zqdN7`MJt-k^2%O`GYGQ6gAj-5k#}SV#e-Eet9I(8wK$2_;^~M@LcivT&`t1y0^MZo zG%qfKo3zzxZMIsiC1}pV?6ek&pt<~!$T$fl7U^Vkhi-3ee{F2-jw#wlH$XqTCmdJW zf#cf28VA`X)gX3o^Jc5*)VH?QU)&~ku-a;_Z#ea>?TvMCo?aY6eR^`Xd%TcW;**kX zVSY7lKx6QA0mByu4)_r%SV5>696JkgRz&yurRYB4<)4dk0RgbEg0N7RCyLB`>T-Qy^01?eU0q zT@(=@YC*zadju^B_)M#b>V!RBVIU@^J%*?bq_^C_YIPO1Z(?yBuG4tpMN@X^IjQTE zjmX5gfBI0=!L?52VX0zRa31F!1kuA6Kg5985aO5vfU?zDcBn4n%nF{IO@a!XR2VZ1 z%RwrLD?%gB$elo!DHMBn|CFAC;c}Fma2GS`6PR<5{Rkl@rt$<*OsIqiSRc{B2az8} z3X>@_nKCP-L@YD$3tb4#_hK%lS_?9po60I7e?+V`wcT#7*UlS8iQ`rj|@I5W4Xfz*pd6 zR3|>RN5*+N=dbr>kkuZO*}3z`{{wYVr)d$$vd; zq)R-E4q|Q?DS}?eZ|Mh+MD+2gom^aC8UCg_D!=J|H5PsBG z@SR6^E8%Y?)Js?HWP(T!5^FRf(uo9(7UZ2O#}g!#!Z6v_d`Rt#SogdzUOQ%kX#}qo zBa(oA?zL0f=YC;R-7Wo{xgEd;;E-TJZg(n%x$xvgRRZ12r%GNEz%NK=et#*ow|QVu zjVkkk$NNZxHWpXJI(WkHdr(076RvKB$mMOh->)9*Hebv@d`l6&n^3HG4eD!IuJL#TaBydbYEp`9LhT~7L@BE8owM7-(kJ_}HHv*${< z*5z;{lKyBc(ax9e*cst3nSUqAG!E<*+qfD#{5^tnR1+ybHLo$){c>L%usfjkPCUr3 zBSgV<4d6_;`$~NG$wvII;x`*&!h{Odk-1e zb?N#d0Rdg=0OaR02mnFHbWSr8P!(8aSe*auOjy(jyPI7oVtxFzuUoFjdDYDM;zuec zx6Rpws*Ff^8eu|>oX7)Ybn{P(_C`l#~_s75e z6$_f^KNFQ$w207WVU**0-)Q?>;H}2TOBT>g3iN%L)A!T{SAWtsG3ooj=*GFgTaB+w zpRJ8F*8oave;@|syMo5=Eo(EDA~Sm9`j$ZIXd`VP{k}l@1A+91%LuJN`sSPGg;~iY z+CR5Q0DpfZ(Ef#}<}WMID{PCxbX3^>N?`kIf$eXe1zUt_KRGHt_*;#(XQydW!17-; znx2D}Q8ERS<$qJAFn#sMMS3pBWJ`i7Q2m5oW(~3~ZJe--c_X!bG6W}JM|4lVVcSF2 zknBVD7_*P*zRcbi*@uul$Ls@oD6{EYQF{hn5+M}rK{#W9z2Vd?-~>(>wI8zTyjOGAeQ~B( zB0rP-(0>Ul@;x)o^E_k=hdij_VqRsy2e%(cb_HuD4-9_A&s2f{J7MH4hPRM18}ne1 zONAuV{l1twjw<*Vtn{cpPwt8iuCHG`J#9<4SNist+$D*lg{2rVY9s8L$DC{H6(vRt ziWs$N!~_+@R1{wz4_dxN$I|)ZVrR&(tj72XxUS6M9^XRL(I3>g4|YY=2LDr zqiMw52Mw|wxouu<7G%FqZ#2napMr1W2mR%)2kGNGLPAe`IcqbL!eGhC=U?JBGA{7m zn@#w?npF=kXS9{y$f8pOIa>`MpfT$Kw1kzfLjf~-g%yAqzN5*e&Sv_^2m-3n(LN8w4@avnh zjz)898(_q)HmEq!Po1l96W^QDu4UtmOnT)}2rJ5y3qPT0K_*lYOY{D5? 
zB3DR{JjNTOxrI?p4qTiZ~yAml}v52_4UgWn`l0R^%zSCCSU!&Z$jq-8>OdsJ) z8!?$MVlq;MvtxvFQa$nzhGoYh5Py0cih)~IQAB=T6YjeFVH$ZU#_f!djsa}CpGmpn z>Bc%~HExjg#wOY5k(aAIyn+@9mf4QROJ_O5`6FvLqd6IZ54@QJ4>aJvurNGuPOC%o z`{o`ziMqJZlOFP?>0$%w%sFM?#E27DbRGV+p{qB4ruz<-NsN-Kg< z-Y~!$dAmcUYXRKvgz)BJ*68dVzw+QA{;OOJR0A!|hT~3U;wFZIlU8NwdS&WHW$Nal zM?U)>NIieLnJX$h-v}nR(vj`HP z7unRLRYk?dtssytxNH4nf`4-v9ntC$3%R@Q22AT)^9n(Cu7y^PP}j-Ensf*a^n!>p zYNtD)xN5AfhTXI$-T4ZtbvxBJQadA$IyQhg1g-jk8&Lj8e7sU|`&5Za>s;|NtIV)+ zU{09R4zAL1fdc|{b#OD5`v5>M)OVOaCt8qu%cP0F${i1%BezSN+<&f5-|o1j?Qqwp zM|0PwpY4dKi??tS^x-NbZEA~***lboe3<_F-+fFXOL=$vsc_^mRXz5`UUz$*7Zq-J275 z4#3@PVanakg4`Q$bF-6h5LxcI@?U_C7UW)pZvmG9taE^qaIh3&L!wv;u_`+)wtVfFIo z;GL#!0IPh*$7b-}*E&R)5~?u8^0?kDn#L)P6Ao!C1&1`+%JFkMcLjO{Y&G5G801;c zRCHnj(Iu3Dihm(@$!%2l7N)kUd`rr=9<{}&oh08d@_j)qf3?o@1)Z;4xkTk!REtin zt}5&N=knLuK^G9YVQW?UE8OAoWqfbY%{`{}@JJM^y^w{eXi4Sb&{ewyWepAk=WWB< zt#&WrxR1R?em2>n43W0Z!cjHRN)!zDbF&{oB zDkNg{Xn!km*)!5=Yp|mCSvjZ6#?z(8C35!yijsaUUIF%@o`0QkKDsXm~1y z1>iY^B)qWfN)4@8dbo)4g^RD=@#RWfu(MP6OuNJWC+@MlTFuLIyIv0)s;Yf{KSBGG zu);rr(_ULEf|C%8HJQ)*T@VbAc)fZIwyh;o>VINiE$d42ZHagx4Y^_?A{@$%#A@fi z_@gsdM9o>^z?e^ZWr|$%jzhYV_mm5tyEX@_d+=Ngj*$3c#aMr=7{etkOz(w35V75e zt~4QhNY30tw(z1Eg1FnvL0ww{p8u}Wtayz`%4yE5eDgDXCm-vCkNmurXlz$H*o?}W+a!F7J7(Nx#p&ed&ENSBza&Hi# zwR&yIWwl}7S}V?b53n!i0Ioyb@tn*@XFNmy~byk;TaH9e(5c-jVGYk ztj!&+bVy*O{P{1~-;XEEX91iJVuQ|Kvli%HdL&%NS0Sx9OEeD4iN$)5>ma7pynj_o zTjV{-Vw_{W5=JaTH(Ru0wTfw|=KbZq%{98jodwt@m3Cq69)>o*(x4-Mx6GlDW%K<|Wadlvg57!}t_DN6FY!$FY5m zPf*;(Hkrm@=33*-0-wTUY~*fiiEK8*?74Eyp(}mc2~Does}8mSl@9;wv9OH}>f^S2 z%L$$?!gz2fq6`hLt=9t_wpmdLIk=iYB&ApHK-kW`+(JWGxnc{+(78luj(@fwOBCm` zXH}d>^m3i~UXS|}FX=c3+|)Mj1hlibhQapaGbCr%zqlxQj^H5Z`WyWyg4hEtF@+FM z@gPE=V$Ml2%e|Bz(I(laO>#_|?Q18=EU()>&bgW5yn;}Mpo}G1Niz?2PF3J38`p9E z@pA%VA?`NaV~DzXWHjWmdVi}X{}LKDuO5?q-H4#EzO@$1z*??-ME%_c1g(JjNd%6o zwXlFT2#Al!K8zkle>_QMEV$@P;0u&g$qmwK=uV1K#<=0XK^hr*@9q%%P;k$ASpp2#Y{D`VJ|VTJd))W1fipB=R7jdG`?ZE4Knr<}JwU#WS>7amve6 zQ+iy>7d^OM+f{CjTz_jMR-KBBf`@Og!kII30Roc_(4&#>N3n~VNW$i zVfG2;er@?gUMN==rIVt{T~7{Vl@FJK_a3#V3*Kv>Uq7My9)8K_Jx7=s5?$`cC#~?~ zqv)=TMq@VFjZk)#7A}8yZxkbsy?$z3cPsj)zr1rco~d(wg`LE+TT8NinswKInLY$Z z-fF(f9`!$k9b_039ecqxV(vWL=kP-6M1eBb^81g z-Wn^z8(h*L;mIob<;R8}L!V@9TA$w7a2g6@!(*t->A4p7d*FY89z}Y;Zw6sQd|h0WUeLEHyi=*l7Q$n%65ZD8fpa|Xf`GP1{WiFl ziJIN4MDR zoFtPH8d5~zv_*gUQ|??zIh8mKycCn#y4z(Ubc7|SXQ?Z9AOwwh%Z#s4h>3~8)^%51 z1u^0|Xs}uQ1eM;4n(xK66IPq1+?6hd!PGr(fm$sehR>Y}wH?bjXBJox6ltyHsN-7Y zT@_jRfE^|Jo`0GS!!i1cy%h(yKriox()(zjmFu$qY}Uy+w5pM+5@x$60yf|;?5Q{!#iP>?oVK7I|b;tbTv7=aaY^iH1?(EH1)6nqR=#YKk zjGXQI4BZ}bPmT`ZU0_UUf_nfa?K$MYb}oN)@V|=g8lUdvUmXL_abvozDE7Toa~1p!XVIXbyir6)%4g zcFgG3)d8Bvm-+_mn@WjA`|+WAUZ);A;v)m6sArkc#`^W^H?G>lYemmD$oTEiCH3v? 
zTf3UNXG2%^#E#oqzd9gCbmQuP?9;5FPnoNGGLDcgK87nY$ti~cD*h< zDGeC}&yH9(>4B=)XFEfN@>twa0&3z-zk>s^wq9TEJ&Bw#oNVdEMYjiUxPcR5 zB+lS9Ha_{z8n83KB7nb*G#P)vbfGuz#tt`8!Lg0<@jPJ^=z)jXC!Er?w-|Xqu{>nl zm3#aHO6f{-_ijPi${uqM;pl)ic3ckDDuT(}QZk#^58(Ay(}C{4D?0B)^dh;Nzt>j5 z&9i0Cw(klRz^hD5X)r~KGd{sU^_T6O>C=qV>v8Uj9bROgF?f}JadCgXAh+pCvpv#I zU0mSF#A0uc($5MsgC#=fd(m3nLNQqFepXVn9ymm$R%y-V z)%mE!8)SJVdK#t{sE2=FWA@to+PbiY%jm({TdWnK|SLm?GF=KGEDc%FGEO)cGnJRB3hJ7OVU9jNX5Soe;K&*M-gc3o*Sd zSA6?*;av@qaOJ%&eXl{1SbDRiOYiketi2cFg|k@9vQMb>n`Ll1l38(<`bmUJ#WkxA zJ<7i?E-c@2dZf$!(aMgQWe%~E9e=I$90PRbc9ukmYT z{0Zyz`3#jpu17_8>~N>=k?tdKIIRpIl|589Adljnvmk$Obm+P7TIl+)W|0W(d21G# zxE7j^TMP1}$d?x)l9i3YvH%0xS)**SHi)L&DmS_ma1^bnGu#nIyC(CO>XE#B$^WH2 z5&#)v+H8+^dL$r~l%Ic~;t5YjW#x;u^)0RW_x_esGyrm`3p-X?wB8O|Vm79Mb(Bt;O&SCqYUU&Ao z2RQ;X_1LwCb^1o9%d>b=kL<%fTc=hGG0!ZpOoB)aMq#~n0nYQQ*_fK*V`|tJ<&NzTMf_0_~6GZ41H8m`*ka+{!e&mr@r)($g?j^A>%y$*IK{r665Nlh}A zSyeryAY~4jFUTwN!WoxIeNTnXW_9(gCb;X?&cF(AFB4`_az&^Jgi#L8=khoTGl+kx zb(F|h1`~451$ChoR8VSbqFveORaK zj<7X+oY}`QRRl0zx8#)zrZYwOU7 zAlO=_NdSE`i2J?T1VU|Fb=zW6`fSzxxqB z`kuI8QfmT7TeH2#!*#ZHkEj({?jA^`hO4XgKAhxXcTdG|H9~71$-tWXDy(L6u~1&Z z%iHi!tP#0FJXj5j_Q11EF}E}}5I?N|Kc2;00^dewLb4d|VlZmABGw(+D#Dw(kvGn# zi1H}`QE<^!h|~jFkw3}9&<1}H8A9oGtNJJ43nM_L!i38?5u!d_>%z)QxX)C=wH5_` zLa)ZdI5~@zCI~3u(GY7^JpjKHJSsQebdpM)z)Dl!JY?`c6>=I>)N=2N2+04{*5sJB zC3CM}L-E90ILiln! zs&i$i0k9e%7KkBY3ZZR7T4^S*(n1ALw`yL-3*54E4gNtIC*Cy-*GN z7*)V9%}5N*DY|6l(#wBDv-Lc#A;M=EFbW0bWHmZO@LC?E#f&YjlJ>3lx!Z#rqOmi1L|1fxeBxF`nB)^2C|I^c=#kG| zMVd6aOf7sJ(xe7&8JBq zU6(iH4l6S=w>K{8(d6fq&Z*`r`D@H1mnP(@rQ#8Bpz@qrohXD56VyGs3+GVM;9>X&!7gkF66l~9M ze4lodV`Yg~dQp%#aehi28YKr!c6{_^93RVeh>3Ehy#0QK^MZhszP0B%6Ti(~^`0IJ zRoKHXESz1bg^4C)pCPHnu|N!xd7TX1JtTkbOEgFJqdkC|0bxBe+qjfJYb({P zKrPWfbY`?u@fWX)^2A0^Lx=J5At$@ekvkus3I4G*GuuaJ`}Ivbe(*~8&?p*uqXcBV za#cGYRpPO;sa6fPI)U(9?O12~=nKgPin(A?0ckf(g-dJU$T6;p(x;>r)JRIG13L0k zRsVm0&t5a2Qg4Cw!Imhe`UIT+7_Cq-zJmgQ!t~Cz6=nw%x`o$VMMFH=m_hmHI*=Ru zQ0AKdfFBQR6lm7#Ucpe$t#l_$260MiEE8J=C+zl^$#yu6SD?vxW zk=4NjTy~x%mSDniIjJ^LwrieOc`?FgnOvF8BF5sF6Ij07M8g&2sXiLkKLvA3il~1w zVyUABLQKW>MyUqUsk|5XbGjFJX7Ekpi&)fdYK3UFpdj?$0B?bg#Vmg=*L*n$bjm8U3K*FjMw{|yhfq0=#Ob~U60ntr z@?1tL4dF9|*eR`rR#^AlyDcf7Tld{HW(BBffbH*7XD>cDf_2XbNh3XUUwIQCp|S#hksdT5q9xNEJGiO8rrcKfd;@@ zm(BMBF#%4O|MvqGOWjyow^j~ro9oumgWDxmU66Q9!>j^oc{a;*-P$(H^2im6`5Uvc z=JJ)NU^q?KAUapTRJ=`XE%8oYs-COWR}c1$yVe&MLC#}+mtXh;A1tRnK64>ME@pPV z%v{KXN1iE*jCthJ7PlQD9|$j4O3Rg&v@QtZ)K?Ey*O!;7_yZh&P6>Cyd!n|O7|hJs zvW6lu`EeQegQlq_QTeQrZico-Np{QTtb&%=EvZKzm8%Wfusg*}u%RYl;1z5`1|FD< z%Ugo4Dru?hfkDgyp^HW%6*j*1XigN~FuAEewLXs)%sHBm&Zth7cJKN~gPmn!lCzvs zgTQq=@bYPZ1B1wa@O-E0v-lQqoTV77!9d5gWk z_!GgiL0)JGMp}gKnp|ImvB=z-)7m4u$6!rnQiD;+^3D$H5VT-fiRu(%8qN`o2|MDt zKAJUEn6W;Sn?x>DlP@+r!=S`uvL&y0L-JrO>_3d{c$oz?y@#=O3Z8oLQ)qyYE_M=}W8S?yFj^YJxM) zjGwB<)xH<9ah@F*To%C2i3~_2hMh!1iN3LcjsSAsTYN&!whD9W z2T(kr1!(Ahg~s4q4|Y||kutdMgqu|&3{Cmc#1w0@uymw-u(q2qXxnJEd6cF?Y*UZr zgc7;vUz}i=3{E^%^u%hAd;uBpBq~0YxXiC>8Tyuv-bUn(K;;`rvtS(jk#OWWQ9lHB zUEzbG;zWai4f?+69t0DO9cJ51y>5w4(hMC>M4EAbqWr*APjPZ-;+8Vd1P5|sGoTq0 zHe)KMwdv4oGNCntXFPLAIg3TnjNGB{6M6e4-cUPWmrNOF#(=){vD>;SVl?Iv10P`o zHEh1gF@g0F$m_@A+4#$uN`qRnXk}*S%*bJb1X4CTi&$G4 z)|QI3zd4A6DVd4dxZvHP%2)E`;xRwjG$Tndar1hvZ?&_I=5HY4o`LH33LW}TU-Kl! zH%j1=UKmhO-rm6^91X3ITQCPSGlYAH`BO)K1Qo`{k;bIzk zdPhmmol!j0uR3Z5LuP<$xx?FpHx-QRV43}7V24|46EsZUw}#9n0i9U=I(y{0yCF4y zIB>C}TUOFu>99wE5;0p0*W@UKUdPi3k3kugOq_PxI@fP=Iofqr^G?d-{_%ZcOF^b8 z@)k$l2XkZm%;)?p(mB7v+$NG+^%X7`m+<`Un`H9^R%dsk%ibPuYS>|&GOjk~agEQ? 
zs9?}ao!((_dNWQ1wPN(cW=feNHg@=b@DL@Wd#z+@hGmxN3bT(-yWB*(srByS>8dn2 z$C_+N&wV%#94nB6d*5S6tcCH+vE#_G6YOt=#QC(ZVFVNCnZgxv=Gd;KsBMpj@wiLY zs$h9M9uL>>o-!T}^QU7713ikwFgZ%M1f|PRXaqA6j*_v*O^cqy#mg{E7Nv)O^7Eb- z!jmhcNy_|f^CKV%E8cI{V{QUFf&v|O*!1HEFMFf4sx$XGsppU41hQuLqI0eP%1fjz zZzTN4)tjYPOYBC8I(3mDh=P^E%}&lnc4iY8Y(+aa?@>J9c9`qUn!Kl;*RFW~?F#na zuExA}QD2-x%M1g{z9R#+l_Gzi&#Rrk|Ju2l3Fpc!(Kn@!U(B(L`|Ub#mj}*;ijCz=y0u_~c}5As(lSnL zgMxmqvJ5DEAh)y6Ht?7a`@9VclsfZP%Pq4$$r@uGdV*C(nzp9U4sOJMQRBtWGA%8f^GKNXGqz8~T%>CHWgitS~y!x%DKnjur8J{|>{3os_2 z<;M??#&P1g)hXZfo1&(4JPOPxHP*D|sFZ^cFu4kkosWdzCdLF~9;1PsJQ8wmyPmr~ zaH}FuV~(p5-JOzrhZTc=`$Xt$qF)}!2<>sqahk(2yc8+!azxuJei$cU?=vsI4fa zSL#J*A2wxK6$+9BkA&=qs52l{-hckwk>(b{{ET6F%wX_*La1J+a-Nl-HX6y2L9ot5 zQTW$cSArM0g>v?P1xw`~+;C)HxgBm;e&$gPQbPeoo%!e!=DL-M#H<{m0L4rTh|rYN z+ld#(Oak}5haWgs@kBtykHH!%SW^Jm%L+?{hFOJrHg&;JZsEo=qonDwJ(dNBeT_?J z#Ac#CL8{*jar&EX-FHhMQ zCsw01xl5Q_X-xz_>0870q$4mbCScg-XbYCLoY%90>nwC43BI(pyvY(CLeb5;QV*l$ zwux&e_7bV}Q3n_`yE|4NcIWIPEjALg@3TW2aqSS7WQerA zzmJP{CXox0dTMS)uz(j;Fs8xat5Qxu&bHtqE}&`xkoyx{Fq*MVQ1=G~w#HKAncF=) zcpp9n_FT+!tY0ZpTeXrwAQu4M4))#5F9>D54AD1#xh81jVKIzAc+Jq$E@}N<_0SXT zrCMjLa7q^~YMm%|CXBB;`iF4^=TEgXgqn>UmWT zR@?T{Go9&jY8qE z(bD~9P6m~Rxk1YB%>->4!eZxuq@bZK{I(re!{c&vhuijnD+9$fD7T`JTZxC3d)o_z zWeIBu+bV|_3$3yO@Fmv{5@I^mwz`Zlr!i)KWIfZXZfhQExThF1C`KhwVh18V*5Zpx zhB}6}ya^5=sN0+Yl&0K@#Owo}V{zLjjKuZ%eT<67T}Jk5 zgF5xVj+3)O6Ru7SgS)qxcd{r9UN~iJWe3MXyy~~Z4nU%oInj%R`@DcEL>Pms~@^^8u%0*<9Seh71p;+7?CDvU{6R|WH4qIlS zi<2YuIru2jU{b8tz+z%gkBYt_#5Cx91I&P0YWiv*Dp`_FR9!4vr=#LD`94 z-^bk>jNvL)rYzoysrLjp6I9%P+~qDBpFkdvccKgq{Bnv$0fq^!BlW{!?*3-0{M_sT zewIHWEL1xvNrM7Hqhoc3T9580nOjhWl>NvVh}C6`r{n_aEAv&VvmM_h#@t$=UfC>7GLxjx6? zyhfIq#3W0N`f5?mRCxaLYkbj|xm*Z$gD8KSUofQSk8C=@PWR4RQ2;J`4N+v6^qoB0 zbAjtX+yy_ts0XZ6Y*M&?gQF?|`v(Rw$(^tc_+>6r0>#5N*Cq{cx0vM}N!2^u*@>?@ zJK-0+Qe|qM*0g~frt@(Ss$5SMEtO?@(^4V&nZA4~<0lARz8QA@m7LVFDTV5}2vkd8 zfzDM91+|Ptydv$NVK6zV9L^j}jxWv}T1lyT_(X=i8TtByoDy1pp!VVN3-0}0bg0GY zYabFRyX(N`0CCenS##L;viw#TuVAa2RvL{EGl`11}JNzMO?C2QQydo6Bl-~5~ZR`F$DdBmSv zwbJ)}tRH1Q0l;Csi5C8iG6?}z{jyxVGW6k0^kG?{!)Z%`)%?RTMc;tByjvGrKm zi^3?-tC4s8ekEP6@oI6xR8m&Q6?JKWiN$7s4W`8BQ{#>4!A@c1o0~wA#CS>W=1?ijS1v= znSTA^;(}^W>CA)%X#B#!JG%ugQ|qEVGM%oGAgWTMB22U|ja1n|<2C8e2g7uJ%ONryrvnrjZS z?h?iYoHY{IqrSHUGVshm9)M^fSr-fLTW;M??7(*@&ZQVEGdmYA_p6f{wwu(d&Kg+; z%i#nh5nJxZr&yY~hA5d@+dR@)t2d4@J7$Tb8fZ_|Ce)SEabiRxEx?37Yn0VbCHr93 zhWgQchRY2 zvvvM1J{tIy`+2Z&&mGIKC!u&U;9cE5-=P89!NFZ;d0FdF7ULHUu-DJ`O;xv|wW2UQ z=M33}0Ol$lNKj}L$cy%XtFBmQxFDW!XnARWwpus6AcvSiqqUQhs*C%!gvRs&J!P2H zQ|N$!aVs&JECNnLVFOI4h)bd&aIjN`L0u!I+hd*eu+u1?%!<$vsN|jVLF*GH4pT{m ztA!kc@C1V>97Y_CzyDxqLxy*1B!R5g2i(kY~ zCz1hQ!hm=TdVssHJj-n4`28!M+xIZ&stCI5v4CqLz`={E-B+HI0b>k!Q|1!6QGy{d zlV})>V+@knM2SD}SlII-Y%I|}D}v&Gry+awycG>~xAnYDN@caCXc;RFP^u0OAVLdD z=Ei8FS5<3^+Ek^gR9!(Sb_MGJMs`MSMqx%)MoUIOMl?nzMj1woc{UCAb@eCB`$QTe ztVY(SF1Dl0#!r;H4`e$8l*!TtB?#xUxrq8~%w4^HdhEryBjEN}yPSv$7ot&r>80mQ zot3%OAN|NYt5MB4jcTfLnfqxNOzwYUlkewjiRb4o+BU;m5ATI0C-HyQAGp~ zcRlv)`fMI~+iY%Za^2%Cu8J6cFtmVGGlR%|x0Idsc}0h3GFJ*~z%ee2&Yw*3+g)_D^HhOk0c1A_#@$VeFek8BS^ofhY^s!zy zvaxZG--{D_G^^M=C5_f)=^`Stl{ibL0i4hd{Ma8118*(=CX zkv-g~*!t@usDW|+p_-mrxv}QvKJ@+zJugcr^twMs*&}=9<}WboBYQCPT=o!y%(P$^ zP!yHf28{M)_%D+yxLxhbu);!+6_H~+_}xH+4Q{#+8y zkwVf*%B5aHA?4GcAr}G#NX$+nbDInPvBUhHrRCRZMi$!kJ8FJvFwa}|z{kgbE7*N{ zx!2-wZy}+_N7Y9Jkqq|gKX-VPIuDj|MZTg;GD8TJi~8iFkw96jJ_;|FjKa(RBM z$1(~_McE32kZYF(%+*Ow#PkG-4*sSKO1=g5tkaDCI8ko3)I z1Zy@}q7uViSsgXOn~BhhjrO&F=KumTInqgSH7=d_F?(y%-np0j{V;%=TD@n80pj(6 zJK<8{HH1MPw>3j{?{d`Z#c+q^7bN$5@J#~HYJvLL@~;JOinaz zG!C?lDSnrXvl8W?y?lz4j7{}NH*iHmqeN#YWVUv4LzBjqG`+)R(e?3viEYwE1o4eE 
zT3iJ09JvwBltu#nC6q;G%T1RtF~M&b@$ia6jBcSXK%=~ZLjc!NP_Z?PKoD$&G?WpA zFM{1@}|MH zfk$yGI@qi?msbxK%om4$d&!2&R+p%C%xE<_5q7{o7I;POd(2l>)!5k~-2iOU(YVa| z>yTLjic4-0y0)_gCq?i~5`clxSqdAWQH)i!LJTQjH=9|+3d6tKYOFc4^NY|}&)j(G zly3l}5{om7+lch&50|T0R_>gLIhJkN_`CzfT{yz!%ut8REwAz zUdZipA(}AN9kEYtjXO$b3v!}FW*;Sk5tb!YJ{wc;>

Sh>@{~wu_hQjx}U{ZBcPFAqG6A zNHG!&>=Gjofm2y0e*etwoFNfKB=&GoJb7^1sS7*~g$czNCXi!B?CTWF?7}G7_qjMT zm#*Z!xG27VF?VSPhs)-ui24~?o}6#v=&TfSifh@F9?p%-><)Vk$rlmnMMT*#oV}HN zyl3z^2elb@Y`3hhlidQVJrT}7aqA=v=bpdwia1Y?H1!hv-p@?l{|>lh5@4Z)IS^1} zk^4M@mEZfec~g${Uq3$_n4jV!&gvk_l#6W73W+6~UEoV_+@gS;i*wHJh-)&yD)U>6O=E$S@ED?*>a)yfGxUEd|!AbtgRPH?_%le zdzX%?Yho9tjli|@))3m!G(|lew(S%VD<%h)xlUN&+VKKxk?O05hQj6Ip;NGYi?PQ~ zrDrTA+XtrK;8&fuG7Kiu;$l^Q`O>vT$|6u@q*aac+9qc}_30d1HHd`{2&06J?88Cz z(upPYS10s5b)2-FD z{SBI1D0FEucW>h-e!;@JqT&^gy9Nh_;&P7_-x@A14s{+nl)IjPGDg3%lS`qr93o0L zE;BL`+m*H0;+1ACfKM@F_4dX3#|pH^OyTedsf*~#UkwZtuNi70Ak zr>rEUC9)Mt6CBY52fXfRO>p*Vf>YjvTuxdOobo0(`)GpN4?~j9ke!NYzz&*n&xS6A z=3Ap;dZEn3h;Xidi3-4IQw2Q{b#7iD50xX8ETv_i%(qe352aO~`#1UK5hJ3&U1BRc zDP9h063arrUCnn0rwg)Hko@z8Hui>;(KEH`psOa}Bzf;kCF+}s6rs@Hl**9&nDg|+ znht!E@OIEaXbSOailXWAQ{7{uLK<9O>|sFc!I6u%m@>hC&GnF9(nCV*f_KHvj;Ol4 z5m~UTrzh9tu1e}RUAc6OKrgQj9zLC2$(<8uNrhl{ZwP;9fR(Pn%I5B z>?jX&v#i`T8`v~Susi$6HJH}a<;Zs{4RK#Q8W6_6M}zDRxyQreo(xM89+o6BjB{X_ zKBZ4*1$9Lx-7J5Tfy=`NE=g&4Nh13qcR$O0Jmg`pE28)HbFvy9Uahb#ISk1=`@mVH zXG*x!h2abLx@U0J=NORU>DjoC;YpBVVD60JdxPO7$&x_X7dP#pWJZ2M*7Te(x++>= zm@{Mjgy?S`nSNtQlJHVH=fEj%l#lzsl>Mu8tJ0gfPNaWXw<*0vv;I-Kg$jDyoHG{9 zkLPWe&n-UT7G2UPidyKMW{&pGeX1EcDdGd?$9Ydsym3Aa=j~zcE10E|!ZG^GH=bki zx=mT&&B&hb!>x@{T7hUq$69m5jtg2%GUtz3t*K~&ES%|OPt#D^uP)OJy{B*=Do$!Q z{qNCn(p^!%?N`Z(r<$SZbQw?}UQv(WLGxL`Y@b2Rn;EyV{M#F~bBqr~R(8+_lKmuwmYQvu4Cz#0T0f7tWt z$!M}34Z42l!9!RKlIhVtSczmBNV9Tno9x&-Yx?WT;)5uR_=cVF04VXPzU5hWv(c96 zz*i3SDcXS>t&!a?{+O6}R-oIFwK<{pO{^!|m5tHA{GZN~baaWiL$539q?8mZk08V@X6$ggJXkXkjmR3kgY zsF7-WVQFKdy|K|*+I{wo-DiKc^nbth!GHhc*`@#f;DZjSVHO7gai^WtfA!>=Lxj0}EfC$VUGVzewifKzi4(b=j?1!4a=C?tj1(%z zmni9j**`7QDfw4oHV@;4e?xO6O=B-vXK_jc1a>>=SYaA?+f_Eq6Dz9>4Xs?^nST_q zRTn<#)r06Mze-L&c!uEls^?cB6B^Rckb-r&Toq?VaBB(AqVVF6l0*xZmG^J-NXYDM zdv3)MPuR@QQ9A_BF|6QIeigp+m8dlh>6^=MAL;Du`0bU>&Q8^De=m2|*li7|Fp*-N zQV|I7HxCOzhD6GAKlv|D(wT(H1QsmIT57IUXJSfP>O-LDe*t{YcwurPISmRTBC&oq>!rJNxNR(5AOwG^eUfVSv+_A znHs4Vx&lXxE(J~6OKB9wd)E%&+1{<-h63+wT#r!(oMlw(eij^n<8t z2N`m!G7LN$54`A*14^#gamB85$9=x1Qb|VOMsC*<=3l2l5*d1B=P^Q1!*jjSq{k$!v{Q^3_>HDTALfA|zBvVu4Aq@IjCJ2??w0Bu%y z&8Ac2PO;feIyAF7ChWqnq;?OFNBgY`OY~sqc-6g2xCf=Bqh4|)DlY+;<9Ogfvg&ds zzRB`LwbPPJW6y~~_ttZ5mgegBLT@{P#dz;bSHMCl4?x>han)$aJH~M`9(ftfpc$GW zDF{H{e|79>C`4dwM*Ykze0fH_VIwW<`iM4E4o)@lPw!}^DdA~g`~TqR0W|KsZ~7J9 z%4T$vIh)Xs|D%0t+AtfmX*TJyxlC8g6?)h_OjpfSdc-_JkD5p6G4mKbZXTz$*=*1g z@TY4woAf07am?m2U57ue*<7Kg;ExAe#nbSoe`hvV=^3-RnzEM8?#^s%1C!(CwiEeW zcYqk7b&Td_fe_h{c<|N`TFoPSB9bQwhZ%=SqnX9v(_39paWh(#9_og`*J#Lo)}&%>wO0?+W*acp$FeypaTC(p6orVpj&X zGm0vTMZ&;}#8d|9)w&;_@_K&gxz&JafAlRbE+lgbo03{kBdOT^;Py4Dj64@}0e@;i zjkGGT$pte7Swdnk-8d*9EEJ$(iXyITdZBP*T){B(a#jI(p!dG6Z(zif2n~K zZXNH8s)wG~h!5gReQhH?c;}&64quru2{RERu!>UIYnslLoR?!%a*4r6Y0}&qk zU9S`m{u6iuFOF@{W%l?)$us(ym}Dzt6p2 zZvT!<#d7M25Wc{EniM1;IA-uPp|BG04u-D-;>)%%*{EWRkP|0($O#bwf7NlK;bg?M zCpCxGQy%Y+oWOIZPMS{nlJM8JV`nryDZ}5^!cWWaceL;`GW-=S{5~1}MJ@cS4F9qgeolt}RttYX zhJQy3zaYbZDZ_gsduzHbehe9QeCZMcO3f++1-b+I zfrGX5Box@gjg5Q4D3Qa4tgIYM4~RUxr%MK3mPZ&R7N*RCaOmy6fAj;TB23>6@?xFy z(hu|i%!eDhE-z^crlV7(D-Ll_vCY9aEv>+dFNq55zW5U78BhkV!8hj=W-%AF)1{_2 zNHZf<(?MQ$1$ecs^F zF!zW!LA7WoILq=%(@7<7k&P+KV>BlZH_IcKQ;%R?Jv=3kIrRwU)uWf?aoLT~&+&9) zCs-iu3TM zm&d^%73Z8iQ|P)mBo?M(J?`rmkNaxuL%G<8wAhP&Ki?V*QgI#`#(ofmIu;MqO4`g9 zwW-%6&d0}E{DT9ppQ{T7NgU9(NYD@$Y(e3sej1OvHkfuwr%rlDmmnnsDu01InCU#R zH;%UXRIoc;D_6kXZaXi4+| zs3FEBN#>xt-N}qH^7yXHoajuMdFa@KE|AOWQlsAVw$rINeOcPqUE0^(+Siiybx`{Sn^IEhNKEsfZWqOF-(xl!UhUC2J!1dX0z=v2rHa-kzH=%N!u zHSj`N;P%WqY|o+;4C1pg%om=vav@PM$S(^j4J!#mun-J&d0d8Vi_0LkFc&YWc~nGb 
zDDS*Zz-!GJ^%_+QcEjW5z5TVsx)+z5CIoT@^fj1&+=-VHCj=xCvm9M>d>>5{|HXD1 z+qSL7PQ%8wt&45jY1ptq8{2AZHMX6+dES3+X7?7e`&r!H?0n}p6VuL58cHA2i*Y=4 zAI)c?bQx+$b~>q>xG!r<-Awy&`-EdwOq^y1@M%(ZQQz~uZ-x_#H%Qt(R&M*M5w&01 z(tl7x5&_Psr2^g@nz$D9a`Y{n2-Yj5*I^<01`cCE=*8+X`)WEqY4JKpk<2I61wOKo z)3kT$4$3NIExzYfNF)*CcQdmls}rbM1D=FNzXJlI_$#?n`p-8p0&ag0)}hRyXij^9 zDDlpJ!MAaftD|{gwq=^@ST5_a$GmR{eqMkx3j(rN17L7s>_--BJ5TN;*v_aF^yN@m zr=yBx3Wl{CXSy3L8x?!W^V*IFS9x$CWj0yfvN)IN z`~-U$HuRNZ&(`HOh~2-|CvFh=Q~!CuHdot*pEVN^onLG9S@O|UY05l?c@AGT)!Iq^ zCI|@5X>MOS@}=U8Rc<xjxrDvEBAhOSu=^RHQP^UK{8!^n}v zSBp{962UJ|)R4et=V7R8@fN8k&gEszN%PGkMye~8kSx&I#IoZa3k)PR%+9{~VEcBB z?;W0D`gZAGnh!=$Ft>(7nJ0X@8tjoAR0RSXTNZlA7{90fLqd4X%%sBm?Qa=RwbRb@ zTNwVpqRjKnUI1@j)m&=Ez8hoWxBk@i0-O0QUmO9cxQrZAMw(`vIktSnp5WUpBldE2 zUv+~lf{tL$mtURvL6BhfH5sYJJE1*{9H(5XsiDlJlxDR{4d@elV&5IADtuC^f9U|s zt3J<19fJv+ZR3KHlgHK?(?_dx(i~ke<@QI?cX=B=_NEkVdsH<9n%R$6n7n26(_W3# zKLiQlx^0H-;*8QJUax!}wd`Ndi%&a-F2|FauBy^vaR)y!IJxi%UxJ?)^_`p&13 zvcK#9SXii@TgP7Bt^4;Za4Y!3%k-)7DfqI|ci3iS~n(f{{*^g*G zq{AB2sfWwnrHj&7{LR6wC3x1(P_za@(WCG)Ha`a}q|02SA6YN^>NOIL1=&*E2%*U5 zWG$C1>aD++C&Ya>=} zbwUk$mfZPN3IX0^6qpQ=~T+I?U}Jn?})xJpfwMU zHJ-XYQ!F9L{lAcn*C?Lv4mPcgHz+BusT^149J0`z-?Jnoe$0Nlwo8(O*l!BzMlh$w z#7TH|qr7Xy_Zg`+A6at!^Ku<+UAevXt&d?|?wH5;DHQ>2y?9W3G+j;&IM2lV`~Lbx z@XRq@HC(%5kM==6s7wQpd??hS* zN8~R=?&aaF9~XvTeF=s8O3TTUjL-y8J9NP!$ikQsI+!fVx+B4|HXb4?8GIBe-}~)L zH!f01>)D{B$N0J_Si(f8+t+S0jtrq1*x0x>Ul^q6rF5AbM}dg>u<9|16EshlO{RDO zvxk@0>v1<+hCizD$07YxfF@Y6Lzs-2Uw2uk1h-rzvSdyrv(XB@< zc*nWf$w+a|yrg-z@N|5LB5moU8u69@%Tpx`(Jo!ifK?ec^$tlZ1GybRLfVBb>! z6HT0P$LRYB5D}ibsK8tg72ReVfB9WP|H?PPI<%Z#mE~QH_x;D^v;Lr|=a^VjayvAOI22i@X7}M*B15nRycRzjprb9c&3pU~mjTFN!UUN8F?o-< zw3PH@As1*yKg)Cr$C(=mwX+pLFwL;ODHy<M#Bc2=rnH;I3n z{uyMR0jQ3YXvwrh=@^ZVd-76JYxM5L)(JT^>D4Ym=+rKTUrN=(QQS<8<4Lc#$ov>B zo!5yO^KmrX5Uwy^u|>s;R<3AsXT$FkSpG(wSC?K=XXm=!EzZt6G6$^_|3GJ!s^S=RiYSgF2(rKsKW(fXPV{erPb@S@$@Q~lZi&xQcc>KT63 zAfQ#B;x5W(tq~!j3_fDw#~IH6Umg<)S59%q?ht2xe`Bz*NgVO{op%*fzp+VLyl2gK z2?nkoGp^Z+jRBsOkTXL5?HMNZtYfhz6UCGeMN3)diTbtQgS7v0yE{B-HAZOLPR;Zw z)>hvpZ5%DH&-Z;DPdaysXRbXYFD8aMj|7rvceze?q56|4Bi7Gn%)y*GAg@{*Rmba> z@M@jr?6}|~1_qa;G|vXD{dLlR4b;aUtgv0nW;>r=*6#`E%gDtM^EMyvU&-P*pVqah z(zGna)inB+RHXCQNvU9_xydcw;Zc^G{p9UDC7=~$An!@DWuQxfrN(JBP7?`(x8a__^?2#Av2>SIe z7mmdNp3SWwj&u~}6Ufa5r8GMojjV8#+M0hX7mVwI1a^74R>PB`-`Oy1Wb;G<&*XF~ zvx$E^YT}q}(zb*jV#zlI(pD|sqc{;G)=)1NJ56GK-?H-|XAH3mb1RMlpsH*wCClc{ zjkReEJZZrW>RX$Y8I9At^ni*CyEWDU!vrIfF^g8YNm*I7*LX~R zVM3wauyMt*Ok;4mMiPxaATz}eUofSK8qSfsDJ#nlQ*fGe z7?32)&}e&@40M8`5m_2DlaYiwa$`D8uOSqdm=)Fkb;$co_4}GH1nvuB<&(IoVx=)8h|E~bD3w&(u7B$7*FBM zqMVmU9w@LRWgveVUlm|4J**nUP!A4^iezP3n2+N_NE@KZyYMr|`T}p}_fvCFX$O8K zLo9ihr(9Jf6~`Pp#^#`=WN*$I`XMkm|Ao{wNX^E*za#Oqg2&BpkB@PIH8uAr`n$ps z*?jQD!o(2rDPU&~VpV#gsYE<=R86cfX$(Rk^WRYqq7kExC_MOWs6K|0i=hH@lE2fl}b(2Qk!JK>?s!?);2A;0|;dTju5I~91dSZWS5XH`-y^#GGpY^vW z)48zH!%H{z_?0SN^3JxTF)fP51WyZ0Q)nEz)db49vpJ{jr=*S52y5#_cfpzKbaZvj zum_9#VaigHAU@xte}?B54w_Efz|5PdTvH9es5^MVNldRAKc(*1Ki6^%5t5|@HJiWY z=1mvV7{J;7->b>owVCv>H%zlbSKJ!WUH90pjGX*XA34f1H!7<2^y5nz98^PEpl=Kc zvwwx6mZN$iQqvCotg|uAw)XF#t$O6Mw3M2D56|jgWw@zp>bNncE_PJLs&I%#-Hy*) zSSIb~q+DAIfO3<#mB$`@$5GF-RH>aV7^)5N34vfAQd!QBHRHU^we7<(HEcceJ=BYP zxr&Bqd@P>- zARw$e-CFY53(}5bWTzCp7-d4Z$+wi8eMY$SGk*(znij3P)2A&v<)ViX^@QuWgdU_g zCjp9KTVuSx7|2r8NAOnr@u}!zj3m;aI^`4eZa5P&DN-cIjTD);7HGeZ7LA{Ee{PD$ zeN2sCs%X1=jb{yybR>Ub(GW!E#}%y|Mf_n|3rp*p#QqsnFSalsF5ctRhy>fP97)+s zTH{Lqr#j<kWj8LU(>GA# z%kbEoRMalJIAsV~0k=xyv~qGX;D#rS9#I;9t0Xh9a7+32{xK5x==ukHBb 
zbLP^vDAeV1w!Chm=~1h<+Ht>j-gMwm>u3J3V&PHSQRj0uux`VArvI{Y-t->%Iea`Sd$#0)efUd>PcAv9V?wSwJhZPU^TEB=#ivpDP2N?dT7%Eor@b;>q3Od4{im4&Y=^A8G3hO@ zaT1+aK8zr9NFF7X6^UzV22c_?&1sxu7LrF!r7e2sWt`Lp>lc<``lWCRJ()oC5Xv~I z7`8)N-WjnlJwhgq{PwHkRmAisD-hBtDGy%c|0!Zb2BfL4`{ok{>xdf8i<@y0V@Ri{ zJUYp(f}>H)bW3+Sg_r>qRV3;&7wZV>4MYnOIpPa4t4z$aM~@(YKlmP!VHz{76e+`G za;h02)69DMSwy^l@D@Jk9jk{sQs(`HF317wkrNVt-a<&|nSX#cKD3ZCX4cqSZP&$0U{ zs8OH40IVZ2AqCTFupQ#^&UmCWXZj%pzg|#-{!WrXmimEzM)-e5xWI~9p^!jA^2DQZ zyoiPr)T5Bzg0YT3mEguShoc3}+{51UB^;2{&RHeF=FVTD1*0)As|yk?KAkk?{4nYk(jl+wLD6t44aL(NC~4-$byQrng4AZD7GI|iM(4>L;dl&{6E z$kng`H&H<@#9{4)Av_ccN7TT}!mWcaN95a7Rml4p3`jukaMK^WJ38cjmlN{+NuUsT zVGWoi%Vjnh*E*n7x7MCucsc%4?vY%NYyE!A?XvG-j z@ev2V3IG2UKXjMKp>SOsv*A~k<+0DC@vw%^9{KDt*r>w3Ko12nf;U_NH|YJ+<-a1r zaol64Q4GMgpu|4zp^ZOl#+Kg!pN#b|06T;V=!Bfc|CEz4*;BfR8w`a0Mi78q_y>T7 zg}wO#$_sbh6p2vZwL$bxn?^x{4C;6h4A=lY_FaHrL!NL!(iteIz5jqukL4ue#KL`s zGzfB@K;Seml+^*|)oGqY^!NCW%yacAF*u~&9Uds3@|M^E1WI`%0*S!&gN?r7fX0uI zl5NFS-`_YvpqDa`$oj?`tB7iw81oIA@?h`l0828V6b(<2QFx0b*Scpu;OEiQVFV^oWBh z*GSNPw@;h_EV3J-ps{|_S`l6Y_K}3tN2Gv)pPw9T(EB?hC;=H(bd4M289dzu3{kMk zJ*b7S+=qjVgSb(9yDz{1*i=81V$wrSP=U$DN0r+g4-<5mH(j<{>A+XC)wXc6~Opz!073&P-2L2nkis9Lxa2Gx~$P&EG?3^e|M05W>t z1c$YL>6FRqxqpWO72LcSfWv0pQiBS*tEhMbUH!2@G#6f=fXPiM(9pdLs25X`A)|K@ z)Xll22@9~Iw@(;S^j_c~+I(7pK_(f~RbMEf8dvCBu>5$&4K7T@LG(R?fi?Up1VK^_ zc0u0Vy3U!LBcXV&om|~mfDLAB(_IAux9Mit@@0kq#R6dUuy2<)RJ;QV0qJ03Tn}FZ z3~4$|lV+uhus;WW2RO)mEguBl>x&EeM5~KK1*!%$aS?4mL9Nif*d)!+zW5>}<6|Ha zqxT?Ge26JF9E*n=#IS}s$c%LdW5tAn2L)$nt2_SWwXxB6Fp+8XO&E~m{Rs#}*@*yB z26qZ@76u2oc&}4DkhVPwfdbwzKnWNhup|_d*&r|{A&aC&=^RZk(2g(IXZ8M}sO~^- zHb`t8fCkMSV}A~LRAS3ZFHnJo0|cn)IafxgN7jFp;$b%izyOucUw{R4`b<$A)Wrxx zfK&iT5dIBzvOqV_LL&$)+-s8JfVIp6EC3#W18G6%gFsI+Hz2Tk3s8W&{{RJwF@F~# zh~kA0BvPJF8qjxR10rkl`a%k5+#lc~wsw8aZM}XGrV#CG&aG&~Fiuc1SZF6CNcpe| zw1pR7>6_x4*~25-Icb<7)CG>7DV7p7aWQEKpU*lm_Ibh-&eno1ep(T zk%Ajd5QxuYzncnx0$u)X0Tbzc8=;72n*=5(;#r~Z8AOIV^I%}v-UvZFg~AA^&ikJt z^O)c>LqHN7`cMH+6ek0lAjtK1CeZK&T2M_t&tcalM8I!ysU1VFTa=BT6!4pmq2;BM;N5o(}oM-;jc)(4S{&<*#Rm zwYDx$L%I#J{}n%cvS_|((;wcrh1j2IqY=eIP&&sMgbUVCIzxz+&tVXc8bjU}t3Jzs zNLy`#6|?`Gfq-8aRA)R1pA|CMz*sz-Mm{!3t4-F2`guenFg6& zcTR~pm*^?^drQ7X-k1I|hr(xk{*56Bf}KeR!r!EX<*TZv_Y~1kG9Ir8{ToeBysxn@ zK(8qtt1-N%_DHXz4$lkwJ3R)^Q7`K{WJTQ}te5$3)o;U4{jUc+l5xk4p+4XKEOJ(3 z67GC+2~u_2FGbsPYyAhQR1FWY=Nnigu&C`j0^-LaS0v6;*3=Zy)k8;|1FS$u{p&H+2?IIHS_z!9fge5WB>iLbgSB*J z8XYg`&j<{P*Bo^j^UuypdL;o_e76$folKS|D_jR#&bIZ{^7TF@|MN{c49VCtPul@h zL3C<1FuaIOHQke^@9r+2fw%-2&{?}CZ^HhgT?YDN(5U+08lVI zd%=9|oFemnj?T!)dq|!v4}JCN=ICOK2E)geQ_fjK{2QnG9C$f5FI)l*MMHLoW=vC4 zV)NaBToNUrEE5J#su&?187K1U8xZ>KD=JbI9#fmA{L!o~6Z!W25>A3=u34(LSl^te6otvTSOewcNQWe{^c>y=v;A(9GMrbk|cV_+^y# z<;RUp`pzZOJ3g_QhF200_?JW-C2=P1toREK19{J<-G^8#$m%^;h}@S70QBI9_`vAh z?E$BBTRBgzpI2|Gnq0DDMc;+psZ`-7DWl z5Jg53SI8|c#%I;l(qaP6%E|N9gE>sld1>=wqWTq!zzSvdg}(2CFY~vWrf?+rHdGe} zfh&#wyD#a#o{Ynzfl6Qg72e{@3lsMG#NRC2U=`{Ht<8#b;_f(**Z)8(KCNRZebl4d zoM}zRI+|2uNI2DA@=p~m!v&QZGR1gbx*j_aN+U%SGLl!H>C=2~pG)>D>ku`L=<6En zCtOHdpeESF4qCRUyzz{xRwEZwWR#A^G^K_zM^!3~8|=f-&eBGcJbVY4sXK6=zgXyY0)77C;j9aJHJ2>;De`YI ziI>US-PH?WOzac^@vh)LUo7`R=0h(^QbI!a9V>P^)h)5vW?pjKyBZtG;XjX>X2aZs z$2(ukVg1lf#=pXBW)RUYXHl8~e$F?)LfST*dXFm)_ae?btY4?1 zqQXih{P!Z@?gAWspBj4elq^z{cxSV|vx0_IOgVK^)<`k{Ppghm95+8yBw2V0t~QwE ze22`I1;^uS`l@B*vdfe#?c_27zmTiBD`Lmh=F-~x-LYb6Lg(U2~^ITbEqV;z9n;0phf$9(CvYY#Ec8n;> zy1mq${h1gVsCanJ_opP z@9SJ9rz&njVFK4r(%UUmwmJw{E2r=~Bz*|U9(O4qscv0{{h+_D6mCn&mQ&|S&z~NP zZ87f4!kNm)_Q-U!Q3a+W|GDyF)CCk6voZzGZR9|(%<#EO$ZZmW>V}eCy;BcU6 z7zM8ZL*|HJc=JrXqc~ARol-jr8T=Bf!e-rpsCDW9kq(m~qRpAiEJ%iu{-5}(kx!Bc 
zDm^NyaQk0BaFwJ8;)BrJr6dF6!d&JhP;tdsBXOJs2yK(l{2KA?FkG@W{SRiI6jki2 ze^caa4UbTO3SkD5$jv5fqzI#$$DW#bIIm;@*l40vwc$`Dng~L+o+1ti(?@Q*_<)$!oDX zKTJWXv6BA8H5OXoJI8$_?lUbGn|O?iC(FAetT)$p-;G=;|9t^NPg|g3T-2spXHb_0 z#)j?C;Bzw!Tn5Z?Dj5dPxnFa1nHbt|HOWJue^&Z`D)pT!Rbn0f-T08}v1wOt+0KNY zKxmvH=x?rSSm){y?0Fz?1U&cuv=%56Du@{1a%;-)N#G`<{~o>eP3H?QcEj!)agkiW_SzrEmEygOd)^!dm#I%I^@)G0>j*u7HLH z+zP)(79;RpoDp21{M4}{MD8_)+`WIINg0Rk!GPxF&?+nYem&whFJ-x}(iJXY#xyl8r{Np+?i*9M8AY zZA(cM@jG)@M?;0o=Lqd8$usjZ0IMmTADS@S-&pc2RWS-To@(AOZOTU?bUc_Z;29}F zD?dT#x0B;C@?ajWvak^D(stU>4H_x7=2cR6TLvw7zkb7{D_G?}130^y!-tixB}y18 zy8HSA=6A%^*T;9`Yv?E!W&K|+zX;nP-;k2d!*X#|p7Ec*E)FOecpGQA1BlbKt&K9y z9Q}Oz(mX^^Nw&{qzleY2kDES^+Cz>eGAoWkD3+@g8s5`o+~tTYSIO8ch|6v_h15Rz z5MBijv#QW%*!i+egeQ$kLC31OebgP)r_gVJ87wbHE^&To&QzD|s!Dd-r(KYaikoUr zSnS|Tcb$~~TUZp~epjZ-3gmqi#XF7m7BsL{Y=iC+D68kbG`aLU&KxkmICVs-Jf)MZ zXcNE)!~7s5wtN*QG1f3Cg=_pQ579$s%8TOpY z_Rd%RhhqoM6$N*)CbXwm_od2t--^Te`)#dZE+WP?bA}$|(z8WOJHQ0C^0{eWk2G3G zqj;e*QW@v=2tLhwlUft_Zy|N`PMY)U((`!hZ!3^BClO=+ zE+g7zcYOQFHVFC=rUX!^&mEA%z&XPn3XC0iiog|m_$_|!vq`2sNm_CfYopH;RT=|v zCJrI#ID)Gv0xb={Zkbb)fLrmqR0aNR>!&!uI8i0sxaD=Hj}hDaEcHYt&b=Q8@pb_6^|LgOJh|E_ioCig}z7WGs zp)e;pyB_~5B?k6qhL=;O)(V|keyKX&eB-yE_HUgNaE+GOwfWXbJ6=lf#>lIN5sX|o z^Wn}XY)cS4F&Vk{E6mu=INtQPTg881cAqCnm{!}-aNpl(w7Crf@;lAOHW`A-+nBoX zx+rDa>l!Xk87do4^NvYU2haV)Q%fCRkF$NS!qX9B-~eM*@~Oz@!jBj>h0!Q}<*Fpv zFO#r3**5(FxtT`OODo**iik3PcW2&%g8n3_HZK~*^IZr`tOMU24RmwuzoV|5Hcsddz594?o9 zeDgN`MFFI0aqc_(LjQF5u!u0ucszuij8?6R;Zg`^aEZqOR;Bh>gDyAm&Z7_w&Z3cB zGG5)fWPBYz?#@wYn&_H<-XW)<o3~ThC}AfahiOcTzXUy?;U? zS1w^2=`OE=*Au19;%M5hWGllfrF}@9!a&R8%@bz{BV*&YQAnpIy=Kvj#(4Q|_?I|6 zuqoAPw97R@Z9CS1Q~wcJf|>sFZ2$XP>KI&_rXw6HYPCE2D@8YQm?>Y6L{@>?>smgo zjyeFNoK-io+Ms<&cL;UMnt^ouFU_SE-@8heaeObG*EmtA)2rvujGnmkCt#vDM`x1N5)=4YPR;@=A6Z;aB-(!}4XCCPnMA5302I?mq}RY z)hW)nEQIpG-h859=(-Nz|6GlES%A?eSoQOah#L(29;k`0LAUy63xfY)fIpNLn|g+Z zD--InT>L7>Siis%-(2#}2(uW87Cz5@>d!tep42YdFUNm&ieVe*lTwz)6P!tQiSj!S zeU^}oYBsKF9NvGJmg@+*+N0kEB056oBeh`E!vy99&ZBB>eJ|^FjJgOeRDoEGO?l^xP zkiKLdWVB!LwK>8TL+LrHXge!28>8l>t{J1})l1_}J^43t zy}|{v@+Ki}MIkq3vmfkh$Q+p*XSZZbaDWFf>0IcFBDeMz!-c;SZkh9S$v+H7Wtu6_ z*sj-|Y7b&7WifFKjqUyci$Ub^7Zu?PL|Ak^{o|&GGe(4TmcO-sbkd+yYGd~c;}3S8 zoSlAas+`u2IJ6Zp$F0aMROe`e1d9H!>a4ZLG*P`%Ny*k&v3&(xkCJ>h^|MZz^n$b1U%n>UTLy zWHOY^q>7v4>>yGMTuWa1Zn}B2EWR{D*}-twVcBL`P#>*TXIsHn$D+bz!_B)f&04p} zTAch5*@uc7vKE+g*wlBBERiQQ>Kl48PQHZFe37#R}zn&THAW&jF<4CTH zSy_IJVz_Yu1arRgTP{r=zoU2T@HY0p~p#8^{aGX0l@oEuO!S>d=Sh-PTzZj2&9L z00e%jRHGEC;^*g6Ugl6wvDD3`LL_Oa&YBto@|IoR9D2T-aedQOlNHZ4?S{#0=(|8g zHz@#vgWm^fA{@TUPCgX+d$(7Jm>7@h&`=WeEB^qMs}oNMMn$OD5LlzX#}PtBAr^WN z@ZiehBx0D7ig)yH?&f-dbwzVqYx=BNb3M6kKMxuOdhlPMu!31C6+(>E2H@&)?_I9f&Xz?KqmMC<^-O?B9DmH5Ycp<~ z6HFD?w81K}pO1~#9-0?m6p*4TEnyZk5g-^n7)Atafo9=X(>tk00s<*ydt?T`dnY5(_$86t}(-0!c{uK(UsYFW0)Hz>Y);e4PVWPVNk7b*Nnq{)rXr|6K;x>!7#i^CW@Mvn-R`HhT zL8cJ%7?c+W!gK9jYT`b_3AS_mYodgmH&Yhp{0FexS0>Pk{VrY5YQBox%3e5 zy|=dp_JtO||MUOhL$ZF>8F-vYB)GgeV))NL2x-8aAsgA@+4Ul9Jpvnr4@P>~Rd=Bk zzL}UX1kzGy!kn{yaZCkhPT8!g$8oErv%5heV@PtSjZ;n9!|p>1)QrzJohEl(&FmWs z>zX|8H)!cR7|GT|D-M z6PqfHZr`W~jPpm-&%f?nK9#p%+^=f+m}z$#=t6G&Rvd@phL>H8_A zA>7`-e(_X(qmN>9-?>)hWVOFaOe$>+P5w|Vyxb_5?Dw*Oxb}XR_~+Eq+r~1x|1SE4 zh3Dk#`U13cIztXTIATr>?q1!F*d?s^Lvon%af_uo0haC!gtA`izatmgYpK_58IElU z#r;wU9U6_@%{DvN{Vk2|FSnJ6)mu6=&U_}8SV~PeG@tcCjjHZcR;pGEPEPdYCi|zj1hYUbJf0HX0{z>ti$sM!iV$k1LG!HfAheufRkj*treHe05 z_GiiQO8(&p9OZPt@N#un84=ic+GsdjhvVeFFWckLJ`!EM$Ifl8RT7!>W_*3Tz8j?3 z+lH~9&jy~~uL!;go)JB%D2)VThasS}heJW8e|Y(FS| zqL9#|`KVH0D;@W~tahmH1W|(dk7m~~cbc6f7vbe-2b)-+4LN$qtr)=3%12N$xI$Ev 
zrZ68{WHkrWk!1{o{-dTYt&vp9Q}$p5h>&Z~*Sf#hqOC8E*@32_aAb&b#jUB@T#+Z9J}RS)7Thts-j#5*fMP+bG< zcS;u?tnH2bup$}q=ET}ANn00AMW?7;I@?HJHmbRyQjk70>Ad4)q3yZGdsjZvV}8f1 zpJw5W3b4$%(FGv$a23bd50dN<*Zd(wp&GHR$(2d<8sm_Iu~!h^GygY9B*byy`}&;b zk+avk+kqVq7XRVMd9G^u;vEWDMjd*k{64A=T^*yVaOED2JMuP!t!U9n&9(@ScJ;<0 z&-q{oh)^4*sdu_LZS>VUw&3GkeB=q`;{SGM>X=chD)?fEit>K&@woYceZXR%hh*4K z+~xleQ9IK`%C96l_scA-=2`qb%vY7d#r3w`{<-7CmaPioG05I7bOIbWVN!TVN*hndyr6;6>UC9>zINW>`)_zC zmBx1C!nVcGH*HA0k>3sO?7t~3j4h}u-_OO_$6hrc&JBO}vZJQ>FzF_%F@P&gxhqv1tX1li=B9wU0 zrW8w<>+AM(Wuofo$Fh`xz+3GiN=@U>nst4tTA~7)*O|llV0VBl{^P3h)$YXd&hqx3 z`qQdTKyG|q$(6_H@yO#m;+v-kQP*9oJ+wf(B(eWe92I}>%Y7N{tzY|%hDEN2{!e%P zH>`M1!S{}*)u4}6qDRJdgHAdd&*V)ZFLp5+#FOjKW_h~_R2qSG6AqnEMwZ3tjHl8Dmxg0na<@%32sE zUseOD)hW%BeO}+QUoRsk+fA&$rU$onX0>&V&YzHQ80OjF?9{(0A+aLE>^=Ezt%MNB zX#cpe_4NC#YicII^RrU@_3>(80O~g?1@nG9Z|N;-C*YLCgmnc%p?wgT$L^Y9-UV|j z?-WLs#7Rf@Qr;C5+ks;eWR&jul5k?HhKSh~L z5ds}qvgWIZyA9sy(i4eNjK*+_Q|P}1yTg8;h!|OrX!5~9=*)G;0|&mXtb~Tk8t+)x zp1@KluoE6QB;73*e<|&mv-Ls3#^^KMKsam2UitgHs}p+i9|bd0lJUJ*KBDmF=IS8O zS_Ty5Cq;?vxa_gyp`y_|I)G5-wv@}%UV7>Z>xce>8qE|YS{WfYxttl7~O7# zG+&U!U)u&*=OW*D3G^jKXahxFmBbA6Pz%_?oCuo!GKwUG-b_$UI=K-fH4sn;xgQ4| z#lnOg#Q6i$HOln z?MtsTU{Ot-eA1m(0wP&d$H>QH&k63}qL zHfbo~-YmD>wsz6*&?pPDl*o>>QS*9_2*LW0QN!M~{(;4G;6cdK>(53_1u7&Ep~XN% zADaL~aKZi5QT&fHF*6X3eI&f{jMswbj;#cc=GH{u>(#yf1=$1F0O+)}GSH*1k}u>X zFOVO6m>qQo?Up(cGZ4m*x(-sMA3a|JToNH4>y#)cR~gS!A_NqLDiX*8!R!>Q^5f_& zBb;1oO-Nb91f>V%<{s@)f`(WGWgYBauDcg18f>bFgwR^9Z(9v18hB?&PTLZTWqtkK;i2tKX;jHcN}djl zMPuAJ8hg?o#68)b|0{~_`Dlvm0ZxoBH!oNpsRs;YY1lFXWknJoRx2A?z8}N&rif1s zpm%!i5L}v8;q8P~3`XdzXcBA*ZRwBBtCCQomowL6w|?1N>Q9&V+s6>;`96e!j#b_d z(|#Xm7sNLKT&Q)wu&4yISBknyB?R!gJ%%7vyX*5`on&l z6eoTq{Zzy^^2;mopiGpJ2Uwq$k@E1g5FjU&yVn@7b>70E5E$O~##1CF1{0@+_>!gc z%_S+1s#J>5UL?#JG|L=c=p{W*`6NBl%8c$L*FxF?$)ES)rTX?>{I?QmZn0y~C<*f; zIuMZxj4(kSuHQLp?Jnd_>)WP|`xr|er`_^!ZKu--IL* z`opEB)u}{^OTB zCZAUT5S#>9c|I?a^JiWc?5}T>2oJ6kw6G}Y(~U61fa$hf@&=(+gbV-OJqo^v;g60M z_18mm7AONaM>(j+OnO@8x+H0yZjFbE_Yy!jm24(P&3p zl9+li(#YY9W6nf2E%MZ;zR#J8iX_xCWsPh=&2ev^n~E=A8)SZucFrF!XeX^1XF0$; z2wJ4%;`E5&=kX>&9RhFYB@eR#zSGf#aFu@!ubpe;3J&U7wzJz+e zZ#qLi;rU&eTYi>uRQ#osQ1u4CCzxQaYt^c$T%^})XS(Bab z70dh4=6KX)zD47IFKE6H#9_R}iO#|%ZgKdN zztmDketf^__vpBEyXjCgE4J={q1Kc)+i1Qlbj0+%h&wya@89W&0+%fWz5^VZ@9ME_ z?Z7IgGM4=ER&+R*OK>Yx_n)~suWj*&EmPvy6Dd9Z^6y9|b)h5P;Dv5!3nK2*fV*v; zC#wD^M#?#2p@oFUo#^ z)9e;(+~US5J8Cy$y<^%`=^LR#%-EBm9vli-vqlnA%BJkmo~-mWCL0>KB7^gwR@dxVf5xtm z_VZ~YZme_tT5>r3Q&?iY9_|JcUFcpbsh7OmGj?a1sR6+u>RLPK0j|EQc5Qzmh;duO zfBZGC+x>K@YlES7bbrV>jLYk)4bT3vIhuUpQOJH!IOB*;2HOmPRlB6s0YvlgqFHiO z%b0GHz@b7kgv#pEBpj_0M+-JY(989pKZ18qQ(zp=frWO{)nDqq=scWcp_)~aLzzO9!rbpcCSYuqjX*F~8Z z-YQ2YXXhl(Di0!NPx8ggoL+zB(y1}dr`^s5FIQU6VrSr9l6?##sq#+)a;Pa}9~SPiN;s{W6)Tq=j%?*IGm*M!4%_|VX4K=U4j(=& zhlktD^YH7wlgFtioeO_Yc22#0r+W5?xdZ3112+9;DUNSt^WJytXIo=GgR!6OjQz`- z$8OHm9u#Z0mi>Ha-?;^LTQ9e}<~F0-pyJ52fdOXm_LZ{htMPyRui&czm;6C*RPCW# z4{a~A2tgRX3dk~99;75G?oAPKmL#Z?zA$DE^l-Me4n+DOoSto_AaqhvjMqjQs*o3>o$Aem|HgFe*<2S!bmk?+AB_=p6hX{=_e*Abk%gEQ|OfS;&f@HdSyt>L~=}bR9 zr-TQDKR#J67Ih){8bS*H>Q@+%r<(nFU44|zo)(2Oc8-6JZMm)bLMO8;IO)?;J7c_S zd~Ti$uYcvh+xfbRogs)$N6z@eUVFwr1|PYmvM!1{Ro>LYOBMx;;H9o$L7cI3k}OJn z@T*^ydT~PX%*GdW#!gaRWm9K}e$m*t9pBFE9Ov<4XW2v@uCG;T>CEy>|H^S}fHE^p zX!#;Z_bz|PK~wqKNoTIJx~fj|%yld~>^e<+8v{*ed7=*N$prkIopU37; z9l947?W5TdJ$A+pJ^0n;;8$-@_B*B4vG1#L{MFSH2uAnUG^;`r$r*m`>~fG; z!-%y1b~VO>cuL>C9Zo66v;HbCARyphUp#-aIurxFzrJ{et^IzHPliW}!{ctAn8#ly z=ZVb%y%~*0qs_rG1JpP~Wx#N>-%a^%SHs#Zu{<0@*c}*5r!*U59Ks^WX8CeB`YqET zq?m3JN_|y6d^r57gp@@PwFWj6yH~_8R{MQe;uPEMYdmedX$=TwZWoO4%B_*^YBPVH 
zn$5$c0<^zKI)l(0SEK8y@x3*fEz05S$*dD=JYEqvBrysh2nuN*1=3lZxd!*(7&I6g zr|uTm4W<}@2 zudYU$(ZLG!H_vM{{djD4nacKLs<8lV`t zp~CUV-F+Dd77`xqxOP9m%&Ira#*$*zlkHx3z}`5g=8q9#c$bU8yR{+MmIo;mZkG0J zUtGUlmsNUl@{3!Wy~qO+wx+J%_iL8^I6=g}P!wdmthSnLVnNP~?aG+e9nO z>EC9m;kHC=zlth_{I}^0Zux(uC-7P>?NaWUNJoEjH>FJ3XAqj; zY`-#QFEHx190UuBDyI3u0dWPa_iYlqZJ2JuaPOrjWJMUp097}wIOXocuz@JLW10175O$~S;iR_==Vg_z1|5OME?$4nFTz0@Yh6BOEbzO~8N4vr z%}lEBO;3XS{@7~Kbe0rno=v-J@U(P+k^XbGxWXg|9r2sJjZY89x{2aZx!E2YgPZr> zI^08UgS22Sq~Q>90^)y-s1LLStkE*bW|&{D)Rl=e)+Em`FCE}D0i#CN`LF8+Y|3M| ztOKN!#2)pXuN_rO9fG3~^tZ81>e?v1ImxH%ayZ&Fr_4o~&GHM9%w|vF+VdTX3loI}fGRqk}5lj3Q&%)~!2`}oFva5AnH zB29FEjXW|i!?x>1K1Go>0pe=oi;+?%#q{*^q)3+blC&I*o;=z2;G2OB?NgpY){d7t zn+-dbT(@-BZ|r|GDY3DQnb~l3{ka$(4VD9U@Xg?O)MfZ)hsbR=Ayk$bIY}3lE`ER2 zS`T3yOboX^K00=DocU+T3e86>?mPL3qeqxdXIPSS=9V>2v={@|Z9+iiIM31;o*Yae zK2a0a-(dX*<1sxL)ipbhlM2!@K6&CbVR?qrK5i(s~Ktu7o%;ft67uk16B!ku_jTH?RPPLCWf%$ z)NnL-_0KQzm6ws} zBGp-C9vy!SKhSUAroU5k^zb2UaOqO#YeVbpTM+zr755v3MMN-!AFmd z;bquO;ikKXYd<%5jQ8^A2f&gq)C7KHXGt-Mh$|sM;%;LX6D9&1FO^A z=<$DWc&5i^mV&pA)H6+%<-DEofw@3c!$S!8JJs%$`|;@Q+fF|)o7Hf%ai7_#Ott%5 zqZR?uHNn8sSu+&VTsdr3)}3lg{H6n=(|13%qqgsXX53yXaUq+g&8v56-smZA?f^{W zK3MF5>w^>0Jpi7(7>?p;ea5?h!Dd(AxJQ3eQs|Qg>I*%7{_x>&_~lKj{bjw_AB@M( zNjiISgge;teLL9E@%G+DJ4T-$(9Ln&29UZ{?}=BP#XN=^PUsjuwMa4(8p}HplrGC- zqjAFv(KKI_-Vr+~UelA}7~kv{vyf`((i|uAk zvZP8adrQ(15<}(?&HL@!sRrKr&}6~oJgQ5O8%O+j_z-{seg8>c?R)$|=lOrbhq%`j zs5ALkpCs!=H5|DwhOloTE|+r;Gg=Sa)>06hlI{8MUa6?u%|?V8dHqByY3?vwOi#0L)Ws<;o~MKlLIobL}7xEZZj07)ZY7g~c++@Q)=ug`z%Ur5lqOf&O0 zxqOYG@5{llyMp&-S6Kq@zQ#1c;IvW~=t~o77n-?Rf#oMQaam1YuZnzVm!D&F{A{G2i*&JYCfcb5eW2Q1=#$EU^dMIw+ggcl*_Q?zIR1Z55ApPac+dTTXnaL) z;K*_BKI{M5GwYYhas`RHZku*p$N6Th!5_+uw|)_ay83~K`j=DlDw{%O;& z9m1cp7y6sQ=DlE+9qz8(N3(PAXqfF;I$`>quFEP1bG|wvc-)cO>CSDimNVd$W@s)p zCy@PoICwnM%N(p&!4%O*oxUz1XZQ&J4o9HmHKMm+2CRQtM*x$e0H(PCkCqupR&ba9 ztej(IkjlL~&@!Cv?P1F@9aXi!fSAdi|+jA%RfJNj2oOY{3I`&WnO?;!b$ErXp`iCf5pJ5%}n20G(TD7 zlgG=XtaN|zSYP6fQ9dqpajuKUFP?t%@n=uTvZvoN&zjuhjScQ#gc#JtnD;}@zwROI zrq%1a-$h@ZA$#<31m%oAPwOfIeE0=De}oeFH3*dG4>Kp^1Xm-^)8vdcmjOJBB}Hk zLOD&=b8nfJQ*yQJDhda`U(cN(J{TFDk>MtQC?F1->CE|jp_5WO$)e1ia;2x~$(1wM zZ>D{rC+qaF?m`jU%`J~c7GwhH!lL>@y&N`gUH=)S6I1_FR7_aq4Uyrg*Z*r)R|ZTfTOX`zkfm0TYRV zV>a&^N!seJlj)?u!T|=lw>8Osv-dFE4fdD^!A{i4cRxQIUc(Lz87Q*DMPBMQtwx<> zB6G{+Tp#u`PVCyA(0!&SuxXntTjr}t(~zEAK~@q-w}7Xm)xuzZ#d0|KsL;40plN?2 ze7Q)n3YHbPcQB`fMXD-vYldt$)8PeNeOBYn**hQK+;=(Z9;%mM+cvMY8fWAA(Iq$_ ze|a*z$d4|MNAC0SpPKI@CA06md_JNx%c+%2Y)!hzb`sc z<#!DtTQ7ByPTNtyY{$=AIF!Q9j<1UCX0N&BY|AzGfaP-=nn9k z2Ug32erkmqgrOwvQtsZgcJ)mcZ&fj`R?AW>R6z{ml{g}Y~K|y%D8QpIBm0y22;B*3sz`QFX z(_dZo?bpnr8jiU;*JjUu1GyVK+>Kz5o!3sGbky5+&JSICD*j1d#dDDQ)|NF#`0Kbe z1iKny$;k3bbAo?2?%Al<1MBy2_0AqTC&YVq#`~R3ZtU!X55Dt+#Ath`KHs@pbWNYE z*T|eScNQ=QGO2u@bV`5Y&iCQ^t023_g<)#0#%X4;}y)dDTED3yP18Uvkr3Gu zJ+FWha-Wsso|VUT8UbumKYjaFe=kEjaQ^CQsNPrwPwYInzQ~XC@!Pk?ZEW*qciuVJ zMdjPJqCL&U`TMM$^{5+vB98R7HhKRgEx*OT6)zf2dr;!x{0@KW{(5d*Q&;c0#N~bR z*#mo2lR5a=LIHnV0X$%5j5_>A&)~@&ZE0`tt-le??&3(_vV|=_h6}uljCtb?`few9 zWCU(L_uuUmpZC;uRtwqjLD{2P|IX;X0CZpMLH8cUPrk=)@ji;|8+MNV5=>9^^lXw} z_JjN6^SKF#p{Hkhb~8oW3ZvUBjhBlaJPZ!>7Ube^*q?vsa3yx*QoLj8vm&{GFe~%e zd^0zrZFA;7?Kb?i+LadmY3g3vWNx@GG`sd7L>u&=xjr#019yc$$CDoq%ez(i9{vdiXsXdg5t6I}#yAd__ zty9m4@|=H*`inJ%L2k8WVGT2kb^#q*#GR%6vNNFJH_8T7u)pVr?zULRI8%W03@+Y( zom)S&7kOR{Z#0KQ3(XN^Ap<|*q1Bye0vzbH`V9Ojw-M&TD&R*4zk6()*FHo~z29L( zAE(d@VDGLLx;)jo8aUG;FH1;&nr0vV){Q6nBrkuo8z(1~F0MVVy09BPctcDfM9^jI z(1U#su9^pE5d|{dPUW_&tyknArD}ebXZoV^2L5a-m(yf{4YKax(kS(+t_W-yDSJk>MrTV3RJnUxcHC+U1$=Ur;-;8>tGCM*$rXC>_e)nvn(Y*t*kaBmj_YpjH=6s6=6<8O-)MfMjb?|hndE<$ 
[GIT binary patch payload (base85-encoded) omitted — not human-readable]
zV%pD%X?q{a4#^?dVSJdJ_$oP>Irf=65oCR$UVQA}o#aUO#SxX27L7@i850qEtbvMB zx#TD2lb3i(P%mCp%n_B3m&s&0DlorN3iK}pwuHs=1n5KF^cC}dUu&FER?aZz(sSCX zI*_QL91pQ2Cn77Q$i#+Ef1h$k2G5Rq8maFm$_UU6F!TqeMj1~)GzZRDTJ8Mra z<|gp%gs3W^N|L{hb?gl9L1zvOPleb$4r3?obPs_oU-Wq;+-pVs*SRIR z_{DWaf8Ex8Z!F9He*b==Mu+IMz#eMy&r(@5+wvDTwLQqtpXN+|e`Y)JaD0yFIEeYh zQHX_!ebiVaPsIyQ+l+u(@^h@6I_G<|g722jf9Cu9I^Wm$;a|=--qAlX71oO8(DJQU zH#>fbo*QM+jj$(17mZZ?m;KS$jaN5Yua2jT#4C{E%+I3qvVZvuD~uJ~+ICTB%>TX5 z=Qi-(pU!{g8~Y;Yf4qZ#yOYj+4h3r(y$?~cgjPFJiv{Mqan7k+TCbuiJy<`yLK*7u z){Y`4SrP*Ttk`FMHa~Nv_xNx|n{{5J7I_)RdjiPsj5x`Cxdrr%cK$Y7+;@n$&3;(Ctx1YWu8AihPp96wJX z8hv3bE|eb%f0i3x7X45<{}xeT+K%4vCwONlk{e@2KV@{n!yJeKm(<6!cpo#=eH?Uu ze10yPt45KI$*(oGy1L!JE>!d&N{5Qhhgmebe@xXDm?RKqWu6=M+9lY71Ck{vedMxSf#!Penv=?U}bv&c%UUZgI+?*Qg)%-Z&13k|Yr96>uMX zKfM}X;4v99$Q;Xfl|BetkK1^`d^P$2fy2P2_p(g3WrK`n7uo}dnq$6=ufJ{In4$lR z`V^aBf4Ij(NA&l2VLW{dWY>E+$i#7|H(-t8UiSn?Uhi(;M{9f> zzSpKdn5r1@I{liW9>QSLrix>&;@@At7h8O%``QZ+Z%ztiy|unkj|J=D{2avjaOZq4 zC+)kirbgR-p;iV3g|8<$nDj~Ugh5FHYd=W}e~%MUx=e%cVMWg%gCrN6KJyPXpdf$>0efH+8d|x#Q1NSc4A}<1yo_xofQ{3Nyqvz&v`{qbv zf3)3NXVT~EcDxzKj)-1~ybHDh*=7aRr6lZ8RMvNf+SmBJ`nlmh zShR+FVC zn)P*~>KD(F|Ej_FoD$tDsNc_b*EoRvfAnMZfBLnoTc)HRJ8?o0hb9V~ROz7}yW-JW zAl~a?Of^OUmRyTuRU4kQu|u!#XN^n$_WeabytKNDT`HMBS2ms0;uSvIoDlWpK)3a3 z6qW8mHx(~b2*)YE?g>%CQ<=3@CicXAaY=z#r`RRs{`Y6R6t17K2qV)byv#l3Y+wQ2>!<`zj2=Aane_z+B776M5cWRyfYn;^Ehg;nBr~cFL z^$^=0pN>70kWY#A3o&mVo@O8`AC!9r5g34aiU-t>t3)PBL1a)b&T0~aShVjkV$OZK zd9UwKz{d^s*J};@ zs++}rI81S$YrJrMhr3euP{#NG`ykNu+b|c#<7AkNl*sy>iuTBu)OW$Ue^H&NU#DLn z941%0V21PS&8d(flj1x(GYWfix+^OBAzwyk=^jB81rd`k5Ru*MxRnt+3zv}C=MsA{ zmhOZmi<+LCnd z#nzH(o#^snj;7_~6tQts%qQ)xFx$W;-)c0KnipGOZz~@fw*pyBe`tw)pv#y-$>6Pq zxnNIIutt= z-~;i=%rQfHieqnAN`F_D!MQD2)>xS1bjHjkH9v=EI489{hP6z?95u6}6-ZMM$&TB? z8jBKh@fZz$1fi8jl=t|e_IBsaEuLQH6|fdhi9fTz7W6NIk4ljmQ}#i6g7X^eEF9L*C!ngI zeuAnhhW>KLzGvkXU*BVNKBb&EO3aO~{ZXD`f0wF`xeO&gPML>&mrvZA%uel8Ry;Vr zmI;a7ms&&=c$DPxhjrSxw6;cBk`z2;sTY-B=qEU$^+8q3*KIZSAkN9|t(tDj81Qb( z-M4W2zHwzMvb-EmA$Evr3x_6lEQdJe4#yTSpq$g|t ze?#1OkG=J7zQMIO+&)eyVGhmeEu^g-A>Tp839FLtF)6VR66${?{@oHP&~xw=$~(ej z&DI;nJSa*cMB;5e-xGmay!=d|eVJYeHs+$Z$8@dkWZoYM_So)xH`~kPEEfG-97wns z2ZSW6)ECH+=S1MW#KP5X88}MyJfu@ce@z)xh{M79PVgu9s9#r&hrQ|JYu~ugVOcPW@9H%sO>0@!n{|rtl}V>m`VAaJw2~U zT^(L2_{Q?Tyf;2zL2G!9?y3^|QIS*W>)zd7^*t4K45!(2;;N$uoHOV2=}jLBe^1rZ z7lC}k^3|)mZ>N@?Q>br#CdrFZ#5={GgYAssGyBWDwlEjEZg%w@*`WP`+~~U>9!Yup zsoUSq**TH_+dbT(7Pt~e<7@d;f=wxO1*Ol?_t^BjZD4VMocuBbDUY*_8>P_BjaB(y z-Z%b~D(oT0z3lSVGH4h1rLKaLf235{PcK_Ob!F_bi}}4gVcy4o&dGmz2QipU?Q_b>Xy32L?cVWoxzqiTdW_gbiDY>w7_qG>73{RiPHBbt1MH0=D+={Xa0cNc?+4`k@l&$N#9}E0fjHi6 zUbmw_2mz`InA1EhDBkyoMUqZH$$KcL`tvaGm1Eh*zxkj05_^7Led%5!`PwgUd$oVI zGuZn9r*p&5EOaluAKxA0e@NX|Hs1a0!JTWT`Z-bm^Tni&#XphH07?o6%KbgqE+Lal zoXT4FbVFW)s@f@!mozRZf!24V?@x<$LhF(^s>8lx6i(bZSCSE>1S}XK+2@i|5mAXe zfTC{mmQiR}FpF3DmZL5sqlDR!S!{{4NsaxD_1s3ISH$BhNubYre-%TV(g17pId*r~ zyjFR|A>uwV;CdEscm(V#fVq}i?QDnrz%}>s#?`F;8b|IR8}J(SwKqw1W(PT03H9Z$ z&l7{fW#zi`_WQy+V?UNEa1?(`@^b3jpW3CGytiOZ+mLfd#YaMBs;X4YUYS~?bzN*E=Db~4pgn|b0vdO`vrtn5 zdjt~AD{?0}U=G6!sc^{xxbMhA0y269Q6KaAjd{EE%0->>*WxGs6Bz#qSc)F!GT6Sm zKd^dr@mJkvoY#Ljhrw>Zc=TL`9y_kJ&90i$@s0?cr@0Jwe_H?hy_g(U@iHbB$#m~9 z1WJ`I*~?d|inMmiWpeH$k8{^i_mfNgXAJ1bhiCs}lf~YeI`jHio~pAa+g${&)n0k` zuzy%X7L;OKEszgTKuH&*^$9c+_+Spj+~FZuVl84a7u#qXFJ`_-25S^UI- z$bHC6?3ar*f0u?lIkhzv$o(;0)Sa}R5u3t~lbKqjyUpuOqwLRK(m(t8s#U|TTB3jCGOO$R>OZiQy>`_x*k^U6Sni*zvlnzzXE|z_ zV16;T5)drRvBr~%X8UF*&ck6%?&eFTiri7mk1v8Df1VO^jeMI}uV7A%ui_Fbf!LfF zPAunl4_}UOSL+AV2SUDoqw&Cyrk@4K7f(u~Ox09lP(_0^I6_;pc8!PJ5!H935ThVH 
zP{qbqEWmnsbViJx;<`sn284<}pXiEk3`Ye^MU!jY{AL?(dc=L+Y;tK47jWzOHuPk_ z_6Ir!f3>a){e^d|bB>px9(nB_e8V&qYP39x_5P5?E=eb=B?{^bJ2|Rh9zm*eqadcn0=oA4ceB36r8!hR&L6ea3z)#c^eIT+SjH0uL1wHJ8KJhi zfBJYHqn0*mK?>jO>$^weLn0rkL{iBY<{Jue#l!zS@9vi-aUSo6-2R^&iTs9r{^2=q zgP%KVkecZi%sXr$7ffmA1+x5@LPJ45f4xrj{BcUFj|uQG6KpD)&oSylmF9lRlnC61 zK=%J`9*vi;?sdM*+;mMo{QaS*i-i!#cf`Nj#-3F8ck!&+p;9Y{EjQssK83Wirigp~ z>pb%ETKD_S_x>AwGTmqKM~m#tul?-jaE%Q$pAqMq-)YSg%<-#VwSg;dK~>kDe@|De zFn(^}3u_)B>I>ZejwyR=Yrm$Dwx7va@@4ZdcLg>$UB;_Z*+g0Tx+P4nOY-#g@WGhy zUH9<%uGb^UYcU~mc?Z~YK=C;U={N+r&oSx=SAO0(L{y0@S{k11nY)d5Q^i9UtN>Ju zjd>mHy$xwxG0iC}2^?&nfSfYbfBgPT0J|au@l3P(VxE!ZE-UPuK(lBL(EmICc>a1k zJuLTp=9=DZO<`dP7m|6fEe}D8eL`7ERl5(Nsc(eR8Jie?Cvnf3?r*zyKF7CX;SR1vWPwe-hCUn483(J~hTi zcl}=XkM9Ni-U|6Ei2u0++bY%C9BH{=qV(a+7Lb>y-;*W^i00rZf8=ayO`jw7QsDXe zXI!G<6socDYPulit51WKct=4IIxg)t9?oMa(wDl7YlI<)clK-C4*ed#Ay;-Q79;!J z8;$DS^PP%`V?2GqcFx_blZd;9wrQ(V8Evhqk5AM;taJ5u&qJ~T*R}bBU%@Ku%k2{t z7@DVRJQ2w9=TAJPf0Q>~iaqE5ac)BhYGY}P`Kep-j4eGi;Qaym{%fdf=O8AMABWx} zGp+9hRqZ0jQItNP*&3TnrHY91o*l`A(FpQQG#|=wl*WP15eGU}h)sE4_3y=c20miA zLcJ~8^X$-`B4-k9^MYY~aN}DI_x)h8?|)YA5yx~8|8z{xEBKioqtm>8+}p+(_k`|)1PYwu zHooMyR)4Q^avj&@Ppasn!u&-QNSY@Y7QIeQj| zatVSh;KfX+yQZK&Q+-Finm|gD@GJr%kRuio;VA0h{{LKep3|*CG*c?q+fuQBgSX0k zthcfS^4|11zqPo|#t-kUSR~iIm&a#t{^Yy;U~EbsZ9?Ctvh>~lXY7;ZlkG2#A@0W9pwEF~09|m=`#XMYRA2`Z_Wp4s5-BN9fFU zfBX}2lKc2{jCQ&1@P0sja#v%vvs+DS;3ij;QOM}UZQer&4e{M0pe~6wov>}eG)HP1R+hxI&KMKIT z*P1qn)4bPp9~*hAo&+>1`+s8|T;1R4+}~U9fp+?3WKEx5G3P=A?{I%QqH5vWiZ=e` z3BK>=+EzEQ0P)~E($iQxiaM@}`2_gX*x2NLVv@<_^By+8Z9h1;UTe(H9u-P>f2{Db z0q5VCuRk~(>pfM-^A$9KW^%15gWQZBKgu`k#XP#dj9%c1wPceCaxS)oFuqHdNHZiN z47R|w^O`n32_G7t6uR> zA6s1q-qTt9=nCsTlX4RcO`COw$KWeG z*!y*(3J{yo1U<%eF?CcFQ}NW=vJvRl(~WZ^N}?YYJ{Fg8OIySSn^bfGMg5?G_k9~D zG>CJCtA9D*mv0=*t0oToe^EQt*zSAexQ#V0&g$5dF@k+-k28}9D<;w}`mq)v1?58)k2H>rC)&wq2(f3)sXHfY)Je&ITQ z`&0wY`hXJT;Rb^?aU=2 z)Avyj0~Ns?yqXXI9Dnd5gRV|LuW54YL-K1b2d()aua0mmV=9WR{!WRU%qAkD42k+Z z7iqil>Xu9&+>cPLe~*!YPpRTAfuPmz#|sqdb~WF(l-_lke@G;04$#oG^gn=>5Mq5? 
z{B^81zv8NS_M4Bae+C_AS9C39Lb4!PdHZ+Fw`kq7yLG$ruy(3#gR?s9+dSh~duW3T znR#IUo>xlb^0@NqBay@BuWMpFoM9%9(YVBTY>fT%Oc{>#{Xycxur~=1#1{Y7d-(?} zF=|j+piGUae}TLuiQvSM6(_L5i3Xc58HCD!Znpv2%E>w`1pV9y;*nS zO40@TgZzLn*x<*=K_WFlN(e08jIdKeUdN(02`gG|X-B6z^y0!zdNS^zt9t^%Ii_yMjsxH~+~)%JSB-kq9xD0;F2VkdexR$p z^mO6GyyKizM#r_WebIBQ!76wqI_^s3z>(cfe_8N}lOH?M@M56Wr(mp|K2MIC`guSG zLu<>GaD&Ryj07Zq}56;P@dDIU%(JO1_LbDs%EAFPG@A$2cLI8~}AFj<3U2Isz?LL}xZ?xPq#f zfBXK`QaNWl4}u`DYAQv@qCV?In((7wELrUP%i!!^Yo)l~HFjt`ry#H7f8k#vCN}fZ zx6W;HNu%yKlknO9nO}{js`U^bgH1Rtvw?b7pSnNVcziGQFee7?UkIQtvk`fJP?9^@8YqeX;ge|}b`-*KEwKy$$WeFfbfA$%1`j0-u(WxXi6 zmm))5u0%FG<2MeAEC1SskM%sJ{>q1D(qLV8?&G;GTz2O%+3Yp(nX6#5o>_z0tQ?~r z%z7=wr(0g@?k{{^;fhASeN-EH&RuyjIsfkQj++bl4bA!4_(iav<;i49HUrcGqEUbSp5$>( z`J7z(gxY)z9yiY)R#p(@j2+~;f0BFfPDphq2Ie}-D+dx{%id^kavY`vacj8^mipwy zrE|*45la$c^W$_c!D-B-SL0B--pZF7}H{eZVRmosRuuu8=DsE4>qc z35xVU`X`sk&gi7R*f`m&eD&(r&>!vIEa8pSlPBN5chVaDQWp)srE-^W02nFMYsU7y zXwr)Yr*J9%I5^o$QQOOle@5BQwnxnRnU`+aU|ssuma_o!4@&Q%br-oyu|IoM;y%Xk zJ=?HsZwJxFCykD@%{~dn>%0+NJ1f|mURVdu6gWwzB4mAoLD-3?`n3JJ zX6pI4Ji*W(ckNa#hD;3(gjP9^GH>6@!i$~b5%pVgjg*eY=5O%Pe>7{(_Zn{d3nd36 z!q~?2rez3dA1H^d{dQDBCSYj|i7wCa>ykR~UJT+*>O|?gM>k`l9=wPrHdU1kkr)xB z8ocFq-}x@ecX13NZ>+!0_K(YYFf(>zA}+Dee~KqxyS4nZPgkBmD_lvJsEoK@bcFlJ z*1q@9g){~&i5TU&e;D-?al7wck$nt`as!GFS^C8zgO9;;e1nB42C0dY|EB7}qh;Td z4Ik#W9KXO)pvVsShmc*bP=5HMzjn|En}^3;A@Y{Cs$B^&X72ffgl^ zk^$<-qCE+@J`&uQB`*Ew?ONYmYBc5wX%h$-Qz>hGAg3EAe-YYKW$CsK-w!QiwCJb{ z2?revi5k7Mv|tb`Nwu_BUF1^law7T8^jX;zM2#MvTD=wWK>lyiG=7>^&*Cj9x`-1F z6^YJqi&j|6j;}5@sC$fobuh3mtOlWUq7W51O> zICTd8TU%rxf72nx4myf|fKF^OLfhJg!x)e!dH``SWT3~b!{DKX`+P{L{q_0Kb5cf> z?GR}ZHU1;OfDH+&a97?q5V!*}Qc%`JxAaPoI8kwL*%Q9`hm9?e>Drx+Qw*T;< zyaGBYI2u-$STY7Ee`D;psImG$C4X2)8_tMuTMOiF3~;|2fckvtLZAAGXU@>3Z3zP6 zlVIF>s}n;WP%49m?~yKA-%I3@U<(idMKDig_l9ai3H0N(6r3Z*N56_ziW(uPfuII^ zRU!u!Vuw5;M-#5PHbl8dWqXwXw;TNS(*tY;TPVbDbE9+0e{bb8ed%)EbK|CL{B(aV zN$%^f!EZEYukvEQb7?e2y8;v&Zsw##>Qbme_((40B20^KJfoa+mxdtFM&QDC*HxNB zhQ8ul@YVt;V>Se&MQj-sh*xVvVDKt!sRh+@!ln}ma{jWxI3II%c`ra(1FBN0$6?`V zVYA$yfpdxBj`I|>qD){W2Q%sFa&1{YpZ#s4`5q8;4g^Ffa(<<`@a zKy3w@>VJ-ZfVp<5{@R9RIbskbqtj&gDl5CSD*plgVPp2wxF+r~{*nYu{KHM3@fN## zG-r*~-L-8muQNPG#I~UTqlg`Y~oD;xj-pM6%JGW)$9kxXB=k$GcY_$QxgZ8i_R~&hFk3JC64Em zUe6bIe?GJZr}^jj@U^IeMhE`yVtbi*4C8EIujKF}+J6rqUzTyksoHS&c~#hw_IIPG&1LWYKc?4N+#B?Y96`zP6 zYod4wsdyvE2U-Ti#+Y4$e+cFz7@!R*pfEjof2-;MirY}Uyb8iICY2{n={hPB#u1}< zsuLlTger{L3rfMeTiYQY8hJf`ivb=iP?M4xJ;W>Svj7MAl?>0giGlh(*8B(7=0~43 zdds=E#9JqTvC_*Q_}tss`Y11F<-u=Q(Y73ZB>k~Z62|7zx{E~*UE6sg>g_m=VML*F zf4K@AcABmNnbj4jO@r60(G1_m` zQHa#s)I1i`N%32J@LA9MVROTOF8^)(e<%4BSCU=yim$qqjn36o=gj)a#fW~3JA6fh znZMwH{%w4yuV11Oq_G3)Q9}RGmtEJEyo*3dzm}7L@UIYgdi9gMOV?%uV|z=%n92sS z`gTd8&-fMksQwMkjJVt2Po(gN&|W4-JhM)|f~9a>tp{hU&z$o1o}sqWNrh>SfAQYP zp{V%VGN@^s1w^OenD&X-@%=_`i%8SDVqU1rF*8z35*fU zf34FF`jOZQ?$Ra5KkQJcf+5ihTK7wP43Rfh^j~07U!tPHRDr6xY#NSj$tPr5)`2QN z8Diy=>Lj8{jHlM6Z3_D&-asNIe>&O*r)&Elh6sMC_YF9G#5>+$7qMQ&ITrNZt`tnq zaR9gV{wMe3MF*e=$2aX|anm?%pY16Z&0n17+Q0ZRF1CbDsJurTK{*D8MM7Dh*99vK z`*!k4LAZ5bBc8EGufFN-kKroK;ZXBWZ$yv-o%`_yP5&HUk7Nt!P% z7)FOwKv(GnT`BJ^aL{poPro_;RX*vf%izOS2GBBf6|VE)=p4pR|I1a9gNd1 zGsTQkF?C7xJ#;YE2)IpjDHthjrUDcd zMPBw~SSAB8Kp~(WA3KlnfD81+=5{-K2>18g| z_Kg>`;r3QnzLiEV=vO`##6L9aXB3V#8!6OKLtlTFp*`e&Y>Bd)Mz6{IM)U9+%IPwQ~6~ zJ(avW;0j~Gl=3DoPcC)sMVxr=XoERE3BSx-i1=}(yEaEVYtEm&N#EsD*X63wm0RMu zf8%x2@cy59Gwyf+a6fdq!*!my>nmo<%5`eD$fuL>sLpH@!{e&E%S-EB)bc&~5@f2B z%{7nx*e!;2n^2|Dp5!=u_Q7;wbEk`TSJ7&7_JPYXsleArLJs8SSN!GOcbaCXS7iKS zc$RE9(=IV;XfH9G4?z>-ajx5BVCJLve+O>l90zBf|9RPmT=#`XKq}~vOp>Q$k{qWh4pghz~$v49m3&ZRW6bbHhS!hPbFnCa(tP_ 
z$a7`Nr@BLu=Le6;BV(HmrPjkVnM?vuG1mr9NC=1_le5>T&MfDchQm`Pd}h&Oehwbw`Yy}=XC&>AzlLs|4mHa23+k&lhZsu`!P z))wX!$q<)=UT-{dhi|e|76AYTY$u zx%mH_K1{o&99R5P4!>$KodWG7RN@}ol!H>=I~kqI=l3;C`iPO;whrl*NWm`}l7`B7 z)PI1s#&#Ls>q(!H^Lr<{!HJ)RGcU!TZu{Dh;N=tjx%Rl#1NQ}%edFS=vHua# zxtuR~j{BO$l67q&Aq~-2qBgkX%hKKiZ)i#f(u{y83e^2ZovPyQ)R`CLEJeZk0XCa<~~6C2}xOkZ3i z4DA$MbbTc*lPMKZf9hg>TZhrZv*b2w?8f)cnci@|NdmaP!M)2dutY5;JkL7eN{&EP zBh?ke=y!dO^X}J|!20<8`C z`4Z|IER8kzV+_7-c;7xy$9E6t7h9Y0*~AdvbXW@H-M`KQfARSoQuC#xLJKld)d)H@ zLAw;$*nQA)NFeMhnQ{W1)8oK>1UdM6mV+;-&>rctS}T_z{b&D#TD}yjQ9!hbF`U3_ z=OI<`X|Fk%gCO(o=Pi!IVDx}YV)P*4Wb%pqngtmDIkp=O)`{K3|Gg7}v5PkjRLncS zaV=tfk$Kx_e_uJ!?vnMV`ZDodgQu=tq%o60NMQ2x{`vKU{7SE%s~7YkgX+g0+VMeh z>v}Q8v-`U@=ZVF~%Bw%G1eeHpOX7r#?hoGPNsi&X&^Hxf6^Z?txE{ z|L5$Fyl}kU{3;ha(uEh{Y{Ne9lVf+B(i14bQCEKOe`bXwm7uE7Q~iL*a1g1$wsV~k z)h#@O^$$pIfI>TFl}tyE6UnoPK{ufg_TP=3pB~vHeMZg>DkT~2@bmXr1suC(uM4+@m?&DNjoR8Dxa)li7 z!Up~OZ}{pdWGW>UlCQ0UkP!apiYJU)r$jdEf9~D5!*%z}*>u5(WBjDiGBRr;SNwtM zKKl?HF&&P2)eK_^nsFyN`v~X}?c~+SM-Cez%fSgLh?StauLtJrdQ8a2CC=cfPdy#% zF=Ul%sBioSS;NgezK`2);t6;Y_jEoVh|9W)9Ij1YU1o%-fx)%33N3%c@sq$|?~)Rk ze{za-Ed7$n)Fs-c^Lp|)Cb-rGbE?nSkzIO?>2FPhi@x7Sjr>zM1~*QtNIVmFlkUF8 zqp9+g(W$2QxR?PL-uQo)$I|${3CWtd<2=8@SLvA8Uv0;&{X>}v6p=&j*(<&Wxs|9t zdTc=+ILKJfbz*}`37#`9b1{5w{td46e{R8*BNWD-NXdwl7oN&9KI-N1%*XPIaclpE zU!Yl^fAVfag6C4;E1Zvhj}uzHJVQn!l`_<8yz#JG9G0BE$-m)RFE^Acefi#K^ig&0 zZ+p}S@*fcQJCtO8UT3C;15wG#d9%E(X@!h~Tl_itFV6A0KVs)EXRVIS#2xQ-e|pgb z`uu^nFuFLXW8=*^l!D38OCN2Ukoa8AQFlE@u3ONe7RTd9ROr(v5&btC=WDe4xkXfL zPVi~|GIiI|#_b<|*vdtBaf+dp-wQF2j=A4YeLAh8s6b>%aN3JuEYZXQl zokC)jUd$yo_~VamSQgQ_ts^p-VrBCH$Y&b9*yDE{P$o3wrR- z{pig}#C;bA+tvrCQ}Vy9NxzSe=qrG?xXAh zPDVrIpJ_Zqn}gqQ8{z&W7bB2}vG!}rG3P2g)1dZ}PE=oY>HELq-;9J$t1}l{f13s*m({^Hd)E2X*uB_$B#2u9Lr=-{Q?bs|$=X z-tg~i3^HX7e1c|{_e({vWbk~3OZ+weB7RiygT zi_;*~;zc@nLI40=K%&3XDwVi(#8GihiIs3(X3)eflxRDJ7|+ zbjsW~`!PQdD4%$-*-el~Oe%MWc2||Wn)%>p;qw*Q1P;{6=Xs8}Z}(^9Em87c@c~2q z6T@_m_e&lxd_#X|*H_ea`+u8x_$T*Jo;ZzP()g~e+q`ei?EhrmE}}@vz)t5#KTA|K zU`f+n-ZHwiD^Ew9mTSf`I!egh+#T{uf8jY_f>It)M;Bz^`IG1}=MTcAYWfP3+n9h0 z(tjno9K%DOneqAh^$d{+i8{rdkCU0dO?xSC4SM5JSnrXej6^@%6n~VOUfOKL_?YO; zm_plr1?dhWvGv->UvSq~YFr#E#x83JQRjYAU%0Qmg38KoePjcWA3fx}rE>goGm;OPAE%`NG_dI`|^*Re#CR)2hiWcb1BY)o=hXB@(Kt1m#P!(zF zriW|AJL)_|7rnN5HH1=8B9%|2|MH^608|yzQ3r9rs5>1))Mahtty>_j5jEIHF<|T| z#+PP5$;V^)ml|Ww0?!E^K?pSnn-M|V5_M0sJzJmSP`|JH`xxu%m-u8{GaK85g4bq@ z|EoQRHzO(#41cbBd_grQMt2%9Zb#t%@>~P%GoaB*r-m~D$xjU)qvFk8|L@K-`g$Jg z;C_h%92Q#RmKOk^I-%NQXc2 zibu(hn15W_U@GxxE#5lzsV--;EjJ%9Ffja13>f2QX}cu2KO{N?H4$<%ES+ep+Hm)F>32HE%$ z@9d8{58StAX;ge|r6u7tJ^0A23@U^oD8~G(r+?Rb9B;jH$Xd^yzZ4FUDlqpG9l|@> za*=04aGc3T$7%{v($n%5=WNT&cTRm&8sV)}{5Kk3=6u;v&G|AwaX$A~9EH(xC~S7) z$iCN^cJKU2;RW-Yi~mlS_9*{LUE9mp(c7&yz5ttyMf)Qb0h4M(WVK5c5u2RQcD1p? 
z)qjk&ZN8&ASPwu36AN-TdU1sA?mfuHL6IFni-0NV4&y4}QUrunHGEo?CsVwCAo+i) zf1A=59r)zPHy;J+uuFpbnP9CpTq@5gMKwMmju6M2#gSi*m;FlSqnFi9Bl=*=O{3p@ zl8b@TQ=bFIs^D4otm`}vf9m8KJ>0+6$$!1tdAiNMmufUp1wZ2pjE1vy(Z#(#pP)bC z;;X%BD#-tD`^BRY*e7<7j%|7F6BIdRa12+w;UQiHGWDpIj}yzEByoj2L=XS(_lbE; zbryMp?g7y0hW%k$Za9wnQdGm(LQR6Hi388wk2|-WEG37#PxpaR!I-h?Y>IK~PEOS$|59t7jE8~f80_mMepY2WJ_6*L^kjjicdef3Kp>D=P# zf74&T#&KVG2h6;J9C}xtkL$eD>>>1x%cSw0EjARaz}sqvAJJcw)ZwdMGSCCA1K&IC z%C9kZsH%9lTF~|c`;(+2Gk<@W{;)~%E*+sg4|Fc%=36W;edfs@!Dc#QxIq&3fa?=E z2DN*_cr-mEaKr$Xx!Vptn6?s(twkL^$kU1zR(YAHXqi|vPkKX@#0cygGx;CSMXhZY zqwCInV$vsw&yy1>CH4o@a({w})Kr|*hYK5wZn|l_FLRGV)wUz+&40}`;@ifg*ry~4 zu)i#@j|NHLLlJl`>37fl@Vs7;q*a%VUMcNDsNmszKIsgVc>Iqz?*DYApZrH$8@IE& z>e2{#n&uTbb^a;0#>YdH|2(@lc-T?P!}Iw!2VX+_4)^#gANWlh;mT2CVl1t*U$^1Z zsMGjn!$_~1#ec~;|9`9^keL!};T!Imvg(MVPF%75U>Ljcd)aA6Vwt{LuTk#U?MF&Y zJs^Jqa#*Y3z!49eh%skRIpRv17|G9n~-ET7O^idxRWV8g1d?%Uii) zCR>iZQq!+(8p8Wa=TqUC1F?DB@Q&C{TT)WJQTB4J)3ct3r)=xBl?Nd?bz7+SgZ3xF zKsP!Gc!n@~36GH$lxl_Zhz<C{)Ot)Ag^EB?Zj)6Ivi=*#dS6VI-TlY=|8uFdh>Wa@u4hmk!iCZ?ra1N7v!k`yeI*NE4!hTu4r~$T7DLS2nj2 zuh?Kr`F}thb;wEJ5*3X3)A7YcjZhs&|E_>_d+UQx4byX*ALExAXoAON51OIf_ zUt22sBRXyGU0qJ8a%1Fc8Ee}$F|kdo3&IwiPk%S17@mpxitq-@Nv+V=Pk;?tE$|bV zi?cq(MH4w2gr_dMj?43R7oF)l9QAcSc4?Dl{NL@{PZwUMtT?wV&IQC#!5WmcN4$?m zCyw3AhE9~0RE12k;GIC41bN>F6-XPToSw2Zk7NdFLk{-S!ilmql`|rb{mDQjc`Oy< zd4Fm%2gq)0jcIudsZMO?m>pQBbm(ytP#IKS8CncVoEsUF$pB+jSdn%{l(&bB+eF#j z5wk+a=#OlwVC;GW(8#?gcw9YQ)}v?Ltj7Z&?mzS^n|e9hTPElI$K=A(aM$14VSMI) z9VfZ58LGf&&?eu#kFNn05CezfUEB)t^nVnm7Y%*n&<&771vC#iv>y8E@;7kMUku;9 zqv5oby@$T+{(GA9WAv%&JsgDxI&r*04;=OWW^~ePy(3V0P+OJZjQ+y4`-k1{70HVK zP5<(LR!6JAMV&8K@^_od%opyFT)tw5y$~of*gH9d^o$!TL8n>o!xO!S&S1V_00dE;|rs zjy;}jLE2UZnNN{ZI1gSpbKeGvkC=>F?{W9=m8PGDpL1J(JNeGp(5_9?#yKtElSYQO zHbkz1SlN97721gVQ4EvR?i<{Wvww0|UcC8UO#*J>CdL~&0;hLCei$vH_mi`OZ|575 zY-DsXPg$p0!pu`$%4DC9*xJdB{TY2F869)$rgQx%=EIyzrn+ti1Zg-JP)~!C=@M<` z$bFp{AGDpQH6!4xly`y&&h3d{`t_l+Jb19W@LeO0Dn%UC6rE&byhFv2h7aKmQkc)~^$bOyX+Cq#6+A zZYJx4W3U7eu2bMoSf@}Y>wnkDhQCQI337GsQ}U@&hsi8#?c2@{Y>f4XxYdhI{VFms z^~s5!nKyAP)e}~1F@`?{~$7g-^ z#|=5iga+zA(_LRmQBUnc<2C<&UIS9rn%{P5zG~;7lM%?O9eVTw-a3v`(BTvOXWHuq z?&cM%#9VvRj}35-DMcaS+@}^JQc1!~`tkY{#QHk#JmS2QDbhQ~k>2C^5PQ(Mmv~bK zE`P@%5vX6_km!B<_w&bAuF(Iaoz0*wZK5l|luclYJa)KeuN=umC+vK^fW|iZu*Tr6 z@**>H)z0g`In|r;mC=9?EtCUAZRRXJ?1SPOIiql&Du2j>+DP#{)Pcj{Xu&(ik6!fPk9cN8e4bCV zTRW#s#GyNcZ=euK3Q;YFy5Ye&U^|{PylL`s<-uv@`n9;6udeg|6{q=l;Y~qKo8W5C zJ=gzFUX|Co&o}+cU(aDzoUY+oI2n9s9gs85Zh5bJ8Jrdg+olCM2FR<%2Jqs{B7f;O zXSMt|m!+hTP;v}>dQ1X__$?Xug!Dh=9Xed0H_MumWEE{uC_^T(hHxz*!XkjCt z7S{=C8}$inJVLXLh(kA4gLyC((;$av+J!OES-hBlGgsX=Vs#@=Z9Gt!?Kl%Iitf^} zYqVEPg+^^aAxU1&K;j-cwQ-$!_0Kou03e4a1%)hj^BFY*$TPE8oq_t zvMB6veL8DrmWvHT5=X>~>^6;E%h_CX8=nDrWiRvE1zT|5f7@s7&t`1i+Vp&;`f+al z_45yV#+f_y>dzjZH8$(uj02b@o7aHwWB)VAi#gMeA6vo+Y1;?G|5NIYh<~j4+Y}0+ zrA=}{!Z=wIs7AACxSB~TM5e~gvkckX(V`GGk;#AoZ|_xiXhk;R?sm@PvPhx=T*_6tM>^!q}F8j-b&!_9$P*$!3USReVm`OnV zlS}$)wwP${W#vX=Tqa{2HMh@pKlMqLs)N`-P<`)13wvZIs0}&&P<;-wOFK0hsE*23 z5#qd+8hC0mB?eEAdtcWdOXQecoV8Lb7Z$B|P-(>BY)6^szzQUet0*i>V`quSiHGPn zT<;F5Ij3dt*ZjlR_Lm=51`h$!moHZa7k|^^rgTXELkE1svYj`2_}rrA=HMvDI1ni4 zn2%M%BNJo1W+_R&wCy33<{APk*v~`HOqo)udyMNP=rn zcGd}M>`=iNY~+9?akahh$50(iT>V5ZW68un95)01lsH7<{8w&4appwFVw6M7kALuR z(`Ie#SBT3ax{1$zZQ2KUZ0m3-{^i_fpMu|Fkgx6bhxO`*PHn@>*^IZdO?cwjCy0U% z_axG_zSs;G(Olu2{S|~YU)U$>=u^vXY=MG^!cHb&jcQKd%abG4dY<~Rb_n7#6fZ>b z^jiE$|a4b2fAOe+AugDs^poQ%w zM<1)~wc%Os4`7$j49+17pT+g*OI36A^gq4DG0&r5f_ z-9LG_<6=@R!nisGFTHovMDt>anu(m}Rc4dK^lMQBQAC^_Q$0thG$a~vk$)KHSJ*HJ1kz9p5WXe98m302GoY^%Tv7J$jiUh4svjx&{$UaL~%;d 
zmZxk9l2`tC=Kk)ZKs&ezB?XWY8B3k!3ox=O4j8>)5ULx%lYg57-KIVs444$5R!=cI!szozLq6TagUI{jkJb3?D7SW0 zP7ZncVy|Jki{_Di?7qH6AF1-o(^_0-amRT-HI1S?hC83 zXxsK@UDd6}fNiv5%nPJGPMPx=d}Q_Lrr+?LUxTv0A*={#{RK3PnJ`F}+*+aeT)uUs- z^~xS9NkYgtD`hkxQRz4`TTr_cE&2IGzqy~v_7_`QBNdT0e@)`=X!vFO*0hN7h4Ycj z^XRl6ERuH}vza*X@*_@Nx68jsykGe<{@opmN$6S~f4mHbn}4!zVFWOr5S9$vk-g%* zWNJtG%GoB_Q}o;xqojC>Uv0OSe9hwTwx1`pJm>f3NP7~sUq_QwIMsGm zP9E%Zy(;PSbuv3nHpz6mm8 z0GECY1n~k;z&b$0s|Qrz^CT1+2GW}Yh30yX`57UZDt}(b=6%|m14*fmBN}gke*mah zr9t8EJtZ<=;0``oCmLDja;5;I`4aOt#OE_kxP}0E$^(p}p&Fmpg|F{cUpf6&OJIIG z@Yx4f4=mt-hqh;|giwz*z!ZDN^yWHx1*Z5!6I0Ja%oGjmxrC?Y{=h;$ix+?nUUP}} zyIB6N$$t+xIg+AmBz(aehi|;5B|g7%dtb(+B;)2fBwz=>+&B3tuJ7min!h0=j~Fo? zDq`AW2%^ID=KXIL%uxf|mU#Vy`FX|mV5Yu1Sl=Py-B<=4MdlFATRm`4$CbPwZ^0MF zpJRJu`1?*%9{m07I@H(emY?B$xUY}PeSb2>>jdxHL38~fAy#+SdIRzG{@w4X z{=@I@?wj&9_a|_DUh>QPpy@A|pYmEicjY;k&s_q`n)>RL=DO7{Y1(SJut&W_KW_EQ zKbD8s*||MR96uc#525DM^Ycx9OHA*Y{I_rKSM&kM7{1l{dFS$cRe< ztq0xohp!3(Y3@U!$r%5}=C8b7Pr&d%H~Wu=?}tE=HyU*w+uyALjmzfyU;m9tKy5`- z)+Km8}Htww3CSx3zNs6m$*UIr3>Q$==2I`}2&0Tm`)?K`kXP4m$LTA;$1k3^j1 zLBYEs;dx1&Sj?*Yvv8Dn3-Vpz9K}4#54F=0naof1U@jI`-lc;=C|;0Qgd!J(O~q8? zR^b++EtXax3)c~)D&oQ&iB(WcYBv+>vY1o%BtEl3#hx#|pjg*lF1DLuS9@)Lu~Wr2 z^=(mQMVtC_ao|NS4wi19R*WXnjpWLqliGdbtE!$px^ru#>8t0eEo47;YhH_S zn#edVhSo!0I=U47G6aI?*t#M#)b;p@47{(AR5t!*A} zC*CgE9^;+gg}Guwn6_H2R=3q+t=_2h(R%5%I-^$b-Rrj|y_fjIDqH-!KU}oho#W|) zEPB1p_wv0TJ{O(s;C;P+PCH*8@2mausI%$47yD-*&8 zY0@cs@A3Z0?Uc)RxqpIx&T{nbFS^@K@$v5NyQ@y&zl%lpqmzHX^Lzg~1yXVF@87M)q=?KOkN;o-6OJwJqp_G7y<55h`4 z9(!|VQN2Fu-dj8T96Zjyr~SpO)m{&#)0f$}y?mW|FX^Wkc2$pmvdwvX{r~@N^Ogpx zRRv@+<&&A91UGfVBsplZ3g4zaSWXW%(E%ps7+yRY!yw1n#3DaHWjj_o)B9N@!=RivZ_F?lS!@;vd@#%Di!TApT?*< z6Q&jLol`qn+U2$F{5cGKA6!`2Y~mcS@k5SEMmrkL%3q&*<14+F1w z)HK$rDd!VQdy0c2lzFaCAhlQP3hZUd<8wJTRi(-y@X(gUCa*9?h&TO<*7YC8g02K5 zw(x;?^-u#JYCInO<^_zxipbj7*?QWLo93uG$ zs#qg}l`mxx>?{n(NX(&hKDj*EUh}`iJ6cbuSK;6XC5T)23m^(qR2M75`vHobW*{Grj^NPS$ z;Fy^5+jWHfm$1}7Kckp5-%r+>S>XW7A3ahH>)-p1@L_1i9gqV1A-0Wl$jtP^M`|#C zq{50dBts4C8BtB-+eB};RE->dcV*8W3H*2JIKYANWmhKV&7v5n)l@+n5Z zlgqfArhK1FX`uWakSxMErrcN5stjtvaZR08bf&Psh#C$R*tJvhd>TKZa1K<^-&GQ5 zG>OuR;0}%06?CyaLV^8*BkMKRO_Dmmf#Z6#`BGIRh1a9LcdQEYthhb`1uS2e9AyR+ z5b}M_oBLS5NpruhkEAg7HZ zgS|T+#<+YCr{Wj_pLhk|S1hdr(<-X}zWg|Mq+_+OSXTLY*7xVo#Wuov{@Zl|jbx9G zh9q1L>7yQEA5t)NV>k%@uEDH_Vil)bFRH@<&eu~%%*{F)hqPKM0#TA+yAF$519{W7 zS#Kq-iL2%}&XED373`V!IQ4)0B2a?Wo_SyW`)>q&8VD;};@*Pb+#xl{9r&`YkS$N* zK1SMhT4||B4D(9MHHp4vvNY?aXVzvbTSj%53u~q-m@!;O|Io)2uFaQy4B?@g(V-?u zn9s0&A!2ECz-wjJv^mf})O{{$e2!cTYnBg5wXEQaMLVGG96Wi>M@oOGCgt#t^KFXr z3Fe~*4$kcmA2f+4d1dw$?w|S$ch|hx=QwhS>tyOga!UDJ)b{L0e(Z8zF+Vue=od@1H1jO$ z|IBw8D8D?ny+biNZ~ezQtZ#i#QoR)P$-#ae?ci0mbLq6**_1+m8b2Qc;0exuGX5=H z?6k@6>59L59_H^2%k5NT!KSvDC-y28>^SpxTtg&x{+ux1hi8AU{xvPk&EE27TJHPM z^ST=>j~OqfZvItyu&>q6I>0%eCruqT<8OUw_jC?{u*A8aIGS3)PSnsBw7VSo*Y`WV z&pq6a+&$yJ8^iqlZ^jCak4hDrFWiT7`AAC916w}YW)Detm^xF_j(cz!VJV2XRfZgc;jNf{g} zhU+A8&3K8)pxL+LKATT$g7?#{KPTS5xbNe;A(G>YWq2LW_6rQDGa`{TH-YXIWl=Y-;CyZIc~HOfnx&jo*eK7Ze&AK>RnlfL=fq;Ebq z>6_1WeKO|Dy4qK^sw%Vt@&L;}^ZkGT5QgcdMSViv)h1nq=>QUwF7y2WK-wnVYmDiD zH0iokg`BBPx(^@EDjfH4M48Wt`JDT{Am{H3^SSwc*`!zaxopy#&#NZA`Mhq@o6nyn zA3!+fE=!O&*^aA34O1= z%^Ml+&zRm--1MCzw*816w8VYtKr~|%f72t>q{sd6_?q6rJ>M<;c}z=wO}{Ac`PS}k z?&z3z#0M=gPXp7hU%Vb6nEpL((pOwy+Gq3pYubORswBUr{lL7BuW4V*bMME@po`Cb z@OlJHddxfP?kdiA{+vRS{-9-1?JL3Y+1IoR^F6+%WhNijwB|h3y06FFS(E-;CxyxPHLYu&eNDT;eB9F3bxn11?yMl4SExB}P0%)jYt~E_2Th%+hB}h( z{%wEn=u5O!F_El_BS>dBoddb7d700FSYD0&%-fmICk6?j#r6oWpw%&8AUE4ef{?on 
zQf3o?3a?$c+rZ`efZQNJcoj=h;JnJInEEARY|F^!8%XnL>MH>vPcLbw(c$Q7M6m()DqK&HtT;wt9~6#9EobDh9kktO~`7HHsQb4|NI|IvFnlH%XJ7oBXx z$MmTr2TUg=zK5f}<_a7O-X-4W=yzIyt>9H7eYS=Es2KAzLupq>urJlW!i-y z;O_$PP#FaDIT3hOb)dQ5*$e=zw`{GEe|vB6+jFVL@Bg;nn)2AP z>8F2{KMGimN35T>^LR)V{x#+5VEP5the_AuOG*-*e`f*PV87X%$A`viU&A-PFV$E- zCH5x^ul-9t;jUe2AIt3^N%@;eCo%5!8Nc^6rVlXP7m|H1fShoAHS>R+zVE-^!239V z1h@Dh)>8TsKlI0cguppo3FmE!3m&Y2X2yk#eElAp&Zvd;_>JQ+kWk~e4gvWTQq&FU z0xMytp!fd}_Fg-(Gs}YBs|a&72ONQ(ES`Y?kwx#lyAkNU_Z;Zmh5q+my$&D&>MNqY zjz|-cnbquwNSeK9(_(*&pZ|yV)mNd7iJ`BbU+R7~Kc4=nQ-5*N&wbe6ic@LpCqI-k zesT*rnzL#9r$!3r(YD9%6&L<{(Y#Ij660OGNBbV*SA0nO6%$x| zLi-yN(tJ+)7!zK6MF$)cReVba786@PQHL3m(ELmX)6ZL(jSe6tt@x7;X(qGyhYmF+ zr+JtTBPPFij1GT0rl@%G=MX6^9-zYyQ_(-+vJz8WKhEkgbtQ;DPTN?5LPy)rCZf|3 z#)REf)MXMd0So=Qd~kTSgZ&I)~Gtdo)&q z|HY+S)+hfo?rg{^7p@R><`yjPL4wPNzr_ZgBbb_{F4n7nhpJ!h}Iuj z+_L`2_TN9UhUfj8^UQDGJnpw`%hi6lRsG>S`^!D+m)n2zd(!pGTka44>c8?5`pdh& z{I7nQ|I7c3ooBUQuD!qf{cWH9t6%%8|E+$x@!Nkto%!XE^vkX5mvevl@BFa#uYUVi z-sXPgIs141yx;NL?jJ5)+Mo7Y8u|+Uo{2V|AwKl)-+i<{e$q;}{zq5KA*{$eDaxGL zXwNBv*D3AS>5y;nuRiJi)zSX8_h{EI|Jz3E{c^qPp+xYXIEc?|`2GCVt^7>_cus{s zzAb+}8uZ&Y=5C2`KZO{C2@7I6sxc!&&fB7GI`R_XKxL?1q-0GMA&R@s> z>bHL9UuJ&wcRc@>|LXt#&R-|}&Y#s->^-3L0EWf1$!1-Q&-h-g z{ljh6pljD|>7O?fm-esc_rE+p|K%C_*Ym&h>2CI?zp(yFay;}~D)?PAP#DuyKl>i4 zT{)BueI29zpS1^DzisSdC_%HvnHaedDE3f^P|8s_Qf6lO;*I4>r-19E&-@a+P^!0x){r_y({Fnao{lEN)m;C%6|M9<- z(>f&kfB$d$dKCLJ7ys=){_p>@oW}isFO#hJU;lm@n*RAuC2@=J|N16*8ace$)5b@4+O8NERp}2CR;&Z|09Pe zL96~1;`;NK+RrPfpC9e#{wQYI|Nlbvn1QBky4t?+rTvtO-Ck#AEe@d@i!Fa%X8l|| z9d!kRR9fX^Dfi60Jq0@b(BBwOD-b%qdKXD}4IsYlO$C>dK1iJJ$O=yO|%T4=VYfhnb@D?L@wTB;*0iD zFGB!H9nsSxqij+I+LFdcO-W2>GNWiSIZ-8pQg&ANGWeVg1D3*nZ<2 zA%(P6m=^wid5K05OZN*Fhh;nWuWJ6_QI3){og0eNTln`2E%4cZi-Pnhj0>8ImQT9l zbgm_c1gY~po!U6X_VJX;2*`;HK_EocfRy*OFlk}DrpIa7^u~Wit&Ef-I0C1Oo`*Bp z7Tv?IJ-xJlY{}<{drd^#!DUr2zjXDn<^FS0fFp9TuL&)x8jG@B>J@m_LRKx-_+ zWt8n-WSf9`nZ`;R#(F7tYTZLOvDd>}EGcerel^<#%_-V`2y)Vxj|Ri?#iqr=pS-0B z?Y+q_{$Vh>basCV#Q8c1Cy%Udw^1}1Sv8*eerTRMWBP#c_%9JsE8YdBa^aNwgJg%I#*hmJ|$FbDlEesO!kA@sbvt({7eF*ECCZ2IXs1yyn)W^!ujzlzqIz^iIm1%~=Qp=`HyW)N zJsx5;KF%O+BfSAM;z2NB`tldeh?XW{T;lk6&HR)@3bSUaWjOgH(bCb97Et7;IO$wL zrwdtT9TmNTSR!j*LcME|zQYK>^XhWoQkmg;@t>4^kqd-}B`%h<4Q8=KpFG%}5t6$; zaFXuzp;CW}vZoR0=N{tktM{iRh)k#7c{R0D+*@d5S$BogB+Ft~Wl`AIVUWI$oLPO6 zBAcDvvFCI=GPz5v8vb^)P1m%9KHRfZ8!srcpsjWJGI-xx?qd1lA7ca_=IN9YudLZRo~=hLEq0k>^vhEp9rSHAj^O z8(-uLR9W-9Nk_L!)?+3Y5$r+h{?zJ?yH?Y;RbVG*mdr~pWI*$Z9HWQrS7@tJoJ^ff zI*dK+we!tKY~mT3rks|t_oK6SNK}S2xazmE9gO}YZ2Bd=6%@2ju4|G$PAc6@TlSet zUU7dgcR+OHWD~w>Y|JBS4adBWuV1NZ@T_O4I0|rD8TsQI_w>z$sc0LDUraJH@7Bkf z+;l5Yx+k1!S&NrwO%lC$6X)FPvQ%nC7IHVoRam7MkE`mL$d#y$_O>#tX(&{s$yL3C zY>+K|TO^h2<@T{6e;B!aaCSsey!|ZkO0IGka z?XyN&MuB!+?pmppyx9HsT@j)P2?*qxyd;V_N{DB}Rfig@ulz>JW75XAtGOx1RdvD%I0c zm91uqBwHR88t6ivQbkGyJB_ucyXm13EiDa?K!^xLfc}o~j7E_U10>#lXU+_lp(ZbouP{F6qMI z2g^*yUTlP}=1~>enAS+&ZxgzPHNvLL2uEIVy|>-9s_gXg_U4?@>J4FbJ`K8<&&Gog? 
zPMyQMY{ZvgBvq2OPTy%>34sx-0g*FY#neDE5;0uSrBK?ic}%!0oGV#Ux0jk~yKvXo zw^l`2atKRLhO~|=KaxkbG8S0$eWmGPwZni`d*>tF5r-;~6;Y#?c65JuA4LU+-hDuX zG^;}C)ah`_GQkdUb9mvl`|kDS1xKy|6ue!8p$K6H6=$qAu@g$PxWW%VID*K4)-lMH z@|@nbRFErm(D=+-i*ox-;Ia>MwIwV}A_|^5?dFhJ^lp41L4}Yd>r#&uY5hti)6#kY zG$UGIYwHR$-^zrGQVD-LcD^4)n?gASm8)VBkB_p@%0oPwK3t;a>9lDPom|g-SM}Pe z{3_#J(4osh1*|?QLWLh~Dg%zn-vhFJUnY<}nxHxe9r>0WNIeUSu+y}n%C65O&_$Q& zv%Yud*rDz1+J#$~x)#~wH~)qb1K`~FBv+iprF=3iVOGJVew=^4#Ztn+Pn3RrRP?!P zQhAcJmOfnSDgkk4wW)}m-X#Hdx{PSrT@uVf{AFFZEw5-pDlqkpe4aDM%DNbLBjd-P zWRe-m(qkE;(pGjt*<~TN5yOJzh3#5~Zx1Jk@T;y*vZks1bkcr9zIi+r8r}Ja&PnW* zA03dT;dB~}iZ*}rqNH5^;&N1(_nSRB<^Y!-5MGKouHdYpN!U!XS@pBF{qu?U|h-tNAB#82uZzYpxDKdj4`nr$19~Jn*k=7(GE!3vib9K`zm|br7PokKO@&nHJFsxcT znLXXrZtc!SH}&I7#$?gxgFUjoj>`9YFP zmbkGaob4wXgBhJ2Scj_8^pWmZj701Cv?C)1{_0b{TJLS0-^Q^wQt|gys;@OfFB?Uc znXtgx!^!1Wx8B1nAV)sXD0{B;LkSt?B7IZ!wh4Uq^7w@6CY7K&pQw z(qb^qB=bhgRa9&0T#CA=r<)Vf(AePR$j1#T^C~842YTuI`w>bJ3}ER)u0eSe0<%!D zzUA!LZ>2A`qe!2_`|RoWfcx=oQ-@U;zqmnSGwhiunneqHGM%`u<0r*Y*O{363Z@Br__IjzL|wqc@Q00QPBqBN@uYr$v6=RIW8UcSnveXK)#*M zx7+~T)imuXbhA(%$V`W9y>w$knq_Cd!IiWimmbaCR6dqw=*?DOQ|jib6Vz>V9xJQN zPB%dJvNQnA73}KRG?M7KFcP_D{l|j6D;Jc{!^9(6DfzQWVe+vS(9C}oWwGh^0oS1R zbg^ZaY}J+Y7MZ{8mW6}sR`SsP25ZytB!W$?}5O5!Xz$#U;D#- z@=`)d_VYQdtN*kd(BFT4fGa1Um8f@u%1Ay#)NRGw65%m$uLii8It8KdB`?r{-Lzv% zxwGs3VzcS`pzrVk1S$KEDw`a6oNZQC*rz$gZb`hf;Uibx+D$-;W-*WiD@n$GUo3OE zP9WzTg{EOa)14-J^@VvD-X%EbUAq~DxT7(lx|Yw9-zv^hjHiDzxfYN14Agu0MKPx( zaWF!Jz*yS>HfKyS5<_11lO&J?!$75(UGo_jPEgvHDCv^OaOPPB4nzaA@5#DlG2Q&j zQX{-GHq056hi-oXcG=DmwPJi00G&3Wjy^|j9B+HTcbs=;7tP-DeW}+|?r>?P@tY}j zaB$Klp6pN_+-`sQ&(F@4?iKop^0zDz@f0&G<&2E$JwF!FLnmB^S5$0Lr)H-JqN_Z< z;w76|8I|G_e0LNwc5EU&YMop#;q!^shOXGpw^6}Wr7C~WyYh|VvZz- z)@?2jiRO6xzBspT9~@2q3_FsTXWhvKTW#vk=AqXG^?6~S;SL`~QCg4Rw+uhi1D#6P zEP?oqpJ#tny+8_C6_^AZ4I+T=-3Yt`G*U-M+@?1i6zoe}1)7A{_Ld;|6_K&Q3QZmX z+)XwnZ+HJ(IBWOk0|?J88S)eJ^+Wp306ivA2Cw4KYm9Cia+&ScNW znhx!3nPW~tOxTL8FO4T$=D26{h5%4jg(0Z=DoWj(`HtfdE2V9E+|vd`MG()GKs>1gep~CziXJ{+4s1 zdg{7z$fr9ZJ7zrHsbB9T@&x_r>0&=q$zq&Efs2)G%me!caM(NaQb6zdk_7s7nsKKO zMI|$WdLZsSZg$}Cpg}mdNQHvq=65^AhbMn4AfWR3mO;c1)AA6fClUQDuBcD}&Da4c zQc)u>TplIoK4oyg9!o*w;(p;aqp>^brw6_v`$<86+Bk?W& z!>Qxx_z8FkntVG4K>!t8x9Hp}191)1fuhvC4hJh+S7RSp+7wSAQ~8y{B-(nhA8+sE~<)XHtS5EX>ot<6hu+iJ`Ygvq*jc2Rw11J6;hwt7BNfXul)td zp;xjehtg#&05U+$zj)KL0n4QT#%H>NXXSHy`Zg;c89ae(c&rZ9>H$hD_Z>H>oR$od z>~>z4F*}Xqz9XY6=H8RB6`XX*i^0tQ80?B8vB&d3xE1R?^wS-{_e@ZYe?l|CcLG>{ zw3JtvNtmM%V0b2moi@1mA~^{GxkC}jlT)3mdYRgG*+%Gqnb9t-=2g_G+o6R@2xPE# zfH{*&0DF~k8@SqfpCmy5?BjWt^li}#q{(^?F5UkY+pcvma==nJWFq9qA6zD3%P8 z#;c<+YA%51D9k5UQ@JE8BJlyV(2o9_T!`6ALs3j;Del`#!e(OI4Xcwto9La36q^0% z-LmNde?dME{PYS%Ksg`(dCAl9@U=EtHf@9WyMNg zW+es-uG701;^M?h;!YR3(R&I!%`@`wN%93QYAb~D3oe3BEK2dBe&!@@DHP~XZC}t) z#_(EF#6x|=-gIIJr4E+w6PKD%}Q5gJ;>(Z+$542=q3yDbiM{%JUIVNoIH;pJvu^tBCsQ!JVrb4I&hM zii4uby&635hXU_jCG4ZmlAq_iuI>wB30}zYIcUE+~Z54!PQTO??Qrrk#Lm* zSXAc@%P9qY0pwlcMBqV&b>Q69Za~8q-HO%dVm`=sH+1diDlK<>=5lBCrj~1Vx`Dm7 zOk%+?Epn_HPhOxemp;@yqi1ddPmt@{Hcq$=ETLGh+&RGb3PGlND zj3x@8h9%n`Xa)6jOC*Y`c8dN=OMLzsEBj`^FdA#gnwy? 
zBP4a2pijWMl@_4~b(FJ-zh9I}1iAIkj2yklP*4b6eSMvlq(3DfyTiUynsP|K010mI zUKLf|J|TjGLivoRw=s!`t`}2+f!ki6PLukgl12W8H4@B!a_H6}taC8;DFFhOZ!=Q> z!-xUmYntH%vtB5F)|K;R4Q0<0{`%kHJe|l zHq=kYz^PVB7i|fd<*Q+@}96bAWS_r7}P?k@>Ta8+o=1>-GdJA8Sy2(^O?rBb(M5WKq1c~aXy_T2o(9XgoRqm<@ z#9gqQWbd*0%A&B&rS-T55Ds^N1v_bI-x#rfec&5sP^#EKIR0w2!lGLDN=MJKV;DZE z+P#hwvfrGd1fn4%MANMo3^z(U;-hKX91a5CrQcWg*cx9MDPsK*^JO~{`5EYA`x(o$&fl!5%Y^W#o zuzZjKxAOdq>ETV8LqVD4LKm_vPe_~Puu(!3zoQE?71?*k^fg|wdADnV5PtexL2n;U zw~%4lqJ!&OGV%}|+mnYjCooiXQ+7c_2FVbfII)TrUavCJ>ZQHigI=35;9Ki|v7Rt& zI3A*aJ=B=Xt%k}9D>_(D*+i0aWrrxab0bGqKUyrmeGh*ZV>ev zh?y5bxn)@YH8A>b_|ZRMLI3SP{>T42Sm?jT3faH0LRj}3E5y_P-{t&&{zeIz-t+lM zi}>gEF8ZHbIi|8F{!tKpkA7-uGtNIbbNau^MbOQ~Z4nwnKZzCMKeYgVGH-s;la`X! z^&u1TpWq_zM=7)Z6St&A4M`SAUT5e({mcH=^82R@tQ8|S9#{YF1tb2HLyr4}YzY&d z;(-6Dm+YqhWO4jl2}>h?G0(G2P)~8d{geXyM~V28-utyA=rYbkfT7~2yTm`^`6*9G zmwX%32)?C={&PLCtNxejw1E&8&;ZxP&*e&trv24N(~m_2>A>%?MbjetPl4w4M{*pz zy!ygdit0bb2XpuzeLr1cCL>#36J?(GpZPofX{G%q%`oq*1<4A3eeMg|{~%CFO}4?THGun&xwtX&TGUzE#W1)q;&B9p1y$kEzR^%}m#Xf{(gt z1S5tj(4&@r;Xp-*yBBXermcN()KIITJOiH?6%RpddR13GQNWn0kS0E<#*OK&aq>cI zU1m4`5GzJ1DWd2A3u@MqR+>0f)xr!tHR;*Jzj8%t@S&cguF&=nONyIJL=A!9BeIclVNe9W)LyJ{cqn5G1NLru)5tR=sjP>_T=n@)|dHBje*ueGPSjj?SY$1a* z(IIMZ&)MMD#UQUl+YAH-c)Ym27`eVMBB0=nC6-_qLt^6rs>nPgb&%g3X^aEVtL{V6 zKubD*H;>nh(AT^_Nu$8l?ve0A3d960VV;DM_;eCCYL+uOw7c0EM$5XkGjj4KpNw%rtFp zTz4RsyA}L`%!NX48;3}yBl$>|xvOw7>ZX~02F#Ba!wZ;{EvESVT(M}%&AM+p_HlMScWLj%t4Hu&?MRXPqx&8tD{ zV3e#@W-##jZb8pUSVSN3%`f)o!Z8Ppaw{b8y7Nugd>2<=-ZzKq8W6^k4L}WEyGtH_ z;-cBWj}7O~>!UVDD>xeNYi~-HN<%w&VVft>m>LkT6uQiJ-l~+FI2ePS57kB*CKYz{ zwAmD07^MSyCpUfj^Y#gf;)OJ$1(Pxx7I>EhR7;fe0v_I*&B|y~G>iQ@DUa-V578-D#foy+*12z8dTS!4PJPM_`qM&Tw9r6o3NB zRv9=PsmvG7#kb-+fBNno_oSAtW&jb5di$lLeymP6;%LnS>md}>@r56JZoc?E0X#v| z!ncwKQ4oU6e#kG^MPy!EKKnuTJ9a|^y=c%9^-tf(g@{eI{>keTu?e%=Y~+rA^FktD z$nXj_hFErxoN2~V_O*%`9({Vx04)uC%%e_*9QjqP^H*^3DMe%J-J3i#eR-1WynW@N z?>1~uWHIeWP**5NL>pj%%%+l2`N(g0sVJQ6g|=cNkG`}9H_^J&#gt0b9yMD}D9=|X zsn5PsJnVN-HYouhNCO>WcG}0^Q_4XTUoH3M?@xKdNzqf&{J(nN*>971f^K_5cjEj zFB1YqTapjHI6C7i)Y$#KgoYe*0lCW0T^MDw+SkMs;{tEq#)$wP@0l)-CpiK@E-Dz~ z=r8?U^tc{Paf+M+m19Ar+5Y+L?9^bs?q6UzUMf9f=nPp>^>t2B@Hs`r5jXyq{uoO#Aqmfw8wAba7JcU6kfJs@ZfCf0P=TQgw%<8ot%l1cV4FFP2qM~deJ6rm3%H8V`P>R3Y=`SuTxDn*($J3% z}29(rRt-uHTH{qEjORP_LcOOIg%eL=bP9=4_MI?uF}tgHKJF@!@?=w{zNKSon$?K zu(P>)T1YW`2*eLpwhQ~Rc|oaS!+)o?hCJyg-az<}x`k4%MZ@~hFp7fnPBn zIJp6^@UCxPIkZV^b$TQXBUo-$56YsdpaxvXTi$Sgb2N^49K?{?3w|*UY_$EV7MA~A zHy1+kE3M#<`3c0`esp4?)Yy;YMKd0<>w~P-RBF>O8sop)XG^EdLj4%0EYP$zHx4w& zu_qZYW9VLs?!I3h=dhg1v~QVWt?KmXVAxUGL7F=~*O3O#=6TeaMYnHwSjG(OO4=Z{ zXaI(PkuS2}U}-SM2Qw+1zH0SY#}7(U@WCQkci+{cCVuV;jMLIEMLDBj#3+9bgRWU@ zqrN;D?#23UXGwiT`XamhOCmc9=HNXJhCVA}!fm31@~ReE%;ol7v2{=|Aix}yx)xa! zsh4RibNmOWWuqc5&$j$4pbR0S!x>sR8fTe*4KEgR0O*a?;?=|UzBmiVV;`)TKHkb? 
z-S(yBukH!X1=VxTQ4`?bJ8YF-!$VD)pynH_f}wXSWGhE7kq2@WKG z%C-8*h^`pII0zwk8GeV)T)1)^uKX7JYAj{}g9S2-D%R52L&;E>%$@gcPyz^yzu6wx zx=S5jok@Dy2clv-46F!Xi714J_a< zx4R_x70;pU`0IlhgD%|~fB0fcVT}ZT#UwEwW4qp1&R`3LqXtWcssEBbY#4ZHQe+|* z1c2C!hLfs*cOIz+NR*~5_#$q8ulaP;VHP)%m@~;vpa;HU2-b`!M36{L?+l(fuMO&n zT7r>8!2ElHbxjp1x8&leV#c|KpX#Ruk5u$iE~&`l#g-82k#f>Z5Vl8Q;AU@s=O0%J zQQ*50MHN5ENEcWXUEH`;Hz+WND|~BYTM#FT5Ap`X(_WBvSa(r-)72dFknB5jJpeIF zOiwgNl zGw@@I@AS=(?J&wA(#Sk+5=D}K05y9$v-TqGCqz-O5$I2GuCJBWkdK)>44w0Cp~fQ) zLRA{DlmNuMk()#7HG#!pTYtbaWea;OGz*6)#vNEY-LjB2zDwX^v~i@GQWXrKx3_`y zA3Ws13_L#ba+MKorWgfOA+YL&0_j12_OmM3+iPnB?9OL`=ZN z!H!W{&04P9$e#h15jAJc#81i7Krf?3dth9jJ`rcWTYHcD2!89yoH)x@3d8M4zG)gG zbVGaXI%|=#(Z$gt?FiSRZGDEJ)AuDE524N3pEs~uE zFA7t_@oVSBnGNHARG^|$ymv7XgjjH{@9PlIy^)&QMqXx8wq5Ap zvuo>-BSpM_nPMYa-tlGAr}|zivO-BEQ*nU47{wE892ToHe}n zS%;Z~A22PX26>NhqC`?^gBy*z-~}pBR@fbjcpRhzP(bPHQ3Xezi#zC%uKoE)1*zlE z27b1Eytfc}%+8%KHicyEEvFGTTC|PBRnYCG++1LPhhj5oNM^iC5kLCeli!idFtTVY z>@#+;Gq_f-aiSyU08}V}gD$-eJK)N4Hg`=_Av%fZ+>%AxLgYZ6uuK5LM>0*QN9`nz z%+$`Kq}#(M;lV_CUhX^-7mW;7WswF~Qlv=w~ex%p|bNZG^J`GlH(-E5Isx zV%P6~3LtN&pE>~VMvAAQ` zr^xcvACr;eX$p_UWz@1n?E<&675yA@`(o06rN{(Qgd-Eli-4_C?dwiP&8Y9x^xe@V z)0DAyM}G{gr5!TzN5LD!XGrCpHC|whIQqgX)W*S18FhyWJWps1?wsW8iNsovuxohk z8uuU%9LwN=kR0YmOknjazDb8tY@S{MSr?TORypFpufEk{CU9VZn~HLPK=P3=hkvzy z1qpMRPk9k=&Mov35{-kyG0X41=pu?G)t#6s6#zTKK(|ET6BN4mRXVFMf0cz1DDpkX z*EDvg_`DtMylRd;Dui(rK0uwV{Xe|DZLhjM*C_a1=H0|hM8&83WRh7e1t$cME3?I_XJ|^EQ1nbMxWpjU;z>G~?`9%j*V1@n+tyPXJs3w&pu(Deh z)PmLAk(dhMW2ok5K&%mm_`0a?JbV7r*{y~sAX>@=IR2n+9lZr55n@s1)0=WB$hkK0 z@LN!X_A$eQqVAt24d!u(>TguRq$m%Q*+t+(22;^J7q=WogQS-Wh;3vL@#o#kq{-UZ5NAqKI83LiO|$?e>JX+pvMm_!njQHce7hqCD5x2VGWyF9jp+Q|)ytu;s|3;wj`=@8CL4WQLH=(I(k<=q$~RwNJy zNEz)Y#7|llNEWqK=B=z!b}Nu;>Ssq=>@QN2+QU+8#|%_lUy%)A;S>b zoXqmc5jTSCiW_O!5Zt0rQExMDKwN#WBv!%Egha7nzq>>wMKT1R#6^IjynG^V1xnRs^sy2Ex5q^kn2D z#^W1vNj9QG^PMEkQ@L0@qtLUjKahPxBz~0}C()G(j@r8kwlZHCtj>o7MQ|XG-EGi8D1+2F}LfXtjv7& zdY)Qn^V{+UL7NZ50Edx_VXRB}X0dbTot@coD!8~SQGb8(b9{Xxiq-|30NP#jwr(-* z)wN*^_$<9a?R8UGOU$5 z`=<)p+XB4JK&QX_F#;@}vH%XqbhJw!*s{Ls9KmC_^EUFMnk0sWBy|qf@w4*XUI~Cl zX{hZuBk(2xad}Xj=fYx((Wb!h|G(szxfr(f? zXguQy-=3B8umKwOb~Vc?9;!5IXlgw`{)%VBPGo=g_eeVi7ciR@adZejt^NHl%GV1a zYakgOi8fME^fXo&{n2zA^@F89r8u?$K?tcT)cJ0{J`6v^%zXN5l2 z7XN>S9{xaj*8^36xR^)ad|dKI-jMNWWU5@YwJ&?g?Z z`Lln7cq&&-v*o*QO#WIK`-=8~0Vo~u4Of%8*DEzVKOjhggK^~Kmzz|{(a^f(QB5?& zxGAI;B>8#&i+BITx*tW|M9$n`$+$qGN6~*l#Dj1VL5&ZI>5J^Fk6O;SQSivX3;}ti zJw#-}y%SV2!2zLOSMcDUHI%34x0GXBEn@H_OywJNJL4 zYl>)iWU0|CA&?APNg&+Z9-EroOG8=q@!EAs2c;DDqCtf2G7eG}96;RRsy@Ix8{iYl zSf+tITJLr97yHUl7RfnqVFVMd4?EI2c$cHPu~-7G^txFH!^g)0CV;rz?%m$_=0dwq zxYbDtn*9+>B>{Fz&NMj`)MUlt#WpaBs(vI?Rpq@)E)lLw<7tqQFh|(DhK}LNY z^f5R=cby3GCQD?NwcBpf^xurZ7#V!hLShy&-~3Y z?|8OPb4sPQ@KtJ`;m;{BwFai(RKA6{8$AYmU!`cUV-+w%&~HOZ-@mvwP@pv+8%L-F zC`n!n5Py2#nZRV+FA!J_=Tm<=IuwTHYN5NZXB|XnQ{MSy0KkQDA);gUR^7Gi7nV6_N=;rI1Y4=M45~mJEw;3L4MBM^pfnjgE!7!w-LRUP(YaZ=9NW z+0ekq@>q#bR=Sc_AnmUVwZ;~SZd8b?^f;W}+T>TI*@znvj!h^5>(ut0r6$U`7fY#U z>|o?$8D#uwp#o`9YhyrO$`Ms?fYSzf>$p1^hiTIxjx90^O^96VFrg}ZEGc2C{zWIb zaI`vey(v1@WDm8U`(uCc#`Uo>&1At=Im^CRQDGbMRMl~t@d{Jp(V04;?Tma2RCDR2 zd&LQaPwS#7R#I0$y`{bWSRMRk2hFdpU&Ivmx`TOruJwuIwW? zJD470MBsX3|^Xa64!|K!Rg;9_Up7Lh{1{|#vD^^`#Av{sd8ITbJ1R8#nv>T4#!`E`>b*^;$6@6vzA*{TJ zlWWpOk&@*qW_x31+7H#vG-xfqt|0vFT-KZ-oRb|dJFSWbSQapj=l;E>WMIy8r3a(k zoMcHcoV}$A$v1zJCOOH%;l;eT2gm+IR&os#L-V5n5JfNP;8`vR-=)Zz%>ZQH4Soa! 
zl^6Td(o#RLtH5XX84NZ{_^XX|Lc49}0Le+AV7*qVsx0Dee_;mcsY8#>@<}-A`}uDD z#ZsA#RR4v5MFNCT=D=miaG^J)W?ureM_~Q-qZ6u6rYnD;SOw%yjE_cd70O0tsRnoC zv6*SrU3KR~b@{+_|~12b7X`1C)4jJtZyXUzZ(qj=6L(~M{{p4lfRd; znlI}iKYiy;*ljGLEc1ora5l@?qGbO!V1i7}K;(ZZwioPSzy9u~hCDn|yu{bRSXfxg zr#kI1<%{bmp)~lyV`@K@pEBV zu6a1RGO^A^Cop`h7c>f4R*^P%^yj--EN93MqN`sqJ}9X+$r`Hi#uBUNP@_qgoNv!@ z1*(4w`ld2=Rqo+iJ5hgxw4-T4lZVZEx1tN+yuO2cnwAkkv`^gpZ2+w}@wrZa)ZHBf zOp?mb&DhZLnEpGUzkgL*=EnVxi7f9ZwHYb9p9I|O8@pRdw+Ml$zm(8%{JwN{I=x4j zE4@Cf@@b1rlo4P-9!c)kBonr z-`hgeo!WHTdC=(wCwvyxK=_4!pwF+}ArxY{?}XD`0W^t&jEOyR6em4HleXcD5aKa`PEdE=!b@=`NPK_n$R8HK?PlHKaMSASVOnAfmqcd~b2CT;} zpSw(~;REN2pS;-gOM$ga=K&Lj&I6C@=BKGhUg8tU-ZNb~gF|waRihYEmG4a)i z{#l2e`6fobWxudRCXqm)UYt<`l{1FC&Va!X9hq?hAZ55KA~1UC`c;3R^G7BK)Rx=0 z;3Z`ihnMOe+@(mKR2(yFdxHo&(I^FOs(7L-3i4^g8ghdYJuAg*D$RU}<-i(l##iLVMl>6T76DTby*=bj z!?mg6&uTJ6SJ5YI2S$IT4e;>MQ9U>u8*8i&{8!bhbl_cnL>2M1U}<{IvB5O%ibi3c zA+Et5e7{VsxH2V`D9TxQ|Asf+8_+v4(~vKeB7_#`T?_j6DgGV#^Ot!XJ@JB5QHTux z0D3GZIZp!dEp)pEm>EiyR8y0PJ9RlFi;4cRY1GdJ2OOCRV&{K&5it=}C$}G@2~d-# zg5C*WL-|-j+EuPDWevQ-ds)@6C$(J3)LeDBmVsd}3)eH6PJaflcmxG6RiJ+^OgEAw2nMSHKIwkv} z_<;M9nw%0CgTc^z|90Xs>*KLj)`;5Qlt8>k7@^fj{J@+wSVL4lmGq=;_Gha7&WQCx z3pH09Ll}P)4%cyxIiSXe;u7_9Dc+-O-Rco|CMQtiH@#wNMZ}}Z=Ku;T#6U&H3KQC|EO%Fqxuy;2D9@VqHZFhCUku{ZmUuBLQ8Bk)uRTZ3W`Ani zJqp)uyw1FZuNQB7=1kwSVwn3)%m`f=eY6JEhD%V_hs}Ak9N&bHdnR9hl#M1I+Cow z!kl(NcZ6u&r4YG!F#u$QWWrmWI3tK-ogq?Ix140gRfeE+E#Q zSUg2FKWaje-fnmY1h9;nj}L^FM$xg<%@;SfCsvMZ!)GH2Z?w7Qj0EHHy)5&q+`;}A zGL~7aj-6nvgzt$a#VTrrR=4tHm1rIhW0?C)dHZ|Cc9X8`p=| zA^ML(g<;*~pVH(XnZfy|R7ubj`hyHe^nYw4|Eb_#V@8IjLMs1)MM3{Bvh;_A#*CyR z$qM-|r3#BS|DE^xx9g+h&yq&sUvAj{H$MMwDoB5VrhJC{ zQ>^?g;h!=^{!5W@j{G=Amq{>G{Y@A7$0rz3<4cs&e;HO8hFz>;`)u@2 z!SdHU|M3|5zph&OPsvi5YZ-qrVIon#>^pkvnhpxdTVCWwC+fQ3I$r|CNnc)T;2{9= z`X8GIjA#bGe-B186*nB@5?iphy&XIDiDed{Izn zD!dJ+dA;sTNPduAV`=9;;4T-UpXx*J6=11 zOh9ACTJ!-*UlQ;609^rGYqnfYU|u@SIh1j_D;!M#0Lsebm4|2}*i=X(NwA?m=E3HA zFkhUpp-~RUl2pP^{d~3KwR1+7_hY=>Jp;{H!o`tUK?$spB;3&SE2)-;Om;EfuO|+S}5rP`Qd^HXG$nzWUat7V~A4Xd<&qYFQ;o_)a>{s z_ha}%v0Wl}R(_>M;m6#|QM3*!)f4(mxVDJE+2yos-6 zagK*>0LaiTYS(`V6oVhZva&RLJ=b%hz-)W8Rh6`@QIIexgw@qG}=yIWazEFodjj`$gZxIxtnG1 z8;u{=wh;H1(?II?C^>AnDwmTO9hB0Q-)0Dmm!H(ZFr*;)$Y=vToqp956?0Wp;Z`su zIB^Ce=p%nVH0f0QisNV3d&i~)E3l?(_~QXOLc@qip#|pF&OAXe(lY-@$VI;Zu_5S# zlRi`qp^U3M+ePI}`<4l!NX9$Z@|7j^0SuM~8Mf>F@(0d(w9ZW3d$D+wCfv8DH_NxO z`;GT*(ufCD#9qrsZ?^5g<%1f*&#*kugSc1Iqs@Qpqc6BJhzo7DOZPBnBhgalde!T_ zxfQ}FP~)s=i@bv>N97uowfrF|f!zntNa*xgXP;s^+VFcRA&j;IYtUK9Evu&p;r(0bNX1s28a_h{o9J{#@XbTU+ z)C;UT9ZHObOUd}IU$L7c5Pa2u1%6L|H~~N65sGTdMV{M<4IFWWcu8z_LMN~n z5eo;QhY)Tl}#3e@CzyoMJ`Y&!`t5+WQB4xO=3VtSCZ z-Qs!!J4|Q{(dn3lV$7{~5FU0vkCt~g67lO)q=Szb8)|UH>{oftst;i|Q|5oXD8;e` ziES;Tl{vb({vfzS4>yu$1Xl7a$51VNkCG^^o$e`mcO-j;hCrj#a8aIsh-PXXdH%ud3=()^}3Es;uM3*@tmJw+zDHprO z-d&*Fy{x2i9LzJ2f3FnZ+X3Q^Ih1?0$~&319ed!;*Ky{7&!&?0x~_kbBnThD^oRf+q^XPFQkszMZ^X{9%8@`Dcr4MXb1jJQ3tkAG zbnsV-4j^_WenLR#f(_5xB=f*t3Uhjjs3C^x@bM8gEVsd0xMq)71`_F{*xoga?`aEg zDNlr^eu`>csFIUcEYN=;&*0BjHxpY)Ddo1GT*{_9+A6@4OR5!@%s;FHrhFX;AI2vQ zPQyRcCL<9`%{D`+{N6T*qtfGj*Xr{Cx5#}i8?Selec-Z_r$fTG`wKH5- z4XDQ-@t+h-I2=&qc-T)NS$_6z!};A6lGjR$!3yoIL%zZf$eVv9hp_9*{Q$G_r72Km zu`J=nBU}iCLU!nv-*h*7u%F0qHN&qU-8{IXGs}84H;5~wt?+)Trj?3=7&&^$CMS*b z!rVs+qLq^aBI#vpyCL;nS}|tLMcz0xV|#Tbd&J6%5}QPS^ia>4;u4T3B3iZTSD?HP zT!3u^aI!;}es6yl8K5t}<+}o?ecFT&Pu4AE{IzbmSaj1bb@NP_7;L#h3j4rug3BOo z%!zH|o_Hw-&W%s_691u~B1z_JJjQtR5yuq*EFMTPi;^dGZ_{vk1EtuH`Jp%jJd@59 z(Y-EfI9mQM)vDa8-(0NJVPUv@K}EyiGE%+)6Zx>y>Q$d+3p)& z{-&CTp*PhY_2?u`ZKN$09s%+CZ?@~V6+D4|P(?m6WpqoV&ymIiC1d??Vm~VWGPVbH 
zoHJ{bgnrFmEt;Ak4I8-nSF#fZX_Hyx;4VKx;9|Mj!H6e=>*{}~uj(T5E+=&}3 zFC)FIx`o01)O+P%FOiLtLVu8*855r|3yzWi=z4R-y>ATG(fWKNO@B0`7StpCK1XCW z&c#yh08#nJjXJgbRfjfGXG`xn?=7pcDc~ zde2LGtKwmPlU_ZGVvf-3H~~WEG?MQ)wS{*>XT2UD| zpg(^WKps1L(@d)F#FHfLt%+tmaE@VF2WQ^^=>1G8qy^(e%&uptGlGx_aVM0>45X*; zG$B}QH{iu#u2-_0gRJ6(u!NMhh9{ubK=?_<#DkM1;y8`An0zD6Q`;OesZ&`+PT?*@ z;A-M@5Gpm?)ZI*d@{7O&ZUi)T*Llp)ugHH7y1P5cIc_GE1Y&$}-N!l*0FTj6j=%fh zvt8;S@og|XdtmPzMR5J=pm#%KNh_11&iGQHYNw(#_%!HSslzE5xs zcefp*9{?z0**$ptQegOupU8~*;Z*QOTwjYeKSWK{+(Ig*)`6b7k2mUx@5#fo{TY8c zcWT#6>qV`F51oup{H1~R<^#Xid0S>~#7q=s-SqXO_S2hW){i;MD?im)GcqOfd?bLA zQU`nm5@EwZipVw(=VG`;MEZ>+nWjcXAeGhWBG!mI!Qs42iR_Z0q-!*4ol@i0!#%y- zJNJb6(gsKqzU()WLF0A+NycaKmt}v__q7NJkSn`R_`T|cT%S3BftB5M`;rex?2B0- z4K8#MTOaoivulToo%Xi3K;~RNDDf|6Vd}4|d#n zc{gqrpR5~oef8KPhHF=~nVXxsW{;*7=!JQ=UUwB*%4Oy$iDNLg1{+0`m#=@riePns z)ui3$rYNtX6eg-W4IgJBiY=wez2(l_yN$QH13EZVltlq00IFNSZ<0 zc0Z!LBng#?Gl(i8RZ?@Gg=Z2?rmd=eQ9oQwUD;C^YcC=Po?$2lf5djoG|g*fcwH8b z$k~6l>YjO?_NBBa0u1iBW_y2#X>u^gBj8A~D(pnq9WuRx&1ApWCrVw4ND;rBT->MKDb!%o`IP#8C-bLotg zL+zW7O1f)H&4<2q93fQPcr8sk3wTle;ptz>#L!pUL_}T20$HFcI&FWD52>@D_VF5g z_TY8uEm53-4&&H?w8CQ>&>Ef>eUlv{zg49d$o7bC8YoP`AGr8{kYmIT+u$fCzKej` zCnAg5pmQ-@+l)%la0jtIPlmnXH@K7lc=<_YhWZn_xn^wi+&UZ^0fJV7yBOXt5&2A9 zRKV-{7*-~<+VZF`%5#5y;apWjJi%x}`FJ?KKNH>Z+ZHTr@5>vZ2{!0Us~!p->txTP z`Vhh5Ou^!d+NR67KrcaXyfs}*hSVMAxwC@n$Eqz2O`6zsr~#Mus0RSHuF`Fa64Lay zy`xxWFH<)Y{NjBqgTkbJnAze<*wJQGjJ_ zvdz^)QC^8d6u}Ce6B3oDhG>Y1NEUz3T@{oV&p8AkV5JM2%KiL9ZNSGFFT<`|*u}_0 zjJQNo8soz8XF3RcKbbL(Tb>k|{oQup+7?h!AG4#hrKg4(MPS$LZ%YS`~QtzRIyPK7x zp5i+^x5`hkBzo2L5eE*g**;LWe*43|KzhaCbBp`52|i83*`)`6cP=t}oGdM~1r2g> zJK+|ASn)(cGXaVgc*B}8sYjXw5-8Tm8=ibdH=Be1m$KYh2s3Q6`c1O$9a7FWA_O&a zdTNtGGE~EGfqZ$aVQ(dW7iJzqRA=+E!7FT%>gfd_ z^9C#Y!i5aOIZ;}bI_0yyl5f;SH-<8P%XBE{`oMH4i?4R3Z9x3@>NV?+8a%#ZIwnX& z_7mn-RO{C-T3xdHBMpLB;0$(fsw5}IlEie!BK{K;Uv$5JS||cI@|Ee{VlM1H^to2p z)+F6B>hIZ&oI1CVp=DWCT5!WW8Rm~FJ22X!FZDw*z`~Wb< zwSJ!^C%P?v|FT^W%zz%=SU&=OuBN@FMAp}sU+K*mr&{Zti=I92%Q~7y#5>yDnjvJe zPsVK|eh}I-=+p+YTBq)jNjU}&S=qA!t$Z_%Wh1C54d9@L+i7Wu%?n}r$_nk!XeMkv z&t1`Fol9SRp4{r+Jw6WlpYP zd5Q6hi=taa=9Z|}eckd}Q+L6$6>H#IS2GMn@d94{41g8b>)lv^ughkLyd zklVjs5HWs>(EkBx4Kc~V0*zWRXTgV#y|GduKzG=(r6HA0jm9kDUo{J$ctu^;`gvcUgCsr*l*wcKZXvdqjPu%c0d=xH8r zNqBaue2BhJ%t@Q2ILud~3|X>;X?>CaNn?^u}wDVnM3gm zWZ12gQn$n_ui-H)yCjl{>4yH`9AL<+K1~MS+#1guiI`(KB{+}wX^;jox5?Dc{d#GC zxR@&IBoZTo7DNAI3;bt1w)w;DDr~|I%hOCt%rZjzF`TIQi_mtH%z_}@jGd*>HTZy( z4H6i;Ee^vFprIdepY38ESLbQ)EwWu<6^dHMOhvz&Nu@zFq(7vCpmj{X8ZxL=3DECu z-!ha2)`$W~qYtV|0_hnVit*n3%s_&dn=BC<0hO1xED{sMrxaV4^*wNI+kSMgeh zd~uOW;)%>7-QE3?Gj1&Tb6lVs=^|aX@0&0agOJ&K*eM;0DLD4l~v ziiD_4p~s~f`>(#|n?~#@_bGfL8S~0haqIKv>BsVO99G2#DD+cnyKbvU>*J_OZM9n_ zFB}KEtVmve842NQePc7aYNaG$6;aJC>CQJfp+bpDqDrT)B>x%JULimy16KFA7}#A( z!|!OkQhq)>NR3tsO!)16Z}%Q#^{dOfQx4Ue(rtvg?$mXESMa4;vLyZA8r^h-8# zB3!MUbijrY;aasQb@TWdSFYue#40ZcPhq{95p`GJd!P7naSNzi#|mL7^4=Bxx7zLVN6+S{r6MqE+fKBCZ>_YAPsPfnl0J|L z6WAJD!b7iv65(9t)VSPn1+&kBZCM$KWuaJq&jstQ|0y&s5#s8QRlLEg1JsKzB1$XB zV5Lr1hF>3PWAd~TvLTzW%t{p>fIlp;&Ay?&;7W3AB4z)~3Cj{eircHhzJQZ*!HdGW z{ec;N*qw%DN%m#@i91~(zv@pmsWsK8?)hzm&p@NA4pmT&shgl_$ePM24NQCyO2X<@2Rs?*n z$DyrG@2y(#01OFz$8gl{lGHrI_Yle8*Ze3Xyxf91-o-eR_+bi2Cx*IB$VrypRqCrq zR$uaK0~7Uhw2C%&GozLXpkSClef!$$-7@3msm4AzC!cD|(3UWnTQ&$stf6n0X)qBZ ze}4deE5Thq#l-CuKRz+j;9DmJnc|ncZcQKj1;WzRO2RG|5imhGHNX{Z`Rr7>d62|JCMZetUK?}4$!o^BAG?GICBp%olm&1F za1Pz{wkUhdv%hX{+d~yCBF`!#QD7~)V3U!)Q4=1V_kr}&CWXEueMw6ecevo^e+;r+ zQePPwW4Bz7|3*+o06}RQk)?4a;;lj*cJcHy6H)qUq_Fuc08FVdU{^ZN4Zf46oBA8NBU`3Q>MGuhbMk~NOAm0iS%jmoVGqZt|# z8Bm<8kJPg)^QP-9x&hBg(3YiPGOVIoi>vW2&sQqyv>D%5TB&%oMn}A#LrR07^M-Xu 
znsYFlpO;qv6SYWW|7!Z?}NO@8-f6sIHXZw#V(>hZF``JZ=kTBB z*NJT~$yt(#of64!SzU=4UlP8eYgo%=5#cs!=8+ZYZluD!xni9XpY1|LdLwr#B*SWQ zJ{gLTn3N+)13synOMB24F?u}(Yr08V;Mvm9B}%7sz%Sal4VnV-$DMQ;bV}-aUx0An z(G9!^dDk@zygm?#F%9hU?WH4~Oo@T+hJg0H+weBU=0Y^Em(B(eRj%76&yByg{QSZw{&923K+igIWkicegvG0J+;*|c($Re` z$M41Qjk2m;DCc_yy?>%h)T89=<>&0M$Ta2kK0rd!=Tu*2QEuI5!E&rc1CyZvlM zmeXfCCUAYwl$UJ7?T+(I=|+~-pJugM>|=HvPhNglb?d`pJ%5F#&0aTisqeOL(A-&i z;MV!>)JE>EokCwMjf%dT^7v9_&hF?c_g5ARUE-QNzA90N#Tvu8vZjyN;M zV-D7T?gjkYR)4_9^|HK@55)BOitPMfW?g2>cq4;;JLnf@70Udz7+t_c-V=z zHN?(5IPAm|@J2#@z{;B@$ngZzyHr$3!(?=TXN4{Z;2SkNEF zimW=^PJhK=iV*;>!zqWEf5ag5yDDi4* z?ZZlv5zloDWQ-u#s4UfWkd|Jlgn&5^ziDs`H`5?bS6aFys3#Gi6o0@o$KwUFQqW?s zrvgN;Jqi##5c~jV8vqVJV7Gq&i~j-}PG>(r3McNH{s4~~K=!`COjASq(0%0c2bl2} zh<_Z*1pxBJ96(M7@aYfeY4{Dc>y(h;PW2VfMvR1hdw^Y}XdQh!9rhiRdRm`)Y9Ox> zU=WpZ>cCb2vj;j?0)uBIjg8-Lj|@_86Dk{>(Dnt&sU1TATD?l4?$e3W%INu z#lb+4^rBG*+^_Zc0*VN63g!aLPowD{;NBm8egnUL$>TS0_b>48U!cZ<$KRl{pMSA4 z1qZy=R4WTGd4&*PHeq7;G&MnyIe_IC)#n)oxeWoabs%fH-@fdKPwk1h4l2jB7p;wo zhZ+TuTvS(d*1^;V0>IN5K?rUEtTO}z>UY{&KK8WEiOpHjX7PiEK0S!TCWp*WVdY2I9YF$7beVkh1WFIsInFMV7$gRP*KDw)Pz2)X+40({sveiU%-~X z^y4$uKY+krAf$Q!2PozbUHAn|ENtYLUd<$hnd#L8oukE_W9Xpe*arsE+${mp-BOiQ z?c~-hI%Y`RV2a?yniqCL`#dP&6#$yFzs!1??H~QDwTI8l{3V1OXc#zhb(uX zv;~}7Y9XtDO2@b-sa;(?3}qX^497T^&7iCOk9Q2lBcQGY@TlepFjrZn1DNYiko5(q z(%$(11V>-szryZU@EiDhur29EW8WZ`&M$!JpJVGAr2Pvlbn3oko+Q)ut!#fxorDT8z z5XftrPzR%)T9Bq?Y=55@sBd;a{GtK9$0ap4Kp-hF;UDUF09ozmkA9|qf>oE!08;z$ z3620v|Jd+%I)UGy!oSAi7YM5W)BOR2{<|!a$N-L;8Z|Bqvm^_`s0~Awszr0N>$ua) zy}gD2s=iVq&@O$>HaMUKW|0>D`smko=Y zxAVo5KR~`cP=7?x4_W*KW1R>ez==>W1y!-;3ukz`dlPL)J)2*5f7N0tIus=woS?^$l9IhN47T8L|ERSGE#A}dcv-) zHITY?L8yl48TN9mQi=>nL!)5W1STSVepgw20K7J^uYZiAfOx8}!91o=$G4RVlr5mG zs+-=v(y6IYu9_eUQP*mK$)<)H2^8RtE+_z;LS1_>m*-Tce73nL8^ACW0IaBJEkrTI zq!ot@i})8Y_H7?rpb0SgJ{olOOJ`?4z-xtUevkJmeSh(Mh%E&;JuIEy@zD=}Tc;8> z*AcS(_po`5$` z5MvtZq#ARYX79%VRPNf$a?9?Bv3@ISt;_cYRSp&_G$AKInS(k>RY62ZCEzVF1e6+x zOBv)Owbjx{!ft0#(VobHkgZYIV#ISZ=vfK@)_)ugy>G~Z>KCe}~OlzW&NT&``DX|bt6(2^o1fM&C8H)G?gg=0lKm8o|;=aEDm<8Z;YLMR`$=VQi zhxGhyA0bOW^O=tin*PmKe*uoWDE;<*B7Y7XbP~|k0tL`j*xnr|L!^qfGzeAB-2{>g zA>Cm}j_^zjMD;!54a9)!pzkXOMy}RFX42NNs@8-h2r;AKaROZMrY}na8VJDs6}7F= zNrl0zF<|o1)Avn7d=C`(fM^5_;%$#-(}%qF0Of%Oq7E>3f@r-~sRGfo<{FF~6MygP zrXw2Zfv<6eps;X(4;?SB=YTU|OQgb=eNZU5UdSR2VB=APDsDq@*g3pAV-pNF47{%y0P2!6#2;Fx2#nEZ%rV844IT@9Z3-*j-#b6;8fGNq5@h|ozeNA z2a6vYQjm`o8Ryhi#i91_?#AE~H%-6%^yY_-g@Ydb^8aMe!Oz%re#VgBynhqG>l0xw zzj*r5-7nB}vro@v|HnB0WgEW1*niDQ-zmHl0{5P3q@teMMH;B(@Gu4k0jaBk$c4S6 zL(u35rdzd$T&;}ukTzSDODbTP5K1&yq~=wUi-hGEGDcuuZ!v0IHK21hU?)NyqF1>N z3YkD2X}HfFgj|jKwT=pcUVogbfVaozl5}Qua<$ZfMnEBid7w(*F@BvjzauV$z)^i- zqt4L*Ra^_oJJ_|_#gwoaa4*%h&iYPM0 z+`fqPU-BAcmjBM1R1)(baS+HcETXf@@|eUp6VAVHk?MvG34<}=^w6ccqy zp#gSuCa~3j4G(AE4$jkw<=2U^nQsX=HPL1jVH!ciBOUvgU(d`Wq=Tm4?2pkE``YQD zLVOPjY8ygyg=u*)4~E?^VqT@>XeQ=WSQ7d+ALdn^m=EUCkOQi|VxHoAZuP^rvAq1U z1JmF9$j{_^UVrtm3j;rK;fz2Poqxq&Jk?EFTYzGu-QgV+^4%=k1GwZXlM&PPS+>5; z*BYZp?^zZbQI31MDt!s*Cw4`=d1y4Z)kG-)r7B?Vk9oSWLm-6^q{vU00VpP3Uw4qt zRq9KztI8^5B8KDvpc(f1sBjRL(P%l#J3w_2W3p5u6n}?+Du!8bp~JuD>=Zy?*yAI0 zLZMHIi2o&0e#QUiZ$I=yS48Zo|0Rd>m;Bk6&-!hTf9T$ydCc!QuZZ)1<${=1W!fVT zqbNm4I15#dZUSTl z-klfXf7eeDmwx8#KXmiUw|(-C>CgE8&z!Kxg@0Dj2vm7LjQP)eYRFG&^zShoE}Y3f z=`h}$0_S8#@syox>bUC?-?hp?pu;=M$Ei_}BLXn%|h{suVmP#6cp`Q z<1_@4mZ^Z0ZkBEUBZm~G+OHUH#d0oO8{#=t zXSfDIL;$6g#fXS6GcixVA`Wf{;sP`XY~L};hk3%c&-q~w2fOf1M zcc)7=wQDrMQ;6U3i3Pa}3iB4gyhDs2e0Dzz4OY(%33amU6m&va}Cx~~LK0um|6$);(D8EZ&ChkuX_ zpmybW$yG{iSAgokyAyuLX}{vJ&pb2+zxnmW0~rl`=m*~FKkdbjybPLGV2gb~y^e3P 
zsoCje6uPu^RnBS+kcB7a)YPnYR9b7mF>If@2q-&aMrLT#qFYZ)wV)wksRpP$qVdEw z3xW}kyr^bm@gxK=2|*Awm`8@{D}OrlL2Z^Df?O;Y8RO-6T!T9n^U6f%iwmM5pZTRD zf6SYH&l}Spb4T*Y-F;{xt;jz%l75Zpp&qLLp;Kl0WBub-PCUbrvskOqPVJqQ%A3lB zc^TNwSYy&2g664#8bh8EtU4==QH(%yP&=S)d>Uv){Q^Aewo)|5K=u)$7k|__W1A@^ z;!2`H3Hg8vuyct>X%mrVK>~7jVagYVbTc)gVOS%;X=L>K#Y%HEmFE~z*)#f7*I{Pd zD8_JZY8G!0V9nSr@r_>L)$Zg7gVxnPdC_dFw5S$&4|TAQSHK4QD1GJPKDtvmKm7Ax z@1!4dM&svN*_Y0I#%nKf=zlfW-|ao24FVMJ@boDr*$)CqqH^lkRf@H%=46_bK!zQ_ z>a|xWS4>F?b4J0F2h+&i@UD<=N3|M|E6e1BrKivGp#4tbt$ zyCQNE2>cPpMUu^k;6rC5_^uZs&P+|jvqutPG564HU;GRxN_?t7(8;sBYq8O_jz$$7 z7Qk@<$+#}3Do-KcY>t_uLi1i}Osp9)7gOorm23?zbwCW4=4W343=3T{>+JDDz|jRd zqd`+67#Tfgv<$cAQGfNp-VDhDQKIV_4X_oC##d*Jn5+ffZWFLnA3rW5rE8(al|YYI z^G#J8V)e6=dN>KD&&7DWit*@iHrVeUYknU-4YoRsweLJmNWXMQtj+uyv&PqY!*{-? z*M80yMV>g2(T~{ZGdE#>u0IU*2Ve3i9q1#{V2+)%Y7V$dEq^HT>I_3WBMvC)wbx0R z-Z_*+rKwY9K`LD!Wk(aV1kko{>Lyk@C4!&4A%wH}69JL~&Q!=-u61>8`Axk}r3V7c zG61mwGtZ`2&jZfYDS&UmZiGbqR&;xvQW3_j_Vu<|xb^ozp>}MYLEAReGw*^1QM~6H5{#tANl^-_y{O`G> zKk_8M;(y6u&CTKR_L{b-gB05Y#hmC-suuN?N>XkHjs-jEpm&as0t>7e4q-phd>F+x zkV7qg)H;)2Dj+qc4MJt(>&B{wfum#-`qlz$!BW{m#ywCFww|;WsrKhBQVx0%St>lZ zg+M4|Y@>2oY&%7)OW9hd#JGzP;`vEMD+5w&i+|j?Msc7glZyc`(paiuvA#|t9a0Bs zvuh~;b`fw|%XbRMi3u2&EiCpTG&W-V3p`Mq@FY%#B40nR82+b#3_k?fsvkP=b*NMD z`DF;QK}ccAjRiQ#Xu{nXj2fKrFoX%#B%V#+D>pX$7$dDED|YHp&Mcqba@5K8gL0cn3sXQAI^%{N#g4C&_{0OtUp zjA{ywfGI!)jtU?_7=Qz46@Zx;0>ssj0w0vdSZ3B4AO(#WIDk6=1Oy#`Ty@vu6X-bb zm?Z;9R7ZeTP6R*9gQ$-P@ZO6(UUKsUV1KR70LVaF16OROfeM<4JoBeZqQ9zkTy;Qe zaC;n=<0rs5{o=!4{tN-Jdl!hYKpPN)nKkVDseomHyO4_^z+WNwD@ddep{G#by&peL zIHOq|*J4`~5eSjJI`#nEf-VA;*zH4B2k;TKSl9n>mj2R*!Q*Jd|6yNMBnq9clYh&; zJzmMV5?4NoXVY&;&1J9aV!7ptZ-bHyOeK9~T^)|UX8JK)f%$AM<1Er9m$F4DP*on5d5hv9uVx6>V?K=-(c-RcP< zke*IirfXRX#6gmE)dENpJ%7%U0M;$cR3e#~y#tz!phj$f%|xq-4GcPgX$$g--T@&J z*^xlhp)q?0bo+~^pl?}bVY{&ySPe_v<7O;Hc{)bo2R0M!iK!p8vz33#+X9>zbB zthY)z08&g7gmUdK&=v2EzpV|_dZe>seFuu@Ztp;5+gSaSH*HJlr@Zew=nv(&ZDajYE`PNx&4+TM>^lU9@}q5Ib0|k` zEBO2^{}yB{nD}B&|7?M_7yd(xTO098=*;)H)Xvb3HC7R{;9!FX8)NUl!$uBkAuI=; zF2Ok!`hpy0-vP{vwjgo+Xb}*I%WG?i0jT3Bu|Ys2!u%cZ1V2xqYb>X(9fQkN*vhT#Yt-S-4ZDT}`=pSuc>I~&((033Gcf-9Vd|JZ6`5$;6IBbYHN>@)qwj)bmQEAITnQ&Bp zwIdPwo2mb3Z>OO8w8P`D7PcMVTM!9*E#eCFPdWopR(#?dSd%4C#eWey>Ah1Ve}A;6 z5VlCMZ2+`8*a+Jq#vKe&W`E1`&4u2qe$up{sb~op!D;0$McdrMLsPykd!egb>s{0W z=Op~fM^-uqnxEPwP(-XLp8qZryp2x6XMNMcGT|_Y&5vKIiI@|lQ>XgYt$#hvohRu5 zTa!i>e&61K@t^I!sszoyWDiBG!hf>+Kk?A{t%De}!p8f9o^QD(xoHF+K@u~z*6L>) z0;mc5hwnG2cl(Yx@TuSz5iyC^x5c1~Hqb>( z=-Hog(RTX|xOg`R#CVCAzkeL!RuN}I&8MM#2UDw@@}T_Rl#6cz9N{IHiThBvG4L{eoZ5jT0PQ>-sbLT_f zi);F!AFiVh{cxRr=!fgPZN>Z@JL21^Z9BspL-aoh`nQK=ddr4a2_l`p;^<3s<&6T(E@zi3O*?(i3_HMHXwZQHi2{meUUpzBM50dqBx zn-M-MiO9VL#i<2O9U0~C*fEfR$W`?s=Rq;7PG=6jeclXp0PIma?AO12t~tkmHn)}n zgWjD0Fl@Qla-nBsvOHE6kOl+sdn3AH3zup7b&qUrP**P8r*t6~oj^%_yGVK6^xaDyT z8N@#Lw3xS8-MbI7!4==f9DT}dk7E#&F^v~ z5E{DQ(SWEKW%Rx&m?l=fEIfOW{TA zN7)a_O9oO7uA(Il16;i6QSbmN5nSuUJrR>MJp#E{x6pI2V}OZ$>gjMFfgOXD_-~8) zaQ(SQGJp8L`|hxw_v5=veAgEADTU8ZK2>}NgABkE&?Ru7q8x)*+-nSMxcD9hJMd{| z&R_uy0ZSq79S||`UJmvYwt_BaP#5jnF(?6?f*pswD9v_D*ZGGp1`+jf z(ci7;CzAfDy~!X42H)a4JO1H|L%~z<&*OjrgMvMPXFSoC;D1IzE1m~m_cr^~zXosyj2!GacnZ0uL%SK+ z4uA4vz-I6z+LduI0;vA~pjZFYu2%4|5PVGkPX8R*!{7y|GO$EHK?C+;d>B-~qOdQ0 zcz?-nD*k5!-_nm94Jz8@4f;#rY2efUmA;7a1}s58$lGv4eZTelBaau*Q=uQTFMS%m zvlZ<^;A`ACA9)$W|5`lX5Aqt?(GT~7o_|N8{!onfaOlS%kCWifU%wmvt=^9w#Nzt~ z`LZj@$F8VY=tcKsKccVx{fqViHt4y~6EPkJ`4I7c=+__o*YT%a;`iC#?*Xt7a@qnY z3A$X|BL&!8$jK4>xeEKS2jctkS3CJneSC?2`wTiC{S);J_TnY#$%X!Z^pg+u3x7Q_ z9C6=@`o8Ur&EZ<;LHJ`mZ!A0C>v{je6)9iqX+-$H8lVW63|s~!px_3ONX3DN_>Rp# z)}8B 
z=l}WN#EAp{<+#lc|KscGT%P*>uz$AUVe{X$`?h{O@BeiuX&?RR@E;dZ#p&~eA4*m{ z{$nXg|Fym6|M7P`qv=mTBj+2?2zAjnpi$_F``7AgIqqL`=?nm)l&rxyS0N_oxd+m$ zg)SpCkWB^2Vr7vsm=+@_6Q&Y;(WfWq2ktG0G6|`%e@cF|yJYL}T{%VY+<)(1P*UA! z2kO!?OV)%J&$&MDSOUSVpU-4?(2s>*dQ6GK_k8)P?&}~Ew^p7mR?df znQxVFKfQPdx7LH{BRVYSv#}S1JChdsWwV*mk@|?YPs!V_^ZNnpo82qvDyrLCv8__c zQghqZ-IGa+BJz)Er*zw5)PKCkVb$Np<@UZ(jK_4tkz0q(=O}*fFi;s@ev~FYIqVXT+-R+`| zyF(mF`|HD^%GIzlRqC(1nlGcm40id_E)u>4(|1u^b}MYm#xN(5UVrQ!s~*^%{A$j- z`DheTN$K&-+_%wEC;hHn?`GE~xL>U%pGL_z-xO!_=BHQmvX#zgd^%0bdUP8f;`Ex$ zr7k0n>%?gHi8N!>w8^39R4R|h$BAi=)^lqXYWM34kbq&2xMun~U-i|ao6qy*Rtopa zez!hvAFixSTy&H$hY&PhN)@z613d%8z`JUcsbxH#Xr z%KFD*lhm%fKd)HCXh_!C>+;@3c)olrDi$mENWph@ycT`$C{k5jZwI)U8uF@o9^WJW z-p)J^9HvKmZ#(v>9*?SXoGT9t?A37`p2$9!hx<+Xw%k{=x^zy?9%uVJK2RtH#?ib@JzjTS_fBhzKJNig$*HJQU2^I= zC?2CbW&OxJtuGr%J-d0uchOWb!jV25g%1VUHziRo%YVBwH|)8kUb@tKRe2uD*+!$|>LH(RO>5>UFUJeA zD1qV37k_cb_RH(Wdw1L2e%wu(bAGI*QS%C)`~0ThgS80eZPM92Y0mG~90yM-5_IDeBV)8&jT*4e9y=h}HBX_$94ep~g$ z8~cW@_b)gaV}WcKkP2<~k9;|fZvM4!7+m*3A1~(DcH8hi-%3%(ChRV^OtJDseRp0D zbawT*o7^73f||;#Sj^H!xliBS*uN(sE!NB0f1MW(Ntv&M*D`>9SU>gkIflvA@*Mt{ zb$?U)fc1tK*RWQLThG#>JnfaqCSFGI!=8*U>)^0gZFSTcxt1!$RXTy$g88j|)|8H! zc`MIx`lPSxE?iB?O^zIOHKb;t>nVw(m3OJj7Z_YQB>yznv3bJrVp4L)8<_6a(brSF9GE)nmRa)$$C21Ct z?>2RjEsDUlwYXXO@R4ky*L1E=Yj?%&Q>)#t>iP&Y7rSV6P2&6P8MUS_7M$bV9Dl$3 zN4t>2(K4`4$yWQ_w!H)$(@Q_`R*0mw36) zZC2~kbCO;4=(1N?_RxEyxTKlmJLS`4B`p2RVROG}7uvM_20vqDzAto=uYcSm582*u zw)7Om$@#e%zXG@QD*Fu0;;gl2SRYSa!TbJk*iPn6zO6RJY%*y;Qa4FZK3==EIv*(! z$?#e?dvdlt$?%I=g`=wV;rlc(;^pRAtt4NKeEq(z=XpALOwTa%lhbN`&7Ofg&)9Nz zzfZzcd8|kKpqe$grv{Q;Nq+|ert^(ot@o9)wu=Qz+u4D>Rp__2pP_7=Zq3W0n%mOj zjNmk;`d-gsejI+GYNV# z$4}4l!D4+iSj~H9emTW}_suE3oq5nqV*U0G(re`1#$jqw$2vs1)HPhXZGA5wZKk}i z>73lR=2Vcd?4(n#)qk$guk^)nO?iI6N7#!119G-N1bjvC@#s_WdYO7cdX(@M9 zo#|_)9vXM;9*clYmB|R#WaOma)v@!q43+0;Gz%v9p)RfaVJ)-SrG6WGBhujOUUrLR z7)E%$wfrXU&HKZif%`3o3)61arCX0K+WIuVRJ`0TCsU@3uYXFVaBK@b{Z|$YgtfF}da4TI`oFW2VUF~&iWCQT@ia8BL zPN6s;=LpX+sDDvhdg<9^eN7`4xuDIhgX&N7&1oMFho$HjC&M@2lOH4cJiNT|{5@}u z6+hQkNDgLSOPj)k+r8aK<*j>;E|bSrJ7}>&vTHIgGJFGFbb7z{hIT;Qc2)K2mwn)l zGyTxsk>8u^lW86bt%#b@oNAhcXL^t(D+g6)bJJV#4S(3c+X5-1`J}n47f?Mr$BNV5 zn{Dz$S2EnimxC!uqs7(fR=bvuz3euhZx8bUZkg`maPtB z>s{VkqPd9HwYd&Vzd-q6b6hXSGTa>G&E8mMt>ChnZ91OcAecxf8T3uxS2_Z z$TqrUKkdeuI(`$3I{#HfIXTSyXeF((cQc9>H%U{K!??eyi+FVMnx$_*?e?5y@m}*% zB{`m#Ub!M_u)SPd1FE;^HhLK3LS}sBwzui3ntx}Bzn-lj59%?A^mXmNyiAe~akD;i2S>CMUQk%`jhZTrN z^V&1h;OHgoLh=o;Z39&Sq4faf@)f@(M|tY|W-^^Li`-rov0j*l*F`xpwy}jS^AqUr zY=5z7FX4^r^DTZ-XS#ijJZS-TQgYvqGW~WrUABb-Ux#~lP0_W$;pjMh?v)_3-p)pf z(&k{UiutMU9BJHdXV7Hx>=+e$t&NM(GB_r0mTx5@MY!9>b8W31Q+czS)H9VO8wo8B zSJkO0EDUc=uGhB#Vm!&%Up>g4|FQ6+muanp-k8niMx zW(G8^a$il?>-G8~wPh0H(X&-2WAhp)sm7ei44!XE;+*g6bEna=-C$*l>kAE}1 zLyIzCU@?lf2p;O>5=Ax-ru}3WKaUUco=m%2TW0TxwG1{6)l+U{%(}<($TsBjbP>Iv zvFk_7*Io%6cKP0R9CiSJwj)rY5?L#zL2ePBd{%y6IIL~nHw2@pq z%UN^IPTg#mY%j4rj&G;rS zw>zKjf)Sa?7WIYQ&k=#o^^>s-glt}qYzbhK`5mh?;88+UrP zzO8lKJ}+llytYkU`;*gZ6MwIE$J=4jhoQO2!&5VTA2#u2_AquR*=ZP|`SR@d=H014 zR!ez$T-;-{L*5K6%E{(6r?Y1en44Q-#AAQaP}#W^<9dJIz_IpnmUh0scO6qE=OXO- z11{I@uH8LGuNgdS-^xstVwGZ3ao_bLd0a2A@`+#X?9Qx9F>d=yD}QaDa__xQI}1`* zJ?=--{p9G39^loqP2AQ?@4Yt^qn^p_>hU<1K_2xwA3M|JdMPKXTyYcaJle0>;b6zr zLS(%veHKjBtAElxmWFxTNwxutx9{epjWEuS|pIJ!LUmgl`S|8m5K=or1zeYaa|Cz8G98)a^8 zP_13A`(-yxa{n~3eSf^T+0)Y88S#F4o?Mca@7^Y#JK@FcZb#QkWq7KStUVg1?ds*^ zJnhiy{2pC0{}GtSx!)HMGCG@QVRNdCFma0A_%`=hI$NC%l{>8iX~lofPp3GtZl(LF zZYfh3F4}p49HNY$1%iR6&9=wscHc^cn%cZOG(~}L@R}ZMAHUdqKHsI)42|c8`zRx2 zHgU{pu)IDj%Cmb1Hjw&h?RCh}~YZf=Vc 
zpIU%hdQ@h+Ub~5%?9YFxTee15P>)KUyIpz*XR?|l@e8I4HXjE_E#zd`GW}A)go6UB9!5h54-RtG^dC#soChpt8jgP|P zEuN>(eswLJz1&|WWwm>&8U}&XOvaX_UB{L)Sp-`&9?9!?W3YdDKYq-~#xL^MigUNv zM=3l9#61Rea6YeIrn*Gy(U{7Y<91T|?6%upPAa{sJ8<$3_3?gtZ`a{<>dBitLGNR^ z8AW>vdt=g`qs4T`?AmpvQW!X1AzwXrz6r8JG#aZ5C63Ek4R^RWjQXIH;3>t;d2&ze z#X=^NyES`An(Tis&pWS-F|H=Eg6LGaU$3%{G_O2i-(u@>^(^i5!QITyT3?6S3XP|u zzEAsQC~fEA^dVJVbZ+9ElZP+v&l;sNkYg-o_Q@L`+rx_1_uHm>*^fuUX0My|h#&IH z?8hW|&b66oUxW7LkcLbk-QcWYPSh+g^F@3-u^mg41NY;0fqvRs8DFCnYVWxs3;5=nWShi{o} zkpJ{ETMxP`#dbI;`BXn`aE^_*GP+Z97&p~jtIvP+#))q7q`*1};wZ)2t5-VHT-x~O zt;V-exz?n0v{*ZL|5B>6NZz_nyY#)7q4}-v0lHc{V}1`O??vPs@Pob=k?S@S?D=}U ze{R9)f3bDm%Gv_Wdc7b5ybBcIy?ODF;l1}4SqqEoO^w!_kA*~D0O&|$rcp+g?q`sW~ZZ$<) z!G4xfheqaD8`a=e#6a;NdA*1D;l+`oJD>Z>v|wI zZC;?1Uq*k>(W@-qAIOoqTkqTlM7_k|1hm1?T6#XOytx^9kO^s^9)6E5kLbh}plhJN zI8Pr+g#cGjpE$Eh6bY6-8z+8}Tb!nYOnK=w++n1#sGv07%ZZ)^X!fl3^d>x;Xc6|b9S2bkBEXg_&XwVwYD_y}cz@>WjRsT%3;`BQAhg*I zv>TATMySre{@&JGseT7Ex5Lzi8D$vEheL(es#5<^trss%CXaZsY&R@t%F2HNKNHC! zTJWt}aS2lKX7Fl~=21OpBf=5=69efB_RF5%Mq3QK`k@UmCHHjxunJ)HU z7CH27A0NS&xbvhP0?SQr6oh}L#~plui4>KKhBApj)*d^{xgCy$5S>F(Ny84D?((*k z)WrG(b2I*6pRW^#Rq7FOABM%6=}y~4s0q7QqFtX$&Pk;GtLH^{+{`@D{7llP!WJLm z==C^z49x$otChhvzc0T-AA7QGjE8A-@h$<$fZN!Q`@FF5CRs z*oKaJxx}WUHmGIaaR;`L1uZSn+(F)(-Z;*PsZ+dL$L+Vs9k@;FY|}cvcu#2rz?JF97-!s(VVyA9vMaNjw(F^?pY}E)%V~@~lx2}& zuCqe|nW3L&3SWOvLd%ll!*s|s9~ehg@ad7)UGIjbSG0J`&cSw5>MF?TYo5am()0>A z^RtnnZ3&1`uG_gJat*8ddP<7&w@8dpNEb*^d|B@>(FEKJh4{8`gH^a?fTm9|U6JRq z85^KnHhd4DhvV;D09744vBq|2k8%X`Th>tGE(J}A5T0>#N>iR zMqV<4@?%5fv<}kg+BtTHBYiD52b*i}6La+m;43ZEcFr;O>4Q_6^9>)dJS=b*N2F7a z;C6pn>=^~>+{^S83|DqNkOK}b?X6%}5-*NT9*S^8O7X%d9;dx2ow~BPE;WQ@AM&DH z2gzxdJ=7vdi2MaTA_=~&1ww57l01m3o|QR+$eZOmE*nfu9mKx$B4s2EQb`<%#+Ecl zuaV@imAPeYo&B(uv}G1x# za9_IUJa3ub1S-~9io3Ct;SU~0W)bD3=Tfr=pyxaU(7(Hg9h+=iXR9TjkO*$SYbYN` zIis~jSVEH+A1|L~1=2h>>j0NiLS(h_VjpA^>gt>a6i**%sEPtq1u@P7-^+EWv-pTPuXI z7oG2>=cWne<<#Rc16po-=7%^`5X?0eMwF8xsmh*E&>kF( zxh0c??DmqdId>eQUo&c)KMQ}Dwg-oU2V5ou+zGj-G44HRZimuzHBps0QDtz|l!OtF zx%e975w-l!E9@#a-aM+S)=wQ!+|#Lm*U8;7!0M3V<=>e`1#o)7F^f~YdLzEIbWj7o zqEArUe29Shfz*b!IGAqZ1CpCz2x4^)ph2h9`^VEOF?C0bd`P)QK2CoPCn(~4@KLd? 
zC#v9nkm!#><4%h$Fm$%9pq)5CW5|brgt`K)dq&<8g2jX#Uh``dop&5D{QN$Cd3hAs z^F78q-T(-&qI&VIsCaMnG8uEhF@VB15WpV=_3hMEp-LT0ScG`_180n2~`EbfpZpzfB1L@nFjT z{GJ}9?6JchpReC}s?Q|2_xQ{PadV*U2})Q+fvWUA6=9Z4^)Amso+a-vJoP^B|VE(6EhD2jSUAQMe* z4L_52YSK*dRWLpT7K;RH9{a0mwfbS|`KYQCJiPOxbQwhZHW`va?&%-Mz=Swga^g(ZiU`eea>idWhdHAH-ESq(U|Ara&1u9Dyi0_&?6jO$1=MH$%b=chskene2M5zI&`U_lBvp3^MRL6JhN4kCB>JPm4dWQON8lL zF3pl^<8f`F{MtZqMTS;O6D-(h82YY|`2O`!#k{kv8U0lByCIo3jWjHw-}#8VA?ac2 z^*=O<&hwUTU&*`@U)3am>xV#G6zGCUQ=W>?*@RGL+MuA2(OSK%K%3$nA%MQ&?xZh{ z3dw(hQ3Gr9E_trJvc7T{efh=2W!7o^GzBP9i*1cXCp+wX__@{|?I=9hb)S(Z8)vb~ zhi_3_cY?*9;*qCL@nHUJ&|Ejg4j83>^-FaDn+zyoch8H}89FE8;7D1UVKuY#$S6Ux zYsO#Ncr|Y~EUgb;B!~skxR(WVRS*#=m_2`7E9-xe>*PPGGec%2aZ2t`GM(l@oqFAk z`xUHkDqlR=bK=WG0AGSj&4Qm~U4+v~#)G`Y91^+uH5t;xKNS5Cl|tOC-W<}o-e0+F z`tsUZxnyea(sKsrlzbK)-BcEGXfEY%918jkkOC~>NXx|vj2r{ zR)QyQTk8TqIZ(o?U`-!*yZEhO9vbw0nK6%*ocLV$VIqa9%8(w$3C}#Qq>NAkvW*vr zdR@gS)(XnWtPV**j;c9el8L{O^nQO#AnI|(MjsS&p?}ufBF!3fWwC_FIBfoE)GqW< zQx|5#Vg|L#rNc>_j3N?EP38jVS0=D=`c6S+_M3enup~qYYdgczrjDS52E(_K#FI7$ z0We(}9?TqS1{90EJnPZOB zc8#oSd~cm>6x`%Uu@-gZL&0HjWdK=fYz3Vt>3-XLR*;Mwx(0l~+RtEb&mLliaCv+i zKAfUAADPs`sx0)xjA0G9MT>uO%2c=L!}`(`%D;H|Ee@EMu0%8v#8a6%AHW3GArxFs zasfIw5?!9p?i({e4z54%*Ljao^_`6F&wlLvrID-@ieMf8~)PO|`K^ za>c{Qsb5Q-9<=U+94nRyAtxDN;erpb#0ZKP*t;~;J&oW7#>hp=q*`ZKAGC6Mz2a-hg;2ghyR+tBlAX^ZrU1x=^K5Vp>fI%~aLBTex+< zre>yl_!BBiVt2BNF~$;`C@2}teDsZuuaJMj-p*@^C-px5O_<*5%Wi;J!~zUv8#)l+&Bl>5J3e5B9wDKM!jBdWLz1DLbU^{h_H1H84o=+*7G zCxN^x+jf~{NZ?39MvTVvhMjdD^dNPA#N^agwaO*&^O;4NuGFj7X*1WyyK$Gj5?Hjm zZ!Y@drOt9L)q_8JdncNMD1itAwZgT+y4AiU;_HhPVcI8BF_?c^FP-(xDyN^~XP8~? zs62l~xik}rQe&Ja&JXHf;z2qggs$T?vr5ljxv&rp=?<5T3FlU}VTMh3O(t3bsN?CK zZBf}8&^&7+sCCaVI}okGtmcyky7^~?y!3YDjiHeSbyn7%MJPCJ+Q)t^REn;JKyy(? zb)Br>;YvAsr$>Jh7z+}Pyb#fMD)MqJ-?|mpkWzm5GGxOvU!Z3eH3iYw?X9l+X@A6$cCu+IoZ|(QI}D`(9yn)z*9>wc3gqs zrw1zCc*{E0>I)~LzgStNa$=nK>(0b zf`Z1S|A>G5%Q3bJzdW?mX~&8udCIArq)_~3fSV1_kiZy?UW=kkGTBzWs$+@AfZ)2T zwQ5x6;8eyK3+xFF`&{lIr~GuhkN$At->S?XIvgR4G#;MC%W1e3WAJjth~x(rmbf!x zNbj{qkZCvO@WTc=CXd5WiHqF7r%IU)Ey{BC65)S*nJld{1bu8vj#&~1Haj-*Xrq+H z+30nByLseF3r_m64BARsioyZ6>Q?j zSBKEiHbX5YU5bpj2-tP6{Pbd$I15QiS-(}i2S|~*(FA(z#E2Qelwsk{iQof2099b0 z<%)l{(=7R2AbF{-|14pI>K{I=QCT@D9btdC%jK2#hsUtvs%u%_!0U`e>M*!mYekQ3?!0w=p|t&K1zQ*f2@l=KwF_KGWvJ)j!U!jXS+=C157P)L9LwD^Nk~` z9K!tGqG8lMyRz4EX~B5e&QCkL<;f>9>HUbJ7PK9rBlCL+4ufDe<%@w&BL6(IK*E2b zX4zcZ;%V~8(DI&Z7vl5+Vi?0t;VfX%iZF@uB3dY>YrI0*)e}H5wSeDzZP<7sf5XDj z_5nj_QE_acc}bbQzh(DvCTIh0i)t%+w4@yUFA1vl7nI6 z=<+4_6Y;dSu+8zG<3quct;ug3&Ch?Hf;ORmQ9At)EMUk*w+-VWn32B$#8115Qr=z% zEC%Lnr}vKjc)*qA-^w@ zZA1zAy>P#UgB7lb;u{%DX3NoPPI7W0bwD$*UA}pRyHoMuf%lv3yx)0n!|#9QZ%@!= zE$UPdZN_yYXY^N>Myi1|pOBCTstS?j^XYw)3|7lTo6_ZH`xZEO8Sam`A!xiFnm98V zEU90CQ98>#G#v1Wp`tH+1ojv_9E#zA$lNe8lA-p}@jB$2=*OA5O8#Y|V~$6~-COcQ zF_`0Ewwy=#f$zY#go@1%PbPoBdP8Umx*F~wYO*Mpyl<9mne4AxJ zGMqbQ&xgh3D+g_2UA-WkbbWkZwhOBT)Z4gB0wFX)7X5}04P^_{z1D+t76!L6sti6+ zF7K8sT=4OU`O(dAAjnLy7~jM`h|?hosT28PQU-U2Q^g)jEtCZLkfkmpXuu#wS$T-E*9h!Gmc>2Q70&V{a&B*yJF?PG{avW*&>ej>MpK zx7OTlF1)^;_H*cpLn-KjUc{w1z@KQq0z9rZ7d=PsUG@lsk-**f(w+ph2&gw;>}7Do z)dk#WmdH^d_F7VZPHLU@S8{i|myX>LHW3L{ej^#T`L!Ih|N4JKALq1D=#F;kA|t}K z;&7fQ$|idQg(J|hx;mI49@Qau^1Dnun%+{7w`>N$=J)z4#3$7p&v|4NkB6Q(k5VdT zJs~sA%1uk@4b}@-Isk;HHl#*cQL;7ARAKNlfao?nfNrF&6w*DxP>^VNa4OR78MZ*R zO}kI34f`6aPcVOSymp(gM$A3@B+?xaG@MMA2!2R58gfz~Wr9y%t0SyIh42lee(|gG z-o?H_Dy9TNhIvW;3MqdW(kU=5(h0>JRxVaZf-=6R zqR8^51rJw+9_}!q)%ajr7bhY1*DWnaQL8>a>9_a1fElw51yUPsCq4AZ6Lv=8d2&2c zD%t4#oz;>#n?A{_JFDztm8~?Yr8fD6yMz##*jk}v>olAyi{vl|48N8`38^aZ%S(-l z4C4W1Nx^@Ck!&lGUon&`j1*g7>rB9!9>!Y05j*;_Z-M0P(GxAbVV0*4+RxC-<_5X8 
zj30zEm_^?ly|_iv>2XAZI!um?+|`L1kk)Amr8=$!OFBl-$HPP?g=hSch4h!l&;%|7 zr?{PsWo*0)9@5ZmJ}znAXB@@Yl-RJvw7kFbVnTmOJUsoZT--Wmh}~5-*7TBMG5WX( zUOAfsQNA5Vi$S{)^Eev*Du7i0bHU$YA-yR~ zP?`^K;-WEG{Pit`Hq=#f^RLdCM$x1?4*3RSgTl$Cvk^a0?t#Q9yY3EUIg+)!Fv*W5;Wkxqqh1QFc zy(&5Chc2gQf#goST8_eP720%s!5xEW{DeRX%F>wXG?qX^RHbyloTBCvPV<-M+FTw8 zmYLJ5_En!Y+79-EvtNU=u|ca9WF2LlOkaN<9^2vqB~`TCA^}Yk=I;{q@RIy!k<}v6 zIjYu74J1u&ct%gln`2TlYo`Z`2wn6?k^jdA{fUkhVOqD z$1$2ff2ED^UxvB;M<`|xi0R-lmx@0u@A{_)XSYA|IA+8xNtVce>BRZ}u!6b&!t4HZ zm>>_H)c>$F48w&8!!Tde@SCOoDa6H|VVLQkiJbrIk3nL<D#1|sN9SPwnii(NRw_$)_z{2KGsJBE>iyG~8~>qqn0>>$;eT}F43|;=4F={I zmS>1N8^z=A`HlGRCQ|oTv>K(Oa)LWU&A<2a{VS!{|3K1_;kyL-ryu_-|DUG2ze`#% zyZf4#<{FfBP$O`Q948Uz&M*k-l=RDRJTZBw3dut}1G%1f_-Arj4^o!7MgD)N-5_t2 zH~y3|pnb+vnzR+QA^s>=%`#{HNzfFFUbPuXhVx2FLQ9zvcZ(1YTh;5KUJCn57abDS z!1Go3&XbUgP7+BAV1iOQI`hruRR-59FK;~lDf%Q6{j zP$PgCb1p2bx9|uc^{y#K@z#Hm2@Z=b5EHB$kSo~w+K42I!x|2IXPdbo9`^65Bu6oJ z&GYl+ynwp)>)6UIzSuAtXA|V4w8?%H)?COiF;!6Pt@nXDu(?5|8x;?ZLn|hGi|-O# z8;#)0UG@QzN8ozF+Ts|1S6L7!iD_*jYLe_HcKPJ4Hi&eY2&7}#0nkR`h`9{EKGd_GOG8*%-YvEnv)uDab{m#j7EK3U13Bocp}~bz zNKr#r5!oL+YgvEmO*P5kD;adx}5D`tG7oXH3e0@Qf8s5AA8)QYB(C=u5~}B ztPr{SlfRl*@vTm+T*xVJf)okc>B(PcyCeU5yezbGoMlH&jcXszA;? z)Nwo_=TeH1zB7IYAuL==0sIXY!=KWOhV>>vF&U%NG}wO*rGwz0+Q*)O@JH8R{1pwZ zCl`VXgJ==wGwJ~%Ko8*3P(*qG@7p)~v@F?L^~UVB!-@(dz-QwB0)UCd3ke8%-okbO z^!RB68l=d?5JAO656x6|cJuSp=bVdy!E3f(tN+a`5c0!E1C=cc3^DlmLD!~1IU*&3 zhz2C~)j5CfW6CHn4Fyn<8z6c@2!3V%IoxbClv&0MHMBJ|m-qeCkmx-JwTP=n7DaoB z2g8(624Fw@@eN|>LO9+o3K-4jD3q(s2l5+0d&e0`Nl>jLKe26BA=}i0BiM>VN47j% zYD6ge5LX8g7O|D{wO&uoP=guYHB{QJ!n@J#MgY-@dc7++>Z*A@&=8~gk2m{Vq{ zpRQedhpR2_m3eMj{p9OW%0u{q`%!-|S?qtgqzamMahX}}eA?mJ$REl0)`W7FBY_=` zT(%CCI1A`p97U|qiEE-*2VRlh_w-mgSPPPM1`wYXL_F;E7IndKr(6XfZ$;D_4i z>omlpEk(4LV0%o|jw|#)u+vt;NbE5dl1}>y@t!XeRk`BJ)3#U*W`CCilzz< z^pWP4(-7TN2s;$x^tE_ZUO-{P1POntQf-J*%80g80|DQq=J(v|2WO$KAy)KUg*81- z2m@y(hVeZ~DRlGDVSQ8! zxQA*FfS;3Gvrj+DlBpS@v9FY)*mQAIWo&0&4YkUM*+NwgSssC_BYaz6f8FSO0SYh|<&^f3qojnG#YS@iJc@MU0hSR+Liy9&_41QYPY+0( z068_$iNlg^cm~U+Swd~xCLJI=cKXEVGge%;y`>_)0xvn35ukvslvOb*rs(mgkUHc^ zA-DXfd(EYA?aaR2@`-=O+YO=6*)w)4I7Z5YUPfa4&{NmF7x4(ZHP!Ux7+{Bz%k4vwX{X>CFKe8c-^+ zV4-?BonT3R;=n!Cs=?1s?j@B)_WqX$JkST6w#zZ@b1#2$pgbI(eKDlK0P2EPB0L=e zKrX?2W`Y$66nPZ9iH$B00E%w41Ky~YQ#VJRnBT5MM}6iaJEoE++)v`IRTF_^WAoM- zzOGU7dn`2iR@iK$*-~%h9BDDB_!Lm>k_o1=FR}~cz#4*UyYKR#x1s^p7~lGoRUM)o zCIq0(;MIRmr;f8-9Tcxj0J+;F^ZfCR)E{%>T6S;6-D2fqpsLQs4uQ4U;18sy8P_Ka zU#>#R4Eegp)n%?oc67!hH;UbRiHsd&6SyQ_ajG&rGh~pcd~xtP^&Aqws{(B{3g969 z;HRG)sg|t#K#!~6#3oG;=E591woL_CV^Omq3B7-D$Z}o9TuN|Nk{>O#a=X;+;NrsR zE>$qy{Xk;cwli|{?27qAe6+mXA1L?8gmgDy`4ENmfl$qeaLzT=AI>@njol^x`oG|C{)9iosZ^ zrUgbxM865}pmKoDr~KT}BSjzTd&Ubvvb=wY#2U0&^TQNRB=}f;+BP>RmuJ^a7wF#5 zrB--F$6qA0Q=pS%TR==muV4uhn!-0cZ~{X5%W6oMV^hFtBlALrI+H!3Hw(+3JSjW; z6#nIL2XC_qcJ@o~{F|yvP|D{-tG|p9!r+({O^`y^&zswAYH<)2-SF8W@ZEu9<|Tjh z;2YB902G@JpPhOSW!E3lxR`T9)R=^h}S4cnr;{ml0!UV?B0~mQ5&@F#V3_VRacwC)# zh{(%EAUqr36pdVR{@jf4PWpBW3J!|k8}J0>zbx$gxIDpdtQvN@{g`C5zGHue(RSZP zyK{zxxwSV%M`gV~^B&MC?Yj&D-%40sX@>_;PHzMlduT>AEJc#DTjo;qyQz9YD7R0!;4J?1fj&U! 
zuap-tR*@Fb2Dyl42wN~qEQ#}=I@QC7rwqm5^-+^R#iS=hpu7P!q zNW{Cq>Z)3B1KxxoFW}GL=8aroOy`>IPIF|O7~IOhznHzS`5d?uO}Y(5hsQP?0F(b{ z2r;76tw2sNk##7w(?sYQvpFmYjZK!zkGsRQx(@C`sO+fnV7?`|)0KaZcJvyu;h%TG zedv>>Y^fMB`YJq6MZ4k{h_c-`xFKaaiEIm%BKJ63p-4kP)<&=U6 zNm><8`mIFSWIYn&LxUU!7a%Ath8JTQ`F5mUG@`0jcvH$9ghVxc-2VE4OZ4{cD zy7$gUoP@^D>UmMN0&4c)0U4f?EH|L{q;Co_03f3=m)PeKhnEn#lycagV}ZJ3+;?>Xe-htCv@t)jaP+O85* z^VrjHMSwL|p}C&2*X3S0a#W24?9VhT7g^be->h){8z9heJwjJBC zDLLi}7BZpAi<-x;Bb}K_4>lB@WkW4E#rUteLoZxLIFmr?ZO@;WSTNx3nbtK?d>jz} zkrsI`4nDd=<@X^{7r#@f#A5~(;o85Bs8%)oQ{c= zrffJn;-xyOMP9a9gN=QGhFJm}+g{$E2$segjn8N0nmKwmf6nNz-BLQj+D~h4hEY0; zgs00p!}x)Rs`VO$cy<`WmyEhPxOAJJaTQOQA)SAVmM{c=pmV_4or(csh1XB_Kp-0& zz8fW3FpL6WqwC`a?TON6UR-kungzxajVcbS>#heg#6t zd?sc`b9{26>ekxjf&H1n?g0_L5gXuQTjwj*aJ*g!xx13XVhK#r36?`j`4$^8^Jno{CC5&E1qqD!(!aoEp2*d(q+hj@`$6T&6t zMv@o+@l5Qhak{jE`pYx{9)#qY@>YsOCLWXbBOrIqyCE5>ct>WPZKH3IKJ`x=`+_DjCO$XEc*03dx-Z9=fy82V( zE}CP-LQ8rvkwQP68gkdSj*`=$lm35zn^jKykb&RFCIWav4+0d1zMD3ZO}I~QEUoVR zf!6O_IY${l5^Lc!igaS$l@WkG9?o5AWiqsCT z#x1bdZ z01AqQQeMnIKY+e50I-m{n=^k$jtO8gibph59xqwh1J#DDz!HVzVyLY^>~*~^XE$tHRTkhT=vva&r@*) zO=Oq|uhAIQ4Pv0c^uK@U`mQZkm1WBh(gPmBW8%H{Z#dyWfDq>Ed!V)UKIiuBu9vDo zm5R*Bh&g8jWTeva98M#THOP0o@KbGVw;ta!Br4>`aMT(Sf8cZ|60t<5BM#-c$3$8& z$Q)ppvgCdb8^{Mg&T^oeY4T^{F~w-4W-ZluAqveOaxJIz{bYZQh0WmNdZU{z5!;GF zqMsz>>+IT^fIprnHN!I}ON!@hRjr>dF`T>43U{CEp}4a@e#q%5WK&ncwr?)kGb}SA z6I~7C0QAyNG^1jNroyYio;A8~2~%s%*KDbZX?Kr-OMQO?gL!2Y4);Z&#xt+&{^+2^!cxjqnD?8y4-AoL#S#>%8gjYkD3DaP|bn4C? z8*lG=l{RWerBS6kg+8KIN};~?h&9AFohZwpn~H_5MU?;6HzqQuM%bkdf{+e#YrFincx1 z>BR#$3avJcu%g<78^1M_yMu#Pr;(629>EyHY32ZZ*+Xip)Z15YFETaS_(^M%)o?Pmh^Ad6apV@ zQWrdN?L~j%WwJY?;TaGy8*3qRg3uoIgZ)Y%n;i)TeJM4FTDim#z=qmweZsnJ;LTq# z%jt4?#{>$GvqFs{ct{8Td=1%Wi-s&RJn48)2xDEfO`4Iq@*jtVw?!?j)pM-ZG+7dw zODsDmsA;{)$t$=eNEYYuPzOx1^RYW=gOk(Zd7XbQf2tOjL1$hJ-jQKD6s6P*kK!u% zE|;`DaWz?H_99z{sO95&;aCe*_#KxI(v#XnAsKw;RVT07$QbW!QJnnfHlkRu+D}ag z{q$1T<7D+#kqj|4ef>mT4gBEwi?SQtU7 z?+kGM)8d6r0yKezhQWQNs z3Xi7vuSh^NmtvAZyC+SK;iqUYynOf%+Mj<3=R5~RU+wz|(JOpx&%40{C0Qhi@cH9m z=hoc6I`hHpxzoH7azn3R0&4j5WqD;INPc`EjAJ7R#VEXlbe1j0PPwp4+GFY*MjhuP zVFG>F+X3r;qwaH(ERVsvtnD;yE3Rx)JDHy|;hN ztg%m$J68|O+{X|EV<4AcoMMvf{SADom*v6>B2%CGehWYyeVp`Ef% z)fct9Z^N4y65+u4Ic&i~M*xx{GGJd_qRpp2cuA|rB|=P zPVN^=-RHA+%aTC)&fOuUC+vu?+MV05p2`Ksc=`B7Hvx3Io9rU#MhTkpc@QRIZtMCo zx~J6HTzIY0M=%am#e6tb9khSf6B8bm^C?yRK(F7B)AA_?-?@zqIYaPex;(4vgk*oE z@5YW8!=)w}WlK&qJ>w}UD zT}85Yi7QW0(hv^h@Z4-}J{ohLb-iFMDD?o;cP?SD1F1QTca4C2@63Np#(_+(lqfPH6-!Veqw&YCjqy=F*Yf*cCRaJ*xvks7t-JVCU z6L!etK<(JQZG6uNlcVHfH}V8N(iTD(jr}4M_!=fM+6OgHRp(rZnkvph(R?+R%QT7G zL|GC}h<&Vd9cMn;zKnlXKriiM1olZ}e)>sYX*M-UjeJZZJ%m-OhU8OlDLv*UcR9yv zoP+twLk&hE^0Sb%-M)HunTd`T%kTC`yJESl?5nNz*Z|h-xs-o*$8(pt10UascF1PV zBmwP!)M+Lw&g`<%R2ytllA3MV`qj`DA{M}NBwGG!6Z(jO)g^xbA%uAP(^#<3T#tfl z6IfYr$mX;7;L65pq?cJkFY?jn(F1toV8{r zmulvre!VD5-k?9A)9ecogQU-E8<$l6LSeEEMI?Q)At#c-sYNsH80niqu<~WTAAwTb za~bG}AP=`!?6ZG<+53Dn0pyN;+xBP5>1m`9tqG=&|4|ET;LG=WCi{LYvhpn&Z4kXO z{i*4cW8%YrWgh{hSc;inno}C+O3==?!So!CT|P6O@BZw`*ZYHwL(S;4mz0jRlvZKU z?Sde(IV@^;LJO&$s7U)S<2zE)Vvo-}T*f|~=_`e*?HPa3<%AfVe!kP!nx-`}09jM0 z*Fk5moR+L0=bt{gU2lfy@8bcqZ2r0ew2R*(q9pSpkno~h??G?xR}hn(of(u@GejfM z?-PAyQJ>~Mf?Hulnp|(pJ(AW4D6I`9-=o{N9OcO=Ff^ZY5jPm2C%x?`GSJ-^LY%Go^Hm>PdFaF=XVCi9Of8631&p4*!x zW7ae3=T7Z{Hoy11(NrPl92&EWN7IEU_7t6{!zm{oT^4ehS6@BHC|X?Mo0p5#F{0IO zFNEy0j5N}ai}bK!rz^}J%PT5f`lQ3BsU#`CA47li^j6MAaL`o!ov@st~mrw+!e^`4{Kl;=|}lZQoVLLdNareTIZ~@lr==j_08;o#ss% z#NbsMSL`H1_%jZyXU*{l4FPV0#Ir4Ft}=hU#4B(S$LKFz3J!*UrB;NV08ci{9GxQ` zti_6~>b0t`FRVvMk{8VP;hm-~9(?e!)w6II0t*x8ZMQ3=*LdJApU83#^#pr~_@u$T 
z<@e~vLhb}TC8*C4K1?wPwiH$%8rly`ov}d+$PF!HnLR4MvEeBX=YeM3vPQr3Q;~mo zB7*ml*giA?mcpgga~hp+L!7=)>r6vNWP^Z4312EbU1jFw3QM$6-PSQ2U*cs!RD=#N zVABrtO3{MsK^xD)B)4x_lhg39v?LWSjU*4$B-ju^H_OjMe9KzrX?@cD5gQ3*NQm?C zfN_9%UrE+Uo%XT#9g?_|T9s9etVl zUJ6`ggi$pBM?kp0vk|t0q8u5QB2niBOV%&nsv{)(7H}E(s&Fi7Ih7TcIq{Lcrnx-D zi%?S4QYmZdJ$*xD2_O)ZuGDc?X5>Sv(%d4MgLYVY0ei7oAy3ha9$E zs&AlB5CD48%AHzVWcFIx4f804s5U{FBiO9$qC-B@!*hIrlC3fCj8q;|c`dRUV6PUo zrasws)hM1K?ik(ySqCML>sZ@aB*aI5lf9o?k{LoeFO?L~HKK8yRRhf-ty6CznB5^R zH)3vB8!=pnhDp$p@rbW#(!RJ`bn>)@KDUUm5eBFR@8`9&93_CvF3E%h<$^7Bk4R2+ z0?}1-KDqRfU~_|!!udJ3NpE>&rhLop?X3*3qwX;osrRy~#azafUJf+#!nEOk!tZKz z>(`0He2v;~xI(_Hz|a0OwPIhphLiG(Y<_DVZR7aC!Sri!gQtuWS@On_P4vPvs11tg zgGX9A!S+mOsdH8WXISDwLO#NK^qiZ? zqFn=I?QzM$aiBi*gVV`E45WU4MA9?@EmjJi_%N_HNG?57Ry2V5Im-OK_usZnSlKhs z8YjUJc^;+(NMd}9iaft-^LWp?0u6doR{AR-^D(p+XU3kAE0 zV;o66+Yg*YcDUI2_#r*n%EUDNehBr7jzazvi1IWJvrh;d?y#xct{leecwUe&n^>lM zU`UZe>$Z`;sxh=?9Y>;ntzTEcWhglwAktg&?2ginpQwjCvm$Z#M{*7cBV6qBYjt7 zSvIlS$g=J5Oo@udn@4rVc8-9|9LL z2w^8K8lBvHnM=A5!)ZP!O1=4^_%9z3z=B89$N|GmfuxpUp&zfIb};_V8{4>XhB zn#SaRWj;5aJrsf51Ail!m2#Y(I}YQpiiR(Eicox?d-1fAo#^=J=(&kim?odfHy_J! zc9RTVjBtbOXD5?FE2?3Qq2HY|015v&llI=?B2b|D9UBY&-C838*7)s<0l5C3?LIQ# zE8yPW&HGqM!v;^A_kcGsIS>ty@yuRHAt-TwRsxC)#-06CS_6)li)@1iPn>($cERvC zpX~0+*ptxN(R3f2JMMYJ;Fa{;Yrrac*8Avy=0QUyjqVi{Y*-KO3|Mq4PXCSXyf*LlB_PLpX-ONSDJf3#BNpux_Z`)RT%6d><*qot0pZUK>6~UaoPhsqTRT>^cvC4eOjq38Fz#NzAep=S2dDC1#4GQQuYQgyZSml=^tMvB*s= zs*B|JDHZ^o99d*|n=#i~K>=WJv_2Kb=~$a#h1YSIo3@F&IxsK}uGFo68By4WQ#2td z>QqMN?M~8Tin#_>apcW)%=+NHWWM5j({i={@_en?@y2G1vzklOC%dV-9`m5-Lh(2n zfgk?0KzO9pm1Z0j4+`2Yw|m=~)TWD&MdjzCb2U0|K^X{Y3WmI0jJQ-`8rF`djbg9p zd8%lql*NNS7i*W`#5a3??15HVrzp=y@(FZVKX+~fFt64?4$;Nh#YG+^3)5 z9c@BQv-X`dVx5-UeY%WKjCjv+Czdd>a}S+32&FU@5vV6jr83=5W#3+}ecWl(8ZwU} zh9@w#ED`25JC3$5F56}lqJsl3G3RYJ^W?RzWud;U8f3X`N&uf)U{RykMopY>vTK zb(R&@auxj*E!uHC-|<4jv#tAl`;OhM8c1J{40m>#6HSlrFqpecd4p!+F}^ zqM=%d#)9rAe4$lOi9yBS#=Ln~>XqgY-~;>GE8$bX_LgFQI+UgU0^$Icsl9mdZtK{? 
zo>3VC>>z)-Eu&5qhcm{m*;lL-hddvMKgNPHc?fp2Q#!H&QodPnMRYwR)N+zv)#0gZ z@h;XlQaGMJa`RO6u-V$yy@ZpE>W9KM3?CE%=gxk|z8pLeuvuT; zJ1iYXNPvxhjbAG)!Lm%&rb(fsRFua2W&LpJ8N1^8*%#dcW+h%Wc@Z|f=8bI^;ie+Z zdl%pN7IEg{SuWP+OOHmcCc=Y_0!&wSGsG30M15w-3a1K(1S0_j=}5=nbAhRR{r z%^y7ms*6&OO%uag)_*Trj_TJY!0anvWVdr)KITS$8yO}gtTSSx$=$aN)jmFI5uMSQ zpGLWi)`!{D&lW9y!K1S=a=ZU@^iM@ zBLMvQe7~n_P&g=FAbS^Is25F}2Pc}uyNjaZa?Hib@VP!Zw zvk}(7hy8AVwE3#Hb58Z0qblpe+S%|jb#)5jv1a_B$18eQi)bFRb=Y%BkWsui>|I`e zkH{PFp~F(q8)JVUtq=m8LcLS^`yh7N-(N>QuuIAHM+7QjZbUzoy@J!s;X*E-pPJWm zK|DJSK8^1eAH6Q%#E?mXeHCa@%V6xS)5qpDwe2rj5M%%p$Ozac*%Ow1HYl>`T z?N4{vwBjHn0;UxSM~vK@Z%>|z03**2vbhfSL>x9G5t6iD4>qrhW1*7Sg+&k=kSNm2 zOCNJqdZ(;&F;jq265`Wwkv}#c$>5!yCwYlD=G!cFy()r9W@)L%GNOhXVgT`f>Q(+A zIPZ~M>LAm5s7*=15H{<7?M|`^>M8=gZ>H_=&Ja$?hhG?aXsWD%x^Mm!G|_NW16g=U zC^OktI(vHhpmnycn>KPEV|TY^Qr^V_e^w$wY&h6RBBj1?_Na0EP{japA2Y_5j`kh+ z&?4o`2>F=)eDFmYTIxm7Naw_VPXRdg!-%~C^!nfkVWn+PV<3&~%C_&jLAgL3)xkqC zY#*PMrA}#O+jG%`tWYWJTL!AGf>4K}_nhAd*lN8Y&&z&_9j|e^xr(a2g4xT8rnS>9%>l_3b@fVx;*)lNE_`H*j-tz< zIh=wv(X}{ILeHaqV3VE_mbbBW8)?Sb7-IveafOm?P@{z-4a$I6)0K1x-@Zn3G`RI+ zYfei@%y-Irn!8j4F(u*PIFziI=PNauOxsYadw3WO0fB_(`Z`wLqfL;MU$)5t^ZW{h zI4(4E(T|!qC^9(?%B~)Nj|se9ek!@&;kc9_6kZtgD7M<_E?o3U7}+ufehv_ln;Dh_ z*z?{&fA6gld8CAF zPE5ncpjC2k2-||RNiNGiMHUXIa4iQ|u5_TP+-aRR*}=$g~U$zM#{B zRIP7XGzGbuaH9Kcx|H~b$w&rXl&=Ooq%q$TYSldAB0G*i0H%4GY{|kBgZZ)l@{M)} zv06Dm9=HtpQX4#crQwBJ5YwHFD;);QP=NP`L)g&J@$-~ z1p>iKu?O~?lh!JKW4ZS}0{$G5+83p+(v(C@_P9uy)o7d}n&b6o_i4Z(rnWk962j9J z@v51mDTG#k-PcRMjH_t6OQ)%aMxFVi372;`AeMznLOkEEJ0V^o`-Ux(O`-JCkl8xy zzvL;^cL}-AgzIC;Z*&wLWBKW&Xs+oml>hchm_d$H7FyYVzW1FeM0er9A-pW}iPzSV z6#42u1$F*}QRc1n`=pAk;e>yI#a!G$pR@3U;=ZoQjRMASRQ-!8P+v|Bo{jps%dF?qx zYFCDB*)m^$N^4c30co>Y+mZC#OL=7;xLdR%P39O;*U~v4i1YH4hs@9N5J$s|amJbS zs260|D^!cBU00P*3*Otl)AH@XUFYx(f}T?D`NVMNm_86asNW)K#4d|p%BFt5Vfmw013#*~*&S#D9hM#ok;&|{Kx?Xmtu{#xijBtT<;yF--aERnU zz0WS6g>Z}}eb9;-CTaHVKJ)VXT43>Frc(p=8{ztJG4FG+ll775;gjjmy%`Gom(Ist$Y==<|(em8r$JI#O-$idxu zh1mhn5B}oa{Ni2x><9bQD>;D5^#mCYK*j731i%xhCjdF%cOZXkN@XgU)tePHxcC%m zTa;^^V|INoJR8+7ITWP3h+=mJ@6Y^y33%y%L!q`|g|%S?$TT#;EQqK$6>VjJ#Wltf8+DHN|uB=j*Qlal}2y)Gai{L z4g@B?^h!q!&mQDc76r#S@f=Y zIh?-xNCGq-;3^}sncEd*AAh2M&>qAlA3&%dV86Rw>~0Z&x4?FpXlJb98AsqVmEM6f zbaz~L;l{Z5T)^ttI6u1PT@MsHzk&y^p0qOkr4(WdV7dEs?*Tp#VW3d#X9p?(>BZbZ z@n>D&-@F}LV$doC_2ekPe}T+nL}s#OyQ;R^-Zm>jaLk0e)&Rx^%%ZD*z~)bN_XDN* z$9^#;*f=jZb|h3n*0Xyc96{)x-BUVbUU1cwt0OJOA7U&dxKQh^7X(=74RNZ*zj3B7 zS^$2`Vmt8fak7B_jni;qM*z9VR5^0r$7p@TVDk+OV<9q*A9;Y}SwCWrjs)xKD-aQP zXdk3Cuz7c#<4;~Sq5S=S#VFu`0we}F6Z@?|ao>mA|JXymVvMQs@%9Pj%0uC@M}3ba zqs=3x%0pq#2jxg99dwMv;IZjWZ=>EDZSQurvSE=S1nJBnnej1DNsQUbi|kghUI#%Fbh;h9#&4BLgIZt2B#) zjwkEL-0RnlN(Sl+LO|^^^leY31FYsdf86a-t-Ju|&%EVR{^6al$9F8>Sn&XN%vU@S zV*`~5#iwiVO$!-O_4JS#V}`DK(hp6t^pSb0sVHm>uB`J-WoZ8On=iOKZ?OuH-hi3z z`RbtJPxa78#f}5Pu`j)iFXQlQ!_!Zp)lZ}Kf0&}6JPTZ4Yqsw*;`hPVauhR_f5rni zS9Xb$_L3_!I|MdA>p(ZyRnRof7jAoLzxG%*L+;@9^XKa4&$aC4*grVNocRO%#W-e< zMP|=M!MQOpk3c;>gvw;<1|HO)ay2iAu!9hj1pOwj|TSry2^Iz%A?+6HDke7#8L(GQ?ezV+ff1f?7Y8{Bu;M`rqP!5E zpw?5?A{Aigf$Zg-)e@O=b!=|Sk;>6Qz^qmq=GtDpixY_2|dUSfa%yyL6y?cwdGm*ZIif`;%1 z_XOI>x=OHGVlrz-R9u>88$~v6|79@9vgIC>6%Z=W-s>kjyUpI~cl!U}8D_NkjqUfC zMEUYj!iAIMOsC6KqsXrr3RSf9pI7r!^)kI;6OrBIm00 zn)<1A@~1QHTmS9yU;XFmb?h^ZJQ$4r2lwx> za!GBm(`>PCS!&irs^LZ5rSj$iwY?tK%?Ia7snxH^Aqt&2fPd?s1M$|E&g%R>{{QU@ z;~%|w`*9xre`2f3rI`D}GeT$UPE~z*C%1Sv-c4I)u=CHopg8&e@g5797`%b}1p$2X zo^;!vscaU2U;Na@$xAm+gC?RWdu6$5`TLz;^rbs*UQ!@v0%^`}x4*Ib%}WJ;rzoww z7&q?y@h#Y>b?FaJ{}YS3#MHho9R8o{SN`8RG@2cJe^C#-P|wxy8b?k%gN?eARh&aK 
z&Kh?-{H<`dU;b-TvCtfT(|EtJufjG_yOfkY>i}FX8^~{O0@+nKo|1}<|lJ7B(e`4YnE2!}=_vEK=#Tf795wpjRz~s@b$#2~S zy4NJJe+474NGz<)p2FSNuwlty)0~HW#AxuTzk;OVF2)YJAysix-z(nqe1RSPij&Sv zYW!ZUMUas-NZqO9YF*XN-EsZSQd!?IE<9J+wYNh-kHoq3LC=5`d2^nAKpVjup&tlU zVe`9wuQLukcy8a-&z-h<6_ek2M$pl!<4)|Ie`hH6FP|9xwZj$v@xdl;R{|LiBDeu_Fl>S{$Nn&EAZ7R^6;p#LST1$XNmS(s;IZZ0pAKbLveJQI{;!c zcw+@Tt({<099au^T@}S#;DkUl5oZDo)r5V}u*XiymmP?}u+2el?Hayyq69qvi`r@r zxbDXH9qd1zjEweA9w*qp*cg=khhevWf0^K?)qmJ_+tdWV82M=$OxXYAd4J+1MuFEQ z%~XYEMTK!Q`5Gg;YE#vMpRd|s4_0#@1l_qwnDXjjz>FGym8E{=C*@T@al{}GQ#(#n zHZP>A`|_ilTh8vvHL$p9ddRHFiaRbd9IdLvI}EB0713Pv2ek}^)pGw-BTDF@e{&U3 zKY;t`!$GqD%<~uDe_}oSGlzftNN<=umXzynj`|ndnR{;c@2dFcTff}avu_SyMkjt4 zURs;$y21)*=-z49RHnAvvzHzV9Kwn) z{$#z}aQf;q2!`)m?~%cY*hd4Sf5z%>VfIziar1PygkpHV^)4qJMbpUthSd|MK@U|M9b6D}!_? ziE8T|3v!in&0T16imN-(;x$mCK{;GIJ`KXD`oyK8O)h!BmMW70l`Zj3fBaO~n7O>F z=D0itbtkg^3Ud{8N}Rs94(#*QL0H(xoAqa85uqMM!!o(uwVGpNj&%agTNjS*RXMjm z0|!E37OT`12)7P4c>d2iV?v!vf8)3WKW_R*PyGD&zCQkomA6maIPizR2j#@2pB_8^ z`MV!K{PmY#j`}A)fQa^uf804>H$?~J%4siJfZA?Xt6aS%0=x4WU!{uEE$X!&N@EvN zr59{H>3mg5F|1YwLX~1295F*Xs>duM)9ak1cY0UX!2Mt%tW&(hTe2G;?y){HSdgqb za3sdBgd?YACNArG?~SIb97ldCtjx;*e(}!k3;VPN2d!2I6BSHsLJKTR^AgBG9hc@^ zEUOq5dib0NR2Z-o@BolMUUf)2#s2cZE5`ojZIN;1gL3QU`d=HI|8V~8BU&Z@@RmRS zaMbOWSe*x^fAJHml5Z_^&vmw$ir|*(G9qJaS`uP^P-p0f_;6pdyqaNO}8YlH#MMN%M%mc`Z zV+Ycq>x?a094jNbOF*3$k9?!|npZVa{V|*CmP(a=={HyZ;f~wa{&LO_m)M>MjDN$=KlPANY4Qy;w~mR~1SJlo2}JF{525B= z@e(4PdBkd?J^C(Y{L9`g5EzNmd-@xByAIW}d#s8rXxMOPV<$&s1|*6DXX1{3?e;xD zz5HNYU2i-2tK}3T)xpcLd)2vAS`SW9tO=krdcr%({kQ}O7BqkpX6K6GfdBxj1%JBZ zR_d>gW8d2TuQvG06Mxq9r;l!4X_PC!{?So?Yn1=sTlmpss+IZ=KO|fA~ja z?s~&dZ|3Gb$G>qP{;j=4|4Xy~Z1sne1v~#+Gyhwk_-Pj32O)LXd+K?p$bZU*u8Zfv zX;KIe6wpYnoWa8=Feo}B6=@%CIfVjBBem?~xA>NH@-0yh8DTq>^Y^>E<4H{31~ zUE7!|+0lPfwR0#n2mJ6*{6a6r0PIq+%8iHV zzxhi3i|0Rn{#)O_esF87JAYof8z+Btz&{)vi~r+q&iujN&oTaLd9Bj_@t;4n)aq}} z^H+2J(fGeS^k-Y?j>lWqRCnC@zEs`unDfWeu$g8j_^J}6#!hUWmz{JvWd`?nh8M+Z z3Rs6%c>r511MlLo^Q`0VvI83ah|PdrKbqSu+-_IQv8$p@MS{K0I)9_vPBmq!Z!ZcL z(`MUI8lxx~lYd%RVAHxEeZE2~aWA-P$`z&yMIIj%o9EesF9lXlJrVgnJqIYeZLs{b zST2lzvANeAU~+q2ut%)$AXdPxzvWSI?2`d*gPUBw>&oUTUt)mH0-I-;mhpcgpJj5`M%0c+g#j1Y+Aai!@I#ibqM9<5 zGuCY;c}b^ZO5S>RrKIfuhNY_GMf;~CJm}=P_0HG$^*+Hv{)CF9?)=TS!i|}R`;Qj? 
zi=Vf~`WG8-?7&&({4eeG)As-1f&SBB+-+axZ(eujHHYo+PX4bx{ZoU@H-`I*yOn=L zHT9Yq4I{V}1xcJu2#4*+u>H}S6*rlkl3Cfd9K4n0WVXXR?Z$ZGtI1>?D&dYvl3kmW zgxymI{OJh8S9kVEjPyx9fz8&F!ZvZEwhzOv2>Wv+F^}x|y}dy}1Y2*Wf^SzysH;_D zZZD4C+-Vn@lhZ96lP^HYdP5D&p<91^!&f{TP|`VNRzQs&kRfQQAl}(M>CL|c&o4w< zX4#4=1b6+a`!7${AQI<#TMPf@Pb{C^Oy%zn=1J1ws z(VsQAdHp}neo^E4)p$$B$xCqj>hSEv`Oi(>yJZz(E;0dw2uB`sOL zUy!fZFDWqKD;O{!n^XDkDH+C`bM0?^Yv<%t@|5v1Y}A%pt-h(dC7Xx}j^bLSr1aE7 z4_6L&^ekX%A3W9nM||1(a;F~^3IYewlmHL2QleP0eW05Bmf4qYN0uZ|qXRjG1O~VK zr3alI0e82W!v|Ob0iUBmkC(#92QUIZH@Ec32QLC0m;d$u{NMRk zV*a<&EalP@@JRQb;7?x2)yI)O=C2Lhn6 zV09{&@q*i;Bu=;Ba0sacn{K7s)^h*1_ffg+2fBAjOSZ+xO3+v30ilYa&}8s0tZiB#Q^DjwZ53-#agA58zgP#{#BiwIT4= zzg-jh3Fjn1HdpR0DkWqZ`O<_rX$b%U;3AFff}Op8c=_RZ#3G?AUFV{|ab6#<3kh7AG3nIQ{m_2GkBEYO4Mf(QVk2vU zs>STQ!RAUU?s!+=-y=pP9z1+?E)(o9^sgM_F4&mP#u9i zFIZB4T)}ZS&z{(386m$!FmXt6y!6e$#_m?h#+U{H*WauGx9p_BxmAlp2?fzsj#kLl zGw#u>3Inu^P$YK$w#BdNBz$9qEwm+v(Xr_?`l}-Ex?lQH%rC{U7e>$Mo1Mnue*#YN z_SZBC)fiG_*!(?e4_`s0BOi(vULh{>xGN0AHB=`-jd=r!DW5qkfjwzChB)EOsXZ?~F`@sOG4<+qCb*#04Dgo|qAFdJfr_aR_i zbXwv~hw6Sv6veCDfqEdRYvc0;XUc$h!Md0iQ5SpMi8Q1@UEDu!A#UBN9uVaW@kCvJ z(F|h=(^cHLnTIub5dx~G5t%;(Z48NP7$0xV#FBBP7|kWf=vA8b`1(ZM$y{VO6_XO# z&s(l)(zsd<&W1Af`Uiz%XB=|pNJoKvI`aiSmsp90jfvMFhCJ)HG9?0v$RG7oj)F5> zv`C%iF@?xT!q@+R=h2;eiE`_go-Uhzd$d-n1aZHk-$v}&@$z;$-@*P)P0RBcaug2t zBY&!}#7i77&M#w)1~r?24H}G`kj|nFjvb5LrNa6RDWx!itC&0|j$M7MP6i^Q@$d6s`(T0GVu9{wLj2@ zlzVN{h{}U5kPL4a9G8Gc!y}pH@Sep<4N%M#nQJmIB!FkfmG&&-n7*BVdMTuCz+5Ed zo8SP-$F(82)f{mZ3D5agiI;(71L)(jv&zFDoFn&sp``~|+-VKh@?d3SpcGK<1Hh~0 zpu~uZz?$CGP=%u?$6w^Y65uyQsJ}9K3Ix{$l;BME@WNmSIv#zJ-RAdRo;Yru6o`y_ zbi#xv%0x8uM1YTw^2g184Bi_X_NK&dZw(Tu5xN@pm--xvx5wg;W$iRdaX*AK)z%&L z)(o<@Nb8>jLV&p?i~iNxm8k5;uj)c>^dg&-xayS`zA_oHV zpxuTmLl6dM>tK+7E@L%}13o`b#)Z?PCwM;pI^44bp)Nx!uyJ*j!6a@M!c^!(E{+f4 zO+##8C4F10_&f>$C?OKQ*+9E4r~Ij+kIX<=2^clZQMn7ZZY(J!zM|nR`yFey(yLvj zQmWx(Smwq}qNk`9 zo17NrqbiXQNKrq4>Vmjr<7J>|#hRy`mueV;g32g?W2aPK7i|;spiRRtIMQ@baI>>8 z-!@(-PoI-UUm7H*w$(+qA_F%%sBC+I)QmQ408p}OZ7Ml<`w)Ylj-R&%y}yZF?6JVq zHb!M1x7IU%2SQ(Y@7R-6XATkK8?jO{S=oH>Df1;HrUvR0=l|tV%W-8E#syy zkR>V5HS)Esf+liie38V~vk1cxwm{d={n_9~hqyI=pgyOWQ;sTqMcKd(ghnGZhZByC z-F2HhobKs|Q(uud$>KXVy{iq<28YY{T`(RBbns;OEjpo>MT{ z^DDQ17YaXUrfK-ev!zojhbUz<1j35O9xaE@#>T;%)RCGdL-O0wFueGmtKmn6tvXKF zkqzTPTPS1N0`~cX7I4MoRr0=2(A{*rA@1l%818>GX7+dK84G{QlQxqVaaPGh4|9av z<`!;bP(DfU71BKB9uWD@zEjvSl>>RuX+vy(J{qBE0Sw5#ys6U3t%$_qh*n4W)6USy z=PN_oNgYFdM`Z!)p-V_ZKxD$8gzBw7;%Lv{V#iN{t$n7RBRr-E+7705lQQ*CkEu{E z8#$Pt90BD`{hl7B;Sl{T0}j(3^seiamHY@!`*fTSmO)q)(R9lg%j=Mpj=(Z!JyW4I`68iFKdJ za8y;IEN^&ek)=DDz@jrV2pjw`d^Hb$_hoxD>sF~`GCpllbk9o=ex7lX)=MVreyFo` zg8w+=H!t;CVmda7g_dDx0k$gxf%cj9CcEcp$%@(RMBg^60G0aGxldHPus;Rbt7z$( z0N^$&+=PjYk%l@GerUAclb<|o;08tMOVJ*f>xbKPiBPcgAzFqzp_8Jt-Gg6$_T%;v zWwt>ACB9u(UsUr!VGd>-K>%G^T-X5=NoB4)a(<7$p}@M$buDn8vh@M1N~hjTug#Xr zDgBm}*ZjRp17M5QRX~%B^#%tHj;Uf4EJRn(PuZ`?5k!viK#(Vd9X(eC!D<4B0QUOi z=--xw9kgnkKi^K-;dCkJfLLCCKvI{L(_MoFZRm$z$Wt1dNmeKt!c^IBe{spX)|%hx zNA!d?tklZFnOksnS$$$=>Q7&8&>ZAy;u`9(yhY&`fz`qUj?*F!cip(SglO@g)A0k$ z0_c)2lVjb3=+i!8o{UwdCbvQ*s-g6|wAXZxe8C7^IQ(|gJ+UDTlw%2hD$3A7J!XWS zb(peb=`^%0>uuCbT^m)LEV_5fvT|R06V+Jr8KE+v(?JVI949!JcrvpBfwiANz+wTq zc=J%>Nt`AR`tc`1YciWr4G6RO63(L)v>&G~VC|1rAa>5(Gd>5_RUAsU%(#02z{;*_ zais}g+)ExOD7A{OEzIhF3jgHP0XpTT>`{McR;oZa?_lH-J8u^w(SLSOfh)lo0IF?m zg}25qM@Gluw&g5=soo|aasu~+2V@X`C80d&2xH5lgE6So(8f= zBk`S4p5*FsQHsx$>&oW)V0#KkERLLI3WNJE2OxSb%UByrcbog-P~EIpie#_+7^{2Y zQHVHYNt3S{L!{Y%sTmrV9ux%`@MXFkeuesk##}rS0Tn|xFsjC6Sq|BXcnYW5KGZbj zoPKMTn!0*NCueJP>HY))qHc_qH-(mbO9auY2PeXA@|M^ZE3H?`-Q;BIB5r*=w5H}+ 
zRcSmuw{z>ILUtRR9LS08>Zks0!fZ{BbO;ql$EP|F_=pI9ngFfBx*gm77L#xFM?bK9 z4`TYS6B)!8_)t+=MDg$p19H-MJ5F23c0Z9?)g^DPSIBL~#d}I3CKJ*-DxU2kQvB8${V}rW#s~i+ zadqNCBZ4Q@>$9Xx6<~dHZhAH0JG8|3nf>v>AVTFdx1(ad7WY{-F2MxFP4q1c(gMsj zh0hp&=m@uUiyt-nlLPep%m$C(XPCcdg!_P(c)EE}?4qqcDVv+CAIH)^KVU_?a&pgN zTUcBkDr=j1s@y7{=!DBJ=BZYM{;9>+eb}_-YF00hKohvPgB2nn&4|FMM}_4?(Vdf5 zd72k<)M7`408&e$NvG_8(Mk&UN}>n!SUhe$zV2zFJ`+N|0d=?D z##e};dt$}B1uT6|J$@4FJSP-25#n?^cs$m{x_7g2sqexkTw7W~k5_>#zsA7tf!O?# zxDc=({Vd=J{+Iy!rf9@how|T=*=Z3&-W9pcMe$>(rJ~WT!AVlF$P=OSKwptCtE{?z zbeqL;vL8%I1%6sU*ZmUNKz`@|-|{qzG~a4P%{v9-*upR|W!#}wG8MbcBpdo@dP(Et z<+`;}4nb0O#)MCOw@_3dDAg1Q+ZTwe9Qqr+8NI3m&aD11r?SRwGS-^AP2gssUcV`5 z@+~7Z&j91}V`ZPCNjf7*dNJK}G~h3P0OjkyY}~~@EV3ov+i78oBfa53Zc_(P>2Z*=W%}d@{ZV#<3>j4BnP&3@ z*7nHa8u8sSyR(YdQkxs7=}i>nH#6C5cOhVVh@^8^)iu>_Xxv`6!Qah03x=LV9bw?{c01z8MRE1RT91(mOMbVe* zz5#({;b{muCJ|d{VE5h6zTW#+K2V?#;zT{eK_x%lrOn8ZY6j0i%YL$MGkq;<=ti1U zmy&I8+!vg8POn$ahhp4dz3-BylsMifaVw=4aafeI-X5KTGX``->U4HkC4GFL$5fYdnvChg{yHSPR*pO zOn<8c{~e~!C2Ki~Xk=x|vyi;(9jbUYWms|PCk2|ujTzDe@86ZO{sJRQ1~DmN>9j5q3$PH<|j!F$ulS2m>YY;{@WGo z1D+c zgDmKq=4WzUFYwK^2Vhm1UbQ!R#2LQef{6hSJf#P;(LBBbkxxwi{7s#6*rm3k?rMY8 ztioY`H5mL{u$#w~#Wv(Xlk(k_d_nPBn0(~ib*+n&a{*{Q&V)(2?E5RxRq(x1K^on zXd^BsSj>}eOuwpjZEoK0ZFJ&CJro)fcY6C&IRpUNm{L&DtDAVDP zvT4<(tvHH6J_<$VrdW`jzJD^NbFGgs8(VT}8lKH7w24q?a&D}-IcAMmTr!0YGg(iIF<`0nmUUX_54 zbV9BPxrir~H5uJRBf6fa6dJ9c*LUUs)12&go?~6waPiVTCUPVveu2-Le8=8LgRB+9 zn;>){2YReB_oJx(?z`gnw2AN*7d&f!M}Yf%gHBeH!5;jIsg&gWCOZXy`N4|q*v?f< zIT-9T!uw#9yw^mT=qg;c;SNC($~{AR$>qW|bZmkk{1*ja6Vn@0&IO8_cgr=tj9Mlu z=q%*17=&-7CuDqnF7I74`#{MIEGf03iW*zjEDMRwgnBA_bG5Rx4kt2=Tjz3rUrt!< zu&00p6y5eo^HLvRu4mFpjKpAWUNu{7;bSfBwV>N>IavD=lub(uIUb!U9QiQOFkC2}otzge*q#cN+DV{_pR zeqrsnj_)dU&2*{kxcI!ZDq$~JC!k!CS`m|L1+KHIfu8BRy6ylo^V*4uRZFT;*JTs6 z5;KNF;Wb%(N_YAzMXzx^dQ%xjNm<1UqMPu$Y+Pu*BlJ#jV)w^%3)eV*lv5YR^B1`^ zHAKWwrf1)vnq(A&jkooOhdmi#VhH~oB!<2gnAIA?)RD3PF+k40rdvwjaf&3wc=aXm zLrxU>*;(M3J@=ujh`7nyg|&as8Bn`%5{Gy|(-^^b@IsDCz;6I3LBh@ipQQ-RPwW_; zNu3GqJ}G0(_NL3Lf+7DXNNkjxe{Gn3{t4d{D=;%Rti;TSnFV}q&3!Pk4>#QkmNT>C zxLSUXEV@x^2e_aOb{jIc_922d&BzNP?BHt4LGgUFJ5Em1{*_+>GSP-?blTh)G6X2@+n9ekaWEFKzW!?adWc-=&mGHnu{t6RNWO8>He{E)N{x9?d z3u91|!(T=y$>7z}#du0Yq2#BK=;DbjeI~~(d+8+pY6h)nk##x|(SrJ8^e~2dDKxO2 zPwVqQ5tDU0*RmJ~6Jz~=w83EwAm>i|f!E<)$6wKotcfIBjcF4`>|eV;OD$X2LYMQm z41jE&ki`9J$iEj|2Y{y~f7&Zh3z9opNK&~ve>5#+>f=x+FH#XQAPb8jOrW1Jms_Xk zEv)qpwIRuLFUFpz4jaI!Eg3vnNb-Jysehi?k7QlNWNJ}SrgM#wIj*6RthXckZ;UqI zqNBlU&6IqrLFBF_HJd~1es_W(*4aDZhNd)K=hD^Q8VC0W5wb?ze`eZL*n`;*ZbMk^ zOhMrZ5pJH(d>~hSr)#+y@?8|F+Y*&Su6wBT+JwN%Y~{U7pf836YJ%dy9iy3wS-Bf3n8(n=FShgsV+E&QB(E3M z#Lv&dfez^*XP{3cb?P;kn_F>wgf-FvoxAtQyz@!^C|iHHf4g0UMhOMi_ktH{v^0>m zY+>p=r{ty<_+I==-u>0HRx57TL-qtdRKB~R*;3~dyKLpU_wB}%%b;JLmVr~DW{&vpgpA%gCr@N`$0yoCc zJmTn&wH^Ke*U=w|Ex+?60$7YD(4TV1|{AEFb?N%$|1e(L|t?Vqj~iQj~K z!)8uG;ZIDx|1*pHy^wQc*D?E4f}xUO7?t`fDs=xQrGqm6gZB(E|DuV;e~u#KZx%ck zSh(Vhe{fU&X^y#c|3CWTzd^FRL(JJI9*M&+#y^L*^jA;lB?Yam<<3y^FE`>v{+`$K zzgz2GBMTw3Z}PW(Jo}GH@BW6O74vUj^U_?+vJM$+=)xkvIiY17rW13aB!6qx&m`Fe?TTcq`06kK8hX`=_g<4WZQOG0^-B? z+K*AFF7(}x6I_*Ruds>QDa9${bDdfqA`sA-saB)Sc7b2jhd`&dOz#SoxpnFwK)&|+ z>+Ey4D!;0xx%n}WURf%nV7|Xma#r0V3^Cv8x)eL>H>eW8X=HEkdi(uaoQ$hR_R-oc ze_fq0s5ZF}%iLGy=Fj)Hxp{q6eVJ((^_p_uX!7q#OZ(mGcIic3?w&KK=d??s>WaSo z5Eru!jtF9F+MX<%kif3OIKL02o}C0xx4SJ&D+F_bzLAV}m09YC}sAOMYe~el! 
z-I8EWQBWvnwHOxpa@+4a-RL9t58W4&rULcDxOy;-mC9Tm9 zXqkUUbF;6`sjiwesF6fGEqbfMR$!I5Si9PAk=`=fph6bmNg`6oU95wpdhJ?~#>mzw#%`YTu zKEJp5I{jFmZZUKfFK(MK!+eB$2nnWY2pdfJYdl}r4|KL#U|v9`qf*AiZC+>+{JpAu z5s^_M+L)7-^2ax{N`EOTe^VED6b^@KKuh4Ki$%sex4{Nc0~|@%xf7o`p)jh*{5Fi; zaQgQ!d;#;bNlaj+lzR!l@md6~kKo^@gD^NzE`gC4!l4*``d=Iv>s zn<^r|eJY6R{1F1cU5|c@JPIQ3p^p?r8hQz5=ypwNgYx`UhX619e;}Usjap9mk_FM2 za9>>yt!KuhZRi(=`z7^NKX1i&zhmOh-157I#nxVEWAVd`O1c~>Y02aZ)wqW+=C15A z?#QaSHM@c0oS}rOh9BXIltMCdS1{J!jJjAqcqI`GC&5f4Pns*hcPQbBF*+_et8(%SKf3u*5Ki|fH^6))Ue>x?fxV$00Jx|fdCVZxP09sy~xAoL;efDE~sTs3Pm?j!X5a1WUD68(HU&j;1mOj z+iUfK(i^`I+v#~Af(a*lIr`$WLI|{Q_bb0I*&sm>$R+ z0?_NaLGBJU!8L&l&zEZkGysbnHOh9;RbH6`ovpp|5x*KiYh?;POj8xg+GwO4hr^H~ zuot>aIF(wU| zS zq#@Rsf0!WA#7Oef3u-NwM-Z`l?Q7#>AawYo!Z~9}e#= zu$>Y(oB^d#KR@hmdw%lE5{R8opIuyy=;=NbMS19mt`w{=nggail)-M4B<_^)Gy9}f zIJ3O?aN>r=>jWvaLdikyOmH?8YKsOCe*l-qf27M9=m|>~LqYSbvr2?=B0W%xgCeLj=XG=mYI>aSp6aClEJbp}~9OE*&TS2hN zf+SX)Hj6|L7B|Hr*30+h)0rq{JB(4xYAN=47@S4oH?AqLqLGKoGlY47n7Li`L>^Kt ze>TQ;gf!plOo-YQ1*ejQOgSLu@ssgqXqXt}QJ+No9Uleclk$Pd+uB!eL_VIC&zj># z{1_+C+_p0(DpH=G0-M>J=@6s^?{W)Bf`F2TH`7gOc;FI%yP;@ z*X%HQ(c07f*c$z>|e}6F&@dvN<)qv}+)h`)<5h zA)}ic|5fvKqb7;Zgu0-4m}?IV;Qh}C_mnqVa1+*Pee=m1vc}}nEJ4&pX#yi0f5|YW zbk>DRX_XTUn|r#cFn550(qWQrvX7=Bu|7oc4w83<74_GSd8pJdP z?=UXCrDBC3q7c6+<+;H;9>+I~e@cVKH}xm8f!S9UOj5@HuGi?d5|`6{H-h#DJ?4}_ z&yf9!96}MFPNAO~NR>IY@5Ha7KpjWNb+suW6d? z3~{7P;NCPNU3bVlZGt23V2_jD1}pjdhN)Gk(8iUr%HP>YXlOfQzWZP!EpcuozSGsI zH~pDP|K7jba|+CFnUCO3lkGc69(k2~!}F^Xc|c9`Tj_Wgkv)cyqOx`7=GJAKI?3*~YelYNt4pH{~ zXk^pZM}c8ZB52v(^Q;>T1rvPv@B@|_n~=KJ5&o?B%+8-%;z(6+rl0R>DZFQ-xZC1RcB%3+>RH7>PWbo~J^#8Iq3Q zkbZu`s3Q^o0x8!oew=@T<`{m26E21?dx^p-aL7*A3Y@ zXhz)#;&Xr;Pd-|xdc%W?=v;7FRY`dIwI&;b3Zjrq#d|h9qd8?NPRL)Z*C~ji-wO$$ zYq)gLJ~t(zQQ&D*m(`$X@V;lMgi60fjaMjIRp~{0#?|m*e|@tswKP>T2t8);5G8@a}ycvOL`1T0$(G+I_`^fBR0@ZUSx^@c`sEJ#7FDKHs$@3GU(YMkDlL`Hb=9{q1%AM0Rl;H z>OC#I9P_P%26)*nU88;^A%ty?{L}F*_u$|pq{q`9(cd_2iogR^^s>t^Pl}U3R+iOz z@qis)DXsNj>*33G+?r`6|R9dXoIA} zdvJUzAXi%Rfb;Y5+QSY<0&Tc^zA52nIA%U1kCV}5#=04EvTgkwZhZ{H7dgO6>H zn@&gme~v7dTsE^{Pwlr5Qn#t``7#$oramUt#HC&tC58yj|=QD_I_p6+CLqn6o8crS$W ze`2WD9Xn)x(vyW<`*)7&6dp1-n?~7?8j|Aj{JG$V{H9Qvl%YPFwVHXUf%TpO`T_jJpp%Zf}MGv+)-{& zR9fuXq(WIxX8JiC@QOWT7m%rMomeAXxIRAGx9>?z{hN?Jo#=r{5IuU@bD5{*2E;ZP zeX$1Q$E~PYmw5r%F*36abY|)9671%U=XgbTNYN@;v8-qKT1DyZKrm zmdEnYI1M1RCThbRJPO`z7k5awe+5z@wj zBjzr5zrE>gwEo8jo8m~3OGGz`0b>i}NlD*unOfmKI%tqW%aaBur`dBB#JMNXVhajh zmN>^W*~E{D@M75X30Q44uo;-!3^@frK3h3WJI+Y+e(p|BD{LoMi`-vC`rU zl0ckI`xYN#kh+4>z4qLk?O{{j3SX3T=`d;nd}0p~zEG}OXweNpe_qq`Q4G&dB5{Cu zqiwJ}4J{sf^35xd`kfucYe4n=l28Fz91s^nf(yt z4w|{(LT%cAkyhi~4{zqPyPN!lo^Y2KmP_6=dU+%+c}+2XNt(F(^6Q-`{Zz(c?qcWS zV%8eOStW%4BvM75f8vq+9*o~}m)GgC0wXYesJ9wn=k-f(l2;gq4&m0{*j8zal3WL? 
z;d%7^JrziK!Yd*Aq)$WPjq{F5JI<)9i4J}c8`b=q2>hrAI+C0GrH|H}5O*+)HhQdT zGss0YMa>wO+mAJZViw-*(yG-orz6;tT>+4JjAXgJS``>Z;j`{JeBRvu+s_>2oWl{7XpJb-2N*6eHfiyQe%CDTSJ6k0FQB}7Ksia3y_j#xhyO?^c2Z6xq7V_vzo z&^gkkl^QV+e-XX9NYeSDDBBLXS}+zh zX7IT$Gc9*&f1o^~09Kh72b=RSb`06~C{UF;q|yY)-p45J{_x62jWoP@JC;1rm_0Yv zsft*&?<4wLRv9GD?r%2W%f)chwvvQQtdWX$7lhyP4Zhs2H4o;9R~2x=*IaN&4D(bn%^}WlzA@9;yvWwE1PY!HO${BV|FG zfmTT*iF}7hF4z_r$XP9uN=*!EPlGI{*V6B4-g zWqEf$i6CW|GqEjbJOE{m$R;*70{Sp?fwaIMZ+dKpp*?`882&e9X~bf1{>tq1;Wj6p z9|uC2P|~&WE6u4LDEkN_Ir66m*h-Icl3$*14Wh#*K6yg)?j13NOekDugraPfS9Ig9 ze`tNflD2OeCN)1Hiesty-;}+{)}u<3E%-nh00JZ!)WDs-@7o!D4-yE`JpElL;+*{R zX4a}zqloxIAGn)s+qTV+8?~$*?;%Q4EZPb5GiwNyZPi{@H;?e+g{gxoPCPyQPFSv7|-Kno|rd>^Ab(<}#*(n6VMH%;z zXy6qq?!+Q`COQqZ4>QoiRObvU=u^q4H{Y#75rc7;AXMH6cURBPSS=SJ8Y8nellweB zkidcT0KTz{FK6?%^LH*GfvYi<+qI8v6Vunn55p=f9#Dxf%u5i2rHE&SKTRoAf4-B1 z?|pAIuSpxhE4J>j6bR-Nvc2ti{^UI8cR%9eGbEeHz|efQ89gcrOD;wHrtFVp{(gEP z^kK%l;QFnLb+m(32 zxNLdzbQUmDC3RyLk2RymhHtu#f3W#ao4OG?&%el`^H`{MPrrs8hUGEhHTNn^U-m9N zgZ)7hmGufk&zt7YmwmVh(}ast(@$r8&N|X6=@)rc_z-T0`Y`ug@IPNQLj2>KC9eGG zXC*;V4_6+~I5ci>Te=L1Ts0jZh=?n)_UbiB7;vso*U1cZ_Rn=S8%#@Wp#>5rgTqTeOOGsE$^S!;2fI zqeJxZWV6ofxvJDKS5XVh#co7Fm&NNj7jUJ)( zMyfB_j-ztFt!FS6ocD=Vf7DiHer_FiL23PkY|UH8UW9_K!tPPz+Ts2(eeG6#J`5O{ zpMmn{$T;)PDbs#WhyBDzU*uoTe|Ij~-EE#==696NsjGBqY=Bc``2+)_hYj0ptb$`E!Hoi z@IKb$+o)jOKEi(Vh&FJHs0p#iCxX0~Es~0w$OPa1)jqe(oN1Ke-1(j(@fSmhK8lLR zS>kiLb@#)Br2ox?e|kLk9m9`&v$vOg`#ca@pbKUcC*kSDfui>ew_?HFPC{E5PsEtc zBQvqdYLnWZ@1Gt!P6bc-2D|$bV%s>mxs~f7R`L17Pv`cPt-^@Kqj(Vab|SW&5Q8M% zalZ19+A75d%j!7&A)RA=qX(JB$i(J6^)mOo!KiR!dU+MPe+@G?c_SDjQ{dvaOR0@s zprGDNQ(0^%d#A7W2lEw7&gbduaA<=fn>94XjNwYO-GSB0TTgd$qJB9ysU68uF@g$x zX`TGriL7P0Jv!4M_SyJ6U$`_-FC;T?h3(%jOMSQKyQe*sKd9F$*Fx04aXx~K1)MMp`7Y8wFK}lD>+`T-5o5iU><#HA-AlYWoTI6> zCm#j6TU4F>^ZVL(`F(uM8zE41EVnT-m){f362GLm(d5}$X5_QJ0pbS422{4TsheEe z(pg%Vf22AqSTe*fT2ZwTgMHcBbKp$(DYMS-{kqw(ztbo77O;KyOwaahR}~s>?@v zKSu#RFJwy&T_G~-5EWmH+IUu~zjG)1u{0oMe})ee&De*gqXZHRqbW{y)S|+25}@3g z`6=abf#y#>m;g+ejFUbVui6!@_F{^Wc9#?#J4Ayh;}FF_9;Hw&M{;#3ec+D{1bJkf z0TMP}H<){0`6xUSq2=C5p8?T6Ncnm=aBoRu-*^F)sD@jPcz45l7r%G~e>+-l+iFQM ze>Lxi>drMD;~>$_bYNZY74E0^r-99at8d<6t2ah?9C)>fxq;^o#2%hr`E|RGa8Ol@ zrtJ;oC#<{Y3p+I&i^uFMrudK;bw#yJcYIO(gc_x14j$Xx`WjD+V`A{o54~gR*U5dO z&dGOQ=eS1`A4p&Q37f8K@C%1u_nrNt}Ep?G*=S>ZB%?B-kgieBd- z-mW?FDL%7h(bQf*6WzI;CF>w&?-<2*3Z87M6twxu{G79f;^}S9*Q$0~Up)^_lCDx*5o#4%GDqk{ltJKnJEZMxq2vO0Cj)?DDs0 zFxcUH<%)O{&#AsqJ#M7R1-{<*FD?*WWor5C1nd6zte(sr^(JE2*uJ8=rTMK1Y>!Tr z4h{Wh{H5~O?W^B3Z{}2XnA8|Ue;&SDy&hxHo;8Mj$4muti1E3-qImXl7Yzi7(&ZDp zErmdznlDmWe1ZgSELWD^n|n=d_{bX|KfUc4wa+y*#lCqyROg|6?8jCk$gQ^bNARTj zkkspTc^V>Nx*pGYGAKib$|E()CH-+f_V&foi9wYMP`ErDaYuSl^vu1Ce+sR1hDH4J zHGWKPPr+k@{}c={uyMD%6Evav2_ijb91KF^ReACcA@5*s}f z(ondBUzgYCS-rmd+hOCAe(v8rlJ>8kx)1R-Q*sWCx$mPA{eH7!dRDWXmJv8rn|@R7 z!cpg{*o(Z{C*96A_Slj4f6UfAe&=Y@M51$i&V>fPUwce%f{Bh4{7RD%QxjX-l!`Tv zxm=PU5jxO2${qRAkD7UW!qxXDc0`b<58{tGXN|BqY5(*yy^dZitl~GR9~IGtoPX^5 zFlOJN1YAFA-4^5MD1U&zJ-EU1^*GwG#{*G<>eP_b{VYO!yJ^KXe{4+)TX~sjqrS-> z7oRI4d}SomOMM)Pr(S2Duy2myjy{?16yI0leBEmKRkPfhRyZ|4f(t)27w}ARa?I|( zW&Fiw_Z^m|uv;H}0!+%pnrufDJZv{?nv|%)-lx2Kc$?Fw?Ss2Re}CGevYtFC2*o|_ zGA0YCX?5R}QW)>&f2?%Jt8CK!@fPjuacpH}A@OL6gp5}-&CXIKW?fM+p4%q9Lq2-V za5^qksfnJ-c5^ha7kE90FkfNiwsA5)r_Rj-;&yvJpQtVMD?bo~W_-q%qd)EK+^77b z5TQWS@4&6NXH|c~FqH2iip1a5dr@g9WM zh60KT0BbtY9?(_pSK=noA4iqO(Q`LX{K}ZxsoO)%HJnn>)JPd#2K&3vwjmo??)i#_ z?L%X@jp*n;TI|hiOTSd`eJ>Xi1nuLtaGOJ>HFf1oB-hAVC~o6`2cJv{7lpEeLDQwXI^RQi!*@15EBvvFth>k#>{l#5we_o2;6 zV?{^xE`%gY2URAxdq$Lyb51(XnNMB_Ssm5U?-}=lyfrmFWb!t7byn< 
zN^@Dgov3}Rp7Vtpkr1PXcKCpXjYb^k%|!~UV6|pa#=1Y&Iz&RG!C5#yO(om1K$VMu ze~@O|xXm$zJ<3y~k=xhUWj8xTWzz~~^7j|}LK>A2h@)z`lQH}zc$rA~Ry#DM)!+#$ zv?gX}qg8q;=}GtXJo6G|$Aj#1U%nL}C=_-bYu8;yz+4QvnCe=C+^WPHt=Hp(jg%DJ zch|$axTpr|9!j*<%sPm26`JCPKl=sNfB60w7$T^eRitZ`zjbEZVnoQi%--MGfE-|@ z1|&xwoBz_-S>UkD0Y}0(jcOwrIqw_sPL0E?iZ7fU$Ooo@e3fLq?FB}U?>zFtl2lE( zqfeNg=vf-K-rb5~B}$d0+e*^hW~J&g*=54$t2DV~;!H|RxC3dZh&`De*e>#Wf5K8P zdavBd7D~xH?Nr}bIdi3K4*W*TH;+1<+;svge7v-S5kx2HFwL&(wbtPL-~t&eEzb=f z=Annxb9TFSZL6NdnLMsuTRSU8)7-SW8%>q%oHJphs2jDHw6#D%YdE}HT@Gqv!8r5n zYwvq`qg8hpEy%onby`|z;Ua(8_9>p?z zw4XgX7dNDqZ%%eSf19s!&_Q<}%#j(4 zHy)H~z`~nO-(KXfZdyS2<{_B}FXLacd~logaEC5TrlUtGI}xgC@%NCbT(RB1WRhmV!PYGqt_9U!RsvB9wEqzL)SRa8g%!2UX?vM17qB}4$C&OtKuBDp#Yb5JZDAI)Q|C{ik({PfL!8># z_>IRZp&EFds^YzR^>|Z{!tFXr-fZHIOCKuH((m&b6Zf=0f33z3NlRcUkPXvAJbUDZ z@~XV3nZcq7^QoJg4!?Fm*U#wd+B98LwP&!b_2vKCODurZI?A|?0!;tQbC09w{ zHpGM+TzMv*<{V}B#CiYWyhE&?(A0hjkrN>OUPB1}J?a80&|XJK z;AXJ@bqL1+P`Y05GVbtf9w&N%?=z+psWTm6{lJjWpgAKc=l@-ORmnG1S%7^O0H1r6Gu=kWm^O2u?Ib05P?=F1;0|EQ5P&vXyMB)RRIu@W7khX^&$jzhu~N7_^-Ww=a*is>cBmHxpiMTg+e^a|FRc1pGrw;rx;OS?^Q#Dz!cUzGb z&x}oII3vRk0{tD#=dCIP?bsX-6x(_%woXlP;^PbC-(%e*nWM1JL^s_hXB4B@An#8b z!fb@@xC_|sDw{)6ew_JE#;NOzAQfWL$(erL&TvugiX^DW^i5GqzZYjzBA}Qj0;Q-~ zf7-}*fj&`pqAclNKtk--z4_ecyFKTAM&vz%z4w(qH*1WEVrb;&tmB89+>7_vu=Lo6 zSpB)9&7L}9#7T{qey6^*v-`o%Dk*n#MBely%N6`n-?7hVXv$2npD)xmYM~K-S;nY% zhlNfHc#rj;OPJwLQ|V`sAw&jmTSJ+3f74jv$lm1%Cg(Jr*g~tqjoBXGGImZ;iSzFH zw6SLzSN=k7h$l**&|hsN*i|IXkLO9Vy!7>uqeQ}|C;`;BEK36GB|0id+y}jiUMFuz z0_-CJzN62J-89zPZd^Q`AktcCmjEZ!RfJc|_7yvo*otmVqAQ3dQ;0|ZTYop3f8}Y} zvAsxulY29Squ1`?nVHW`oQr2yQ#JxVH$_g0E0s_hxA*gB@I0b6U>fk`pVmaIm zl|d11?wsG)UE@+{wc^|T9GlMPT3Vco81EU*;_r1q9bf<6?0Xp!ey(?~xcq*{ZuE=d z79cY?zcHgQuK;i$lC>74hk_DC_a(hP@hU+?HH&fZak2hHsX#EZpmQz1cb23^SAiP7&+8e|LA^^TLio_*BKwdAo!q;2dYDZC^hu_^I&YQ3c;{vIhY; z&y~Aq2ibl4 zX0+RcUbK%!PjU(S2o!8GchtSZ+gD%6)ksM&4RcN%KBsxV_sS;lIb&YuN4N>~D_ZE+e71#}F+yT!Xf1faqr32a5PHp3fvu3rI z(XAxHuoc+$bCb4SNe*0ZwP&4!5wjfb#f_Mb_RqB+!Ve4VsX1hoz?xT}MfT_YdDn`3 zi2c*-NQFPQtal!UBdAJUvd=SKg#p5zNT*f9Kt1eS--kqs+}D&PnpGHtZwh;GgbgaB`e;w*1a^$F8*Ba8&Io)D6W}=#blRdg< zowA)LZ!2+gBKm|DvC?S;&^j5{{*>fIeyu^)oJvo7XGAJtzQ2N8-2z*i==Kzzpi^TF zH61T6K9-{D{)@iNT}l>9W9NGD%+s#RZ?tFi>!(;d5vfKg%o0RMx(}@Qw%qG)f}b)D zf9gk2b=j8i`4`f@8Hk}d_j*8UX7XkfY}-=eO++bUyX$ZtTY=G?e*U-?Tu7(Z3JXPs z?07Y1#y1Zkv;+09@T$@_1lZC%ePI{=H1&Cv8X4PNM1+p?l&NK z?$oPH)O-q_s@jm-)6KYfdNWB_t_A@=e?__ibzOt6N(;zT7@AY}CHj4Twy|@bWX-p? zMmYUiFw?Y9c5*^Q#LQzZFm{Xc%r@X~OLpbX_9y1Kp7GkI%l$b&qECf6ujAFxADWVH z1be)NiQ|+p>Z>|pthWQ0mZ|tw=kH2pPn%_FLy|$y2V4D7hQSYH#rDEIakaOWf8h=j zzgvyM-N8;LS6Y_^HTD-|wv(M<)2Tq|?sznz!b=!|QQ=V}-s><8Uff3u9;g0!Qx0k! 
z`P5Sn)V`O=>(ufGE37X1tfmM%&TML9;(H8|zD`(_kDg67@yC5=_gJ?joIGEr^;A6u zjc)dqK18{+sN<8n^`2Q9y-uLOfA4KH9M4ZnnjKA>8!Wz$DWg)zA#Npv^EM5byUcrN zzaEH{A>~wLr7>h#$w}t-4MZFhYIG)-?C||Mq!DCHCzsT2us1$>Ivlw{e(8`M=)0fa zHl6D>o&#c==vlY3lMFa1taU;n-fs@o(r6Gm5+0%*2-P|f7AUqX;p_!re^%{&$OsG` zgWLK#@`Pw%($7AahU`SOy87@XKZ~yqBZY8|tH3M18dONeo0JU^`vQ87=>XOi%U~>; z_IV$pv6$J9;PPO2*lXO)Cz#NXE!#U{>Nmw4&O9OW(yKY(JB*-DQz4Ud<`Mrb$~$;BIzM z`Kc_;b)!Wo8$5S6?&BdI(&2dyhLDNsvs|@<*sR4sUVXbCpLcRMf4un>_Swe3yE{l1uM!XUFmP1F1#5Dc~bNX$;wIdfy#ec z-Fw!ZF1;TwJBV^L=a&gH?=r|g32$`5$9XKA(T5p8V|^7{B6w zgO!VEU#$xZSMC6pku3-mf7^VtDmIDW6{Gg}8%RRMBOKE}T5tFQ09*d9%?f{~mi*?Z zKrQ|KwH{Nk9~uVr;q|N6h$&Vh62Y%yK^v^)y1(~pN$A6GO3803$u+s;*9>CpDGhDn zOJ%xh0igey7y?KIx{Z8ybvOwC*8Xj7`!ki~H-!WMfXct$hiepIe|^*z;86i!&%Y*; zl?n8jtCIdMDge08uN|U{*Ax=?n>qpjY2=D0nkb_cVvU^TG#!2-pWq-h%lYi4!-t0xMd9wyoQCIAH#^3x5)*z1{b-S zBqvgs)R7E=dcM^Tea{SV zXpMYif^vzrQgH1RR*}fp*`!s5)O~3{jKkaMp%2^bvs9Iwu|x(N^bf|@`3$I94AB6-FCSyr{%`7RGS(=+gN2y zuu3q=9pSTqe@{woVBM5vi9|>V+4+#5;~^%aJt4c{W*L`^Og- z7A{}p5m!k*yAzyH3Fu~5I$KdSTfZN_Zl5S3w#9wGfBNfw3d=S@;7T*Jy+Lk%cE$BJ z^I^?-Vx>XV%Uupio)X?9u!B@jvMeEYtn78>cYQuZtc~-)3IK2Y z*Y&y63pm>{AjmMiy?o`N@Ixl~Hj{3s7^4uXc zrKxW;f0vej_kkPS0Q&{A1;pQdDDb<_q22W@66G_(q7itIUQ=_LM3>f*T7EnXy{#kTL;o9e+}_bZ&(NDPKG) zOSFm7+yDg36Zups54s63+qIsBzt#gBPLym;f8-nvmuwCP(FP4Gyq!#YJE{z-M+JGN zy`feH^`?Rvhybu4iyQ>NDn_~bvvAfF0JuZhvr%s^yZtx(3)g{g){8`Ww_4q09k$Ol zp@uGFbFLUtgZ!H?#R0Wq7qCQ-zv2%hC#Zh-=_NO-!NSsMl`&nhO+Wi`I6_&h3&iZ= ze<@+vBIeqU-dguRoFnV$SP)4nSQZtLfW%fh@$1xA;DsO>Jb?HCV7c&f@tp=OaPb}FFYWXm7hyTR% z70GqonI(69%Qa#zDKWad%@pMspmCgu$*_~GaKdcKI@!!aFK z5OjnFk!J)PF(bB2%dTraVn4tZ1{@?J)r4%{sSqUiE&j&FZ-STwz6e^uf7KEd0Ex>! z#xEW>PLv$U=k4s@SiEwv{3rkXm)~{{zU=mf*qm&lvXSsL<+$2|Wv!1p_^jD>2Um%T z2aXNeWa)O;TiLsq7ktg^Eu7i-Uw&3gaV~(XG55V!rY*l_GDxCS?sO`Rk5b%C=QIZA}e-pWb3}vW`NEIBts08r1)xl&<|t>7 zBAFmNH;59w1%1xH)K6TM66!4u;3I)wP-9SB-l<|~LkGQ!V&(kAdH>R7mpCNb*F#UX z#0sOMbMYOfJFObTe}UIscuuc)!+;oEI!oGs^l`Y1v!)Bry}U)O{9A8!*Y_y=qp_}d z`*F}eYrf+D@+XY9I5Pb>)sFaX*Q=b}9jxv;O6+Jyb!fq4$pLbeko7RrjtGaMcEAIMcoe3KR?O8;}9iE}Ypc$e0Ufe?R`euzuOJlK$w;+Pbva zA=OnVIb0|((!bi1Z-_=}hz3O3DmBt7{o(lyK_#$kqI~IE@fXkDKLA-kroXz@`A=QT z9e;J*@y8zj##sygy$9T=FY8D$vk{KKP>oRWqK z+e>0a(YjNm{PD-WcmopE&MJWev)yDb=cp_1 z@?h?Od*#1UjME=PpqUX z-hLdcGuaA$`@25#4<`xM(rmK^?ko%`OWHCES`sVOlCExvCD~8a?5(%C!wnvjBimle zTh$zO-P^W_R)6fgjpKmII8?~(mLr53<^&luAJ(h^-UFUjL;+C6Y6}K_w_}*ef!$dd zYd8H+O$_Go5}-0}??UFRU(T@H6^j}r26YuK3Yo)NSU~C8F{RuQgGZwVf+5O`% zO9$(Zs((RW_B>2=?j))*iMe*K z_5nl=Of|=PZUHxw-FVJ-8Q(=OlRE_qNun-;QsuI!PfsHt42Xog5SM#t>C3GcoG6IOg8Dh26X6w&cn- zNfKBh>#9%vmu3!&T4B~#tF5TRdc0GJ)_>rE<($)@HeS!Rn;KMq>cj_7Ab1nt`c}Yl zX7eBZ`Q^1M=Uug%50C6G&ac|%PfT4r%3eGJ|J3>aw{P_ye`UcRzVZZkL~z}$oU!vk za(33(Dv^g$Fxa3`Yp#MU&j>sWn-i<`ewt;{_k8A+It>(>dJ~w9iUe+1gi1c)5q|}O zjqx^1tfJKoO68d`%q&<_uHL|#SaT-uY+sXo%Re_yWhcq!jVoPc}{1lDv-9w z{PXRbFA`hw=dy!;*#Xt`pB>yR;I}_}|L+>+@+ZMa$B$?J&4)jm{iDyDXjH50r*r=D z(fv=3`NhJI)30924_koWil)<*y?@L2mJj(PZ;i7PVYNwclM>1DEp6`-wnl65#3X5- za7&9ZoBy5s;{n^?);0FjT`m$ zf8~&WIrpl+z?<0w=<}*u=R*rxE2KuB*+b`m8`FA;c2bM%Zh{TDyI5xd(ovRtdIdNk z6t43fess5k9SCshRZA|0N`LFs&PRiLWvfl@&Dbk956g14<)hl{~A+QEOx ziUU2PGWtUVlm$@es^snl&PHY@>BGM2)k}w$2!Y@WQ(H<3I1RvAhw`r;_kOk9Pm8$L zf8~}c68{@s&Itc&hbzu*kj=thoc&X8ozlPBRI2z*gVMwwlFe9(;(zp9#?*btx>YSY zo+fZot}>_(?jdh-mcUe%$?TKD22`mH(XMm-l6G0D;MAbpKR|Z0uj_gvulu#_dIRgo zFXpr+wpkFpo@V8GJNGnEVAE8_wj}nxp_)rfskhzq09xZ6Vt-~yp9pD5{uN(50CxZy zOgkon*`&YxnyrxekAFSyzdC6&E9v5&-@Y#m^Dl0;fBEer{^rACML1k3*F{s zx8s(ykzD0VX3!#VrP4~aMknzgB4(EuhqE-xqMmUG-g>2!9Ouojj#So4YzEgS-Dc@} z2cFq1thEuj%uYASWgHP~fyYduab|0C$eVHZHUL!Q`K2*XO@CLubO2jjI(C&U?VrBw 
zf5yHKt8DoBxYEXdbmu>M;&1QoKXT5WTypj4q(kP}eUEjU6l{CzEirqE!PF{ly+A)Y zguvvX>gHP!8})`YF;c79e?SGqBM-5wTu52v|^2Qk5?Pk8_ zM0vMG%UWQ_ZIA;W!8)Ue^Ycw~vXS42Kt13?+$_v$2}e4(K+n9ItM;a>h)HD7JM?VP zMLF7>yj*O+0i&p3>L_5{DiQ-Uz4Vo2B0ZQceFeEoUrCnrx8HR6SsMRxz>Q(E^FO%o z{`c9!uYd2vTUqm`cKI*u&+pNn+Uzf1g3+@$mm{=5AZ56VNyeFG5;YeD#g1E8p&@_$S}~@DTpd zQvanf|Jm1{e`n(#y_NlX<-dLZa`WGM=%-OG-n@D)9;!2cG57~#|IsJ=>C~%k`j1+W zV1Bmy#p|y(&i<(vr9X8-CG;Xzt#(ZeNz3JuwPXUcVvC_Frn)LrsGEbN`b`8}_mRwnH` zYXB;dEA;ANo7&DT-jQwI?y;clCJoa*cGDe-)tQn*A@#6SJQMlc9lOn}uWHT&#U`3W zUzX(8J?xvs;x=-U>w54#D-dmpl$<5vynkv^7L4d!@68F*gNIY>qI9D8g{Z(O)edgv zodprWVyJecaMtt|n8;O6&6XP7)V4oQGsO~V!AV@WE-zdQ)eqP37sEfU`wvaH{jvFv z6R&!W&940Rm+SuV*_2tMA6_AZzc#z{y6~?(tE&e=u>bJ2T1jx> zqg<=Afq_7&woGCzUx-{S2*~{Oxs8`=tz_M;;}~j@S&ju&#WI_u18XW+>wjYy&=(>` z>%{BmYcCy+Bi-F&@+6nmKyR`9o;jk!DQvZ6OvOQ9MXS?W7~0)c2}l;)GK^!1g=M1_ zR)oTV%z@B?>@hj~)}i4csoH^7qa$U;;l&Kd3Jy?*f7c%FeIxko*Y^^T!((Njpa6Ct zxt?#hUdC71Pp@*WRzUWzhJXLtH~E7#>9WZW!{WsU|1(FAJk0*!(7E`85HJ1)=fy>T z`1$_>s|j#)MLaa0Wu$M^fI?_J7StcXYOGd~az?)1rDadL0KC=fK7<1nFRvEa-@Vj; zmCV_k&Vk8Xl@_ov^kcsGV{`Ye5LskGaZ7}K7y$hrJySRU+^3M z!SC$}HtND}df_+dxL^MJ|AA3atN#ygZ`Q0jtgVavm%OE@lnt2f+?Nc=a^E1gn3oh7 zkc|xn47f?6>R?i9CkeAt}cOkt} z);Y=O)@=3M)S?T{Lk1aWs1NBnZ)_*GRhd;3wK`FNtj$c{;D5k7>8$Q8A0PDM;9#2E zNniE!x?S&Owt+U?D72n)KSgHAuT1ojew4FhtG4xi-7Y`z>`^GY%zW_`?T*KD{W`$k zyq$ASIUm1x9v^+y8rN^kA{xf?6IWExu%1YNZS7~R`@8<=@tgRy$Gok9Kmr&FnHn8?-e1Mt4zfcV^H?s_!Yc z0-2kpS`%dZ2*jt?#ZOfUBceuQM5wn#;+afWt{R7-<9}CF$ylYxRaagQE4N+V%&rNx zt@K#8jV0$|%;5xP#xA5=_EAYc`9$vDy(z9KLW6$J0R736ao_AG7W?Dx{lrZqZ5VqV zSIU3pm62>+ceQ%$O)M8ICM{E6L9 z`G0yqqk8zOyBbYPv-)J+$wg_GW{8T~h@-1ZZ&5^7r7kTKL#b>;ZvsK}9zkg9UV6}G zNIGQ^n60vdT)M0~Q#A8zqI)o+zTen>b4ul#ueV6)ZfJ+Ir|sgU)-mzz%X+ND^^mX8 zc>KK|*13C7D!$HbwaixdI@h2){`p<6bAOw6rcmu z2KL$Xvb3cg4jIbe6bJV8iqczzwvik}KoQc+Q<-EW&34{@r7xr*ud1Ac0-+@z*A3tr zjttDFD8BW!cA?>U4KmgL&OvbhjO*;5SUZs4HQzTkseQ*yCH*Id`CZ2hn2Nt*+Ul1t zRn+O~Uu(~a)ARV0J|5(D(Cz;Nj%j}759vaUZdoWvVS&U4Oz)S zi43JKEj+^*g@xZsWzmJTmaoR^Tt9-JmTrrXBcpH=9Fi;yk8Ypt{tN%cAM_yi&+Cxu z|6zS^G}V9;`{a^SZYQ~}m%nzFuVuM@s8W8 z2X(6JUKmo0mZ=Iimz?d@145=u^%qglE9+@3d(6|D)?K5er+@jR^*a{CByCMb#_Ptr zt+V=s^Bd#-F#%mMaW>vjUG;)we!3p^xJz!jSFI@$d&3j^aLis2X`&CF@+>fpZ+nG+ z2OMIme)H&_(uO%|No+S{bC@0;4n9awGe=nKd>thxjHlu?K@dhV7!saQFh00W`oVSf zVdu2uqW9tSaDP2WM(;fijk%CLrU-_ls33lKaccyQOFV6Oh_34JR;h(rK#{pLdiBNJdQH9XB=xIP@MR&3YRW53YXN${J#bk{bwS6COG)^5g zH!dC%dw&@WOUGei%V@S4)7o#JMzxyQh)hSLgW?&1Bxwb^5hM{lFaBCML!2U!+f~^0 z7{2Sw?k5j5Z&FoI5U>@q7Kjcx9e6|!@&a;Ao-HMFxjve3sgP;$`> zBn8u(jYtNmN#|>)DETK-F_66=QKg21&H3=i`A|ctH}GL=bN$PGw~dk|8=K=u+Ryy* z@qbf37++EEjIhdQ`Z;6@6yX6pTlQJB4Hd{yIP;ye_=c=P`%5^trF14!(Q+CQgP&3_ zP)swgmr+TUZpZPN1Agr&v^w&@yaLv_9~~gd@wc|dZNLiMcZfWXR>VK6$SCFRL5DD(CBFh*KJNMuR0k-wDb*R^6 zDTLvhblC|bz2trxSufv^rfAG@qB#8G-|CBw!~|20f8)7|UkrEro8$bn>Bo+122Hfz zTydX@&JRAmO{hI~bObAT(>*mGk`2)Cupwz8E6=OmkVT?8)Wdpp-hgp9S3dH5S$`L^ zt%sd5=_E<>phEdvSClX&?RJ!kp&p@iOUDs){BZA%O_CtkTTw{|%rFoL*2_8UZCq2h z4*p7;ayi*J-#Gf$j%tJM`O39(wucxP`Ue|*Dky4Bh*qu9krfKIzSGpIjIuF8MA8|# z?){sI+Iqfwu&JW9dk@>YtfTE%9DiH!62FQG!GXdoM`_0UY~~o*q3{;BeH7_D&fyB9 z8p&XufunmlZqJjp1Z@1#*F#%Ac&RM?#58{JYfUo!=p{~5JN7v}tCYD)tDjMSAP%#M z;$7CRS7az}Op_|D>=&Zp8A^()hl|oZ>8TjVQM@UZ2|iX!rAt&3n&BI~f`4!l93~UE z6ONDl$dOkCz_=WJ+cAvikKe{|vRuyjxY^o(mA_~5CWDFQ(GWpJ6qYE^ita8;?}+J& z%cN@U)pb}jzUfKf{;~}Ztz@gz4&-R*8EbQAgRxpe)4KkE+f9T~f_9Dh0-SPw)t|mt zk!BdL&$^tiA^bs(u1+gqC4U%ltRKAoCvVcD&W@BU9YLOcc$Q-ysdAKkQ{6RN6$g?= zC^b-2RYXn;M5Fl{9pX(hKj{I{Ei^Aok-g2fe97%>ziGw8(_b(AhFV4|A}`}#J7iO& z*mDb2o49jD-M5K?v~Haj%tR4(1{^TTgi(VO%Hv%2fNO<+!0Dv%uYZmF@+r`e(oDoM 
zl6-~T!ETsJJc1ugoFxg`K9Rmbp6>0$jy~^ghls@Wk0y)~yrSBkn|JiEFYwVn114sW zZLYt)!Cq{B?Oi9SI$wKzV;Nrz9{1s+?{7c*2R?JZ%Y=1bdbX0B5lYq?vh5RNJ#K7- z5jZN;JMhDlKP&*zA+fA#dRZtU!|n z9$YVIjQp-+P!?`9MnBlD-+m=m)TCV3aen^F-H?-ul?A5ud4H)RAO>QEfvB!)O)EGJ zQ_S-dAxNvJP4>8CW>V*0~~BEqm_j1 zIm?xn+HnSlkAGzeGf)q3Bp_4Rb9q#l#G&h49xvZK;;+wQ3^6t?3qQJeS9)ma2N%Qf znlL^xq7u|lSAP*N=v<|4zGFEa>kdeQeRWPjCI$M%;fC`@H=<~eVZWIzbR5^O?*{oB z@G<0}0MF3Zn@_&r8|(HZmD`OkmOlIky*jK5?a2P!W0k7+h+{C59quI;F-gai5{PV< zKh!t$VaK>^U)6@@M zcs;3Kcw?jciA?N-+`^1RxB(3EThKwxxrS4Q^PTds+J5ejG0_GIKC&IMkAJ zSmM6(w|_wQR8Pjx4qrS6Sr8tO)O;UDHt=g}|M2T4?(q{#YzN%YuiWa*Afs2#*UyW1 zRhxPbL2W5H^cu~Kn3}#BNGb+mC^)gVKIE{OZMZw(C((r2TF7CWqG`hy47=W-1#j2%FKqb_?tf>*^H1D=`Ro%I{J{^ew%@qf(AMvI ztVRnJgG|(F67=M1vx4ZEo#~!l>SKbStwfS37sX4|i$ZKUt6r#Vd4O6{MXJ7vZQ?W^ITyUYO>E9 z6Sk@Y>>_(JM`=^O*1yVrg^5Ql@?^;rAb-1U#0WH6h%QGJNogqeMGFO76`2Ku=?CH^ z#7~wdL$+N7Zi&H`mf5^cn*bJcY`7ugafDuo-D8xt*K#BoqP-y`?&W54nr z{qhF;>e41vm7n-DaaEPdw0|;k z4uNhJFBSDmdQ}SJrp4hQ&BZoIr$}$>+E>WUIg5MT4E|Ovj3|EcxPlyl&;5r`pO}sK z$t!)^D6@}_`fG0zUo#AH{{8(I1N*y{4F=t?(HV!Gd~Nq<-^kTqYJNV?05w#`4fSFF zB$P45�mLlz_jobX1$rkSYX;T7RW}bHsAZ_e{!Jh=JKS%*Bx`14`Aj(DFmBbj8l3 zb%#`X*Oi&?ok=feJe@2Sj(oG6tc19eR$3!Ii0@E@ib&}xbA5K!JzJG&K_RS|(N(2? zXf9SZ_z9*UQj{`n3-aKPv>N;M6ZpFF`WJq`cRsfAM}9Xv_V>-#PyYDXe1C5nADa{G zJ=)Ti+`uOk(QvAG6t5dM(q+2x0|ipwK53B3T2m0c(H)~OG(;6C_TlU{><%8?sF)mS zD9sU&oLq{+I2SJyGX?u>ziydew97Vr_hDyNp&0WlJ{%FzdUO8 z!y5Kq^J$a(<>P+%Q)g+2et-5^+D_Oy1%j^)k9p;2ly>6J4Z*?2kg`!u24D}M_! zp(~Kfv<+YYi|6}&mMLtozY51oeDcg($A*7$Y~2tuA8H7oWAdN(xw=qP6qtPj zCZ6S-+KOz4iqBBq)G`X2v)rtn8WoRxqjjy_XgC#j|B9WUi(=qfA*<- zbi)x?dj0k1|HMboy|GQQ_{2keZ&?_fvX+rF+l;F+^HUGC_J5ADrH;-zdUlZ+T;O>Y zNoV}1fSCT%a8^6m5B5(Nt^L)FU``z`lB$%y5`)n`4TaK7Gc021I7tAk|;i zmE3V_r=*!}Tz_2G)uoXFIh|=i$r4;FKKq6iBcV|Oj>8-dTkt~d4|dYl^cJ+CZ5}y0 zz5O$P8|AD|A#ya$CwqgSu^gv<((3I7R%=Y^r{<>wgN-j93iNilbK>GGT*ds=9OImk znO;^e=TaRI*q``JBk}mmjmJR%*)n3O%jb>fjGPZHk$=CR<7oIe4Ss;0^V~<;qZKH? zn#gQZtT4WhG{GAWIcjH*tTRMNw6AWTkOMSmBP>iGC%3Q@31pt*G9qdHqjGcIV_73n zp9Dy(9bJ;ndh@ZfCR3Mi820tO06_W4pEw^3*)WHwC!+)LJ(VG$&4WY_u9uP%u4=lRorM}2Y z6wYj!-t28;S4N-Cj(C{o!{FzEfw`Xtc;L0a+oAIJ>g{*$L!+d5Bq6>x$EQYso7tFL z`1Wy0T}R$6%ufq_LK_RbaEI4aky$?Af!4wWn9lw7kA4`|*7Rqu#IWA^=!>t9!R=}{ zoPRCw!JM?;cwnQJs9D}+pQ&ac>ME*^K~^M+0$Y>m9`Td)zJn0F+c{<-fdf?!*Hp_| zy4H(Ac5~`F$PS3C>;KxHBd_%(?DlLpwG|S;n8yV_fBNSqo#zA~m09z9a_ex)?dP=H z1$X|&jogA4qBSTTBun&YG=Kv|3(!L-7=J!ea}2LgRvKpm(^PJn^$1~`MAUmBiz-GF zzYr_bE$lVaOWK_=3#*_%S<&GwF=_KKu?UvPT2FQphc=T!KBevvmKRy7Dap{A=!{kf zi)2ka*HcQeOTB5UZ^(uDcVy+Ht%4nL*F+$pi@dvH72&cCYh_bMH*fP&&U$>!_J41s zuQseb%E~<4bS-mLQY6PVAFJzTYiDJmZkwM|Zimxk*1i^%tr)!cy?;C0C;Z;z_ttPP^81qCo1jeq)qFEU2Mz-pED_`c z?HD|vmOx>S2r|K23JbpCQyX&wN`e!9Zb(ajGK&fV!zPa-=VxKT%#>UD3j>Bw1#05} zAW?QXecu3L_5z@m*8qFyMR3790Ae_M3m52@pivqF3US}S9z+5vGj;$q?|+{J-kzVc zz?l-Q_&G0(K*Ur92t&&T9M}@@n4$ub(1_0vDp0QN;x$q7~&r#`A4fDZG~uGrAK-fiP$E^gS2pKm8#F zprZ>@DGb|gO%onNxYmZM;`{iANUco?{?AWx7F)Cc3s?N}qk=A8OgS4fsYzvhy_7(y z@Qf0~rT;vZ_yN~w)>zxGMnJ!nee^^KANvV_1~B}<{|e*3*u1qlKYtB-O+f&qh7Ch= zFoCH@TN8xkrzV_y%6V#@qHHL4Z4d-OkOYAVD?ymPg@dr_g~@v~mSpJ=|Fq-3`m+Em zj2lp*#ES~1la~ZA^ZW&<;rN!+GTqlW+a_wl@-j+l(92|B3=70$VxN<&&h$M%ya%A2 zLu#%ciNk04TlyfFZ_lg!P^JsO zbnHpG1-8zwMJL-@Kte6=fa#b{5U#OOYFKTcaO9dv5rlfGTYzcj;5xGr5qTTj1{9QR zqf5vAe*T=b2Vz+un+u5On4NXs&T6!kY4<~0>~)p0+D5bndVh%@dIfYWd$FU`uvKFI z&Mw-9