1 file changed: 3 additions, 1 deletion

@@ -7285,7 +7285,9 @@ static int llama_decode_internal(
     // TODO: this is mostly important for Apple Silicon where CBLAS is still performing very well
     // we still need some threads to process all non-mul_mat ops, but not too much to avoid interfering
     // with the BLAS calls. need a better solution
-    if (n_tokens >= 32 && ggml_cpu_has_blas() && !ggml_cpu_has_gpublas()) {
+    // MoE special case: this cap applies only when hparams.n_expert == 0, i.e. the model is NOT an MoE model. When an
+    // MoE model is being processed, Accelerate/BLAS is not involved, so capping the thread count would only limit performance.
+    if (n_tokens >= 32 && hparams.n_expert == 0 && ggml_cpu_has_blas() && !ggml_cpu_has_gpublas()) {
         n_threads = std::min(4, n_threads);
     }
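For illustration only, the snippet below isolates the thread-capping decision from the diff above into a standalone function. This is a minimal sketch, not the actual llama.cpp code: the helper name decide_n_threads and its boolean parameters are hypothetical stand-ins; in llama.cpp the check lives inline in llama_decode_internal and queries ggml_cpu_has_blas(), ggml_cpu_has_gpublas() and hparams.n_expert directly. The cap of 4 threads is the existing heuristic that the TODO comment already flags as needing a better solution.

#include <algorithm>
#include <cstdint>

// Hypothetical helper mirroring the logic added in the diff above.
static int decide_n_threads(int n_threads, uint32_t n_tokens, uint32_t n_expert,
                            bool cpu_has_blas, bool cpu_has_gpublas) {
    // Large batches on a CPU BLAS backend (e.g. Accelerate on Apple Silicon) spend most of
    // their time inside BLAS, which does its own threading; extra ggml threads mostly
    // interfere with the BLAS calls, so they are capped at 4.
    //
    // MoE models (n_expert > 0) do not run their expert mul_mat ops through BLAS, so the
    // cap is skipped for them to keep all available threads.
    if (n_tokens >= 32 && n_expert == 0 && cpu_has_blas && !cpu_has_gpublas) {
        n_threads = std::min(4, n_threads);
    }
    return n_threads;
}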