8000 llama : use llm_build_granite for minicpm (#13911) · ggml-org/llama.cpp@2c90da4 · GitHub
[go: up one dir, main page]

Skip to content

Commit 2c90da4

Browse files
authored
llama : use llm_build_granite for minicpm (#13911)
1 parent ec9e030 commit 2c90da4

File tree

1 file changed

+1
-1
lines changed

1 file changed

+1
-1
lines changed

src/llama-model.cpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -13260,7 +13260,6 @@ llm_graph_result_ptr llama_model::build_graph(
 13260  13260
 13261  13261          switch (arch) {
 13262  13262              case LLM_ARCH_LLAMA:
 13263        -            case LLM_ARCH_MINICPM:
 13264  13263                  {
 13265  13264                      llm = std::make_unique<llm_build_llama>(*this, params, gf);
 13266  13265                  } break;
@@ -13501,6 +13500,7 @@ llm_graph_result_ptr llama_model::build_graph(
 13501  13500                  } break;
 13502  13501              case LLM_ARCH_GRANITE:
 13503  13502              case LLM_ARCH_GRANITE_MOE:
        13503 +            case LLM_ARCH_MINICPM:
 13504  13504                  {
 13505  13505                      llm = std::make_unique<llm_build_granite>(*this, params, gf);
 13506  13506                  } break;

0 commit comments

Comments (0)
0