From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
From: Oliver Simons <osimons@nvidia.com>
Date: Tue, 22 Jul 2025 11:02:28 +0200
Subject: [PATCH] Enable CUDA Graphs for gemma3n.

Similar to https://github.com/ggml-org/llama.cpp/pull/14741, though
ollama has a slightly different model graph than llama.cpp, which
requires different workaround checks.
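
For illustration only, a minimal self-contained sketch of the heuristic
being worked around: a GGML_OP_ADD node whose second operand has
ne[1] > 1 normally signals batch size > 1, which disables CUDA graph
capture, so known per-token gemma3n tensors must be exempted by name.
toy_tensor and add_node_disables_cuda_graph below are hypothetical
stand-ins rather than real ggml types; the actual check lives in
check_node_graph_compatibility_and_refresh_copy_ops() in the diff.

    #include <string>

    // Hypothetical stand-in for ggml_tensor with only the fields the
    // check reads.
    struct toy_tensor {
        std::string name;        // tensor name assigned by the graph builder
        long long ne1;           // second dimension; > 1 usually means batch > 1
        const toy_tensor * src1; // second operand of the ADD node
    };

    // Returns true if this ADD node should force stream execution
    // (i.e. disable CUDA graph capture) under the batch-size heuristic.
    static bool add_node_disables_cuda_graph(const toy_tensor & node) {
        const std::string gemma3n_per_layer_proj = "per_layer_proj";
        if (node.src1 == nullptr || node.src1->ne1 <= 1) {
            return false; // not a batched ADD; heuristic does not apply
        }
        if (node.src1->name == gemma3n_per_layer_proj) {
            return false; // known gemma3n tensor: keep CUDA graphs enabled
        }
        return true; // genuine batch > 1: fall back to stream execution
    }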
---
 ggml/src/ggml-cuda/ggml-cuda.cu | 18 ++++++++++++++++++
 1 file changed, 18 insertions(+)

diff --git a/ggml/src/ggml-cuda/ggml-cuda.cu b/ggml/src/ggml-cuda/ggml-cuda.cu
index 57eae461..c7f9dc3a 100644
--- a/ggml/src/ggml-cuda/ggml-cuda.cu
+++ b/ggml/src/ggml-cuda/ggml-cuda.cu
@@ -2671,12 +2671,24 @@ static bool check_node_graph_compatibility_and_refresh_copy_ops(ggml_backend_cud
     // Loop over nodes in GGML graph to obtain info needed for CUDA graph
     cuda_ctx->cuda_graph->cpy_dest_ptrs.clear();
 
+    // This fix was added in llama.cpp and Ollama in parallel, but with
+    // different tensor names.
+    // llama.cpp: https://github.com/ggml-org/llama.cpp/pull/14741
+    // ollama: https://github.com/ollama/ollama/pull/11525
+
+    const std::string gemma3n_per_layer_proj_src1_name_ollama = " (reshaped)";
+    const std::string gemma3n_node_name_ollama = "node_";
+
     const std::string gemma3n_per_layer_proj_src0_name = "inp_per_layer_selected";
     const std::string gemma3n_per_layer_proj_src1_name = "per_layer_proj";
+
+    const std::string ffn_moe_bias_suffix = "_exps.bias";
+
     const std::string ffn_moe_gate_bias_prefix = "ffn_moe_gate_biased";
     const std::string ffn_moe_up_bias_prefix = "ffn_moe_up_biased";
     const std::string ffn_moe_down_bias_prefix = "ffn_moe_down_biased";
 
+
     for (int i = 0; i < cgraph->n_nodes; i++) {
         ggml_tensor * node = cgraph->nodes[i];
 
@@ -2700,6 +2712,12 @@ static bool check_node_graph_compatibility_and_refresh_copy_ops(ggml_backend_cud
 
         if (node->op == GGML_OP_ADD &&
             node->src[1] && node->src[1]->ne[1] > 1 &&
+            // ollama
+            // workarounds to exclude Gemma3n's `project_per_layer_input` operation from the batch-size heuristic, specific to ollama's implementation of gemma3n
+            // number of layers is different for per_layer_proj between gemma3n:2b and gemma3n:4b, which is why we don't check that value here
+            !(node->ne[0] == 256 && node->ne[2] == 1 && node->ne[3] == 1 && node->src[0] ? std::string(node->src[0]->name).find(gemma3n_node_name_ollama) != std::string::npos : false && node->src[1] ? node->src[1]->name == gemma3n_per_layer_proj_src1_name_ollama : false) &&
+            node->src[1] ? std::string(node->src[1]->name).find(ffn_moe_bias_suffix) == std::string::npos : false &&
+            // upstream
             (node->src[0] ? node->src[0]->name != gemma3n_per_layer_proj_src0_name : true) &&
             (node->src[1] ? node->src[1]->name != gemma3n_per_layer_proj_src1_name : true) &&
             strncmp(node->name, ffn_moe_gate_bias_prefix.c_str(), ffn_moe_gate_bias_prefix.size()) != 0 &&