From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
From: Michael Yang <git@mxy.ng>
Date: Mon, 18 Aug 2025 16:58:39 -0700
Subject: [PATCH] decode: disable output_all

---
 src/llama-context.cpp | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/src/llama-context.cpp b/src/llama-context.cpp
index 26a5cf9c..6ece5263 100644
--- a/src/llama-context.cpp
+++ b/src/llama-context.cpp
@@ -962,8 +962,7 @@ int llama_context::decode(const llama_batch & batch_inp) {
     const int64_t n_vocab = vocab.n_tokens();
     const int64_t n_embd = hparams.n_embd;
 
-    // when computing embeddings, all tokens are output
-    const bool output_all = cparams.embeddings;
+    const bool output_all = false;
 
     if (!balloc->init(batch_inp, vocab, memory.get(), n_embd, cparams.kv_unified ? LLAMA_MAX_SEQ : cparams.n_seq_max, output_all)) {
         LLAMA_LOG_ERROR("%s: failed to initialize batch\n", __func__);
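
For context: per the removed comment ("when computing embeddings, all tokens are output"), output_all=true makes the batch allocator treat every token in the batch as one that produces output; the patch forces it to false so only tokens explicitly flagged in the batch do. The standalone C++ sketch below illustrates that flag's effect. It is not llama.cpp's actual implementation; the struct and function names are hypothetical.

// Minimal sketch of what an output_all flag controls when preparing a batch.
// All names here (batch_sketch, count_outputs) are illustrative only.
#include <cstdint>
#include <cstdio>
#include <vector>

struct batch_sketch {
    int32_t n_tokens;            // number of tokens in the batch
    std::vector<int8_t> logits;  // per-token "wants output" flags
};

// Count how many tokens will produce logits/embeddings.
static int32_t count_outputs(const batch_sketch & batch, bool output_all) {
    if (output_all) {
        // embeddings path before the patch: every token is an output
        return batch.n_tokens;
    }
    // with output_all forced to false: only explicitly flagged tokens
    int32_t n_outputs = 0;
    for (int32_t i = 0; i < batch.n_tokens; ++i) {
        n_outputs += batch.logits[i] != 0 ? 1 : 0;
    }
    return n_outputs;
}

int main() {
    batch_sketch batch{4, {0, 0, 0, 1}}; // only the last token flagged
    std::printf("output_all=true  -> %d outputs\n", count_outputs(batch, true));  // 4
    std::printf("output_all=false -> %d outputs\n", count_outputs(batch, false)); // 1
    return 0;
}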