ggml : add GGML_SCHED_NO_REALLOC option to disable reallocations in ggml_backend_sched (#17276)
* ggml : add GGML_SCHED_NO_REALLOC option to disable reallocations in ggml_backend_sched

  Enabled in ggml-ci for testing.

* llama : update worst-case graph for unified cache
* ci : disable op offload in some tests
* fix spelling

---------

Co-authored-by: Georgi Gerganov <ggerganov@gmail.com>
parent c6f7a423c8
commit e072b2052e

8 changed files with 37 additions and 20 deletions
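
For reference, GGML_SCHED_NO_REALLOC is a plain CMake option (defined in ggml/CMakeLists.txt below), so the CI configuration can be reproduced in any local build. A minimal sketch, assuming a standard llama.cpp checkout; add your usual backend flags on top:

# Configure with scheduler reallocations disabled: any compute-graph
# allocation that would have to grow a backend buffer now aborts loudly
# instead of silently re-reserving, which is the point of the option.
cmake -B build -DGGML_SCHED_NO_REALLOC=ON
cmake --build build --config Release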

ci/run.sh (16 changed lines)

@@ -45,7 +45,7 @@ sd=`dirname $0`
 cd $sd/../
 SRC=`pwd`
 
-CMAKE_EXTRA="-DLLAMA_FATAL_WARNINGS=ON -DLLAMA_CURL=ON"
+CMAKE_EXTRA="-DLLAMA_FATAL_WARNINGS=ON -DLLAMA_CURL=ON -DGGML_SCHED_NO_REALLOC=ON"
 
 if [ ! -z ${GG_BUILD_METAL} ]; then
     CMAKE_EXTRA="${CMAKE_EXTRA} -DGGML_METAL=ON"
@@ -428,10 +428,10 @@ function gg_run_qwen3_0_6b {
 
     (time ./bin/llama-imatrix --model ${model_f16} -f ${wiki_test} -ngl 99 -c 1024 -b 512 --chunks 2 ) 2>&1 | tee -a $OUT/${ci}-imatrix.log
 
-    (time ./bin/llama-save-load-state --model ${model_q4_0} -ngl 10 -c 1024 -fa off ) 2>&1 | tee -a $OUT/${ci}-save-load-state.log
-    (time ./bin/llama-save-load-state --model ${model_q4_0} -ngl 10 -c 1024 -fa on ) 2>&1 | tee -a $OUT/${ci}-save-load-state.log
+    (time ./bin/llama-save-load-state --model ${model_q4_0} -ngl 10 -c 1024 -fa off --no-op-offload) 2>&1 | tee -a $OUT/${ci}-save-load-state.log
+    (time ./bin/llama-save-load-state --model ${model_q4_0} -ngl 10 -c 1024 -fa on --no-op-offload) 2>&1 | tee -a $OUT/${ci}-save-load-state.log
     (time ./bin/llama-save-load-state --model ${model_q4_0} -ngl 99 -c 1024 -fa off ) 2>&1 | tee -a $OUT/${ci}-save-load-state.log
     (time ./bin/llama-save-load-state --model ${model_q4_0} -ngl 99 -c 1024 -fa on ) 2>&1 | tee -a $OUT/${ci}-save-load-state.log
 
 function check_ppl {
     qnt="$1"
@@ -523,8 +523,8 @@ function gg_run_embd_bge_small {
 
     ./bin/llama-quantize ${model_f16} ${model_q8_0} q8_0
 
-    (time ./bin/llama-embedding --model ${model_f16} -p "I believe the meaning of life is" -ngl 99 -c 0 ) 2>&1 | tee -a $OUT/${ci}-tg-f16.log
-    (time ./bin/llama-embedding --model ${model_q8_0} -p "I believe the meaning of life is" -ngl 99 -c 0 ) 2>&1 | tee -a $OUT/${ci}-tg-q8_0.log
+    (time ./bin/llama-embedding --model ${model_f16} -p "I believe the meaning of life is" -ngl 99 -c 0 --no-op-offload) 2>&1 | tee -a $OUT/${ci}-tg-f16.log
+    (time ./bin/llama-embedding --model ${model_q8_0} -p "I believe the meaning of life is" -ngl 99 -c 0 --no-op-offload) 2>&1 | tee -a $OUT/${ci}-tg-q8_0.log
 
     set +e
 }
@@ -564,7 +564,7 @@ function gg_run_rerank_tiny {
     model_f16="${path_models}/ggml-model-f16.gguf"
 
     # for this model, the SEP token is "</s>"
-    (time ./bin/llama-embedding --model ${model_f16} -p "what is panda?\thi\nwhat is panda?\tit's a bear\nwhat is panda?\tThe giant panda (Ailuropoda melanoleuca), sometimes called a panda bear or simply panda, is a bear species endemic to China." -ngl 99 -c 0 --pooling rank --embd-normalize -1 --verbose-prompt) 2>&1 | tee -a $OUT/${ci}-rk-f16.log
+    (time ./bin/llama-embedding --model ${model_f16} -p "what is panda?\thi\nwhat is panda?\tit's a bear\nwhat is panda?\tThe giant panda (Ailuropoda melanoleuca), sometimes called a panda bear or simply panda, is a bear species endemic to China." -ngl 99 -c 0 --pooling rank --embd-normalize -1 --no-op-offload --verbose-prompt) 2>&1 | tee -a $OUT/${ci}-rk-f16.log
 
     # sample output
     # rerank score 0: 0.029

examples/embedding/embedding.cpp

@@ -104,12 +104,16 @@ int main(int argc, char ** argv) {
 
     params.embedding = true;
 
+    // get max number of sequences per batch
+    const int n_seq_max = llama_max_parallel_sequences();
+
     // if the number of prompts that would be encoded is known in advance, it's more efficient to specify the
     // --parallel argument accordingly. for convenience, if not specified, we fallback to unified KV cache
     // in order to support any number of prompts
     if (params.n_parallel == 1) {
         LOG_INF("%s: n_parallel == 1 -> unified KV cache is enabled\n", __func__);
         params.kv_unified = true;
+        params.n_parallel = n_seq_max;
     }
 
     // utilize the full context
@@ -123,9 +127,6 @@ int main(int argc, char ** argv) {
         params.n_ubatch = params.n_batch;
     }
 
-    // get max number of sequences per batch
-    const int n_seq_max = llama_max_parallel_sequences();
-
     llama_backend_init();
     llama_numa_init(params.numa);
 
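The net effect of the two hunks above: n_seq_max is now read before the --parallel fallback, and when --parallel is left at 1 the example both enables the unified KV cache and raises n_parallel to llama_max_parallel_sequences(), so the reserved worst-case graph covers every sequence. A hedged usage sketch; the model path and prompts file are placeholders, not part of this commit:

# One prompt per line; no --parallel needed, the example falls back to the
# unified KV cache and sizes n_parallel to the library maximum by itself.
./build/bin/llama-embedding -m model-f16.gguf -f prompts.txt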

ggml/CMakeLists.txt

@@ -183,6 +183,7 @@ endif()
 # ggml core
 set(GGML_SCHED_MAX_COPIES "4" CACHE STRING "ggml: max input copies for pipeline parallelism")
 option(GGML_CPU "ggml: enable CPU backend" ON)
+option(GGML_SCHED_NO_REALLOC "ggml: disallow reallocations in ggml-alloc (for debugging)" OFF)
 
 # 3rd party libs / backends
 option(GGML_ACCELERATE "ggml: enable Accelerate framework" ON)

ggml/src/CMakeLists.txt

@@ -221,6 +221,10 @@ if (GGML_BACKEND_DL)
     target_compile_definitions(ggml-base PUBLIC GGML_BACKEND_DL)
 endif()
 
+if (GGML_SCHED_NO_REALLOC)
+    target_compile_definitions(ggml-base PUBLIC GGML_SCHED_NO_REALLOC)
+endif()
+
 add_library(ggml
             ggml-backend-reg.cpp)
 add_library(ggml::ggml ALIAS ggml)

ggml/src/ggml-alloc.c

@@ -921,10 +921,15 @@ bool ggml_gallocr_reserve_n(ggml_gallocr_t galloc, struct ggml_cgraph * graph, c
         }
         if (realloc) {
 #ifndef NDEBUG
-            size_t cur_size = galloc->buffers[i] ? ggml_vbuffer_size(galloc->buffers[i]) : 0;
-            GGML_LOG_DEBUG("%s: reallocating %s buffer from size %.02f MiB to %.02f MiB\n", __func__, ggml_backend_buft_name(galloc->bufts[i]), cur_size / 1024.0 / 1024.0, new_size / 1024.0 / 1024.0);
+            {
+                size_t cur_size = galloc->buffers[i] ? ggml_vbuffer_size(galloc->buffers[i]) : 0;
+                if (cur_size > 0) {
+                    GGML_LOG_DEBUG("%s: reallocating %s buffer from size %.02f MiB to %.02f MiB\n",
+                        __func__, ggml_backend_buft_name(galloc->bufts[i]),
+                        cur_size / 1024.0 / 1024.0, new_size / 1024.0 / 1024.0);
+                }
+            }
 #endif
 
             ggml_vbuffer_free(galloc->buffers[i]);
             galloc->buffers[i] = ggml_vbuffer_alloc(galloc->bufts[i], galloc->buf_tallocs[i], GGML_BACKEND_BUFFER_USAGE_COMPUTE);
             if (galloc->buffers[i] == NULL) {

ggml/src/ggml-backend.cpp

@@ -1395,14 +1395,20 @@ static bool ggml_backend_sched_alloc_splits(ggml_backend_sched_t sched) {
 
     // allocate graph
     if (backend_ids_changed || !ggml_gallocr_alloc_graph(sched->galloc, &sched->graph)) {
+#ifdef GGML_SCHED_NO_REALLOC
+        GGML_ABORT("%s: failed to allocate graph, but graph re-allocation is disabled by GGML_SCHED_NO_REALLOC\n", __func__);
+#endif
+
+#ifndef NDEBUG
+        GGML_LOG_DEBUG("%s: failed to allocate graph, reserving (backend_ids_changed = %d)\n", __func__, backend_ids_changed);
+#endif
+
         // the re-allocation may cause the split inputs to be moved to a different address
         // synchronize without ggml_backend_sched_synchronize to avoid changing cur_copy
        for (int i = 0; i < sched->n_backends; i++) {
             ggml_backend_synchronize(sched->backends[i]);
         }
-#ifndef NDEBUG
-        GGML_LOG_DEBUG("%s: failed to allocate graph, reserving (backend_ids_changed = %d)\n", __func__, backend_ids_changed);
-#endif
         ggml_gallocr_reserve_n(sched->galloc, &sched->graph, sched->node_backend_ids, sched->leaf_backend_ids);
         if (!ggml_gallocr_alloc_graph(sched->galloc, &sched->graph)) {
             GGML_LOG_ERROR("%s: failed to allocate graph\n", __func__);

src/llama-context.cpp

@@ -300,7 +300,7 @@ llama_context::llama_context(
 
         cross.v_embd.clear();
 
-        const uint32_t n_seqs = cparams.kv_unified ? 1 : cparams.n_seq_max;
+        const uint32_t n_seqs = cparams.n_seq_max;
         const uint32_t n_tokens = std::min(cparams.n_ctx, cparams.n_ubatch);
 
         // avoid reserving graphs with zero outputs - assume one output per sequence
@@ -543,7 +543,7 @@ bool llama_context::memory_update(bool optimize) {
            throw std::runtime_error("failed to initialize memory context");
        }
 
-        const uint32_t n_seqs = cparams.kv_unified ? 1 : cparams.n_seq_max;
+        const uint32_t n_seqs = cparams.n_seq_max;
         const uint32_t n_tokens = std::min(cparams.n_ctx, cparams.n_ubatch);
 
         auto * gf = graph_reserve(n_tokens, n_seqs, n_tokens, mctx.get());

tests/CMakeLists.txt

@@ -196,7 +196,7 @@ if (NOT WIN32)
     llama_build_and_test(test-arg-parser.cpp)
 endif()
 
-if (NOT LLAMA_SANITIZE_ADDRESS)
+if (NOT LLAMA_SANITIZE_ADDRESS AND NOT GGML_SCHED_NO_REALLOC)
     # TODO: repair known memory leaks
     llama_build_and_test(test-opt.cpp)
 endif()
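
Taken together with the guard above, a test run under the new option simply skips test-opt. A rough sketch; LLAMA_BUILD_TESTS is the usual llama.cpp test toggle and is an assumption here, not part of this diff:

# Build with reallocations disabled and run the suite; test-opt is excluded
# by the CMake condition above when GGML_SCHED_NO_REALLOC is set.
cmake -B build -DGGML_SCHED_NO_REALLOC=ON -DLLAMA_BUILD_TESTS=ON
cmake --build build -j
ctest --test-dir build --output-on-failure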