llama_params_fit: return enum for fail vs. error (#18374)

This commit is contained in:
Johannes Gäßler 2025-12-27 09:59:19 +01:00 committed by GitHub
parent 9045c9afe5
commit a52dc60ba3
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
3 changed files with 28 additions and 15 deletions

View file

@@ -26,10 +26,10 @@ int main(int argc, char ** argv) {
     llama_numa_init(params.numa);
     auto mparams = common_model_params_to_llama(params);
     auto cparams = common_context_params_to_llama(params);
-    const bool success = llama_params_fit(params.model.path.c_str(), &mparams, &cparams,
+    const llama_params_fit_status status = llama_params_fit(params.model.path.c_str(), &mparams, &cparams,
         params.tensor_split, params.tensor_buft_overrides.data(), params.fit_params_target, params.fit_params_min_ctx,
         params.verbosity >= 4 ? GGML_LOG_LEVEL_DEBUG : GGML_LOG_LEVEL_ERROR);
-    if (!success) {
+    if (status != LLAMA_PARAMS_FIT_STATUS_SUCCESS) {
         LOG_ERR("%s: failed to fit CLI arguments to free memory, exiting...\n", __func__);
         exit(1);
     }