common : move up common_init() and fix Windows UTF-8 logs (#21176)

The build info is now only for debug, so we avoid the duplicate
with `--version`.

The UTF-8 setup at the beginning is needed to avoid logging
garbage on Windows.

Signed-off-by: Adrien Gallouët <angt@huggingface.co>
This commit is contained in:
Adrien Gallouët 2026-03-31 12:53:41 +02:00 committed by GitHub
parent 62278cedde
commit 41361c8599
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
34 changed files with 73 additions and 55 deletions

View file

@@ -20,12 +20,12 @@ int main(int argc, char ** argv) {
common_params params;
common_init();
if (!common_params_parse(argc, argv, params, LLAMA_EXAMPLE_BENCH, print_usage)) {
return 1;
}
common_init();
int is_pp_shared = params.is_pp_shared;
int is_tg_separate = params.is_tg_separate;

View file

@@ -347,6 +347,8 @@ int main(int argc, char ** argv) {
params.verbosity = LOG_LEVEL_ERROR; // by default, less verbose logs
common_init();
if (!common_params_parse(argc, argv, params, LLAMA_EXAMPLE_CLI)) {
return 1;
}
@@ -357,8 +359,6 @@ int main(int argc, char ** argv) {
console::error("please use llama-completion instead\n");
}
common_init();
// struct that contains llama context and inference
cli_context ctx_cli(params);

View file

@@ -90,12 +90,12 @@ int main(int argc, char ** argv) {
common_params params;
g_params = &params;
common_init();
if (!common_params_parse(argc, argv, params, LLAMA_EXAMPLE_COMPLETION, print_usage)) {
return 1;
}
common_init();
auto & sparams = params.sampling;
// save choice to use color for later

View file

@@ -400,6 +400,8 @@ int main(int argc, char ** argv) {
params.out_file = "control_vector.gguf";
common_init();
if (!common_params_parse(argc, argv, params, LLAMA_EXAMPLE_CVECTOR_GENERATOR, print_usage)) {
return 1;
}

View file

@@ -418,6 +418,8 @@ int main(int argc, char ** argv) {
params.out_file = "ggml-lora-merged-f16.gguf";
common_init();
if (!common_params_parse(argc, argv, params, LLAMA_EXAMPLE_EXPORT_LORA, print_usage)) {
return 1;
}

View file

@@ -17,11 +17,12 @@ using namespace std::chrono_literals;
int main(int argc, char ** argv) {
common_params params;
common_init();
if (!common_params_parse(argc, argv, params, LLAMA_EXAMPLE_COMMON)) {
return 1;
}
common_init();
llama_backend_init();
llama_numa_init(params.numa);
auto mparams = common_model_params_to_llama(params);

View file

@@ -1212,6 +1212,8 @@ int main(int argc, char ** argv) {
params.n_ctx = 512;
params.escape = false;
common_init();
if (!common_params_parse(argc, argv, params, LLAMA_EXAMPLE_IMATRIX, print_usage)) {
return 1;
}
@@ -1223,8 +1225,6 @@ int main(int argc, char ** argv) {
return 0;
}
common_init();
const int32_t n_ctx = params.n_ctx;
if (n_ctx <= 0) {

View file

@@ -54,11 +54,12 @@ int main(int argc, char ** argv) {
common_params params;
common_init();
if (!common_params_parse(argc, argv, params, LLAMA_EXAMPLE_MTMD, show_additional_info)) {
return 1;
}
common_init();
mtmd_helper_log_set(common_log_default_callback, nullptr);
if (params.mmproj.path.empty()) {

View file

@@ -281,11 +281,12 @@ int main(int argc, char ** argv) {
common_params params;
common_init();
if (!common_params_parse(argc, argv, params, LLAMA_EXAMPLE_MTMD, show_additional_info)) {
return 1;
}
common_init();
mtmd_helper_log_set(common_log_default_callback, nullptr);
if (params.mmproj.path.empty()) {

View file

@@ -2012,12 +2012,12 @@ int main(int argc, char ** argv) {
params.n_ctx = 512;
params.escape = false;
common_init();
if (!common_params_parse(argc, argv, params, LLAMA_EXAMPLE_PERPLEXITY)) {
return 1;
}
common_init();
const int32_t n_ctx = params.n_ctx;
if (n_ctx <= 0) {

View file

@@ -58,6 +58,9 @@ static std::vector<float> get_logits(
int main(int argc, char ** argv) {
common_params params;
params.escape = false;
common_init();
if (!common_params_parse(argc, argv, params, LLAMA_EXAMPLE_RESULTS)) {
return 1;
}
@@ -65,7 +68,6 @@ int main(int argc, char ** argv) {
LOG_ERR("%s: an output file must be specified", __func__);
return 1;
}
common_init();
llama_backend_init();
llama_numa_init(params.numa);
common_init_result_ptr llama_init = common_init_from_params(params);

View file

@@ -75,6 +75,8 @@ int main(int argc, char ** argv) {
// own arguments required by this example
common_params params;
common_init();
if (!common_params_parse(argc, argv, params, LLAMA_EXAMPLE_SERVER)) {
return 1;
}
@@ -100,8 +102,6 @@ int main(int argc, char ** argv) {
params.model_alias.insert(params.model.name);
}
common_init();
// struct that contains llama context and inference
server_context ctx_server;

View file

@@ -551,6 +551,8 @@ int main(int argc, char ** argv) {
params.sampling.top_k = 4;
params.sampling.samplers = { COMMON_SAMPLER_TYPE_TOP_K, };
common_init();
if (!common_params_parse(argc, argv, params, LLAMA_EXAMPLE_TTS, print_usage)) {
return 1;
}
@@ -558,8 +560,6 @@ int main(int argc, char ** argv) {
const int n_parallel = params.n_parallel;
const int n_predict = params.n_predict;
common_init();
// init LLM
llama_backend_init();