server : add arg for disabling prompt caching (#18776)

* server : add arg for disabling prompt caching

Disabling prompt caching is useful for clients that are restricted to
sending only OpenAI-compatible requests and want deterministic
responses.

* address review comments

* address review comments
This commit is contained in:
Radoslav Gerganov 2026-01-12 19:21:34 +02:00 committed by GitHub
parent 36c5913c45
commit bcf7546160
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
3 changed files with 12 additions and 2 deletions

View file

@ -160,6 +160,7 @@ task_params server_task::params_from_json_cmpl(
defaults.n_keep = params_base.n_keep;
defaults.n_predict = params_base.n_predict;
defaults.n_cache_reuse = params_base.n_cache_reuse;
defaults.cache_prompt = params_base.cache_prompt;
defaults.antiprompt = params_base.antiprompt;
// enabling this will output extra debug information in the HTTP responses from the server
@ -169,7 +170,7 @@ task_params server_task::params_from_json_cmpl(
params.stream = json_value(data, "stream", false);
auto stream_opt = json_value(data, "stream_options", json::object());
params.include_usage = json_value(stream_opt, "include_usage", false);
params.cache_prompt = json_value(data, "cache_prompt", true);
params.cache_prompt = json_value(data, "cache_prompt", defaults.cache_prompt);
params.return_tokens = json_value(data, "return_tokens", false);
params.return_progress = json_value(data, "return_progress", false);
params.n_predict = json_value(data, "n_predict", json_value(data, "max_tokens", defaults.n_predict));