spec: Install rpc-server and rpc backend

There is no point in making a separate rpc backend subpackage, since the
backend is essentially a virtual thing.
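
For reference, a rough usage sketch of what this enables (host, port and
model path are placeholders; rpc-server's -p/--port and llama-cli's --rpc
and -ngl options are as shown in the upstream tools/rpc documentation):

  # on a worker machine, expose its local backend over RPC
  llama-rpc-server -p 50052

  # on the main machine, offload work to the worker
  llama-cli -m model.gguf -p "Once upon a time" --rpc worker.example.org:50052 -ngl 99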

Link: https://github.com/ggml-org/llama.cpp/tree/master/tools/rpc
Signed-off-by: Vitaly Chikunov <vt@altlinux.org>
Author: Vitaly Chikunov <vt@altlinux.org>
Date:   2025-06-23 10:24:42 +03:00
Parent: 54456bb584
Commit: 26f10c647c

@@ -163,6 +163,7 @@ export NVCC_PREPEND_FLAGS=-ccbin=g++-12
 -DLLAMA_CURL=ON \
 -DGGML_BACKEND_DL=ON \
 -DGGML_CPU=ON \
+-DGGML_RPC=ON \
 %ifarch x86_64
 -DGGML_CPU_ALL_VARIANTS=ON \
 %endif
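
For context, the added flag corresponds to roughly this upstream configure
step (a sketch against a plain llama.cpp checkout; the flags mirror the
ones already used in the spec):

  cmake -B build -DGGML_BACKEND_DL=ON -DGGML_CPU=ON -DGGML_RPC=ON -DLLAMA_CURL=ON
  cmake --build build --config Release
  # with GGML_BACKEND_DL=ON the RPC backend is built as a loadable module
  # (libggml-rpc.so) and the rpc-server binary ends up in build/bin/
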
@@ -196,6 +197,7 @@ rm %buildroot%_bindir/test-*
 install -Dpm644 llama.bash %buildroot%_datadir/bash-completion/completions/llama-cli
 printf '%%s\n' llama-server llama-simple llama-run llama-mtmd-cli |
 	xargs -ti ln -s llama-cli %buildroot%_datadir/bash-completion/completions/{}
+install -Dp %_cmake__builddir/bin/rpc-server %buildroot%_bindir/llama-rpc-server
 
 %check
 # Local paths are more useful for debugging because they are not stripped by default.
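
The binary is renamed to llama-rpc-server, presumably to avoid putting the
overly generic name rpc-server into %_bindir. A quick sanity check of the
staged files could look like this (hypothetical, not part of the spec;
paths are taken from the install line above and the %files entry below):

  test -x %buildroot%_bindir/llama-rpc-server
  test -f %buildroot%_libexecdir/llama/libggml-rpc.so
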
@@ -235,6 +237,7 @@ llama-cli -m %_datadir/tinyllamas/stories260K.gguf -p "Once upon a time" -s 55 -
 %_datadir/%name
 %dir %_libexecdir/llama
 %_libexecdir/llama/libggml-cpu*.so
+%_libexecdir/llama/libggml-rpc.so
 %_datadir/bash-completion/completions/llama-*
 %if_with cuda