From 26f10c647ccbcaf8adccab7dbe80d6e013d0c008 Mon Sep 17 00:00:00 2001 From: Vitaly Chikunov Date: Mon, 23 Jun 2025 10:24:42 +0300 Subject: [PATCH] spec: Install rpc-server and rpc backend No point in making a separate rpc backend package since this is a virtual thing. Link: https://github.com/ggml-org/llama.cpp/tree/master/tools/rpc Signed-off-by: Vitaly Chikunov --- .gear/llama.cpp.spec | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.gear/llama.cpp.spec b/.gear/llama.cpp.spec index 1f29c8a38..579af714d 100644 --- a/.gear/llama.cpp.spec +++ b/.gear/llama.cpp.spec @@ -163,6 +163,7 @@ export NVCC_PREPEND_FLAGS=-ccbin=g++-12 -DLLAMA_CURL=ON \ -DGGML_BACKEND_DL=ON \ -DGGML_CPU=ON \ + -DGGML_RPC=ON \ %ifarch x86_64 -DGGML_CPU_ALL_VARIANTS=ON \ %endif @@ -196,6 +197,7 @@ rm %buildroot%_bindir/test-* install -Dpm644 llama.bash %buildroot%_datadir/bash-completion/completions/llama-cli printf '%%s\n' llama-server llama-simple llama-run llama-mtmd-cli | xargs -ti ln -s llama-cli %buildroot%_datadir/bash-completion/completions/{} +install -Dp %_cmake__builddir/bin/rpc-server %buildroot%_bindir/llama-rpc-server %check # Local path are more useful for debugging becasue they are not stripped by default. @@ -235,6 +237,7 @@ llama-cli -m %_datadir/tinyllamas/stories260K.gguf -p "Once upon a time" -s 55 - %_datadir/%name %dir %_libexecdir/llama %_libexecdir/llama/libggml-cpu*.so +%_libexecdir/llama/libggml-rpc.so %_datadir/bash-completion/completions/llama-* %if_with cuda