From ce184d9ae83fc81220e83f3d4e8ebbeb2e689f74 Mon Sep 17 00:00:00 2001
From: Vitaly Chikunov <vt@altlinux.org>
Date: Wed, 25 Jun 2025 11:03:41 +0300
Subject: [PATCH] 1:5753-alt1

- Update to b5753 (2025-06-24).
- Install an experimental rpc backend and server. The rpc code is a
  proof-of-concept, fragile, and insecure.
---
 .gear/llama.cpp.spec | 17 ++++++++++++-----
 .gear/tags/list      |  2 +-
 2 files changed, 13 insertions(+), 6 deletions(-)

diff --git a/.gear/llama.cpp.spec b/.gear/llama.cpp.spec
index 579af714d..ae5471522 100644
--- a/.gear/llama.cpp.spec
+++ b/.gear/llama.cpp.spec
@@ -11,7 +11,7 @@
 %def_with vulkan
 
 Name: llama.cpp
-Version: 5332
+Version: 5753
 Release: alt1
 Epoch: 1
 Summary: LLM inference in C/C++
@@ -147,11 +147,11 @@ cat <<-EOF >> cmake/build-info.cmake
 set(BUILD_COMMIT "${commit::8} [%release]")
 EOF
 sed -i '/POSITION_INDEPENDENT_CODE/s/PROPERTIES/& SOVERSION 0.0.%version/' ggml/src/CMakeLists.txt src/CMakeLists.txt
-sed -i 's/POSITION_INDEPENDENT_CODE/SOVERSION 0.0.%version &/' ggml/cmake/ggml-config.cmake.in
+sed -i 's/POSITION_INDEPENDENT_CODE/SOVERSION 0.0.%version &/' ggml/cmake/ggml-config.cmake.in tools/mtmd/CMakeLists.txt
 # We do not have Internet access (issues/13371).
 sed -i 's/common_has_curl()/0/' tests/test-arg-parser.cpp
-# Libs with unclear purpose.
-sed -i s/BUILD_SHARED_LIBS/0/ tools/mtmd/CMakeLists.txt
+# This test requires GPU.
+sed /test-thread-safety/d -i tests/CMakeLists.txt
 
 %build
 # Unless -DCMAKE_SKIP_BUILD_RPATH=yes CMake fails to strip build time RPATH
@@ -217,14 +217,17 @@ llama-cli -m %_datadir/tinyllamas/stories260K.gguf -p "Once upon a time" -s 55 -
 %_libdir/libllama.so.0.0.%version
 %_libdir/libggml.so.0.0.%version
 %_libdir/libggml-base.so.0.0.%version
+%_libdir/libmtmd.so.0.0.%version
 
 %files -n libllama-devel
 %_libdir/libllama.so
 %_libdir/libggml.so
 %_libdir/libggml-base.so
+%_libdir/libmtmd.so
 %_includedir/llama*.h
 %_includedir/gguf.h
 %_includedir/ggml*.h
+%_includedir/mtmd*.h
 %_cmakedir/ggml
 %_cmakedir/llama
 %_pkgconfigdir/llama.pc
@@ -248,12 +251,16 @@ llama-cli -m %_datadir/tinyllamas/stories260K.gguf -p "Once upon a time" -s 55 -
 
 %if_with vulkan
 %files vulkan
-%_bindir/vulkan-shaders-gen
 %dir %_libexecdir/llama
 %_libexecdir/llama/libggml-vulkan.so
 %endif
 
 %changelog
+* Wed Jun 25 2025 Vitaly Chikunov <vt@altlinux.org> 1:5753-alt1
+- Update to b5753 (2025-06-24).
+- Install an experimental rpc backend and server. The rpc code is a
+  proof-of-concept, fragile, and insecure.
+
 * Sat May 10 2025 Vitaly Chikunov <vt@altlinux.org> 1:5332-alt1
 - Update to b5332 (2025-05-09), with vision support in llama-server.
 - Enable Vulkan backend (for GPU) in llama.cpp-vulkan package.

diff --git a/.gear/tags/list b/.gear/tags/list
index 1505f6a07..f765fc91e 100644
--- a/.gear/tags/list
+++ b/.gear/tags/list
@@ -1,2 +1,2 @@
 4565194ed7c32d1d2efa32ceab4d3c6cae006306 submodule/kompute-0
-7c28a74e0783f4bb74a246fb9f19bf212139e365 b5332
+73e53dc834c0a2336cd104473af6897197b96277 b5753