about summary refs log tree commit diff
path: root/misc/py-llama-cpp-python
diff options
context:
space:
mode:
Diffstat (limited to 'misc/py-llama-cpp-python')
-rw-r--r--  misc/py-llama-cpp-python/Makefile                                                      |  6
-rw-r--r--  misc/py-llama-cpp-python/distinfo                                                      | 10
-rw-r--r--  misc/py-llama-cpp-python/files/patch-vendor_llama.cpp_ggml_src_ggml-cpu_ggml-cpu-impl.h | 11
3 files changed, 8 insertions(+), 19 deletions(-)
diff --git a/misc/py-llama-cpp-python/Makefile b/misc/py-llama-cpp-python/Makefile
index 2cc013916c94..6b4387d5bf18 100644
--- a/misc/py-llama-cpp-python/Makefile
+++ b/misc/py-llama-cpp-python/Makefile
@@ -1,6 +1,6 @@
PORTNAME= llama-cpp-python
DISTVERSIONPREFIX= v
-DISTVERSION= 0.3.2
+DISTVERSION= 0.3.16
CATEGORIES= misc # machine-learning
PKGNAMEPREFIX= ${PYTHON_PKGNAMEPREFIX}
@@ -12,7 +12,7 @@ WWW= https://llama-cpp-python.readthedocs.io/en/latest/ \
LICENSE= MIT
LICENSE_FILE= ${WRKSRC}/LICENSE.md
-BUILD_DEPENDS= ${PYTHON_PKGNAMEPREFIX}scikit-build-core>0:devel/py-scikit-build-core@${PY_FLAVOR} \
+BUILD_DEPENDS= ${PYTHON_PKGNAMEPREFIX}scikit-build-core>=0.9.2:devel/py-scikit-build-core@${PY_FLAVOR} \
cmake:devel/cmake-core
LIB_DEPENDS= libvulkan.so:graphics/vulkan-loader
RUN_DEPENDS= ${PYTHON_PKGNAMEPREFIX}diskcache>=5.6.1:devel/py-diskcache@${PY_FLAVOR} \
@@ -34,7 +34,7 @@ USE_PYTHON= pep517 autoplist pytest
USE_GITHUB= yes
GH_ACCOUNT= abetlen
-GH_TUPLE= ggerganov:llama.cpp:74d73dc:cpp/vendor/llama.cpp
+GH_TUPLE= ggerganov:llama.cpp:b6598:cpp/vendor/llama.cpp
SHEBANG_GLOB= *.py
diff --git a/misc/py-llama-cpp-python/distinfo b/misc/py-llama-cpp-python/distinfo
index a25d0e5c74ba..7f4849c553a3 100644
--- a/misc/py-llama-cpp-python/distinfo
+++ b/misc/py-llama-cpp-python/distinfo
@@ -1,5 +1,5 @@
-TIMESTAMP = 1731907964
-SHA256 (abetlen-llama-cpp-python-v0.3.2_GH0.tar.gz) = 491057624a095ff6d4eae1bcc139b579f72d03d9deee1a2ab05d2f980ec824ed
-SIZE (abetlen-llama-cpp-python-v0.3.2_GH0.tar.gz) = 274530
-SHA256 (ggerganov-llama.cpp-74d73dc_GH0.tar.gz) = 9cc0aad9746b93b461c53d7038211ea7cbe70df2aa1a23b2daac07af935f6990
-SIZE (ggerganov-llama.cpp-74d73dc_GH0.tar.gz) = 19564318
+TIMESTAMP = 1758915707
+SHA256 (abetlen-llama-cpp-python-v0.3.16_GH0.tar.gz) = d6ae5a6ac40dda4d14c6bb8f5e9504d28f442cf810263661d457c948c386f2a4
+SIZE (abetlen-llama-cpp-python-v0.3.16_GH0.tar.gz) = 279565
+SHA256 (ggerganov-llama.cpp-b6598_GH0.tar.gz) = cd296792f49695bd44e885a5c1fcefe4ef72f3da7f8933be1378f944116515a3
+SIZE (ggerganov-llama.cpp-b6598_GH0.tar.gz) = 25833773
diff --git a/misc/py-llama-cpp-python/files/patch-vendor_llama.cpp_ggml_src_ggml-cpu_ggml-cpu-impl.h b/misc/py-llama-cpp-python/files/patch-vendor_llama.cpp_ggml_src_ggml-cpu_ggml-cpu-impl.h
deleted file mode 100644
index 376142c64242..000000000000
--- a/misc/py-llama-cpp-python/files/patch-vendor_llama.cpp_ggml_src_ggml-cpu_ggml-cpu-impl.h
+++ /dev/null
@@ -1,11 +0,0 @@
---- vendor/llama.cpp/ggml/src/ggml-cpu/ggml-cpu-impl.h.orig 2025-08-02 23:17:06 UTC
-+++ vendor/llama.cpp/ggml/src/ggml-cpu/ggml-cpu-impl.h
-@@ -319,8 +319,6 @@ inline static int32x4_t ggml_vdotq_s32(int32x4_t acc,
- #else
- #ifdef __POWER9_VECTOR__
- #include <altivec.h>
--#undef bool
--#define bool _Bool
- #else
- #if defined(_MSC_VER) || defined(__MINGW32__)
- #include <intrin.h>