From 3231baad5b67fdb117cc613e778ce1981f2d4740 Mon Sep 17 00:00:00 2001
From: PshySimon
Date: Mon, 21 Jul 2025 16:49:10 +0800
Subject: [PATCH] backport-CVE-2025-53630

---
 backport-CVE-2025-53630.patch | 34 ++++++++++++++++++++++++++++++++++
 llama.cpp.spec                |  6 +++++-
 2 files changed, 39 insertions(+), 1 deletion(-)
 create mode 100644 backport-CVE-2025-53630.patch

diff --git a/backport-CVE-2025-53630.patch b/backport-CVE-2025-53630.patch
new file mode 100644
index 0000000..b293355
--- /dev/null
+++ b/backport-CVE-2025-53630.patch
@@ -0,0 +1,34 @@
+From 7d00e32369b13b1820d4acbf453232cef6de3171 Mon Sep 17 00:00:00 2001
+From: Miaoqian Lin
+Date: Wed, 9 Jul 2025 20:33:53 +0800
+Subject: [PATCH] ggml : prevent integer overflow in gguf tensor size
+ calculation (#14595)
+
+---
+ ggml/src/ggml.c | 9 ++++++++-
+ 1 file changed, 8 insertions(+), 1 deletion(-)
+
+diff --git a/ggml/src/ggml.c b/ggml/src/ggml.c
+index 058941c..8845215 100644
+--- a/ggml/src/ggml.c
++++ b/ggml/src/ggml.c
+@@ -6854,7 +6854,14 @@ struct gguf_context * gguf_init_from_file(const char * fname, struct gguf_init_p
+ 
+         const size_t size_cur = ggml_row_size(info->type, ne);
+ 
+-        ctx->size += GGML_PAD(size_cur, ctx->alignment);
++        size_t padded_size = GGML_PAD(size_cur, ctx->alignment);
++        if (SIZE_MAX - ctx->size < padded_size) {
++            GGML_LOG_ERROR("%s: tensor size overflow, cannot accumulate size %zu + %zu\n",
++                __func__, ctx->size, padded_size);
++            gguf_free(ctx);
++            return NULL;
++        }
++        ctx->size += padded_size;
+     }
+ }
+ 
+-- 
+2.43.0
+
+
diff --git a/llama.cpp.spec b/llama.cpp.spec
index b169596..cf9ca8b 100644
--- a/llama.cpp.spec
+++ b/llama.cpp.spec
@@ -3,7 +3,7 @@
 
 Name: llama.cpp
 Version: 20241210
-Release: 3
+Release: 4
 License: MIT
 Summary: Port of English lagre model LLaMA implemented based on C/C++
 
@@ -12,6 +12,7 @@ Source0: https://github.com/ggerganov/llama.cpp/archive/refs/tags/%{llama
 
 Patch001: backport-CVE-2025-49847.patch
 Patch002: backport-CVE-2025-52566.patch
+Patch003: backport-CVE-2025-53630.patch
 
 BuildRequires: gcc,gcc-c++,cmake
 
@@ -56,6 +57,9 @@ it can be used for model dialogue based on local laptops.
 %{_exec_prefix}/lib/pkgconfig/llama.pc
 
 %changelog
+* Mon Jul 21 2025 PshySimon - 20241210-4
+- fix CVE-2025-53630
+
 * Fri Jul 4 2025 PshySimon - 20241210-3
 - fix CVE-2025-52566
 
-- 
Gitee
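
The guard added by the backported hunk uses the usual pre-check for unsigned overflow: an addition a + b on size_t wraps exactly when SIZE_MAX - a < b, so the condition can be tested before the wrapping addition is ever performed. Below is a minimal standalone C sketch of the same pattern; PAD and accumulate_padded() are hypothetical stand-ins for GGML_PAD and the accumulation loop in gguf_init_from_file(), not code taken from the patch.

/* Minimal sketch of the overflow guard applied in the fix above.
 * PAD and accumulate_padded() are illustrative stand-ins, not ggml API. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Round x up to a multiple of n (n must be a power of two). */
#define PAD(x, n) (((x) + (n) - 1) & ~((n) - 1))

/* Add the padded size to *total; refuse instead of wrapping past SIZE_MAX. */
static bool accumulate_padded(size_t *total, size_t size_cur, size_t alignment) {
    const size_t padded_size = PAD(size_cur, alignment);
    if (SIZE_MAX - *total < padded_size) {
        fprintf(stderr, "size overflow: cannot accumulate %zu + %zu\n", *total, padded_size);
        return false;
    }
    *total += padded_size;
    return true;
}

int main(void) {
    size_t total = 0;

    /* Ordinary entry: 1000 bytes rounded up to 32-byte alignment -> 1024. */
    if (accumulate_padded(&total, 1000, 32)) {
        printf("total = %zu\n", total);
    }

    /* Crafted size close to SIZE_MAX: rejected instead of wrapping the total. */
    if (!accumulate_padded(&total, SIZE_MAX - 31, 32)) {
        printf("oversized entry rejected, total still %zu\n", total);
    }
    return 0;
}

Rejecting the entry up front, as the fix does by freeing the context and returning NULL, avoids relying on after-the-fact wrap detection, which for unsigned arithmetic is well defined but would silently leave a too-small total size.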