From 977b4d309eb4beabfcda182834d53078cc5e8d8c Mon Sep 17 00:00:00 2001
From: luofeng14
Date: Sat, 9 Sep 2023 07:46:08 +0000
Subject: [PATCH] support clang build

Signed-off-by: luofeng14
---
 hyperscan.spec            |   6 +-
 support-clang-build.patch | 132 ++++++++++++++++++++++++++++++++++++++
 2 files changed, 137 insertions(+), 1 deletion(-)
 create mode 100644 support-clang-build.patch

diff --git a/hyperscan.spec b/hyperscan.spec
index 9fdb7ee..1b7d241 100644
--- a/hyperscan.spec
+++ b/hyperscan.spec
@@ -1,6 +1,6 @@
 Name:           hyperscan
 Version:        5.4.2
-Release:        1
+Release:        2
 Summary:        High-performance regular expression matching library
 
 License:        BSD
@@ -9,6 +9,7 @@ Source0:        https://github.com/intel/%{name}/archive/v%{version}.tar.gz#/%{name}-%{version}.tar.gz
 
 Patch0:         hyperscan-aarch64-support.patch
 Patch1:         Fix-hyperscan-gcc10.patch
+Patch2:         support-clang-build.patch
 
 BuildRequires:  gcc-c++
 BuildRequires:  boost-devel
@@ -85,6 +86,9 @@ cd -
 %{_includedir}/hs/
 
 %changelog
+* Fri Sep 9 2023 luofeng - 5.4.2-2
+- support clang build
+
 * Fri Aug 18 2023 wangqia - 5.4.2-1
 - Update to 5.4.2
 
diff --git a/support-clang-build.patch b/support-clang-build.patch
new file mode 100644
index 0000000..6e8f0b2
--- /dev/null
+++ b/support-clang-build.patch
@@ -0,0 +1,132 @@
+From fffd652b60726c532c1f4091e4bae6ddc3b5f013 Mon Sep 17 00:00:00 2001
+From: luofeng 00425382
+Date: Sat, 9 Sep 2023 14:52:49 +0800
+Subject: [PATCH] support clang build
+
+---
+ src/util/simd_arm.h | 45 +++++++++++++++++++++++++++++++++------------
+ 1 file changed, 33 insertions(+), 12 deletions(-)
+
+diff --git a/src/util/simd_arm.h b/src/util/simd_arm.h
+index cce119f..8bfef99 100644
+--- a/src/util/simd_arm.h
++++ b/src/util/simd_arm.h
+@@ -127,20 +127,28 @@ static really_inline u32 diffrich64_128(m128 a, m128 b) {
+                  (vgetq_lane_u64(tmp.vect_u64, 0) & 0x1));
+ }
+ 
++#define vshlq_n_s64_m(a0, a1) do{\
++    vshlq_n_s64(a0, a1); \
++}while(0)
++
+ static really_really_inline m128 lshift64_m128(m128 a, unsigned b) {
+     assert(b <= 63);
+     m128 result;
+-    result.vect_s64 = vshlq_n_s64(a.vect_s64, b);
++    result.vect_s64 = vshlq_n_s64_m(a.vect_s64, b); // todo
+     return result;
+ }
+ 
++#define vshrq_n_u64_m(a0, a1) do{\
++    vshrq_n_u64(a0, a1); \
++}while(0)
++
+ static really_really_inline m128 rshift64_m128(m128 a, int imm8) {
+     assert(imm8 >= 0 && imm8 <= 63);
+     if (unlikely(imm8 == 0)) {
+         return a;
+     }
+     m128 result;
+-    result.vect_u64 = vshrq_n_u64(a.vect_u64, imm8);
++    result.vect_u64 = vshrq_n_u64_m(a.vect_u64, imm8); // todo
+     return result;
+ }
+ 
+@@ -160,10 +168,15 @@ static really_really_inline u32 movemask128(m128 a) {
+                  ((u32)vgetq_lane_u8(result.vect_u8, 8) << 8));
+ }
+ 
++#define vextq_s8_m(a0, a1, a2) do{\
++    vextq_s8(a0, a1, a2); \
++}while(0)
++
++
+ static really_really_inline m128 rshiftbyte_m128(m128 a, int imm8) {
+     assert(imm8 >= 0 && imm8 <= 15);
+     m128 result;
+-    result.vect_s8 = vextq_s8(a.vect_s8, vdupq_n_s8(0), imm8);
++    result.vect_s8 = vextq_s8_m(a.vect_s8, vdupq_n_s8(0), imm8); // todo
+     return result;
+ }
+ 
+@@ -173,7 +186,7 @@ static really_really_inline m128 lshiftbyte_m128(m128 a, int imm8) {
+     if (unlikely(imm8 == 0)) {
+         return a;
+     }
+-    result.vect_s8 = vextq_s8(vdupq_n_s8(0), a.vect_s8, (16 - imm8));
++    result.vect_s8 = vextq_s8_m(vdupq_n_s8(0), a.vect_s8, (16 - imm8)); // todo
+     return result;
+ }
+ 
+@@ -216,16 +229,24 @@ static really_inline m128 load_m128_from_u64a(const u64a *p) {
+     return result;
+ }
+ 
++#define vgetq_lane_s32_m(a0, a1) do{\
++    vgetq_lane_s32(a0, a1); \
++}while(0)
++
++#define vgetq_lane_s64_m(a0, a1) do{\
++    vgetq_lane_s64(a0, a1); \
++}while(0)
++
+ /*The x86 platform does not perform the lower 2 bit operation.
+ If the value of imm exceeds 2 bit, a compilation error occurs.*/
+ static really_inline u32 extract32from128(m128 a, int imm) {
+-    return vgetq_lane_s32(a.vect_s32, imm & 0x0003);
++    return vgetq_lane_s32_m(a.vect_s32, imm & 0x0003); // todo
+ }
+ 
+ /*The x86 platform does not perform the lower 1 bit operation.
+ If the value of imm exceeds 1 bit, a compilation error occurs.*/
+ static really_inline u64a extract64from128(m128 a, int imm) {
+-    return vgetq_lane_s64(a.vect_s64, imm & 0x0001);
++    return vgetq_lane_s64_m(a.vect_s64, imm & 0x0001); // todo
+ }
+ 
+ #define extractlow64from256(a) movq(a.lo)
+@@ -234,15 +255,15 @@ static really_inline u64a extract64from128(m128 a, int imm) {
+ /*The x86 platform does not perform the lower 2 bit operation.
+ If the value of imm exceeds 2 bit, a compilation error occurs.*/
+ static really_inline u32 extract32from256(m256 a, int imm) {
+-    return vgetq_lane_s32((imm >> 2) ? a.hi.vect_s32 : a.lo.vect_s32,
+-                          imm & 0x0003);
++    return vgetq_lane_s32_m((imm >> 2) ? a.hi.vect_s32 : a.lo.vect_s32,
++                          imm & 0x0003); // todo
+ }
+ 
+ /*The x86 platform does not perform the lower 1 bit operation.
+ If the value of imm exceeds 1 bit, a compilation error occurs.*/
+ static really_inline u64a extract64from256(m256 a, int imm) {
+-    return vgetq_lane_s64((imm >> 1) ? a.hi.vect_s64 : a.lo.vect_s64,
+-                          imm & 0x0001);
++    return vgetq_lane_s64_m((imm >> 1) ? a.hi.vect_s64 : a.lo.vect_s64,
++                          imm & 0x0001); // todo
+ }
+ 
+ static really_inline m128 and128(m128 a, m128 b) {
+@@ -355,9 +376,9 @@ static really_inline m128 palignr(m128 a, m128 b, int count) {
+     m128 result;
+     count = count & 0xff;
+     if (likely(count < 16)) {
+-        result.vect_s8 = vextq_s8(b.vect_s8, a.vect_s8, count);
++        result.vect_s8 = vextq_s8_m(b.vect_s8, a.vect_s8, count); // todo
+     } else if (count < 32) {
+-        result.vect_s8 = vextq_s8(a.vect_s8, vdupq_n_s8(0x0), count - 16);
++        result.vect_s8 = vextq_s8_m(a.vect_s8, vdupq_n_s8(0x0), count - 16); // todo
+     } else {
+         result.vect_s32 = vdupq_n_s32(0);
+     }
+-- 
+2.28.0.windows.1
+
-- 
Gitee
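
A note on the do{ ... }while(0) pattern used for the _m wrapper macros above: a do/while block is a statement, so as written it cannot yield a value to the assignments at the patched call sites, whereas a GNU statement expression (an extension accepted by both GCC and Clang) can. Below is a minimal standalone sketch of the difference, using a plain integer shift in place of the NEON intrinsics; it is illustrative only and not part of the patch.

#include <assert.h>
#include <stdint.h>

/* Statement-style macro: legal only where a statement may appear. */
#define SHL64_STMT(dst, val, n) do { (dst) = (uint64_t)(val) << (n); } while (0)

/* Statement-expression macro: the last expression is the macro's value,
 * so it may appear on the right-hand side of an assignment. */
#define SHL64_EXPR(val, n) ({ (uint64_t)(val) << (n); })

int main(void) {
    uint64_t a = 0;
    SHL64_STMT(a, 3, 4);            /* fine: expands to a statement */
    uint64_t b = SHL64_EXPR(3, 4);  /* fine: expands to an expression */
    /* uint64_t c = SHL64_STMT(c, 3, 4); would not compile, because a
     * do/while statement is not an expression. */
    assert(a == 48 && b == 48);
    return 0;
}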