这是indexloc提供的服务,不要输入任何密码
Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 3 additions & 1 deletion CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -981,8 +981,10 @@ if(WITH_OPTIM)
add_definitions(-DX86_AVX512)
list(APPEND AVX512_SRCS ${ARCHDIR}/adler32_avx512.c)
add_feature_info(AVX512_ADLER32 1 "Support AVX512-accelerated adler32, using \"${AVX512FLAG}\"")
list(APPEND ZLIB_ARCH_SRCS ${AVX512_SRCS})
list(APPEND AVX512_SRCS ${ARCHDIR}/chunkset_avx512.c)
add_feature_info(AVX512_CHUNKSET 1 "Support AVX512 optimized chunkset, using \"${AVX512FLAG}\"")
list(APPEND ZLIB_ARCH_HDRS ${ARCHDIR}/adler32_avx512_p.h)
list(APPEND ZLIB_ARCH_SRCS ${AVX512_SRCS})
set_property(SOURCE ${AVX512_SRCS} PROPERTY COMPILE_FLAGS "${AVX512FLAG} ${NOLTOFLAG}")
else()
set(WITH_AVX512 OFF)
Expand Down
11 changes: 9 additions & 2 deletions arch/x86/Makefile.in
Original file line number Diff line number Diff line change
Expand Up @@ -8,8 +8,8 @@ SFLAGS=
INCLUDES=
SUFFIX=

AVX512FLAG=-mavx512f -mavx512dq -mavx512vl -mavx512bw
AVX512VNNIFLAG=-mavx512vnni
AVX512FLAG=-mavx512f -mavx512dq -mavx512vl -mavx512bw -mbmi2
AVX512VNNIFLAG=-mavx512vnni -mbmi2
AVX2FLAG=-mavx2
SSE2FLAG=-msse2
SSSE3FLAG=-mssse3
Expand All @@ -31,6 +31,7 @@ all: \
adler32_sse42.o adler32_sse42.lo \
adler32_ssse3.o adler32_ssse3.lo \
chunkset_avx2.o chunkset_avx2.lo \
chunkset_avx512.o chunkset_avx512.lo \
chunkset_sse2.o chunkset_sse2.lo \
chunkset_ssse3.o chunkset_ssse3.lo \
compare256_avx2.o compare256_avx2.lo \
Expand All @@ -52,6 +53,12 @@ chunkset_avx2.o:
chunkset_avx2.lo:
$(CC) $(SFLAGS) $(AVX2FLAG) $(NOLTOFLAG) -DPIC $(INCLUDES) -c -o $@ $(SRCDIR)/chunkset_avx2.c

chunkset_avx512.o:
$(CC) $(CFLAGS) $(AVX512FLAG) $(NOLTOFLAG) $(INCLUDES) -c -o $@ $(SRCDIR)/chunkset_avx512.c

chunkset_avx512.lo:
$(CC) $(SFLAGS) $(AVX512FLAG) $(NOLTOFLAG) -DPIC $(INCLUDES) -c -o $@ $(SRCDIR)/chunkset_avx512.c

chunkset_sse2.o:
$(CC) $(CFLAGS) $(SSE2FLAG) $(NOLTOFLAG) $(INCLUDES) -c -o $@ $(SRCDIR)/chunkset_sse2.c

Expand Down
44 changes: 44 additions & 0 deletions arch/x86/avx2_tables.h
Original file line number Diff line number Diff line change
@@ -0,0 +1,44 @@
#ifndef AVX2_TABLES_H
#define AVX2_TABLES_H

#include "../generic/chunk_permute_table.h"

/* Lookup table mapping a match distance (3..31) to a {permute-table offset, remainder}
 * pair used when broadcasting a short repeating pattern across a SIMD chunk.
 *
 * Populate don't cares so that this is a direct lookup (with some indirection into the
 * permute table), because dist can never be 0 - 2, we'll start with an offset,
 * subtracting 3 from the input. Power-of-two distances (4, 8, 16) are handled by
 * dedicated broadcast paths, so their slots are don't-care entries. */
static const lut_rem_pair perm_idx_lut[29] = {
    { 0, 2},                /* 3 */
    { 0, 0},                /* don't care */
    { 1 * 32, 2},           /* 5 */
    { 2 * 32, 2},           /* 6 */
    { 3 * 32, 4},           /* 7 */
    { 0 * 32, 0},           /* don't care */
    { 4 * 32, 5},           /* 9 */
    { 5 * 32, 22},          /* 10 */
    { 6 * 32, 21},          /* 11 */
    { 7 * 32, 20},          /* 12 */
    { 8 * 32, 6},           /* 13 */
    { 9 * 32, 4},           /* 14 */
    {10 * 32, 2},           /* 15 */
    { 0 * 32, 0},           /* don't care */
    {11 * 32, 15},          /* 17 */
    {11 * 32 + 16, 14},     /* 18 */
    {11 * 32 + 16 * 2, 13}, /* 19 */
    {11 * 32 + 16 * 3, 12}, /* 20 */
    {11 * 32 + 16 * 4, 11}, /* 21 */
    {11 * 32 + 16 * 5, 10}, /* 22 */
    {11 * 32 + 16 * 6, 9},  /* 23 */
    {11 * 32 + 16 * 7, 8},  /* 24 */
    {11 * 32 + 16 * 8, 7},  /* 25 */
    {11 * 32 + 16 * 9, 6},  /* 26 */
    {11 * 32 + 16 * 10, 5}, /* 27 */
    {11 * 32 + 16 * 11, 4}, /* 28 */
    {11 * 32 + 16 * 12, 3}, /* 29 */
    {11 * 32 + 16 * 13, 2}, /* 30 */
    {11 * 32 + 16 * 14, 1}  /* 31 */
};

/* Remainder values for the 16-byte half-chunk path, indexed by dist - 3
 * (distances 3 through 15). */
static const uint16_t half_rem_vals[13] = {
    1, 0, 1, 4, 2, 0, 7, 6, 5, 4, 3, 2, 1
};

#endif
40 changes: 1 addition & 39 deletions arch/x86/chunkset_avx2.c
Original file line number Diff line number Diff line change
Expand Up @@ -4,8 +4,8 @@
#include "zbuild.h"

#ifdef X86_AVX2
#include "avx2_tables.h"
#include <immintrin.h>
#include "../generic/chunk_permute_table.h"
#include "x86_intrins.h"

typedef __m256i chunk_t;
Expand All @@ -19,44 +19,6 @@ typedef __m128i halfchunk_t;
#define HAVE_CHUNK_MAG
#define HAVE_HALF_CHUNK

/* Populate don't cares so that this is a direct lookup (with some indirection into the permute table), because dist can
* never be 0 - 2, we'll start with an offset, subtracting 3 from the input */
static const lut_rem_pair perm_idx_lut[29] = {
{ 0, 2}, /* 3 */
{ 0, 0}, /* don't care */
{ 1 * 32, 2}, /* 5 */
{ 2 * 32, 2}, /* 6 */
{ 3 * 32, 4}, /* 7 */
{ 0 * 32, 0}, /* don't care */
{ 4 * 32, 5}, /* 9 */
{ 5 * 32, 22}, /* 10 */
{ 6 * 32, 21}, /* 11 */
{ 7 * 32, 20}, /* 12 */
{ 8 * 32, 6}, /* 13 */
{ 9 * 32, 4}, /* 14 */
{10 * 32, 2}, /* 15 */
{ 0 * 32, 0}, /* don't care */
{11 * 32, 15}, /* 17 */
{11 * 32 + 16, 14}, /* 18 */
{11 * 32 + 16 * 2, 13}, /* 19 */
{11 * 32 + 16 * 3, 12}, /* 20 */
{11 * 32 + 16 * 4, 11}, /* 21 */
{11 * 32 + 16 * 5, 10}, /* 22 */
{11 * 32 + 16 * 6, 9}, /* 23 */
{11 * 32 + 16 * 7, 8}, /* 24 */
{11 * 32 + 16 * 8, 7}, /* 25 */
{11 * 32 + 16 * 9, 6}, /* 26 */
{11 * 32 + 16 * 10, 5}, /* 27 */
{11 * 32 + 16 * 11, 4}, /* 28 */
{11 * 32 + 16 * 12, 3}, /* 29 */
{11 * 32 + 16 * 13, 2}, /* 30 */
{11 * 32 + 16 * 14, 1} /* 31 */
};

static const uint16_t half_rem_vals[13] = {
1, 0, 1, 4, 2, 0, 7, 6, 5, 4, 3, 2, 1
};

static inline void chunkmemset_2(uint8_t *from, chunk_t *chunk) {
int16_t tmp;
memcpy(&tmp, from, sizeof(tmp));
Expand Down
189 changes: 189 additions & 0 deletions arch/x86/chunkset_avx512.c
Original file line number Diff line number Diff line change
@@ -0,0 +1,189 @@
/* chunkset_avx512.c -- AVX512 inline functions to copy small data chunks.
* For conditions of distribution and use, see copyright notice in zlib.h
*/
#include "zbuild.h"

#ifdef X86_AVX512

#include "avx2_tables.h"
#include <immintrin.h>
#include "x86_intrins.h"

typedef __m256i chunk_t;
typedef __m128i halfchunk_t;
typedef __mmask32 mask_t;
typedef __mmask16 halfmask_t;

#define HAVE_CHUNKMEMSET_2
#define HAVE_CHUNKMEMSET_4
#define HAVE_CHUNKMEMSET_8
#define HAVE_CHUNKMEMSET_16
#define HAVE_CHUNKMEMSET_1
#define HAVE_CHUNK_MAG
#define HAVE_HALF_CHUNK
#define HAVE_MASKED_READWRITE
#define HAVE_CHUNKCOPY
#define HAVE_HALFCHUNKCOPY

/* Build a 16-lane byte mask with the low `len` bits set, for masked 128-bit
 * loads/stores. Uses BMI2 _bzhi_u32 (zero high bits starting at index len);
 * len > 16 is not expected by callers. */
static inline halfmask_t gen_half_mask(unsigned len) {
    return (halfmask_t)_bzhi_u32(0xFFFF, len);
}

/* Build a 32-lane byte mask with the low `len` bits set, for masked 256-bit
 * loads/stores. Uses BMI2 _bzhi_u32; len > 32 is not expected by callers. */
static inline mask_t gen_mask(unsigned len) {
    return (mask_t)_bzhi_u32(0xFFFFFFFF, len);
}

/* Broadcast the 2-byte pattern at `from` across the whole 32-byte chunk.
 * memcpy is used for the unaligned, strict-aliasing-safe scalar load. */
static inline void chunkmemset_2(uint8_t *from, chunk_t *chunk) {
    int16_t tmp;
    memcpy(&tmp, from, sizeof(tmp));
    *chunk = _mm256_set1_epi16(tmp);
}

/* Broadcast the 4-byte pattern at `from` across the whole 32-byte chunk. */
static inline void chunkmemset_4(uint8_t *from, chunk_t *chunk) {
    int32_t tmp;
    memcpy(&tmp, from, sizeof(tmp));
    *chunk = _mm256_set1_epi32(tmp);
}

/* Broadcast the 8-byte pattern at `from` across the whole 32-byte chunk. */
static inline void chunkmemset_8(uint8_t *from, chunk_t *chunk) {
    int64_t tmp;
    memcpy(&tmp, from, sizeof(tmp));
    *chunk = _mm256_set1_epi64x(tmp);
}

/* Broadcast the 16-byte pattern at `from` into both 128-bit lanes of the chunk. */
static inline void chunkmemset_16(uint8_t *from, chunk_t *chunk) {
    *chunk = _mm256_broadcastsi128_si256(_mm_loadu_si128((__m128i*)from));
}

/* Unaligned 32-byte load of a full chunk from `s`. */
static inline void loadchunk(uint8_t const *s, chunk_t *chunk) {
    *chunk = _mm256_loadu_si256((__m256i *)s);
}

/* Unaligned 32-byte store of a full chunk to `out`. */
static inline void storechunk(uint8_t *out, chunk_t *chunk) {
    _mm256_storeu_si256((__m256i *)out, *chunk);
}

/* Byte-masked 32-byte store: only the bytes whose bit is set in `mask` are
 * written to `out` (AVX512VL+BW masked store), avoiding out-of-bounds writes. */
static inline void storechunk_mask(uint8_t *out, mask_t mask, chunk_t *chunk) {
    _mm256_mask_storeu_epi8(out, mask, *chunk);
}

static inline uint8_t* CHUNKCOPY(uint8_t *out, uint8_t const *from, unsigned len) {

Check warning on line 70 in arch/x86/chunkset_avx512.c

View check run for this annotation

Codecov / codecov/patch

arch/x86/chunkset_avx512.c#L70

Added line #L70 was not covered by tests
Assert(len > 0, "chunkcopy should never have a length 0");

unsigned rem = len % sizeof(chunk_t);
mask_t rem_mask = gen_mask(rem);

Check warning on line 74 in arch/x86/chunkset_avx512.c

View check run for this annotation

Codecov / codecov/patch

arch/x86/chunkset_avx512.c#L73-L74

Added lines #L73 - L74 were not covered by tests

/* Since this is only ever called if dist >= a chunk, we don't need a masked load */
chunk_t chunk;
loadchunk(from, &chunk);
_mm256_mask_storeu_epi8(out, rem_mask, chunk);
out += rem;
from += rem;
len -= rem;

Check warning on line 82 in arch/x86/chunkset_avx512.c

View check run for this annotation

Codecov / codecov/patch

arch/x86/chunkset_avx512.c#L77-L82

Added lines #L77 - L82 were not covered by tests

while (len > 0) {
loadchunk(from, &chunk);
storechunk(out, &chunk);
out += sizeof(chunk_t);
from += sizeof(chunk_t);
len -= sizeof(chunk_t);

Check warning on line 89 in arch/x86/chunkset_avx512.c

View check run for this annotation

Codecov / codecov/patch

arch/x86/chunkset_avx512.c#L85-L89

Added lines #L85 - L89 were not covered by tests
}

return out;

Check warning on line 92 in arch/x86/chunkset_avx512.c

View check run for this annotation

Codecov / codecov/patch

arch/x86/chunkset_avx512.c#L92

Added line #L92 was not covered by tests
}

/* Build a 32-byte "magazine" chunk that repeats the `dist`-byte pattern at
 * `buf`, and report via *chunk_rem how many pattern bytes of the chunk are
 * left over (from perm_idx_lut). dist must be in [3, 31]. */
static inline chunk_t GET_CHUNK_MAG(uint8_t *buf, uint32_t *chunk_rem, uint32_t dist) {
    lut_rem_pair lut_rem = perm_idx_lut[dist - 3];
    __m256i ret_vec;
    *chunk_rem = lut_rem.remval;

    /* See the AVX2 implementation for more detailed comments. This is that + some masked
     * loads to avoid an out of bounds read on the heap */

    if (dist < 16) {
        /* The permute table indexes a single 16-byte lane; add 16 to the upper
         * lane's indices so vpshufb picks from the duplicated pattern there. */
        const __m256i permute_xform =
            _mm256_setr_epi8(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                             16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16);
        __m256i perm_vec = _mm256_load_si256((__m256i*)(permute_table+lut_rem.idx));
        halfmask_t load_mask = gen_half_mask(dist);
        /* Masked load: reads only `dist` bytes, zeroing the rest. */
        __m128i ret_vec0 = _mm_maskz_loadu_epi8(load_mask, buf);
        perm_vec = _mm256_add_epi8(perm_vec, permute_xform);
        ret_vec = _mm256_inserti128_si256(_mm256_castsi128_si256(ret_vec0), ret_vec0, 1);
        ret_vec = _mm256_shuffle_epi8(ret_vec, perm_vec);
    } else {
        /* dist in [17, 31]: the first 16 bytes load unmasked; the tail load is
         * masked to dist - 16 bytes to stay in bounds. */
        halfmask_t load_mask = gen_half_mask(dist - 16);
        __m128i ret_vec0 = _mm_loadu_si128((__m128i*)buf);
        __m128i ret_vec1 = _mm_maskz_loadu_epi8(load_mask, (__m128i*)(buf + 16));
        __m128i perm_vec1 = _mm_load_si128((__m128i*)(permute_table + lut_rem.idx));
        /* Lanes whose permute index is <= 15 must source from the first 16
         * bytes; the rest keep the (masked-loaded) tail bytes. */
        halfmask_t xlane_mask = _mm_cmp_epi8_mask(perm_vec1, _mm_set1_epi8(15), _MM_CMPINT_LE);
        __m128i latter_half = _mm_mask_shuffle_epi8(ret_vec1, xlane_mask, ret_vec0, perm_vec1);
        ret_vec = _mm256_inserti128_si256(_mm256_castsi128_si256(ret_vec0), latter_half, 1);
    }

    return ret_vec;
}

/* Unaligned 16-byte load of a half chunk from `s`. */
static inline void loadhalfchunk(uint8_t const *s, halfchunk_t *chunk) {
    *chunk = _mm_loadu_si128((__m128i *)s);
}

/* Unaligned 16-byte store of a half chunk to `out`. */
static inline void storehalfchunk(uint8_t *out, halfchunk_t *chunk) {
    _mm_storeu_si128((__m128i *)out, *chunk);
}

/* Widen a 16-byte half chunk to a 32-byte chunk, zeroing the upper lane. */
static inline chunk_t halfchunk2whole(halfchunk_t *chunk) {
    /* We zero extend mostly to appease some memory sanitizers. These bytes are ultimately
     * unlikely to be actually written or read from */
    return _mm256_zextsi128_si256(*chunk);
}

static inline halfchunk_t GET_HALFCHUNK_MAG(uint8_t *buf, uint32_t *chunk_rem, uint32_t dist) {
lut_rem_pair lut_rem = perm_idx_lut[dist - 3];
__m128i perm_vec, ret_vec;
halfmask_t load_mask = gen_half_mask(dist);
ret_vec = _mm_maskz_loadu_epi8(load_mask, buf);
*chunk_rem = half_rem_vals[dist - 3];

Check warning on line 145 in arch/x86/chunkset_avx512.c

View check run for this annotation

Codecov / codecov/patch

arch/x86/chunkset_avx512.c#L140-L145

Added lines #L140 - L145 were not covered by tests

perm_vec = _mm_load_si128((__m128i*)(permute_table + lut_rem.idx));
ret_vec = _mm_shuffle_epi8(ret_vec, perm_vec);

Check warning on line 148 in arch/x86/chunkset_avx512.c

View check run for this annotation

Codecov / codecov/patch

arch/x86/chunkset_avx512.c#L148

Added line #L148 was not covered by tests

return ret_vec;

Check warning on line 150 in arch/x86/chunkset_avx512.c

View check run for this annotation

Codecov / codecov/patch

arch/x86/chunkset_avx512.c#L150

Added line #L150 was not covered by tests
}

static inline uint8_t* HALFCHUNKCOPY(uint8_t *out, uint8_t const *from, unsigned len) {

Check warning on line 153 in arch/x86/chunkset_avx512.c

View check run for this annotation

Codecov / codecov/patch

arch/x86/chunkset_avx512.c#L153

Added line #L153 was not covered by tests
Assert(len > 0, "chunkcopy should never have a length 0");

unsigned rem = len % sizeof(halfchunk_t);
halfmask_t rem_mask = gen_half_mask(rem);

Check warning on line 157 in arch/x86/chunkset_avx512.c

View check run for this annotation

Codecov / codecov/patch

arch/x86/chunkset_avx512.c#L156-L157

Added lines #L156 - L157 were not covered by tests

/* Since this is only ever called if dist >= a chunk, we don't need a masked load */
halfchunk_t chunk;
loadhalfchunk(from, &chunk);
_mm_mask_storeu_epi8(out, rem_mask, chunk);
out += rem;
from += rem;
len -= rem;

Check warning on line 165 in arch/x86/chunkset_avx512.c

View check run for this annotation

Codecov / codecov/patch

arch/x86/chunkset_avx512.c#L160-L165

Added lines #L160 - L165 were not covered by tests

while (len > 0) {
loadhalfchunk(from, &chunk);
storehalfchunk(out, &chunk);
out += sizeof(halfchunk_t);
from += sizeof(halfchunk_t);
len -= sizeof(halfchunk_t);

Check warning on line 172 in arch/x86/chunkset_avx512.c

View check run for this annotation

Codecov / codecov/patch

arch/x86/chunkset_avx512.c#L168-L172

Added lines #L168 - L172 were not covered by tests
}

return out;

Check warning on line 175 in arch/x86/chunkset_avx512.c

View check run for this annotation

Codecov / codecov/patch

arch/x86/chunkset_avx512.c#L175

Added line #L175 was not covered by tests
}

#define CHUNKSIZE chunksize_avx512
#define CHUNKUNROLL chunkunroll_avx512
#define CHUNKMEMSET chunkmemset_avx512
#define CHUNKMEMSET_SAFE chunkmemset_safe_avx512

#include "chunkset_tpl.h"

#define INFLATE_FAST inflate_fast_avx512

#include "inffast_tpl.h"

#endif
4 changes: 3 additions & 1 deletion arch/x86/x86_features.c
Original file line number Diff line number Diff line change
Expand Up @@ -97,6 +97,8 @@ void Z_INTERNAL x86_check_features(struct x86_cpu_features *features) {
features->has_avx2 = ebx & 0x20;
}

features->has_bmi2 = ebx & 0x8;

// check AVX512 bits if the OS supports saving ZMM registers
if (features->has_os_save_zmm) {
features->has_avx512f = ebx & 0x00010000;
Expand All @@ -108,7 +110,7 @@ void Z_INTERNAL x86_check_features(struct x86_cpu_features *features) {
features->has_avx512vl = ebx & 0x80000000;
}
features->has_avx512_common = features->has_avx512f && features->has_avx512dq && features->has_avx512bw \
&& features->has_avx512vl;
&& features->has_avx512vl && features->has_bmi2;
features->has_avx512vnni = ecx & 0x800;
}
}
Expand Down
1 change: 1 addition & 0 deletions arch/x86/x86_features.h
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,7 @@ struct x86_cpu_features {
int has_avx512vl;
int has_avx512_common; // Enabled when AVX512(F,DQ,BW,VL) are all enabled.
int has_avx512vnni;
int has_bmi2;
int has_sse2;
int has_ssse3;
int has_sse42;
Expand Down
9 changes: 9 additions & 0 deletions arch/x86/x86_functions.h
Original file line number Diff line number Diff line change
Expand Up @@ -46,6 +46,9 @@ uint8_t* chunkmemset_safe_avx2(uint8_t *out, uint8_t *from, unsigned len, unsign
#ifdef X86_AVX512
uint32_t adler32_avx512(uint32_t adler, const uint8_t *buf, size_t len);
uint32_t adler32_fold_copy_avx512(uint32_t adler, uint8_t *dst, const uint8_t *src, size_t len);
uint32_t chunksize_avx512(void);
uint8_t* chunkmemset_safe_avx512(uint8_t *out, uint8_t *from, unsigned len, unsigned left);
void inflate_fast_avx512(PREFIX3(stream)* strm, uint32_t start);
#endif
#ifdef X86_AVX512VNNI
uint32_t adler32_avx512_vnni(uint32_t adler, const uint8_t *buf, size_t len);
Expand Down Expand Up @@ -146,6 +149,12 @@ uint32_t crc32_vpclmulqdq(uint32_t crc32, const uint8_t *buf, size_t len);
# define native_adler32 adler32_avx512
# undef native_adler32_fold_copy
# define native_adler32_fold_copy adler32_fold_copy_avx512
# undef native_chunkmemset_safe
# define native_chunkmemset_safe chunkmemset_safe_avx512
# undef native_chunksize
# define native_chunksize chunksize_avx512
# undef native_inflate_fast
# define native_inflate_fast inflate_fast_avx512
// X86 - AVX512 (VNNI)
# if defined(X86_AVX512VNNI) && defined(__AVX512VNNI__)
# undef native_adler32
Expand Down
Loading
Loading