Vendor things

John Doty 2024-03-08 11:03:01 -08:00
parent 5deceec006
commit 977e3c17e5
19434 changed files with 10682014 additions and 0 deletions


@@ -0,0 +1,214 @@
use super::Adler32Imp;
/// Resolves update implementation if CPU supports avx2 instructions.
pub fn get_imp() -> Option<Adler32Imp> {
get_imp_inner()
}
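// Dispatch strategy: with the `std` feature the CPU is probed at runtime via
// `is_x86_feature_detected!`; without it the decision falls back to the
// compile-time `target_feature`, and otherwise no accelerated update is offered.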
#[inline]
#[cfg(all(feature = "std", any(target_arch = "x86", target_arch = "x86_64")))]
fn get_imp_inner() -> Option<Adler32Imp> {
if std::is_x86_feature_detected!("avx2") {
Some(imp::update)
} else {
None
}
}
#[inline]
#[cfg(all(
target_feature = "avx2",
not(all(feature = "std", any(target_arch = "x86", target_arch = "x86_64")))
))]
fn get_imp_inner() -> Option<Adler32Imp> {
Some(imp::update)
}
#[inline]
#[cfg(all(
not(target_feature = "avx2"),
not(all(feature = "std", any(target_arch = "x86", target_arch = "x86_64")))
))]
fn get_imp_inner() -> Option<Adler32Imp> {
None
}
#[cfg(all(
any(target_arch = "x86", target_arch = "x86_64"),
any(feature = "std", target_feature = "avx2")
))]
mod imp {
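// NMAX is the largest number of bytes that can be added to the running sums
// before they must be reduced modulo MOD to avoid overflowing a u32;
// CHUNK_SIZE rounds it down to a whole number of 32-byte SIMD blocks.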
const MOD: u32 = 65521;
const NMAX: usize = 5552;
const BLOCK_SIZE: usize = 32;
const CHUNK_SIZE: usize = NMAX / BLOCK_SIZE * BLOCK_SIZE;
#[cfg(target_arch = "x86")]
use core::arch::x86::*;
#[cfg(target_arch = "x86_64")]
use core::arch::x86_64::*;
pub fn update(a: u16, b: u16, data: &[u8]) -> (u16, u16) {
unsafe { update_imp(a, b, data) }
}
#[inline]
#[target_feature(enable = "avx2")]
unsafe fn update_imp(a: u16, b: u16, data: &[u8]) -> (u16, u16) {
let mut a = a as u32;
let mut b = b as u32;
let chunks = data.chunks_exact(CHUNK_SIZE);
let remainder = chunks.remainder();
for chunk in chunks {
update_chunk_block(&mut a, &mut b, chunk);
}
update_block(&mut a, &mut b, remainder);
(a as u16, b as u16)
}
#[inline]
unsafe fn update_chunk_block(a: &mut u32, b: &mut u32, chunk: &[u8]) {
debug_assert_eq!(
chunk.len(),
CHUNK_SIZE,
"Unexpected chunk size (expected {}, got {})",
CHUNK_SIZE,
chunk.len()
);
reduce_add_blocks(a, b, chunk);
*a %= MOD;
*b %= MOD;
}
#[inline]
unsafe fn update_block(a: &mut u32, b: &mut u32, chunk: &[u8]) {
debug_assert!(
chunk.len() <= CHUNK_SIZE,
"Unexpected chunk size (expected <= {}, got {})",
CHUNK_SIZE,
chunk.len()
);
for byte in reduce_add_blocks(a, b, chunk) {
*a += *byte as u32;
*b += *a;
}
*a %= MOD;
*b %= MOD;
}
#[inline(always)]
unsafe fn reduce_add_blocks<'a>(a: &mut u32, b: &mut u32, chunk: &'a [u8]) -> &'a [u8] {
if chunk.len() < BLOCK_SIZE {
return chunk;
}
let blocks = chunk.chunks_exact(BLOCK_SIZE);
let blocks_remainder = blocks.remainder();
let one_v = _mm256_set1_epi16(1);
let zero_v = _mm256_setzero_si256();
let weights = get_weights();
let mut p_v = _mm256_set_epi32(0, 0, 0, 0, 0, 0, 0, (*a * blocks.len() as u32) as _);
let mut a_v = _mm256_setzero_si256();
let mut b_v = _mm256_set_epi32(0, 0, 0, 0, 0, 0, 0, *b as _);
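// Per 32-byte block the scalar recurrence is
//   a' = a + sum(d[i])
//   b' = b + 32 * a + sum((32 - i) * d[i])
// a_v accumulates the plain byte sums, b_v the weighted sums, and p_v the
// value of `a` in effect before each block; the `p_v << 5` after the loop
// folds the deferred `32 * a` terms back into b.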
for block in blocks {
let block_ptr = block.as_ptr() as *const _;
let block = _mm256_loadu_si256(block_ptr);
p_v = _mm256_add_epi32(p_v, a_v);
a_v = _mm256_add_epi32(a_v, _mm256_sad_epu8(block, zero_v));
let mad = _mm256_maddubs_epi16(block, weights);
b_v = _mm256_add_epi32(b_v, _mm256_madd_epi16(mad, one_v));
}
b_v = _mm256_add_epi32(b_v, _mm256_slli_epi32(p_v, 5));
*a += reduce_add(a_v);
*b = reduce_add(b_v);
blocks_remainder
}
#[inline(always)]
unsafe fn reduce_add(v: __m256i) -> u32 {
let sum = _mm_add_epi32(_mm256_castsi256_si128(v), _mm256_extracti128_si256(v, 1));
let hi = _mm_unpackhi_epi64(sum, sum);
let sum = _mm_add_epi32(hi, sum);
let hi = _mm_shuffle_epi32(sum, crate::imp::_MM_SHUFFLE(2, 3, 0, 1));
let sum = _mm_add_epi32(sum, hi);
_mm_cvtsi128_si32(sum) as _
}
#[inline(always)]
unsafe fn get_weights() -> __m256i {
_mm256_set_epi8(
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23,
24, 25, 26, 27, 28, 29, 30, 31, 32,
)
}
}
#[cfg(test)]
mod tests {
use rand::Rng;
#[test]
fn zeroes() {
assert_sum_eq(&[]);
assert_sum_eq(&[0]);
assert_sum_eq(&[0, 0]);
assert_sum_eq(&[0; 100]);
assert_sum_eq(&[0; 1024]);
assert_sum_eq(&[0; 1024 * 1024]);
}
#[test]
fn ones() {
assert_sum_eq(&[]);
assert_sum_eq(&[1]);
assert_sum_eq(&[1, 1]);
assert_sum_eq(&[1; 100]);
assert_sum_eq(&[1; 1024]);
assert_sum_eq(&[1; 1024 * 1024]);
}
#[test]
fn random() {
let mut random = [0; 1024 * 1024];
rand::thread_rng().fill(&mut random[..]);
assert_sum_eq(&random[..1]);
assert_sum_eq(&random[..100]);
assert_sum_eq(&random[..1024]);
assert_sum_eq(&random[..1024 * 1024]);
}
/// Example calculation from https://en.wikipedia.org/wiki/Adler-32.
#[test]
fn wiki() {
assert_sum_eq(b"Wikipedia");
}
fn assert_sum_eq(data: &[u8]) {
if let Some(update) = super::get_imp() {
let (a, b) = update(1, 0, data);
let left = u32::from(b) << 16 | u32::from(a);
let right = adler::adler32_slice(data);
assert_eq!(left, right, "len({})", data.len());
}
}
}


@@ -0,0 +1,242 @@
use super::Adler32Imp;
/// Resolves update implementation if CPU supports avx512f and avx512bw instructions.
pub fn get_imp() -> Option<Adler32Imp> {
get_imp_inner()
}
#[inline]
#[cfg(all(
feature = "std",
feature = "nightly",
any(target_arch = "x86", target_arch = "x86_64")
))]
fn get_imp_inner() -> Option<Adler32Imp> {
let has_avx512f = std::is_x86_feature_detected!("avx512f");
let has_avx512bw = std::is_x86_feature_detected!("avx512bw");
if has_avx512f && has_avx512bw {
Some(imp::update)
} else {
None
}
}
#[inline]
#[cfg(all(
feature = "nightly",
all(target_feature = "avx512f", target_feature = "avx512bw"),
not(all(feature = "std", any(target_arch = "x86", target_arch = "x86_64")))
))]
fn get_imp_inner() -> Option<Adler32Imp> {
Some(imp::update)
}
#[inline]
#[cfg(all(
not(all(feature = "nightly", target_feature = "avx512f", target_feature = "avx512bw")),
not(all(
feature = "std",
feature = "nightly",
any(target_arch = "x86", target_arch = "x86_64")
))
))]
fn get_imp_inner() -> Option<Adler32Imp> {
None
}
#[cfg(all(
feature = "nightly",
any(target_arch = "x86", target_arch = "x86_64"),
any(
feature = "std",
all(target_feature = "avx512f", target_feature = "avx512bw")
)
))]
mod imp {
const MOD: u32 = 65521;
const NMAX: usize = 5552;
const BLOCK_SIZE: usize = 64;
const CHUNK_SIZE: usize = NMAX / BLOCK_SIZE * BLOCK_SIZE;
#[cfg(target_arch = "x86")]
use core::arch::x86::*;
#[cfg(target_arch = "x86_64")]
use core::arch::x86_64::*;
pub fn update(a: u16, b: u16, data: &[u8]) -> (u16, u16) {
unsafe { update_imp(a, b, data) }
}
#[inline]
#[target_feature(enable = "avx512f")]
#[target_feature(enable = "avx512bw")]
unsafe fn update_imp(a: u16, b: u16, data: &[u8]) -> (u16, u16) {
let mut a = a as u32;
let mut b = b as u32;
let chunks = data.chunks_exact(CHUNK_SIZE);
let remainder = chunks.remainder();
for chunk in chunks {
update_chunk_block(&mut a, &mut b, chunk);
}
update_block(&mut a, &mut b, remainder);
(a as u16, b as u16)
}
#[inline]
unsafe fn update_chunk_block(a: &mut u32, b: &mut u32, chunk: &[u8]) {
debug_assert_eq!(
chunk.len(),
CHUNK_SIZE,
"Unexpected chunk size (expected {}, got {})",
CHUNK_SIZE,
chunk.len()
);
reduce_add_blocks(a, b, chunk);
*a %= MOD;
*b %= MOD;
}
#[inline]
unsafe fn update_block(a: &mut u32, b: &mut u32, chunk: &[u8]) {
debug_assert!(
chunk.len() <= CHUNK_SIZE,
"Unexpected chunk size (expected <= {}, got {})",
CHUNK_SIZE,
chunk.len()
);
for byte in reduce_add_blocks(a, b, chunk) {
*a += *byte as u32;
*b += *a;
}
*a %= MOD;
*b %= MOD;
}
#[inline(always)]
unsafe fn reduce_add_blocks<'a>(a: &mut u32, b: &mut u32, chunk: &'a [u8]) -> &'a [u8] {
if chunk.len() < BLOCK_SIZE {
return chunk;
}
let blocks = chunk.chunks_exact(BLOCK_SIZE);
let blocks_remainder = blocks.remainder();
let one_v = _mm512_set1_epi16(1);
let zero_v = _mm512_setzero_si512();
let weights = get_weights();
let p_v = (*a * blocks.len() as u32) as _;
let mut p_v = _mm512_set_epi32(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, p_v);
let mut a_v = _mm512_setzero_si512();
let mut b_v = _mm512_set_epi32(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, *b as _);
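// Same recurrence as the avx2 path but over 64-byte blocks, so the deferred
// `64 * a` terms are folded back in with `p_v << 6` after the loop.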
for block in blocks {
let block_ptr = block.as_ptr() as *const _;
let block = _mm512_loadu_si512(block_ptr);
p_v = _mm512_add_epi32(p_v, a_v);
a_v = _mm512_add_epi32(a_v, _mm512_sad_epu8(block, zero_v));
let mad = _mm512_maddubs_epi16(block, weights);
b_v = _mm512_add_epi32(b_v, _mm512_madd_epi16(mad, one_v));
}
b_v = _mm512_add_epi32(b_v, _mm512_slli_epi32(p_v, 6));
*a += reduce_add(a_v);
*b = reduce_add(b_v);
blocks_remainder
}
#[inline(always)]
unsafe fn reduce_add(v: __m512i) -> u32 {
let v: [__m256i; 2] = core::mem::transmute(v);
reduce_add_256(v[0]) + reduce_add_256(v[1])
}
#[inline(always)]
unsafe fn reduce_add_256(v: __m256i) -> u32 {
let v: [__m128i; 2] = core::mem::transmute(v);
let sum = _mm_add_epi32(v[0], v[1]);
let hi = _mm_unpackhi_epi64(sum, sum);
let sum = _mm_add_epi32(hi, sum);
let hi = _mm_shuffle_epi32(sum, crate::imp::_MM_SHUFFLE(2, 3, 0, 1));
let sum = _mm_add_epi32(sum, hi);
let sum = _mm_cvtsi128_si32(sum) as _;
sum
}
#[inline(always)]
unsafe fn get_weights() -> __m512i {
_mm512_set_epi8(
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23,
24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44,
45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64,
)
}
}
#[cfg(test)]
mod tests {
use rand::Rng;
#[test]
fn zeroes() {
assert_sum_eq(&[]);
assert_sum_eq(&[0]);
assert_sum_eq(&[0, 0]);
assert_sum_eq(&[0; 100]);
assert_sum_eq(&[0; 1024]);
assert_sum_eq(&[0; 1024 * 1024]);
}
#[test]
fn ones() {
assert_sum_eq(&[]);
assert_sum_eq(&[1]);
assert_sum_eq(&[1, 1]);
assert_sum_eq(&[1; 100]);
assert_sum_eq(&[1; 1024]);
assert_sum_eq(&[1; 1024 * 1024]);
}
#[test]
fn random() {
let mut random = [0; 1024 * 1024];
rand::thread_rng().fill(&mut random[..]);
assert_sum_eq(&random[..1]);
assert_sum_eq(&random[..100]);
assert_sum_eq(&random[..1024]);
assert_sum_eq(&random[..1024 * 1024]);
}
/// Example calculation from https://en.wikipedia.org/wiki/Adler-32.
#[test]
fn wiki() {
assert_sum_eq(b"Wikipedia");
}
fn assert_sum_eq(data: &[u8]) {
if let Some(update) = super::get_imp() {
let (a, b) = update(1, 0, data);
let left = u32::from(b) << 16 | u32::from(a);
let right = adler::adler32_slice(data);
assert_eq!(left, right, "len({})", data.len());
}
}
}


@@ -0,0 +1,23 @@
pub mod avx2;
pub mod avx512;
pub mod scalar;
pub mod sse2;
pub mod ssse3;
pub mod wasm;
pub type Adler32Imp = fn(u16, u16, &[u8]) -> (u16, u16);
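// Mirrors Intel's _MM_SHUFFLE macro: packs four 2-bit lane selectors into one
// immediate, e.g. _MM_SHUFFLE(2, 3, 0, 1) == 0b10_11_00_01, which swaps
// adjacent 32-bit lanes when passed to _mm_shuffle_epi32.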
#[inline]
#[allow(non_snake_case)]
pub const fn _MM_SHUFFLE(z: u32, y: u32, x: u32, w: u32) -> i32 {
((z << 6) | (y << 4) | (x << 2) | w) as i32
}
pub fn get_imp() -> Adler32Imp {
avx512::get_imp()
.or_else(avx2::get_imp)
.or_else(ssse3::get_imp)
.or_else(sse2::get_imp)
.or_else(wasm::get_imp)
.unwrap_or(scalar::update)
}
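// A minimal sanity check (sketch): whichever implementation get_imp resolves
// to should agree with the scalar fallback.
#[cfg(test)]
mod tests {
#[test]
fn resolved_imp_matches_scalar() {
let data = b"Wikipedia";
let simd = super::get_imp()(1, 0, data);
let scalar = super::scalar::update(1, 0, data);
assert_eq!(simd, scalar);
}
}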


@@ -0,0 +1,241 @@
use super::Adler32Imp;
/// Resolves update implementation if CPU supports neon instructions.
pub fn get_imp() -> Option<Adler32Imp> {
get_imp_inner()
}
#[inline]
#[cfg(all(feature = "std", feature = "nightly", target_arch = "arm"))]
fn get_imp_inner() -> Option<Adler32Imp> {
if std::arch::is_arm_feature_detected!("neon") {
Some(imp::update)
} else {
None
}
}
#[inline]
#[cfg(all(feature = "std", feature = "nightly", target_arch = "aarch64"))]
fn get_imp_inner() -> Option<Adler32Imp> {
if std::arch::is_aarch64_feature_detected!("neon") {
Some(imp::update)
} else {
None
}
}
#[inline]
#[cfg(all(
feature = "nightly",
target_feature = "neon",
not(all(feature = "std", any(target_arch = "arm", target_arch = "aarch64")))
))]
fn get_imp_inner() -> Option<Adler32Imp> {
Some(imp::update)
}
#[inline]
#[cfg(all(
not(target_feature = "neon"),
not(all(
feature = "std",
feature = "nightly",
any(target_arch = "arm", target_arch = "aarch64")
))
))]
fn get_imp_inner() -> Option<Adler32Imp> {
None
}
#[cfg(all(
feature = "nightly",
any(target_arch = "arm", target_arch = "aarch64"),
any(feature = "std", target_feature = "neon")
))]
mod imp {
const MOD: u32 = 65521;
const NMAX: usize = 5552;
const BLOCK_SIZE: usize = 64;
const CHUNK_SIZE: usize = NMAX / BLOCK_SIZE * BLOCK_SIZE;
#[cfg(target_arch = "aarch64")]
use core::arch::aarch64::*;
#[cfg(target_arch = "arm")]
use core::arch::arm::*;
pub fn update(a: u16, b: u16, data: &[u8]) -> (u16, u16) {
unsafe { update_imp(a, b, data) }
}
#[inline]
#[target_feature(enable = "neon")]
unsafe fn update_imp(a: u16, b: u16, data: &[u8]) -> (u16, u16) {
let mut a = a as u32;
let mut b = b as u32;
let chunks = data.chunks_exact(CHUNK_SIZE);
let remainder = chunks.remainder();
for chunk in chunks {
update_chunk_block(&mut a, &mut b, chunk);
}
update_block(&mut a, &mut b, remainder);
(a as u16, b as u16)
}
#[inline]
unsafe fn update_chunk_block(a: &mut u32, b: &mut u32, chunk: &[u8]) {
debug_assert_eq!(
chunk.len(),
CHUNK_SIZE,
"Unexpected chunk size (expected {}, got {})",
CHUNK_SIZE,
chunk.len()
);
reduce_add_blocks(a, b, chunk);
*a %= MOD;
*b %= MOD;
}
#[inline]
unsafe fn update_block(a: &mut u32, b: &mut u32, chunk: &[u8]) {
debug_assert!(
chunk.len() <= CHUNK_SIZE,
"Unexpected chunk size (expected <= {}, got {})",
CHUNK_SIZE,
chunk.len()
);
for byte in reduce_add_blocks(a, b, chunk) {
*a += *byte as u32;
*b += *a;
}
*a %= MOD;
*b %= MOD;
}
#[inline(always)]
unsafe fn reduce_add_blocks<'a>(a: &mut u32, b: &mut u32, chunk: &'a [u8]) -> &'a [u8] {
if chunk.len() < BLOCK_SIZE {
return chunk;
}
let blocks = chunk.chunks_exact(BLOCK_SIZE);
let blocks_remainder = blocks.remainder();
let one_v = _mm512_set1_epi16(1);
let zero_v = _mm512_setzero_si512();
let weights = get_weights();
let p_v = (*a * blocks.len() as u32) as _;
let mut p_v = _mm512_set_epi32(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, p_v);
let mut a_v = _mm512_setzero_si512();
let mut b_v = _mm512_set_epi32(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, *b as _);
for block in blocks {
let block_ptr = block.as_ptr() as *const _;
let block = _mm512_loadu_si512(block_ptr);
p_v = _mm512_add_epi32(p_v, a_v);
a_v = _mm512_add_epi32(a_v, _mm512_sad_epu8(block, zero_v));
let mad = _mm512_maddubs_epi16(block, weights);
b_v = _mm512_add_epi32(b_v, _mm512_madd_epi16(mad, one_v));
}
b_v = _mm512_add_epi32(b_v, _mm512_slli_epi32(p_v, 6));
*a += reduce_add(a_v);
*b = reduce_add(b_v);
blocks_remainder
}
#[inline(always)]
unsafe fn reduce_add(v: __m512i) -> u32 {
let v: [__m256i; 2] = core::mem::transmute(v);
reduce_add_256(v[0]) + reduce_add_256(v[1])
}
#[inline(always)]
unsafe fn reduce_add_256(v: __m256i) -> u32 {
let v: [__m128i; 2] = core::mem::transmute(v);
let sum = _mm_add_epi32(v[0], v[1]);
let hi = _mm_unpackhi_epi64(sum, sum);
let sum = _mm_add_epi32(hi, sum);
let hi = _mm_shuffle_epi32(sum, crate::imp::_MM_SHUFFLE(2, 3, 0, 1));
let sum = _mm_add_epi32(sum, hi);
let sum = _mm_cvtsi128_si32(sum) as _;
sum
}
#[inline(always)]
unsafe fn get_weights() -> __m512i {
_mm512_set_epi8(
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23,
24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44,
45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64,
)
}
}
#[cfg(test)]
mod tests {
use rand::Rng;
#[test]
fn zeroes() {
assert_sum_eq(&[]);
assert_sum_eq(&[0]);
assert_sum_eq(&[0, 0]);
assert_sum_eq(&[0; 100]);
assert_sum_eq(&[0; 1024]);
assert_sum_eq(&[0; 1024 * 1024]);
}
#[test]
fn ones() {
assert_sum_eq(&[]);
assert_sum_eq(&[1]);
assert_sum_eq(&[1, 1]);
assert_sum_eq(&[1; 100]);
assert_sum_eq(&[1; 1024]);
assert_sum_eq(&[1; 1024 * 1024]);
}
#[test]
fn random() {
let mut random = [0; 1024 * 1024];
rand::thread_rng().fill(&mut random[..]);
assert_sum_eq(&random[..1]);
assert_sum_eq(&random[..100]);
assert_sum_eq(&random[..1024]);
assert_sum_eq(&random[..1024 * 1024]);
}
/// Example calculation from https://en.wikipedia.org/wiki/Adler-32.
#[test]
fn wiki() {
assert_sum_eq(b"Wikipedia");
}
fn assert_sum_eq(data: &[u8]) {
if let Some(update) = super::get_imp() {
let (a, b) = update(1, 0, data);
let left = u32::from(b) << 16 | u32::from(a);
let right = adler::adler32_slice(data);
assert_eq!(left, right, "len({})", data.len());
}
}
}


@@ -0,0 +1,69 @@
const MOD: u32 = 65521;
const NMAX: usize = 5552;
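// NMAX = 5552 is the largest number of bytes that can be summed before `b`
// could overflow a u32, so the modulo reduction only has to run once per
// chunk rather than once per byte.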
pub fn update(a: u16, b: u16, data: &[u8]) -> (u16, u16) {
let mut a = a as u32;
let mut b = b as u32;
let chunks = data.chunks_exact(NMAX);
let remainder = chunks.remainder();
for chunk in chunks {
for byte in chunk {
a = a.wrapping_add(*byte as _);
b = b.wrapping_add(a);
}
a %= MOD;
b %= MOD;
}
for byte in remainder {
a = a.wrapping_add(*byte as _);
b = b.wrapping_add(a);
}
a %= MOD;
b %= MOD;
(a as u16, b as u16)
}
#[cfg(test)]
mod tests {
#[test]
fn zeroes() {
assert_eq!(adler32(&[]), 1);
assert_eq!(adler32(&[0]), 1 | 1 << 16);
assert_eq!(adler32(&[0, 0]), 1 | 2 << 16);
assert_eq!(adler32(&[0; 100]), 0x00640001);
assert_eq!(adler32(&[0; 1024]), 0x04000001);
assert_eq!(adler32(&[0; 1024 * 1024]), 0x00f00001);
}
#[test]
fn ones() {
assert_eq!(adler32(&[0xff; 1024]), 0x79a6fc2e);
assert_eq!(adler32(&[0xff; 1024 * 1024]), 0x8e88ef11);
}
#[test]
fn mixed() {
assert_eq!(adler32(&[1]), 2 | 2 << 16);
assert_eq!(adler32(&[40]), 41 | 41 << 16);
assert_eq!(adler32(&[0xA5; 1024 * 1024]), 0xd5009ab1);
}
/// Example calculation from https://en.wikipedia.org/wiki/Adler-32.
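/// "Wikipedia" gives a = 1 + (87 + 105 + 107 + 105 + 112 + 101 + 100 + 105 + 97) = 920
/// (0x0398) and b = 4582 (0x11E6), so the checksum is (b << 16) | a = 0x11E60398.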
#[test]
fn wiki() {
assert_eq!(adler32(b"Wikipedia"), 0x11E60398);
}
fn adler32(data: &[u8]) -> u32 {
let (a, b) = super::update(1, 0, data);
u32::from(b) << 16 | u32::from(a)
}
}


@@ -0,0 +1,233 @@
use super::Adler32Imp;
/// Resolves update implementation if CPU supports sse2 instructions.
pub fn get_imp() -> Option<Adler32Imp> {
get_imp_inner()
}
#[inline]
#[cfg(all(feature = "std", any(target_arch = "x86", target_arch = "x86_64")))]
fn get_imp_inner() -> Option<Adler32Imp> {
if std::is_x86_feature_detected!("sse2") {
Some(imp::update)
} else {
None
}
}
#[inline]
#[cfg(all(
target_feature = "sse2",
not(all(feature = "std", any(target_arch = "x86", target_arch = "x86_64")))
))]
fn get_imp_inner() -> Option<Adler32Imp> {
Some(imp::update)
}
#[inline]
#[cfg(all(
not(target_feature = "sse2"),
not(all(feature = "std", any(target_arch = "x86", target_arch = "x86_64")))
))]
fn get_imp_inner() -> Option<Adler32Imp> {
None
}
#[cfg(all(
any(target_arch = "x86", target_arch = "x86_64"),
any(feature = "std", target_feature = "sse2")
))]
mod imp {
const MOD: u32 = 65521;
const NMAX: usize = 5552;
const BLOCK_SIZE: usize = 32;
const CHUNK_SIZE: usize = NMAX / BLOCK_SIZE * BLOCK_SIZE;
#[cfg(target_arch = "x86")]
use core::arch::x86::*;
#[cfg(target_arch = "x86_64")]
use core::arch::x86_64::*;
pub fn update(a: u16, b: u16, data: &[u8]) -> (u16, u16) {
unsafe { update_imp(a, b, data) }
}
#[inline]
#[target_feature(enable = "sse2")]
unsafe fn update_imp(a: u16, b: u16, data: &[u8]) -> (u16, u16) {
let mut a = a as u32;
let mut b = b as u32;
let chunks = data.chunks_exact(CHUNK_SIZE);
let remainder = chunks.remainder();
for chunk in chunks {
update_chunk_block(&mut a, &mut b, chunk);
}
update_block(&mut a, &mut b, remainder);
(a as u16, b as u16)
}
unsafe fn update_chunk_block(a: &mut u32, b: &mut u32, chunk: &[u8]) {
debug_assert_eq!(
chunk.len(),
CHUNK_SIZE,
"Unexpected chunk size (expected {}, got {})",
CHUNK_SIZE,
chunk.len()
);
reduce_add_blocks(a, b, chunk);
*a %= MOD;
*b %= MOD;
}
unsafe fn update_block(a: &mut u32, b: &mut u32, chunk: &[u8]) {
debug_assert!(
chunk.len() <= CHUNK_SIZE,
"Unexpected chunk size (expected <= {}, got {})",
CHUNK_SIZE,
chunk.len()
);
for byte in reduce_add_blocks(a, b, chunk) {
*a += *byte as u32;
*b += *a;
}
*a %= MOD;
*b %= MOD;
}
#[inline(always)]
unsafe fn reduce_add_blocks<'a>(a: &mut u32, b: &mut u32, chunk: &'a [u8]) -> &'a [u8] {
if chunk.len() < BLOCK_SIZE {
return chunk;
}
let blocks = chunk.chunks_exact(BLOCK_SIZE);
let blocks_remainder = blocks.remainder();
let zero_v = _mm_setzero_si128();
let weight_hi_v = get_weight_hi();
let weight_lo_v = get_weight_lo();
let mut p_v = _mm_set_epi32(0, 0, 0, (*a * blocks.len() as u32) as _);
let mut a_v = _mm_setzero_si128();
let mut b_v = _mm_set_epi32(0, 0, 0, *b as _);
for block in blocks {
let block_ptr = block.as_ptr() as *const _;
let left_v = _mm_loadu_si128(block_ptr);
let right_v = _mm_loadu_si128(block_ptr.add(1));
p_v = _mm_add_epi32(p_v, a_v);
a_v = _mm_add_epi32(a_v, _mm_sad_epu8(left_v, zero_v));
let mad = maddubs(left_v, weight_hi_v);
b_v = _mm_add_epi32(b_v, mad);
a_v = _mm_add_epi32(a_v, _mm_sad_epu8(right_v, zero_v));
let mad = maddubs(right_v, weight_lo_v);
b_v = _mm_add_epi32(b_v, mad);
}
b_v = _mm_add_epi32(b_v, _mm_slli_epi32(p_v, 5));
*a += reduce_add(a_v);
*b = reduce_add(b_v);
blocks_remainder
}
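// SSE2 lacks _mm_maddubs_epi16, so the weighted byte sums are emulated below:
// both operands are zero-extended to 16-bit lanes and combined with
// _mm_madd_epi16. Only the total across lanes matters, so the different lane
// grouping relative to the ssse3 path is harmless.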
#[inline(always)]
unsafe fn maddubs(a: __m128i, b: __m128i) -> __m128i {
let a_lo = _mm_unpacklo_epi8(a, _mm_setzero_si128());
let a_hi = _mm_unpackhi_epi8(a, _mm_setzero_si128());
let b_lo = _mm_unpacklo_epi8(b, _mm_setzero_si128());
let b_hi = _mm_unpackhi_epi8(b, _mm_setzero_si128());
let lo = _mm_madd_epi16(a_lo, b_lo);
let hi = _mm_madd_epi16(a_hi, b_hi);
_mm_add_epi32(lo, hi)
}
#[inline(always)]
unsafe fn reduce_add(v: __m128i) -> u32 {
let hi = _mm_unpackhi_epi64(v, v);
let sum = _mm_add_epi32(hi, v);
let hi = _mm_shuffle_epi32(sum, crate::imp::_MM_SHUFFLE(2, 3, 0, 1));
let sum = _mm_add_epi32(sum, hi);
_mm_cvtsi128_si32(sum) as _
}
#[inline(always)]
unsafe fn get_weight_lo() -> __m128i {
_mm_set_epi8(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16)
}
#[inline(always)]
unsafe fn get_weight_hi() -> __m128i {
_mm_set_epi8(
17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
)
}
}
#[cfg(test)]
mod tests {
use rand::Rng;
#[test]
fn zeroes() {
assert_sum_eq(&[]);
assert_sum_eq(&[0]);
assert_sum_eq(&[0, 0]);
assert_sum_eq(&[0; 100]);
assert_sum_eq(&[0; 1024]);
assert_sum_eq(&[0; 1024 * 1024]);
}
#[test]
fn ones() {
assert_sum_eq(&[]);
assert_sum_eq(&[1]);
assert_sum_eq(&[1, 1]);
assert_sum_eq(&[1; 100]);
assert_sum_eq(&[1; 1024]);
assert_sum_eq(&[1; 1024 * 1024]);
}
#[test]
fn random() {
let mut random = [0; 1024 * 1024];
rand::thread_rng().fill(&mut random[..]);
assert_sum_eq(&random[..1]);
assert_sum_eq(&random[..100]);
assert_sum_eq(&random[..1024]);
assert_sum_eq(&random[..1024 * 1024]);
}
/// Example calculation from https://en.wikipedia.org/wiki/Adler-32.
#[test]
fn wiki() {
assert_sum_eq(b"Wikipedia");
}
fn assert_sum_eq(data: &[u8]) {
if let Some(update) = super::get_imp() {
let (a, b) = update(1, 0, data);
let left = u32::from(b) << 16 | u32::from(a);
let right = adler::adler32_slice(data);
assert_eq!(left, right, "len({})", data.len());
}
}
}


@@ -0,0 +1,219 @@
use super::Adler32Imp;
/// Resolves update implementation if CPU supports ssse3 instructions.
pub fn get_imp() -> Option<Adler32Imp> {
get_imp_inner()
}
#[inline]
#[cfg(all(feature = "std", any(target_arch = "x86", target_arch = "x86_64")))]
fn get_imp_inner() -> Option<Adler32Imp> {
if std::is_x86_feature_detected!("ssse3") {
Some(imp::update)
} else {
None
}
}
#[inline]
#[cfg(all(
target_feature = "ssse3",
not(all(feature = "std", any(target_arch = "x86", target_arch = "x86_64")))
))]
fn get_imp_inner() -> Option<Adler32Imp> {
Some(imp::update)
}
#[inline]
#[cfg(all(
not(target_feature = "ssse3"),
not(all(feature = "std", any(target_arch = "x86", target_arch = "x86_64")))
))]
fn get_imp_inner() -> Option<Adler32Imp> {
None
}
#[cfg(all(
any(target_arch = "x86", target_arch = "x86_64"),
any(feature = "std", target_feature = "ssse3")
))]
mod imp {
const MOD: u32 = 65521;
const NMAX: usize = 5552;
const BLOCK_SIZE: usize = 32;
const CHUNK_SIZE: usize = NMAX / BLOCK_SIZE * BLOCK_SIZE;
#[cfg(target_arch = "x86")]
use core::arch::x86::*;
#[cfg(target_arch = "x86_64")]
use core::arch::x86_64::*;
pub fn update(a: u16, b: u16, data: &[u8]) -> (u16, u16) {
unsafe { update_imp(a, b, data) }
}
#[inline]
#[target_feature(enable = "ssse3")]
unsafe fn update_imp(a: u16, b: u16, data: &[u8]) -> (u16, u16) {
let mut a = a as u32;
let mut b = b as u32;
let chunks = data.chunks_exact(CHUNK_SIZE);
let remainder = chunks.remainder();
for chunk in chunks {
update_chunk_block(&mut a, &mut b, chunk);
}
update_block(&mut a, &mut b, remainder);
(a as u16, b as u16)
}
unsafe fn update_chunk_block(a: &mut u32, b: &mut u32, chunk: &[u8]) {
debug_assert_eq!(
chunk.len(),
CHUNK_SIZE,
"Unexpected chunk size (expected {}, got {})",
CHUNK_SIZE,
chunk.len()
);
reduce_add_blocks(a, b, chunk);
*a %= MOD;
*b %= MOD;
}
unsafe fn update_block(a: &mut u32, b: &mut u32, chunk: &[u8]) {
debug_assert!(
chunk.len() <= CHUNK_SIZE,
"Unexpected chunk size (expected <= {}, got {})",
CHUNK_SIZE,
chunk.len()
);
for byte in reduce_add_blocks(a, b, chunk) {
*a += *byte as u32;
*b += *a;
}
*a %= MOD;
*b %= MOD;
}
#[inline(always)]
unsafe fn reduce_add_blocks<'a>(a: &mut u32, b: &mut u32, chunk: &'a [u8]) -> &'a [u8] {
if chunk.len() < BLOCK_SIZE {
return chunk;
}
let blocks = chunk.chunks_exact(BLOCK_SIZE);
let blocks_remainder = blocks.remainder();
let one_v = _mm_set1_epi16(1);
let zero_v = _mm_set1_epi16(0);
let weight_hi_v = get_weight_hi();
let weight_lo_v = get_weight_lo();
let mut p_v = _mm_set_epi32(0, 0, 0, (*a * blocks.len() as u32) as _);
let mut a_v = _mm_set_epi32(0, 0, 0, 0);
let mut b_v = _mm_set_epi32(0, 0, 0, *b as _);
for block in blocks {
let block_ptr = block.as_ptr() as *const _;
let left_v = _mm_loadu_si128(block_ptr);
let right_v = _mm_loadu_si128(block_ptr.add(1));
p_v = _mm_add_epi32(p_v, a_v);
a_v = _mm_add_epi32(a_v, _mm_sad_epu8(left_v, zero_v));
let mad = _mm_maddubs_epi16(left_v, weight_hi_v);
b_v = _mm_add_epi32(b_v, _mm_madd_epi16(mad, one_v));
a_v = _mm_add_epi32(a_v, _mm_sad_epu8(right_v, zero_v));
let mad = _mm_maddubs_epi16(right_v, weight_lo_v);
b_v = _mm_add_epi32(b_v, _mm_madd_epi16(mad, one_v));
}
b_v = _mm_add_epi32(b_v, _mm_slli_epi32(p_v, 5));
*a += reduce_add(a_v);
*b = reduce_add(b_v);
blocks_remainder
}
#[inline(always)]
unsafe fn reduce_add(v: __m128i) -> u32 {
let hi = _mm_unpackhi_epi64(v, v);
let sum = _mm_add_epi32(hi, v);
let hi = _mm_shuffle_epi32(sum, crate::imp::_MM_SHUFFLE(2, 3, 0, 1));
let sum = _mm_add_epi32(sum, hi);
_mm_cvtsi128_si32(sum) as _
}
#[inline(always)]
unsafe fn get_weight_lo() -> __m128i {
_mm_set_epi8(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16)
}
#[inline(always)]
unsafe fn get_weight_hi() -> __m128i {
_mm_set_epi8(
17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
)
}
}
#[cfg(test)]
mod tests {
use rand::Rng;
#[test]
fn zeroes() {
assert_sum_eq(&[]);
assert_sum_eq(&[0]);
assert_sum_eq(&[0, 0]);
assert_sum_eq(&[0; 100]);
assert_sum_eq(&[0; 1024]);
assert_sum_eq(&[0; 1024 * 1024]);
}
#[test]
fn ones() {
assert_sum_eq(&[]);
assert_sum_eq(&[1]);
assert_sum_eq(&[1, 1]);
assert_sum_eq(&[1; 100]);
assert_sum_eq(&[1; 1024]);
assert_sum_eq(&[1; 1024 * 1024]);
}
#[test]
fn random() {
let mut random = [0; 1024 * 1024];
rand::thread_rng().fill(&mut random[..]);
assert_sum_eq(&random[..1]);
assert_sum_eq(&random[..100]);
assert_sum_eq(&random[..1024]);
assert_sum_eq(&random[..1024 * 1024]);
}
/// Example calculation from https://en.wikipedia.org/wiki/Adler-32.
#[test]
fn wiki() {
assert_sum_eq(b"Wikipedia");
}
fn assert_sum_eq(data: &[u8]) {
if let Some(update) = super::get_imp() {
let (a, b) = update(1, 0, data);
let left = u32::from(b) << 16 | u32::from(a);
let right = adler::adler32_slice(data);
assert_eq!(left, right, "len({})", data.len());
}
}
}


@@ -0,0 +1,217 @@
use super::Adler32Imp;
/// Resolves update implementation if CPU supports simd128 instructions.
pub fn get_imp() -> Option<Adler32Imp> {
get_imp_inner()
}
#[inline]
#[cfg(target_feature = "simd128")]
fn get_imp_inner() -> Option<Adler32Imp> {
Some(imp::update)
}
#[inline]
#[cfg(not(target_feature = "simd128"))]
fn get_imp_inner() -> Option<Adler32Imp> {
None
}
#[cfg(target_feature = "simd128")]
mod imp {
const MOD: u32 = 65521;
const NMAX: usize = 5552;
const BLOCK_SIZE: usize = 32;
const CHUNK_SIZE: usize = NMAX / BLOCK_SIZE * BLOCK_SIZE;
#[cfg(target_arch = "wasm32")]
use core::arch::wasm32::*;
#[cfg(target_arch = "wasm64")]
use core::arch::wasm64::*;
pub fn update(a: u16, b: u16, data: &[u8]) -> (u16, u16) {
update_imp(a, b, data)
}
#[inline]
#[target_feature(enable = "simd128")]
fn update_imp(a: u16, b: u16, data: &[u8]) -> (u16, u16) {
let mut a = a as u32;
let mut b = b as u32;
let chunks = data.chunks_exact(CHUNK_SIZE);
let remainder = chunks.remainder();
for chunk in chunks {
update_chunk_block(&mut a, &mut b, chunk);
}
update_block(&mut a, &mut b, remainder);
(a as u16, b as u16)
}
fn update_chunk_block(a: &mut u32, b: &mut u32, chunk: &[u8]) {
debug_assert_eq!(
chunk.len(),
CHUNK_SIZE,
"Unexpected chunk size (expected {}, got {})",
CHUNK_SIZE,
chunk.len()
);
reduce_add_blocks(a, b, chunk);
*a %= MOD;
*b %= MOD;
}
fn update_block(a: &mut u32, b: &mut u32, chunk: &[u8]) {
debug_assert!(
chunk.len() <= CHUNK_SIZE,
"Unexpected chunk size (expected <= {}, got {})",
CHUNK_SIZE,
chunk.len()
);
for byte in reduce_add_blocks(a, b, chunk) {
*a += *byte as u32;
*b += *a;
}
*a %= MOD;
*b %= MOD;
}
#[inline(always)]
fn reduce_add_blocks<'a>(a: &mut u32, b: &mut u32, chunk: &'a [u8]) -> &'a [u8] {
if chunk.len() < BLOCK_SIZE {
return chunk;
}
let blocks = chunk.chunks_exact(BLOCK_SIZE);
let blocks_remainder = blocks.remainder();
let weight_hi_v = get_weight_hi();
let weight_lo_v = get_weight_lo();
let mut p_v = u32x4(*a * blocks.len() as u32, 0, 0, 0);
let mut a_v = u32x4(0, 0, 0, 0);
let mut b_v = u32x4(*b, 0, 0, 0);
for block in blocks {
let block_ptr = block.as_ptr() as *const v128;
let v_lo = unsafe { block_ptr.read_unaligned() };
let v_hi = unsafe { block_ptr.add(1).read_unaligned() };
p_v = u32x4_add(p_v, a_v);
a_v = u32x4_add(a_v, u32x4_extadd_quarters_u8x16(v_lo));
let mad = i32x4_dot_i8x16(v_lo, weight_lo_v);
b_v = u32x4_add(b_v, mad);
a_v = u32x4_add(a_v, u32x4_extadd_quarters_u8x16(v_hi));
let mad = i32x4_dot_i8x16(v_hi, weight_hi_v);
b_v = u32x4_add(b_v, mad);
}
b_v = u32x4_add(b_v, u32x4_shl(p_v, 5));
*a += reduce_add(a_v);
*b = reduce_add(b_v);
blocks_remainder
}
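// simd128 has no unsigned byte multiply-add, so the weighted sums widen the
// bytes to 16-bit lanes and use i32x4_dot_i16x8, while the plain byte sums
// use two rounds of pairwise extending adds.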
#[inline(always)]
fn i32x4_dot_i8x16(a: v128, b: v128) -> v128 {
let a_lo = u16x8_extend_low_u8x16(a);
let a_hi = u16x8_extend_high_u8x16(a);
let b_lo = u16x8_extend_low_u8x16(b);
let b_hi = u16x8_extend_high_u8x16(b);
let lo = i32x4_dot_i16x8(a_lo, b_lo);
let hi = i32x4_dot_i16x8(a_hi, b_hi);
i32x4_add(lo, hi)
}
#[inline(always)]
fn u32x4_extadd_quarters_u8x16(a: v128) -> v128 {
u32x4_extadd_pairwise_u16x8(u16x8_extadd_pairwise_u8x16(a))
}
#[inline(always)]
fn reduce_add(v: v128) -> u32 {
let arr: [u32; 4] = unsafe { std::mem::transmute(v) };
let mut sum = 0u32;
for val in arr {
sum = sum.wrapping_add(val);
}
sum
}
#[inline(always)]
fn get_weight_lo() -> v128 {
u8x16(
32, 31, 30, 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, 17,
)
}
#[inline(always)]
fn get_weight_hi() -> v128 {
u8x16(16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1)
}
}
#[cfg(test)]
mod tests {
use rand::Rng;
#[test]
fn zeroes() {
assert_sum_eq(&[]);
assert_sum_eq(&[0]);
assert_sum_eq(&[0, 0]);
assert_sum_eq(&[0; 100]);
assert_sum_eq(&[0; 1024]);
assert_sum_eq(&[0; 512 * 1024]);
}
#[test]
fn ones() {
assert_sum_eq(&[]);
assert_sum_eq(&[1]);
assert_sum_eq(&[1, 1]);
assert_sum_eq(&[1; 100]);
assert_sum_eq(&[1; 1024]);
assert_sum_eq(&[1; 512 * 1024]);
}
#[test]
fn random() {
let mut random = [0; 512 * 1024];
rand::thread_rng().fill(&mut random[..]);
assert_sum_eq(&random[..1]);
assert_sum_eq(&random[..100]);
assert_sum_eq(&random[..1024]);
assert_sum_eq(&random[..512 * 1024]);
}
/// Example calculation from https://en.wikipedia.org/wiki/Adler-32.
#[test]
fn wiki() {
assert_sum_eq(b"Wikipedia");
}
fn assert_sum_eq(data: &[u8]) {
if let Some(update) = super::get_imp() {
let (a, b) = update(1, 0, data);
let left = u32::from(b) << 16 | u32::from(a);
let right = adler::adler32_slice(data);
assert_eq!(left, right, "len({})", data.len());
}
}
}