From 567b620d92af321c88900895a41383e6ee9458d0 Mon Sep 17 00:00:00 2001
From: Peter Sollich
Date: Thu, 28 May 2020 17:58:22 +0200
Subject: [PATCH 01/31] Initial snapshot of vxsort implementation
 incorporating source code from Dan Shechter.

---
 src/coreclr/src/gc/CMakeLists.txt                |    3 +-
 src/coreclr/src/gc/bitonic_sort.h                |   14 +
 .../src/gc/bitonic_sort.int64_t.generated.h      | 1481 +++++++++++++++++
 src/coreclr/src/gc/gc.cpp                        |   44 +
 src/coreclr/src/gc/gcsvr.cpp                     |    3 +
 src/coreclr/src/gc/gcwks.cpp                     |    3 +
 src/coreclr/src/gc/vxsort.cpp                    |  282 ++++
 src/coreclr/src/gc/vxsort.h                      |  625 +++++++
 8 files changed, 2454 insertions(+), 1 deletion(-)
 create mode 100644 src/coreclr/src/gc/bitonic_sort.h
 create mode 100644 src/coreclr/src/gc/bitonic_sort.int64_t.generated.h
 create mode 100644 src/coreclr/src/gc/vxsort.cpp
 create mode 100644 src/coreclr/src/gc/vxsort.h

diff --git a/src/coreclr/src/gc/CMakeLists.txt b/src/coreclr/src/gc/CMakeLists.txt
index f94140e2241c8f..6d9f87f4e1fd19 100644
--- a/src/coreclr/src/gc/CMakeLists.txt
+++ b/src/coreclr/src/gc/CMakeLists.txt
@@ -74,7 +74,8 @@ if (CLR_CMAKE_TARGET_WIN32)
     handletable.inl
     handletablepriv.h
     objecthandle.h
-    softwarewritewatch.h)
+    softwarewritewatch.h
+    vxsort.h)
 endif(CLR_CMAKE_TARGET_WIN32)

 if(CLR_CMAKE_HOST_WIN32)
diff --git a/src/coreclr/src/gc/bitonic_sort.h b/src/coreclr/src/gc/bitonic_sort.h
new file mode 100644
index 00000000000000..e7f07235cec672
--- /dev/null
+++ b/src/coreclr/src/gc/bitonic_sort.h
@@ -0,0 +1,14 @@
+#ifndef BITONIC_SORT_H
+#define BITONIC_SORT_H
+
+#include <stdint.h>
+namespace gcsort {
+namespace smallsort {
+template <typename T>
+class bitonic {
+ public:
+  static void sort(T* ptr, size_t length);
+};
+}  // namespace smallsort
+}  // namespace gcsort
+#endif
diff --git a/src/coreclr/src/gc/bitonic_sort.int64_t.generated.h b/src/coreclr/src/gc/bitonic_sort.int64_t.generated.h
new file mode 100644
index 00000000000000..f6b4327494fed2
--- /dev/null
+++ b/src/coreclr/src/gc/bitonic_sort.int64_t.generated.h
@@ -0,0 +1,1481 @@
+
+#ifndef BITONIC_SORT_INT64_T_H
+#define BITONIC_SORT_INT64_T_H
+
+#include <immintrin.h>
+#include "bitonic_sort.h"
+
+#ifdef _MSC_VER
+    // MSVC
+    #define INLINE __forceinline
+    #define NOINLINE __declspec(noinline)
+#else
+    // GCC + Clang
+    #define INLINE __attribute__((always_inline))
+    #define NOINLINE __attribute__((noinline))
+#endif
+
+#define i2d _mm256_castsi256_pd
+#define d2i _mm256_castpd_si256
+
+namespace gcsort {
+namespace smallsort {
+template<> struct bitonic<int64_t> {
+public:
+
+  static INLINE void sort_01v_ascending(__m256i& d01) {
+      __m256i  min, max, s, cmp;
+
+      s = d2i(_mm256_shuffle_pd(i2d(d01), i2d(d01), 0x5));
+      cmp = _mm256_cmpgt_epi64(s, d01);
+      min = d2i(_mm256_blendv_pd(i2d(s), i2d(d01), i2d(cmp)));
+      max = d2i(_mm256_blendv_pd(i2d(d01), i2d(s), i2d(cmp)));
+      d01 = d2i(_mm256_blend_pd(i2d(min), i2d(max), 0xA));
+
+      s = d2i(_mm256_permute4x64_pd(i2d(d01), 0x1B));
+      cmp = _mm256_cmpgt_epi64(s, d01);
+      min = d2i(_mm256_blendv_pd(i2d(s), i2d(d01), i2d(cmp)));
+      max = d2i(_mm256_blendv_pd(i2d(d01), i2d(s), i2d(cmp)));
+      d01 = d2i(_mm256_blend_pd(i2d(min), i2d(max), 0xC));
+
+      s = d2i(_mm256_shuffle_pd(i2d(d01), i2d(d01), 0x5));
+      cmp = _mm256_cmpgt_epi64(s, d01);
+      min = d2i(_mm256_blendv_pd(i2d(s), i2d(d01), i2d(cmp)));
+      max = d2i(_mm256_blendv_pd(i2d(d01), i2d(s), i2d(cmp)));
+      d01 = d2i(_mm256_blend_pd(i2d(min), i2d(max), 0xA));
+}
+  static INLINE void sort_01v_merge_ascending(__m256i& d01) {
+      __m256i  min, max, s, cmp;
+
+      s = d2i(_mm256_permute4x64_pd(i2d(d01), 0x4E));
+      cmp = _mm256_cmpgt_epi64(s, d01);
+      min =
d2i(_mm256_blendv_pd(i2d(s), i2d(d01), i2d(cmp))); + max = d2i(_mm256_blendv_pd(i2d(d01), i2d(s), i2d(cmp))); + d01 = d2i(_mm256_blend_pd(i2d(min), i2d(max), 0xC)); + + s = d2i(_mm256_shuffle_pd(i2d(d01), i2d(d01), 0x5)); + cmp = _mm256_cmpgt_epi64(s, d01); + min = d2i(_mm256_blendv_pd(i2d(s), i2d(d01), i2d(cmp))); + max = d2i(_mm256_blendv_pd(i2d(d01), i2d(s), i2d(cmp))); + d01 = d2i(_mm256_blend_pd(i2d(min), i2d(max), 0xA)); +} + static INLINE void sort_01v_descending(__m256i& d01) { + __m256i min, max, s, cmp; + + s = d2i(_mm256_shuffle_pd(i2d(d01), i2d(d01), 0x5)); + cmp = _mm256_cmpgt_epi64(s, d01); + min = d2i(_mm256_blendv_pd(i2d(s), i2d(d01), i2d(cmp))); + max = d2i(_mm256_blendv_pd(i2d(d01), i2d(s), i2d(cmp))); + d01 = d2i(_mm256_blend_pd(i2d(max), i2d(min), 0xA)); + + s = d2i(_mm256_permute4x64_pd(i2d(d01), 0x1B)); + cmp = _mm256_cmpgt_epi64(s, d01); + min = d2i(_mm256_blendv_pd(i2d(s), i2d(d01), i2d(cmp))); + max = d2i(_mm256_blendv_pd(i2d(d01), i2d(s), i2d(cmp))); + d01 = d2i(_mm256_blend_pd(i2d(max), i2d(min), 0xC)); + + s = d2i(_mm256_shuffle_pd(i2d(d01), i2d(d01), 0x5)); + cmp = _mm256_cmpgt_epi64(s, d01); + min = d2i(_mm256_blendv_pd(i2d(s), i2d(d01), i2d(cmp))); + max = d2i(_mm256_blendv_pd(i2d(d01), i2d(s), i2d(cmp))); + d01 = d2i(_mm256_blend_pd(i2d(max), i2d(min), 0xA)); +} + static INLINE void sort_01v_merge_descending(__m256i& d01) { + __m256i min, max, s, cmp; + + s = d2i(_mm256_permute4x64_pd(i2d(d01), 0x4E)); + cmp = _mm256_cmpgt_epi64(s, d01); + min = d2i(_mm256_blendv_pd(i2d(s), i2d(d01), i2d(cmp))); + max = d2i(_mm256_blendv_pd(i2d(d01), i2d(s), i2d(cmp))); + d01 = d2i(_mm256_blend_pd(i2d(max), i2d(min), 0xC)); + + s = d2i(_mm256_shuffle_pd(i2d(d01), i2d(d01), 0x5)); + cmp = _mm256_cmpgt_epi64(s, d01); + min = d2i(_mm256_blendv_pd(i2d(s), i2d(d01), i2d(cmp))); + max = d2i(_mm256_blendv_pd(i2d(d01), i2d(s), i2d(cmp))); + d01 = d2i(_mm256_blend_pd(i2d(max), i2d(min), 0xA)); +} + static INLINE void sort_02v_ascending(__m256i& d01, __m256i& d02) { + __m256i tmp, cmp; + + sort_01v_ascending(d01); + sort_01v_descending(d02); + + tmp = d02; + cmp = _mm256_cmpgt_epi64(d01, d02); + d02 = d2i(_mm256_blendv_pd(i2d(d02), i2d(d01), i2d(cmp))); + d01 = d2i(_mm256_blendv_pd(i2d(d01), i2d(tmp), i2d(cmp))); + + sort_01v_merge_ascending(d01); + sort_01v_merge_ascending(d02); +} + static INLINE void sort_02v_descending(__m256i& d01, __m256i& d02) { + __m256i tmp, cmp; + + sort_01v_descending(d01); + sort_01v_ascending(d02); + + tmp = d02; + cmp = _mm256_cmpgt_epi64(d01, d02); + d02 = d2i(_mm256_blendv_pd(i2d(d02), i2d(d01), i2d(cmp))); + d01 = d2i(_mm256_blendv_pd(i2d(d01), i2d(tmp), i2d(cmp))); + + sort_01v_merge_descending(d01); + sort_01v_merge_descending(d02); +} + static INLINE void sort_02v_merge_ascending(__m256i& d01, __m256i& d02) { + __m256i tmp, cmp; + + tmp = d01; + cmp = _mm256_cmpgt_epi64(d02, d01); + d01 = d2i(_mm256_blendv_pd(i2d(d02), i2d(d01), i2d(cmp))); + cmp = _mm256_cmpgt_epi64(d02, tmp); + d02 = d2i(_mm256_blendv_pd(i2d(tmp), i2d(d02), i2d(cmp))); + + sort_01v_merge_ascending(d01); + sort_01v_merge_ascending(d02); +} + static INLINE void sort_02v_merge_descending(__m256i& d01, __m256i& d02) { + __m256i tmp, cmp; + + tmp = d01; + cmp = _mm256_cmpgt_epi64(d02, d01); + d01 = d2i(_mm256_blendv_pd(i2d(d02), i2d(d01), i2d(cmp))); + cmp = _mm256_cmpgt_epi64(d02, tmp); + d02 = d2i(_mm256_blendv_pd(i2d(tmp), i2d(d02), i2d(cmp))); + + sort_01v_merge_descending(d01); + sort_01v_merge_descending(d02); +} + static INLINE void sort_03v_ascending(__m256i& d01, __m256i& 
d02, __m256i& d03) { + __m256i tmp, cmp; + + sort_02v_ascending(d01, d02); + sort_01v_descending(d03); + + tmp = d03; + cmp = _mm256_cmpgt_epi64(d02, d03); + d03 = d2i(_mm256_blendv_pd(i2d(d03), i2d(d02), i2d(cmp))); + d02 = d2i(_mm256_blendv_pd(i2d(d02), i2d(tmp), i2d(cmp))); + + sort_02v_merge_ascending(d01, d02); + sort_01v_merge_ascending(d03); +} + static INLINE void sort_03v_descending(__m256i& d01, __m256i& d02, __m256i& d03) { + __m256i tmp, cmp; + + sort_02v_descending(d01, d02); + sort_01v_ascending(d03); + + tmp = d03; + cmp = _mm256_cmpgt_epi64(d02, d03); + d03 = d2i(_mm256_blendv_pd(i2d(d03), i2d(d02), i2d(cmp))); + d02 = d2i(_mm256_blendv_pd(i2d(d02), i2d(tmp), i2d(cmp))); + + sort_02v_merge_descending(d01, d02); + sort_01v_merge_descending(d03); +} + static INLINE void sort_03v_merge_ascending(__m256i& d01, __m256i& d02, __m256i& d03) { + __m256i tmp, cmp; + + tmp = d01; + cmp = _mm256_cmpgt_epi64(d03, d01); + d01 = d2i(_mm256_blendv_pd(i2d(d03), i2d(d01), i2d(cmp))); + cmp = _mm256_cmpgt_epi64(d03, tmp); + d03 = d2i(_mm256_blendv_pd(i2d(tmp), i2d(d03), i2d(cmp))); + + sort_02v_merge_ascending(d01, d02); + sort_01v_merge_ascending(d03); +} + static INLINE void sort_03v_merge_descending(__m256i& d01, __m256i& d02, __m256i& d03) { + __m256i tmp, cmp; + + tmp = d01; + cmp = _mm256_cmpgt_epi64(d03, d01); + d01 = d2i(_mm256_blendv_pd(i2d(d03), i2d(d01), i2d(cmp))); + cmp = _mm256_cmpgt_epi64(d03, tmp); + d03 = d2i(_mm256_blendv_pd(i2d(tmp), i2d(d03), i2d(cmp))); + + sort_02v_merge_descending(d01, d02); + sort_01v_merge_descending(d03); +} + static INLINE void sort_04v_ascending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04) { + __m256i tmp, cmp; + + sort_02v_ascending(d01, d02); + sort_02v_descending(d03, d04); + + tmp = d03; + cmp = _mm256_cmpgt_epi64(d02, d03); + d03 = d2i(_mm256_blendv_pd(i2d(d03), i2d(d02), i2d(cmp))); + d02 = d2i(_mm256_blendv_pd(i2d(d02), i2d(tmp), i2d(cmp))); + + tmp = d04; + cmp = _mm256_cmpgt_epi64(d01, d04); + d04 = d2i(_mm256_blendv_pd(i2d(d04), i2d(d01), i2d(cmp))); + d01 = d2i(_mm256_blendv_pd(i2d(d01), i2d(tmp), i2d(cmp))); + + sort_02v_merge_ascending(d01, d02); + sort_02v_merge_ascending(d03, d04); +} + static INLINE void sort_04v_descending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04) { + __m256i tmp, cmp; + + sort_02v_descending(d01, d02); + sort_02v_ascending(d03, d04); + + tmp = d03; + cmp = _mm256_cmpgt_epi64(d02, d03); + d03 = d2i(_mm256_blendv_pd(i2d(d03), i2d(d02), i2d(cmp))); + d02 = d2i(_mm256_blendv_pd(i2d(d02), i2d(tmp), i2d(cmp))); + + tmp = d04; + cmp = _mm256_cmpgt_epi64(d01, d04); + d04 = d2i(_mm256_blendv_pd(i2d(d04), i2d(d01), i2d(cmp))); + d01 = d2i(_mm256_blendv_pd(i2d(d01), i2d(tmp), i2d(cmp))); + + sort_02v_merge_descending(d01, d02); + sort_02v_merge_descending(d03, d04); +} + static INLINE void sort_04v_merge_ascending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04) { + __m256i tmp, cmp; + + tmp = d01; + cmp = _mm256_cmpgt_epi64(d03, d01); + d01 = d2i(_mm256_blendv_pd(i2d(d03), i2d(d01), i2d(cmp))); + cmp = _mm256_cmpgt_epi64(d03, tmp); + d03 = d2i(_mm256_blendv_pd(i2d(tmp), i2d(d03), i2d(cmp))); + + tmp = d02; + cmp = _mm256_cmpgt_epi64(d04, d02); + d02 = d2i(_mm256_blendv_pd(i2d(d04), i2d(d02), i2d(cmp))); + cmp = _mm256_cmpgt_epi64(d04, tmp); + d04 = d2i(_mm256_blendv_pd(i2d(tmp), i2d(d04), i2d(cmp))); + + sort_02v_merge_ascending(d01, d02); + sort_02v_merge_ascending(d03, d04); +} + static INLINE void sort_04v_merge_descending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04) { + 
__m256i tmp, cmp; + + tmp = d01; + cmp = _mm256_cmpgt_epi64(d03, d01); + d01 = d2i(_mm256_blendv_pd(i2d(d03), i2d(d01), i2d(cmp))); + cmp = _mm256_cmpgt_epi64(d03, tmp); + d03 = d2i(_mm256_blendv_pd(i2d(tmp), i2d(d03), i2d(cmp))); + + tmp = d02; + cmp = _mm256_cmpgt_epi64(d04, d02); + d02 = d2i(_mm256_blendv_pd(i2d(d04), i2d(d02), i2d(cmp))); + cmp = _mm256_cmpgt_epi64(d04, tmp); + d04 = d2i(_mm256_blendv_pd(i2d(tmp), i2d(d04), i2d(cmp))); + + sort_02v_merge_descending(d01, d02); + sort_02v_merge_descending(d03, d04); +} + static INLINE void sort_05v_ascending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05) { + __m256i tmp, cmp; + + sort_04v_ascending(d01, d02, d03, d04); + sort_01v_descending(d05); + + tmp = d05; + cmp = _mm256_cmpgt_epi64(d04, d05); + d05 = d2i(_mm256_blendv_pd(i2d(d05), i2d(d04), i2d(cmp))); + d04 = d2i(_mm256_blendv_pd(i2d(d04), i2d(tmp), i2d(cmp))); + + sort_04v_merge_ascending(d01, d02, d03, d04); + sort_01v_merge_ascending(d05); +} + static INLINE void sort_05v_descending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05) { + __m256i tmp, cmp; + + sort_04v_descending(d01, d02, d03, d04); + sort_01v_ascending(d05); + + tmp = d05; + cmp = _mm256_cmpgt_epi64(d04, d05); + d05 = d2i(_mm256_blendv_pd(i2d(d05), i2d(d04), i2d(cmp))); + d04 = d2i(_mm256_blendv_pd(i2d(d04), i2d(tmp), i2d(cmp))); + + sort_04v_merge_descending(d01, d02, d03, d04); + sort_01v_merge_descending(d05); +} + static INLINE void sort_05v_merge_ascending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05) { + __m256i tmp, cmp; + + tmp = d01; + cmp = _mm256_cmpgt_epi64(d05, d01); + d01 = d2i(_mm256_blendv_pd(i2d(d05), i2d(d01), i2d(cmp))); + cmp = _mm256_cmpgt_epi64(d05, tmp); + d05 = d2i(_mm256_blendv_pd(i2d(tmp), i2d(d05), i2d(cmp))); + + sort_04v_merge_ascending(d01, d02, d03, d04); + sort_01v_merge_ascending(d05); +} + static INLINE void sort_05v_merge_descending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05) { + __m256i tmp, cmp; + + tmp = d01; + cmp = _mm256_cmpgt_epi64(d05, d01); + d01 = d2i(_mm256_blendv_pd(i2d(d05), i2d(d01), i2d(cmp))); + cmp = _mm256_cmpgt_epi64(d05, tmp); + d05 = d2i(_mm256_blendv_pd(i2d(tmp), i2d(d05), i2d(cmp))); + + sort_04v_merge_descending(d01, d02, d03, d04); + sort_01v_merge_descending(d05); +} + static INLINE void sort_06v_ascending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06) { + __m256i tmp, cmp; + + sort_04v_ascending(d01, d02, d03, d04); + sort_02v_descending(d05, d06); + + tmp = d05; + cmp = _mm256_cmpgt_epi64(d04, d05); + d05 = d2i(_mm256_blendv_pd(i2d(d05), i2d(d04), i2d(cmp))); + d04 = d2i(_mm256_blendv_pd(i2d(d04), i2d(tmp), i2d(cmp))); + + tmp = d06; + cmp = _mm256_cmpgt_epi64(d03, d06); + d06 = d2i(_mm256_blendv_pd(i2d(d06), i2d(d03), i2d(cmp))); + d03 = d2i(_mm256_blendv_pd(i2d(d03), i2d(tmp), i2d(cmp))); + + sort_04v_merge_ascending(d01, d02, d03, d04); + sort_02v_merge_ascending(d05, d06); +} + static INLINE void sort_06v_descending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06) { + __m256i tmp, cmp; + + sort_04v_descending(d01, d02, d03, d04); + sort_02v_ascending(d05, d06); + + tmp = d05; + cmp = _mm256_cmpgt_epi64(d04, d05); + d05 = d2i(_mm256_blendv_pd(i2d(d05), i2d(d04), i2d(cmp))); + d04 = d2i(_mm256_blendv_pd(i2d(d04), i2d(tmp), i2d(cmp))); + + tmp = d06; + cmp = _mm256_cmpgt_epi64(d03, d06); + d06 = d2i(_mm256_blendv_pd(i2d(d06), i2d(d03), i2d(cmp))); + d03 = d2i(_mm256_blendv_pd(i2d(d03), 
i2d(tmp), i2d(cmp))); + + sort_04v_merge_descending(d01, d02, d03, d04); + sort_02v_merge_descending(d05, d06); +} + static INLINE void sort_06v_merge_ascending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06) { + __m256i tmp, cmp; + + tmp = d01; + cmp = _mm256_cmpgt_epi64(d05, d01); + d01 = d2i(_mm256_blendv_pd(i2d(d05), i2d(d01), i2d(cmp))); + cmp = _mm256_cmpgt_epi64(d05, tmp); + d05 = d2i(_mm256_blendv_pd(i2d(tmp), i2d(d05), i2d(cmp))); + + tmp = d02; + cmp = _mm256_cmpgt_epi64(d06, d02); + d02 = d2i(_mm256_blendv_pd(i2d(d06), i2d(d02), i2d(cmp))); + cmp = _mm256_cmpgt_epi64(d06, tmp); + d06 = d2i(_mm256_blendv_pd(i2d(tmp), i2d(d06), i2d(cmp))); + + sort_04v_merge_ascending(d01, d02, d03, d04); + sort_02v_merge_ascending(d05, d06); +} + static INLINE void sort_06v_merge_descending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06) { + __m256i tmp, cmp; + + tmp = d01; + cmp = _mm256_cmpgt_epi64(d05, d01); + d01 = d2i(_mm256_blendv_pd(i2d(d05), i2d(d01), i2d(cmp))); + cmp = _mm256_cmpgt_epi64(d05, tmp); + d05 = d2i(_mm256_blendv_pd(i2d(tmp), i2d(d05), i2d(cmp))); + + tmp = d02; + cmp = _mm256_cmpgt_epi64(d06, d02); + d02 = d2i(_mm256_blendv_pd(i2d(d06), i2d(d02), i2d(cmp))); + cmp = _mm256_cmpgt_epi64(d06, tmp); + d06 = d2i(_mm256_blendv_pd(i2d(tmp), i2d(d06), i2d(cmp))); + + sort_04v_merge_descending(d01, d02, d03, d04); + sort_02v_merge_descending(d05, d06); +} + static INLINE void sort_07v_ascending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07) { + __m256i tmp, cmp; + + sort_04v_ascending(d01, d02, d03, d04); + sort_03v_descending(d05, d06, d07); + + tmp = d05; + cmp = _mm256_cmpgt_epi64(d04, d05); + d05 = d2i(_mm256_blendv_pd(i2d(d05), i2d(d04), i2d(cmp))); + d04 = d2i(_mm256_blendv_pd(i2d(d04), i2d(tmp), i2d(cmp))); + + tmp = d06; + cmp = _mm256_cmpgt_epi64(d03, d06); + d06 = d2i(_mm256_blendv_pd(i2d(d06), i2d(d03), i2d(cmp))); + d03 = d2i(_mm256_blendv_pd(i2d(d03), i2d(tmp), i2d(cmp))); + + tmp = d07; + cmp = _mm256_cmpgt_epi64(d02, d07); + d07 = d2i(_mm256_blendv_pd(i2d(d07), i2d(d02), i2d(cmp))); + d02 = d2i(_mm256_blendv_pd(i2d(d02), i2d(tmp), i2d(cmp))); + + sort_04v_merge_ascending(d01, d02, d03, d04); + sort_03v_merge_ascending(d05, d06, d07); +} + static INLINE void sort_07v_descending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07) { + __m256i tmp, cmp; + + sort_04v_descending(d01, d02, d03, d04); + sort_03v_ascending(d05, d06, d07); + + tmp = d05; + cmp = _mm256_cmpgt_epi64(d04, d05); + d05 = d2i(_mm256_blendv_pd(i2d(d05), i2d(d04), i2d(cmp))); + d04 = d2i(_mm256_blendv_pd(i2d(d04), i2d(tmp), i2d(cmp))); + + tmp = d06; + cmp = _mm256_cmpgt_epi64(d03, d06); + d06 = d2i(_mm256_blendv_pd(i2d(d06), i2d(d03), i2d(cmp))); + d03 = d2i(_mm256_blendv_pd(i2d(d03), i2d(tmp), i2d(cmp))); + + tmp = d07; + cmp = _mm256_cmpgt_epi64(d02, d07); + d07 = d2i(_mm256_blendv_pd(i2d(d07), i2d(d02), i2d(cmp))); + d02 = d2i(_mm256_blendv_pd(i2d(d02), i2d(tmp), i2d(cmp))); + + sort_04v_merge_descending(d01, d02, d03, d04); + sort_03v_merge_descending(d05, d06, d07); +} + static INLINE void sort_07v_merge_ascending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07) { + __m256i tmp, cmp; + + tmp = d01; + cmp = _mm256_cmpgt_epi64(d05, d01); + d01 = d2i(_mm256_blendv_pd(i2d(d05), i2d(d01), i2d(cmp))); + cmp = _mm256_cmpgt_epi64(d05, tmp); + d05 = d2i(_mm256_blendv_pd(i2d(tmp), i2d(d05), 
i2d(cmp))); + + tmp = d02; + cmp = _mm256_cmpgt_epi64(d06, d02); + d02 = d2i(_mm256_blendv_pd(i2d(d06), i2d(d02), i2d(cmp))); + cmp = _mm256_cmpgt_epi64(d06, tmp); + d06 = d2i(_mm256_blendv_pd(i2d(tmp), i2d(d06), i2d(cmp))); + + tmp = d03; + cmp = _mm256_cmpgt_epi64(d07, d03); + d03 = d2i(_mm256_blendv_pd(i2d(d07), i2d(d03), i2d(cmp))); + cmp = _mm256_cmpgt_epi64(d07, tmp); + d07 = d2i(_mm256_blendv_pd(i2d(tmp), i2d(d07), i2d(cmp))); + + sort_04v_merge_ascending(d01, d02, d03, d04); + sort_03v_merge_ascending(d05, d06, d07); +} + static INLINE void sort_07v_merge_descending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07) { + __m256i tmp, cmp; + + tmp = d01; + cmp = _mm256_cmpgt_epi64(d05, d01); + d01 = d2i(_mm256_blendv_pd(i2d(d05), i2d(d01), i2d(cmp))); + cmp = _mm256_cmpgt_epi64(d05, tmp); + d05 = d2i(_mm256_blendv_pd(i2d(tmp), i2d(d05), i2d(cmp))); + + tmp = d02; + cmp = _mm256_cmpgt_epi64(d06, d02); + d02 = d2i(_mm256_blendv_pd(i2d(d06), i2d(d02), i2d(cmp))); + cmp = _mm256_cmpgt_epi64(d06, tmp); + d06 = d2i(_mm256_blendv_pd(i2d(tmp), i2d(d06), i2d(cmp))); + + tmp = d03; + cmp = _mm256_cmpgt_epi64(d07, d03); + d03 = d2i(_mm256_blendv_pd(i2d(d07), i2d(d03), i2d(cmp))); + cmp = _mm256_cmpgt_epi64(d07, tmp); + d07 = d2i(_mm256_blendv_pd(i2d(tmp), i2d(d07), i2d(cmp))); + + sort_04v_merge_descending(d01, d02, d03, d04); + sort_03v_merge_descending(d05, d06, d07); +} + static INLINE void sort_08v_ascending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08) { + __m256i tmp, cmp; + + sort_04v_ascending(d01, d02, d03, d04); + sort_04v_descending(d05, d06, d07, d08); + + tmp = d05; + cmp = _mm256_cmpgt_epi64(d04, d05); + d05 = d2i(_mm256_blendv_pd(i2d(d05), i2d(d04), i2d(cmp))); + d04 = d2i(_mm256_blendv_pd(i2d(d04), i2d(tmp), i2d(cmp))); + + tmp = d06; + cmp = _mm256_cmpgt_epi64(d03, d06); + d06 = d2i(_mm256_blendv_pd(i2d(d06), i2d(d03), i2d(cmp))); + d03 = d2i(_mm256_blendv_pd(i2d(d03), i2d(tmp), i2d(cmp))); + + tmp = d07; + cmp = _mm256_cmpgt_epi64(d02, d07); + d07 = d2i(_mm256_blendv_pd(i2d(d07), i2d(d02), i2d(cmp))); + d02 = d2i(_mm256_blendv_pd(i2d(d02), i2d(tmp), i2d(cmp))); + + tmp = d08; + cmp = _mm256_cmpgt_epi64(d01, d08); + d08 = d2i(_mm256_blendv_pd(i2d(d08), i2d(d01), i2d(cmp))); + d01 = d2i(_mm256_blendv_pd(i2d(d01), i2d(tmp), i2d(cmp))); + + sort_04v_merge_ascending(d01, d02, d03, d04); + sort_04v_merge_ascending(d05, d06, d07, d08); +} + static INLINE void sort_08v_descending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08) { + __m256i tmp, cmp; + + sort_04v_descending(d01, d02, d03, d04); + sort_04v_ascending(d05, d06, d07, d08); + + tmp = d05; + cmp = _mm256_cmpgt_epi64(d04, d05); + d05 = d2i(_mm256_blendv_pd(i2d(d05), i2d(d04), i2d(cmp))); + d04 = d2i(_mm256_blendv_pd(i2d(d04), i2d(tmp), i2d(cmp))); + + tmp = d06; + cmp = _mm256_cmpgt_epi64(d03, d06); + d06 = d2i(_mm256_blendv_pd(i2d(d06), i2d(d03), i2d(cmp))); + d03 = d2i(_mm256_blendv_pd(i2d(d03), i2d(tmp), i2d(cmp))); + + tmp = d07; + cmp = _mm256_cmpgt_epi64(d02, d07); + d07 = d2i(_mm256_blendv_pd(i2d(d07), i2d(d02), i2d(cmp))); + d02 = d2i(_mm256_blendv_pd(i2d(d02), i2d(tmp), i2d(cmp))); + + tmp = d08; + cmp = _mm256_cmpgt_epi64(d01, d08); + d08 = d2i(_mm256_blendv_pd(i2d(d08), i2d(d01), i2d(cmp))); + d01 = d2i(_mm256_blendv_pd(i2d(d01), i2d(tmp), i2d(cmp))); + + sort_04v_merge_descending(d01, d02, d03, d04); + sort_04v_merge_descending(d05, d06, 
d07, d08); +} + static INLINE void sort_08v_merge_ascending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08) { + __m256i tmp, cmp; + + tmp = d01; + cmp = _mm256_cmpgt_epi64(d05, d01); + d01 = d2i(_mm256_blendv_pd(i2d(d05), i2d(d01), i2d(cmp))); + cmp = _mm256_cmpgt_epi64(d05, tmp); + d05 = d2i(_mm256_blendv_pd(i2d(tmp), i2d(d05), i2d(cmp))); + + tmp = d02; + cmp = _mm256_cmpgt_epi64(d06, d02); + d02 = d2i(_mm256_blendv_pd(i2d(d06), i2d(d02), i2d(cmp))); + cmp = _mm256_cmpgt_epi64(d06, tmp); + d06 = d2i(_mm256_blendv_pd(i2d(tmp), i2d(d06), i2d(cmp))); + + tmp = d03; + cmp = _mm256_cmpgt_epi64(d07, d03); + d03 = d2i(_mm256_blendv_pd(i2d(d07), i2d(d03), i2d(cmp))); + cmp = _mm256_cmpgt_epi64(d07, tmp); + d07 = d2i(_mm256_blendv_pd(i2d(tmp), i2d(d07), i2d(cmp))); + + tmp = d04; + cmp = _mm256_cmpgt_epi64(d08, d04); + d04 = d2i(_mm256_blendv_pd(i2d(d08), i2d(d04), i2d(cmp))); + cmp = _mm256_cmpgt_epi64(d08, tmp); + d08 = d2i(_mm256_blendv_pd(i2d(tmp), i2d(d08), i2d(cmp))); + + sort_04v_merge_ascending(d01, d02, d03, d04); + sort_04v_merge_ascending(d05, d06, d07, d08); +} + static INLINE void sort_08v_merge_descending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08) { + __m256i tmp, cmp; + + tmp = d01; + cmp = _mm256_cmpgt_epi64(d05, d01); + d01 = d2i(_mm256_blendv_pd(i2d(d05), i2d(d01), i2d(cmp))); + cmp = _mm256_cmpgt_epi64(d05, tmp); + d05 = d2i(_mm256_blendv_pd(i2d(tmp), i2d(d05), i2d(cmp))); + + tmp = d02; + cmp = _mm256_cmpgt_epi64(d06, d02); + d02 = d2i(_mm256_blendv_pd(i2d(d06), i2d(d02), i2d(cmp))); + cmp = _mm256_cmpgt_epi64(d06, tmp); + d06 = d2i(_mm256_blendv_pd(i2d(tmp), i2d(d06), i2d(cmp))); + + tmp = d03; + cmp = _mm256_cmpgt_epi64(d07, d03); + d03 = d2i(_mm256_blendv_pd(i2d(d07), i2d(d03), i2d(cmp))); + cmp = _mm256_cmpgt_epi64(d07, tmp); + d07 = d2i(_mm256_blendv_pd(i2d(tmp), i2d(d07), i2d(cmp))); + + tmp = d04; + cmp = _mm256_cmpgt_epi64(d08, d04); + d04 = d2i(_mm256_blendv_pd(i2d(d08), i2d(d04), i2d(cmp))); + cmp = _mm256_cmpgt_epi64(d08, tmp); + d08 = d2i(_mm256_blendv_pd(i2d(tmp), i2d(d08), i2d(cmp))); + + sort_04v_merge_descending(d01, d02, d03, d04); + sort_04v_merge_descending(d05, d06, d07, d08); +} + static INLINE void sort_09v_ascending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08, __m256i& d09) { + __m256i tmp, cmp; + + sort_08v_ascending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_01v_descending(d09); + + tmp = d09; + cmp = _mm256_cmpgt_epi64(d08, d09); + d09 = d2i(_mm256_blendv_pd(i2d(d09), i2d(d08), i2d(cmp))); + d08 = d2i(_mm256_blendv_pd(i2d(d08), i2d(tmp), i2d(cmp))); + + sort_08v_merge_ascending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_01v_merge_ascending(d09); +} + static INLINE void sort_09v_descending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08, __m256i& d09) { + __m256i tmp, cmp; + + sort_08v_descending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_01v_ascending(d09); + + tmp = d09; + cmp = _mm256_cmpgt_epi64(d08, d09); + d09 = d2i(_mm256_blendv_pd(i2d(d09), i2d(d08), i2d(cmp))); + d08 = d2i(_mm256_blendv_pd(i2d(d08), i2d(tmp), i2d(cmp))); + + sort_08v_merge_descending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_01v_merge_descending(d09); +} + static INLINE void sort_10v_ascending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& 
d08, __m256i& d09, __m256i& d10) { + __m256i tmp, cmp; + + sort_08v_ascending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_02v_descending(d09, d10); + + tmp = d09; + cmp = _mm256_cmpgt_epi64(d08, d09); + d09 = d2i(_mm256_blendv_pd(i2d(d09), i2d(d08), i2d(cmp))); + d08 = d2i(_mm256_blendv_pd(i2d(d08), i2d(tmp), i2d(cmp))); + + tmp = d10; + cmp = _mm256_cmpgt_epi64(d07, d10); + d10 = d2i(_mm256_blendv_pd(i2d(d10), i2d(d07), i2d(cmp))); + d07 = d2i(_mm256_blendv_pd(i2d(d07), i2d(tmp), i2d(cmp))); + + sort_08v_merge_ascending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_02v_merge_ascending(d09, d10); +} + static INLINE void sort_10v_descending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08, __m256i& d09, __m256i& d10) { + __m256i tmp, cmp; + + sort_08v_descending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_02v_ascending(d09, d10); + + tmp = d09; + cmp = _mm256_cmpgt_epi64(d08, d09); + d09 = d2i(_mm256_blendv_pd(i2d(d09), i2d(d08), i2d(cmp))); + d08 = d2i(_mm256_blendv_pd(i2d(d08), i2d(tmp), i2d(cmp))); + + tmp = d10; + cmp = _mm256_cmpgt_epi64(d07, d10); + d10 = d2i(_mm256_blendv_pd(i2d(d10), i2d(d07), i2d(cmp))); + d07 = d2i(_mm256_blendv_pd(i2d(d07), i2d(tmp), i2d(cmp))); + + sort_08v_merge_descending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_02v_merge_descending(d09, d10); +} + static INLINE void sort_11v_ascending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08, __m256i& d09, __m256i& d10, __m256i& d11) { + __m256i tmp, cmp; + + sort_08v_ascending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_03v_descending(d09, d10, d11); + + tmp = d09; + cmp = _mm256_cmpgt_epi64(d08, d09); + d09 = d2i(_mm256_blendv_pd(i2d(d09), i2d(d08), i2d(cmp))); + d08 = d2i(_mm256_blendv_pd(i2d(d08), i2d(tmp), i2d(cmp))); + + tmp = d10; + cmp = _mm256_cmpgt_epi64(d07, d10); + d10 = d2i(_mm256_blendv_pd(i2d(d10), i2d(d07), i2d(cmp))); + d07 = d2i(_mm256_blendv_pd(i2d(d07), i2d(tmp), i2d(cmp))); + + tmp = d11; + cmp = _mm256_cmpgt_epi64(d06, d11); + d11 = d2i(_mm256_blendv_pd(i2d(d11), i2d(d06), i2d(cmp))); + d06 = d2i(_mm256_blendv_pd(i2d(d06), i2d(tmp), i2d(cmp))); + + sort_08v_merge_ascending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_03v_merge_ascending(d09, d10, d11); +} + static INLINE void sort_11v_descending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08, __m256i& d09, __m256i& d10, __m256i& d11) { + __m256i tmp, cmp; + + sort_08v_descending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_03v_ascending(d09, d10, d11); + + tmp = d09; + cmp = _mm256_cmpgt_epi64(d08, d09); + d09 = d2i(_mm256_blendv_pd(i2d(d09), i2d(d08), i2d(cmp))); + d08 = d2i(_mm256_blendv_pd(i2d(d08), i2d(tmp), i2d(cmp))); + + tmp = d10; + cmp = _mm256_cmpgt_epi64(d07, d10); + d10 = d2i(_mm256_blendv_pd(i2d(d10), i2d(d07), i2d(cmp))); + d07 = d2i(_mm256_blendv_pd(i2d(d07), i2d(tmp), i2d(cmp))); + + tmp = d11; + cmp = _mm256_cmpgt_epi64(d06, d11); + d11 = d2i(_mm256_blendv_pd(i2d(d11), i2d(d06), i2d(cmp))); + d06 = d2i(_mm256_blendv_pd(i2d(d06), i2d(tmp), i2d(cmp))); + + sort_08v_merge_descending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_03v_merge_descending(d09, d10, d11); +} + static INLINE void sort_12v_ascending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08, __m256i& d09, __m256i& d10, __m256i& d11, __m256i& d12) { + __m256i tmp, cmp; + + sort_08v_ascending(d01, 
d02, d03, d04, d05, d06, d07, d08); + sort_04v_descending(d09, d10, d11, d12); + + tmp = d09; + cmp = _mm256_cmpgt_epi64(d08, d09); + d09 = d2i(_mm256_blendv_pd(i2d(d09), i2d(d08), i2d(cmp))); + d08 = d2i(_mm256_blendv_pd(i2d(d08), i2d(tmp), i2d(cmp))); + + tmp = d10; + cmp = _mm256_cmpgt_epi64(d07, d10); + d10 = d2i(_mm256_blendv_pd(i2d(d10), i2d(d07), i2d(cmp))); + d07 = d2i(_mm256_blendv_pd(i2d(d07), i2d(tmp), i2d(cmp))); + + tmp = d11; + cmp = _mm256_cmpgt_epi64(d06, d11); + d11 = d2i(_mm256_blendv_pd(i2d(d11), i2d(d06), i2d(cmp))); + d06 = d2i(_mm256_blendv_pd(i2d(d06), i2d(tmp), i2d(cmp))); + + tmp = d12; + cmp = _mm256_cmpgt_epi64(d05, d12); + d12 = d2i(_mm256_blendv_pd(i2d(d12), i2d(d05), i2d(cmp))); + d05 = d2i(_mm256_blendv_pd(i2d(d05), i2d(tmp), i2d(cmp))); + + sort_08v_merge_ascending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_04v_merge_ascending(d09, d10, d11, d12); +} + static INLINE void sort_12v_descending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08, __m256i& d09, __m256i& d10, __m256i& d11, __m256i& d12) { + __m256i tmp, cmp; + + sort_08v_descending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_04v_ascending(d09, d10, d11, d12); + + tmp = d09; + cmp = _mm256_cmpgt_epi64(d08, d09); + d09 = d2i(_mm256_blendv_pd(i2d(d09), i2d(d08), i2d(cmp))); + d08 = d2i(_mm256_blendv_pd(i2d(d08), i2d(tmp), i2d(cmp))); + + tmp = d10; + cmp = _mm256_cmpgt_epi64(d07, d10); + d10 = d2i(_mm256_blendv_pd(i2d(d10), i2d(d07), i2d(cmp))); + d07 = d2i(_mm256_blendv_pd(i2d(d07), i2d(tmp), i2d(cmp))); + + tmp = d11; + cmp = _mm256_cmpgt_epi64(d06, d11); + d11 = d2i(_mm256_blendv_pd(i2d(d11), i2d(d06), i2d(cmp))); + d06 = d2i(_mm256_blendv_pd(i2d(d06), i2d(tmp), i2d(cmp))); + + tmp = d12; + cmp = _mm256_cmpgt_epi64(d05, d12); + d12 = d2i(_mm256_blendv_pd(i2d(d12), i2d(d05), i2d(cmp))); + d05 = d2i(_mm256_blendv_pd(i2d(d05), i2d(tmp), i2d(cmp))); + + sort_08v_merge_descending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_04v_merge_descending(d09, d10, d11, d12); +} + static INLINE void sort_13v_ascending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08, __m256i& d09, __m256i& d10, __m256i& d11, __m256i& d12, __m256i& d13) { + __m256i tmp, cmp; + + sort_08v_ascending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_05v_descending(d09, d10, d11, d12, d13); + + tmp = d09; + cmp = _mm256_cmpgt_epi64(d08, d09); + d09 = d2i(_mm256_blendv_pd(i2d(d09), i2d(d08), i2d(cmp))); + d08 = d2i(_mm256_blendv_pd(i2d(d08), i2d(tmp), i2d(cmp))); + + tmp = d10; + cmp = _mm256_cmpgt_epi64(d07, d10); + d10 = d2i(_mm256_blendv_pd(i2d(d10), i2d(d07), i2d(cmp))); + d07 = d2i(_mm256_blendv_pd(i2d(d07), i2d(tmp), i2d(cmp))); + + tmp = d11; + cmp = _mm256_cmpgt_epi64(d06, d11); + d11 = d2i(_mm256_blendv_pd(i2d(d11), i2d(d06), i2d(cmp))); + d06 = d2i(_mm256_blendv_pd(i2d(d06), i2d(tmp), i2d(cmp))); + + tmp = d12; + cmp = _mm256_cmpgt_epi64(d05, d12); + d12 = d2i(_mm256_blendv_pd(i2d(d12), i2d(d05), i2d(cmp))); + d05 = d2i(_mm256_blendv_pd(i2d(d05), i2d(tmp), i2d(cmp))); + + tmp = d13; + cmp = _mm256_cmpgt_epi64(d04, d13); + d13 = d2i(_mm256_blendv_pd(i2d(d13), i2d(d04), i2d(cmp))); + d04 = d2i(_mm256_blendv_pd(i2d(d04), i2d(tmp), i2d(cmp))); + + sort_08v_merge_ascending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_05v_merge_ascending(d09, d10, d11, d12, d13); +} + static INLINE void sort_13v_descending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& 
d07, __m256i& d08, __m256i& d09, __m256i& d10, __m256i& d11, __m256i& d12, __m256i& d13) { + __m256i tmp, cmp; + + sort_08v_descending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_05v_ascending(d09, d10, d11, d12, d13); + + tmp = d09; + cmp = _mm256_cmpgt_epi64(d08, d09); + d09 = d2i(_mm256_blendv_pd(i2d(d09), i2d(d08), i2d(cmp))); + d08 = d2i(_mm256_blendv_pd(i2d(d08), i2d(tmp), i2d(cmp))); + + tmp = d10; + cmp = _mm256_cmpgt_epi64(d07, d10); + d10 = d2i(_mm256_blendv_pd(i2d(d10), i2d(d07), i2d(cmp))); + d07 = d2i(_mm256_blendv_pd(i2d(d07), i2d(tmp), i2d(cmp))); + + tmp = d11; + cmp = _mm256_cmpgt_epi64(d06, d11); + d11 = d2i(_mm256_blendv_pd(i2d(d11), i2d(d06), i2d(cmp))); + d06 = d2i(_mm256_blendv_pd(i2d(d06), i2d(tmp), i2d(cmp))); + + tmp = d12; + cmp = _mm256_cmpgt_epi64(d05, d12); + d12 = d2i(_mm256_blendv_pd(i2d(d12), i2d(d05), i2d(cmp))); + d05 = d2i(_mm256_blendv_pd(i2d(d05), i2d(tmp), i2d(cmp))); + + tmp = d13; + cmp = _mm256_cmpgt_epi64(d04, d13); + d13 = d2i(_mm256_blendv_pd(i2d(d13), i2d(d04), i2d(cmp))); + d04 = d2i(_mm256_blendv_pd(i2d(d04), i2d(tmp), i2d(cmp))); + + sort_08v_merge_descending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_05v_merge_descending(d09, d10, d11, d12, d13); +} + static INLINE void sort_14v_ascending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08, __m256i& d09, __m256i& d10, __m256i& d11, __m256i& d12, __m256i& d13, __m256i& d14) { + __m256i tmp, cmp; + + sort_08v_ascending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_06v_descending(d09, d10, d11, d12, d13, d14); + + tmp = d09; + cmp = _mm256_cmpgt_epi64(d08, d09); + d09 = d2i(_mm256_blendv_pd(i2d(d09), i2d(d08), i2d(cmp))); + d08 = d2i(_mm256_blendv_pd(i2d(d08), i2d(tmp), i2d(cmp))); + + tmp = d10; + cmp = _mm256_cmpgt_epi64(d07, d10); + d10 = d2i(_mm256_blendv_pd(i2d(d10), i2d(d07), i2d(cmp))); + d07 = d2i(_mm256_blendv_pd(i2d(d07), i2d(tmp), i2d(cmp))); + + tmp = d11; + cmp = _mm256_cmpgt_epi64(d06, d11); + d11 = d2i(_mm256_blendv_pd(i2d(d11), i2d(d06), i2d(cmp))); + d06 = d2i(_mm256_blendv_pd(i2d(d06), i2d(tmp), i2d(cmp))); + + tmp = d12; + cmp = _mm256_cmpgt_epi64(d05, d12); + d12 = d2i(_mm256_blendv_pd(i2d(d12), i2d(d05), i2d(cmp))); + d05 = d2i(_mm256_blendv_pd(i2d(d05), i2d(tmp), i2d(cmp))); + + tmp = d13; + cmp = _mm256_cmpgt_epi64(d04, d13); + d13 = d2i(_mm256_blendv_pd(i2d(d13), i2d(d04), i2d(cmp))); + d04 = d2i(_mm256_blendv_pd(i2d(d04), i2d(tmp), i2d(cmp))); + + tmp = d14; + cmp = _mm256_cmpgt_epi64(d03, d14); + d14 = d2i(_mm256_blendv_pd(i2d(d14), i2d(d03), i2d(cmp))); + d03 = d2i(_mm256_blendv_pd(i2d(d03), i2d(tmp), i2d(cmp))); + + sort_08v_merge_ascending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_06v_merge_ascending(d09, d10, d11, d12, d13, d14); +} + static INLINE void sort_14v_descending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08, __m256i& d09, __m256i& d10, __m256i& d11, __m256i& d12, __m256i& d13, __m256i& d14) { + __m256i tmp, cmp; + + sort_08v_descending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_06v_ascending(d09, d10, d11, d12, d13, d14); + + tmp = d09; + cmp = _mm256_cmpgt_epi64(d08, d09); + d09 = d2i(_mm256_blendv_pd(i2d(d09), i2d(d08), i2d(cmp))); + d08 = d2i(_mm256_blendv_pd(i2d(d08), i2d(tmp), i2d(cmp))); + + tmp = d10; + cmp = _mm256_cmpgt_epi64(d07, d10); + d10 = d2i(_mm256_blendv_pd(i2d(d10), i2d(d07), i2d(cmp))); + d07 = d2i(_mm256_blendv_pd(i2d(d07), i2d(tmp), i2d(cmp))); + + tmp = d11; + cmp = _mm256_cmpgt_epi64(d06, 
d11); + d11 = d2i(_mm256_blendv_pd(i2d(d11), i2d(d06), i2d(cmp))); + d06 = d2i(_mm256_blendv_pd(i2d(d06), i2d(tmp), i2d(cmp))); + + tmp = d12; + cmp = _mm256_cmpgt_epi64(d05, d12); + d12 = d2i(_mm256_blendv_pd(i2d(d12), i2d(d05), i2d(cmp))); + d05 = d2i(_mm256_blendv_pd(i2d(d05), i2d(tmp), i2d(cmp))); + + tmp = d13; + cmp = _mm256_cmpgt_epi64(d04, d13); + d13 = d2i(_mm256_blendv_pd(i2d(d13), i2d(d04), i2d(cmp))); + d04 = d2i(_mm256_blendv_pd(i2d(d04), i2d(tmp), i2d(cmp))); + + tmp = d14; + cmp = _mm256_cmpgt_epi64(d03, d14); + d14 = d2i(_mm256_blendv_pd(i2d(d14), i2d(d03), i2d(cmp))); + d03 = d2i(_mm256_blendv_pd(i2d(d03), i2d(tmp), i2d(cmp))); + + sort_08v_merge_descending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_06v_merge_descending(d09, d10, d11, d12, d13, d14); +} + static INLINE void sort_15v_ascending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08, __m256i& d09, __m256i& d10, __m256i& d11, __m256i& d12, __m256i& d13, __m256i& d14, __m256i& d15) { + __m256i tmp, cmp; + + sort_08v_ascending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_07v_descending(d09, d10, d11, d12, d13, d14, d15); + + tmp = d09; + cmp = _mm256_cmpgt_epi64(d08, d09); + d09 = d2i(_mm256_blendv_pd(i2d(d09), i2d(d08), i2d(cmp))); + d08 = d2i(_mm256_blendv_pd(i2d(d08), i2d(tmp), i2d(cmp))); + + tmp = d10; + cmp = _mm256_cmpgt_epi64(d07, d10); + d10 = d2i(_mm256_blendv_pd(i2d(d10), i2d(d07), i2d(cmp))); + d07 = d2i(_mm256_blendv_pd(i2d(d07), i2d(tmp), i2d(cmp))); + + tmp = d11; + cmp = _mm256_cmpgt_epi64(d06, d11); + d11 = d2i(_mm256_blendv_pd(i2d(d11), i2d(d06), i2d(cmp))); + d06 = d2i(_mm256_blendv_pd(i2d(d06), i2d(tmp), i2d(cmp))); + + tmp = d12; + cmp = _mm256_cmpgt_epi64(d05, d12); + d12 = d2i(_mm256_blendv_pd(i2d(d12), i2d(d05), i2d(cmp))); + d05 = d2i(_mm256_blendv_pd(i2d(d05), i2d(tmp), i2d(cmp))); + + tmp = d13; + cmp = _mm256_cmpgt_epi64(d04, d13); + d13 = d2i(_mm256_blendv_pd(i2d(d13), i2d(d04), i2d(cmp))); + d04 = d2i(_mm256_blendv_pd(i2d(d04), i2d(tmp), i2d(cmp))); + + tmp = d14; + cmp = _mm256_cmpgt_epi64(d03, d14); + d14 = d2i(_mm256_blendv_pd(i2d(d14), i2d(d03), i2d(cmp))); + d03 = d2i(_mm256_blendv_pd(i2d(d03), i2d(tmp), i2d(cmp))); + + tmp = d15; + cmp = _mm256_cmpgt_epi64(d02, d15); + d15 = d2i(_mm256_blendv_pd(i2d(d15), i2d(d02), i2d(cmp))); + d02 = d2i(_mm256_blendv_pd(i2d(d02), i2d(tmp), i2d(cmp))); + + sort_08v_merge_ascending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_07v_merge_ascending(d09, d10, d11, d12, d13, d14, d15); +} + static INLINE void sort_15v_descending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08, __m256i& d09, __m256i& d10, __m256i& d11, __m256i& d12, __m256i& d13, __m256i& d14, __m256i& d15) { + __m256i tmp, cmp; + + sort_08v_descending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_07v_ascending(d09, d10, d11, d12, d13, d14, d15); + + tmp = d09; + cmp = _mm256_cmpgt_epi64(d08, d09); + d09 = d2i(_mm256_blendv_pd(i2d(d09), i2d(d08), i2d(cmp))); + d08 = d2i(_mm256_blendv_pd(i2d(d08), i2d(tmp), i2d(cmp))); + + tmp = d10; + cmp = _mm256_cmpgt_epi64(d07, d10); + d10 = d2i(_mm256_blendv_pd(i2d(d10), i2d(d07), i2d(cmp))); + d07 = d2i(_mm256_blendv_pd(i2d(d07), i2d(tmp), i2d(cmp))); + + tmp = d11; + cmp = _mm256_cmpgt_epi64(d06, d11); + d11 = d2i(_mm256_blendv_pd(i2d(d11), i2d(d06), i2d(cmp))); + d06 = d2i(_mm256_blendv_pd(i2d(d06), i2d(tmp), i2d(cmp))); + + tmp = d12; + cmp = _mm256_cmpgt_epi64(d05, d12); + d12 = d2i(_mm256_blendv_pd(i2d(d12), 
i2d(d05), i2d(cmp))); + d05 = d2i(_mm256_blendv_pd(i2d(d05), i2d(tmp), i2d(cmp))); + + tmp = d13; + cmp = _mm256_cmpgt_epi64(d04, d13); + d13 = d2i(_mm256_blendv_pd(i2d(d13), i2d(d04), i2d(cmp))); + d04 = d2i(_mm256_blendv_pd(i2d(d04), i2d(tmp), i2d(cmp))); + + tmp = d14; + cmp = _mm256_cmpgt_epi64(d03, d14); + d14 = d2i(_mm256_blendv_pd(i2d(d14), i2d(d03), i2d(cmp))); + d03 = d2i(_mm256_blendv_pd(i2d(d03), i2d(tmp), i2d(cmp))); + + tmp = d15; + cmp = _mm256_cmpgt_epi64(d02, d15); + d15 = d2i(_mm256_blendv_pd(i2d(d15), i2d(d02), i2d(cmp))); + d02 = d2i(_mm256_blendv_pd(i2d(d02), i2d(tmp), i2d(cmp))); + + sort_08v_merge_descending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_07v_merge_descending(d09, d10, d11, d12, d13, d14, d15); +} + static INLINE void sort_16v_ascending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08, __m256i& d09, __m256i& d10, __m256i& d11, __m256i& d12, __m256i& d13, __m256i& d14, __m256i& d15, __m256i& d16) { + __m256i tmp, cmp; + + sort_08v_ascending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_08v_descending(d09, d10, d11, d12, d13, d14, d15, d16); + + tmp = d09; + cmp = _mm256_cmpgt_epi64(d08, d09); + d09 = d2i(_mm256_blendv_pd(i2d(d09), i2d(d08), i2d(cmp))); + d08 = d2i(_mm256_blendv_pd(i2d(d08), i2d(tmp), i2d(cmp))); + + tmp = d10; + cmp = _mm256_cmpgt_epi64(d07, d10); + d10 = d2i(_mm256_blendv_pd(i2d(d10), i2d(d07), i2d(cmp))); + d07 = d2i(_mm256_blendv_pd(i2d(d07), i2d(tmp), i2d(cmp))); + + tmp = d11; + cmp = _mm256_cmpgt_epi64(d06, d11); + d11 = d2i(_mm256_blendv_pd(i2d(d11), i2d(d06), i2d(cmp))); + d06 = d2i(_mm256_blendv_pd(i2d(d06), i2d(tmp), i2d(cmp))); + + tmp = d12; + cmp = _mm256_cmpgt_epi64(d05, d12); + d12 = d2i(_mm256_blendv_pd(i2d(d12), i2d(d05), i2d(cmp))); + d05 = d2i(_mm256_blendv_pd(i2d(d05), i2d(tmp), i2d(cmp))); + + tmp = d13; + cmp = _mm256_cmpgt_epi64(d04, d13); + d13 = d2i(_mm256_blendv_pd(i2d(d13), i2d(d04), i2d(cmp))); + d04 = d2i(_mm256_blendv_pd(i2d(d04), i2d(tmp), i2d(cmp))); + + tmp = d14; + cmp = _mm256_cmpgt_epi64(d03, d14); + d14 = d2i(_mm256_blendv_pd(i2d(d14), i2d(d03), i2d(cmp))); + d03 = d2i(_mm256_blendv_pd(i2d(d03), i2d(tmp), i2d(cmp))); + + tmp = d15; + cmp = _mm256_cmpgt_epi64(d02, d15); + d15 = d2i(_mm256_blendv_pd(i2d(d15), i2d(d02), i2d(cmp))); + d02 = d2i(_mm256_blendv_pd(i2d(d02), i2d(tmp), i2d(cmp))); + + tmp = d16; + cmp = _mm256_cmpgt_epi64(d01, d16); + d16 = d2i(_mm256_blendv_pd(i2d(d16), i2d(d01), i2d(cmp))); + d01 = d2i(_mm256_blendv_pd(i2d(d01), i2d(tmp), i2d(cmp))); + + sort_08v_merge_ascending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_08v_merge_ascending(d09, d10, d11, d12, d13, d14, d15, d16); +} + static INLINE void sort_16v_descending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08, __m256i& d09, __m256i& d10, __m256i& d11, __m256i& d12, __m256i& d13, __m256i& d14, __m256i& d15, __m256i& d16) { + __m256i tmp, cmp; + + sort_08v_descending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_08v_ascending(d09, d10, d11, d12, d13, d14, d15, d16); + + tmp = d09; + cmp = _mm256_cmpgt_epi64(d08, d09); + d09 = d2i(_mm256_blendv_pd(i2d(d09), i2d(d08), i2d(cmp))); + d08 = d2i(_mm256_blendv_pd(i2d(d08), i2d(tmp), i2d(cmp))); + + tmp = d10; + cmp = _mm256_cmpgt_epi64(d07, d10); + d10 = d2i(_mm256_blendv_pd(i2d(d10), i2d(d07), i2d(cmp))); + d07 = d2i(_mm256_blendv_pd(i2d(d07), i2d(tmp), i2d(cmp))); + + tmp = d11; + cmp = _mm256_cmpgt_epi64(d06, d11); + d11 = 
d2i(_mm256_blendv_pd(i2d(d11), i2d(d06), i2d(cmp))); + d06 = d2i(_mm256_blendv_pd(i2d(d06), i2d(tmp), i2d(cmp))); + + tmp = d12; + cmp = _mm256_cmpgt_epi64(d05, d12); + d12 = d2i(_mm256_blendv_pd(i2d(d12), i2d(d05), i2d(cmp))); + d05 = d2i(_mm256_blendv_pd(i2d(d05), i2d(tmp), i2d(cmp))); + + tmp = d13; + cmp = _mm256_cmpgt_epi64(d04, d13); + d13 = d2i(_mm256_blendv_pd(i2d(d13), i2d(d04), i2d(cmp))); + d04 = d2i(_mm256_blendv_pd(i2d(d04), i2d(tmp), i2d(cmp))); + + tmp = d14; + cmp = _mm256_cmpgt_epi64(d03, d14); + d14 = d2i(_mm256_blendv_pd(i2d(d14), i2d(d03), i2d(cmp))); + d03 = d2i(_mm256_blendv_pd(i2d(d03), i2d(tmp), i2d(cmp))); + + tmp = d15; + cmp = _mm256_cmpgt_epi64(d02, d15); + d15 = d2i(_mm256_blendv_pd(i2d(d15), i2d(d02), i2d(cmp))); + d02 = d2i(_mm256_blendv_pd(i2d(d02), i2d(tmp), i2d(cmp))); + + tmp = d16; + cmp = _mm256_cmpgt_epi64(d01, d16); + d16 = d2i(_mm256_blendv_pd(i2d(d16), i2d(d01), i2d(cmp))); + d01 = d2i(_mm256_blendv_pd(i2d(d01), i2d(tmp), i2d(cmp))); + + sort_08v_merge_descending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_08v_merge_descending(d09, d10, d11, d12, d13, d14, d15, d16); +} + +static NOINLINE void sort_01v(int64_t *ptr) { + __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0); + sort_01v_ascending(d01); + _mm256_storeu_si256((__m256i *) ptr + 0, d01); +} + +static NOINLINE void sort_02v(int64_t *ptr) { + __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0); + __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1); + sort_02v_ascending(d01, d02); + _mm256_storeu_si256((__m256i *) ptr + 0, d01); + _mm256_storeu_si256((__m256i *) ptr + 1, d02); +} + +static NOINLINE void sort_03v(int64_t *ptr) { + __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0); + __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1); + __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2); + sort_03v_ascending(d01, d02, d03); + _mm256_storeu_si256((__m256i *) ptr + 0, d01); + _mm256_storeu_si256((__m256i *) ptr + 1, d02); + _mm256_storeu_si256((__m256i *) ptr + 2, d03); +} + +static NOINLINE void sort_04v(int64_t *ptr) { + __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0); + __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1); + __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2); + __m256i d04 = _mm256_lddqu_si256((__m256i const *) ptr + 3); + sort_04v_ascending(d01, d02, d03, d04); + _mm256_storeu_si256((__m256i *) ptr + 0, d01); + _mm256_storeu_si256((__m256i *) ptr + 1, d02); + _mm256_storeu_si256((__m256i *) ptr + 2, d03); + _mm256_storeu_si256((__m256i *) ptr + 3, d04); +} + +static NOINLINE void sort_05v(int64_t *ptr) { + __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0); + __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1); + __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2); + __m256i d04 = _mm256_lddqu_si256((__m256i const *) ptr + 3); + __m256i d05 = _mm256_lddqu_si256((__m256i const *) ptr + 4); + sort_05v_ascending(d01, d02, d03, d04, d05); + _mm256_storeu_si256((__m256i *) ptr + 0, d01); + _mm256_storeu_si256((__m256i *) ptr + 1, d02); + _mm256_storeu_si256((__m256i *) ptr + 2, d03); + _mm256_storeu_si256((__m256i *) ptr + 3, d04); + _mm256_storeu_si256((__m256i *) ptr + 4, d05); +} + +static NOINLINE void sort_06v(int64_t *ptr) { + __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0); + __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1); + __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2); + __m256i d04 = _mm256_lddqu_si256((__m256i const *) ptr + 
3); + __m256i d05 = _mm256_lddqu_si256((__m256i const *) ptr + 4); + __m256i d06 = _mm256_lddqu_si256((__m256i const *) ptr + 5); + sort_06v_ascending(d01, d02, d03, d04, d05, d06); + _mm256_storeu_si256((__m256i *) ptr + 0, d01); + _mm256_storeu_si256((__m256i *) ptr + 1, d02); + _mm256_storeu_si256((__m256i *) ptr + 2, d03); + _mm256_storeu_si256((__m256i *) ptr + 3, d04); + _mm256_storeu_si256((__m256i *) ptr + 4, d05); + _mm256_storeu_si256((__m256i *) ptr + 5, d06); +} + +static NOINLINE void sort_07v(int64_t *ptr) { + __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0); + __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1); + __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2); + __m256i d04 = _mm256_lddqu_si256((__m256i const *) ptr + 3); + __m256i d05 = _mm256_lddqu_si256((__m256i const *) ptr + 4); + __m256i d06 = _mm256_lddqu_si256((__m256i const *) ptr + 5); + __m256i d07 = _mm256_lddqu_si256((__m256i const *) ptr + 6); + sort_07v_ascending(d01, d02, d03, d04, d05, d06, d07); + _mm256_storeu_si256((__m256i *) ptr + 0, d01); + _mm256_storeu_si256((__m256i *) ptr + 1, d02); + _mm256_storeu_si256((__m256i *) ptr + 2, d03); + _mm256_storeu_si256((__m256i *) ptr + 3, d04); + _mm256_storeu_si256((__m256i *) ptr + 4, d05); + _mm256_storeu_si256((__m256i *) ptr + 5, d06); + _mm256_storeu_si256((__m256i *) ptr + 6, d07); +} + +static NOINLINE void sort_08v(int64_t *ptr) { + __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0); + __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1); + __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2); + __m256i d04 = _mm256_lddqu_si256((__m256i const *) ptr + 3); + __m256i d05 = _mm256_lddqu_si256((__m256i const *) ptr + 4); + __m256i d06 = _mm256_lddqu_si256((__m256i const *) ptr + 5); + __m256i d07 = _mm256_lddqu_si256((__m256i const *) ptr + 6); + __m256i d08 = _mm256_lddqu_si256((__m256i const *) ptr + 7); + sort_08v_ascending(d01, d02, d03, d04, d05, d06, d07, d08); + _mm256_storeu_si256((__m256i *) ptr + 0, d01); + _mm256_storeu_si256((__m256i *) ptr + 1, d02); + _mm256_storeu_si256((__m256i *) ptr + 2, d03); + _mm256_storeu_si256((__m256i *) ptr + 3, d04); + _mm256_storeu_si256((__m256i *) ptr + 4, d05); + _mm256_storeu_si256((__m256i *) ptr + 5, d06); + _mm256_storeu_si256((__m256i *) ptr + 6, d07); + _mm256_storeu_si256((__m256i *) ptr + 7, d08); +} + +static NOINLINE void sort_09v(int64_t *ptr) { + __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0); + __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1); + __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2); + __m256i d04 = _mm256_lddqu_si256((__m256i const *) ptr + 3); + __m256i d05 = _mm256_lddqu_si256((__m256i const *) ptr + 4); + __m256i d06 = _mm256_lddqu_si256((__m256i const *) ptr + 5); + __m256i d07 = _mm256_lddqu_si256((__m256i const *) ptr + 6); + __m256i d08 = _mm256_lddqu_si256((__m256i const *) ptr + 7); + __m256i d09 = _mm256_lddqu_si256((__m256i const *) ptr + 8); + sort_09v_ascending(d01, d02, d03, d04, d05, d06, d07, d08, d09); + _mm256_storeu_si256((__m256i *) ptr + 0, d01); + _mm256_storeu_si256((__m256i *) ptr + 1, d02); + _mm256_storeu_si256((__m256i *) ptr + 2, d03); + _mm256_storeu_si256((__m256i *) ptr + 3, d04); + _mm256_storeu_si256((__m256i *) ptr + 4, d05); + _mm256_storeu_si256((__m256i *) ptr + 5, d06); + _mm256_storeu_si256((__m256i *) ptr + 6, d07); + _mm256_storeu_si256((__m256i *) ptr + 7, d08); + _mm256_storeu_si256((__m256i *) ptr + 8, d09); +} + +static NOINLINE void sort_10v(int64_t 
*ptr) { + __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0); + __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1); + __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2); + __m256i d04 = _mm256_lddqu_si256((__m256i const *) ptr + 3); + __m256i d05 = _mm256_lddqu_si256((__m256i const *) ptr + 4); + __m256i d06 = _mm256_lddqu_si256((__m256i const *) ptr + 5); + __m256i d07 = _mm256_lddqu_si256((__m256i const *) ptr + 6); + __m256i d08 = _mm256_lddqu_si256((__m256i const *) ptr + 7); + __m256i d09 = _mm256_lddqu_si256((__m256i const *) ptr + 8); + __m256i d10 = _mm256_lddqu_si256((__m256i const *) ptr + 9); + sort_10v_ascending(d01, d02, d03, d04, d05, d06, d07, d08, d09, d10); + _mm256_storeu_si256((__m256i *) ptr + 0, d01); + _mm256_storeu_si256((__m256i *) ptr + 1, d02); + _mm256_storeu_si256((__m256i *) ptr + 2, d03); + _mm256_storeu_si256((__m256i *) ptr + 3, d04); + _mm256_storeu_si256((__m256i *) ptr + 4, d05); + _mm256_storeu_si256((__m256i *) ptr + 5, d06); + _mm256_storeu_si256((__m256i *) ptr + 6, d07); + _mm256_storeu_si256((__m256i *) ptr + 7, d08); + _mm256_storeu_si256((__m256i *) ptr + 8, d09); + _mm256_storeu_si256((__m256i *) ptr + 9, d10); +} + +static NOINLINE void sort_11v(int64_t *ptr) { + __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0); + __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1); + __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2); + __m256i d04 = _mm256_lddqu_si256((__m256i const *) ptr + 3); + __m256i d05 = _mm256_lddqu_si256((__m256i const *) ptr + 4); + __m256i d06 = _mm256_lddqu_si256((__m256i const *) ptr + 5); + __m256i d07 = _mm256_lddqu_si256((__m256i const *) ptr + 6); + __m256i d08 = _mm256_lddqu_si256((__m256i const *) ptr + 7); + __m256i d09 = _mm256_lddqu_si256((__m256i const *) ptr + 8); + __m256i d10 = _mm256_lddqu_si256((__m256i const *) ptr + 9); + __m256i d11 = _mm256_lddqu_si256((__m256i const *) ptr + 10); + sort_11v_ascending(d01, d02, d03, d04, d05, d06, d07, d08, d09, d10, d11); + _mm256_storeu_si256((__m256i *) ptr + 0, d01); + _mm256_storeu_si256((__m256i *) ptr + 1, d02); + _mm256_storeu_si256((__m256i *) ptr + 2, d03); + _mm256_storeu_si256((__m256i *) ptr + 3, d04); + _mm256_storeu_si256((__m256i *) ptr + 4, d05); + _mm256_storeu_si256((__m256i *) ptr + 5, d06); + _mm256_storeu_si256((__m256i *) ptr + 6, d07); + _mm256_storeu_si256((__m256i *) ptr + 7, d08); + _mm256_storeu_si256((__m256i *) ptr + 8, d09); + _mm256_storeu_si256((__m256i *) ptr + 9, d10); + _mm256_storeu_si256((__m256i *) ptr + 10, d11); +} + +static NOINLINE void sort_12v(int64_t *ptr) { + __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0); + __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1); + __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2); + __m256i d04 = _mm256_lddqu_si256((__m256i const *) ptr + 3); + __m256i d05 = _mm256_lddqu_si256((__m256i const *) ptr + 4); + __m256i d06 = _mm256_lddqu_si256((__m256i const *) ptr + 5); + __m256i d07 = _mm256_lddqu_si256((__m256i const *) ptr + 6); + __m256i d08 = _mm256_lddqu_si256((__m256i const *) ptr + 7); + __m256i d09 = _mm256_lddqu_si256((__m256i const *) ptr + 8); + __m256i d10 = _mm256_lddqu_si256((__m256i const *) ptr + 9); + __m256i d11 = _mm256_lddqu_si256((__m256i const *) ptr + 10); + __m256i d12 = _mm256_lddqu_si256((__m256i const *) ptr + 11); + sort_12v_ascending(d01, d02, d03, d04, d05, d06, d07, d08, d09, d10, d11, d12); + _mm256_storeu_si256((__m256i *) ptr + 0, d01); + _mm256_storeu_si256((__m256i *) ptr + 1, d02); + 
_mm256_storeu_si256((__m256i *) ptr + 2, d03); + _mm256_storeu_si256((__m256i *) ptr + 3, d04); + _mm256_storeu_si256((__m256i *) ptr + 4, d05); + _mm256_storeu_si256((__m256i *) ptr + 5, d06); + _mm256_storeu_si256((__m256i *) ptr + 6, d07); + _mm256_storeu_si256((__m256i *) ptr + 7, d08); + _mm256_storeu_si256((__m256i *) ptr + 8, d09); + _mm256_storeu_si256((__m256i *) ptr + 9, d10); + _mm256_storeu_si256((__m256i *) ptr + 10, d11); + _mm256_storeu_si256((__m256i *) ptr + 11, d12); +} + +static NOINLINE void sort_13v(int64_t *ptr) { + __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0); + __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1); + __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2); + __m256i d04 = _mm256_lddqu_si256((__m256i const *) ptr + 3); + __m256i d05 = _mm256_lddqu_si256((__m256i const *) ptr + 4); + __m256i d06 = _mm256_lddqu_si256((__m256i const *) ptr + 5); + __m256i d07 = _mm256_lddqu_si256((__m256i const *) ptr + 6); + __m256i d08 = _mm256_lddqu_si256((__m256i const *) ptr + 7); + __m256i d09 = _mm256_lddqu_si256((__m256i const *) ptr + 8); + __m256i d10 = _mm256_lddqu_si256((__m256i const *) ptr + 9); + __m256i d11 = _mm256_lddqu_si256((__m256i const *) ptr + 10); + __m256i d12 = _mm256_lddqu_si256((__m256i const *) ptr + 11); + __m256i d13 = _mm256_lddqu_si256((__m256i const *) ptr + 12); + sort_13v_ascending(d01, d02, d03, d04, d05, d06, d07, d08, d09, d10, d11, d12, d13); + _mm256_storeu_si256((__m256i *) ptr + 0, d01); + _mm256_storeu_si256((__m256i *) ptr + 1, d02); + _mm256_storeu_si256((__m256i *) ptr + 2, d03); + _mm256_storeu_si256((__m256i *) ptr + 3, d04); + _mm256_storeu_si256((__m256i *) ptr + 4, d05); + _mm256_storeu_si256((__m256i *) ptr + 5, d06); + _mm256_storeu_si256((__m256i *) ptr + 6, d07); + _mm256_storeu_si256((__m256i *) ptr + 7, d08); + _mm256_storeu_si256((__m256i *) ptr + 8, d09); + _mm256_storeu_si256((__m256i *) ptr + 9, d10); + _mm256_storeu_si256((__m256i *) ptr + 10, d11); + _mm256_storeu_si256((__m256i *) ptr + 11, d12); + _mm256_storeu_si256((__m256i *) ptr + 12, d13); +} + +static NOINLINE void sort_14v(int64_t *ptr) { + __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0); + __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1); + __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2); + __m256i d04 = _mm256_lddqu_si256((__m256i const *) ptr + 3); + __m256i d05 = _mm256_lddqu_si256((__m256i const *) ptr + 4); + __m256i d06 = _mm256_lddqu_si256((__m256i const *) ptr + 5); + __m256i d07 = _mm256_lddqu_si256((__m256i const *) ptr + 6); + __m256i d08 = _mm256_lddqu_si256((__m256i const *) ptr + 7); + __m256i d09 = _mm256_lddqu_si256((__m256i const *) ptr + 8); + __m256i d10 = _mm256_lddqu_si256((__m256i const *) ptr + 9); + __m256i d11 = _mm256_lddqu_si256((__m256i const *) ptr + 10); + __m256i d12 = _mm256_lddqu_si256((__m256i const *) ptr + 11); + __m256i d13 = _mm256_lddqu_si256((__m256i const *) ptr + 12); + __m256i d14 = _mm256_lddqu_si256((__m256i const *) ptr + 13); + sort_14v_ascending(d01, d02, d03, d04, d05, d06, d07, d08, d09, d10, d11, d12, d13, d14); + _mm256_storeu_si256((__m256i *) ptr + 0, d01); + _mm256_storeu_si256((__m256i *) ptr + 1, d02); + _mm256_storeu_si256((__m256i *) ptr + 2, d03); + _mm256_storeu_si256((__m256i *) ptr + 3, d04); + _mm256_storeu_si256((__m256i *) ptr + 4, d05); + _mm256_storeu_si256((__m256i *) ptr + 5, d06); + _mm256_storeu_si256((__m256i *) ptr + 6, d07); + _mm256_storeu_si256((__m256i *) ptr + 7, d08); + _mm256_storeu_si256((__m256i *) ptr + 
8, d09); + _mm256_storeu_si256((__m256i *) ptr + 9, d10); + _mm256_storeu_si256((__m256i *) ptr + 10, d11); + _mm256_storeu_si256((__m256i *) ptr + 11, d12); + _mm256_storeu_si256((__m256i *) ptr + 12, d13); + _mm256_storeu_si256((__m256i *) ptr + 13, d14); +} + +static NOINLINE void sort_15v(int64_t *ptr) { + __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0); + __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1); + __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2); + __m256i d04 = _mm256_lddqu_si256((__m256i const *) ptr + 3); + __m256i d05 = _mm256_lddqu_si256((__m256i const *) ptr + 4); + __m256i d06 = _mm256_lddqu_si256((__m256i const *) ptr + 5); + __m256i d07 = _mm256_lddqu_si256((__m256i const *) ptr + 6); + __m256i d08 = _mm256_lddqu_si256((__m256i const *) ptr + 7); + __m256i d09 = _mm256_lddqu_si256((__m256i const *) ptr + 8); + __m256i d10 = _mm256_lddqu_si256((__m256i const *) ptr + 9); + __m256i d11 = _mm256_lddqu_si256((__m256i const *) ptr + 10); + __m256i d12 = _mm256_lddqu_si256((__m256i const *) ptr + 11); + __m256i d13 = _mm256_lddqu_si256((__m256i const *) ptr + 12); + __m256i d14 = _mm256_lddqu_si256((__m256i const *) ptr + 13); + __m256i d15 = _mm256_lddqu_si256((__m256i const *) ptr + 14); + sort_15v_ascending(d01, d02, d03, d04, d05, d06, d07, d08, d09, d10, d11, d12, d13, d14, d15); + _mm256_storeu_si256((__m256i *) ptr + 0, d01); + _mm256_storeu_si256((__m256i *) ptr + 1, d02); + _mm256_storeu_si256((__m256i *) ptr + 2, d03); + _mm256_storeu_si256((__m256i *) ptr + 3, d04); + _mm256_storeu_si256((__m256i *) ptr + 4, d05); + _mm256_storeu_si256((__m256i *) ptr + 5, d06); + _mm256_storeu_si256((__m256i *) ptr + 6, d07); + _mm256_storeu_si256((__m256i *) ptr + 7, d08); + _mm256_storeu_si256((__m256i *) ptr + 8, d09); + _mm256_storeu_si256((__m256i *) ptr + 9, d10); + _mm256_storeu_si256((__m256i *) ptr + 10, d11); + _mm256_storeu_si256((__m256i *) ptr + 11, d12); + _mm256_storeu_si256((__m256i *) ptr + 12, d13); + _mm256_storeu_si256((__m256i *) ptr + 13, d14); + _mm256_storeu_si256((__m256i *) ptr + 14, d15); +} + +static NOINLINE void sort_16v(int64_t *ptr) { + __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0); + __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1); + __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2); + __m256i d04 = _mm256_lddqu_si256((__m256i const *) ptr + 3); + __m256i d05 = _mm256_lddqu_si256((__m256i const *) ptr + 4); + __m256i d06 = _mm256_lddqu_si256((__m256i const *) ptr + 5); + __m256i d07 = _mm256_lddqu_si256((__m256i const *) ptr + 6); + __m256i d08 = _mm256_lddqu_si256((__m256i const *) ptr + 7); + __m256i d09 = _mm256_lddqu_si256((__m256i const *) ptr + 8); + __m256i d10 = _mm256_lddqu_si256((__m256i const *) ptr + 9); + __m256i d11 = _mm256_lddqu_si256((__m256i const *) ptr + 10); + __m256i d12 = _mm256_lddqu_si256((__m256i const *) ptr + 11); + __m256i d13 = _mm256_lddqu_si256((__m256i const *) ptr + 12); + __m256i d14 = _mm256_lddqu_si256((__m256i const *) ptr + 13); + __m256i d15 = _mm256_lddqu_si256((__m256i const *) ptr + 14); + __m256i d16 = _mm256_lddqu_si256((__m256i const *) ptr + 15); + sort_16v_ascending(d01, d02, d03, d04, d05, d06, d07, d08, d09, d10, d11, d12, d13, d14, d15, d16); + _mm256_storeu_si256((__m256i *) ptr + 0, d01); + _mm256_storeu_si256((__m256i *) ptr + 1, d02); + _mm256_storeu_si256((__m256i *) ptr + 2, d03); + _mm256_storeu_si256((__m256i *) ptr + 3, d04); + _mm256_storeu_si256((__m256i *) ptr + 4, d05); + _mm256_storeu_si256((__m256i *) ptr + 5, 
d06); + _mm256_storeu_si256((__m256i *) ptr + 6, d07); + _mm256_storeu_si256((__m256i *) ptr + 7, d08); + _mm256_storeu_si256((__m256i *) ptr + 8, d09); + _mm256_storeu_si256((__m256i *) ptr + 9, d10); + _mm256_storeu_si256((__m256i *) ptr + 10, d11); + _mm256_storeu_si256((__m256i *) ptr + 11, d12); + _mm256_storeu_si256((__m256i *) ptr + 12, d13); + _mm256_storeu_si256((__m256i *) ptr + 13, d14); + _mm256_storeu_si256((__m256i *) ptr + 14, d15); + _mm256_storeu_si256((__m256i *) ptr + 15, d16); +} + static void sort(int64_t *ptr, size_t length) { + const int N = 4; + + switch(length / N) { + case 1: sort_01v(ptr); break; + case 2: sort_02v(ptr); break; + case 3: sort_03v(ptr); break; + case 4: sort_04v(ptr); break; + case 5: sort_05v(ptr); break; + case 6: sort_06v(ptr); break; + case 7: sort_07v(ptr); break; + case 8: sort_08v(ptr); break; + case 9: sort_09v(ptr); break; + case 10: sort_10v(ptr); break; + case 11: sort_11v(ptr); break; + case 12: sort_12v(ptr); break; + case 13: sort_13v(ptr); break; + case 14: sort_14v(ptr); break; + case 15: sort_15v(ptr); break; + case 16: sort_16v(ptr); break; + } +} +}; +} +} +#endif diff --git a/src/coreclr/src/gc/gc.cpp b/src/coreclr/src/gc/gc.cpp index c3e52c7540beec..47326a7c5374f9 100644 --- a/src/coreclr/src/gc/gc.cpp +++ b/src/coreclr/src/gc/gc.cpp @@ -19,7 +19,15 @@ #include "gcpriv.h" +#ifdef HOST_64BIT +#define USE_VXSORT +#else #define USE_INTROSORT +#endif + +#ifdef USE_VXSORT +#include "vxsort.h" +#endif // We just needed a simple random number generator for testing. class gc_rand @@ -2060,6 +2068,42 @@ uint8_t* tree_search (uint8_t* tree, uint8_t* old_address); #ifdef USE_INTROSORT #define _sort introsort::sort +#elif defined(USE_VXSORT) +#define _sort vxsort +namespace std +{ + template + class numeric_limits + { + public: + static _Ty Max() + { + return _Ty(); + } + }; + + template <> + class numeric_limits + { + public: + static int64_t Max() + { + return LLONG_MAX; + } + }; +} +void vxsort(uint8_t** low, uint8_t** high, unsigned int depth) +{ +// auto sorter = gcsort::vxsort(); + auto sorter = gcsort::vxsort(); + sorter.sort((int64_t*)low, (int64_t*)high); +#ifdef _DEBUG + for (uint8_t** p = low; p < high; p++) + { + assert(p[0] <= p[1]); + } +#endif +} #else //USE_INTROSORT #define _sort qsort1 void qsort1(uint8_t** low, uint8_t** high, unsigned int depth); diff --git a/src/coreclr/src/gc/gcsvr.cpp b/src/coreclr/src/gc/gcsvr.cpp index 1add45d271d3a0..a6a17d64f7549f 100644 --- a/src/coreclr/src/gc/gcsvr.cpp +++ b/src/coreclr/src/gc/gcsvr.cpp @@ -24,6 +24,9 @@ namespace SVR { #include "gcimpl.h" #include "gc.cpp" +#ifdef USE_VXSORT +#include "vxsort.cpp" +#endif //USE_VXSORT } #endif // defined(FEATURE_SVR_GC) diff --git a/src/coreclr/src/gc/gcwks.cpp b/src/coreclr/src/gc/gcwks.cpp index 3a6c7396c391e1..6529156dabf74f 100644 --- a/src/coreclr/src/gc/gcwks.cpp +++ b/src/coreclr/src/gc/gcwks.cpp @@ -24,5 +24,8 @@ namespace WKS { #include "gcimpl.h" #include "gc.cpp" +#ifdef USE_VXSORT +#include "vxsort.cpp" +#endif //USE_VXSORT } diff --git a/src/coreclr/src/gc/vxsort.cpp b/src/coreclr/src/gc/vxsort.cpp new file mode 100644 index 00000000000000..caf10215f0968b --- /dev/null +++ b/src/coreclr/src/gc/vxsort.cpp @@ -0,0 +1,282 @@ + +namespace gcsort { + +alignas(128) const int8_t vxsort_partition_traits::perm_table[128] = { + 0, 1, 2, 3, 4, 5, 6, 7, // 0b0000 (0) + 2, 3, 4, 5, 6, 7, 0, 1, // 0b0001 (1) + 0, 1, 4, 5, 6, 7, 2, 3, // 0b0010 (2) + 4, 5, 6, 7, 0, 1, 2, 3, // 0b0011 (3) + 0, 1, 2, 3, 6, 7, 4, 5, // 0b0100 (4) + 2, 3, 6, 7, 0, 
1, 4, 5, // 0b0101 (5) + 0, 1, 6, 7, 2, 3, 4, 5, // 0b0110 (6) + 6, 7, 0, 1, 2, 3, 4, 5, // 0b0111 (7) + 0, 1, 2, 3, 4, 5, 6, 7, // 0b1000 (8) + 2, 3, 4, 5, 0, 1, 6, 7, // 0b1001 (9) + 0, 1, 4, 5, 2, 3, 6, 7, // 0b1010 (10) + 4, 5, 0, 1, 2, 3, 6, 7, // 0b1011 (11) + 0, 1, 2, 3, 4, 5, 6, 7, // 0b1100 (12) + 2, 3, 0, 1, 4, 5, 6, 7, // 0b1101 (13) + 0, 1, 2, 3, 4, 5, 6, 7, // 0b1110 (14) + 0, 1, 2, 3, 4, 5, 6, 7, // 0b1111 (15) +}; + +alignas(2048) const int8_t vxsort_partition_traits::perm_table[2048] = { + 0, 1, 2, 3, 4, 5, 6, 7, // 0b00000000 (0) + 1, 2, 3, 4, 5, 6, 7, 0, // 0b00000001 (1) + 0, 2, 3, 4, 5, 6, 7, 1, // 0b00000010 (2) + 2, 3, 4, 5, 6, 7, 0, 1, // 0b00000011 (3) + 0, 1, 3, 4, 5, 6, 7, 2, // 0b00000100 (4) + 1, 3, 4, 5, 6, 7, 0, 2, // 0b00000101 (5) + 0, 3, 4, 5, 6, 7, 1, 2, // 0b00000110 (6) + 3, 4, 5, 6, 7, 0, 1, 2, // 0b00000111 (7) + 0, 1, 2, 4, 5, 6, 7, 3, // 0b00001000 (8) + 1, 2, 4, 5, 6, 7, 0, 3, // 0b00001001 (9) + 0, 2, 4, 5, 6, 7, 1, 3, // 0b00001010 (10) + 2, 4, 5, 6, 7, 0, 1, 3, // 0b00001011 (11) + 0, 1, 4, 5, 6, 7, 2, 3, // 0b00001100 (12) + 1, 4, 5, 6, 7, 0, 2, 3, // 0b00001101 (13) + 0, 4, 5, 6, 7, 1, 2, 3, // 0b00001110 (14) + 4, 5, 6, 7, 0, 1, 2, 3, // 0b00001111 (15) + 0, 1, 2, 3, 5, 6, 7, 4, // 0b00010000 (16) + 1, 2, 3, 5, 6, 7, 0, 4, // 0b00010001 (17) + 0, 2, 3, 5, 6, 7, 1, 4, // 0b00010010 (18) + 2, 3, 5, 6, 7, 0, 1, 4, // 0b00010011 (19) + 0, 1, 3, 5, 6, 7, 2, 4, // 0b00010100 (20) + 1, 3, 5, 6, 7, 0, 2, 4, // 0b00010101 (21) + 0, 3, 5, 6, 7, 1, 2, 4, // 0b00010110 (22) + 3, 5, 6, 7, 0, 1, 2, 4, // 0b00010111 (23) + 0, 1, 2, 5, 6, 7, 3, 4, // 0b00011000 (24) + 1, 2, 5, 6, 7, 0, 3, 4, // 0b00011001 (25) + 0, 2, 5, 6, 7, 1, 3, 4, // 0b00011010 (26) + 2, 5, 6, 7, 0, 1, 3, 4, // 0b00011011 (27) + 0, 1, 5, 6, 7, 2, 3, 4, // 0b00011100 (28) + 1, 5, 6, 7, 0, 2, 3, 4, // 0b00011101 (29) + 0, 5, 6, 7, 1, 2, 3, 4, // 0b00011110 (30) + 5, 6, 7, 0, 1, 2, 3, 4, // 0b00011111 (31) + 0, 1, 2, 3, 4, 6, 7, 5, // 0b00100000 (32) + 1, 2, 3, 4, 6, 7, 0, 5, // 0b00100001 (33) + 0, 2, 3, 4, 6, 7, 1, 5, // 0b00100010 (34) + 2, 3, 4, 6, 7, 0, 1, 5, // 0b00100011 (35) + 0, 1, 3, 4, 6, 7, 2, 5, // 0b00100100 (36) + 1, 3, 4, 6, 7, 0, 2, 5, // 0b00100101 (37) + 0, 3, 4, 6, 7, 1, 2, 5, // 0b00100110 (38) + 3, 4, 6, 7, 0, 1, 2, 5, // 0b00100111 (39) + 0, 1, 2, 4, 6, 7, 3, 5, // 0b00101000 (40) + 1, 2, 4, 6, 7, 0, 3, 5, // 0b00101001 (41) + 0, 2, 4, 6, 7, 1, 3, 5, // 0b00101010 (42) + 2, 4, 6, 7, 0, 1, 3, 5, // 0b00101011 (43) + 0, 1, 4, 6, 7, 2, 3, 5, // 0b00101100 (44) + 1, 4, 6, 7, 0, 2, 3, 5, // 0b00101101 (45) + 0, 4, 6, 7, 1, 2, 3, 5, // 0b00101110 (46) + 4, 6, 7, 0, 1, 2, 3, 5, // 0b00101111 (47) + 0, 1, 2, 3, 6, 7, 4, 5, // 0b00110000 (48) + 1, 2, 3, 6, 7, 0, 4, 5, // 0b00110001 (49) + 0, 2, 3, 6, 7, 1, 4, 5, // 0b00110010 (50) + 2, 3, 6, 7, 0, 1, 4, 5, // 0b00110011 (51) + 0, 1, 3, 6, 7, 2, 4, 5, // 0b00110100 (52) + 1, 3, 6, 7, 0, 2, 4, 5, // 0b00110101 (53) + 0, 3, 6, 7, 1, 2, 4, 5, // 0b00110110 (54) + 3, 6, 7, 0, 1, 2, 4, 5, // 0b00110111 (55) + 0, 1, 2, 6, 7, 3, 4, 5, // 0b00111000 (56) + 1, 2, 6, 7, 0, 3, 4, 5, // 0b00111001 (57) + 0, 2, 6, 7, 1, 3, 4, 5, // 0b00111010 (58) + 2, 6, 7, 0, 1, 3, 4, 5, // 0b00111011 (59) + 0, 1, 6, 7, 2, 3, 4, 5, // 0b00111100 (60) + 1, 6, 7, 0, 2, 3, 4, 5, // 0b00111101 (61) + 0, 6, 7, 1, 2, 3, 4, 5, // 0b00111110 (62) + 6, 7, 0, 1, 2, 3, 4, 5, // 0b00111111 (63) + 0, 1, 2, 3, 4, 5, 7, 6, // 0b01000000 (64) + 1, 2, 3, 4, 5, 7, 0, 6, // 0b01000001 (65) + 0, 2, 3, 4, 5, 7, 1, 6, // 0b01000010 (66) + 2, 3, 4, 5, 7, 0, 1, 6, // 0b01000011 
(67) + 0, 1, 3, 4, 5, 7, 2, 6, // 0b01000100 (68) + 1, 3, 4, 5, 7, 0, 2, 6, // 0b01000101 (69) + 0, 3, 4, 5, 7, 1, 2, 6, // 0b01000110 (70) + 3, 4, 5, 7, 0, 1, 2, 6, // 0b01000111 (71) + 0, 1, 2, 4, 5, 7, 3, 6, // 0b01001000 (72) + 1, 2, 4, 5, 7, 0, 3, 6, // 0b01001001 (73) + 0, 2, 4, 5, 7, 1, 3, 6, // 0b01001010 (74) + 2, 4, 5, 7, 0, 1, 3, 6, // 0b01001011 (75) + 0, 1, 4, 5, 7, 2, 3, 6, // 0b01001100 (76) + 1, 4, 5, 7, 0, 2, 3, 6, // 0b01001101 (77) + 0, 4, 5, 7, 1, 2, 3, 6, // 0b01001110 (78) + 4, 5, 7, 0, 1, 2, 3, 6, // 0b01001111 (79) + 0, 1, 2, 3, 5, 7, 4, 6, // 0b01010000 (80) + 1, 2, 3, 5, 7, 0, 4, 6, // 0b01010001 (81) + 0, 2, 3, 5, 7, 1, 4, 6, // 0b01010010 (82) + 2, 3, 5, 7, 0, 1, 4, 6, // 0b01010011 (83) + 0, 1, 3, 5, 7, 2, 4, 6, // 0b01010100 (84) + 1, 3, 5, 7, 0, 2, 4, 6, // 0b01010101 (85) + 0, 3, 5, 7, 1, 2, 4, 6, // 0b01010110 (86) + 3, 5, 7, 0, 1, 2, 4, 6, // 0b01010111 (87) + 0, 1, 2, 5, 7, 3, 4, 6, // 0b01011000 (88) + 1, 2, 5, 7, 0, 3, 4, 6, // 0b01011001 (89) + 0, 2, 5, 7, 1, 3, 4, 6, // 0b01011010 (90) + 2, 5, 7, 0, 1, 3, 4, 6, // 0b01011011 (91) + 0, 1, 5, 7, 2, 3, 4, 6, // 0b01011100 (92) + 1, 5, 7, 0, 2, 3, 4, 6, // 0b01011101 (93) + 0, 5, 7, 1, 2, 3, 4, 6, // 0b01011110 (94) + 5, 7, 0, 1, 2, 3, 4, 6, // 0b01011111 (95) + 0, 1, 2, 3, 4, 7, 5, 6, // 0b01100000 (96) + 1, 2, 3, 4, 7, 0, 5, 6, // 0b01100001 (97) + 0, 2, 3, 4, 7, 1, 5, 6, // 0b01100010 (98) + 2, 3, 4, 7, 0, 1, 5, 6, // 0b01100011 (99) + 0, 1, 3, 4, 7, 2, 5, 6, // 0b01100100 (100) + 1, 3, 4, 7, 0, 2, 5, 6, // 0b01100101 (101) + 0, 3, 4, 7, 1, 2, 5, 6, // 0b01100110 (102) + 3, 4, 7, 0, 1, 2, 5, 6, // 0b01100111 (103) + 0, 1, 2, 4, 7, 3, 5, 6, // 0b01101000 (104) + 1, 2, 4, 7, 0, 3, 5, 6, // 0b01101001 (105) + 0, 2, 4, 7, 1, 3, 5, 6, // 0b01101010 (106) + 2, 4, 7, 0, 1, 3, 5, 6, // 0b01101011 (107) + 0, 1, 4, 7, 2, 3, 5, 6, // 0b01101100 (108) + 1, 4, 7, 0, 2, 3, 5, 6, // 0b01101101 (109) + 0, 4, 7, 1, 2, 3, 5, 6, // 0b01101110 (110) + 4, 7, 0, 1, 2, 3, 5, 6, // 0b01101111 (111) + 0, 1, 2, 3, 7, 4, 5, 6, // 0b01110000 (112) + 1, 2, 3, 7, 0, 4, 5, 6, // 0b01110001 (113) + 0, 2, 3, 7, 1, 4, 5, 6, // 0b01110010 (114) + 2, 3, 7, 0, 1, 4, 5, 6, // 0b01110011 (115) + 0, 1, 3, 7, 2, 4, 5, 6, // 0b01110100 (116) + 1, 3, 7, 0, 2, 4, 5, 6, // 0b01110101 (117) + 0, 3, 7, 1, 2, 4, 5, 6, // 0b01110110 (118) + 3, 7, 0, 1, 2, 4, 5, 6, // 0b01110111 (119) + 0, 1, 2, 7, 3, 4, 5, 6, // 0b01111000 (120) + 1, 2, 7, 0, 3, 4, 5, 6, // 0b01111001 (121) + 0, 2, 7, 1, 3, 4, 5, 6, // 0b01111010 (122) + 2, 7, 0, 1, 3, 4, 5, 6, // 0b01111011 (123) + 0, 1, 7, 2, 3, 4, 5, 6, // 0b01111100 (124) + 1, 7, 0, 2, 3, 4, 5, 6, // 0b01111101 (125) + 0, 7, 1, 2, 3, 4, 5, 6, // 0b01111110 (126) + 7, 0, 1, 2, 3, 4, 5, 6, // 0b01111111 (127) + 0, 1, 2, 3, 4, 5, 6, 7, // 0b10000000 (128) + 1, 2, 3, 4, 5, 6, 0, 7, // 0b10000001 (129) + 0, 2, 3, 4, 5, 6, 1, 7, // 0b10000010 (130) + 2, 3, 4, 5, 6, 0, 1, 7, // 0b10000011 (131) + 0, 1, 3, 4, 5, 6, 2, 7, // 0b10000100 (132) + 1, 3, 4, 5, 6, 0, 2, 7, // 0b10000101 (133) + 0, 3, 4, 5, 6, 1, 2, 7, // 0b10000110 (134) + 3, 4, 5, 6, 0, 1, 2, 7, // 0b10000111 (135) + 0, 1, 2, 4, 5, 6, 3, 7, // 0b10001000 (136) + 1, 2, 4, 5, 6, 0, 3, 7, // 0b10001001 (137) + 0, 2, 4, 5, 6, 1, 3, 7, // 0b10001010 (138) + 2, 4, 5, 6, 0, 1, 3, 7, // 0b10001011 (139) + 0, 1, 4, 5, 6, 2, 3, 7, // 0b10001100 (140) + 1, 4, 5, 6, 0, 2, 3, 7, // 0b10001101 (141) + 0, 4, 5, 6, 1, 2, 3, 7, // 0b10001110 (142) + 4, 5, 6, 0, 1, 2, 3, 7, // 0b10001111 (143) + 0, 1, 2, 3, 5, 6, 4, 7, // 0b10010000 (144) + 1, 2, 3, 5, 6, 0, 4, 7, // 0b10010001 
(145) + 0, 2, 3, 5, 6, 1, 4, 7, // 0b10010010 (146) + 2, 3, 5, 6, 0, 1, 4, 7, // 0b10010011 (147) + 0, 1, 3, 5, 6, 2, 4, 7, // 0b10010100 (148) + 1, 3, 5, 6, 0, 2, 4, 7, // 0b10010101 (149) + 0, 3, 5, 6, 1, 2, 4, 7, // 0b10010110 (150) + 3, 5, 6, 0, 1, 2, 4, 7, // 0b10010111 (151) + 0, 1, 2, 5, 6, 3, 4, 7, // 0b10011000 (152) + 1, 2, 5, 6, 0, 3, 4, 7, // 0b10011001 (153) + 0, 2, 5, 6, 1, 3, 4, 7, // 0b10011010 (154) + 2, 5, 6, 0, 1, 3, 4, 7, // 0b10011011 (155) + 0, 1, 5, 6, 2, 3, 4, 7, // 0b10011100 (156) + 1, 5, 6, 0, 2, 3, 4, 7, // 0b10011101 (157) + 0, 5, 6, 1, 2, 3, 4, 7, // 0b10011110 (158) + 5, 6, 0, 1, 2, 3, 4, 7, // 0b10011111 (159) + 0, 1, 2, 3, 4, 6, 5, 7, // 0b10100000 (160) + 1, 2, 3, 4, 6, 0, 5, 7, // 0b10100001 (161) + 0, 2, 3, 4, 6, 1, 5, 7, // 0b10100010 (162) + 2, 3, 4, 6, 0, 1, 5, 7, // 0b10100011 (163) + 0, 1, 3, 4, 6, 2, 5, 7, // 0b10100100 (164) + 1, 3, 4, 6, 0, 2, 5, 7, // 0b10100101 (165) + 0, 3, 4, 6, 1, 2, 5, 7, // 0b10100110 (166) + 3, 4, 6, 0, 1, 2, 5, 7, // 0b10100111 (167) + 0, 1, 2, 4, 6, 3, 5, 7, // 0b10101000 (168) + 1, 2, 4, 6, 0, 3, 5, 7, // 0b10101001 (169) + 0, 2, 4, 6, 1, 3, 5, 7, // 0b10101010 (170) + 2, 4, 6, 0, 1, 3, 5, 7, // 0b10101011 (171) + 0, 1, 4, 6, 2, 3, 5, 7, // 0b10101100 (172) + 1, 4, 6, 0, 2, 3, 5, 7, // 0b10101101 (173) + 0, 4, 6, 1, 2, 3, 5, 7, // 0b10101110 (174) + 4, 6, 0, 1, 2, 3, 5, 7, // 0b10101111 (175) + 0, 1, 2, 3, 6, 4, 5, 7, // 0b10110000 (176) + 1, 2, 3, 6, 0, 4, 5, 7, // 0b10110001 (177) + 0, 2, 3, 6, 1, 4, 5, 7, // 0b10110010 (178) + 2, 3, 6, 0, 1, 4, 5, 7, // 0b10110011 (179) + 0, 1, 3, 6, 2, 4, 5, 7, // 0b10110100 (180) + 1, 3, 6, 0, 2, 4, 5, 7, // 0b10110101 (181) + 0, 3, 6, 1, 2, 4, 5, 7, // 0b10110110 (182) + 3, 6, 0, 1, 2, 4, 5, 7, // 0b10110111 (183) + 0, 1, 2, 6, 3, 4, 5, 7, // 0b10111000 (184) + 1, 2, 6, 0, 3, 4, 5, 7, // 0b10111001 (185) + 0, 2, 6, 1, 3, 4, 5, 7, // 0b10111010 (186) + 2, 6, 0, 1, 3, 4, 5, 7, // 0b10111011 (187) + 0, 1, 6, 2, 3, 4, 5, 7, // 0b10111100 (188) + 1, 6, 0, 2, 3, 4, 5, 7, // 0b10111101 (189) + 0, 6, 1, 2, 3, 4, 5, 7, // 0b10111110 (190) + 6, 0, 1, 2, 3, 4, 5, 7, // 0b10111111 (191) + 0, 1, 2, 3, 4, 5, 6, 7, // 0b11000000 (192) + 1, 2, 3, 4, 5, 0, 6, 7, // 0b11000001 (193) + 0, 2, 3, 4, 5, 1, 6, 7, // 0b11000010 (194) + 2, 3, 4, 5, 0, 1, 6, 7, // 0b11000011 (195) + 0, 1, 3, 4, 5, 2, 6, 7, // 0b11000100 (196) + 1, 3, 4, 5, 0, 2, 6, 7, // 0b11000101 (197) + 0, 3, 4, 5, 1, 2, 6, 7, // 0b11000110 (198) + 3, 4, 5, 0, 1, 2, 6, 7, // 0b11000111 (199) + 0, 1, 2, 4, 5, 3, 6, 7, // 0b11001000 (200) + 1, 2, 4, 5, 0, 3, 6, 7, // 0b11001001 (201) + 0, 2, 4, 5, 1, 3, 6, 7, // 0b11001010 (202) + 2, 4, 5, 0, 1, 3, 6, 7, // 0b11001011 (203) + 0, 1, 4, 5, 2, 3, 6, 7, // 0b11001100 (204) + 1, 4, 5, 0, 2, 3, 6, 7, // 0b11001101 (205) + 0, 4, 5, 1, 2, 3, 6, 7, // 0b11001110 (206) + 4, 5, 0, 1, 2, 3, 6, 7, // 0b11001111 (207) + 0, 1, 2, 3, 5, 4, 6, 7, // 0b11010000 (208) + 1, 2, 3, 5, 0, 4, 6, 7, // 0b11010001 (209) + 0, 2, 3, 5, 1, 4, 6, 7, // 0b11010010 (210) + 2, 3, 5, 0, 1, 4, 6, 7, // 0b11010011 (211) + 0, 1, 3, 5, 2, 4, 6, 7, // 0b11010100 (212) + 1, 3, 5, 0, 2, 4, 6, 7, // 0b11010101 (213) + 0, 3, 5, 1, 2, 4, 6, 7, // 0b11010110 (214) + 3, 5, 0, 1, 2, 4, 6, 7, // 0b11010111 (215) + 0, 1, 2, 5, 3, 4, 6, 7, // 0b11011000 (216) + 1, 2, 5, 0, 3, 4, 6, 7, // 0b11011001 (217) + 0, 2, 5, 1, 3, 4, 6, 7, // 0b11011010 (218) + 2, 5, 0, 1, 3, 4, 6, 7, // 0b11011011 (219) + 0, 1, 5, 2, 3, 4, 6, 7, // 0b11011100 (220) + 1, 5, 0, 2, 3, 4, 6, 7, // 0b11011101 (221) + 0, 5, 1, 2, 3, 4, 6, 7, // 0b11011110 (222) + 5, 
0, 1, 2, 3, 4, 6, 7, // 0b11011111 (223)
+    0, 1, 2, 3, 4, 5, 6, 7, // 0b11100000 (224)
+    1, 2, 3, 4, 0, 5, 6, 7, // 0b11100001 (225)
+    0, 2, 3, 4, 1, 5, 6, 7, // 0b11100010 (226)
+    2, 3, 4, 0, 1, 5, 6, 7, // 0b11100011 (227)
+    0, 1, 3, 4, 2, 5, 6, 7, // 0b11100100 (228)
+    1, 3, 4, 0, 2, 5, 6, 7, // 0b11100101 (229)
+    0, 3, 4, 1, 2, 5, 6, 7, // 0b11100110 (230)
+    3, 4, 0, 1, 2, 5, 6, 7, // 0b11100111 (231)
+    0, 1, 2, 4, 3, 5, 6, 7, // 0b11101000 (232)
+    1, 2, 4, 0, 3, 5, 6, 7, // 0b11101001 (233)
+    0, 2, 4, 1, 3, 5, 6, 7, // 0b11101010 (234)
+    2, 4, 0, 1, 3, 5, 6, 7, // 0b11101011 (235)
+    0, 1, 4, 2, 3, 5, 6, 7, // 0b11101100 (236)
+    1, 4, 0, 2, 3, 5, 6, 7, // 0b11101101 (237)
+    0, 4, 1, 2, 3, 5, 6, 7, // 0b11101110 (238)
+    4, 0, 1, 2, 3, 5, 6, 7, // 0b11101111 (239)
+    0, 1, 2, 3, 4, 5, 6, 7, // 0b11110000 (240)
+    1, 2, 3, 0, 4, 5, 6, 7, // 0b11110001 (241)
+    0, 2, 3, 1, 4, 5, 6, 7, // 0b11110010 (242)
+    2, 3, 0, 1, 4, 5, 6, 7, // 0b11110011 (243)
+    0, 1, 3, 2, 4, 5, 6, 7, // 0b11110100 (244)
+    1, 3, 0, 2, 4, 5, 6, 7, // 0b11110101 (245)
+    0, 3, 1, 2, 4, 5, 6, 7, // 0b11110110 (246)
+    3, 0, 1, 2, 4, 5, 6, 7, // 0b11110111 (247)
+    0, 1, 2, 3, 4, 5, 6, 7, // 0b11111000 (248)
+    1, 2, 0, 3, 4, 5, 6, 7, // 0b11111001 (249)
+    0, 2, 1, 3, 4, 5, 6, 7, // 0b11111010 (250)
+    2, 0, 1, 3, 4, 5, 6, 7, // 0b11111011 (251)
+    0, 1, 2, 3, 4, 5, 6, 7, // 0b11111100 (252)
+    1, 0, 2, 3, 4, 5, 6, 7, // 0b11111101 (253)
+    0, 1, 2, 3, 4, 5, 6, 7, // 0b11111110 (254)
+    0, 1, 2, 3, 4, 5, 6, 7, // 0b11111111 (255)
+};
+}
+
diff --git a/src/coreclr/src/gc/vxsort.h b/src/coreclr/src/gc/vxsort.h
new file mode 100644
index 00000000000000..59038555f9f983
--- /dev/null
+++ b/src/coreclr/src/gc/vxsort.h
@@ -0,0 +1,625 @@
+#ifndef GCSORT_VXSORT_H
+#define GCSORT_VXSORT_H
+
+#include <immintrin.h>
+#include <stdint.h>
+#include <assert.h>
+
+#include "bitonic_sort.int64_t.generated.h"
+//#include
+//#include
+//#include
+
+#ifdef _MSC_VER
+// MSVC
+#include <intrin.h>
+#define mess_up_cmov() _ReadBarrier();
+#define INLINE __forceinline
+#define NOINLINE __declspec(noinline)
+#else
+// GCC + Clang
+#define mess_up_cmov()
+#define INLINE __attribute__((always_inline))
+#define NOINLINE __attribute__((noinline))
+#endif
+
+namespace gcsort {
+using gcsort::smallsort::bitonic;
+
+struct alignment_hint {
+  public:
+  static const size_t ALIGN = 32;
+  static const int8_t REALIGN = 0x66;
+
+  alignment_hint() : left_align(REALIGN), right_align(REALIGN) {}
+  alignment_hint realign_left() {
+    alignment_hint copy = *this;
+    copy.left_align = REALIGN;
+    return copy;
+  }
+
+  alignment_hint realign_right() {
+    alignment_hint copy = *this;
+    copy.left_align = REALIGN;
+    return copy;
+  }
+
+  static bool is_aligned(void *p) {
+    return (size_t)p % ALIGN == 0;
+  }
+
+
+  int left_align : 8;
+  int right_align : 8;
+};
+
+
+template <typename T>
+//using Tv = __m256;
+struct vxsort_partition_traits {
+public:
+  //typedef T TV __attribute__ ((__vector_size__ (32)));
+  typedef __m256 Tv;
+
+  static Tv load_vec(Tv* ptr);
+  static Tv store_vec(Tv* ptr, Tv v);
+  static __m256i get_perm(int mask);
+  static Tv get_vec_pivot(T pivot);
+  static uint32_t get_cmpgt_mask(Tv a, Tv b);
+};
+
+template <>
+class vxsort_partition_traits<int64_t> {
+private:
+  const static int8_t perm_table[128];
+public:
+  typedef __m256i Tv;
+
+  static INLINE Tv load_vec(Tv* p) {
+    return _mm256_lddqu_si256(p);
+  }
+
+  static INLINE void store_vec(Tv* ptr, Tv v) {
+    _mm256_storeu_si256(ptr, v);
+  }
+
+  static INLINE __m256i get_perm(int mask) {
+    assert(mask >= 0);
+    assert(mask <= 15);
+    return _mm256_cvtepu8_epi32(_mm_loadu_si128((__m128i*)(perm_table + mask * 8)));
+    //return _mm256_cvtepu8_epi32(
+    //    _mm_cvtsi64_si128(*((int64_t*)perm_table + mask)));
+  }
+  static INLINE Tv get_vec_pivot(int64_t pivot) {
+    return _mm256_set1_epi64x(pivot);
+  }
+  static INLINE uint32_t get_cmpgt_mask(Tv a, Tv b) {
+    return _mm256_movemask_pd(_mm256_castsi256_pd(_mm256_cmpgt_epi64(a, b)));
+  }
+};
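+// A note on perm_table/get_perm (worked example; the values are read off the
+// tables defined in vxsort.cpp): each 8-entry row holds the 32-bit lane
+// indices fed to _mm256_permutevar8x32_epi32 for a given comparison mask,
+// where bit i of the mask is set when element i compares greater than the
+// pivot. The row for a mask gathers the not-greater elements at the low end
+// of the vector and the greater ones at the high end. For 64-bit keys the
+// indices come in pairs, e.g. the row for mask 0b0101 is {2, 3, 6, 7, 0, 1, 4, 5}:
+// elements 1 and 3 (lanes 2-3 and 6-7) move to the front, elements 0 and 2
+// to the back.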
+
+template <>
+class vxsort_partition_traits<int32_t> {
+private:
+  const static int8_t perm_table[2048];
+public:
+  typedef __m256i Tv;
+  static INLINE Tv load_vec(Tv* p) {
+    return _mm256_lddqu_si256(p);
+  }
+
+  static INLINE void store_vec(Tv* ptr, Tv v) {
+    _mm256_storeu_si256(ptr, v);
+  }
+
+  static INLINE __m256i get_perm(int mask) {
+    assert(mask >= 0);
+    assert(mask <= 255);
+    return _mm256_cvtepu8_epi32(_mm_loadu_si128((__m128i*)(perm_table + mask * 8)));
+  }
+  static INLINE __m256i get_vec_pivot(int32_t pivot) {
+    return _mm256_set1_epi32(pivot);
+  }
+  static INLINE uint32_t get_cmpgt_mask(__m256i a, __m256i b) {
+    return _mm256_movemask_ps(_mm256_castsi256_ps(_mm256_cmpgt_epi32(a, b)));
+  }
+};
+
+
+template <typename T, int Unroll>
+class vxsort {
+  static_assert(Unroll >= 1, "Unroll can be in the range 1..12");
+  static_assert(Unroll <= 12, "Unroll can be in the range 1..12");
+
+private:
+  //using Tv2 = Tp::Tv;
+  using Tp = vxsort_partition_traits<T>;
+  typedef typename Tp::Tv TV;
+
+  static const int ELEMENT_ALIGN = sizeof(T) - 1;
+  static const int N = 32 / sizeof(T);
+  static const int32_t MAX_BITONIC_SORT_VECTORS = 16;
+  static const int32_t SMALL_SORT_THRESHOLD_ELEMENTS = MAX_BITONIC_SORT_VECTORS * N;
+  //static const int32_t MaxInnerUnroll = ((SMALL_SORT_THRESHOLD_ELEMENTS - (N - 2*N)) / (2 * N));
+  static const int32_t MaxInnerUnroll = (MAX_BITONIC_SORT_VECTORS - 3) / 2;
+  static const int32_t SafeInnerUnroll = MaxInnerUnroll > Unroll ? Unroll : MaxInnerUnroll;
+  static const int32_t SLACK_PER_SIDE_IN_VECTORS = Unroll;
+  static const size_t ALIGN = alignment_hint::ALIGN;
+  static const size_t ALIGN_MASK = ALIGN - 1;
+
+  static const int SLACK_PER_SIDE_IN_ELEMENTS = SLACK_PER_SIDE_IN_VECTORS * N;
+  // The formula for figuring out how much temporary space we need for partitioning:
+  // 2 x the number of slack elements on each side for the purpose of partitioning in an unrolled manner +
+  // 2 x the maximal amount of bytes needed for alignment (32)
+  // one more vector's worth of elements, since we write with N-way stores from both ends of the temporary area
+  // and we must make sure we do not accidentally over-write from left -> right or vice-versa right on that edge...
+  // In other words, while we allocated this much temp memory, the actual amount of elements inside said memory
+  // is smaller by 8 elements + 1 for each alignment (max alignment is actually N-1, I just round up to N...)
+  // This long sentence just means that we over-allocate N+2 elements...
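+  // For a concrete feel (Unroll = 8 here is just an assumed example value):
+  // with T = int64_t, N = 32/8 = 4 and SLACK_PER_SIDE_IN_ELEMENTS = 8*4 = 32,
+  // so the constant below comes out to 2*32 + 4 + 4*4 = 84 elements.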
+  static const int PARTITION_TMP_SIZE_IN_ELEMENTS =
+      (2 * SLACK_PER_SIDE_IN_ELEMENTS + N + 4*N);
+
+  static int floor_log2_plus_one(size_t n) {
+    auto result = 0;
+    while (n >= 1) {
+      result++;
+      n /= 2;
+    }
+    return result;
+  }
+  static void swap(T* left, T* right) {
+    auto tmp = *left;
+    *left = *right;
+    *right = tmp;
+  }
+  static void swap_if_greater(T* left, T* right) {
+    if (*left <= *right)
+      return;
+    swap(left, right);
+  }
+
+  static void insertion_sort(T* lo, T* hi) {
+    for (auto i = lo + 1; i <= hi; i++) {
+      auto j = i;
+      auto t = *i;
+      while ((j > lo) && (t < *(j - 1))) {
+        *j = *(j - 1);
+        j--;
+      }
+      *j = t;
+    }
+  }
+
+  static void heap_sort(T* lo, T* hi) {
+    size_t n = hi - lo + 1;
+    for (size_t i = n / 2; i >= 1; i--) {
+      down_heap(i, n, lo);
+    }
+    for (size_t i = n; i > 1; i--) {
+      swap(lo, lo + i - 1);
+      down_heap(1, i - 1, lo);
+    }
+  }
+  static void down_heap(size_t i, size_t n, T* lo) {
+    auto d = *(lo + i - 1);
+    size_t child;
+    while (i <= n / 2) {
+      child = 2 * i;
+      if (child < n && *(lo + child - 1) < (*(lo + child))) {
+        child++;
+      }
+      if (!(d < *(lo + child - 1))) {
+        break;
+      }
+      *(lo + i - 1) = *(lo + child - 1);
+      i = child;
+    }
+    *(lo + i - 1) = d;
+  }
+
+  void reset(T* start, T* end) {
+    _depth = 0;
+    _startPtr = start;
+    _endPtr = end;
+  }
+
+
+  T* _startPtr = nullptr;
+  T* _endPtr = nullptr;
+
+  T _temp[PARTITION_TMP_SIZE_IN_ELEMENTS];
+  int _depth = 0;
+
+  NOINLINE
+  T* align_left_scalar_uncommon(T* read_left, T pivot,
+                                T*& tmp_left, T*& tmp_right) {
+    if (((size_t)read_left & ALIGN_MASK) == 0)
+      return read_left;
+
+    auto next_align = (T*)(((size_t)read_left + ALIGN) & ~ALIGN_MASK);
+    while (read_left < next_align) {
+      auto v = *(read_left++);
+      if (v <= pivot) {
+        *(tmp_left++) = v;
+      } else {
+        *(--tmp_right) = v;
+      }
+    }
+
+    return read_left;
+  }
+
+  NOINLINE
+  T* align_right_scalar_uncommon(T* readRight, T pivot,
+                                 T*& tmpLeft, T*& tmpRight) {
+    if (((size_t) readRight & ALIGN_MASK) == 0)
+      return readRight;
+
+    auto nextAlign = (T *) ((size_t) readRight & ~ALIGN_MASK);
+    while (readRight > nextAlign) {
+      auto v = *(--readRight);
+      if (v <= pivot) {
+        *(tmpLeft++) = v;
+      } else {
+        *(--tmpRight) = v;
+      }
+    }
+
+    return readRight;
+  }
+
+  void sort(T* left, T* right,
+            alignment_hint realignHint,
+            int depthLimit) {
+    auto length = (size_t)(right - left + 1);
+
+    T* mid;
+    switch (length) {
+      case 0:
+      case 1:
+        return;
+      case 2:
+        swap_if_greater(left, right);
+        return;
+      case 3:
+        mid = right - 1;
+        swap_if_greater(left, mid);
+        swap_if_greater(left, right);
+        swap_if_greater(mid, right);
+        return;
+    }
+
+    // Go to bitonic sort (or insertion sort when we cannot pad) below this threshold
+    if (length <= SMALL_SORT_THRESHOLD_ELEMENTS) {
+
+      auto nextLength = (length & (N-1)) > 0 ? (length + N) & ~(N-1) : length;
+
+      auto extraSpaceNeeded = nextLength - length;
+      auto fakeLeft = left - extraSpaceNeeded;
+      if (fakeLeft >= _startPtr) {
+        bitonic<T>::sort(fakeLeft, nextLength);
+      } else {
+        insertion_sort(left, right);
+      }
+      return;
+    }
+
+    // Detect a whole bunch of bad cases where partitioning
+    // will not do well:
+    // 1. Reverse sorted array
+    // 2. High degree of repeated values (Dutch flag problem, one value)
+    if (depthLimit == 0) {
+      heap_sort(left, right);
+      _depth--;
+      return;
+    }
+    depthLimit--;
+
+    // This is going to be a bit weird:
+    // Pre/Post alignment calculations happen here: we prepare hints to the
+    // partition function of how much to align and in which direction (pre/post).
+    // The motivation for doing these calculations here (with the actual
+    // alignment done inside the partitioning code) is that here we can cache
+    // those calculations. As we recurse to the left we can reuse the left cached
+    // calculation, and when we recurse to the right we reuse the right
+    // calculation, so we can avoid re-calculating the same aligned addresses
+    // throughout the recursion, at the cost of some minor code complexity.
+    // Since we branch on the magic values REALIGN_LEFT & REALIGN_RIGHT it's safe
+    // to assume that we are not torturing the branch predictor.
+
+    // We use a long as a "struct" to pass on alignment hints to the
+    // partitioning, by packing two 32-bit elements into it, as the JIT seems to
+    // not do this. In reality we need more like 2x 4 bits for each side, but I
+    // don't think there is a real difference
+
+    if (realignHint.left_align == alignment_hint::REALIGN) {
+      // Alignment flow:
+      // * Calculate pre-alignment on the left
+      // * See if it would cause an out-of-bounds read
+      // * Since we'd like to avoid that, we adjust for post-alignment
+      // * There are no branches since we do branch->arithmetic
+      auto preAlignedLeft = (T*) ((size_t)left & ~ALIGN_MASK);
+      auto cannotPreAlignLeft = (preAlignedLeft - _startPtr) >> 63;
+      realignHint.left_align = (preAlignedLeft - left) + (N & cannotPreAlignLeft);
+      assert(alignment_hint::is_aligned(left + realignHint.left_align));
+    }
+
+    if (realignHint.right_align == alignment_hint::REALIGN) {
+      // right is pointing just PAST the last element we intend to partition
+      // (where we also store the pivot), so we calculate alignment based on
+      // right - 1, and YES: I am casting to ulong before doing the -1, this is
+      // intentional since the whole thing is either aligned to 32 bytes or not,
+      // so decrementing the POINTER value by 1 is sufficient for the alignment,
+      // and the JIT sucks at this anyway
+      auto preAlignedRight =
+          (T*) (((size_t)(right - 1) & ~ALIGN_MASK) + ALIGN);
+      auto cannotPreAlignRight = (_endPtr - preAlignedRight) >> 63;
+      realignHint.right_align = (preAlignedRight - right - (N & cannotPreAlignRight));
+      assert(alignment_hint::is_aligned(right + realignHint.right_align));
+    }
+
+    // Compute the median-of-three of
+    // the first, middle and one-before-last elements
+    mid = left + ((right - left) / 2);
+    swap_if_greater(left, mid);
+    swap_if_greater(left, right - 1);
+    swap_if_greater(mid, right - 1);
+
+    // The pivot is now in mid; place it at the right-hand side
+    swap(mid, right);
+
+    auto sep = (length < PARTITION_TMP_SIZE_IN_ELEMENTS) ?
+        vectorized_partition<SafeInnerUnroll>(left, right, realignHint) :
+        vectorized_partition<Unroll>(left, right, realignHint);
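+    // (The two instantiations above differ only in the inner unroll factor:
+    // when the partition is small relative to the temporary buffer, the
+    // SafeInnerUnroll variant -- Unroll capped at MaxInnerUnroll -- is
+    // presumably chosen so the unrolled loop still has enough vectors to
+    // read from both sides.)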
+
+
+    _depth++;
+    sort(left, sep - 2, realignHint.realign_right(), depthLimit);
+    sort(sep, right, realignHint.realign_left(), depthLimit);
+    _depth--;
+  }
+
+  static INLINE void partition_block(TV& dataVec,
+                                     const TV& P,
+                                     T*& left,
+                                     T*& right) {
+    auto mask = Tp::get_cmpgt_mask(dataVec, P);
+    dataVec = _mm256_permutevar8x32_epi32(dataVec, Tp::get_perm(mask));
+    Tp::store_vec(reinterpret_cast<TV*>(left), dataVec);
+    Tp::store_vec(reinterpret_cast<TV*>(right), dataVec);
+    auto popCount = -_mm_popcnt_u64(mask);
+    right += popCount;
+    left += popCount + N;
+  }
+
+  template <int InnerUnroll>
+  T* vectorized_partition(T* left, T* right, alignment_hint hint) {
+    assert(right - left >= SMALL_SORT_THRESHOLD_ELEMENTS);
+    assert((reinterpret_cast<size_t>(left) & ELEMENT_ALIGN) == 0);
+    assert((reinterpret_cast<size_t>(right) & ELEMENT_ALIGN) == 0);
+
+    // Vectorized double-pumped (dual-sided) partitioning:
+    // We start with picking a pivot using the median-of-3 "method"
+    // Once we have a sensible pivot stored as the last element of the array
+    // We process the array from both ends.
+    //
+    // To get this rolling, we first read 2 Vector256 elements from the left and
+    // another 2 from the right, and store them in some temporary space in order
+    // to leave enough "space" inside the vector for storing partitioned values.
+    // Why 2 from each side? Because we need n+1 from each side where n is the
+    // number of Vector256 elements we process in each iteration... The
+    // reasoning behind the +1 is because of the way we decide from *which* side
+    // to read, we may end up reading up to one more vector from any given side
+    // and writing it in its entirety to the opposite side (this becomes
+    // slightly clearer when reading the code below...) Conceptually, the bulk
+    // of the processing looks like this after clearing out some initial space
+    // as described above:
+
+    // [.............................................................................]
+    //  ^wl         ^rl                                              rr^         wr^
+    // Where:
+    //   wl = writeLeft
+    //   rl = readLeft
+    //   rr = readRight
+    //   wr = writeRight
+
+    // In every iteration, we select what side to read from based on how much
+    // space is left between head read/write pointer on each side...
+    // We read from where there is a smaller gap, e.g. that side
+    // that is closer to the unfortunate possibility of its write head
+    // overwriting its read head... By reading from THAT side, we're ensuring
+    // this does not happen
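+    //
+    // As a concrete illustration of one partition_block step (values made up
+    // for the example, table row taken from the int64_t perm_table): with
+    // T = int64_t (N = 4), pivot = 50 and dataVec = {90, 10, 70, 30},
+    // get_cmpgt_mask returns 0b0101 (elements 0 and 2 are greater than the
+    // pivot). perm_table row 5 = {2, 3, 6, 7, 0, 1, 4, 5} permutes the vector
+    // to {10, 30, 90, 70}; the full vector is stored at both write heads, and
+    // since popcnt(0b0101) == 2, left advances by 4-2 = 2 kept elements while
+    // right retreats by 2.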
By reading from THAT side, we're ensuring + // this does not happen + + // An additional unfortunate complexity we need to deal with is that the + // right pointer must be decremented by another Vector256.Count elements + // Since the Load/Store primitives obviously accept start addresses + auto pivot = *right; + // We do this here just in case we need to pre-align to the right + // We end up + *right = std::numeric_limits::Max(); + + // Broadcast the selected pivot + const TV P = Tp::get_vec_pivot(pivot);//_mm256_set1_epi64x(pivot); + + auto readLeft = left; + auto readRight = right; + auto writeLeft = left; + auto writeRight = right - N; + + auto tmpStartLeft = _temp; + auto tmpLeft = tmpStartLeft; + auto tmpStartRight = _temp + PARTITION_TMP_SIZE_IN_ELEMENTS; + auto tmpRight = tmpStartRight; + + + tmpRight -= N; + + // the read heads always advance by 8 elements, or 32 bytes, + // We can spend some extra time here to align the pointers + // so they start at a cache-line boundary + // Once that happens, we can read with Avx.LoadAlignedVector256 + // And also know for sure that our reads will never cross cache-lines + // Otherwise, 50% of our AVX2 Loads will need to read from two cache-lines + + const auto leftAlign = hint.left_align; + const auto rightAlign = hint.right_align; + + auto preAlignedLeft = (TV*) (left + leftAlign); + auto preAlignedRight = (TV*) (right + rightAlign - N); + + // Read overlapped data from right (includes re-reading the pivot) + auto RT0 = Tp::load_vec(preAlignedRight); + auto LT0 = Tp::load_vec(preAlignedLeft); + auto rtMask = Tp::get_cmpgt_mask(RT0, P); + auto ltMask = Tp::get_cmpgt_mask(LT0, P); + auto rtPopCount = max(_mm_popcnt_u32(rtMask), rightAlign); + auto ltPopCount = _mm_popcnt_u32(ltMask); + RT0 = _mm256_permutevar8x32_epi32(RT0, Tp::get_perm(rtMask)); + LT0 = _mm256_permutevar8x32_epi32(LT0, Tp::get_perm(ltMask)); + Tp::store_vec((TV*) tmpRight, RT0); + Tp::store_vec((TV*) tmpLeft, LT0); + + auto rai = ~((rightAlign - 1) >> 31); + auto lai = leftAlign >> 31; + + tmpRight -= rtPopCount & rai; + rtPopCount = N - rtPopCount; + readRight += (rightAlign - N) & rai; + + Tp::store_vec((TV*) tmpRight, LT0); + tmpRight -= ltPopCount & lai; + ltPopCount = N - ltPopCount; + tmpLeft += ltPopCount & lai; + tmpStartLeft += -leftAlign & lai; + readLeft += (leftAlign + N) & lai; + + Tp::store_vec((TV*) tmpLeft, RT0); + tmpLeft += rtPopCount & rai; + tmpStartRight -= rightAlign & rai; + + if (leftAlign > 0) { + tmpRight += N; + readLeft = align_left_scalar_uncommon(readLeft, pivot, tmpLeft, tmpRight); + tmpRight -= N; + } + + if (rightAlign < 0) { + tmpRight += N; + readRight = + align_right_scalar_uncommon(readRight, pivot, tmpLeft, tmpRight); + tmpRight -= N; + } + assert(((size_t)readLeft & ALIGN_MASK) == 0); + assert(((size_t)readRight & ALIGN_MASK) == 0); + + assert((((size_t)readRight - (size_t)readLeft) % ALIGN) == 0); + assert((readRight - readLeft) >= InnerUnroll * 2); + + // From now on, we are fully aligned + // and all reading is done in full vector units + auto readLeftV = (TV*) readLeft; + auto readRightV = (TV*) readRight; + #ifndef NDEBUG + readLeft = nullptr; + readRight = nullptr; + #endif + + for (auto u = 0; u < InnerUnroll; u++) { + auto dl = Tp::load_vec(readLeftV + u); + auto dr = Tp::load_vec(readRightV - (u + 1)); + partition_block(dl, P, tmpLeft, tmpRight); + partition_block(dr, P, tmpLeft, tmpRight); + } + + tmpRight += N; + // Adjust for the reading that was made above + readLeftV += InnerUnroll; + readRightV -= InnerUnroll*2; + TV* 
+    while (readLeftV < readRightV) {
+      if (writeRight - ((T *) readRightV) < (2 * (InnerUnroll * N) - N)) {
+        nextPtr = readRightV;
+        readRightV -= InnerUnroll;
+      } else {
+        mess_up_cmov();
+        nextPtr = readLeftV;
+        readLeftV += InnerUnroll;
+      }
+
+      TV d01, d02, d03, d04, d05, d06, d07, d08, d09, d10, d11, d12;
+
+      switch (InnerUnroll) {
+        case 12: d12 = Tp::load_vec(nextPtr + InnerUnroll - 12);
+        case 11: d11 = Tp::load_vec(nextPtr + InnerUnroll - 11);
+        case 10: d10 = Tp::load_vec(nextPtr + InnerUnroll - 10);
+        case 9: d09 = Tp::load_vec(nextPtr + InnerUnroll - 9);
+        case 8: d08 = Tp::load_vec(nextPtr + InnerUnroll - 8);
+        case 7: d07 = Tp::load_vec(nextPtr + InnerUnroll - 7);
+        case 6: d06 = Tp::load_vec(nextPtr + InnerUnroll - 6);
+        case 5: d05 = Tp::load_vec(nextPtr + InnerUnroll - 5);
+        case 4: d04 = Tp::load_vec(nextPtr + InnerUnroll - 4);
+        case 3: d03 = Tp::load_vec(nextPtr + InnerUnroll - 3);
+        case 2: d02 = Tp::load_vec(nextPtr + InnerUnroll - 2);
+        case 1: d01 = Tp::load_vec(nextPtr + InnerUnroll - 1);
+      }
+
+      switch (InnerUnroll) {
+        case 12: partition_block(d12, P, writeLeft, writeRight);
+        case 11: partition_block(d11, P, writeLeft, writeRight);
+        case 10: partition_block(d10, P, writeLeft, writeRight);
+        case 9: partition_block(d09, P, writeLeft, writeRight);
+        case 8: partition_block(d08, P, writeLeft, writeRight);
+        case 7: partition_block(d07, P, writeLeft, writeRight);
+        case 6: partition_block(d06, P, writeLeft, writeRight);
+        case 5: partition_block(d05, P, writeLeft, writeRight);
+        case 4: partition_block(d04, P, writeLeft, writeRight);
+        case 3: partition_block(d03, P, writeLeft, writeRight);
+        case 2: partition_block(d02, P, writeLeft, writeRight);
+        case 1: partition_block(d01, P, writeLeft, writeRight);
+      }
+    }
+
+    readRightV += (InnerUnroll - 1);
+
+    while (readLeftV <= readRightV) {
+      if (writeRight - (T *) readRightV < N) {
+        nextPtr = readRightV;
+        readRightV -= 1;
+      } else {
+        mess_up_cmov();
+        nextPtr = readLeftV;
+        readLeftV += 1;
+      }
+
+      auto d = Tp::load_vec(nextPtr);
+      partition_block(d, P, writeLeft, writeRight);
+    }
+
+    // 3. Copy-back the registers + remainder we partitioned in the beginning
+    auto leftTmpSize = tmpLeft - tmpStartLeft;
+    memcpy(writeLeft, tmpStartLeft, leftTmpSize * sizeof(T));
+    writeLeft += leftTmpSize;
+    auto rightTmpSize = tmpStartRight - tmpRight;
+    memcpy(writeLeft, tmpRight, rightTmpSize * sizeof(T));
+
+    // Shove the pivot back to the boundary
+    *right = *writeLeft;
+    *writeLeft++ = pivot;
+
+    assert(writeLeft > left);
+    assert(writeLeft <= right);
+
+    return writeLeft;
+  }
+public:
+  NOINLINE void sort(T* left, T* right) {
+    reset(left, right);
+    auto depthLimit = 2 * floor_log2_plus_one(right + 1 - left);
+    sort(left, right, alignment_hint(), depthLimit);
+  }
+};
+
+} // namespace gcsort
+#endif

From 98fc7b10014a83abbb041d77c29546da775b4660 Mon Sep 17 00:00:00 2001
From: Peter Sollich
Date: Fri, 29 May 2020 12:46:33 +0200
Subject: [PATCH 02/31] Bug fix from Dan.
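
The one-line change below corrects a copy/paste slip in alignment_hint::realign_right(), which reset left_align instead of right_align: when the quicksort recursed, the new right edge never got a fresh alignment hint while the still-valid left hint was thrown away. A minimal sketch of the intended behavior (hypothetical values, for illustration only):

    gcsort::alignment_hint h;          // both hints start out as REALIGN
    h.left_align = 3;                  // pretend both sides have been computed
    h.right_align = -2;
    gcsort::alignment_hint r = h.realign_right();
    // before the fix: r.left_align == REALIGN, r.right_align == -2   (wrong side reset)
    // after the fix:  r.left_align == 3,       r.right_align == REALIGN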
--- src/coreclr/src/gc/vxsort.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/coreclr/src/gc/vxsort.h b/src/coreclr/src/gc/vxsort.h index 59038555f9f983..27d1bfc8dac5a3 100644 --- a/src/coreclr/src/gc/vxsort.h +++ b/src/coreclr/src/gc/vxsort.h @@ -40,7 +40,7 @@ struct alignment_hint { alignment_hint realign_right() { alignment_hint copy = *this; - copy.left_align = REALIGN; + copy.right_align = REALIGN; return copy; } From 441033bd2e4b381626fc20f20ea949a60f515758 Mon Sep 17 00:00:00 2001 From: Peter Sollich Date: Fri, 29 May 2020 14:58:27 +0200 Subject: [PATCH 03/31] Use bigger mark list for experiments. --- src/coreclr/src/gc/gc.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/coreclr/src/gc/gc.cpp b/src/coreclr/src/gc/gc.cpp index 47326a7c5374f9..d12e8a5ac2c861 100644 --- a/src/coreclr/src/gc/gc.cpp +++ b/src/coreclr/src/gc/gc.cpp @@ -10074,7 +10074,7 @@ gc_heap::init_semi_shared() #ifdef MARK_LIST #ifdef MULTIPLE_HEAPS - mark_list_size = min (150*1024, max (8192, soh_segment_size/(2*10*32))); + mark_list_size = min (1024*1024, max (8192, soh_segment_size/(2*10*32))); g_mark_list = make_mark_list (mark_list_size*n_heaps); min_balance_threshold = alloc_quantum_balance_units * CLR_SIZE * 2; From 42d5acbc49408a3c2ee01b71922b1157c4daef38 Mon Sep 17 00:00:00 2001 From: Peter Sollich Date: Fri, 29 May 2020 15:35:13 +0200 Subject: [PATCH 04/31] Give up if the mark list size is bigger than a reasonable fraction of the ephemeral space. --- src/coreclr/src/gc/gc.cpp | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/src/coreclr/src/gc/gc.cpp b/src/coreclr/src/gc/gc.cpp index d12e8a5ac2c861..8de194b5f21b59 100644 --- a/src/coreclr/src/gc/gc.cpp +++ b/src/coreclr/src/gc/gc.cpp @@ -8242,6 +8242,25 @@ void gc_heap::sort_mark_list() // unsigned long start = GetCycleCount32(); + // compute total mark list size and total ephemeral size + size_t total_mark_list_size = 0; + size_t total_ephemeral_size = 0; + for (int i = 0; i < n_heaps; i++) + { + gc_heap* hp = g_heaps[i]; + size_t ephemeral_size = heap_segment_allocated (hp->ephemeral_heap_segment) - hp->gc_low; + total_ephemeral_size += ephemeral_size; + total_mark_list_size += (hp->mark_list_index - hp->mark_list); + } + + // give up if this is not an ephemeral GC or the mark list size is unreasonably large + if (settings.condemned_generation > 1 || total_mark_list_size > total_ephemeral_size/256) + { + mark_list_index = mark_list_end + 1; + // printf("sort_mark_list: overflow on heap %d\n", i); + return; + } + dprintf (3, ("Sorting mark lists")); if (mark_list_index > mark_list) _sort (mark_list, mark_list_index - 1, 0); From 9fee09e4415809bff66e63d4cc96efe79e20e09d Mon Sep 17 00:00:00 2001 From: Peter Sollich Date: Wed, 3 Jun 2020 10:37:38 +0200 Subject: [PATCH 05/31] Latest version from Dan, disable for ARM64. 
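
This drops the hand-included bitonic_sort.int64_t.generated.h in favor of a Python code generator (smallsort/bitonic_gen.py) that emits per-type AVX2 bitonic sorters (int32_t/uint32_t/int64_t/uint64_t/float/double) under smallsort/, and narrows the gate for vectorized sorting from HOST_64BIT to TARGET_AMD64 so that ARM64 stays on introsort. Judging from the argparse setup in bitonic_gen.py, the headers can be regenerated with something like `python3 bitonic_gen.py --vector-isa AVX2 --language cpp`.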
--- src/coreclr/src/gc/gc.cpp | 6 +- src/coreclr/src/gc/smallsort/bitonic_gen.py | 491 ++++++ .../bitonic_sort.AVX2.double.generated.h | 1492 ++++++++++++++++ .../bitonic_sort.AVX2.float.generated.h | 1532 ++++++++++++++++ .../bitonic_sort.AVX2.int32_t.generated.h | 1532 ++++++++++++++++ .../bitonic_sort.AVX2.int64_t.generated.h} | 381 ++-- .../bitonic_sort.AVX2.uint32_t.generated.h | 1532 ++++++++++++++++ .../bitonic_sort.AVX2.uint64_t.generated.h | 1540 +++++++++++++++++ .../src/gc/{ => smallsort}/bitonic_sort.h | 0 src/coreclr/src/gc/vxsort.cpp | 7 +- src/coreclr/src/gc/vxsort.h | 279 ++- 11 files changed, 8556 insertions(+), 236 deletions(-) create mode 100644 src/coreclr/src/gc/smallsort/bitonic_gen.py create mode 100644 src/coreclr/src/gc/smallsort/bitonic_sort.AVX2.double.generated.h create mode 100644 src/coreclr/src/gc/smallsort/bitonic_sort.AVX2.float.generated.h create mode 100644 src/coreclr/src/gc/smallsort/bitonic_sort.AVX2.int32_t.generated.h rename src/coreclr/src/gc/{bitonic_sort.int64_t.generated.h => smallsort/bitonic_sort.AVX2.int64_t.generated.h} (97%) create mode 100644 src/coreclr/src/gc/smallsort/bitonic_sort.AVX2.uint32_t.generated.h create mode 100644 src/coreclr/src/gc/smallsort/bitonic_sort.AVX2.uint64_t.generated.h rename src/coreclr/src/gc/{ => smallsort}/bitonic_sort.h (100%) diff --git a/src/coreclr/src/gc/gc.cpp b/src/coreclr/src/gc/gc.cpp index 8de194b5f21b59..1d239fa1a63851 100644 --- a/src/coreclr/src/gc/gc.cpp +++ b/src/coreclr/src/gc/gc.cpp @@ -19,7 +19,7 @@ #include "gcpriv.h" -#ifdef HOST_64BIT +#ifdef TARGET_AMD64 #define USE_VXSORT #else #define USE_INTROSORT @@ -2094,8 +2094,8 @@ namespace std } void vxsort(uint8_t** low, uint8_t** high, unsigned int depth) { -// auto sorter = gcsort::vxsort(); - auto sorter = gcsort::vxsort(); +// auto sorter = gcsort::vxsort(); + auto sorter = gcsort::vxsort(); sorter.sort((int64_t*)low, (int64_t*)high); #ifdef _DEBUG for (uint8_t** p = low; p < high; p++) diff --git a/src/coreclr/src/gc/smallsort/bitonic_gen.py b/src/coreclr/src/gc/smallsort/bitonic_gen.py new file mode 100644 index 00000000000000..047fbd5fa5bf89 --- /dev/null +++ b/src/coreclr/src/gc/smallsort/bitonic_gen.py @@ -0,0 +1,491 @@ +#!/usr/bin/env python3 +import argparse +from datetime import datetime +from enum import Enum + +max_bitonic_sort_verctors = 16 + + +def next_power_of_2(v): + v = v - 1 + v |= v >> 1 + v |= v >> 2 + v |= v >> 4 + v |= v >> 8 + v |= v >> 16 + v = v + 1 + return int(v) + + +largest_merge_variant_needed = next_power_of_2(max_bitonic_sort_verctors) / 2; + +## types to function suffix +bitonic_type_map = { + "int32_t": "__m256i", + "uint32_t": "__m256i", + "float": "__m256", + "int64_t": "__m256i", + "uint64_t": "__m256i", + "double": "__m256d", +} + +bitonic_size_map = { + "int32_t": 4, + "uint32_t": 4, + "float": 4, + "int64_t": 8, + "uint64_t": 8, + "double": 8, +} + +bitonic_types = bitonic_size_map.keys() + + +def i2d(v, t): + if t == "double": + return v + elif t == "float": + return f"s2d({v})" + return f"i2d({v})" + +def i2s(v, t): + if t == "double": + raise Exception("WTF") + elif t == "float": + return f"i2s({v})" + return v + + +def d2i(v, t): + if t == "double": + return v + elif t == "float": + return f"d2s({v})" + return f"d2i({v})" + +def s2i(v, t): + if t == "double": + raise Exception("WTF") + elif t == "float": + return f"s2i({v})" + return v + + + +def generate_param_list(start, numParams): + return str.join(", ", list(map(lambda p: f"d{p:02d}", range(start, start + numParams)))) + + +def 
generate_param_def_list(numParams, nativeType): + return str.join(", ", list(map(lambda p: f"{bitonic_type_map[nativeType]}& d{p:02d}", range(1, numParams + 1)))) + + +def generate_shuffle_X1(v, t): + if bitonic_size_map[t] == 4: + return i2s(f"_mm256_shuffle_epi32({s2i(v, t)}, 0xB1)", t) + elif bitonic_size_map[t] == 8: + return d2i(f"_mm256_shuffle_pd({i2d(v, t)}, {i2d(v, t)}, 0x5)", t) + + +def generate_shuffle_X2(v, t): + if bitonic_size_map[t] == 4: + return i2s(f"_mm256_shuffle_epi32({s2i(v, t)}, 0x4E)", t) + elif bitonic_size_map[t] == 8: + return d2i(f"_mm256_permute4x64_pd({i2d(v, t)}, 0x4E)", t) + + +def generate_shuffle_XR(v, t): + if bitonic_size_map[t] == 4: + return i2s(f"_mm256_shuffle_epi32({s2i(v, t)}, 0x1B)", t) + elif bitonic_size_map[t] == 8: + return d2i(f"_mm256_permute4x64_pd({i2d(v, t)}, 0x1B)", t) + + +def generate_blend_B1(v1, v2, t, ascending): + if bitonic_size_map[t] == 4: + if ascending: + return i2s(f"_mm256_blend_epi32({s2i(v1, t)}, {s2i(v2, t)}, 0xAA)", t) + else: + return i2s(f"_mm256_blend_epi32({s2i(v2, t)}, {s2i(v1, t)}, 0xAA)", t) + elif bitonic_size_map[t] == 8: + if ascending: + return d2i(f"_mm256_blend_pd({i2d(v1, t)}, {i2d(v2, t)}, 0xA)", t) + else: + return d2i(f"_mm256_blend_pd({i2d(v2, t)}, {i2d(v1, t)}, 0xA)", t) + + +def generate_blend_B2(v1, v2, t, ascending): + if bitonic_size_map[t] == 4: + if ascending: + return i2s(f"_mm256_blend_epi32({s2i(v1, t)}, {s2i(v2, t)}, 0xCC)", t) + else: + return i2s(f"_mm256_blend_epi32({s2i(v2, t)}, {s2i(v1, t)}, 0xCC)", t) + elif bitonic_size_map[t] == 8: + if ascending: + return d2i(f"_mm256_blend_pd({i2d(v1, t)}, {i2d(v2, t)}, 0xC)", t) + else: + return d2i(f"_mm256_blend_pd({i2d(v2, t)}, {i2d(v1, t)}, 0xC)", t) + + +def generate_blend_B4(v1, v2, t, ascending): + if bitonic_size_map[t] == 4: + if ascending: + return i2s(f"_mm256_blend_epi32({s2i(v1, t)}, {s2i(v2, t)}, 0xF0)", t) + else: + return i2s(f"_mm256_blend_epi32({s2i(v2, t)}, {s2i(v1, t)}, 0xF0)", t) + elif bitonic_size_map[t] == 8: + raise Exception("WTF") + + +def generate_cross(v, t): + if bitonic_size_map[t] == 4: + return d2i(f"_mm256_permute4x64_pd({i2d(v, t)}, 0x4E)", t) + elif bitonic_size_map[t] == 8: + raise Exception("WTF") + + +def generate_reverse(v, t): + if bitonic_size_map[t] == 4: + v = f"_mm256_shuffle_epi32({s2i(v, t)}, 0x1B)" + return d2i(f"_mm256_permute4x64_pd({i2d(v, 'int32_t')}, 0x4E)", t) + elif bitonic_size_map[t] == 8: + return d2i(f"_mm256_permute4x64_pd({i2d(v, t)}, 0x1B)", t) + + +def crappity_crap_crap(v1, v2, t): + if t == "int64_t": + return f"cmp = _mm256_cmpgt_epi64({v1}, {v2});" + elif t == "uint64_t": + return f"cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, {v1}), _mm256_xor_si256(topBit, {v2}));" + + return "" + + +def generate_min(v1, v2, t): + if t == "int32_t": + return f"_mm256_min_epi32({v1}, {v2})" + elif t == "uint32_t": + return f"_mm256_min_epu32({v1}, {v2})" + elif t == "float": + return f"_mm256_min_ps({v1}, {v2})" + elif t == "int64_t": + return d2i(f"_mm256_blendv_pd({i2d(v1, t)}, {i2d(v2, t)}, i2d(cmp))", t) + elif t == "uint64_t": + return d2i(f"_mm256_blendv_pd({i2d(v1, t)}, {i2d(v2, t)}, i2d(cmp))", t) + elif t == "double": + return f"_mm256_min_pd({v1}, {v2})" + + +def generate_max(v1, v2, t): + if t == "int32_t": + return f"_mm256_max_epi32({v1}, {v2})" + elif t == "uint32_t": + return f"_mm256_max_epu32({v1}, {v2})" + elif t == "float": + return f"_mm256_max_ps({v1}, {v2})" + elif t == "int64_t": + return d2i(f"_mm256_blendv_pd({i2d(v2, t)}, {i2d(v1, t)}, i2d(cmp))", t) + elif t == 
"uint64_t": + return d2i(f"_mm256_blendv_pd({i2d(v2, t)}, {i2d(v1, t)}, i2d(cmp))", t) + elif t == "double": + return f"_mm256_max_pd({v1}, {v2})" + + +def generate_1v_basic_sorters(f, type, ascending): + maybe_cmp = lambda: ", cmp" if (type == "int64_t" or type == "uint64_t") else "" + maybe_topbit = lambda: f"\n {bitonic_type_map[type]} topBit = _mm256_set1_epi64x(1LLU << 63);" if ( + type == "uint64_t") else "" + + suffix = "ascending" if ascending else "descending" + + s = f""" static INLINE void sort_01v_{suffix}({generate_param_def_list(1, type)}) {{ + {bitonic_type_map[type]} min, max, s{maybe_cmp()};{maybe_topbit()} + + s = {generate_shuffle_X1("d01", type)}; + {crappity_crap_crap("s", "d01", type)} + min = {generate_min("s", "d01", type)}; + max = {generate_max("s", "d01", type)}; + d01 = {generate_blend_B1("min", "max", type, ascending)}; + + s = {generate_shuffle_XR("d01", type)}; + {crappity_crap_crap("s", "d01", type)} + min = {generate_min("s", "d01", type)}; + max = {generate_max("s", "d01", type)}; + d01 = {generate_blend_B2("min", "max", type, ascending)}; + + s = {generate_shuffle_X1("d01", type)}; + {crappity_crap_crap("s", "d01", type)} + min = {generate_min("s", "d01", type)}; + max = {generate_max("s", "d01", type)}; + d01 = {generate_blend_B1("min", "max", type, ascending)};""" + + print(s, file=f) + + if bitonic_size_map[type] == 4: + s = f""" + s = {generate_reverse("d01", type)}; + min = {generate_min("s", "d01", type)}; + max = {generate_max("s", "d01", type)}; + d01 = {generate_blend_B4("min", "max", type, ascending)}; + + s = {generate_shuffle_X2("d01", type)}; + min = {generate_min("s", "d01", type)}; + max = {generate_max("s", "d01", type)}; + d01 = {generate_blend_B2("min", "max", type, ascending)}; + + s = {generate_shuffle_X1("d01", type)}; + min = {generate_min("s", "d01", type)}; + max = {generate_max("s", "d01", type)}; + d01 = {generate_blend_B1("min", "max", type, ascending)};""" + print(s, file=f) + print("}", file=f) + + +def generate_1v_merge_sorters(f, type, ascending): + maybe_cmp = lambda: ", cmp" if (type == "int64_t" or type == "uint64_t") else "" + maybe_topbit = lambda: f"\n {bitonic_type_map[type]} topBit = _mm256_set1_epi64x(1LLU << 63);" if ( + type == "uint64_t") else "" + + suffix = "ascending" if ascending else "descending" + + s = f""" static INLINE void sort_01v_merge_{suffix}({generate_param_def_list(1, type)}) {{ + {bitonic_type_map[type]} min, max, s{maybe_cmp()};{maybe_topbit()}""" + print(s, file=f) + + if bitonic_size_map[type] == 4: + s = f""" + s = {generate_cross("d01", type)}; + min = {generate_min("s", "d01", type)}; + max = {generate_max("s", "d01", type)}; + d01 = {generate_blend_B4("min", "max", type, ascending)};""" + print(s, file=f) + + s = f""" + s = {generate_shuffle_X2("d01", type)}; + {crappity_crap_crap("s", "d01", type)} + min = {generate_min("s", "d01", type)}; + max = {generate_max("s", "d01", type)}; + d01 = {generate_blend_B2("min", "max", type, ascending)}; + + s = {generate_shuffle_X1("d01", type)}; + {crappity_crap_crap("s", "d01", type)} + min = {generate_min("s", "d01", type)}; + max = {generate_max("s", "d01", type)}; + d01 = {generate_blend_B1("min", "max", type, ascending)};""" + + print(s, file=f) + print(" }", file=f) + + +def generate_1v_sorters(f, type, ascending): + generate_1v_basic_sorters(f, type, ascending) + generate_1v_merge_sorters(f, type, ascending) + + +def generate_compounded_sorters(f, width, type, ascending): + maybe_cmp = lambda: ", cmp" if (type == "int64_t" or type == 
"uint64_t") else "" + maybe_topbit = lambda: f"\n {bitonic_type_map[type]} topBit = _mm256_set1_epi64x(1LLU << 63);" if ( + type == "uint64_t") else "" + + w1 = int(next_power_of_2(width) / 2) + w2 = int(width - w1) + + suffix = "ascending" if ascending else "descending" + rev_suffix = "descending" if ascending else "ascending" + + s = f""" static INLINE void sort_{width:02d}v_{suffix}({generate_param_def_list(width, type)}) {{ + {bitonic_type_map[type]} tmp{maybe_cmp()};{maybe_topbit()} + + sort_{w1:02d}v_{suffix}({generate_param_list(1, w1)}); + sort_{w2:02d}v_{rev_suffix}({generate_param_list(w1 + 1, w2)});""" + + print(s, file=f) + + for r in range(w1 + 1, width + 1): + x = w1 + 1 - (r - w1) + s = f""" + tmp = d{r:02d}; + {crappity_crap_crap(f"d{x:02d}", f"d{r:02d}", type)} + d{r:02d} = {generate_max(f"d{x:02d}", f"d{r:02d}", type)}; + d{x:02d} = {generate_min(f"d{x:02d}", "tmp", type)};""" + + print(s, file=f) + + s = f""" + sort_{w1:02d}v_merge_{suffix}({generate_param_list(1, w1)}); + sort_{w2:02d}v_merge_{suffix}({generate_param_list(w1 + 1, w2)});""" + print(s, file=f) + print(" }", file=f) + + +def generate_compounded_mergers(f, width, type, ascending): + maybe_cmp = lambda: ", cmp" if (type == "int64_t" or type == "uint64_t") else "" + maybe_topbit = lambda: f"\n {bitonic_type_map[type]} topBit = _mm256_set1_epi64x(1LLU << 63);" if ( + type == "uint64_t") else "" + + w1 = int(next_power_of_2(width) / 2) + w2 = int(width - w1) + + suffix = "ascending" if ascending else "descending" + rev_suffix = "descending" if ascending else "ascending" + + s = f""" static INLINE void sort_{width:02d}v_merge_{suffix}({generate_param_def_list(width, type)}) {{ + {bitonic_type_map[type]} tmp{maybe_cmp()};{maybe_topbit()}""" + print(s, file=f) + + for r in range(w1 + 1, width + 1): + x = r - w1 + s = f""" + tmp = d{x:02d}; + {crappity_crap_crap(f"d{r:02d}", f"d{x:02d}", type)} + d{x:02d} = {generate_min(f"d{r:02d}", f"d{x:02d}", type)}; + {crappity_crap_crap(f"d{r:02d}", "tmp", type)} + d{r:02d} = {generate_max(f"d{r:02d}", "tmp", type)};""" + print(s, file=f) + + s = f""" + sort_{w1:02d}v_merge_{suffix}({generate_param_list(1, w1)}); + sort_{w2:02d}v_merge_{suffix}({generate_param_list(w1 + 1, w2)});""" + print(s, file=f) + print(" }", file=f) + + +def get_load_intrinsic(type, v, offset): + if type == "double": + return f"_mm256_loadu_pd(({type} const *) ((__m256d const *) {v} + {offset}))" + if type == "float": + return f"_mm256_loadu_ps(({type} const *) ((__m256 const *) {v} + {offset}))" + return f"_mm256_lddqu_si256((__m256i const *) {v} + {offset});" + + +def get_store_intrinsic(type, ptr, offset, value): + if type == "double": + return f"_mm256_storeu_pd(({type} *) ((__m256d *) {ptr} + {offset}), {value})" + if type == "float": + return f"_mm256_storeu_ps(({type} *) ((__m256 *) {ptr} + {offset}), {value})" + return f"_mm256_storeu_si256((__m256i *) {ptr} + {offset}, {value})" + + +def generate_entry_points(f, type): + for m in range(1, max_bitonic_sort_verctors + 1): + s = f""" +static NOINLINE void sort_{m:02d}v({type} *ptr) {{""" + print(s, file=f) + + for l in range(0, m): + s = f" {bitonic_type_map[type]} d{l + 1:02d} = {get_load_intrinsic(type, 'ptr', l)};" + print(s, file=f) + + s = f" sort_{m:02d}v_ascending({generate_param_list(1, m)});" + print(s, file=f) + + for l in range(0, m): + s = f" {get_store_intrinsic(type, 'ptr', l, f'd{l + 1:02d}')};" + print(s, file=f) + + print("}", file=f) + + +def generate_master_entry_point(f, type): + s = f""" static void sort({type} *ptr, size_t 
length) {{
+ const int N = {int(32 / bitonic_size_map[type])};
+
+ switch(length / N) {{"""
+    print(s, file=f)
+
+    for m in range(1, max_bitonic_sort_verctors + 1):
+        s = f"    case {m}: sort_{m:02d}v(ptr); break;"
+        print(s, file=f)
+    print(" }", file=f)
+    print("}", file=f)
+    pass
+
+
+def autogenerated_blabber():
+    return f"""/////////////////////////////////////////////////////////////////////////////
+////
+// This file was auto-generated by a tool at {datetime.now().strftime("%F %H:%M:%S")}
+//
+// It is recommended you DO NOT directly edit this file but instead edit
+// the code-generator that generated this source file instead.
+/////////////////////////////////////////////////////////////////////////////"""
+
+
+def generate_per_type(f, type, opts):
+    s = f"""{autogenerated_blabber()}
+
+#ifndef BITONIC_SORT_{str(opts.vector_isa).upper()}_{type.upper()}_H
+#define BITONIC_SORT_{str(opts.vector_isa).upper()}_{type.upper()}_H
+
+#include <immintrin.h>
+#include "bitonic_sort.h"
+
+#ifdef _MSC_VER
+    // MSVC
+    #define INLINE __forceinline
+    #define NOINLINE __declspec(noinline)
+#else
+    // GCC + Clang
+    #define INLINE __attribute__((always_inline))
+    #define NOINLINE __attribute__((noinline))
+#endif
+
+#define i2d _mm256_castsi256_pd
+#define d2i _mm256_castpd_si256
+#define i2s _mm256_castsi256_ps
+#define s2i _mm256_castps_si256
+#define s2d _mm256_castps_pd
+#define d2s _mm256_castpd_ps
+
+namespace gcsort {{
+namespace smallsort {{
+template<> struct bitonic<{type}> {{
+public:
+"""
+    print(s, file=f)
+    generate_1v_sorters(f, type, ascending=True)
+    generate_1v_sorters(f, type, ascending=False)
+    for width in range(2, max_bitonic_sort_verctors + 1):
+        generate_compounded_sorters(f, width, type, ascending=True)
+        generate_compounded_sorters(f, width, type, ascending=False)
+        if width <= largest_merge_variant_needed:
+            generate_compounded_mergers(f, width, type, ascending=True)
+            generate_compounded_mergers(f, width, type, ascending=False)
+
+    generate_entry_points(f, type)
+    generate_master_entry_point(f, type)
+    print("};\n}\n}\n#endif", file=f)
+
+
+class Language(Enum):
+    csharp = 'csharp'
+    cpp = 'cpp'
+    rust = 'rust'
+
+    def __str__(self):
+        return self.value
+
+class VectorISA(Enum):
+    AVX2 = 'AVX2'
+    AVX512 = 'AVX512'
+    SVE = 'SVE'
+
+    def __str__(self):
+        return self.value
+
+
+def generate_all_types():
+    parser = argparse.ArgumentParser()
+    parser.add_argument("--language", type=Language, choices=list(Language), help="select output language: csharp/cpp/rust")
+    parser.add_argument("--vector-isa", type=VectorISA, choices=list(VectorISA), help="select vector isa: AVX2/AVX512/SVE")
+
+    opts = parser.parse_args()
+
+    for type in bitonic_types:
+        with open(f"bitonic_sort.{opts.vector_isa}.{type}.generated.h", "w") as f:
+            generate_per_type(f, type, opts)
+
+
+if __name__ == '__main__':
+    generate_all_types()
diff --git a/src/coreclr/src/gc/smallsort/bitonic_sort.AVX2.double.generated.h b/src/coreclr/src/gc/smallsort/bitonic_sort.AVX2.double.generated.h
new file mode 100644
index 00000000000000..4c9fa8e4a86571
--- /dev/null
+++ b/src/coreclr/src/gc/smallsort/bitonic_sort.AVX2.double.generated.h
@@ -0,0 +1,1492 @@
+/////////////////////////////////////////////////////////////////////////////
+////
+// This file was auto-generated by a tool at 2020-05-31 19:46:17
+//
+// It is recommended you DO NOT directly edit this file but instead edit
+///////////////////////////////////////////////////////////////////////////// + +#ifndef BITONIC_SORT_AVX2_DOUBLE_H +#define BITONIC_SORT_AVX2_DOUBLE_H + +#include +#include "bitonic_sort.h" + +#ifdef _MSC_VER + // MSVC + #define INLINE __forceinline + #define NOINLINE __declspec(noinline) +#else + // GCC + Clang + #define INLINE __attribute__((always_inline)) + #define NOINLINE __attribute__((noinline)) +#endif + +#define i2d _mm256_castsi256_pd +#define d2i _mm256_castpd_si256 +#define i2s _mm256_castsi256_ps +#define s2i _mm256_castps_si256 +#define s2d _mm256_castps_pd +#define d2s _mm256_castpd_ps + +namespace gcsort { +namespace smallsort { +template<> struct bitonic { +public: + + static INLINE void sort_01v_ascending(__m256d& d01) { + __m256d min, max, s; + + s = _mm256_shuffle_pd(d01, d01, 0x5); + + min = _mm256_min_pd(s, d01); + max = _mm256_max_pd(s, d01); + d01 = _mm256_blend_pd(min, max, 0xA); + + s = _mm256_permute4x64_pd(d01, 0x1B); + + min = _mm256_min_pd(s, d01); + max = _mm256_max_pd(s, d01); + d01 = _mm256_blend_pd(min, max, 0xC); + + s = _mm256_shuffle_pd(d01, d01, 0x5); + + min = _mm256_min_pd(s, d01); + max = _mm256_max_pd(s, d01); + d01 = _mm256_blend_pd(min, max, 0xA); +} + static INLINE void sort_01v_merge_ascending(__m256d& d01) { + __m256d min, max, s; + + s = _mm256_permute4x64_pd(d01, 0x4E); + + min = _mm256_min_pd(s, d01); + max = _mm256_max_pd(s, d01); + d01 = _mm256_blend_pd(min, max, 0xC); + + s = _mm256_shuffle_pd(d01, d01, 0x5); + + min = _mm256_min_pd(s, d01); + max = _mm256_max_pd(s, d01); + d01 = _mm256_blend_pd(min, max, 0xA); + } + static INLINE void sort_01v_descending(__m256d& d01) { + __m256d min, max, s; + + s = _mm256_shuffle_pd(d01, d01, 0x5); + + min = _mm256_min_pd(s, d01); + max = _mm256_max_pd(s, d01); + d01 = _mm256_blend_pd(max, min, 0xA); + + s = _mm256_permute4x64_pd(d01, 0x1B); + + min = _mm256_min_pd(s, d01); + max = _mm256_max_pd(s, d01); + d01 = _mm256_blend_pd(max, min, 0xC); + + s = _mm256_shuffle_pd(d01, d01, 0x5); + + min = _mm256_min_pd(s, d01); + max = _mm256_max_pd(s, d01); + d01 = _mm256_blend_pd(max, min, 0xA); +} + static INLINE void sort_01v_merge_descending(__m256d& d01) { + __m256d min, max, s; + + s = _mm256_permute4x64_pd(d01, 0x4E); + + min = _mm256_min_pd(s, d01); + max = _mm256_max_pd(s, d01); + d01 = _mm256_blend_pd(max, min, 0xC); + + s = _mm256_shuffle_pd(d01, d01, 0x5); + + min = _mm256_min_pd(s, d01); + max = _mm256_max_pd(s, d01); + d01 = _mm256_blend_pd(max, min, 0xA); + } + static INLINE void sort_02v_ascending(__m256d& d01, __m256d& d02) { + __m256d tmp; + + sort_01v_ascending(d01); + sort_01v_descending(d02); + + tmp = d02; + + d02 = _mm256_max_pd(d01, d02); + d01 = _mm256_min_pd(d01, tmp); + + sort_01v_merge_ascending(d01); + sort_01v_merge_ascending(d02); + } + static INLINE void sort_02v_descending(__m256d& d01, __m256d& d02) { + __m256d tmp; + + sort_01v_descending(d01); + sort_01v_ascending(d02); + + tmp = d02; + + d02 = _mm256_max_pd(d01, d02); + d01 = _mm256_min_pd(d01, tmp); + + sort_01v_merge_descending(d01); + sort_01v_merge_descending(d02); + } + static INLINE void sort_02v_merge_ascending(__m256d& d01, __m256d& d02) { + __m256d tmp; + + tmp = d01; + + d01 = _mm256_min_pd(d02, d01); + + d02 = _mm256_max_pd(d02, tmp); + + sort_01v_merge_ascending(d01); + sort_01v_merge_ascending(d02); + } + static INLINE void sort_02v_merge_descending(__m256d& d01, __m256d& d02) { + __m256d tmp; + + tmp = d01; + + d01 = _mm256_min_pd(d02, d01); + + d02 = _mm256_max_pd(d02, tmp); + + 
sort_01v_merge_descending(d01); + sort_01v_merge_descending(d02); + } + static INLINE void sort_03v_ascending(__m256d& d01, __m256d& d02, __m256d& d03) { + __m256d tmp; + + sort_02v_ascending(d01, d02); + sort_01v_descending(d03); + + tmp = d03; + + d03 = _mm256_max_pd(d02, d03); + d02 = _mm256_min_pd(d02, tmp); + + sort_02v_merge_ascending(d01, d02); + sort_01v_merge_ascending(d03); + } + static INLINE void sort_03v_descending(__m256d& d01, __m256d& d02, __m256d& d03) { + __m256d tmp; + + sort_02v_descending(d01, d02); + sort_01v_ascending(d03); + + tmp = d03; + + d03 = _mm256_max_pd(d02, d03); + d02 = _mm256_min_pd(d02, tmp); + + sort_02v_merge_descending(d01, d02); + sort_01v_merge_descending(d03); + } + static INLINE void sort_03v_merge_ascending(__m256d& d01, __m256d& d02, __m256d& d03) { + __m256d tmp; + + tmp = d01; + + d01 = _mm256_min_pd(d03, d01); + + d03 = _mm256_max_pd(d03, tmp); + + sort_02v_merge_ascending(d01, d02); + sort_01v_merge_ascending(d03); + } + static INLINE void sort_03v_merge_descending(__m256d& d01, __m256d& d02, __m256d& d03) { + __m256d tmp; + + tmp = d01; + + d01 = _mm256_min_pd(d03, d01); + + d03 = _mm256_max_pd(d03, tmp); + + sort_02v_merge_descending(d01, d02); + sort_01v_merge_descending(d03); + } + static INLINE void sort_04v_ascending(__m256d& d01, __m256d& d02, __m256d& d03, __m256d& d04) { + __m256d tmp; + + sort_02v_ascending(d01, d02); + sort_02v_descending(d03, d04); + + tmp = d03; + + d03 = _mm256_max_pd(d02, d03); + d02 = _mm256_min_pd(d02, tmp); + + tmp = d04; + + d04 = _mm256_max_pd(d01, d04); + d01 = _mm256_min_pd(d01, tmp); + + sort_02v_merge_ascending(d01, d02); + sort_02v_merge_ascending(d03, d04); + } + static INLINE void sort_04v_descending(__m256d& d01, __m256d& d02, __m256d& d03, __m256d& d04) { + __m256d tmp; + + sort_02v_descending(d01, d02); + sort_02v_ascending(d03, d04); + + tmp = d03; + + d03 = _mm256_max_pd(d02, d03); + d02 = _mm256_min_pd(d02, tmp); + + tmp = d04; + + d04 = _mm256_max_pd(d01, d04); + d01 = _mm256_min_pd(d01, tmp); + + sort_02v_merge_descending(d01, d02); + sort_02v_merge_descending(d03, d04); + } + static INLINE void sort_04v_merge_ascending(__m256d& d01, __m256d& d02, __m256d& d03, __m256d& d04) { + __m256d tmp; + + tmp = d01; + + d01 = _mm256_min_pd(d03, d01); + + d03 = _mm256_max_pd(d03, tmp); + + tmp = d02; + + d02 = _mm256_min_pd(d04, d02); + + d04 = _mm256_max_pd(d04, tmp); + + sort_02v_merge_ascending(d01, d02); + sort_02v_merge_ascending(d03, d04); + } + static INLINE void sort_04v_merge_descending(__m256d& d01, __m256d& d02, __m256d& d03, __m256d& d04) { + __m256d tmp; + + tmp = d01; + + d01 = _mm256_min_pd(d03, d01); + + d03 = _mm256_max_pd(d03, tmp); + + tmp = d02; + + d02 = _mm256_min_pd(d04, d02); + + d04 = _mm256_max_pd(d04, tmp); + + sort_02v_merge_descending(d01, d02); + sort_02v_merge_descending(d03, d04); + } + static INLINE void sort_05v_ascending(__m256d& d01, __m256d& d02, __m256d& d03, __m256d& d04, __m256d& d05) { + __m256d tmp; + + sort_04v_ascending(d01, d02, d03, d04); + sort_01v_descending(d05); + + tmp = d05; + + d05 = _mm256_max_pd(d04, d05); + d04 = _mm256_min_pd(d04, tmp); + + sort_04v_merge_ascending(d01, d02, d03, d04); + sort_01v_merge_ascending(d05); + } + static INLINE void sort_05v_descending(__m256d& d01, __m256d& d02, __m256d& d03, __m256d& d04, __m256d& d05) { + __m256d tmp; + + sort_04v_descending(d01, d02, d03, d04); + sort_01v_ascending(d05); + + tmp = d05; + + d05 = _mm256_max_pd(d04, d05); + d04 = _mm256_min_pd(d04, tmp); + + sort_04v_merge_descending(d01, d02, d03, 
d04); + sort_01v_merge_descending(d05); + } + static INLINE void sort_05v_merge_ascending(__m256d& d01, __m256d& d02, __m256d& d03, __m256d& d04, __m256d& d05) { + __m256d tmp; + + tmp = d01; + + d01 = _mm256_min_pd(d05, d01); + + d05 = _mm256_max_pd(d05, tmp); + + sort_04v_merge_ascending(d01, d02, d03, d04); + sort_01v_merge_ascending(d05); + } + static INLINE void sort_05v_merge_descending(__m256d& d01, __m256d& d02, __m256d& d03, __m256d& d04, __m256d& d05) { + __m256d tmp; + + tmp = d01; + + d01 = _mm256_min_pd(d05, d01); + + d05 = _mm256_max_pd(d05, tmp); + + sort_04v_merge_descending(d01, d02, d03, d04); + sort_01v_merge_descending(d05); + } + static INLINE void sort_06v_ascending(__m256d& d01, __m256d& d02, __m256d& d03, __m256d& d04, __m256d& d05, __m256d& d06) { + __m256d tmp; + + sort_04v_ascending(d01, d02, d03, d04); + sort_02v_descending(d05, d06); + + tmp = d05; + + d05 = _mm256_max_pd(d04, d05); + d04 = _mm256_min_pd(d04, tmp); + + tmp = d06; + + d06 = _mm256_max_pd(d03, d06); + d03 = _mm256_min_pd(d03, tmp); + + sort_04v_merge_ascending(d01, d02, d03, d04); + sort_02v_merge_ascending(d05, d06); + } + static INLINE void sort_06v_descending(__m256d& d01, __m256d& d02, __m256d& d03, __m256d& d04, __m256d& d05, __m256d& d06) { + __m256d tmp; + + sort_04v_descending(d01, d02, d03, d04); + sort_02v_ascending(d05, d06); + + tmp = d05; + + d05 = _mm256_max_pd(d04, d05); + d04 = _mm256_min_pd(d04, tmp); + + tmp = d06; + + d06 = _mm256_max_pd(d03, d06); + d03 = _mm256_min_pd(d03, tmp); + + sort_04v_merge_descending(d01, d02, d03, d04); + sort_02v_merge_descending(d05, d06); + } + static INLINE void sort_06v_merge_ascending(__m256d& d01, __m256d& d02, __m256d& d03, __m256d& d04, __m256d& d05, __m256d& d06) { + __m256d tmp; + + tmp = d01; + + d01 = _mm256_min_pd(d05, d01); + + d05 = _mm256_max_pd(d05, tmp); + + tmp = d02; + + d02 = _mm256_min_pd(d06, d02); + + d06 = _mm256_max_pd(d06, tmp); + + sort_04v_merge_ascending(d01, d02, d03, d04); + sort_02v_merge_ascending(d05, d06); + } + static INLINE void sort_06v_merge_descending(__m256d& d01, __m256d& d02, __m256d& d03, __m256d& d04, __m256d& d05, __m256d& d06) { + __m256d tmp; + + tmp = d01; + + d01 = _mm256_min_pd(d05, d01); + + d05 = _mm256_max_pd(d05, tmp); + + tmp = d02; + + d02 = _mm256_min_pd(d06, d02); + + d06 = _mm256_max_pd(d06, tmp); + + sort_04v_merge_descending(d01, d02, d03, d04); + sort_02v_merge_descending(d05, d06); + } + static INLINE void sort_07v_ascending(__m256d& d01, __m256d& d02, __m256d& d03, __m256d& d04, __m256d& d05, __m256d& d06, __m256d& d07) { + __m256d tmp; + + sort_04v_ascending(d01, d02, d03, d04); + sort_03v_descending(d05, d06, d07); + + tmp = d05; + + d05 = _mm256_max_pd(d04, d05); + d04 = _mm256_min_pd(d04, tmp); + + tmp = d06; + + d06 = _mm256_max_pd(d03, d06); + d03 = _mm256_min_pd(d03, tmp); + + tmp = d07; + + d07 = _mm256_max_pd(d02, d07); + d02 = _mm256_min_pd(d02, tmp); + + sort_04v_merge_ascending(d01, d02, d03, d04); + sort_03v_merge_ascending(d05, d06, d07); + } + static INLINE void sort_07v_descending(__m256d& d01, __m256d& d02, __m256d& d03, __m256d& d04, __m256d& d05, __m256d& d06, __m256d& d07) { + __m256d tmp; + + sort_04v_descending(d01, d02, d03, d04); + sort_03v_ascending(d05, d06, d07); + + tmp = d05; + + d05 = _mm256_max_pd(d04, d05); + d04 = _mm256_min_pd(d04, tmp); + + tmp = d06; + + d06 = _mm256_max_pd(d03, d06); + d03 = _mm256_min_pd(d03, tmp); + + tmp = d07; + + d07 = _mm256_max_pd(d02, d07); + d02 = _mm256_min_pd(d02, tmp); + + sort_04v_merge_descending(d01, d02, d03, d04); 
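// Annotation: every compound sorter above follows the classic bitonic
// recipe: sort the leading group one way and the trailing group the other,
// so that their concatenation forms a bitonic sequence; compare-exchange
// the groups pairwise from the middle outward (here d04<->d05, d03<->d06,
// d02<->d07), the element-wise min landing in the lower-numbered vector;
// then merge each group independently with the sort_XXv_merge_* kernels.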
+ sort_03v_merge_descending(d05, d06, d07); + } + static INLINE void sort_07v_merge_ascending(__m256d& d01, __m256d& d02, __m256d& d03, __m256d& d04, __m256d& d05, __m256d& d06, __m256d& d07) { + __m256d tmp; + + tmp = d01; + + d01 = _mm256_min_pd(d05, d01); + + d05 = _mm256_max_pd(d05, tmp); + + tmp = d02; + + d02 = _mm256_min_pd(d06, d02); + + d06 = _mm256_max_pd(d06, tmp); + + tmp = d03; + + d03 = _mm256_min_pd(d07, d03); + + d07 = _mm256_max_pd(d07, tmp); + + sort_04v_merge_ascending(d01, d02, d03, d04); + sort_03v_merge_ascending(d05, d06, d07); + } + static INLINE void sort_07v_merge_descending(__m256d& d01, __m256d& d02, __m256d& d03, __m256d& d04, __m256d& d05, __m256d& d06, __m256d& d07) { + __m256d tmp; + + tmp = d01; + + d01 = _mm256_min_pd(d05, d01); + + d05 = _mm256_max_pd(d05, tmp); + + tmp = d02; + + d02 = _mm256_min_pd(d06, d02); + + d06 = _mm256_max_pd(d06, tmp); + + tmp = d03; + + d03 = _mm256_min_pd(d07, d03); + + d07 = _mm256_max_pd(d07, tmp); + + sort_04v_merge_descending(d01, d02, d03, d04); + sort_03v_merge_descending(d05, d06, d07); + } + static INLINE void sort_08v_ascending(__m256d& d01, __m256d& d02, __m256d& d03, __m256d& d04, __m256d& d05, __m256d& d06, __m256d& d07, __m256d& d08) { + __m256d tmp; + + sort_04v_ascending(d01, d02, d03, d04); + sort_04v_descending(d05, d06, d07, d08); + + tmp = d05; + + d05 = _mm256_max_pd(d04, d05); + d04 = _mm256_min_pd(d04, tmp); + + tmp = d06; + + d06 = _mm256_max_pd(d03, d06); + d03 = _mm256_min_pd(d03, tmp); + + tmp = d07; + + d07 = _mm256_max_pd(d02, d07); + d02 = _mm256_min_pd(d02, tmp); + + tmp = d08; + + d08 = _mm256_max_pd(d01, d08); + d01 = _mm256_min_pd(d01, tmp); + + sort_04v_merge_ascending(d01, d02, d03, d04); + sort_04v_merge_ascending(d05, d06, d07, d08); + } + static INLINE void sort_08v_descending(__m256d& d01, __m256d& d02, __m256d& d03, __m256d& d04, __m256d& d05, __m256d& d06, __m256d& d07, __m256d& d08) { + __m256d tmp; + + sort_04v_descending(d01, d02, d03, d04); + sort_04v_ascending(d05, d06, d07, d08); + + tmp = d05; + + d05 = _mm256_max_pd(d04, d05); + d04 = _mm256_min_pd(d04, tmp); + + tmp = d06; + + d06 = _mm256_max_pd(d03, d06); + d03 = _mm256_min_pd(d03, tmp); + + tmp = d07; + + d07 = _mm256_max_pd(d02, d07); + d02 = _mm256_min_pd(d02, tmp); + + tmp = d08; + + d08 = _mm256_max_pd(d01, d08); + d01 = _mm256_min_pd(d01, tmp); + + sort_04v_merge_descending(d01, d02, d03, d04); + sort_04v_merge_descending(d05, d06, d07, d08); + } + static INLINE void sort_08v_merge_ascending(__m256d& d01, __m256d& d02, __m256d& d03, __m256d& d04, __m256d& d05, __m256d& d06, __m256d& d07, __m256d& d08) { + __m256d tmp; + + tmp = d01; + + d01 = _mm256_min_pd(d05, d01); + + d05 = _mm256_max_pd(d05, tmp); + + tmp = d02; + + d02 = _mm256_min_pd(d06, d02); + + d06 = _mm256_max_pd(d06, tmp); + + tmp = d03; + + d03 = _mm256_min_pd(d07, d03); + + d07 = _mm256_max_pd(d07, tmp); + + tmp = d04; + + d04 = _mm256_min_pd(d08, d04); + + d08 = _mm256_max_pd(d08, tmp); + + sort_04v_merge_ascending(d01, d02, d03, d04); + sort_04v_merge_ascending(d05, d06, d07, d08); + } + static INLINE void sort_08v_merge_descending(__m256d& d01, __m256d& d02, __m256d& d03, __m256d& d04, __m256d& d05, __m256d& d06, __m256d& d07, __m256d& d08) { + __m256d tmp; + + tmp = d01; + + d01 = _mm256_min_pd(d05, d01); + + d05 = _mm256_max_pd(d05, tmp); + + tmp = d02; + + d02 = _mm256_min_pd(d06, d02); + + d06 = _mm256_max_pd(d06, tmp); + + tmp = d03; + + d03 = _mm256_min_pd(d07, d03); + + d07 = _mm256_max_pd(d07, tmp); + + tmp = d04; + + d04 = _mm256_min_pd(d08, 
d04); + + d08 = _mm256_max_pd(d08, tmp); + + sort_04v_merge_descending(d01, d02, d03, d04); + sort_04v_merge_descending(d05, d06, d07, d08); + } + static INLINE void sort_09v_ascending(__m256d& d01, __m256d& d02, __m256d& d03, __m256d& d04, __m256d& d05, __m256d& d06, __m256d& d07, __m256d& d08, __m256d& d09) { + __m256d tmp; + + sort_08v_ascending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_01v_descending(d09); + + tmp = d09; + + d09 = _mm256_max_pd(d08, d09); + d08 = _mm256_min_pd(d08, tmp); + + sort_08v_merge_ascending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_01v_merge_ascending(d09); + } + static INLINE void sort_09v_descending(__m256d& d01, __m256d& d02, __m256d& d03, __m256d& d04, __m256d& d05, __m256d& d06, __m256d& d07, __m256d& d08, __m256d& d09) { + __m256d tmp; + + sort_08v_descending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_01v_ascending(d09); + + tmp = d09; + + d09 = _mm256_max_pd(d08, d09); + d08 = _mm256_min_pd(d08, tmp); + + sort_08v_merge_descending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_01v_merge_descending(d09); + } + static INLINE void sort_10v_ascending(__m256d& d01, __m256d& d02, __m256d& d03, __m256d& d04, __m256d& d05, __m256d& d06, __m256d& d07, __m256d& d08, __m256d& d09, __m256d& d10) { + __m256d tmp; + + sort_08v_ascending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_02v_descending(d09, d10); + + tmp = d09; + + d09 = _mm256_max_pd(d08, d09); + d08 = _mm256_min_pd(d08, tmp); + + tmp = d10; + + d10 = _mm256_max_pd(d07, d10); + d07 = _mm256_min_pd(d07, tmp); + + sort_08v_merge_ascending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_02v_merge_ascending(d09, d10); + } + static INLINE void sort_10v_descending(__m256d& d01, __m256d& d02, __m256d& d03, __m256d& d04, __m256d& d05, __m256d& d06, __m256d& d07, __m256d& d08, __m256d& d09, __m256d& d10) { + __m256d tmp; + + sort_08v_descending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_02v_ascending(d09, d10); + + tmp = d09; + + d09 = _mm256_max_pd(d08, d09); + d08 = _mm256_min_pd(d08, tmp); + + tmp = d10; + + d10 = _mm256_max_pd(d07, d10); + d07 = _mm256_min_pd(d07, tmp); + + sort_08v_merge_descending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_02v_merge_descending(d09, d10); + } + static INLINE void sort_11v_ascending(__m256d& d01, __m256d& d02, __m256d& d03, __m256d& d04, __m256d& d05, __m256d& d06, __m256d& d07, __m256d& d08, __m256d& d09, __m256d& d10, __m256d& d11) { + __m256d tmp; + + sort_08v_ascending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_03v_descending(d09, d10, d11); + + tmp = d09; + + d09 = _mm256_max_pd(d08, d09); + d08 = _mm256_min_pd(d08, tmp); + + tmp = d10; + + d10 = _mm256_max_pd(d07, d10); + d07 = _mm256_min_pd(d07, tmp); + + tmp = d11; + + d11 = _mm256_max_pd(d06, d11); + d06 = _mm256_min_pd(d06, tmp); + + sort_08v_merge_ascending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_03v_merge_ascending(d09, d10, d11); + } + static INLINE void sort_11v_descending(__m256d& d01, __m256d& d02, __m256d& d03, __m256d& d04, __m256d& d05, __m256d& d06, __m256d& d07, __m256d& d08, __m256d& d09, __m256d& d10, __m256d& d11) { + __m256d tmp; + + sort_08v_descending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_03v_ascending(d09, d10, d11); + + tmp = d09; + + d09 = _mm256_max_pd(d08, d09); + d08 = _mm256_min_pd(d08, tmp); + + tmp = d10; + + d10 = _mm256_max_pd(d07, d10); + d07 = _mm256_min_pd(d07, tmp); + + tmp = d11; + + d11 = _mm256_max_pd(d06, d11); + d06 = _mm256_min_pd(d06, tmp); + + sort_08v_merge_descending(d01, d02, d03, d04, d05, d06, d07, d08); + 
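// Annotation: from nine vectors upward the leading group is always the
// full eight-vector block, so after the crossover only the 8v-and-narrower
// merge kernels are ever needed. This is presumably why the generator caps
// emitted mergers at largest_merge_variant_needed rather than producing
// sort_09v_merge_* .. sort_16v_merge_* variants.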
sort_03v_merge_descending(d09, d10, d11); + } + static INLINE void sort_12v_ascending(__m256d& d01, __m256d& d02, __m256d& d03, __m256d& d04, __m256d& d05, __m256d& d06, __m256d& d07, __m256d& d08, __m256d& d09, __m256d& d10, __m256d& d11, __m256d& d12) { + __m256d tmp; + + sort_08v_ascending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_04v_descending(d09, d10, d11, d12); + + tmp = d09; + + d09 = _mm256_max_pd(d08, d09); + d08 = _mm256_min_pd(d08, tmp); + + tmp = d10; + + d10 = _mm256_max_pd(d07, d10); + d07 = _mm256_min_pd(d07, tmp); + + tmp = d11; + + d11 = _mm256_max_pd(d06, d11); + d06 = _mm256_min_pd(d06, tmp); + + tmp = d12; + + d12 = _mm256_max_pd(d05, d12); + d05 = _mm256_min_pd(d05, tmp); + + sort_08v_merge_ascending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_04v_merge_ascending(d09, d10, d11, d12); + } + static INLINE void sort_12v_descending(__m256d& d01, __m256d& d02, __m256d& d03, __m256d& d04, __m256d& d05, __m256d& d06, __m256d& d07, __m256d& d08, __m256d& d09, __m256d& d10, __m256d& d11, __m256d& d12) { + __m256d tmp; + + sort_08v_descending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_04v_ascending(d09, d10, d11, d12); + + tmp = d09; + + d09 = _mm256_max_pd(d08, d09); + d08 = _mm256_min_pd(d08, tmp); + + tmp = d10; + + d10 = _mm256_max_pd(d07, d10); + d07 = _mm256_min_pd(d07, tmp); + + tmp = d11; + + d11 = _mm256_max_pd(d06, d11); + d06 = _mm256_min_pd(d06, tmp); + + tmp = d12; + + d12 = _mm256_max_pd(d05, d12); + d05 = _mm256_min_pd(d05, tmp); + + sort_08v_merge_descending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_04v_merge_descending(d09, d10, d11, d12); + } + static INLINE void sort_13v_ascending(__m256d& d01, __m256d& d02, __m256d& d03, __m256d& d04, __m256d& d05, __m256d& d06, __m256d& d07, __m256d& d08, __m256d& d09, __m256d& d10, __m256d& d11, __m256d& d12, __m256d& d13) { + __m256d tmp; + + sort_08v_ascending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_05v_descending(d09, d10, d11, d12, d13); + + tmp = d09; + + d09 = _mm256_max_pd(d08, d09); + d08 = _mm256_min_pd(d08, tmp); + + tmp = d10; + + d10 = _mm256_max_pd(d07, d10); + d07 = _mm256_min_pd(d07, tmp); + + tmp = d11; + + d11 = _mm256_max_pd(d06, d11); + d06 = _mm256_min_pd(d06, tmp); + + tmp = d12; + + d12 = _mm256_max_pd(d05, d12); + d05 = _mm256_min_pd(d05, tmp); + + tmp = d13; + + d13 = _mm256_max_pd(d04, d13); + d04 = _mm256_min_pd(d04, tmp); + + sort_08v_merge_ascending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_05v_merge_ascending(d09, d10, d11, d12, d13); + } + static INLINE void sort_13v_descending(__m256d& d01, __m256d& d02, __m256d& d03, __m256d& d04, __m256d& d05, __m256d& d06, __m256d& d07, __m256d& d08, __m256d& d09, __m256d& d10, __m256d& d11, __m256d& d12, __m256d& d13) { + __m256d tmp; + + sort_08v_descending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_05v_ascending(d09, d10, d11, d12, d13); + + tmp = d09; + + d09 = _mm256_max_pd(d08, d09); + d08 = _mm256_min_pd(d08, tmp); + + tmp = d10; + + d10 = _mm256_max_pd(d07, d10); + d07 = _mm256_min_pd(d07, tmp); + + tmp = d11; + + d11 = _mm256_max_pd(d06, d11); + d06 = _mm256_min_pd(d06, tmp); + + tmp = d12; + + d12 = _mm256_max_pd(d05, d12); + d05 = _mm256_min_pd(d05, tmp); + + tmp = d13; + + d13 = _mm256_max_pd(d04, d13); + d04 = _mm256_min_pd(d04, tmp); + + sort_08v_merge_descending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_05v_merge_descending(d09, d10, d11, d12, d13); + } + static INLINE void sort_14v_ascending(__m256d& d01, __m256d& d02, __m256d& d03, __m256d& d04, __m256d& d05, __m256d& d06, __m256d& d07, 
__m256d& d08, __m256d& d09, __m256d& d10, __m256d& d11, __m256d& d12, __m256d& d13, __m256d& d14) { + __m256d tmp; + + sort_08v_ascending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_06v_descending(d09, d10, d11, d12, d13, d14); + + tmp = d09; + + d09 = _mm256_max_pd(d08, d09); + d08 = _mm256_min_pd(d08, tmp); + + tmp = d10; + + d10 = _mm256_max_pd(d07, d10); + d07 = _mm256_min_pd(d07, tmp); + + tmp = d11; + + d11 = _mm256_max_pd(d06, d11); + d06 = _mm256_min_pd(d06, tmp); + + tmp = d12; + + d12 = _mm256_max_pd(d05, d12); + d05 = _mm256_min_pd(d05, tmp); + + tmp = d13; + + d13 = _mm256_max_pd(d04, d13); + d04 = _mm256_min_pd(d04, tmp); + + tmp = d14; + + d14 = _mm256_max_pd(d03, d14); + d03 = _mm256_min_pd(d03, tmp); + + sort_08v_merge_ascending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_06v_merge_ascending(d09, d10, d11, d12, d13, d14); + } + static INLINE void sort_14v_descending(__m256d& d01, __m256d& d02, __m256d& d03, __m256d& d04, __m256d& d05, __m256d& d06, __m256d& d07, __m256d& d08, __m256d& d09, __m256d& d10, __m256d& d11, __m256d& d12, __m256d& d13, __m256d& d14) { + __m256d tmp; + + sort_08v_descending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_06v_ascending(d09, d10, d11, d12, d13, d14); + + tmp = d09; + + d09 = _mm256_max_pd(d08, d09); + d08 = _mm256_min_pd(d08, tmp); + + tmp = d10; + + d10 = _mm256_max_pd(d07, d10); + d07 = _mm256_min_pd(d07, tmp); + + tmp = d11; + + d11 = _mm256_max_pd(d06, d11); + d06 = _mm256_min_pd(d06, tmp); + + tmp = d12; + + d12 = _mm256_max_pd(d05, d12); + d05 = _mm256_min_pd(d05, tmp); + + tmp = d13; + + d13 = _mm256_max_pd(d04, d13); + d04 = _mm256_min_pd(d04, tmp); + + tmp = d14; + + d14 = _mm256_max_pd(d03, d14); + d03 = _mm256_min_pd(d03, tmp); + + sort_08v_merge_descending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_06v_merge_descending(d09, d10, d11, d12, d13, d14); + } + static INLINE void sort_15v_ascending(__m256d& d01, __m256d& d02, __m256d& d03, __m256d& d04, __m256d& d05, __m256d& d06, __m256d& d07, __m256d& d08, __m256d& d09, __m256d& d10, __m256d& d11, __m256d& d12, __m256d& d13, __m256d& d14, __m256d& d15) { + __m256d tmp; + + sort_08v_ascending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_07v_descending(d09, d10, d11, d12, d13, d14, d15); + + tmp = d09; + + d09 = _mm256_max_pd(d08, d09); + d08 = _mm256_min_pd(d08, tmp); + + tmp = d10; + + d10 = _mm256_max_pd(d07, d10); + d07 = _mm256_min_pd(d07, tmp); + + tmp = d11; + + d11 = _mm256_max_pd(d06, d11); + d06 = _mm256_min_pd(d06, tmp); + + tmp = d12; + + d12 = _mm256_max_pd(d05, d12); + d05 = _mm256_min_pd(d05, tmp); + + tmp = d13; + + d13 = _mm256_max_pd(d04, d13); + d04 = _mm256_min_pd(d04, tmp); + + tmp = d14; + + d14 = _mm256_max_pd(d03, d14); + d03 = _mm256_min_pd(d03, tmp); + + tmp = d15; + + d15 = _mm256_max_pd(d02, d15); + d02 = _mm256_min_pd(d02, tmp); + + sort_08v_merge_ascending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_07v_merge_ascending(d09, d10, d11, d12, d13, d14, d15); + } + static INLINE void sort_15v_descending(__m256d& d01, __m256d& d02, __m256d& d03, __m256d& d04, __m256d& d05, __m256d& d06, __m256d& d07, __m256d& d08, __m256d& d09, __m256d& d10, __m256d& d11, __m256d& d12, __m256d& d13, __m256d& d14, __m256d& d15) { + __m256d tmp; + + sort_08v_descending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_07v_ascending(d09, d10, d11, d12, d13, d14, d15); + + tmp = d09; + + d09 = _mm256_max_pd(d08, d09); + d08 = _mm256_min_pd(d08, tmp); + + tmp = d10; + + d10 = _mm256_max_pd(d07, d10); + d07 = _mm256_min_pd(d07, tmp); + + tmp = d11; + + d11 = 
_mm256_max_pd(d06, d11); + d06 = _mm256_min_pd(d06, tmp); + + tmp = d12; + + d12 = _mm256_max_pd(d05, d12); + d05 = _mm256_min_pd(d05, tmp); + + tmp = d13; + + d13 = _mm256_max_pd(d04, d13); + d04 = _mm256_min_pd(d04, tmp); + + tmp = d14; + + d14 = _mm256_max_pd(d03, d14); + d03 = _mm256_min_pd(d03, tmp); + + tmp = d15; + + d15 = _mm256_max_pd(d02, d15); + d02 = _mm256_min_pd(d02, tmp); + + sort_08v_merge_descending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_07v_merge_descending(d09, d10, d11, d12, d13, d14, d15); + } + static INLINE void sort_16v_ascending(__m256d& d01, __m256d& d02, __m256d& d03, __m256d& d04, __m256d& d05, __m256d& d06, __m256d& d07, __m256d& d08, __m256d& d09, __m256d& d10, __m256d& d11, __m256d& d12, __m256d& d13, __m256d& d14, __m256d& d15, __m256d& d16) { + __m256d tmp; + + sort_08v_ascending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_08v_descending(d09, d10, d11, d12, d13, d14, d15, d16); + + tmp = d09; + + d09 = _mm256_max_pd(d08, d09); + d08 = _mm256_min_pd(d08, tmp); + + tmp = d10; + + d10 = _mm256_max_pd(d07, d10); + d07 = _mm256_min_pd(d07, tmp); + + tmp = d11; + + d11 = _mm256_max_pd(d06, d11); + d06 = _mm256_min_pd(d06, tmp); + + tmp = d12; + + d12 = _mm256_max_pd(d05, d12); + d05 = _mm256_min_pd(d05, tmp); + + tmp = d13; + + d13 = _mm256_max_pd(d04, d13); + d04 = _mm256_min_pd(d04, tmp); + + tmp = d14; + + d14 = _mm256_max_pd(d03, d14); + d03 = _mm256_min_pd(d03, tmp); + + tmp = d15; + + d15 = _mm256_max_pd(d02, d15); + d02 = _mm256_min_pd(d02, tmp); + + tmp = d16; + + d16 = _mm256_max_pd(d01, d16); + d01 = _mm256_min_pd(d01, tmp); + + sort_08v_merge_ascending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_08v_merge_ascending(d09, d10, d11, d12, d13, d14, d15, d16); + } + static INLINE void sort_16v_descending(__m256d& d01, __m256d& d02, __m256d& d03, __m256d& d04, __m256d& d05, __m256d& d06, __m256d& d07, __m256d& d08, __m256d& d09, __m256d& d10, __m256d& d11, __m256d& d12, __m256d& d13, __m256d& d14, __m256d& d15, __m256d& d16) { + __m256d tmp; + + sort_08v_descending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_08v_ascending(d09, d10, d11, d12, d13, d14, d15, d16); + + tmp = d09; + + d09 = _mm256_max_pd(d08, d09); + d08 = _mm256_min_pd(d08, tmp); + + tmp = d10; + + d10 = _mm256_max_pd(d07, d10); + d07 = _mm256_min_pd(d07, tmp); + + tmp = d11; + + d11 = _mm256_max_pd(d06, d11); + d06 = _mm256_min_pd(d06, tmp); + + tmp = d12; + + d12 = _mm256_max_pd(d05, d12); + d05 = _mm256_min_pd(d05, tmp); + + tmp = d13; + + d13 = _mm256_max_pd(d04, d13); + d04 = _mm256_min_pd(d04, tmp); + + tmp = d14; + + d14 = _mm256_max_pd(d03, d14); + d03 = _mm256_min_pd(d03, tmp); + + tmp = d15; + + d15 = _mm256_max_pd(d02, d15); + d02 = _mm256_min_pd(d02, tmp); + + tmp = d16; + + d16 = _mm256_max_pd(d01, d16); + d01 = _mm256_min_pd(d01, tmp); + + sort_08v_merge_descending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_08v_merge_descending(d09, d10, d11, d12, d13, d14, d15, d16); + } + +static NOINLINE void sort_01v(double *ptr) { + __m256d d01 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 0)); + sort_01v_ascending(d01); + _mm256_storeu_pd((double *) ((__m256d *) ptr + 0), d01); +} + +static NOINLINE void sort_02v(double *ptr) { + __m256d d01 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 0)); + __m256d d02 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 1)); + sort_02v_ascending(d01, d02); + _mm256_storeu_pd((double *) ((__m256d *) ptr + 0), d01); + _mm256_storeu_pd((double *) ((__m256d *) ptr + 1), d02); +} + +static NOINLINE 
void sort_03v(double *ptr) { + __m256d d01 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 0)); + __m256d d02 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 1)); + __m256d d03 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 2)); + sort_03v_ascending(d01, d02, d03); + _mm256_storeu_pd((double *) ((__m256d *) ptr + 0), d01); + _mm256_storeu_pd((double *) ((__m256d *) ptr + 1), d02); + _mm256_storeu_pd((double *) ((__m256d *) ptr + 2), d03); +} + +static NOINLINE void sort_04v(double *ptr) { + __m256d d01 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 0)); + __m256d d02 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 1)); + __m256d d03 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 2)); + __m256d d04 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 3)); + sort_04v_ascending(d01, d02, d03, d04); + _mm256_storeu_pd((double *) ((__m256d *) ptr + 0), d01); + _mm256_storeu_pd((double *) ((__m256d *) ptr + 1), d02); + _mm256_storeu_pd((double *) ((__m256d *) ptr + 2), d03); + _mm256_storeu_pd((double *) ((__m256d *) ptr + 3), d04); +} + +static NOINLINE void sort_05v(double *ptr) { + __m256d d01 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 0)); + __m256d d02 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 1)); + __m256d d03 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 2)); + __m256d d04 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 3)); + __m256d d05 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 4)); + sort_05v_ascending(d01, d02, d03, d04, d05); + _mm256_storeu_pd((double *) ((__m256d *) ptr + 0), d01); + _mm256_storeu_pd((double *) ((__m256d *) ptr + 1), d02); + _mm256_storeu_pd((double *) ((__m256d *) ptr + 2), d03); + _mm256_storeu_pd((double *) ((__m256d *) ptr + 3), d04); + _mm256_storeu_pd((double *) ((__m256d *) ptr + 4), d05); +} + +static NOINLINE void sort_06v(double *ptr) { + __m256d d01 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 0)); + __m256d d02 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 1)); + __m256d d03 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 2)); + __m256d d04 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 3)); + __m256d d05 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 4)); + __m256d d06 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 5)); + sort_06v_ascending(d01, d02, d03, d04, d05, d06); + _mm256_storeu_pd((double *) ((__m256d *) ptr + 0), d01); + _mm256_storeu_pd((double *) ((__m256d *) ptr + 1), d02); + _mm256_storeu_pd((double *) ((__m256d *) ptr + 2), d03); + _mm256_storeu_pd((double *) ((__m256d *) ptr + 3), d04); + _mm256_storeu_pd((double *) ((__m256d *) ptr + 4), d05); + _mm256_storeu_pd((double *) ((__m256d *) ptr + 5), d06); +} + +static NOINLINE void sort_07v(double *ptr) { + __m256d d01 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 0)); + __m256d d02 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 1)); + __m256d d03 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 2)); + __m256d d04 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 3)); + __m256d d05 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 4)); + __m256d d06 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 5)); + __m256d d07 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 6)); + sort_07v_ascending(d01, d02, d03, d04, d05, d06, d07); + _mm256_storeu_pd((double *) ((__m256d *) ptr + 
0), d01); + _mm256_storeu_pd((double *) ((__m256d *) ptr + 1), d02); + _mm256_storeu_pd((double *) ((__m256d *) ptr + 2), d03); + _mm256_storeu_pd((double *) ((__m256d *) ptr + 3), d04); + _mm256_storeu_pd((double *) ((__m256d *) ptr + 4), d05); + _mm256_storeu_pd((double *) ((__m256d *) ptr + 5), d06); + _mm256_storeu_pd((double *) ((__m256d *) ptr + 6), d07); +} + +static NOINLINE void sort_08v(double *ptr) { + __m256d d01 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 0)); + __m256d d02 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 1)); + __m256d d03 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 2)); + __m256d d04 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 3)); + __m256d d05 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 4)); + __m256d d06 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 5)); + __m256d d07 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 6)); + __m256d d08 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 7)); + sort_08v_ascending(d01, d02, d03, d04, d05, d06, d07, d08); + _mm256_storeu_pd((double *) ((__m256d *) ptr + 0), d01); + _mm256_storeu_pd((double *) ((__m256d *) ptr + 1), d02); + _mm256_storeu_pd((double *) ((__m256d *) ptr + 2), d03); + _mm256_storeu_pd((double *) ((__m256d *) ptr + 3), d04); + _mm256_storeu_pd((double *) ((__m256d *) ptr + 4), d05); + _mm256_storeu_pd((double *) ((__m256d *) ptr + 5), d06); + _mm256_storeu_pd((double *) ((__m256d *) ptr + 6), d07); + _mm256_storeu_pd((double *) ((__m256d *) ptr + 7), d08); +} + +static NOINLINE void sort_09v(double *ptr) { + __m256d d01 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 0)); + __m256d d02 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 1)); + __m256d d03 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 2)); + __m256d d04 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 3)); + __m256d d05 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 4)); + __m256d d06 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 5)); + __m256d d07 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 6)); + __m256d d08 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 7)); + __m256d d09 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 8)); + sort_09v_ascending(d01, d02, d03, d04, d05, d06, d07, d08, d09); + _mm256_storeu_pd((double *) ((__m256d *) ptr + 0), d01); + _mm256_storeu_pd((double *) ((__m256d *) ptr + 1), d02); + _mm256_storeu_pd((double *) ((__m256d *) ptr + 2), d03); + _mm256_storeu_pd((double *) ((__m256d *) ptr + 3), d04); + _mm256_storeu_pd((double *) ((__m256d *) ptr + 4), d05); + _mm256_storeu_pd((double *) ((__m256d *) ptr + 5), d06); + _mm256_storeu_pd((double *) ((__m256d *) ptr + 6), d07); + _mm256_storeu_pd((double *) ((__m256d *) ptr + 7), d08); + _mm256_storeu_pd((double *) ((__m256d *) ptr + 8), d09); +} + +static NOINLINE void sort_10v(double *ptr) { + __m256d d01 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 0)); + __m256d d02 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 1)); + __m256d d03 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 2)); + __m256d d04 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 3)); + __m256d d05 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 4)); + __m256d d06 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 5)); + __m256d d07 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 6)); + __m256d 
d08 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 7)); + __m256d d09 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 8)); + __m256d d10 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 9)); + sort_10v_ascending(d01, d02, d03, d04, d05, d06, d07, d08, d09, d10); + _mm256_storeu_pd((double *) ((__m256d *) ptr + 0), d01); + _mm256_storeu_pd((double *) ((__m256d *) ptr + 1), d02); + _mm256_storeu_pd((double *) ((__m256d *) ptr + 2), d03); + _mm256_storeu_pd((double *) ((__m256d *) ptr + 3), d04); + _mm256_storeu_pd((double *) ((__m256d *) ptr + 4), d05); + _mm256_storeu_pd((double *) ((__m256d *) ptr + 5), d06); + _mm256_storeu_pd((double *) ((__m256d *) ptr + 6), d07); + _mm256_storeu_pd((double *) ((__m256d *) ptr + 7), d08); + _mm256_storeu_pd((double *) ((__m256d *) ptr + 8), d09); + _mm256_storeu_pd((double *) ((__m256d *) ptr + 9), d10); +} + +static NOINLINE void sort_11v(double *ptr) { + __m256d d01 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 0)); + __m256d d02 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 1)); + __m256d d03 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 2)); + __m256d d04 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 3)); + __m256d d05 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 4)); + __m256d d06 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 5)); + __m256d d07 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 6)); + __m256d d08 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 7)); + __m256d d09 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 8)); + __m256d d10 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 9)); + __m256d d11 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 10)); + sort_11v_ascending(d01, d02, d03, d04, d05, d06, d07, d08, d09, d10, d11); + _mm256_storeu_pd((double *) ((__m256d *) ptr + 0), d01); + _mm256_storeu_pd((double *) ((__m256d *) ptr + 1), d02); + _mm256_storeu_pd((double *) ((__m256d *) ptr + 2), d03); + _mm256_storeu_pd((double *) ((__m256d *) ptr + 3), d04); + _mm256_storeu_pd((double *) ((__m256d *) ptr + 4), d05); + _mm256_storeu_pd((double *) ((__m256d *) ptr + 5), d06); + _mm256_storeu_pd((double *) ((__m256d *) ptr + 6), d07); + _mm256_storeu_pd((double *) ((__m256d *) ptr + 7), d08); + _mm256_storeu_pd((double *) ((__m256d *) ptr + 8), d09); + _mm256_storeu_pd((double *) ((__m256d *) ptr + 9), d10); + _mm256_storeu_pd((double *) ((__m256d *) ptr + 10), d11); +} + +static NOINLINE void sort_12v(double *ptr) { + __m256d d01 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 0)); + __m256d d02 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 1)); + __m256d d03 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 2)); + __m256d d04 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 3)); + __m256d d05 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 4)); + __m256d d06 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 5)); + __m256d d07 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 6)); + __m256d d08 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 7)); + __m256d d09 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 8)); + __m256d d10 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 9)); + __m256d d11 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 10)); + __m256d d12 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 11)); + 
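// Annotation: each NOINLINE sort_XXv entry point has the same shape seen
// here: unaligned loads of length/4 vectors into ymm registers, one fully
// inlined in-register network, then unaligned stores back. NOINLINE keeps
// these bodies out of the switch dispatcher at the end of the file, while
// INLINE flattens each network into straight-line, branch-free code.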
sort_12v_ascending(d01, d02, d03, d04, d05, d06, d07, d08, d09, d10, d11, d12); + _mm256_storeu_pd((double *) ((__m256d *) ptr + 0), d01); + _mm256_storeu_pd((double *) ((__m256d *) ptr + 1), d02); + _mm256_storeu_pd((double *) ((__m256d *) ptr + 2), d03); + _mm256_storeu_pd((double *) ((__m256d *) ptr + 3), d04); + _mm256_storeu_pd((double *) ((__m256d *) ptr + 4), d05); + _mm256_storeu_pd((double *) ((__m256d *) ptr + 5), d06); + _mm256_storeu_pd((double *) ((__m256d *) ptr + 6), d07); + _mm256_storeu_pd((double *) ((__m256d *) ptr + 7), d08); + _mm256_storeu_pd((double *) ((__m256d *) ptr + 8), d09); + _mm256_storeu_pd((double *) ((__m256d *) ptr + 9), d10); + _mm256_storeu_pd((double *) ((__m256d *) ptr + 10), d11); + _mm256_storeu_pd((double *) ((__m256d *) ptr + 11), d12); +} + +static NOINLINE void sort_13v(double *ptr) { + __m256d d01 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 0)); + __m256d d02 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 1)); + __m256d d03 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 2)); + __m256d d04 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 3)); + __m256d d05 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 4)); + __m256d d06 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 5)); + __m256d d07 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 6)); + __m256d d08 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 7)); + __m256d d09 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 8)); + __m256d d10 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 9)); + __m256d d11 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 10)); + __m256d d12 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 11)); + __m256d d13 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 12)); + sort_13v_ascending(d01, d02, d03, d04, d05, d06, d07, d08, d09, d10, d11, d12, d13); + _mm256_storeu_pd((double *) ((__m256d *) ptr + 0), d01); + _mm256_storeu_pd((double *) ((__m256d *) ptr + 1), d02); + _mm256_storeu_pd((double *) ((__m256d *) ptr + 2), d03); + _mm256_storeu_pd((double *) ((__m256d *) ptr + 3), d04); + _mm256_storeu_pd((double *) ((__m256d *) ptr + 4), d05); + _mm256_storeu_pd((double *) ((__m256d *) ptr + 5), d06); + _mm256_storeu_pd((double *) ((__m256d *) ptr + 6), d07); + _mm256_storeu_pd((double *) ((__m256d *) ptr + 7), d08); + _mm256_storeu_pd((double *) ((__m256d *) ptr + 8), d09); + _mm256_storeu_pd((double *) ((__m256d *) ptr + 9), d10); + _mm256_storeu_pd((double *) ((__m256d *) ptr + 10), d11); + _mm256_storeu_pd((double *) ((__m256d *) ptr + 11), d12); + _mm256_storeu_pd((double *) ((__m256d *) ptr + 12), d13); +} + +static NOINLINE void sort_14v(double *ptr) { + __m256d d01 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 0)); + __m256d d02 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 1)); + __m256d d03 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 2)); + __m256d d04 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 3)); + __m256d d05 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 4)); + __m256d d06 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 5)); + __m256d d07 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 6)); + __m256d d08 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 7)); + __m256d d09 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 8)); + __m256d d10 = _mm256_loadu_pd((double const *) ((__m256d const 
*) ptr + 9)); + __m256d d11 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 10)); + __m256d d12 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 11)); + __m256d d13 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 12)); + __m256d d14 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 13)); + sort_14v_ascending(d01, d02, d03, d04, d05, d06, d07, d08, d09, d10, d11, d12, d13, d14); + _mm256_storeu_pd((double *) ((__m256d *) ptr + 0), d01); + _mm256_storeu_pd((double *) ((__m256d *) ptr + 1), d02); + _mm256_storeu_pd((double *) ((__m256d *) ptr + 2), d03); + _mm256_storeu_pd((double *) ((__m256d *) ptr + 3), d04); + _mm256_storeu_pd((double *) ((__m256d *) ptr + 4), d05); + _mm256_storeu_pd((double *) ((__m256d *) ptr + 5), d06); + _mm256_storeu_pd((double *) ((__m256d *) ptr + 6), d07); + _mm256_storeu_pd((double *) ((__m256d *) ptr + 7), d08); + _mm256_storeu_pd((double *) ((__m256d *) ptr + 8), d09); + _mm256_storeu_pd((double *) ((__m256d *) ptr + 9), d10); + _mm256_storeu_pd((double *) ((__m256d *) ptr + 10), d11); + _mm256_storeu_pd((double *) ((__m256d *) ptr + 11), d12); + _mm256_storeu_pd((double *) ((__m256d *) ptr + 12), d13); + _mm256_storeu_pd((double *) ((__m256d *) ptr + 13), d14); +} + +static NOINLINE void sort_15v(double *ptr) { + __m256d d01 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 0)); + __m256d d02 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 1)); + __m256d d03 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 2)); + __m256d d04 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 3)); + __m256d d05 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 4)); + __m256d d06 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 5)); + __m256d d07 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 6)); + __m256d d08 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 7)); + __m256d d09 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 8)); + __m256d d10 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 9)); + __m256d d11 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 10)); + __m256d d12 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 11)); + __m256d d13 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 12)); + __m256d d14 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 13)); + __m256d d15 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 14)); + sort_15v_ascending(d01, d02, d03, d04, d05, d06, d07, d08, d09, d10, d11, d12, d13, d14, d15); + _mm256_storeu_pd((double *) ((__m256d *) ptr + 0), d01); + _mm256_storeu_pd((double *) ((__m256d *) ptr + 1), d02); + _mm256_storeu_pd((double *) ((__m256d *) ptr + 2), d03); + _mm256_storeu_pd((double *) ((__m256d *) ptr + 3), d04); + _mm256_storeu_pd((double *) ((__m256d *) ptr + 4), d05); + _mm256_storeu_pd((double *) ((__m256d *) ptr + 5), d06); + _mm256_storeu_pd((double *) ((__m256d *) ptr + 6), d07); + _mm256_storeu_pd((double *) ((__m256d *) ptr + 7), d08); + _mm256_storeu_pd((double *) ((__m256d *) ptr + 8), d09); + _mm256_storeu_pd((double *) ((__m256d *) ptr + 9), d10); + _mm256_storeu_pd((double *) ((__m256d *) ptr + 10), d11); + _mm256_storeu_pd((double *) ((__m256d *) ptr + 11), d12); + _mm256_storeu_pd((double *) ((__m256d *) ptr + 12), d13); + _mm256_storeu_pd((double *) ((__m256d *) ptr + 13), d14); + _mm256_storeu_pd((double *) ((__m256d *) ptr + 14), d15); +} + +static NOINLINE void sort_16v(double *ptr) { + __m256d d01 = 
_mm256_loadu_pd((double const *) ((__m256d const *) ptr + 0)); + __m256d d02 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 1)); + __m256d d03 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 2)); + __m256d d04 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 3)); + __m256d d05 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 4)); + __m256d d06 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 5)); + __m256d d07 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 6)); + __m256d d08 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 7)); + __m256d d09 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 8)); + __m256d d10 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 9)); + __m256d d11 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 10)); + __m256d d12 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 11)); + __m256d d13 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 12)); + __m256d d14 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 13)); + __m256d d15 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 14)); + __m256d d16 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 15)); + sort_16v_ascending(d01, d02, d03, d04, d05, d06, d07, d08, d09, d10, d11, d12, d13, d14, d15, d16); + _mm256_storeu_pd((double *) ((__m256d *) ptr + 0), d01); + _mm256_storeu_pd((double *) ((__m256d *) ptr + 1), d02); + _mm256_storeu_pd((double *) ((__m256d *) ptr + 2), d03); + _mm256_storeu_pd((double *) ((__m256d *) ptr + 3), d04); + _mm256_storeu_pd((double *) ((__m256d *) ptr + 4), d05); + _mm256_storeu_pd((double *) ((__m256d *) ptr + 5), d06); + _mm256_storeu_pd((double *) ((__m256d *) ptr + 6), d07); + _mm256_storeu_pd((double *) ((__m256d *) ptr + 7), d08); + _mm256_storeu_pd((double *) ((__m256d *) ptr + 8), d09); + _mm256_storeu_pd((double *) ((__m256d *) ptr + 9), d10); + _mm256_storeu_pd((double *) ((__m256d *) ptr + 10), d11); + _mm256_storeu_pd((double *) ((__m256d *) ptr + 11), d12); + _mm256_storeu_pd((double *) ((__m256d *) ptr + 12), d13); + _mm256_storeu_pd((double *) ((__m256d *) ptr + 13), d14); + _mm256_storeu_pd((double *) ((__m256d *) ptr + 14), d15); + _mm256_storeu_pd((double *) ((__m256d *) ptr + 15), d16); +} + static void sort(double *ptr, size_t length) { + const int N = 4; + + switch(length / N) { + case 1: sort_01v(ptr); break; + case 2: sort_02v(ptr); break; + case 3: sort_03v(ptr); break; + case 4: sort_04v(ptr); break; + case 5: sort_05v(ptr); break; + case 6: sort_06v(ptr); break; + case 7: sort_07v(ptr); break; + case 8: sort_08v(ptr); break; + case 9: sort_09v(ptr); break; + case 10: sort_10v(ptr); break; + case 11: sort_11v(ptr); break; + case 12: sort_12v(ptr); break; + case 13: sort_13v(ptr); break; + case 14: sort_14v(ptr); break; + case 15: sort_15v(ptr); break; + case 16: sort_16v(ptr); break; + } +} +}; +} +} +#endif diff --git a/src/coreclr/src/gc/smallsort/bitonic_sort.AVX2.float.generated.h b/src/coreclr/src/gc/smallsort/bitonic_sort.AVX2.float.generated.h new file mode 100644 index 00000000000000..358727f582990c --- /dev/null +++ b/src/coreclr/src/gc/smallsort/bitonic_sort.AVX2.float.generated.h @@ -0,0 +1,1532 @@ +///////////////////////////////////////////////////////////////////////////// +//// +// This file was auto-generated by a tool at 2020-05-31 19:46:17 +// +// It is recommended you DO NOT directly edit this file but instead edit +// the code-generator that generated this source file instead. 
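//
// Annotation: float counterpart of the double header above. A __m256 holds
// 8 floats, so N becomes 8 and sort() covers 8..128 elements in steps of 8;
// the kernels shuffle via epi32/blend_epi32 through the s2i/i2s casts. Note
// that the dispatcher has no default case: a length that is not a positive
// multiple of N, or that exceeds 16*N, appears to be the caller's
// responsibility (e.g. via padding) -- the switch simply falls through.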
+///////////////////////////////////////////////////////////////////////////// + +#ifndef BITONIC_SORT_AVX2_FLOAT_H +#define BITONIC_SORT_AVX2_FLOAT_H + +#include +#include "bitonic_sort.h" + +#ifdef _MSC_VER + // MSVC + #define INLINE __forceinline + #define NOINLINE __declspec(noinline) +#else + // GCC + Clang + #define INLINE __attribute__((always_inline)) + #define NOINLINE __attribute__((noinline)) +#endif + +#define i2d _mm256_castsi256_pd +#define d2i _mm256_castpd_si256 +#define i2s _mm256_castsi256_ps +#define s2i _mm256_castps_si256 +#define s2d _mm256_castps_pd +#define d2s _mm256_castpd_ps + +namespace gcsort { +namespace smallsort { +template<> struct bitonic { +public: + + static INLINE void sort_01v_ascending(__m256& d01) { + __m256 min, max, s; + + s = i2s(_mm256_shuffle_epi32(s2i(d01), 0xB1)); + + min = _mm256_min_ps(s, d01); + max = _mm256_max_ps(s, d01); + d01 = i2s(_mm256_blend_epi32(s2i(min), s2i(max), 0xAA)); + + s = i2s(_mm256_shuffle_epi32(s2i(d01), 0x1B)); + + min = _mm256_min_ps(s, d01); + max = _mm256_max_ps(s, d01); + d01 = i2s(_mm256_blend_epi32(s2i(min), s2i(max), 0xCC)); + + s = i2s(_mm256_shuffle_epi32(s2i(d01), 0xB1)); + + min = _mm256_min_ps(s, d01); + max = _mm256_max_ps(s, d01); + d01 = i2s(_mm256_blend_epi32(s2i(min), s2i(max), 0xAA)); + + s = d2s(_mm256_permute4x64_pd(i2d(_mm256_shuffle_epi32(s2i(d01), 0x1B)), 0x4E)); + min = _mm256_min_ps(s, d01); + max = _mm256_max_ps(s, d01); + d01 = i2s(_mm256_blend_epi32(s2i(min), s2i(max), 0xF0)); + + s = i2s(_mm256_shuffle_epi32(s2i(d01), 0x4E)); + min = _mm256_min_ps(s, d01); + max = _mm256_max_ps(s, d01); + d01 = i2s(_mm256_blend_epi32(s2i(min), s2i(max), 0xCC)); + + s = i2s(_mm256_shuffle_epi32(s2i(d01), 0xB1)); + min = _mm256_min_ps(s, d01); + max = _mm256_max_ps(s, d01); + d01 = i2s(_mm256_blend_epi32(s2i(min), s2i(max), 0xAA)); +} + static INLINE void sort_01v_merge_ascending(__m256& d01) { + __m256 min, max, s; + + s = d2s(_mm256_permute4x64_pd(s2d(d01), 0x4E)); + min = _mm256_min_ps(s, d01); + max = _mm256_max_ps(s, d01); + d01 = i2s(_mm256_blend_epi32(s2i(min), s2i(max), 0xF0)); + + s = i2s(_mm256_shuffle_epi32(s2i(d01), 0x4E)); + + min = _mm256_min_ps(s, d01); + max = _mm256_max_ps(s, d01); + d01 = i2s(_mm256_blend_epi32(s2i(min), s2i(max), 0xCC)); + + s = i2s(_mm256_shuffle_epi32(s2i(d01), 0xB1)); + + min = _mm256_min_ps(s, d01); + max = _mm256_max_ps(s, d01); + d01 = i2s(_mm256_blend_epi32(s2i(min), s2i(max), 0xAA)); + } + static INLINE void sort_01v_descending(__m256& d01) { + __m256 min, max, s; + + s = i2s(_mm256_shuffle_epi32(s2i(d01), 0xB1)); + + min = _mm256_min_ps(s, d01); + max = _mm256_max_ps(s, d01); + d01 = i2s(_mm256_blend_epi32(s2i(max), s2i(min), 0xAA)); + + s = i2s(_mm256_shuffle_epi32(s2i(d01), 0x1B)); + + min = _mm256_min_ps(s, d01); + max = _mm256_max_ps(s, d01); + d01 = i2s(_mm256_blend_epi32(s2i(max), s2i(min), 0xCC)); + + s = i2s(_mm256_shuffle_epi32(s2i(d01), 0xB1)); + + min = _mm256_min_ps(s, d01); + max = _mm256_max_ps(s, d01); + d01 = i2s(_mm256_blend_epi32(s2i(max), s2i(min), 0xAA)); + + s = d2s(_mm256_permute4x64_pd(i2d(_mm256_shuffle_epi32(s2i(d01), 0x1B)), 0x4E)); + min = _mm256_min_ps(s, d01); + max = _mm256_max_ps(s, d01); + d01 = i2s(_mm256_blend_epi32(s2i(max), s2i(min), 0xF0)); + + s = i2s(_mm256_shuffle_epi32(s2i(d01), 0x4E)); + min = _mm256_min_ps(s, d01); + max = _mm256_max_ps(s, d01); + d01 = i2s(_mm256_blend_epi32(s2i(max), s2i(min), 0xCC)); + + s = i2s(_mm256_shuffle_epi32(s2i(d01), 0xB1)); + min = _mm256_min_ps(s, d01); + max = _mm256_max_ps(s, d01); + d01 = 
i2s(_mm256_blend_epi32(s2i(max), s2i(min), 0xAA)); +} + static INLINE void sort_01v_merge_descending(__m256& d01) { + __m256 min, max, s; + + s = d2s(_mm256_permute4x64_pd(s2d(d01), 0x4E)); + min = _mm256_min_ps(s, d01); + max = _mm256_max_ps(s, d01); + d01 = i2s(_mm256_blend_epi32(s2i(max), s2i(min), 0xF0)); + + s = i2s(_mm256_shuffle_epi32(s2i(d01), 0x4E)); + + min = _mm256_min_ps(s, d01); + max = _mm256_max_ps(s, d01); + d01 = i2s(_mm256_blend_epi32(s2i(max), s2i(min), 0xCC)); + + s = i2s(_mm256_shuffle_epi32(s2i(d01), 0xB1)); + + min = _mm256_min_ps(s, d01); + max = _mm256_max_ps(s, d01); + d01 = i2s(_mm256_blend_epi32(s2i(max), s2i(min), 0xAA)); + } + static INLINE void sort_02v_ascending(__m256& d01, __m256& d02) { + __m256 tmp; + + sort_01v_ascending(d01); + sort_01v_descending(d02); + + tmp = d02; + + d02 = _mm256_max_ps(d01, d02); + d01 = _mm256_min_ps(d01, tmp); + + sort_01v_merge_ascending(d01); + sort_01v_merge_ascending(d02); + } + static INLINE void sort_02v_descending(__m256& d01, __m256& d02) { + __m256 tmp; + + sort_01v_descending(d01); + sort_01v_ascending(d02); + + tmp = d02; + + d02 = _mm256_max_ps(d01, d02); + d01 = _mm256_min_ps(d01, tmp); + + sort_01v_merge_descending(d01); + sort_01v_merge_descending(d02); + } + static INLINE void sort_02v_merge_ascending(__m256& d01, __m256& d02) { + __m256 tmp; + + tmp = d01; + + d01 = _mm256_min_ps(d02, d01); + + d02 = _mm256_max_ps(d02, tmp); + + sort_01v_merge_ascending(d01); + sort_01v_merge_ascending(d02); + } + static INLINE void sort_02v_merge_descending(__m256& d01, __m256& d02) { + __m256 tmp; + + tmp = d01; + + d01 = _mm256_min_ps(d02, d01); + + d02 = _mm256_max_ps(d02, tmp); + + sort_01v_merge_descending(d01); + sort_01v_merge_descending(d02); + } + static INLINE void sort_03v_ascending(__m256& d01, __m256& d02, __m256& d03) { + __m256 tmp; + + sort_02v_ascending(d01, d02); + sort_01v_descending(d03); + + tmp = d03; + + d03 = _mm256_max_ps(d02, d03); + d02 = _mm256_min_ps(d02, tmp); + + sort_02v_merge_ascending(d01, d02); + sort_01v_merge_ascending(d03); + } + static INLINE void sort_03v_descending(__m256& d01, __m256& d02, __m256& d03) { + __m256 tmp; + + sort_02v_descending(d01, d02); + sort_01v_ascending(d03); + + tmp = d03; + + d03 = _mm256_max_ps(d02, d03); + d02 = _mm256_min_ps(d02, tmp); + + sort_02v_merge_descending(d01, d02); + sort_01v_merge_descending(d03); + } + static INLINE void sort_03v_merge_ascending(__m256& d01, __m256& d02, __m256& d03) { + __m256 tmp; + + tmp = d01; + + d01 = _mm256_min_ps(d03, d01); + + d03 = _mm256_max_ps(d03, tmp); + + sort_02v_merge_ascending(d01, d02); + sort_01v_merge_ascending(d03); + } + static INLINE void sort_03v_merge_descending(__m256& d01, __m256& d02, __m256& d03) { + __m256 tmp; + + tmp = d01; + + d01 = _mm256_min_ps(d03, d01); + + d03 = _mm256_max_ps(d03, tmp); + + sort_02v_merge_descending(d01, d02); + sort_01v_merge_descending(d03); + } + static INLINE void sort_04v_ascending(__m256& d01, __m256& d02, __m256& d03, __m256& d04) { + __m256 tmp; + + sort_02v_ascending(d01, d02); + sort_02v_descending(d03, d04); + + tmp = d03; + + d03 = _mm256_max_ps(d02, d03); + d02 = _mm256_min_ps(d02, tmp); + + tmp = d04; + + d04 = _mm256_max_ps(d01, d04); + d01 = _mm256_min_ps(d01, tmp); + + sort_02v_merge_ascending(d01, d02); + sort_02v_merge_ascending(d03, d04); + } + static INLINE void sort_04v_descending(__m256& d01, __m256& d02, __m256& d03, __m256& d04) { + __m256 tmp; + + sort_02v_descending(d01, d02); + sort_02v_ascending(d03, d04); + + tmp = d03; + + d03 = 
_mm256_max_ps(d02, d03); + d02 = _mm256_min_ps(d02, tmp); + + tmp = d04; + + d04 = _mm256_max_ps(d01, d04); + d01 = _mm256_min_ps(d01, tmp); + + sort_02v_merge_descending(d01, d02); + sort_02v_merge_descending(d03, d04); + } + static INLINE void sort_04v_merge_ascending(__m256& d01, __m256& d02, __m256& d03, __m256& d04) { + __m256 tmp; + + tmp = d01; + + d01 = _mm256_min_ps(d03, d01); + + d03 = _mm256_max_ps(d03, tmp); + + tmp = d02; + + d02 = _mm256_min_ps(d04, d02); + + d04 = _mm256_max_ps(d04, tmp); + + sort_02v_merge_ascending(d01, d02); + sort_02v_merge_ascending(d03, d04); + } + static INLINE void sort_04v_merge_descending(__m256& d01, __m256& d02, __m256& d03, __m256& d04) { + __m256 tmp; + + tmp = d01; + + d01 = _mm256_min_ps(d03, d01); + + d03 = _mm256_max_ps(d03, tmp); + + tmp = d02; + + d02 = _mm256_min_ps(d04, d02); + + d04 = _mm256_max_ps(d04, tmp); + + sort_02v_merge_descending(d01, d02); + sort_02v_merge_descending(d03, d04); + } + static INLINE void sort_05v_ascending(__m256& d01, __m256& d02, __m256& d03, __m256& d04, __m256& d05) { + __m256 tmp; + + sort_04v_ascending(d01, d02, d03, d04); + sort_01v_descending(d05); + + tmp = d05; + + d05 = _mm256_max_ps(d04, d05); + d04 = _mm256_min_ps(d04, tmp); + + sort_04v_merge_ascending(d01, d02, d03, d04); + sort_01v_merge_ascending(d05); + } + static INLINE void sort_05v_descending(__m256& d01, __m256& d02, __m256& d03, __m256& d04, __m256& d05) { + __m256 tmp; + + sort_04v_descending(d01, d02, d03, d04); + sort_01v_ascending(d05); + + tmp = d05; + + d05 = _mm256_max_ps(d04, d05); + d04 = _mm256_min_ps(d04, tmp); + + sort_04v_merge_descending(d01, d02, d03, d04); + sort_01v_merge_descending(d05); + } + static INLINE void sort_05v_merge_ascending(__m256& d01, __m256& d02, __m256& d03, __m256& d04, __m256& d05) { + __m256 tmp; + + tmp = d01; + + d01 = _mm256_min_ps(d05, d01); + + d05 = _mm256_max_ps(d05, tmp); + + sort_04v_merge_ascending(d01, d02, d03, d04); + sort_01v_merge_ascending(d05); + } + static INLINE void sort_05v_merge_descending(__m256& d01, __m256& d02, __m256& d03, __m256& d04, __m256& d05) { + __m256 tmp; + + tmp = d01; + + d01 = _mm256_min_ps(d05, d01); + + d05 = _mm256_max_ps(d05, tmp); + + sort_04v_merge_descending(d01, d02, d03, d04); + sort_01v_merge_descending(d05); + } + static INLINE void sort_06v_ascending(__m256& d01, __m256& d02, __m256& d03, __m256& d04, __m256& d05, __m256& d06) { + __m256 tmp; + + sort_04v_ascending(d01, d02, d03, d04); + sort_02v_descending(d05, d06); + + tmp = d05; + + d05 = _mm256_max_ps(d04, d05); + d04 = _mm256_min_ps(d04, tmp); + + tmp = d06; + + d06 = _mm256_max_ps(d03, d06); + d03 = _mm256_min_ps(d03, tmp); + + sort_04v_merge_ascending(d01, d02, d03, d04); + sort_02v_merge_ascending(d05, d06); + } + static INLINE void sort_06v_descending(__m256& d01, __m256& d02, __m256& d03, __m256& d04, __m256& d05, __m256& d06) { + __m256 tmp; + + sort_04v_descending(d01, d02, d03, d04); + sort_02v_ascending(d05, d06); + + tmp = d05; + + d05 = _mm256_max_ps(d04, d05); + d04 = _mm256_min_ps(d04, tmp); + + tmp = d06; + + d06 = _mm256_max_ps(d03, d06); + d03 = _mm256_min_ps(d03, tmp); + + sort_04v_merge_descending(d01, d02, d03, d04); + sort_02v_merge_descending(d05, d06); + } + static INLINE void sort_06v_merge_ascending(__m256& d01, __m256& d02, __m256& d03, __m256& d04, __m256& d05, __m256& d06) { + __m256 tmp; + + tmp = d01; + + d01 = _mm256_min_ps(d05, d01); + + d05 = _mm256_max_ps(d05, tmp); + + tmp = d02; + + d02 = _mm256_min_ps(d06, d02); + + d06 = _mm256_max_ps(d06, tmp); + + 
sort_04v_merge_ascending(d01, d02, d03, d04); + sort_02v_merge_ascending(d05, d06); + } + static INLINE void sort_06v_merge_descending(__m256& d01, __m256& d02, __m256& d03, __m256& d04, __m256& d05, __m256& d06) { + __m256 tmp; + + tmp = d01; + + d01 = _mm256_min_ps(d05, d01); + + d05 = _mm256_max_ps(d05, tmp); + + tmp = d02; + + d02 = _mm256_min_ps(d06, d02); + + d06 = _mm256_max_ps(d06, tmp); + + sort_04v_merge_descending(d01, d02, d03, d04); + sort_02v_merge_descending(d05, d06); + } + static INLINE void sort_07v_ascending(__m256& d01, __m256& d02, __m256& d03, __m256& d04, __m256& d05, __m256& d06, __m256& d07) { + __m256 tmp; + + sort_04v_ascending(d01, d02, d03, d04); + sort_03v_descending(d05, d06, d07); + + tmp = d05; + + d05 = _mm256_max_ps(d04, d05); + d04 = _mm256_min_ps(d04, tmp); + + tmp = d06; + + d06 = _mm256_max_ps(d03, d06); + d03 = _mm256_min_ps(d03, tmp); + + tmp = d07; + + d07 = _mm256_max_ps(d02, d07); + d02 = _mm256_min_ps(d02, tmp); + + sort_04v_merge_ascending(d01, d02, d03, d04); + sort_03v_merge_ascending(d05, d06, d07); + } + static INLINE void sort_07v_descending(__m256& d01, __m256& d02, __m256& d03, __m256& d04, __m256& d05, __m256& d06, __m256& d07) { + __m256 tmp; + + sort_04v_descending(d01, d02, d03, d04); + sort_03v_ascending(d05, d06, d07); + + tmp = d05; + + d05 = _mm256_max_ps(d04, d05); + d04 = _mm256_min_ps(d04, tmp); + + tmp = d06; + + d06 = _mm256_max_ps(d03, d06); + d03 = _mm256_min_ps(d03, tmp); + + tmp = d07; + + d07 = _mm256_max_ps(d02, d07); + d02 = _mm256_min_ps(d02, tmp); + + sort_04v_merge_descending(d01, d02, d03, d04); + sort_03v_merge_descending(d05, d06, d07); + } + static INLINE void sort_07v_merge_ascending(__m256& d01, __m256& d02, __m256& d03, __m256& d04, __m256& d05, __m256& d06, __m256& d07) { + __m256 tmp; + + tmp = d01; + + d01 = _mm256_min_ps(d05, d01); + + d05 = _mm256_max_ps(d05, tmp); + + tmp = d02; + + d02 = _mm256_min_ps(d06, d02); + + d06 = _mm256_max_ps(d06, tmp); + + tmp = d03; + + d03 = _mm256_min_ps(d07, d03); + + d07 = _mm256_max_ps(d07, tmp); + + sort_04v_merge_ascending(d01, d02, d03, d04); + sort_03v_merge_ascending(d05, d06, d07); + } + static INLINE void sort_07v_merge_descending(__m256& d01, __m256& d02, __m256& d03, __m256& d04, __m256& d05, __m256& d06, __m256& d07) { + __m256 tmp; + + tmp = d01; + + d01 = _mm256_min_ps(d05, d01); + + d05 = _mm256_max_ps(d05, tmp); + + tmp = d02; + + d02 = _mm256_min_ps(d06, d02); + + d06 = _mm256_max_ps(d06, tmp); + + tmp = d03; + + d03 = _mm256_min_ps(d07, d03); + + d07 = _mm256_max_ps(d07, tmp); + + sort_04v_merge_descending(d01, d02, d03, d04); + sort_03v_merge_descending(d05, d06, d07); + } + static INLINE void sort_08v_ascending(__m256& d01, __m256& d02, __m256& d03, __m256& d04, __m256& d05, __m256& d06, __m256& d07, __m256& d08) { + __m256 tmp; + + sort_04v_ascending(d01, d02, d03, d04); + sort_04v_descending(d05, d06, d07, d08); + + tmp = d05; + + d05 = _mm256_max_ps(d04, d05); + d04 = _mm256_min_ps(d04, tmp); + + tmp = d06; + + d06 = _mm256_max_ps(d03, d06); + d03 = _mm256_min_ps(d03, tmp); + + tmp = d07; + + d07 = _mm256_max_ps(d02, d07); + d02 = _mm256_min_ps(d02, tmp); + + tmp = d08; + + d08 = _mm256_max_ps(d01, d08); + d01 = _mm256_min_ps(d01, tmp); + + sort_04v_merge_ascending(d01, d02, d03, d04); + sort_04v_merge_ascending(d05, d06, d07, d08); + } + static INLINE void sort_08v_descending(__m256& d01, __m256& d02, __m256& d03, __m256& d04, __m256& d05, __m256& d06, __m256& d07, __m256& d08) { + __m256 tmp; + + sort_04v_descending(d01, d02, d03, d04); + 
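// Annotation: the descending variants mirror the ascending ones exactly --
// the sub-sort directions flip (here the low half runs descending and the
// high half ascending) and the 1v kernels swap min/max in their final
// blends, while the cross-vector min/max exchanges stay identical. Both
// directions must be emitted because every compound sort consumes one half
// of each to build its bitonic input.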
sort_04v_ascending(d05, d06, d07, d08); + + tmp = d05; + + d05 = _mm256_max_ps(d04, d05); + d04 = _mm256_min_ps(d04, tmp); + + tmp = d06; + + d06 = _mm256_max_ps(d03, d06); + d03 = _mm256_min_ps(d03, tmp); + + tmp = d07; + + d07 = _mm256_max_ps(d02, d07); + d02 = _mm256_min_ps(d02, tmp); + + tmp = d08; + + d08 = _mm256_max_ps(d01, d08); + d01 = _mm256_min_ps(d01, tmp); + + sort_04v_merge_descending(d01, d02, d03, d04); + sort_04v_merge_descending(d05, d06, d07, d08); + } + static INLINE void sort_08v_merge_ascending(__m256& d01, __m256& d02, __m256& d03, __m256& d04, __m256& d05, __m256& d06, __m256& d07, __m256& d08) { + __m256 tmp; + + tmp = d01; + + d01 = _mm256_min_ps(d05, d01); + + d05 = _mm256_max_ps(d05, tmp); + + tmp = d02; + + d02 = _mm256_min_ps(d06, d02); + + d06 = _mm256_max_ps(d06, tmp); + + tmp = d03; + + d03 = _mm256_min_ps(d07, d03); + + d07 = _mm256_max_ps(d07, tmp); + + tmp = d04; + + d04 = _mm256_min_ps(d08, d04); + + d08 = _mm256_max_ps(d08, tmp); + + sort_04v_merge_ascending(d01, d02, d03, d04); + sort_04v_merge_ascending(d05, d06, d07, d08); + } + static INLINE void sort_08v_merge_descending(__m256& d01, __m256& d02, __m256& d03, __m256& d04, __m256& d05, __m256& d06, __m256& d07, __m256& d08) { + __m256 tmp; + + tmp = d01; + + d01 = _mm256_min_ps(d05, d01); + + d05 = _mm256_max_ps(d05, tmp); + + tmp = d02; + + d02 = _mm256_min_ps(d06, d02); + + d06 = _mm256_max_ps(d06, tmp); + + tmp = d03; + + d03 = _mm256_min_ps(d07, d03); + + d07 = _mm256_max_ps(d07, tmp); + + tmp = d04; + + d04 = _mm256_min_ps(d08, d04); + + d08 = _mm256_max_ps(d08, tmp); + + sort_04v_merge_descending(d01, d02, d03, d04); + sort_04v_merge_descending(d05, d06, d07, d08); + } + static INLINE void sort_09v_ascending(__m256& d01, __m256& d02, __m256& d03, __m256& d04, __m256& d05, __m256& d06, __m256& d07, __m256& d08, __m256& d09) { + __m256 tmp; + + sort_08v_ascending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_01v_descending(d09); + + tmp = d09; + + d09 = _mm256_max_ps(d08, d09); + d08 = _mm256_min_ps(d08, tmp); + + sort_08v_merge_ascending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_01v_merge_ascending(d09); + } + static INLINE void sort_09v_descending(__m256& d01, __m256& d02, __m256& d03, __m256& d04, __m256& d05, __m256& d06, __m256& d07, __m256& d08, __m256& d09) { + __m256 tmp; + + sort_08v_descending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_01v_ascending(d09); + + tmp = d09; + + d09 = _mm256_max_ps(d08, d09); + d08 = _mm256_min_ps(d08, tmp); + + sort_08v_merge_descending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_01v_merge_descending(d09); + } + static INLINE void sort_10v_ascending(__m256& d01, __m256& d02, __m256& d03, __m256& d04, __m256& d05, __m256& d06, __m256& d07, __m256& d08, __m256& d09, __m256& d10) { + __m256 tmp; + + sort_08v_ascending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_02v_descending(d09, d10); + + tmp = d09; + + d09 = _mm256_max_ps(d08, d09); + d08 = _mm256_min_ps(d08, tmp); + + tmp = d10; + + d10 = _mm256_max_ps(d07, d10); + d07 = _mm256_min_ps(d07, tmp); + + sort_08v_merge_ascending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_02v_merge_ascending(d09, d10); + } + static INLINE void sort_10v_descending(__m256& d01, __m256& d02, __m256& d03, __m256& d04, __m256& d05, __m256& d06, __m256& d07, __m256& d08, __m256& d09, __m256& d10) { + __m256 tmp; + + sort_08v_descending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_02v_ascending(d09, d10); + + tmp = d09; + + d09 = _mm256_max_ps(d08, d09); + d08 = _mm256_min_ps(d08, tmp); + + tmp = d10; + + 
d10 = _mm256_max_ps(d07, d10); + d07 = _mm256_min_ps(d07, tmp); + + sort_08v_merge_descending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_02v_merge_descending(d09, d10); + } + static INLINE void sort_11v_ascending(__m256& d01, __m256& d02, __m256& d03, __m256& d04, __m256& d05, __m256& d06, __m256& d07, __m256& d08, __m256& d09, __m256& d10, __m256& d11) { + __m256 tmp; + + sort_08v_ascending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_03v_descending(d09, d10, d11); + + tmp = d09; + + d09 = _mm256_max_ps(d08, d09); + d08 = _mm256_min_ps(d08, tmp); + + tmp = d10; + + d10 = _mm256_max_ps(d07, d10); + d07 = _mm256_min_ps(d07, tmp); + + tmp = d11; + + d11 = _mm256_max_ps(d06, d11); + d06 = _mm256_min_ps(d06, tmp); + + sort_08v_merge_ascending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_03v_merge_ascending(d09, d10, d11); + } + static INLINE void sort_11v_descending(__m256& d01, __m256& d02, __m256& d03, __m256& d04, __m256& d05, __m256& d06, __m256& d07, __m256& d08, __m256& d09, __m256& d10, __m256& d11) { + __m256 tmp; + + sort_08v_descending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_03v_ascending(d09, d10, d11); + + tmp = d09; + + d09 = _mm256_max_ps(d08, d09); + d08 = _mm256_min_ps(d08, tmp); + + tmp = d10; + + d10 = _mm256_max_ps(d07, d10); + d07 = _mm256_min_ps(d07, tmp); + + tmp = d11; + + d11 = _mm256_max_ps(d06, d11); + d06 = _mm256_min_ps(d06, tmp); + + sort_08v_merge_descending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_03v_merge_descending(d09, d10, d11); + } + static INLINE void sort_12v_ascending(__m256& d01, __m256& d02, __m256& d03, __m256& d04, __m256& d05, __m256& d06, __m256& d07, __m256& d08, __m256& d09, __m256& d10, __m256& d11, __m256& d12) { + __m256 tmp; + + sort_08v_ascending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_04v_descending(d09, d10, d11, d12); + + tmp = d09; + + d09 = _mm256_max_ps(d08, d09); + d08 = _mm256_min_ps(d08, tmp); + + tmp = d10; + + d10 = _mm256_max_ps(d07, d10); + d07 = _mm256_min_ps(d07, tmp); + + tmp = d11; + + d11 = _mm256_max_ps(d06, d11); + d06 = _mm256_min_ps(d06, tmp); + + tmp = d12; + + d12 = _mm256_max_ps(d05, d12); + d05 = _mm256_min_ps(d05, tmp); + + sort_08v_merge_ascending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_04v_merge_ascending(d09, d10, d11, d12); + } + static INLINE void sort_12v_descending(__m256& d01, __m256& d02, __m256& d03, __m256& d04, __m256& d05, __m256& d06, __m256& d07, __m256& d08, __m256& d09, __m256& d10, __m256& d11, __m256& d12) { + __m256 tmp; + + sort_08v_descending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_04v_ascending(d09, d10, d11, d12); + + tmp = d09; + + d09 = _mm256_max_ps(d08, d09); + d08 = _mm256_min_ps(d08, tmp); + + tmp = d10; + + d10 = _mm256_max_ps(d07, d10); + d07 = _mm256_min_ps(d07, tmp); + + tmp = d11; + + d11 = _mm256_max_ps(d06, d11); + d06 = _mm256_min_ps(d06, tmp); + + tmp = d12; + + d12 = _mm256_max_ps(d05, d12); + d05 = _mm256_min_ps(d05, tmp); + + sort_08v_merge_descending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_04v_merge_descending(d09, d10, d11, d12); + } + static INLINE void sort_13v_ascending(__m256& d01, __m256& d02, __m256& d03, __m256& d04, __m256& d05, __m256& d06, __m256& d07, __m256& d08, __m256& d09, __m256& d10, __m256& d11, __m256& d12, __m256& d13) { + __m256 tmp; + + sort_08v_ascending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_05v_descending(d09, d10, d11, d12, d13); + + tmp = d09; + + d09 = _mm256_max_ps(d08, d09); + d08 = _mm256_min_ps(d08, tmp); + + tmp = d10; + + d10 = _mm256_max_ps(d07, d10); + d07 = 
_mm256_min_ps(d07, tmp); + + tmp = d11; + + d11 = _mm256_max_ps(d06, d11); + d06 = _mm256_min_ps(d06, tmp); + + tmp = d12; + + d12 = _mm256_max_ps(d05, d12); + d05 = _mm256_min_ps(d05, tmp); + + tmp = d13; + + d13 = _mm256_max_ps(d04, d13); + d04 = _mm256_min_ps(d04, tmp); + + sort_08v_merge_ascending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_05v_merge_ascending(d09, d10, d11, d12, d13); + } + static INLINE void sort_13v_descending(__m256& d01, __m256& d02, __m256& d03, __m256& d04, __m256& d05, __m256& d06, __m256& d07, __m256& d08, __m256& d09, __m256& d10, __m256& d11, __m256& d12, __m256& d13) { + __m256 tmp; + + sort_08v_descending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_05v_ascending(d09, d10, d11, d12, d13); + + tmp = d09; + + d09 = _mm256_max_ps(d08, d09); + d08 = _mm256_min_ps(d08, tmp); + + tmp = d10; + + d10 = _mm256_max_ps(d07, d10); + d07 = _mm256_min_ps(d07, tmp); + + tmp = d11; + + d11 = _mm256_max_ps(d06, d11); + d06 = _mm256_min_ps(d06, tmp); + + tmp = d12; + + d12 = _mm256_max_ps(d05, d12); + d05 = _mm256_min_ps(d05, tmp); + + tmp = d13; + + d13 = _mm256_max_ps(d04, d13); + d04 = _mm256_min_ps(d04, tmp); + + sort_08v_merge_descending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_05v_merge_descending(d09, d10, d11, d12, d13); + } + static INLINE void sort_14v_ascending(__m256& d01, __m256& d02, __m256& d03, __m256& d04, __m256& d05, __m256& d06, __m256& d07, __m256& d08, __m256& d09, __m256& d10, __m256& d11, __m256& d12, __m256& d13, __m256& d14) { + __m256 tmp; + + sort_08v_ascending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_06v_descending(d09, d10, d11, d12, d13, d14); + + tmp = d09; + + d09 = _mm256_max_ps(d08, d09); + d08 = _mm256_min_ps(d08, tmp); + + tmp = d10; + + d10 = _mm256_max_ps(d07, d10); + d07 = _mm256_min_ps(d07, tmp); + + tmp = d11; + + d11 = _mm256_max_ps(d06, d11); + d06 = _mm256_min_ps(d06, tmp); + + tmp = d12; + + d12 = _mm256_max_ps(d05, d12); + d05 = _mm256_min_ps(d05, tmp); + + tmp = d13; + + d13 = _mm256_max_ps(d04, d13); + d04 = _mm256_min_ps(d04, tmp); + + tmp = d14; + + d14 = _mm256_max_ps(d03, d14); + d03 = _mm256_min_ps(d03, tmp); + + sort_08v_merge_ascending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_06v_merge_ascending(d09, d10, d11, d12, d13, d14); + } + static INLINE void sort_14v_descending(__m256& d01, __m256& d02, __m256& d03, __m256& d04, __m256& d05, __m256& d06, __m256& d07, __m256& d08, __m256& d09, __m256& d10, __m256& d11, __m256& d12, __m256& d13, __m256& d14) { + __m256 tmp; + + sort_08v_descending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_06v_ascending(d09, d10, d11, d12, d13, d14); + + tmp = d09; + + d09 = _mm256_max_ps(d08, d09); + d08 = _mm256_min_ps(d08, tmp); + + tmp = d10; + + d10 = _mm256_max_ps(d07, d10); + d07 = _mm256_min_ps(d07, tmp); + + tmp = d11; + + d11 = _mm256_max_ps(d06, d11); + d06 = _mm256_min_ps(d06, tmp); + + tmp = d12; + + d12 = _mm256_max_ps(d05, d12); + d05 = _mm256_min_ps(d05, tmp); + + tmp = d13; + + d13 = _mm256_max_ps(d04, d13); + d04 = _mm256_min_ps(d04, tmp); + + tmp = d14; + + d14 = _mm256_max_ps(d03, d14); + d03 = _mm256_min_ps(d03, tmp); + + sort_08v_merge_descending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_06v_merge_descending(d09, d10, d11, d12, d13, d14); + } + static INLINE void sort_15v_ascending(__m256& d01, __m256& d02, __m256& d03, __m256& d04, __m256& d05, __m256& d06, __m256& d07, __m256& d08, __m256& d09, __m256& d10, __m256& d11, __m256& d12, __m256& d13, __m256& d14, __m256& d15) { + __m256 tmp; + + sort_08v_ascending(d01, d02, d03, d04, d05, 
d06, d07, d08); + sort_07v_descending(d09, d10, d11, d12, d13, d14, d15); + + tmp = d09; + + d09 = _mm256_max_ps(d08, d09); + d08 = _mm256_min_ps(d08, tmp); + + tmp = d10; + + d10 = _mm256_max_ps(d07, d10); + d07 = _mm256_min_ps(d07, tmp); + + tmp = d11; + + d11 = _mm256_max_ps(d06, d11); + d06 = _mm256_min_ps(d06, tmp); + + tmp = d12; + + d12 = _mm256_max_ps(d05, d12); + d05 = _mm256_min_ps(d05, tmp); + + tmp = d13; + + d13 = _mm256_max_ps(d04, d13); + d04 = _mm256_min_ps(d04, tmp); + + tmp = d14; + + d14 = _mm256_max_ps(d03, d14); + d03 = _mm256_min_ps(d03, tmp); + + tmp = d15; + + d15 = _mm256_max_ps(d02, d15); + d02 = _mm256_min_ps(d02, tmp); + + sort_08v_merge_ascending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_07v_merge_ascending(d09, d10, d11, d12, d13, d14, d15); + } + static INLINE void sort_15v_descending(__m256& d01, __m256& d02, __m256& d03, __m256& d04, __m256& d05, __m256& d06, __m256& d07, __m256& d08, __m256& d09, __m256& d10, __m256& d11, __m256& d12, __m256& d13, __m256& d14, __m256& d15) { + __m256 tmp; + + sort_08v_descending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_07v_ascending(d09, d10, d11, d12, d13, d14, d15); + + tmp = d09; + + d09 = _mm256_max_ps(d08, d09); + d08 = _mm256_min_ps(d08, tmp); + + tmp = d10; + + d10 = _mm256_max_ps(d07, d10); + d07 = _mm256_min_ps(d07, tmp); + + tmp = d11; + + d11 = _mm256_max_ps(d06, d11); + d06 = _mm256_min_ps(d06, tmp); + + tmp = d12; + + d12 = _mm256_max_ps(d05, d12); + d05 = _mm256_min_ps(d05, tmp); + + tmp = d13; + + d13 = _mm256_max_ps(d04, d13); + d04 = _mm256_min_ps(d04, tmp); + + tmp = d14; + + d14 = _mm256_max_ps(d03, d14); + d03 = _mm256_min_ps(d03, tmp); + + tmp = d15; + + d15 = _mm256_max_ps(d02, d15); + d02 = _mm256_min_ps(d02, tmp); + + sort_08v_merge_descending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_07v_merge_descending(d09, d10, d11, d12, d13, d14, d15); + } + static INLINE void sort_16v_ascending(__m256& d01, __m256& d02, __m256& d03, __m256& d04, __m256& d05, __m256& d06, __m256& d07, __m256& d08, __m256& d09, __m256& d10, __m256& d11, __m256& d12, __m256& d13, __m256& d14, __m256& d15, __m256& d16) { + __m256 tmp; + + sort_08v_ascending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_08v_descending(d09, d10, d11, d12, d13, d14, d15, d16); + + tmp = d09; + + d09 = _mm256_max_ps(d08, d09); + d08 = _mm256_min_ps(d08, tmp); + + tmp = d10; + + d10 = _mm256_max_ps(d07, d10); + d07 = _mm256_min_ps(d07, tmp); + + tmp = d11; + + d11 = _mm256_max_ps(d06, d11); + d06 = _mm256_min_ps(d06, tmp); + + tmp = d12; + + d12 = _mm256_max_ps(d05, d12); + d05 = _mm256_min_ps(d05, tmp); + + tmp = d13; + + d13 = _mm256_max_ps(d04, d13); + d04 = _mm256_min_ps(d04, tmp); + + tmp = d14; + + d14 = _mm256_max_ps(d03, d14); + d03 = _mm256_min_ps(d03, tmp); + + tmp = d15; + + d15 = _mm256_max_ps(d02, d15); + d02 = _mm256_min_ps(d02, tmp); + + tmp = d16; + + d16 = _mm256_max_ps(d01, d16); + d01 = _mm256_min_ps(d01, tmp); + + sort_08v_merge_ascending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_08v_merge_ascending(d09, d10, d11, d12, d13, d14, d15, d16); + } + static INLINE void sort_16v_descending(__m256& d01, __m256& d02, __m256& d03, __m256& d04, __m256& d05, __m256& d06, __m256& d07, __m256& d08, __m256& d09, __m256& d10, __m256& d11, __m256& d12, __m256& d13, __m256& d14, __m256& d15, __m256& d16) { + __m256 tmp; + + sort_08v_descending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_08v_ascending(d09, d10, d11, d12, d13, d14, d15, d16); + + tmp = d09; + + d09 = _mm256_max_ps(d08, d09); + d08 = _mm256_min_ps(d08, 
tmp); + + tmp = d10; + + d10 = _mm256_max_ps(d07, d10); + d07 = _mm256_min_ps(d07, tmp); + + tmp = d11; + + d11 = _mm256_max_ps(d06, d11); + d06 = _mm256_min_ps(d06, tmp); + + tmp = d12; + + d12 = _mm256_max_ps(d05, d12); + d05 = _mm256_min_ps(d05, tmp); + + tmp = d13; + + d13 = _mm256_max_ps(d04, d13); + d04 = _mm256_min_ps(d04, tmp); + + tmp = d14; + + d14 = _mm256_max_ps(d03, d14); + d03 = _mm256_min_ps(d03, tmp); + + tmp = d15; + + d15 = _mm256_max_ps(d02, d15); + d02 = _mm256_min_ps(d02, tmp); + + tmp = d16; + + d16 = _mm256_max_ps(d01, d16); + d01 = _mm256_min_ps(d01, tmp); + + sort_08v_merge_descending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_08v_merge_descending(d09, d10, d11, d12, d13, d14, d15, d16); + } + +static NOINLINE void sort_01v(float *ptr) { + __m256 d01 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 0)); + sort_01v_ascending(d01); + _mm256_storeu_ps((float *) ((__m256 *) ptr + 0), d01); +} + +static NOINLINE void sort_02v(float *ptr) { + __m256 d01 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 0)); + __m256 d02 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 1)); + sort_02v_ascending(d01, d02); + _mm256_storeu_ps((float *) ((__m256 *) ptr + 0), d01); + _mm256_storeu_ps((float *) ((__m256 *) ptr + 1), d02); +} + +static NOINLINE void sort_03v(float *ptr) { + __m256 d01 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 0)); + __m256 d02 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 1)); + __m256 d03 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 2)); + sort_03v_ascending(d01, d02, d03); + _mm256_storeu_ps((float *) ((__m256 *) ptr + 0), d01); + _mm256_storeu_ps((float *) ((__m256 *) ptr + 1), d02); + _mm256_storeu_ps((float *) ((__m256 *) ptr + 2), d03); +} + +static NOINLINE void sort_04v(float *ptr) { + __m256 d01 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 0)); + __m256 d02 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 1)); + __m256 d03 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 2)); + __m256 d04 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 3)); + sort_04v_ascending(d01, d02, d03, d04); + _mm256_storeu_ps((float *) ((__m256 *) ptr + 0), d01); + _mm256_storeu_ps((float *) ((__m256 *) ptr + 1), d02); + _mm256_storeu_ps((float *) ((__m256 *) ptr + 2), d03); + _mm256_storeu_ps((float *) ((__m256 *) ptr + 3), d04); +} + +static NOINLINE void sort_05v(float *ptr) { + __m256 d01 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 0)); + __m256 d02 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 1)); + __m256 d03 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 2)); + __m256 d04 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 3)); + __m256 d05 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 4)); + sort_05v_ascending(d01, d02, d03, d04, d05); + _mm256_storeu_ps((float *) ((__m256 *) ptr + 0), d01); + _mm256_storeu_ps((float *) ((__m256 *) ptr + 1), d02); + _mm256_storeu_ps((float *) ((__m256 *) ptr + 2), d03); + _mm256_storeu_ps((float *) ((__m256 *) ptr + 3), d04); + _mm256_storeu_ps((float *) ((__m256 *) ptr + 4), d05); +} + +static NOINLINE void sort_06v(float *ptr) { + __m256 d01 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 0)); + __m256 d02 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 1)); + __m256 d03 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 2)); + __m256 d04 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 3)); + __m256 d05 = _mm256_loadu_ps((float 
const *) ((__m256 const *) ptr + 4)); + __m256 d06 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 5)); + sort_06v_ascending(d01, d02, d03, d04, d05, d06); + _mm256_storeu_ps((float *) ((__m256 *) ptr + 0), d01); + _mm256_storeu_ps((float *) ((__m256 *) ptr + 1), d02); + _mm256_storeu_ps((float *) ((__m256 *) ptr + 2), d03); + _mm256_storeu_ps((float *) ((__m256 *) ptr + 3), d04); + _mm256_storeu_ps((float *) ((__m256 *) ptr + 4), d05); + _mm256_storeu_ps((float *) ((__m256 *) ptr + 5), d06); +} + +static NOINLINE void sort_07v(float *ptr) { + __m256 d01 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 0)); + __m256 d02 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 1)); + __m256 d03 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 2)); + __m256 d04 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 3)); + __m256 d05 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 4)); + __m256 d06 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 5)); + __m256 d07 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 6)); + sort_07v_ascending(d01, d02, d03, d04, d05, d06, d07); + _mm256_storeu_ps((float *) ((__m256 *) ptr + 0), d01); + _mm256_storeu_ps((float *) ((__m256 *) ptr + 1), d02); + _mm256_storeu_ps((float *) ((__m256 *) ptr + 2), d03); + _mm256_storeu_ps((float *) ((__m256 *) ptr + 3), d04); + _mm256_storeu_ps((float *) ((__m256 *) ptr + 4), d05); + _mm256_storeu_ps((float *) ((__m256 *) ptr + 5), d06); + _mm256_storeu_ps((float *) ((__m256 *) ptr + 6), d07); +} + +static NOINLINE void sort_08v(float *ptr) { + __m256 d01 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 0)); + __m256 d02 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 1)); + __m256 d03 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 2)); + __m256 d04 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 3)); + __m256 d05 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 4)); + __m256 d06 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 5)); + __m256 d07 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 6)); + __m256 d08 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 7)); + sort_08v_ascending(d01, d02, d03, d04, d05, d06, d07, d08); + _mm256_storeu_ps((float *) ((__m256 *) ptr + 0), d01); + _mm256_storeu_ps((float *) ((__m256 *) ptr + 1), d02); + _mm256_storeu_ps((float *) ((__m256 *) ptr + 2), d03); + _mm256_storeu_ps((float *) ((__m256 *) ptr + 3), d04); + _mm256_storeu_ps((float *) ((__m256 *) ptr + 4), d05); + _mm256_storeu_ps((float *) ((__m256 *) ptr + 5), d06); + _mm256_storeu_ps((float *) ((__m256 *) ptr + 6), d07); + _mm256_storeu_ps((float *) ((__m256 *) ptr + 7), d08); +} + +static NOINLINE void sort_09v(float *ptr) { + __m256 d01 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 0)); + __m256 d02 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 1)); + __m256 d03 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 2)); + __m256 d04 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 3)); + __m256 d05 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 4)); + __m256 d06 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 5)); + __m256 d07 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 6)); + __m256 d08 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 7)); + __m256 d09 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 8)); + sort_09v_ascending(d01, d02, d03, d04, d05, d06, d07, d08, d09); + _mm256_storeu_ps((float *) 
((__m256 *) ptr + 0), d01); + _mm256_storeu_ps((float *) ((__m256 *) ptr + 1), d02); + _mm256_storeu_ps((float *) ((__m256 *) ptr + 2), d03); + _mm256_storeu_ps((float *) ((__m256 *) ptr + 3), d04); + _mm256_storeu_ps((float *) ((__m256 *) ptr + 4), d05); + _mm256_storeu_ps((float *) ((__m256 *) ptr + 5), d06); + _mm256_storeu_ps((float *) ((__m256 *) ptr + 6), d07); + _mm256_storeu_ps((float *) ((__m256 *) ptr + 7), d08); + _mm256_storeu_ps((float *) ((__m256 *) ptr + 8), d09); +} + +static NOINLINE void sort_10v(float *ptr) { + __m256 d01 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 0)); + __m256 d02 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 1)); + __m256 d03 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 2)); + __m256 d04 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 3)); + __m256 d05 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 4)); + __m256 d06 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 5)); + __m256 d07 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 6)); + __m256 d08 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 7)); + __m256 d09 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 8)); + __m256 d10 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 9)); + sort_10v_ascending(d01, d02, d03, d04, d05, d06, d07, d08, d09, d10); + _mm256_storeu_ps((float *) ((__m256 *) ptr + 0), d01); + _mm256_storeu_ps((float *) ((__m256 *) ptr + 1), d02); + _mm256_storeu_ps((float *) ((__m256 *) ptr + 2), d03); + _mm256_storeu_ps((float *) ((__m256 *) ptr + 3), d04); + _mm256_storeu_ps((float *) ((__m256 *) ptr + 4), d05); + _mm256_storeu_ps((float *) ((__m256 *) ptr + 5), d06); + _mm256_storeu_ps((float *) ((__m256 *) ptr + 6), d07); + _mm256_storeu_ps((float *) ((__m256 *) ptr + 7), d08); + _mm256_storeu_ps((float *) ((__m256 *) ptr + 8), d09); + _mm256_storeu_ps((float *) ((__m256 *) ptr + 9), d10); +} + +static NOINLINE void sort_11v(float *ptr) { + __m256 d01 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 0)); + __m256 d02 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 1)); + __m256 d03 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 2)); + __m256 d04 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 3)); + __m256 d05 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 4)); + __m256 d06 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 5)); + __m256 d07 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 6)); + __m256 d08 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 7)); + __m256 d09 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 8)); + __m256 d10 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 9)); + __m256 d11 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 10)); + sort_11v_ascending(d01, d02, d03, d04, d05, d06, d07, d08, d09, d10, d11); + _mm256_storeu_ps((float *) ((__m256 *) ptr + 0), d01); + _mm256_storeu_ps((float *) ((__m256 *) ptr + 1), d02); + _mm256_storeu_ps((float *) ((__m256 *) ptr + 2), d03); + _mm256_storeu_ps((float *) ((__m256 *) ptr + 3), d04); + _mm256_storeu_ps((float *) ((__m256 *) ptr + 4), d05); + _mm256_storeu_ps((float *) ((__m256 *) ptr + 5), d06); + _mm256_storeu_ps((float *) ((__m256 *) ptr + 6), d07); + _mm256_storeu_ps((float *) ((__m256 *) ptr + 7), d08); + _mm256_storeu_ps((float *) ((__m256 *) ptr + 8), d09); + _mm256_storeu_ps((float *) ((__m256 *) ptr + 9), d10); + _mm256_storeu_ps((float *) ((__m256 *) ptr + 10), d11); +} + +static NOINLINE void 
sort_12v(float *ptr) { + __m256 d01 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 0)); + __m256 d02 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 1)); + __m256 d03 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 2)); + __m256 d04 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 3)); + __m256 d05 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 4)); + __m256 d06 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 5)); + __m256 d07 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 6)); + __m256 d08 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 7)); + __m256 d09 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 8)); + __m256 d10 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 9)); + __m256 d11 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 10)); + __m256 d12 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 11)); + sort_12v_ascending(d01, d02, d03, d04, d05, d06, d07, d08, d09, d10, d11, d12); + _mm256_storeu_ps((float *) ((__m256 *) ptr + 0), d01); + _mm256_storeu_ps((float *) ((__m256 *) ptr + 1), d02); + _mm256_storeu_ps((float *) ((__m256 *) ptr + 2), d03); + _mm256_storeu_ps((float *) ((__m256 *) ptr + 3), d04); + _mm256_storeu_ps((float *) ((__m256 *) ptr + 4), d05); + _mm256_storeu_ps((float *) ((__m256 *) ptr + 5), d06); + _mm256_storeu_ps((float *) ((__m256 *) ptr + 6), d07); + _mm256_storeu_ps((float *) ((__m256 *) ptr + 7), d08); + _mm256_storeu_ps((float *) ((__m256 *) ptr + 8), d09); + _mm256_storeu_ps((float *) ((__m256 *) ptr + 9), d10); + _mm256_storeu_ps((float *) ((__m256 *) ptr + 10), d11); + _mm256_storeu_ps((float *) ((__m256 *) ptr + 11), d12); +} + +static NOINLINE void sort_13v(float *ptr) { + __m256 d01 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 0)); + __m256 d02 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 1)); + __m256 d03 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 2)); + __m256 d04 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 3)); + __m256 d05 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 4)); + __m256 d06 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 5)); + __m256 d07 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 6)); + __m256 d08 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 7)); + __m256 d09 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 8)); + __m256 d10 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 9)); + __m256 d11 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 10)); + __m256 d12 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 11)); + __m256 d13 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 12)); + sort_13v_ascending(d01, d02, d03, d04, d05, d06, d07, d08, d09, d10, d11, d12, d13); + _mm256_storeu_ps((float *) ((__m256 *) ptr + 0), d01); + _mm256_storeu_ps((float *) ((__m256 *) ptr + 1), d02); + _mm256_storeu_ps((float *) ((__m256 *) ptr + 2), d03); + _mm256_storeu_ps((float *) ((__m256 *) ptr + 3), d04); + _mm256_storeu_ps((float *) ((__m256 *) ptr + 4), d05); + _mm256_storeu_ps((float *) ((__m256 *) ptr + 5), d06); + _mm256_storeu_ps((float *) ((__m256 *) ptr + 6), d07); + _mm256_storeu_ps((float *) ((__m256 *) ptr + 7), d08); + _mm256_storeu_ps((float *) ((__m256 *) ptr + 8), d09); + _mm256_storeu_ps((float *) ((__m256 *) ptr + 9), d10); + _mm256_storeu_ps((float *) ((__m256 *) ptr + 10), d11); + _mm256_storeu_ps((float *) ((__m256 *) ptr + 11), d12); + _mm256_storeu_ps((float *) 
((__m256 *) ptr + 12), d13); +} + +static NOINLINE void sort_14v(float *ptr) { + __m256 d01 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 0)); + __m256 d02 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 1)); + __m256 d03 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 2)); + __m256 d04 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 3)); + __m256 d05 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 4)); + __m256 d06 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 5)); + __m256 d07 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 6)); + __m256 d08 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 7)); + __m256 d09 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 8)); + __m256 d10 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 9)); + __m256 d11 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 10)); + __m256 d12 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 11)); + __m256 d13 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 12)); + __m256 d14 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 13)); + sort_14v_ascending(d01, d02, d03, d04, d05, d06, d07, d08, d09, d10, d11, d12, d13, d14); + _mm256_storeu_ps((float *) ((__m256 *) ptr + 0), d01); + _mm256_storeu_ps((float *) ((__m256 *) ptr + 1), d02); + _mm256_storeu_ps((float *) ((__m256 *) ptr + 2), d03); + _mm256_storeu_ps((float *) ((__m256 *) ptr + 3), d04); + _mm256_storeu_ps((float *) ((__m256 *) ptr + 4), d05); + _mm256_storeu_ps((float *) ((__m256 *) ptr + 5), d06); + _mm256_storeu_ps((float *) ((__m256 *) ptr + 6), d07); + _mm256_storeu_ps((float *) ((__m256 *) ptr + 7), d08); + _mm256_storeu_ps((float *) ((__m256 *) ptr + 8), d09); + _mm256_storeu_ps((float *) ((__m256 *) ptr + 9), d10); + _mm256_storeu_ps((float *) ((__m256 *) ptr + 10), d11); + _mm256_storeu_ps((float *) ((__m256 *) ptr + 11), d12); + _mm256_storeu_ps((float *) ((__m256 *) ptr + 12), d13); + _mm256_storeu_ps((float *) ((__m256 *) ptr + 13), d14); +} + +static NOINLINE void sort_15v(float *ptr) { + __m256 d01 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 0)); + __m256 d02 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 1)); + __m256 d03 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 2)); + __m256 d04 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 3)); + __m256 d05 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 4)); + __m256 d06 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 5)); + __m256 d07 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 6)); + __m256 d08 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 7)); + __m256 d09 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 8)); + __m256 d10 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 9)); + __m256 d11 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 10)); + __m256 d12 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 11)); + __m256 d13 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 12)); + __m256 d14 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 13)); + __m256 d15 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 14)); + sort_15v_ascending(d01, d02, d03, d04, d05, d06, d07, d08, d09, d10, d11, d12, d13, d14, d15); + _mm256_storeu_ps((float *) ((__m256 *) ptr + 0), d01); + _mm256_storeu_ps((float *) ((__m256 *) ptr + 1), d02); + _mm256_storeu_ps((float *) ((__m256 *) ptr + 2), d03); + _mm256_storeu_ps((float *) ((__m256 *) ptr + 3), 
d04); + _mm256_storeu_ps((float *) ((__m256 *) ptr + 4), d05); + _mm256_storeu_ps((float *) ((__m256 *) ptr + 5), d06); + _mm256_storeu_ps((float *) ((__m256 *) ptr + 6), d07); + _mm256_storeu_ps((float *) ((__m256 *) ptr + 7), d08); + _mm256_storeu_ps((float *) ((__m256 *) ptr + 8), d09); + _mm256_storeu_ps((float *) ((__m256 *) ptr + 9), d10); + _mm256_storeu_ps((float *) ((__m256 *) ptr + 10), d11); + _mm256_storeu_ps((float *) ((__m256 *) ptr + 11), d12); + _mm256_storeu_ps((float *) ((__m256 *) ptr + 12), d13); + _mm256_storeu_ps((float *) ((__m256 *) ptr + 13), d14); + _mm256_storeu_ps((float *) ((__m256 *) ptr + 14), d15); +} + +static NOINLINE void sort_16v(float *ptr) { + __m256 d01 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 0)); + __m256 d02 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 1)); + __m256 d03 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 2)); + __m256 d04 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 3)); + __m256 d05 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 4)); + __m256 d06 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 5)); + __m256 d07 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 6)); + __m256 d08 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 7)); + __m256 d09 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 8)); + __m256 d10 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 9)); + __m256 d11 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 10)); + __m256 d12 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 11)); + __m256 d13 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 12)); + __m256 d14 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 13)); + __m256 d15 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 14)); + __m256 d16 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 15)); + sort_16v_ascending(d01, d02, d03, d04, d05, d06, d07, d08, d09, d10, d11, d12, d13, d14, d15, d16); + _mm256_storeu_ps((float *) ((__m256 *) ptr + 0), d01); + _mm256_storeu_ps((float *) ((__m256 *) ptr + 1), d02); + _mm256_storeu_ps((float *) ((__m256 *) ptr + 2), d03); + _mm256_storeu_ps((float *) ((__m256 *) ptr + 3), d04); + _mm256_storeu_ps((float *) ((__m256 *) ptr + 4), d05); + _mm256_storeu_ps((float *) ((__m256 *) ptr + 5), d06); + _mm256_storeu_ps((float *) ((__m256 *) ptr + 6), d07); + _mm256_storeu_ps((float *) ((__m256 *) ptr + 7), d08); + _mm256_storeu_ps((float *) ((__m256 *) ptr + 8), d09); + _mm256_storeu_ps((float *) ((__m256 *) ptr + 9), d10); + _mm256_storeu_ps((float *) ((__m256 *) ptr + 10), d11); + _mm256_storeu_ps((float *) ((__m256 *) ptr + 11), d12); + _mm256_storeu_ps((float *) ((__m256 *) ptr + 12), d13); + _mm256_storeu_ps((float *) ((__m256 *) ptr + 13), d14); + _mm256_storeu_ps((float *) ((__m256 *) ptr + 14), d15); + _mm256_storeu_ps((float *) ((__m256 *) ptr + 15), d16); +} + static void sort(float *ptr, size_t length) { + const int N = 8; + + switch(length / N) { + case 1: sort_01v(ptr); break; + case 2: sort_02v(ptr); break; + case 3: sort_03v(ptr); break; + case 4: sort_04v(ptr); break; + case 5: sort_05v(ptr); break; + case 6: sort_06v(ptr); break; + case 7: sort_07v(ptr); break; + case 8: sort_08v(ptr); break; + case 9: sort_09v(ptr); break; + case 10: sort_10v(ptr); break; + case 11: sort_11v(ptr); break; + case 12: sort_12v(ptr); break; + case 13: sort_13v(ptr); break; + case 14: sort_14v(ptr); break; + case 15: sort_15v(ptr); break; + case 16: sort_16v(ptr); break; + } +} +}; 
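+ +// [Editorial sketch, not generator output.] Example of how a caller might +// invoke the float specialization above; the array name and length are +// hypothetical. sort() dispatches on whole 8-float vectors (length / N), so +// any 0-7 element remainder must be handled by the caller, e.g. by padding +// or falling back to a scalar sort: +// +// float data[64]; // hypothetical buffer holding 8 full vectors +// /* ... fill data ... */ +// gcsort::smallsort::bitonic<float>::sort(data, 64); // length/8 == 8 -> sort_08v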
+} +} +#endif diff --git a/src/coreclr/src/gc/smallsort/bitonic_sort.AVX2.int32_t.generated.h b/src/coreclr/src/gc/smallsort/bitonic_sort.AVX2.int32_t.generated.h new file mode 100644 index 00000000000000..308bae8fa32694 --- /dev/null +++ b/src/coreclr/src/gc/smallsort/bitonic_sort.AVX2.int32_t.generated.h @@ -0,0 +1,1532 @@ +///////////////////////////////////////////////////////////////////////////// +//// +// This file was auto-generated by a tool at 2020-05-31 19:46:17 +// +// It is recommended you DO NOT directly edit this file but instead edit +// the code-generator that generated this source file. +///////////////////////////////////////////////////////////////////////////// + +#ifndef BITONIC_SORT_AVX2_INT32_T_H +#define BITONIC_SORT_AVX2_INT32_T_H + +#include <immintrin.h> +#include "bitonic_sort.h" + +#ifdef _MSC_VER + // MSVC + #define INLINE __forceinline + #define NOINLINE __declspec(noinline) +#else + // GCC + Clang + #define INLINE __attribute__((always_inline)) + #define NOINLINE __attribute__((noinline)) +#endif + +#define i2d _mm256_castsi256_pd +#define d2i _mm256_castpd_si256 +#define i2s _mm256_castsi256_ps +#define s2i _mm256_castps_si256 +#define s2d _mm256_castps_pd +#define d2s _mm256_castpd_ps + +namespace gcsort { +namespace smallsort { +template<> struct bitonic<int32_t> { +public: + + static INLINE void sort_01v_ascending(__m256i& d01) { + __m256i min, max, s; + + s = _mm256_shuffle_epi32(d01, 0xB1); + + min = _mm256_min_epi32(s, d01); + max = _mm256_max_epi32(s, d01); + d01 = _mm256_blend_epi32(min, max, 0xAA); + + s = _mm256_shuffle_epi32(d01, 0x1B); + + min = _mm256_min_epi32(s, d01); + max = _mm256_max_epi32(s, d01); + d01 = _mm256_blend_epi32(min, max, 0xCC); + + s = _mm256_shuffle_epi32(d01, 0xB1); + + min = _mm256_min_epi32(s, d01); + max = _mm256_max_epi32(s, d01); + d01 = _mm256_blend_epi32(min, max, 0xAA); + + s = d2i(_mm256_permute4x64_pd(i2d(_mm256_shuffle_epi32(d01, 0x1B)), 0x4E)); + min = _mm256_min_epi32(s, d01); + max = _mm256_max_epi32(s, d01); + d01 = _mm256_blend_epi32(min, max, 0xF0); + + s = _mm256_shuffle_epi32(d01, 0x4E); + min = _mm256_min_epi32(s, d01); + max = _mm256_max_epi32(s, d01); + d01 = _mm256_blend_epi32(min, max, 0xCC); + + s = _mm256_shuffle_epi32(d01, 0xB1); + min = _mm256_min_epi32(s, d01); + max = _mm256_max_epi32(s, d01); + d01 = _mm256_blend_epi32(min, max, 0xAA); +} + static INLINE void sort_01v_merge_ascending(__m256i& d01) { + __m256i min, max, s; + + s = d2i(_mm256_permute4x64_pd(i2d(d01), 0x4E)); + min = _mm256_min_epi32(s, d01); + max = _mm256_max_epi32(s, d01); + d01 = _mm256_blend_epi32(min, max, 0xF0); + + s = _mm256_shuffle_epi32(d01, 0x4E); + + min = _mm256_min_epi32(s, d01); + max = _mm256_max_epi32(s, d01); + d01 = _mm256_blend_epi32(min, max, 0xCC); + + s = _mm256_shuffle_epi32(d01, 0xB1); + + min = _mm256_min_epi32(s, d01); + max = _mm256_max_epi32(s, d01); + d01 = _mm256_blend_epi32(min, max, 0xAA); + } + static INLINE void sort_01v_descending(__m256i& d01) { + __m256i min, max, s; + + s = _mm256_shuffle_epi32(d01, 0xB1); + + min = _mm256_min_epi32(s, d01); + max = _mm256_max_epi32(s, d01); + d01 = _mm256_blend_epi32(max, min, 0xAA); + + s = _mm256_shuffle_epi32(d01, 0x1B); + + min = _mm256_min_epi32(s, d01); + max = _mm256_max_epi32(s, d01); + d01 = _mm256_blend_epi32(max, min, 0xCC); + + s = _mm256_shuffle_epi32(d01, 0xB1); + + min = _mm256_min_epi32(s, d01); + max = _mm256_max_epi32(s, d01); + d01 = _mm256_blend_epi32(max, min, 0xAA); + + s = d2i(_mm256_permute4x64_pd(i2d(_mm256_shuffle_epi32(d01, 0x1B)), 0x4E)); + 
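// [Editorial note, not generator output.] _mm256_shuffle_epi32 only permutes + // within each 128-bit lane; reversing all 8 lanes therefore pairs the 0x1B + // in-lane reversal with _mm256_permute4x64_pd(..., 0x4E), which swaps the + // register's two 128-bit halves (the i2d/d2i casts are bitwise no-ops). + 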
min = _mm256_min_epi32(s, d01); + max = _mm256_max_epi32(s, d01); + d01 = _mm256_blend_epi32(max, min, 0xF0); + + s = _mm256_shuffle_epi32(d01, 0x4E); + min = _mm256_min_epi32(s, d01); + max = _mm256_max_epi32(s, d01); + d01 = _mm256_blend_epi32(max, min, 0xCC); + + s = _mm256_shuffle_epi32(d01, 0xB1); + min = _mm256_min_epi32(s, d01); + max = _mm256_max_epi32(s, d01); + d01 = _mm256_blend_epi32(max, min, 0xAA); +} + static INLINE void sort_01v_merge_descending(__m256i& d01) { + __m256i min, max, s; + + s = d2i(_mm256_permute4x64_pd(i2d(d01), 0x4E)); + min = _mm256_min_epi32(s, d01); + max = _mm256_max_epi32(s, d01); + d01 = _mm256_blend_epi32(max, min, 0xF0); + + s = _mm256_shuffle_epi32(d01, 0x4E); + + min = _mm256_min_epi32(s, d01); + max = _mm256_max_epi32(s, d01); + d01 = _mm256_blend_epi32(max, min, 0xCC); + + s = _mm256_shuffle_epi32(d01, 0xB1); + + min = _mm256_min_epi32(s, d01); + max = _mm256_max_epi32(s, d01); + d01 = _mm256_blend_epi32(max, min, 0xAA); + } + static INLINE void sort_02v_ascending(__m256i& d01, __m256i& d02) { + __m256i tmp; + + sort_01v_ascending(d01); + sort_01v_descending(d02); + + tmp = d02; + + d02 = _mm256_max_epi32(d01, d02); + d01 = _mm256_min_epi32(d01, tmp); + + sort_01v_merge_ascending(d01); + sort_01v_merge_ascending(d02); + } + static INLINE void sort_02v_descending(__m256i& d01, __m256i& d02) { + __m256i tmp; + + sort_01v_descending(d01); + sort_01v_ascending(d02); + + tmp = d02; + + d02 = _mm256_max_epi32(d01, d02); + d01 = _mm256_min_epi32(d01, tmp); + + sort_01v_merge_descending(d01); + sort_01v_merge_descending(d02); + } + static INLINE void sort_02v_merge_ascending(__m256i& d01, __m256i& d02) { + __m256i tmp; + + tmp = d01; + + d01 = _mm256_min_epi32(d02, d01); + + d02 = _mm256_max_epi32(d02, tmp); + + sort_01v_merge_ascending(d01); + sort_01v_merge_ascending(d02); + } + static INLINE void sort_02v_merge_descending(__m256i& d01, __m256i& d02) { + __m256i tmp; + + tmp = d01; + + d01 = _mm256_min_epi32(d02, d01); + + d02 = _mm256_max_epi32(d02, tmp); + + sort_01v_merge_descending(d01); + sort_01v_merge_descending(d02); + } + static INLINE void sort_03v_ascending(__m256i& d01, __m256i& d02, __m256i& d03) { + __m256i tmp; + + sort_02v_ascending(d01, d02); + sort_01v_descending(d03); + + tmp = d03; + + d03 = _mm256_max_epi32(d02, d03); + d02 = _mm256_min_epi32(d02, tmp); + + sort_02v_merge_ascending(d01, d02); + sort_01v_merge_ascending(d03); + } + static INLINE void sort_03v_descending(__m256i& d01, __m256i& d02, __m256i& d03) { + __m256i tmp; + + sort_02v_descending(d01, d02); + sort_01v_ascending(d03); + + tmp = d03; + + d03 = _mm256_max_epi32(d02, d03); + d02 = _mm256_min_epi32(d02, tmp); + + sort_02v_merge_descending(d01, d02); + sort_01v_merge_descending(d03); + } + static INLINE void sort_03v_merge_ascending(__m256i& d01, __m256i& d02, __m256i& d03) { + __m256i tmp; + + tmp = d01; + + d01 = _mm256_min_epi32(d03, d01); + + d03 = _mm256_max_epi32(d03, tmp); + + sort_02v_merge_ascending(d01, d02); + sort_01v_merge_ascending(d03); + } + static INLINE void sort_03v_merge_descending(__m256i& d01, __m256i& d02, __m256i& d03) { + __m256i tmp; + + tmp = d01; + + d01 = _mm256_min_epi32(d03, d01); + + d03 = _mm256_max_epi32(d03, tmp); + + sort_02v_merge_descending(d01, d02); + sort_01v_merge_descending(d03); + } + static INLINE void sort_04v_ascending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04) { + __m256i tmp; + + sort_02v_ascending(d01, d02); + sort_02v_descending(d03, d04); + + tmp = d03; + + d03 = _mm256_max_epi32(d02, d03); + d02 = 
_mm256_min_epi32(d02, tmp); + + tmp = d04; + + d04 = _mm256_max_epi32(d01, d04); + d01 = _mm256_min_epi32(d01, tmp); + + sort_02v_merge_ascending(d01, d02); + sort_02v_merge_ascending(d03, d04); + } + static INLINE void sort_04v_descending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04) { + __m256i tmp; + + sort_02v_descending(d01, d02); + sort_02v_ascending(d03, d04); + + tmp = d03; + + d03 = _mm256_max_epi32(d02, d03); + d02 = _mm256_min_epi32(d02, tmp); + + tmp = d04; + + d04 = _mm256_max_epi32(d01, d04); + d01 = _mm256_min_epi32(d01, tmp); + + sort_02v_merge_descending(d01, d02); + sort_02v_merge_descending(d03, d04); + } + static INLINE void sort_04v_merge_ascending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04) { + __m256i tmp; + + tmp = d01; + + d01 = _mm256_min_epi32(d03, d01); + + d03 = _mm256_max_epi32(d03, tmp); + + tmp = d02; + + d02 = _mm256_min_epi32(d04, d02); + + d04 = _mm256_max_epi32(d04, tmp); + + sort_02v_merge_ascending(d01, d02); + sort_02v_merge_ascending(d03, d04); + } + static INLINE void sort_04v_merge_descending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04) { + __m256i tmp; + + tmp = d01; + + d01 = _mm256_min_epi32(d03, d01); + + d03 = _mm256_max_epi32(d03, tmp); + + tmp = d02; + + d02 = _mm256_min_epi32(d04, d02); + + d04 = _mm256_max_epi32(d04, tmp); + + sort_02v_merge_descending(d01, d02); + sort_02v_merge_descending(d03, d04); + } + static INLINE void sort_05v_ascending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05) { + __m256i tmp; + + sort_04v_ascending(d01, d02, d03, d04); + sort_01v_descending(d05); + + tmp = d05; + + d05 = _mm256_max_epi32(d04, d05); + d04 = _mm256_min_epi32(d04, tmp); + + sort_04v_merge_ascending(d01, d02, d03, d04); + sort_01v_merge_ascending(d05); + } + static INLINE void sort_05v_descending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05) { + __m256i tmp; + + sort_04v_descending(d01, d02, d03, d04); + sort_01v_ascending(d05); + + tmp = d05; + + d05 = _mm256_max_epi32(d04, d05); + d04 = _mm256_min_epi32(d04, tmp); + + sort_04v_merge_descending(d01, d02, d03, d04); + sort_01v_merge_descending(d05); + } + static INLINE void sort_05v_merge_ascending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05) { + __m256i tmp; + + tmp = d01; + + d01 = _mm256_min_epi32(d05, d01); + + d05 = _mm256_max_epi32(d05, tmp); + + sort_04v_merge_ascending(d01, d02, d03, d04); + sort_01v_merge_ascending(d05); + } + static INLINE void sort_05v_merge_descending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05) { + __m256i tmp; + + tmp = d01; + + d01 = _mm256_min_epi32(d05, d01); + + d05 = _mm256_max_epi32(d05, tmp); + + sort_04v_merge_descending(d01, d02, d03, d04); + sort_01v_merge_descending(d05); + } + static INLINE void sort_06v_ascending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06) { + __m256i tmp; + + sort_04v_ascending(d01, d02, d03, d04); + sort_02v_descending(d05, d06); + + tmp = d05; + + d05 = _mm256_max_epi32(d04, d05); + d04 = _mm256_min_epi32(d04, tmp); + + tmp = d06; + + d06 = _mm256_max_epi32(d03, d06); + d03 = _mm256_min_epi32(d03, tmp); + + sort_04v_merge_ascending(d01, d02, d03, d04); + sort_02v_merge_ascending(d05, d06); + } + static INLINE void sort_06v_descending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06) { + __m256i tmp; + + sort_04v_descending(d01, d02, d03, d04); + sort_02v_ascending(d05, d06); + + tmp = d05; + + d05 = _mm256_max_epi32(d04, d05); + d04 
= _mm256_min_epi32(d04, tmp); + + tmp = d06; + + d06 = _mm256_max_epi32(d03, d06); + d03 = _mm256_min_epi32(d03, tmp); + + sort_04v_merge_descending(d01, d02, d03, d04); + sort_02v_merge_descending(d05, d06); + } + static INLINE void sort_06v_merge_ascending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06) { + __m256i tmp; + + tmp = d01; + + d01 = _mm256_min_epi32(d05, d01); + + d05 = _mm256_max_epi32(d05, tmp); + + tmp = d02; + + d02 = _mm256_min_epi32(d06, d02); + + d06 = _mm256_max_epi32(d06, tmp); + + sort_04v_merge_ascending(d01, d02, d03, d04); + sort_02v_merge_ascending(d05, d06); + } + static INLINE void sort_06v_merge_descending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06) { + __m256i tmp; + + tmp = d01; + + d01 = _mm256_min_epi32(d05, d01); + + d05 = _mm256_max_epi32(d05, tmp); + + tmp = d02; + + d02 = _mm256_min_epi32(d06, d02); + + d06 = _mm256_max_epi32(d06, tmp); + + sort_04v_merge_descending(d01, d02, d03, d04); + sort_02v_merge_descending(d05, d06); + } + static INLINE void sort_07v_ascending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07) { + __m256i tmp; + + sort_04v_ascending(d01, d02, d03, d04); + sort_03v_descending(d05, d06, d07); + + tmp = d05; + + d05 = _mm256_max_epi32(d04, d05); + d04 = _mm256_min_epi32(d04, tmp); + + tmp = d06; + + d06 = _mm256_max_epi32(d03, d06); + d03 = _mm256_min_epi32(d03, tmp); + + tmp = d07; + + d07 = _mm256_max_epi32(d02, d07); + d02 = _mm256_min_epi32(d02, tmp); + + sort_04v_merge_ascending(d01, d02, d03, d04); + sort_03v_merge_ascending(d05, d06, d07); + } + static INLINE void sort_07v_descending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07) { + __m256i tmp; + + sort_04v_descending(d01, d02, d03, d04); + sort_03v_ascending(d05, d06, d07); + + tmp = d05; + + d05 = _mm256_max_epi32(d04, d05); + d04 = _mm256_min_epi32(d04, tmp); + + tmp = d06; + + d06 = _mm256_max_epi32(d03, d06); + d03 = _mm256_min_epi32(d03, tmp); + + tmp = d07; + + d07 = _mm256_max_epi32(d02, d07); + d02 = _mm256_min_epi32(d02, tmp); + + sort_04v_merge_descending(d01, d02, d03, d04); + sort_03v_merge_descending(d05, d06, d07); + } + static INLINE void sort_07v_merge_ascending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07) { + __m256i tmp; + + tmp = d01; + + d01 = _mm256_min_epi32(d05, d01); + + d05 = _mm256_max_epi32(d05, tmp); + + tmp = d02; + + d02 = _mm256_min_epi32(d06, d02); + + d06 = _mm256_max_epi32(d06, tmp); + + tmp = d03; + + d03 = _mm256_min_epi32(d07, d03); + + d07 = _mm256_max_epi32(d07, tmp); + + sort_04v_merge_ascending(d01, d02, d03, d04); + sort_03v_merge_ascending(d05, d06, d07); + } + static INLINE void sort_07v_merge_descending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07) { + __m256i tmp; + + tmp = d01; + + d01 = _mm256_min_epi32(d05, d01); + + d05 = _mm256_max_epi32(d05, tmp); + + tmp = d02; + + d02 = _mm256_min_epi32(d06, d02); + + d06 = _mm256_max_epi32(d06, tmp); + + tmp = d03; + + d03 = _mm256_min_epi32(d07, d03); + + d07 = _mm256_max_epi32(d07, tmp); + + sort_04v_merge_descending(d01, d02, d03, d04); + sort_03v_merge_descending(d05, d06, d07); + } + static INLINE void sort_08v_ascending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08) { + __m256i tmp; + + sort_04v_ascending(d01, d02, d03, d04); + 
sort_04v_descending(d05, d06, d07, d08); + + tmp = d05; + + d05 = _mm256_max_epi32(d04, d05); + d04 = _mm256_min_epi32(d04, tmp); + + tmp = d06; + + d06 = _mm256_max_epi32(d03, d06); + d03 = _mm256_min_epi32(d03, tmp); + + tmp = d07; + + d07 = _mm256_max_epi32(d02, d07); + d02 = _mm256_min_epi32(d02, tmp); + + tmp = d08; + + d08 = _mm256_max_epi32(d01, d08); + d01 = _mm256_min_epi32(d01, tmp); + + sort_04v_merge_ascending(d01, d02, d03, d04); + sort_04v_merge_ascending(d05, d06, d07, d08); + } + static INLINE void sort_08v_descending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08) { + __m256i tmp; + + sort_04v_descending(d01, d02, d03, d04); + sort_04v_ascending(d05, d06, d07, d08); + + tmp = d05; + + d05 = _mm256_max_epi32(d04, d05); + d04 = _mm256_min_epi32(d04, tmp); + + tmp = d06; + + d06 = _mm256_max_epi32(d03, d06); + d03 = _mm256_min_epi32(d03, tmp); + + tmp = d07; + + d07 = _mm256_max_epi32(d02, d07); + d02 = _mm256_min_epi32(d02, tmp); + + tmp = d08; + + d08 = _mm256_max_epi32(d01, d08); + d01 = _mm256_min_epi32(d01, tmp); + + sort_04v_merge_descending(d01, d02, d03, d04); + sort_04v_merge_descending(d05, d06, d07, d08); + } + static INLINE void sort_08v_merge_ascending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08) { + __m256i tmp; + + tmp = d01; + + d01 = _mm256_min_epi32(d05, d01); + + d05 = _mm256_max_epi32(d05, tmp); + + tmp = d02; + + d02 = _mm256_min_epi32(d06, d02); + + d06 = _mm256_max_epi32(d06, tmp); + + tmp = d03; + + d03 = _mm256_min_epi32(d07, d03); + + d07 = _mm256_max_epi32(d07, tmp); + + tmp = d04; + + d04 = _mm256_min_epi32(d08, d04); + + d08 = _mm256_max_epi32(d08, tmp); + + sort_04v_merge_ascending(d01, d02, d03, d04); + sort_04v_merge_ascending(d05, d06, d07, d08); + } + static INLINE void sort_08v_merge_descending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08) { + __m256i tmp; + + tmp = d01; + + d01 = _mm256_min_epi32(d05, d01); + + d05 = _mm256_max_epi32(d05, tmp); + + tmp = d02; + + d02 = _mm256_min_epi32(d06, d02); + + d06 = _mm256_max_epi32(d06, tmp); + + tmp = d03; + + d03 = _mm256_min_epi32(d07, d03); + + d07 = _mm256_max_epi32(d07, tmp); + + tmp = d04; + + d04 = _mm256_min_epi32(d08, d04); + + d08 = _mm256_max_epi32(d08, tmp); + + sort_04v_merge_descending(d01, d02, d03, d04); + sort_04v_merge_descending(d05, d06, d07, d08); + } + static INLINE void sort_09v_ascending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08, __m256i& d09) { + __m256i tmp; + + sort_08v_ascending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_01v_descending(d09); + + tmp = d09; + + d09 = _mm256_max_epi32(d08, d09); + d08 = _mm256_min_epi32(d08, tmp); + + sort_08v_merge_ascending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_01v_merge_ascending(d09); + } + static INLINE void sort_09v_descending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08, __m256i& d09) { + __m256i tmp; + + sort_08v_descending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_01v_ascending(d09); + + tmp = d09; + + d09 = _mm256_max_epi32(d08, d09); + d08 = _mm256_min_epi32(d08, tmp); + + sort_08v_merge_descending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_01v_merge_descending(d09); + } + static INLINE void sort_10v_ascending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& 
d05, __m256i& d06, __m256i& d07, __m256i& d08, __m256i& d09, __m256i& d10) { + __m256i tmp; + + sort_08v_ascending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_02v_descending(d09, d10); + + tmp = d09; + + d09 = _mm256_max_epi32(d08, d09); + d08 = _mm256_min_epi32(d08, tmp); + + tmp = d10; + + d10 = _mm256_max_epi32(d07, d10); + d07 = _mm256_min_epi32(d07, tmp); + + sort_08v_merge_ascending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_02v_merge_ascending(d09, d10); + } + static INLINE void sort_10v_descending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08, __m256i& d09, __m256i& d10) { + __m256i tmp; + + sort_08v_descending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_02v_ascending(d09, d10); + + tmp = d09; + + d09 = _mm256_max_epi32(d08, d09); + d08 = _mm256_min_epi32(d08, tmp); + + tmp = d10; + + d10 = _mm256_max_epi32(d07, d10); + d07 = _mm256_min_epi32(d07, tmp); + + sort_08v_merge_descending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_02v_merge_descending(d09, d10); + } + static INLINE void sort_11v_ascending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08, __m256i& d09, __m256i& d10, __m256i& d11) { + __m256i tmp; + + sort_08v_ascending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_03v_descending(d09, d10, d11); + + tmp = d09; + + d09 = _mm256_max_epi32(d08, d09); + d08 = _mm256_min_epi32(d08, tmp); + + tmp = d10; + + d10 = _mm256_max_epi32(d07, d10); + d07 = _mm256_min_epi32(d07, tmp); + + tmp = d11; + + d11 = _mm256_max_epi32(d06, d11); + d06 = _mm256_min_epi32(d06, tmp); + + sort_08v_merge_ascending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_03v_merge_ascending(d09, d10, d11); + } + static INLINE void sort_11v_descending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08, __m256i& d09, __m256i& d10, __m256i& d11) { + __m256i tmp; + + sort_08v_descending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_03v_ascending(d09, d10, d11); + + tmp = d09; + + d09 = _mm256_max_epi32(d08, d09); + d08 = _mm256_min_epi32(d08, tmp); + + tmp = d10; + + d10 = _mm256_max_epi32(d07, d10); + d07 = _mm256_min_epi32(d07, tmp); + + tmp = d11; + + d11 = _mm256_max_epi32(d06, d11); + d06 = _mm256_min_epi32(d06, tmp); + + sort_08v_merge_descending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_03v_merge_descending(d09, d10, d11); + } + static INLINE void sort_12v_ascending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08, __m256i& d09, __m256i& d10, __m256i& d11, __m256i& d12) { + __m256i tmp; + + sort_08v_ascending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_04v_descending(d09, d10, d11, d12); + + tmp = d09; + + d09 = _mm256_max_epi32(d08, d09); + d08 = _mm256_min_epi32(d08, tmp); + + tmp = d10; + + d10 = _mm256_max_epi32(d07, d10); + d07 = _mm256_min_epi32(d07, tmp); + + tmp = d11; + + d11 = _mm256_max_epi32(d06, d11); + d06 = _mm256_min_epi32(d06, tmp); + + tmp = d12; + + d12 = _mm256_max_epi32(d05, d12); + d05 = _mm256_min_epi32(d05, tmp); + + sort_08v_merge_ascending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_04v_merge_ascending(d09, d10, d11, d12); + } + static INLINE void sort_12v_descending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08, __m256i& d09, __m256i& d10, __m256i& d11, __m256i& d12) { + __m256i tmp; + + sort_08v_descending(d01, d02, d03, d04, d05, d06, 
d07, d08); + sort_04v_ascending(d09, d10, d11, d12); + + tmp = d09; + + d09 = _mm256_max_epi32(d08, d09); + d08 = _mm256_min_epi32(d08, tmp); + + tmp = d10; + + d10 = _mm256_max_epi32(d07, d10); + d07 = _mm256_min_epi32(d07, tmp); + + tmp = d11; + + d11 = _mm256_max_epi32(d06, d11); + d06 = _mm256_min_epi32(d06, tmp); + + tmp = d12; + + d12 = _mm256_max_epi32(d05, d12); + d05 = _mm256_min_epi32(d05, tmp); + + sort_08v_merge_descending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_04v_merge_descending(d09, d10, d11, d12); + } + static INLINE void sort_13v_ascending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08, __m256i& d09, __m256i& d10, __m256i& d11, __m256i& d12, __m256i& d13) { + __m256i tmp; + + sort_08v_ascending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_05v_descending(d09, d10, d11, d12, d13); + + tmp = d09; + + d09 = _mm256_max_epi32(d08, d09); + d08 = _mm256_min_epi32(d08, tmp); + + tmp = d10; + + d10 = _mm256_max_epi32(d07, d10); + d07 = _mm256_min_epi32(d07, tmp); + + tmp = d11; + + d11 = _mm256_max_epi32(d06, d11); + d06 = _mm256_min_epi32(d06, tmp); + + tmp = d12; + + d12 = _mm256_max_epi32(d05, d12); + d05 = _mm256_min_epi32(d05, tmp); + + tmp = d13; + + d13 = _mm256_max_epi32(d04, d13); + d04 = _mm256_min_epi32(d04, tmp); + + sort_08v_merge_ascending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_05v_merge_ascending(d09, d10, d11, d12, d13); + } + static INLINE void sort_13v_descending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08, __m256i& d09, __m256i& d10, __m256i& d11, __m256i& d12, __m256i& d13) { + __m256i tmp; + + sort_08v_descending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_05v_ascending(d09, d10, d11, d12, d13); + + tmp = d09; + + d09 = _mm256_max_epi32(d08, d09); + d08 = _mm256_min_epi32(d08, tmp); + + tmp = d10; + + d10 = _mm256_max_epi32(d07, d10); + d07 = _mm256_min_epi32(d07, tmp); + + tmp = d11; + + d11 = _mm256_max_epi32(d06, d11); + d06 = _mm256_min_epi32(d06, tmp); + + tmp = d12; + + d12 = _mm256_max_epi32(d05, d12); + d05 = _mm256_min_epi32(d05, tmp); + + tmp = d13; + + d13 = _mm256_max_epi32(d04, d13); + d04 = _mm256_min_epi32(d04, tmp); + + sort_08v_merge_descending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_05v_merge_descending(d09, d10, d11, d12, d13); + } + static INLINE void sort_14v_ascending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08, __m256i& d09, __m256i& d10, __m256i& d11, __m256i& d12, __m256i& d13, __m256i& d14) { + __m256i tmp; + + sort_08v_ascending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_06v_descending(d09, d10, d11, d12, d13, d14); + + tmp = d09; + + d09 = _mm256_max_epi32(d08, d09); + d08 = _mm256_min_epi32(d08, tmp); + + tmp = d10; + + d10 = _mm256_max_epi32(d07, d10); + d07 = _mm256_min_epi32(d07, tmp); + + tmp = d11; + + d11 = _mm256_max_epi32(d06, d11); + d06 = _mm256_min_epi32(d06, tmp); + + tmp = d12; + + d12 = _mm256_max_epi32(d05, d12); + d05 = _mm256_min_epi32(d05, tmp); + + tmp = d13; + + d13 = _mm256_max_epi32(d04, d13); + d04 = _mm256_min_epi32(d04, tmp); + + tmp = d14; + + d14 = _mm256_max_epi32(d03, d14); + d03 = _mm256_min_epi32(d03, tmp); + + sort_08v_merge_ascending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_06v_merge_ascending(d09, d10, d11, d12, d13, d14); + } + static INLINE void sort_14v_descending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& 
d07, __m256i& d08, __m256i& d09, __m256i& d10, __m256i& d11, __m256i& d12, __m256i& d13, __m256i& d14) { + __m256i tmp; + + sort_08v_descending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_06v_ascending(d09, d10, d11, d12, d13, d14); + + tmp = d09; + + d09 = _mm256_max_epi32(d08, d09); + d08 = _mm256_min_epi32(d08, tmp); + + tmp = d10; + + d10 = _mm256_max_epi32(d07, d10); + d07 = _mm256_min_epi32(d07, tmp); + + tmp = d11; + + d11 = _mm256_max_epi32(d06, d11); + d06 = _mm256_min_epi32(d06, tmp); + + tmp = d12; + + d12 = _mm256_max_epi32(d05, d12); + d05 = _mm256_min_epi32(d05, tmp); + + tmp = d13; + + d13 = _mm256_max_epi32(d04, d13); + d04 = _mm256_min_epi32(d04, tmp); + + tmp = d14; + + d14 = _mm256_max_epi32(d03, d14); + d03 = _mm256_min_epi32(d03, tmp); + + sort_08v_merge_descending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_06v_merge_descending(d09, d10, d11, d12, d13, d14); + } + static INLINE void sort_15v_ascending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08, __m256i& d09, __m256i& d10, __m256i& d11, __m256i& d12, __m256i& d13, __m256i& d14, __m256i& d15) { + __m256i tmp; + + sort_08v_ascending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_07v_descending(d09, d10, d11, d12, d13, d14, d15); + + tmp = d09; + + d09 = _mm256_max_epi32(d08, d09); + d08 = _mm256_min_epi32(d08, tmp); + + tmp = d10; + + d10 = _mm256_max_epi32(d07, d10); + d07 = _mm256_min_epi32(d07, tmp); + + tmp = d11; + + d11 = _mm256_max_epi32(d06, d11); + d06 = _mm256_min_epi32(d06, tmp); + + tmp = d12; + + d12 = _mm256_max_epi32(d05, d12); + d05 = _mm256_min_epi32(d05, tmp); + + tmp = d13; + + d13 = _mm256_max_epi32(d04, d13); + d04 = _mm256_min_epi32(d04, tmp); + + tmp = d14; + + d14 = _mm256_max_epi32(d03, d14); + d03 = _mm256_min_epi32(d03, tmp); + + tmp = d15; + + d15 = _mm256_max_epi32(d02, d15); + d02 = _mm256_min_epi32(d02, tmp); + + sort_08v_merge_ascending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_07v_merge_ascending(d09, d10, d11, d12, d13, d14, d15); + } + static INLINE void sort_15v_descending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08, __m256i& d09, __m256i& d10, __m256i& d11, __m256i& d12, __m256i& d13, __m256i& d14, __m256i& d15) { + __m256i tmp; + + sort_08v_descending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_07v_ascending(d09, d10, d11, d12, d13, d14, d15); + + tmp = d09; + + d09 = _mm256_max_epi32(d08, d09); + d08 = _mm256_min_epi32(d08, tmp); + + tmp = d10; + + d10 = _mm256_max_epi32(d07, d10); + d07 = _mm256_min_epi32(d07, tmp); + + tmp = d11; + + d11 = _mm256_max_epi32(d06, d11); + d06 = _mm256_min_epi32(d06, tmp); + + tmp = d12; + + d12 = _mm256_max_epi32(d05, d12); + d05 = _mm256_min_epi32(d05, tmp); + + tmp = d13; + + d13 = _mm256_max_epi32(d04, d13); + d04 = _mm256_min_epi32(d04, tmp); + + tmp = d14; + + d14 = _mm256_max_epi32(d03, d14); + d03 = _mm256_min_epi32(d03, tmp); + + tmp = d15; + + d15 = _mm256_max_epi32(d02, d15); + d02 = _mm256_min_epi32(d02, tmp); + + sort_08v_merge_descending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_07v_merge_descending(d09, d10, d11, d12, d13, d14, d15); + } + static INLINE void sort_16v_ascending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08, __m256i& d09, __m256i& d10, __m256i& d11, __m256i& d12, __m256i& d13, __m256i& d14, __m256i& d15, __m256i& d16) { + __m256i tmp; + + sort_08v_ascending(d01, d02, d03, d04, d05, d06, d07, d08); + 
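+ // (The eight registers above were sorted ascending and the next eight
+ // are sorted descending, so all 16 registers, 128 int32_t keys, form
+ // one bitonic sequence; the min/max exchanges below pair d09..d16 with
+ // d08..d01 in reverse order, after which each half is completed by an
+ // independent 8-register merge.)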
sort_08v_descending(d09, d10, d11, d12, d13, d14, d15, d16); + + tmp = d09; + + d09 = _mm256_max_epi32(d08, d09); + d08 = _mm256_min_epi32(d08, tmp); + + tmp = d10; + + d10 = _mm256_max_epi32(d07, d10); + d07 = _mm256_min_epi32(d07, tmp); + + tmp = d11; + + d11 = _mm256_max_epi32(d06, d11); + d06 = _mm256_min_epi32(d06, tmp); + + tmp = d12; + + d12 = _mm256_max_epi32(d05, d12); + d05 = _mm256_min_epi32(d05, tmp); + + tmp = d13; + + d13 = _mm256_max_epi32(d04, d13); + d04 = _mm256_min_epi32(d04, tmp); + + tmp = d14; + + d14 = _mm256_max_epi32(d03, d14); + d03 = _mm256_min_epi32(d03, tmp); + + tmp = d15; + + d15 = _mm256_max_epi32(d02, d15); + d02 = _mm256_min_epi32(d02, tmp); + + tmp = d16; + + d16 = _mm256_max_epi32(d01, d16); + d01 = _mm256_min_epi32(d01, tmp); + + sort_08v_merge_ascending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_08v_merge_ascending(d09, d10, d11, d12, d13, d14, d15, d16); + } + static INLINE void sort_16v_descending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08, __m256i& d09, __m256i& d10, __m256i& d11, __m256i& d12, __m256i& d13, __m256i& d14, __m256i& d15, __m256i& d16) { + __m256i tmp; + + sort_08v_descending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_08v_ascending(d09, d10, d11, d12, d13, d14, d15, d16); + + tmp = d09; + + d09 = _mm256_max_epi32(d08, d09); + d08 = _mm256_min_epi32(d08, tmp); + + tmp = d10; + + d10 = _mm256_max_epi32(d07, d10); + d07 = _mm256_min_epi32(d07, tmp); + + tmp = d11; + + d11 = _mm256_max_epi32(d06, d11); + d06 = _mm256_min_epi32(d06, tmp); + + tmp = d12; + + d12 = _mm256_max_epi32(d05, d12); + d05 = _mm256_min_epi32(d05, tmp); + + tmp = d13; + + d13 = _mm256_max_epi32(d04, d13); + d04 = _mm256_min_epi32(d04, tmp); + + tmp = d14; + + d14 = _mm256_max_epi32(d03, d14); + d03 = _mm256_min_epi32(d03, tmp); + + tmp = d15; + + d15 = _mm256_max_epi32(d02, d15); + d02 = _mm256_min_epi32(d02, tmp); + + tmp = d16; + + d16 = _mm256_max_epi32(d01, d16); + d01 = _mm256_min_epi32(d01, tmp); + + sort_08v_merge_descending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_08v_merge_descending(d09, d10, d11, d12, d13, d14, d15, d16); + } + +static NOINLINE void sort_01v(int32_t *ptr) { + __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0);; + sort_01v_ascending(d01); + _mm256_storeu_si256((__m256i *) ptr + 0, d01); +} + +static NOINLINE void sort_02v(int32_t *ptr) { + __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0);; + __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1);; + sort_02v_ascending(d01, d02); + _mm256_storeu_si256((__m256i *) ptr + 0, d01); + _mm256_storeu_si256((__m256i *) ptr + 1, d02); +} + +static NOINLINE void sort_03v(int32_t *ptr) { + __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0);; + __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1);; + __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2);; + sort_03v_ascending(d01, d02, d03); + _mm256_storeu_si256((__m256i *) ptr + 0, d01); + _mm256_storeu_si256((__m256i *) ptr + 1, d02); + _mm256_storeu_si256((__m256i *) ptr + 2, d03); +} + +static NOINLINE void sort_04v(int32_t *ptr) { + __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0);; + __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1);; + __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2);; + __m256i d04 = _mm256_lddqu_si256((__m256i const *) ptr + 3);; + sort_04v_ascending(d01, d02, d03, d04); + _mm256_storeu_si256((__m256i *) ptr + 0, d01); + _mm256_storeu_si256((__m256i *) ptr + 1, d02); 
+ _mm256_storeu_si256((__m256i *) ptr + 2, d03); + _mm256_storeu_si256((__m256i *) ptr + 3, d04); +} + +static NOINLINE void sort_05v(int32_t *ptr) { + __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0);; + __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1);; + __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2);; + __m256i d04 = _mm256_lddqu_si256((__m256i const *) ptr + 3);; + __m256i d05 = _mm256_lddqu_si256((__m256i const *) ptr + 4);; + sort_05v_ascending(d01, d02, d03, d04, d05); + _mm256_storeu_si256((__m256i *) ptr + 0, d01); + _mm256_storeu_si256((__m256i *) ptr + 1, d02); + _mm256_storeu_si256((__m256i *) ptr + 2, d03); + _mm256_storeu_si256((__m256i *) ptr + 3, d04); + _mm256_storeu_si256((__m256i *) ptr + 4, d05); +} + +static NOINLINE void sort_06v(int32_t *ptr) { + __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0);; + __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1);; + __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2);; + __m256i d04 = _mm256_lddqu_si256((__m256i const *) ptr + 3);; + __m256i d05 = _mm256_lddqu_si256((__m256i const *) ptr + 4);; + __m256i d06 = _mm256_lddqu_si256((__m256i const *) ptr + 5);; + sort_06v_ascending(d01, d02, d03, d04, d05, d06); + _mm256_storeu_si256((__m256i *) ptr + 0, d01); + _mm256_storeu_si256((__m256i *) ptr + 1, d02); + _mm256_storeu_si256((__m256i *) ptr + 2, d03); + _mm256_storeu_si256((__m256i *) ptr + 3, d04); + _mm256_storeu_si256((__m256i *) ptr + 4, d05); + _mm256_storeu_si256((__m256i *) ptr + 5, d06); +} + +static NOINLINE void sort_07v(int32_t *ptr) { + __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0);; + __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1);; + __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2);; + __m256i d04 = _mm256_lddqu_si256((__m256i const *) ptr + 3);; + __m256i d05 = _mm256_lddqu_si256((__m256i const *) ptr + 4);; + __m256i d06 = _mm256_lddqu_si256((__m256i const *) ptr + 5);; + __m256i d07 = _mm256_lddqu_si256((__m256i const *) ptr + 6);; + sort_07v_ascending(d01, d02, d03, d04, d05, d06, d07); + _mm256_storeu_si256((__m256i *) ptr + 0, d01); + _mm256_storeu_si256((__m256i *) ptr + 1, d02); + _mm256_storeu_si256((__m256i *) ptr + 2, d03); + _mm256_storeu_si256((__m256i *) ptr + 3, d04); + _mm256_storeu_si256((__m256i *) ptr + 4, d05); + _mm256_storeu_si256((__m256i *) ptr + 5, d06); + _mm256_storeu_si256((__m256i *) ptr + 6, d07); +} + +static NOINLINE void sort_08v(int32_t *ptr) { + __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0);; + __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1);; + __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2);; + __m256i d04 = _mm256_lddqu_si256((__m256i const *) ptr + 3);; + __m256i d05 = _mm256_lddqu_si256((__m256i const *) ptr + 4);; + __m256i d06 = _mm256_lddqu_si256((__m256i const *) ptr + 5);; + __m256i d07 = _mm256_lddqu_si256((__m256i const *) ptr + 6);; + __m256i d08 = _mm256_lddqu_si256((__m256i const *) ptr + 7);; + sort_08v_ascending(d01, d02, d03, d04, d05, d06, d07, d08); + _mm256_storeu_si256((__m256i *) ptr + 0, d01); + _mm256_storeu_si256((__m256i *) ptr + 1, d02); + _mm256_storeu_si256((__m256i *) ptr + 2, d03); + _mm256_storeu_si256((__m256i *) ptr + 3, d04); + _mm256_storeu_si256((__m256i *) ptr + 4, d05); + _mm256_storeu_si256((__m256i *) ptr + 5, d06); + _mm256_storeu_si256((__m256i *) ptr + 6, d07); + _mm256_storeu_si256((__m256i *) ptr + 7, d08); +} + +static NOINLINE void sort_09v(int32_t *ptr) { + __m256i d01 = 
_mm256_lddqu_si256((__m256i const *) ptr + 0);; + __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1);; + __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2);; + __m256i d04 = _mm256_lddqu_si256((__m256i const *) ptr + 3);; + __m256i d05 = _mm256_lddqu_si256((__m256i const *) ptr + 4);; + __m256i d06 = _mm256_lddqu_si256((__m256i const *) ptr + 5);; + __m256i d07 = _mm256_lddqu_si256((__m256i const *) ptr + 6);; + __m256i d08 = _mm256_lddqu_si256((__m256i const *) ptr + 7);; + __m256i d09 = _mm256_lddqu_si256((__m256i const *) ptr + 8);; + sort_09v_ascending(d01, d02, d03, d04, d05, d06, d07, d08, d09); + _mm256_storeu_si256((__m256i *) ptr + 0, d01); + _mm256_storeu_si256((__m256i *) ptr + 1, d02); + _mm256_storeu_si256((__m256i *) ptr + 2, d03); + _mm256_storeu_si256((__m256i *) ptr + 3, d04); + _mm256_storeu_si256((__m256i *) ptr + 4, d05); + _mm256_storeu_si256((__m256i *) ptr + 5, d06); + _mm256_storeu_si256((__m256i *) ptr + 6, d07); + _mm256_storeu_si256((__m256i *) ptr + 7, d08); + _mm256_storeu_si256((__m256i *) ptr + 8, d09); +} + +static NOINLINE void sort_10v(int32_t *ptr) { + __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0);; + __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1);; + __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2);; + __m256i d04 = _mm256_lddqu_si256((__m256i const *) ptr + 3);; + __m256i d05 = _mm256_lddqu_si256((__m256i const *) ptr + 4);; + __m256i d06 = _mm256_lddqu_si256((__m256i const *) ptr + 5);; + __m256i d07 = _mm256_lddqu_si256((__m256i const *) ptr + 6);; + __m256i d08 = _mm256_lddqu_si256((__m256i const *) ptr + 7);; + __m256i d09 = _mm256_lddqu_si256((__m256i const *) ptr + 8);; + __m256i d10 = _mm256_lddqu_si256((__m256i const *) ptr + 9);; + sort_10v_ascending(d01, d02, d03, d04, d05, d06, d07, d08, d09, d10); + _mm256_storeu_si256((__m256i *) ptr + 0, d01); + _mm256_storeu_si256((__m256i *) ptr + 1, d02); + _mm256_storeu_si256((__m256i *) ptr + 2, d03); + _mm256_storeu_si256((__m256i *) ptr + 3, d04); + _mm256_storeu_si256((__m256i *) ptr + 4, d05); + _mm256_storeu_si256((__m256i *) ptr + 5, d06); + _mm256_storeu_si256((__m256i *) ptr + 6, d07); + _mm256_storeu_si256((__m256i *) ptr + 7, d08); + _mm256_storeu_si256((__m256i *) ptr + 8, d09); + _mm256_storeu_si256((__m256i *) ptr + 9, d10); +} + +static NOINLINE void sort_11v(int32_t *ptr) { + __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0);; + __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1);; + __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2);; + __m256i d04 = _mm256_lddqu_si256((__m256i const *) ptr + 3);; + __m256i d05 = _mm256_lddqu_si256((__m256i const *) ptr + 4);; + __m256i d06 = _mm256_lddqu_si256((__m256i const *) ptr + 5);; + __m256i d07 = _mm256_lddqu_si256((__m256i const *) ptr + 6);; + __m256i d08 = _mm256_lddqu_si256((__m256i const *) ptr + 7);; + __m256i d09 = _mm256_lddqu_si256((__m256i const *) ptr + 8);; + __m256i d10 = _mm256_lddqu_si256((__m256i const *) ptr + 9);; + __m256i d11 = _mm256_lddqu_si256((__m256i const *) ptr + 10);; + sort_11v_ascending(d01, d02, d03, d04, d05, d06, d07, d08, d09, d10, d11); + _mm256_storeu_si256((__m256i *) ptr + 0, d01); + _mm256_storeu_si256((__m256i *) ptr + 1, d02); + _mm256_storeu_si256((__m256i *) ptr + 2, d03); + _mm256_storeu_si256((__m256i *) ptr + 3, d04); + _mm256_storeu_si256((__m256i *) ptr + 4, d05); + _mm256_storeu_si256((__m256i *) ptr + 5, d06); + _mm256_storeu_si256((__m256i *) ptr + 6, d07); + _mm256_storeu_si256((__m256i *) ptr + 7, d08); + 
_mm256_storeu_si256((__m256i *) ptr + 8, d09); + _mm256_storeu_si256((__m256i *) ptr + 9, d10); + _mm256_storeu_si256((__m256i *) ptr + 10, d11); +} + +static NOINLINE void sort_12v(int32_t *ptr) { + __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0);; + __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1);; + __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2);; + __m256i d04 = _mm256_lddqu_si256((__m256i const *) ptr + 3);; + __m256i d05 = _mm256_lddqu_si256((__m256i const *) ptr + 4);; + __m256i d06 = _mm256_lddqu_si256((__m256i const *) ptr + 5);; + __m256i d07 = _mm256_lddqu_si256((__m256i const *) ptr + 6);; + __m256i d08 = _mm256_lddqu_si256((__m256i const *) ptr + 7);; + __m256i d09 = _mm256_lddqu_si256((__m256i const *) ptr + 8);; + __m256i d10 = _mm256_lddqu_si256((__m256i const *) ptr + 9);; + __m256i d11 = _mm256_lddqu_si256((__m256i const *) ptr + 10);; + __m256i d12 = _mm256_lddqu_si256((__m256i const *) ptr + 11);; + sort_12v_ascending(d01, d02, d03, d04, d05, d06, d07, d08, d09, d10, d11, d12); + _mm256_storeu_si256((__m256i *) ptr + 0, d01); + _mm256_storeu_si256((__m256i *) ptr + 1, d02); + _mm256_storeu_si256((__m256i *) ptr + 2, d03); + _mm256_storeu_si256((__m256i *) ptr + 3, d04); + _mm256_storeu_si256((__m256i *) ptr + 4, d05); + _mm256_storeu_si256((__m256i *) ptr + 5, d06); + _mm256_storeu_si256((__m256i *) ptr + 6, d07); + _mm256_storeu_si256((__m256i *) ptr + 7, d08); + _mm256_storeu_si256((__m256i *) ptr + 8, d09); + _mm256_storeu_si256((__m256i *) ptr + 9, d10); + _mm256_storeu_si256((__m256i *) ptr + 10, d11); + _mm256_storeu_si256((__m256i *) ptr + 11, d12); +} + +static NOINLINE void sort_13v(int32_t *ptr) { + __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0);; + __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1);; + __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2);; + __m256i d04 = _mm256_lddqu_si256((__m256i const *) ptr + 3);; + __m256i d05 = _mm256_lddqu_si256((__m256i const *) ptr + 4);; + __m256i d06 = _mm256_lddqu_si256((__m256i const *) ptr + 5);; + __m256i d07 = _mm256_lddqu_si256((__m256i const *) ptr + 6);; + __m256i d08 = _mm256_lddqu_si256((__m256i const *) ptr + 7);; + __m256i d09 = _mm256_lddqu_si256((__m256i const *) ptr + 8);; + __m256i d10 = _mm256_lddqu_si256((__m256i const *) ptr + 9);; + __m256i d11 = _mm256_lddqu_si256((__m256i const *) ptr + 10);; + __m256i d12 = _mm256_lddqu_si256((__m256i const *) ptr + 11);; + __m256i d13 = _mm256_lddqu_si256((__m256i const *) ptr + 12);; + sort_13v_ascending(d01, d02, d03, d04, d05, d06, d07, d08, d09, d10, d11, d12, d13); + _mm256_storeu_si256((__m256i *) ptr + 0, d01); + _mm256_storeu_si256((__m256i *) ptr + 1, d02); + _mm256_storeu_si256((__m256i *) ptr + 2, d03); + _mm256_storeu_si256((__m256i *) ptr + 3, d04); + _mm256_storeu_si256((__m256i *) ptr + 4, d05); + _mm256_storeu_si256((__m256i *) ptr + 5, d06); + _mm256_storeu_si256((__m256i *) ptr + 6, d07); + _mm256_storeu_si256((__m256i *) ptr + 7, d08); + _mm256_storeu_si256((__m256i *) ptr + 8, d09); + _mm256_storeu_si256((__m256i *) ptr + 9, d10); + _mm256_storeu_si256((__m256i *) ptr + 10, d11); + _mm256_storeu_si256((__m256i *) ptr + 11, d12); + _mm256_storeu_si256((__m256i *) ptr + 12, d13); +} + +static NOINLINE void sort_14v(int32_t *ptr) { + __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0);; + __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1);; + __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2);; + __m256i d04 = _mm256_lddqu_si256((__m256i const *) ptr 
+ 3);; + __m256i d05 = _mm256_lddqu_si256((__m256i const *) ptr + 4);; + __m256i d06 = _mm256_lddqu_si256((__m256i const *) ptr + 5);; + __m256i d07 = _mm256_lddqu_si256((__m256i const *) ptr + 6);; + __m256i d08 = _mm256_lddqu_si256((__m256i const *) ptr + 7);; + __m256i d09 = _mm256_lddqu_si256((__m256i const *) ptr + 8);; + __m256i d10 = _mm256_lddqu_si256((__m256i const *) ptr + 9);; + __m256i d11 = _mm256_lddqu_si256((__m256i const *) ptr + 10);; + __m256i d12 = _mm256_lddqu_si256((__m256i const *) ptr + 11);; + __m256i d13 = _mm256_lddqu_si256((__m256i const *) ptr + 12);; + __m256i d14 = _mm256_lddqu_si256((__m256i const *) ptr + 13);; + sort_14v_ascending(d01, d02, d03, d04, d05, d06, d07, d08, d09, d10, d11, d12, d13, d14); + _mm256_storeu_si256((__m256i *) ptr + 0, d01); + _mm256_storeu_si256((__m256i *) ptr + 1, d02); + _mm256_storeu_si256((__m256i *) ptr + 2, d03); + _mm256_storeu_si256((__m256i *) ptr + 3, d04); + _mm256_storeu_si256((__m256i *) ptr + 4, d05); + _mm256_storeu_si256((__m256i *) ptr + 5, d06); + _mm256_storeu_si256((__m256i *) ptr + 6, d07); + _mm256_storeu_si256((__m256i *) ptr + 7, d08); + _mm256_storeu_si256((__m256i *) ptr + 8, d09); + _mm256_storeu_si256((__m256i *) ptr + 9, d10); + _mm256_storeu_si256((__m256i *) ptr + 10, d11); + _mm256_storeu_si256((__m256i *) ptr + 11, d12); + _mm256_storeu_si256((__m256i *) ptr + 12, d13); + _mm256_storeu_si256((__m256i *) ptr + 13, d14); +} + +static NOINLINE void sort_15v(int32_t *ptr) { + __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0);; + __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1);; + __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2);; + __m256i d04 = _mm256_lddqu_si256((__m256i const *) ptr + 3);; + __m256i d05 = _mm256_lddqu_si256((__m256i const *) ptr + 4);; + __m256i d06 = _mm256_lddqu_si256((__m256i const *) ptr + 5);; + __m256i d07 = _mm256_lddqu_si256((__m256i const *) ptr + 6);; + __m256i d08 = _mm256_lddqu_si256((__m256i const *) ptr + 7);; + __m256i d09 = _mm256_lddqu_si256((__m256i const *) ptr + 8);; + __m256i d10 = _mm256_lddqu_si256((__m256i const *) ptr + 9);; + __m256i d11 = _mm256_lddqu_si256((__m256i const *) ptr + 10);; + __m256i d12 = _mm256_lddqu_si256((__m256i const *) ptr + 11);; + __m256i d13 = _mm256_lddqu_si256((__m256i const *) ptr + 12);; + __m256i d14 = _mm256_lddqu_si256((__m256i const *) ptr + 13);; + __m256i d15 = _mm256_lddqu_si256((__m256i const *) ptr + 14);; + sort_15v_ascending(d01, d02, d03, d04, d05, d06, d07, d08, d09, d10, d11, d12, d13, d14, d15); + _mm256_storeu_si256((__m256i *) ptr + 0, d01); + _mm256_storeu_si256((__m256i *) ptr + 1, d02); + _mm256_storeu_si256((__m256i *) ptr + 2, d03); + _mm256_storeu_si256((__m256i *) ptr + 3, d04); + _mm256_storeu_si256((__m256i *) ptr + 4, d05); + _mm256_storeu_si256((__m256i *) ptr + 5, d06); + _mm256_storeu_si256((__m256i *) ptr + 6, d07); + _mm256_storeu_si256((__m256i *) ptr + 7, d08); + _mm256_storeu_si256((__m256i *) ptr + 8, d09); + _mm256_storeu_si256((__m256i *) ptr + 9, d10); + _mm256_storeu_si256((__m256i *) ptr + 10, d11); + _mm256_storeu_si256((__m256i *) ptr + 11, d12); + _mm256_storeu_si256((__m256i *) ptr + 12, d13); + _mm256_storeu_si256((__m256i *) ptr + 13, d14); + _mm256_storeu_si256((__m256i *) ptr + 14, d15); +} + +static NOINLINE void sort_16v(int32_t *ptr) { + __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0);; + __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1);; + __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2);; + __m256i d04 = 
_mm256_lddqu_si256((__m256i const *) ptr + 3);;
+ __m256i d05 = _mm256_lddqu_si256((__m256i const *) ptr + 4);;
+ __m256i d06 = _mm256_lddqu_si256((__m256i const *) ptr + 5);;
+ __m256i d07 = _mm256_lddqu_si256((__m256i const *) ptr + 6);;
+ __m256i d08 = _mm256_lddqu_si256((__m256i const *) ptr + 7);;
+ __m256i d09 = _mm256_lddqu_si256((__m256i const *) ptr + 8);;
+ __m256i d10 = _mm256_lddqu_si256((__m256i const *) ptr + 9);;
+ __m256i d11 = _mm256_lddqu_si256((__m256i const *) ptr + 10);;
+ __m256i d12 = _mm256_lddqu_si256((__m256i const *) ptr + 11);;
+ __m256i d13 = _mm256_lddqu_si256((__m256i const *) ptr + 12);;
+ __m256i d14 = _mm256_lddqu_si256((__m256i const *) ptr + 13);;
+ __m256i d15 = _mm256_lddqu_si256((__m256i const *) ptr + 14);;
+ __m256i d16 = _mm256_lddqu_si256((__m256i const *) ptr + 15);;
+ sort_16v_ascending(d01, d02, d03, d04, d05, d06, d07, d08, d09, d10, d11, d12, d13, d14, d15, d16);
+ _mm256_storeu_si256((__m256i *) ptr + 0, d01);
+ _mm256_storeu_si256((__m256i *) ptr + 1, d02);
+ _mm256_storeu_si256((__m256i *) ptr + 2, d03);
+ _mm256_storeu_si256((__m256i *) ptr + 3, d04);
+ _mm256_storeu_si256((__m256i *) ptr + 4, d05);
+ _mm256_storeu_si256((__m256i *) ptr + 5, d06);
+ _mm256_storeu_si256((__m256i *) ptr + 6, d07);
+ _mm256_storeu_si256((__m256i *) ptr + 7, d08);
+ _mm256_storeu_si256((__m256i *) ptr + 8, d09);
+ _mm256_storeu_si256((__m256i *) ptr + 9, d10);
+ _mm256_storeu_si256((__m256i *) ptr + 10, d11);
+ _mm256_storeu_si256((__m256i *) ptr + 11, d12);
+ _mm256_storeu_si256((__m256i *) ptr + 12, d13);
+ _mm256_storeu_si256((__m256i *) ptr + 13, d14);
+ _mm256_storeu_si256((__m256i *) ptr + 14, d15);
+ _mm256_storeu_si256((__m256i *) ptr + 15, d16);
+}
+ static void sort(int32_t *ptr, size_t length) {
+ const int N = 8;
+
+ switch(length / N) {
+ case 1: sort_01v(ptr); break;
+ case 2: sort_02v(ptr); break;
+ case 3: sort_03v(ptr); break;
+ case 4: sort_04v(ptr); break;
+ case 5: sort_05v(ptr); break;
+ case 6: sort_06v(ptr); break;
+ case 7: sort_07v(ptr); break;
+ case 8: sort_08v(ptr); break;
+ case 9: sort_09v(ptr); break;
+ case 10: sort_10v(ptr); break;
+ case 11: sort_11v(ptr); break;
+ case 12: sort_12v(ptr); break;
+ case 13: sort_13v(ptr); break;
+ case 14: sort_14v(ptr); break;
+ case 15: sort_15v(ptr); break;
+ case 16: sort_16v(ptr); break;
+ }
+}
+};
+}
+}
+#endif
diff --git a/src/coreclr/src/gc/bitonic_sort.int64_t.generated.h b/src/coreclr/src/gc/smallsort/bitonic_sort.AVX2.int64_t.generated.h
similarity index 97%
rename from src/coreclr/src/gc/bitonic_sort.int64_t.generated.h
rename to src/coreclr/src/gc/smallsort/bitonic_sort.AVX2.int64_t.generated.h
index f6b4327494fed2..782b44d673914b 100644
--- a/src/coreclr/src/gc/bitonic_sort.int64_t.generated.h
+++ b/src/coreclr/src/gc/smallsort/bitonic_sort.AVX2.int64_t.generated.h
@@ -1,10 +1,17 @@
+/////////////////////////////////////////////////////////////////////////////
+////
+// This file was auto-generated by a tool at 2020-05-31 19:46:17
+//
+// It is recommended you DO NOT directly edit this file but instead edit
+// the code-generator that generated this source file.
+/////////////////////////////////////////////////////////////////////////////
-#ifndef BITONIC_SORT_INT64_T_H
-#define BITONIC_SORT_INT64_T_H
+#ifndef BITONIC_SORT_AVX2_INT64_T_H
+#define BITONIC_SORT_AVX2_INT64_T_H
 #include <immintrin.h>
 #include "bitonic_sort.h"
-
+
 #ifdef _MSC_VER
 // MSVC
 #define INLINE __forceinline
@@ -17,6 +24,10 @@
 #define i2d _mm256_castsi256_pd
 #define d2i _mm256_castpd_si256
+#define i2s _mm256_castsi256_ps
+#define s2i _mm256_castps_si256
+#define s2d _mm256_castps_pd
+#define d2s _mm256_castpd_ps
 namespace gcsort {
 namespace smallsort {
@@ -58,7 +69,7 @@ template<> struct bitonic<int64_t> {
 min = d2i(_mm256_blendv_pd(i2d(s), i2d(d01), i2d(cmp)));
 max = d2i(_mm256_blendv_pd(i2d(d01), i2d(s), i2d(cmp)));
 d01 = d2i(_mm256_blend_pd(i2d(min), i2d(max), 0xA));
-}
+ }
 static INLINE void sort_01v_descending(__m256i& d01) {
 __m256i min, max, s, cmp;
@@ -94,7 +105,7 @@ template<> struct bitonic<int64_t> {
 min = d2i(_mm256_blendv_pd(i2d(s), i2d(d01), i2d(cmp)));
 max = d2i(_mm256_blendv_pd(i2d(d01), i2d(s), i2d(cmp)));
 d01 = d2i(_mm256_blend_pd(i2d(max), i2d(min), 0xA));
-}
+ }
 static INLINE void sort_02v_ascending(__m256i& d01, __m256i& d02) {
 __m256i tmp, cmp;
@@ -108,7 +119,7 @@ template<> struct bitonic<int64_t> {
 sort_01v_merge_ascending(d01);
 sort_01v_merge_ascending(d02);
-}
+ }
 static INLINE void sort_02v_descending(__m256i& d01, __m256i& d02) {
 __m256i tmp, cmp;
@@ -122,7 +133,7 @@ template<> struct bitonic<int64_t> {
 sort_01v_merge_descending(d01);
 sort_01v_merge_descending(d02);
-}
+ }
 static INLINE void sort_02v_merge_ascending(__m256i& d01, __m256i& d02) {
 __m256i tmp, cmp;
@@ -134,7 +145,7 @@ template<> struct bitonic<int64_t> {
 sort_01v_merge_ascending(d01);
 sort_01v_merge_ascending(d02);
-}
+ }
 static INLINE void sort_02v_merge_descending(__m256i& d01, __m256i& d02) {
 __m256i tmp, cmp;
@@ -146,7 +157,7 @@ template<> struct bitonic<int64_t> {
 sort_01v_merge_descending(d01);
 sort_01v_merge_descending(d02);
-}
+ }
 static INLINE void sort_03v_ascending(__m256i& d01, __m256i& d02, __m256i& d03) {
 __m256i tmp, cmp;
@@ -160,7 +171,7 @@ template<> struct bitonic<int64_t> {
 sort_02v_merge_ascending(d01, d02);
 sort_01v_merge_ascending(d03);
-}
+ }
 static INLINE void sort_03v_descending(__m256i& d01, __m256i& d02, __m256i& d03) {
 __m256i tmp, cmp;
@@ -174,7 +185,7 @@ template<> struct bitonic<int64_t> {
 sort_02v_merge_descending(d01, d02);
 sort_01v_merge_descending(d03);
-}
+ }
 static INLINE void sort_03v_merge_ascending(__m256i& d01, __m256i& d02, __m256i& d03) {
 __m256i tmp, cmp;
@@ -186,7 +197,7 @@ template<> struct bitonic<int64_t> {
 sort_02v_merge_ascending(d01, d02);
 sort_01v_merge_ascending(d03);
-}
+ }
 static INLINE void sort_03v_merge_descending(__m256i& d01, __m256i& d02, __m256i& d03) {
 __m256i tmp, cmp;
@@ -198,7 +209,7 @@ template<> struct bitonic<int64_t> {
 sort_02v_merge_descending(d01, d02);
 sort_01v_merge_descending(d03);
-}
+ }
 static INLINE void sort_04v_ascending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04) {
 __m256i tmp, cmp;
@@ -217,7 +228,7 @@ template<> struct bitonic<int64_t> {
 sort_02v_merge_ascending(d01, d02);
 sort_02v_merge_ascending(d03, d04);
-}
+ }
 static INLINE void sort_04v_descending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04) {
 __m256i tmp, cmp;
@@ -236,7 +247,7 @@ template<> struct bitonic<int64_t> {
 sort_02v_merge_descending(d01, d02);
 sort_02v_merge_descending(d03, d04);
-}
+ }
 static INLINE void sort_04v_merge_ascending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04) {
 __m256i tmp, cmp;
@@ -254,7 +265,7 @@ template<> struct bitonic<int64_t> {
 sort_02v_merge_ascending(d01, d02);
 sort_02v_merge_ascending(d03, d04);
-}
+ }
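Throughout the 64-bit kernels touched by these hunks, each compare-exchange is synthesized from _mm256_cmpgt_epi64 followed by two blends, because AVX2 has no packed min/max for 64-bit lanes (unlike the epi32/epu32 variants used by the 32-bit kernels). A minimal standalone sketch of that idiom, using _mm256_blendv_epi8 rather than the i2d/d2i-cast _mm256_blendv_pd form in the generated code (names here are illustrative, not part of this patch):

#include <immintrin.h>

// Lane-wise compare-exchange for signed 64-bit keys: on return, each lane
// of 'lo' holds the smaller key and the same lane of 'hi' the larger one.
static inline void cmpxchg_epi64(__m256i& lo, __m256i& hi) {
    __m256i a = lo, b = hi;
    __m256i cmp = _mm256_cmpgt_epi64(a, b); // all-ones lanes where a > b
    lo = _mm256_blendv_epi8(a, b, cmp);     // take b where a was larger
    hi = _mm256_blendv_epi8(b, a, cmp);     // take a where a was larger
}

Because the compare mask is all-ones or all-zeros per 64-bit lane, a byte-granular blend and the double-precision blend used by the generated code are interchangeable here.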
static INLINE void sort_04v_merge_descending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04) { __m256i tmp, cmp; @@ -272,7 +283,7 @@ template<> struct bitonic { sort_02v_merge_descending(d01, d02); sort_02v_merge_descending(d03, d04); -} + } static INLINE void sort_05v_ascending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05) { __m256i tmp, cmp; @@ -286,7 +297,7 @@ template<> struct bitonic { sort_04v_merge_ascending(d01, d02, d03, d04); sort_01v_merge_ascending(d05); -} + } static INLINE void sort_05v_descending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05) { __m256i tmp, cmp; @@ -300,7 +311,7 @@ template<> struct bitonic { sort_04v_merge_descending(d01, d02, d03, d04); sort_01v_merge_descending(d05); -} + } static INLINE void sort_05v_merge_ascending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05) { __m256i tmp, cmp; @@ -312,7 +323,7 @@ template<> struct bitonic { sort_04v_merge_ascending(d01, d02, d03, d04); sort_01v_merge_ascending(d05); -} + } static INLINE void sort_05v_merge_descending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05) { __m256i tmp, cmp; @@ -324,7 +335,7 @@ template<> struct bitonic { sort_04v_merge_descending(d01, d02, d03, d04); sort_01v_merge_descending(d05); -} + } static INLINE void sort_06v_ascending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06) { __m256i tmp, cmp; @@ -343,7 +354,7 @@ template<> struct bitonic { sort_04v_merge_ascending(d01, d02, d03, d04); sort_02v_merge_ascending(d05, d06); -} + } static INLINE void sort_06v_descending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06) { __m256i tmp, cmp; @@ -362,7 +373,7 @@ template<> struct bitonic { sort_04v_merge_descending(d01, d02, d03, d04); sort_02v_merge_descending(d05, d06); -} + } static INLINE void sort_06v_merge_ascending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06) { __m256i tmp, cmp; @@ -380,7 +391,7 @@ template<> struct bitonic { sort_04v_merge_ascending(d01, d02, d03, d04); sort_02v_merge_ascending(d05, d06); -} + } static INLINE void sort_06v_merge_descending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06) { __m256i tmp, cmp; @@ -398,7 +409,7 @@ template<> struct bitonic { sort_04v_merge_descending(d01, d02, d03, d04); sort_02v_merge_descending(d05, d06); -} + } static INLINE void sort_07v_ascending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07) { __m256i tmp, cmp; @@ -422,7 +433,7 @@ template<> struct bitonic { sort_04v_merge_ascending(d01, d02, d03, d04); sort_03v_merge_ascending(d05, d06, d07); -} + } static INLINE void sort_07v_descending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07) { __m256i tmp, cmp; @@ -446,7 +457,7 @@ template<> struct bitonic { sort_04v_merge_descending(d01, d02, d03, d04); sort_03v_merge_descending(d05, d06, d07); -} + } static INLINE void sort_07v_merge_ascending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07) { __m256i tmp, cmp; @@ -470,7 +481,7 @@ template<> struct bitonic { sort_04v_merge_ascending(d01, d02, d03, d04); sort_03v_merge_ascending(d05, d06, d07); -} + } static INLINE void sort_07v_merge_descending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07) { __m256i tmp, cmp; @@ -494,7 +505,7 @@ template<> struct bitonic 
{ sort_04v_merge_descending(d01, d02, d03, d04); sort_03v_merge_descending(d05, d06, d07); -} + } static INLINE void sort_08v_ascending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08) { __m256i tmp, cmp; @@ -523,7 +534,7 @@ template<> struct bitonic { sort_04v_merge_ascending(d01, d02, d03, d04); sort_04v_merge_ascending(d05, d06, d07, d08); -} + } static INLINE void sort_08v_descending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08) { __m256i tmp, cmp; @@ -552,7 +563,7 @@ template<> struct bitonic { sort_04v_merge_descending(d01, d02, d03, d04); sort_04v_merge_descending(d05, d06, d07, d08); -} + } static INLINE void sort_08v_merge_ascending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08) { __m256i tmp, cmp; @@ -582,7 +593,7 @@ template<> struct bitonic { sort_04v_merge_ascending(d01, d02, d03, d04); sort_04v_merge_ascending(d05, d06, d07, d08); -} + } static INLINE void sort_08v_merge_descending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08) { __m256i tmp, cmp; @@ -612,7 +623,7 @@ template<> struct bitonic { sort_04v_merge_descending(d01, d02, d03, d04); sort_04v_merge_descending(d05, d06, d07, d08); -} + } static INLINE void sort_09v_ascending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08, __m256i& d09) { __m256i tmp, cmp; @@ -626,7 +637,7 @@ template<> struct bitonic { sort_08v_merge_ascending(d01, d02, d03, d04, d05, d06, d07, d08); sort_01v_merge_ascending(d09); -} + } static INLINE void sort_09v_descending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08, __m256i& d09) { __m256i tmp, cmp; @@ -640,7 +651,7 @@ template<> struct bitonic { sort_08v_merge_descending(d01, d02, d03, d04, d05, d06, d07, d08); sort_01v_merge_descending(d09); -} + } static INLINE void sort_10v_ascending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08, __m256i& d09, __m256i& d10) { __m256i tmp, cmp; @@ -659,7 +670,7 @@ template<> struct bitonic { sort_08v_merge_ascending(d01, d02, d03, d04, d05, d06, d07, d08); sort_02v_merge_ascending(d09, d10); -} + } static INLINE void sort_10v_descending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08, __m256i& d09, __m256i& d10) { __m256i tmp, cmp; @@ -678,7 +689,7 @@ template<> struct bitonic { sort_08v_merge_descending(d01, d02, d03, d04, d05, d06, d07, d08); sort_02v_merge_descending(d09, d10); -} + } static INLINE void sort_11v_ascending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08, __m256i& d09, __m256i& d10, __m256i& d11) { __m256i tmp, cmp; @@ -702,7 +713,7 @@ template<> struct bitonic { sort_08v_merge_ascending(d01, d02, d03, d04, d05, d06, d07, d08); sort_03v_merge_ascending(d09, d10, d11); -} + } static INLINE void sort_11v_descending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08, __m256i& d09, __m256i& d10, __m256i& d11) { __m256i tmp, cmp; @@ -726,7 +737,7 @@ template<> struct bitonic { sort_08v_merge_descending(d01, d02, d03, d04, d05, d06, d07, d08); sort_03v_merge_descending(d09, d10, d11); -} + } static INLINE void 
sort_12v_ascending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08, __m256i& d09, __m256i& d10, __m256i& d11, __m256i& d12) { __m256i tmp, cmp; @@ -755,7 +766,7 @@ template<> struct bitonic { sort_08v_merge_ascending(d01, d02, d03, d04, d05, d06, d07, d08); sort_04v_merge_ascending(d09, d10, d11, d12); -} + } static INLINE void sort_12v_descending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08, __m256i& d09, __m256i& d10, __m256i& d11, __m256i& d12) { __m256i tmp, cmp; @@ -784,7 +795,7 @@ template<> struct bitonic { sort_08v_merge_descending(d01, d02, d03, d04, d05, d06, d07, d08); sort_04v_merge_descending(d09, d10, d11, d12); -} + } static INLINE void sort_13v_ascending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08, __m256i& d09, __m256i& d10, __m256i& d11, __m256i& d12, __m256i& d13) { __m256i tmp, cmp; @@ -818,7 +829,7 @@ template<> struct bitonic { sort_08v_merge_ascending(d01, d02, d03, d04, d05, d06, d07, d08); sort_05v_merge_ascending(d09, d10, d11, d12, d13); -} + } static INLINE void sort_13v_descending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08, __m256i& d09, __m256i& d10, __m256i& d11, __m256i& d12, __m256i& d13) { __m256i tmp, cmp; @@ -852,7 +863,7 @@ template<> struct bitonic { sort_08v_merge_descending(d01, d02, d03, d04, d05, d06, d07, d08); sort_05v_merge_descending(d09, d10, d11, d12, d13); -} + } static INLINE void sort_14v_ascending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08, __m256i& d09, __m256i& d10, __m256i& d11, __m256i& d12, __m256i& d13, __m256i& d14) { __m256i tmp, cmp; @@ -891,7 +902,7 @@ template<> struct bitonic { sort_08v_merge_ascending(d01, d02, d03, d04, d05, d06, d07, d08); sort_06v_merge_ascending(d09, d10, d11, d12, d13, d14); -} + } static INLINE void sort_14v_descending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08, __m256i& d09, __m256i& d10, __m256i& d11, __m256i& d12, __m256i& d13, __m256i& d14) { __m256i tmp, cmp; @@ -930,7 +941,7 @@ template<> struct bitonic { sort_08v_merge_descending(d01, d02, d03, d04, d05, d06, d07, d08); sort_06v_merge_descending(d09, d10, d11, d12, d13, d14); -} + } static INLINE void sort_15v_ascending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08, __m256i& d09, __m256i& d10, __m256i& d11, __m256i& d12, __m256i& d13, __m256i& d14, __m256i& d15) { __m256i tmp, cmp; @@ -974,7 +985,7 @@ template<> struct bitonic { sort_08v_merge_ascending(d01, d02, d03, d04, d05, d06, d07, d08); sort_07v_merge_ascending(d09, d10, d11, d12, d13, d14, d15); -} + } static INLINE void sort_15v_descending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08, __m256i& d09, __m256i& d10, __m256i& d11, __m256i& d12, __m256i& d13, __m256i& d14, __m256i& d15) { __m256i tmp, cmp; @@ -1018,7 +1029,7 @@ template<> struct bitonic { sort_08v_merge_descending(d01, d02, d03, d04, d05, d06, d07, d08); sort_07v_merge_descending(d09, d10, d11, d12, d13, d14, d15); -} + } static INLINE void sort_16v_ascending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08, __m256i& d09, __m256i& d10, __m256i& 
d11, __m256i& d12, __m256i& d13, __m256i& d14, __m256i& d15, __m256i& d16) { __m256i tmp, cmp; @@ -1067,7 +1078,7 @@ template<> struct bitonic { sort_08v_merge_ascending(d01, d02, d03, d04, d05, d06, d07, d08); sort_08v_merge_ascending(d09, d10, d11, d12, d13, d14, d15, d16); -} + } static INLINE void sort_16v_descending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08, __m256i& d09, __m256i& d10, __m256i& d11, __m256i& d12, __m256i& d13, __m256i& d14, __m256i& d15, __m256i& d16) { __m256i tmp, cmp; @@ -1116,26 +1127,26 @@ template<> struct bitonic { sort_08v_merge_descending(d01, d02, d03, d04, d05, d06, d07, d08); sort_08v_merge_descending(d09, d10, d11, d12, d13, d14, d15, d16); -} + } static NOINLINE void sort_01v(int64_t *ptr) { - __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0); + __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0);; sort_01v_ascending(d01); _mm256_storeu_si256((__m256i *) ptr + 0, d01); } static NOINLINE void sort_02v(int64_t *ptr) { - __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0); - __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1); + __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0);; + __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1);; sort_02v_ascending(d01, d02); _mm256_storeu_si256((__m256i *) ptr + 0, d01); _mm256_storeu_si256((__m256i *) ptr + 1, d02); } static NOINLINE void sort_03v(int64_t *ptr) { - __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0); - __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1); - __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2); + __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0);; + __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1);; + __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2);; sort_03v_ascending(d01, d02, d03); _mm256_storeu_si256((__m256i *) ptr + 0, d01); _mm256_storeu_si256((__m256i *) ptr + 1, d02); @@ -1143,10 +1154,10 @@ static NOINLINE void sort_03v(int64_t *ptr) { } static NOINLINE void sort_04v(int64_t *ptr) { - __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0); - __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1); - __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2); - __m256i d04 = _mm256_lddqu_si256((__m256i const *) ptr + 3); + __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0);; + __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1);; + __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2);; + __m256i d04 = _mm256_lddqu_si256((__m256i const *) ptr + 3);; sort_04v_ascending(d01, d02, d03, d04); _mm256_storeu_si256((__m256i *) ptr + 0, d01); _mm256_storeu_si256((__m256i *) ptr + 1, d02); @@ -1155,11 +1166,11 @@ static NOINLINE void sort_04v(int64_t *ptr) { } static NOINLINE void sort_05v(int64_t *ptr) { - __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0); - __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1); - __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2); - __m256i d04 = _mm256_lddqu_si256((__m256i const *) ptr + 3); - __m256i d05 = _mm256_lddqu_si256((__m256i const *) ptr + 4); + __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0);; + __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1);; + __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2);; + __m256i d04 = _mm256_lddqu_si256((__m256i const *) ptr + 3);; + __m256i d05 = _mm256_lddqu_si256((__m256i const *) ptr + 4);; sort_05v_ascending(d01, d02, d03, d04, d05); 
_mm256_storeu_si256((__m256i *) ptr + 0, d01); _mm256_storeu_si256((__m256i *) ptr + 1, d02); @@ -1169,12 +1180,12 @@ static NOINLINE void sort_05v(int64_t *ptr) { } static NOINLINE void sort_06v(int64_t *ptr) { - __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0); - __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1); - __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2); - __m256i d04 = _mm256_lddqu_si256((__m256i const *) ptr + 3); - __m256i d05 = _mm256_lddqu_si256((__m256i const *) ptr + 4); - __m256i d06 = _mm256_lddqu_si256((__m256i const *) ptr + 5); + __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0);; + __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1);; + __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2);; + __m256i d04 = _mm256_lddqu_si256((__m256i const *) ptr + 3);; + __m256i d05 = _mm256_lddqu_si256((__m256i const *) ptr + 4);; + __m256i d06 = _mm256_lddqu_si256((__m256i const *) ptr + 5);; sort_06v_ascending(d01, d02, d03, d04, d05, d06); _mm256_storeu_si256((__m256i *) ptr + 0, d01); _mm256_storeu_si256((__m256i *) ptr + 1, d02); @@ -1185,13 +1196,13 @@ static NOINLINE void sort_06v(int64_t *ptr) { } static NOINLINE void sort_07v(int64_t *ptr) { - __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0); - __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1); - __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2); - __m256i d04 = _mm256_lddqu_si256((__m256i const *) ptr + 3); - __m256i d05 = _mm256_lddqu_si256((__m256i const *) ptr + 4); - __m256i d06 = _mm256_lddqu_si256((__m256i const *) ptr + 5); - __m256i d07 = _mm256_lddqu_si256((__m256i const *) ptr + 6); + __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0);; + __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1);; + __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2);; + __m256i d04 = _mm256_lddqu_si256((__m256i const *) ptr + 3);; + __m256i d05 = _mm256_lddqu_si256((__m256i const *) ptr + 4);; + __m256i d06 = _mm256_lddqu_si256((__m256i const *) ptr + 5);; + __m256i d07 = _mm256_lddqu_si256((__m256i const *) ptr + 6);; sort_07v_ascending(d01, d02, d03, d04, d05, d06, d07); _mm256_storeu_si256((__m256i *) ptr + 0, d01); _mm256_storeu_si256((__m256i *) ptr + 1, d02); @@ -1203,14 +1214,14 @@ static NOINLINE void sort_07v(int64_t *ptr) { } static NOINLINE void sort_08v(int64_t *ptr) { - __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0); - __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1); - __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2); - __m256i d04 = _mm256_lddqu_si256((__m256i const *) ptr + 3); - __m256i d05 = _mm256_lddqu_si256((__m256i const *) ptr + 4); - __m256i d06 = _mm256_lddqu_si256((__m256i const *) ptr + 5); - __m256i d07 = _mm256_lddqu_si256((__m256i const *) ptr + 6); - __m256i d08 = _mm256_lddqu_si256((__m256i const *) ptr + 7); + __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0);; + __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1);; + __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2);; + __m256i d04 = _mm256_lddqu_si256((__m256i const *) ptr + 3);; + __m256i d05 = _mm256_lddqu_si256((__m256i const *) ptr + 4);; + __m256i d06 = _mm256_lddqu_si256((__m256i const *) ptr + 5);; + __m256i d07 = _mm256_lddqu_si256((__m256i const *) ptr + 6);; + __m256i d08 = _mm256_lddqu_si256((__m256i const *) ptr + 7);; sort_08v_ascending(d01, d02, d03, d04, d05, d06, d07, d08); _mm256_storeu_si256((__m256i *) ptr + 0, d01); _mm256_storeu_si256((__m256i *) ptr + 
1, d02); @@ -1223,15 +1234,15 @@ static NOINLINE void sort_08v(int64_t *ptr) { } static NOINLINE void sort_09v(int64_t *ptr) { - __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0); - __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1); - __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2); - __m256i d04 = _mm256_lddqu_si256((__m256i const *) ptr + 3); - __m256i d05 = _mm256_lddqu_si256((__m256i const *) ptr + 4); - __m256i d06 = _mm256_lddqu_si256((__m256i const *) ptr + 5); - __m256i d07 = _mm256_lddqu_si256((__m256i const *) ptr + 6); - __m256i d08 = _mm256_lddqu_si256((__m256i const *) ptr + 7); - __m256i d09 = _mm256_lddqu_si256((__m256i const *) ptr + 8); + __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0);; + __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1);; + __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2);; + __m256i d04 = _mm256_lddqu_si256((__m256i const *) ptr + 3);; + __m256i d05 = _mm256_lddqu_si256((__m256i const *) ptr + 4);; + __m256i d06 = _mm256_lddqu_si256((__m256i const *) ptr + 5);; + __m256i d07 = _mm256_lddqu_si256((__m256i const *) ptr + 6);; + __m256i d08 = _mm256_lddqu_si256((__m256i const *) ptr + 7);; + __m256i d09 = _mm256_lddqu_si256((__m256i const *) ptr + 8);; sort_09v_ascending(d01, d02, d03, d04, d05, d06, d07, d08, d09); _mm256_storeu_si256((__m256i *) ptr + 0, d01); _mm256_storeu_si256((__m256i *) ptr + 1, d02); @@ -1245,16 +1256,16 @@ static NOINLINE void sort_09v(int64_t *ptr) { } static NOINLINE void sort_10v(int64_t *ptr) { - __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0); - __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1); - __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2); - __m256i d04 = _mm256_lddqu_si256((__m256i const *) ptr + 3); - __m256i d05 = _mm256_lddqu_si256((__m256i const *) ptr + 4); - __m256i d06 = _mm256_lddqu_si256((__m256i const *) ptr + 5); - __m256i d07 = _mm256_lddqu_si256((__m256i const *) ptr + 6); - __m256i d08 = _mm256_lddqu_si256((__m256i const *) ptr + 7); - __m256i d09 = _mm256_lddqu_si256((__m256i const *) ptr + 8); - __m256i d10 = _mm256_lddqu_si256((__m256i const *) ptr + 9); + __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0);; + __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1);; + __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2);; + __m256i d04 = _mm256_lddqu_si256((__m256i const *) ptr + 3);; + __m256i d05 = _mm256_lddqu_si256((__m256i const *) ptr + 4);; + __m256i d06 = _mm256_lddqu_si256((__m256i const *) ptr + 5);; + __m256i d07 = _mm256_lddqu_si256((__m256i const *) ptr + 6);; + __m256i d08 = _mm256_lddqu_si256((__m256i const *) ptr + 7);; + __m256i d09 = _mm256_lddqu_si256((__m256i const *) ptr + 8);; + __m256i d10 = _mm256_lddqu_si256((__m256i const *) ptr + 9);; sort_10v_ascending(d01, d02, d03, d04, d05, d06, d07, d08, d09, d10); _mm256_storeu_si256((__m256i *) ptr + 0, d01); _mm256_storeu_si256((__m256i *) ptr + 1, d02); @@ -1269,17 +1280,17 @@ static NOINLINE void sort_10v(int64_t *ptr) { } static NOINLINE void sort_11v(int64_t *ptr) { - __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0); - __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1); - __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2); - __m256i d04 = _mm256_lddqu_si256((__m256i const *) ptr + 3); - __m256i d05 = _mm256_lddqu_si256((__m256i const *) ptr + 4); - __m256i d06 = _mm256_lddqu_si256((__m256i const *) ptr + 5); - __m256i d07 = _mm256_lddqu_si256((__m256i const *) ptr + 6); - __m256i d08 
= _mm256_lddqu_si256((__m256i const *) ptr + 7); - __m256i d09 = _mm256_lddqu_si256((__m256i const *) ptr + 8); - __m256i d10 = _mm256_lddqu_si256((__m256i const *) ptr + 9); - __m256i d11 = _mm256_lddqu_si256((__m256i const *) ptr + 10); + __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0);; + __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1);; + __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2);; + __m256i d04 = _mm256_lddqu_si256((__m256i const *) ptr + 3);; + __m256i d05 = _mm256_lddqu_si256((__m256i const *) ptr + 4);; + __m256i d06 = _mm256_lddqu_si256((__m256i const *) ptr + 5);; + __m256i d07 = _mm256_lddqu_si256((__m256i const *) ptr + 6);; + __m256i d08 = _mm256_lddqu_si256((__m256i const *) ptr + 7);; + __m256i d09 = _mm256_lddqu_si256((__m256i const *) ptr + 8);; + __m256i d10 = _mm256_lddqu_si256((__m256i const *) ptr + 9);; + __m256i d11 = _mm256_lddqu_si256((__m256i const *) ptr + 10);; sort_11v_ascending(d01, d02, d03, d04, d05, d06, d07, d08, d09, d10, d11); _mm256_storeu_si256((__m256i *) ptr + 0, d01); _mm256_storeu_si256((__m256i *) ptr + 1, d02); @@ -1295,18 +1306,18 @@ static NOINLINE void sort_11v(int64_t *ptr) { } static NOINLINE void sort_12v(int64_t *ptr) { - __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0); - __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1); - __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2); - __m256i d04 = _mm256_lddqu_si256((__m256i const *) ptr + 3); - __m256i d05 = _mm256_lddqu_si256((__m256i const *) ptr + 4); - __m256i d06 = _mm256_lddqu_si256((__m256i const *) ptr + 5); - __m256i d07 = _mm256_lddqu_si256((__m256i const *) ptr + 6); - __m256i d08 = _mm256_lddqu_si256((__m256i const *) ptr + 7); - __m256i d09 = _mm256_lddqu_si256((__m256i const *) ptr + 8); - __m256i d10 = _mm256_lddqu_si256((__m256i const *) ptr + 9); - __m256i d11 = _mm256_lddqu_si256((__m256i const *) ptr + 10); - __m256i d12 = _mm256_lddqu_si256((__m256i const *) ptr + 11); + __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0);; + __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1);; + __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2);; + __m256i d04 = _mm256_lddqu_si256((__m256i const *) ptr + 3);; + __m256i d05 = _mm256_lddqu_si256((__m256i const *) ptr + 4);; + __m256i d06 = _mm256_lddqu_si256((__m256i const *) ptr + 5);; + __m256i d07 = _mm256_lddqu_si256((__m256i const *) ptr + 6);; + __m256i d08 = _mm256_lddqu_si256((__m256i const *) ptr + 7);; + __m256i d09 = _mm256_lddqu_si256((__m256i const *) ptr + 8);; + __m256i d10 = _mm256_lddqu_si256((__m256i const *) ptr + 9);; + __m256i d11 = _mm256_lddqu_si256((__m256i const *) ptr + 10);; + __m256i d12 = _mm256_lddqu_si256((__m256i const *) ptr + 11);; sort_12v_ascending(d01, d02, d03, d04, d05, d06, d07, d08, d09, d10, d11, d12); _mm256_storeu_si256((__m256i *) ptr + 0, d01); _mm256_storeu_si256((__m256i *) ptr + 1, d02); @@ -1323,19 +1334,19 @@ static NOINLINE void sort_12v(int64_t *ptr) { } static NOINLINE void sort_13v(int64_t *ptr) { - __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0); - __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1); - __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2); - __m256i d04 = _mm256_lddqu_si256((__m256i const *) ptr + 3); - __m256i d05 = _mm256_lddqu_si256((__m256i const *) ptr + 4); - __m256i d06 = _mm256_lddqu_si256((__m256i const *) ptr + 5); - __m256i d07 = _mm256_lddqu_si256((__m256i const *) ptr + 6); - __m256i d08 = _mm256_lddqu_si256((__m256i const *) ptr + 7); 
- __m256i d09 = _mm256_lddqu_si256((__m256i const *) ptr + 8); - __m256i d10 = _mm256_lddqu_si256((__m256i const *) ptr + 9); - __m256i d11 = _mm256_lddqu_si256((__m256i const *) ptr + 10); - __m256i d12 = _mm256_lddqu_si256((__m256i const *) ptr + 11); - __m256i d13 = _mm256_lddqu_si256((__m256i const *) ptr + 12); + __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0);; + __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1);; + __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2);; + __m256i d04 = _mm256_lddqu_si256((__m256i const *) ptr + 3);; + __m256i d05 = _mm256_lddqu_si256((__m256i const *) ptr + 4);; + __m256i d06 = _mm256_lddqu_si256((__m256i const *) ptr + 5);; + __m256i d07 = _mm256_lddqu_si256((__m256i const *) ptr + 6);; + __m256i d08 = _mm256_lddqu_si256((__m256i const *) ptr + 7);; + __m256i d09 = _mm256_lddqu_si256((__m256i const *) ptr + 8);; + __m256i d10 = _mm256_lddqu_si256((__m256i const *) ptr + 9);; + __m256i d11 = _mm256_lddqu_si256((__m256i const *) ptr + 10);; + __m256i d12 = _mm256_lddqu_si256((__m256i const *) ptr + 11);; + __m256i d13 = _mm256_lddqu_si256((__m256i const *) ptr + 12);; sort_13v_ascending(d01, d02, d03, d04, d05, d06, d07, d08, d09, d10, d11, d12, d13); _mm256_storeu_si256((__m256i *) ptr + 0, d01); _mm256_storeu_si256((__m256i *) ptr + 1, d02); @@ -1353,20 +1364,20 @@ static NOINLINE void sort_13v(int64_t *ptr) { } static NOINLINE void sort_14v(int64_t *ptr) { - __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0); - __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1); - __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2); - __m256i d04 = _mm256_lddqu_si256((__m256i const *) ptr + 3); - __m256i d05 = _mm256_lddqu_si256((__m256i const *) ptr + 4); - __m256i d06 = _mm256_lddqu_si256((__m256i const *) ptr + 5); - __m256i d07 = _mm256_lddqu_si256((__m256i const *) ptr + 6); - __m256i d08 = _mm256_lddqu_si256((__m256i const *) ptr + 7); - __m256i d09 = _mm256_lddqu_si256((__m256i const *) ptr + 8); - __m256i d10 = _mm256_lddqu_si256((__m256i const *) ptr + 9); - __m256i d11 = _mm256_lddqu_si256((__m256i const *) ptr + 10); - __m256i d12 = _mm256_lddqu_si256((__m256i const *) ptr + 11); - __m256i d13 = _mm256_lddqu_si256((__m256i const *) ptr + 12); - __m256i d14 = _mm256_lddqu_si256((__m256i const *) ptr + 13); + __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0);; + __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1);; + __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2);; + __m256i d04 = _mm256_lddqu_si256((__m256i const *) ptr + 3);; + __m256i d05 = _mm256_lddqu_si256((__m256i const *) ptr + 4);; + __m256i d06 = _mm256_lddqu_si256((__m256i const *) ptr + 5);; + __m256i d07 = _mm256_lddqu_si256((__m256i const *) ptr + 6);; + __m256i d08 = _mm256_lddqu_si256((__m256i const *) ptr + 7);; + __m256i d09 = _mm256_lddqu_si256((__m256i const *) ptr + 8);; + __m256i d10 = _mm256_lddqu_si256((__m256i const *) ptr + 9);; + __m256i d11 = _mm256_lddqu_si256((__m256i const *) ptr + 10);; + __m256i d12 = _mm256_lddqu_si256((__m256i const *) ptr + 11);; + __m256i d13 = _mm256_lddqu_si256((__m256i const *) ptr + 12);; + __m256i d14 = _mm256_lddqu_si256((__m256i const *) ptr + 13);; sort_14v_ascending(d01, d02, d03, d04, d05, d06, d07, d08, d09, d10, d11, d12, d13, d14); _mm256_storeu_si256((__m256i *) ptr + 0, d01); _mm256_storeu_si256((__m256i *) ptr + 1, d02); @@ -1385,21 +1396,21 @@ static NOINLINE void sort_14v(int64_t *ptr) { } static NOINLINE void sort_15v(int64_t *ptr) { - __m256i d01 = 
_mm256_lddqu_si256((__m256i const *) ptr + 0); - __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1); - __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2); - __m256i d04 = _mm256_lddqu_si256((__m256i const *) ptr + 3); - __m256i d05 = _mm256_lddqu_si256((__m256i const *) ptr + 4); - __m256i d06 = _mm256_lddqu_si256((__m256i const *) ptr + 5); - __m256i d07 = _mm256_lddqu_si256((__m256i const *) ptr + 6); - __m256i d08 = _mm256_lddqu_si256((__m256i const *) ptr + 7); - __m256i d09 = _mm256_lddqu_si256((__m256i const *) ptr + 8); - __m256i d10 = _mm256_lddqu_si256((__m256i const *) ptr + 9); - __m256i d11 = _mm256_lddqu_si256((__m256i const *) ptr + 10); - __m256i d12 = _mm256_lddqu_si256((__m256i const *) ptr + 11); - __m256i d13 = _mm256_lddqu_si256((__m256i const *) ptr + 12); - __m256i d14 = _mm256_lddqu_si256((__m256i const *) ptr + 13); - __m256i d15 = _mm256_lddqu_si256((__m256i const *) ptr + 14); + __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0);; + __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1);; + __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2);; + __m256i d04 = _mm256_lddqu_si256((__m256i const *) ptr + 3);; + __m256i d05 = _mm256_lddqu_si256((__m256i const *) ptr + 4);; + __m256i d06 = _mm256_lddqu_si256((__m256i const *) ptr + 5);; + __m256i d07 = _mm256_lddqu_si256((__m256i const *) ptr + 6);; + __m256i d08 = _mm256_lddqu_si256((__m256i const *) ptr + 7);; + __m256i d09 = _mm256_lddqu_si256((__m256i const *) ptr + 8);; + __m256i d10 = _mm256_lddqu_si256((__m256i const *) ptr + 9);; + __m256i d11 = _mm256_lddqu_si256((__m256i const *) ptr + 10);; + __m256i d12 = _mm256_lddqu_si256((__m256i const *) ptr + 11);; + __m256i d13 = _mm256_lddqu_si256((__m256i const *) ptr + 12);; + __m256i d14 = _mm256_lddqu_si256((__m256i const *) ptr + 13);; + __m256i d15 = _mm256_lddqu_si256((__m256i const *) ptr + 14);; sort_15v_ascending(d01, d02, d03, d04, d05, d06, d07, d08, d09, d10, d11, d12, d13, d14, d15); _mm256_storeu_si256((__m256i *) ptr + 0, d01); _mm256_storeu_si256((__m256i *) ptr + 1, d02); @@ -1419,22 +1430,22 @@ static NOINLINE void sort_15v(int64_t *ptr) { } static NOINLINE void sort_16v(int64_t *ptr) { - __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0); - __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1); - __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2); - __m256i d04 = _mm256_lddqu_si256((__m256i const *) ptr + 3); - __m256i d05 = _mm256_lddqu_si256((__m256i const *) ptr + 4); - __m256i d06 = _mm256_lddqu_si256((__m256i const *) ptr + 5); - __m256i d07 = _mm256_lddqu_si256((__m256i const *) ptr + 6); - __m256i d08 = _mm256_lddqu_si256((__m256i const *) ptr + 7); - __m256i d09 = _mm256_lddqu_si256((__m256i const *) ptr + 8); - __m256i d10 = _mm256_lddqu_si256((__m256i const *) ptr + 9); - __m256i d11 = _mm256_lddqu_si256((__m256i const *) ptr + 10); - __m256i d12 = _mm256_lddqu_si256((__m256i const *) ptr + 11); - __m256i d13 = _mm256_lddqu_si256((__m256i const *) ptr + 12); - __m256i d14 = _mm256_lddqu_si256((__m256i const *) ptr + 13); - __m256i d15 = _mm256_lddqu_si256((__m256i const *) ptr + 14); - __m256i d16 = _mm256_lddqu_si256((__m256i const *) ptr + 15); + __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0);; + __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1);; + __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2);; + __m256i d04 = _mm256_lddqu_si256((__m256i const *) ptr + 3);; + __m256i d05 = _mm256_lddqu_si256((__m256i const *) ptr + 4);; + __m256i d06 
= _mm256_lddqu_si256((__m256i const *) ptr + 5);; + __m256i d07 = _mm256_lddqu_si256((__m256i const *) ptr + 6);; + __m256i d08 = _mm256_lddqu_si256((__m256i const *) ptr + 7);; + __m256i d09 = _mm256_lddqu_si256((__m256i const *) ptr + 8);; + __m256i d10 = _mm256_lddqu_si256((__m256i const *) ptr + 9);; + __m256i d11 = _mm256_lddqu_si256((__m256i const *) ptr + 10);; + __m256i d12 = _mm256_lddqu_si256((__m256i const *) ptr + 11);; + __m256i d13 = _mm256_lddqu_si256((__m256i const *) ptr + 12);; + __m256i d14 = _mm256_lddqu_si256((__m256i const *) ptr + 13);; + __m256i d15 = _mm256_lddqu_si256((__m256i const *) ptr + 14);; + __m256i d16 = _mm256_lddqu_si256((__m256i const *) ptr + 15);; sort_16v_ascending(d01, d02, d03, d04, d05, d06, d07, d08, d09, d10, d11, d12, d13, d14, d15, d16); _mm256_storeu_si256((__m256i *) ptr + 0, d01); _mm256_storeu_si256((__m256i *) ptr + 1, d02); diff --git a/src/coreclr/src/gc/smallsort/bitonic_sort.AVX2.uint32_t.generated.h b/src/coreclr/src/gc/smallsort/bitonic_sort.AVX2.uint32_t.generated.h new file mode 100644 index 00000000000000..cf1d35a3a90f14 --- /dev/null +++ b/src/coreclr/src/gc/smallsort/bitonic_sort.AVX2.uint32_t.generated.h @@ -0,0 +1,1532 @@ +///////////////////////////////////////////////////////////////////////////// +//// +// This file was auto-generated by a tool at 2020-05-31 19:46:17 +// +// It is recommended you DO NOT directly edit this file but instead edit +// the code-generator that generated this source file. +///////////////////////////////////////////////////////////////////////////// + +#ifndef BITONIC_SORT_AVX2_UINT32_T_H +#define BITONIC_SORT_AVX2_UINT32_T_H + +#include <immintrin.h> +#include "bitonic_sort.h" + +#ifdef _MSC_VER + // MSVC + #define INLINE __forceinline + #define NOINLINE __declspec(noinline) +#else + // GCC + Clang + #define INLINE __attribute__((always_inline)) + #define NOINLINE __attribute__((noinline)) +#endif + +#define i2d _mm256_castsi256_pd +#define d2i _mm256_castpd_si256 +#define i2s _mm256_castsi256_ps +#define s2i _mm256_castps_si256 +#define s2d _mm256_castps_pd +#define d2s _mm256_castpd_ps + +namespace gcsort { +namespace smallsort { +template<> struct bitonic<uint32_t> { +public: + + static INLINE void sort_01v_ascending(__m256i& d01) { + __m256i min, max, s; + + s = _mm256_shuffle_epi32(d01, 0xB1); + + min = _mm256_min_epu32(s, d01); + max = _mm256_max_epu32(s, d01); + d01 = _mm256_blend_epi32(min, max, 0xAA); + + s = _mm256_shuffle_epi32(d01, 0x1B); + + min = _mm256_min_epu32(s, d01); + max = _mm256_max_epu32(s, d01); + d01 = _mm256_blend_epi32(min, max, 0xCC); + + s = _mm256_shuffle_epi32(d01, 0xB1); + + min = _mm256_min_epu32(s, d01); + max = _mm256_max_epu32(s, d01); + d01 = _mm256_blend_epi32(min, max, 0xAA); + + s = d2i(_mm256_permute4x64_pd(i2d(_mm256_shuffle_epi32(d01, 0x1B)), 0x4E)); + min = _mm256_min_epu32(s, d01); + max = _mm256_max_epu32(s, d01); + d01 = _mm256_blend_epi32(min, max, 0xF0); + + s = _mm256_shuffle_epi32(d01, 0x4E); + min = _mm256_min_epu32(s, d01); + max = _mm256_max_epu32(s, d01); + d01 = _mm256_blend_epi32(min, max, 0xCC); + + s = _mm256_shuffle_epi32(d01, 0xB1); + min = _mm256_min_epu32(s, d01); + max = _mm256_max_epu32(s, d01); + d01 = _mm256_blend_epi32(min, max, 0xAA); +} + static INLINE void sort_01v_merge_ascending(__m256i& d01) { + __m256i min, max, s; + + s = d2i(_mm256_permute4x64_pd(i2d(d01), 0x4E)); + min = _mm256_min_epu32(s, d01); + max = _mm256_max_epu32(s, d01); + d01 = _mm256_blend_epi32(min, max, 0xF0); + + s = _mm256_shuffle_epi32(d01, 0x4E); + + min =
_mm256_min_epu32(s, d01); + max = _mm256_max_epu32(s, d01); + d01 = _mm256_blend_epi32(min, max, 0xCC); + + s = _mm256_shuffle_epi32(d01, 0xB1); + + min = _mm256_min_epu32(s, d01); + max = _mm256_max_epu32(s, d01); + d01 = _mm256_blend_epi32(min, max, 0xAA); + } + static INLINE void sort_01v_descending(__m256i& d01) { + __m256i min, max, s; + + s = _mm256_shuffle_epi32(d01, 0xB1); + + min = _mm256_min_epu32(s, d01); + max = _mm256_max_epu32(s, d01); + d01 = _mm256_blend_epi32(max, min, 0xAA); + + s = _mm256_shuffle_epi32(d01, 0x1B); + + min = _mm256_min_epu32(s, d01); + max = _mm256_max_epu32(s, d01); + d01 = _mm256_blend_epi32(max, min, 0xCC); + + s = _mm256_shuffle_epi32(d01, 0xB1); + + min = _mm256_min_epu32(s, d01); + max = _mm256_max_epu32(s, d01); + d01 = _mm256_blend_epi32(max, min, 0xAA); + + s = d2i(_mm256_permute4x64_pd(i2d(_mm256_shuffle_epi32(d01, 0x1B)), 0x4E)); + min = _mm256_min_epu32(s, d01); + max = _mm256_max_epu32(s, d01); + d01 = _mm256_blend_epi32(max, min, 0xF0); + + s = _mm256_shuffle_epi32(d01, 0x4E); + min = _mm256_min_epu32(s, d01); + max = _mm256_max_epu32(s, d01); + d01 = _mm256_blend_epi32(max, min, 0xCC); + + s = _mm256_shuffle_epi32(d01, 0xB1); + min = _mm256_min_epu32(s, d01); + max = _mm256_max_epu32(s, d01); + d01 = _mm256_blend_epi32(max, min, 0xAA); +} + static INLINE void sort_01v_merge_descending(__m256i& d01) { + __m256i min, max, s; + + s = d2i(_mm256_permute4x64_pd(i2d(d01), 0x4E)); + min = _mm256_min_epu32(s, d01); + max = _mm256_max_epu32(s, d01); + d01 = _mm256_blend_epi32(max, min, 0xF0); + + s = _mm256_shuffle_epi32(d01, 0x4E); + + min = _mm256_min_epu32(s, d01); + max = _mm256_max_epu32(s, d01); + d01 = _mm256_blend_epi32(max, min, 0xCC); + + s = _mm256_shuffle_epi32(d01, 0xB1); + + min = _mm256_min_epu32(s, d01); + max = _mm256_max_epu32(s, d01); + d01 = _mm256_blend_epi32(max, min, 0xAA); + } + static INLINE void sort_02v_ascending(__m256i& d01, __m256i& d02) { + __m256i tmp; + + sort_01v_ascending(d01); + sort_01v_descending(d02); + + tmp = d02; + + d02 = _mm256_max_epu32(d01, d02); + d01 = _mm256_min_epu32(d01, tmp); + + sort_01v_merge_ascending(d01); + sort_01v_merge_ascending(d02); + } + static INLINE void sort_02v_descending(__m256i& d01, __m256i& d02) { + __m256i tmp; + + sort_01v_descending(d01); + sort_01v_ascending(d02); + + tmp = d02; + + d02 = _mm256_max_epu32(d01, d02); + d01 = _mm256_min_epu32(d01, tmp); + + sort_01v_merge_descending(d01); + sort_01v_merge_descending(d02); + } + static INLINE void sort_02v_merge_ascending(__m256i& d01, __m256i& d02) { + __m256i tmp; + + tmp = d01; + + d01 = _mm256_min_epu32(d02, d01); + + d02 = _mm256_max_epu32(d02, tmp); + + sort_01v_merge_ascending(d01); + sort_01v_merge_ascending(d02); + } + static INLINE void sort_02v_merge_descending(__m256i& d01, __m256i& d02) { + __m256i tmp; + + tmp = d01; + + d01 = _mm256_min_epu32(d02, d01); + + d02 = _mm256_max_epu32(d02, tmp); + + sort_01v_merge_descending(d01); + sort_01v_merge_descending(d02); + } + static INLINE void sort_03v_ascending(__m256i& d01, __m256i& d02, __m256i& d03) { + __m256i tmp; + + sort_02v_ascending(d01, d02); + sort_01v_descending(d03); + + tmp = d03; + + d03 = _mm256_max_epu32(d02, d03); + d02 = _mm256_min_epu32(d02, tmp); + + sort_02v_merge_ascending(d01, d02); + sort_01v_merge_ascending(d03); + } + static INLINE void sort_03v_descending(__m256i& d01, __m256i& d02, __m256i& d03) { + __m256i tmp; + + sort_02v_descending(d01, d02); + sort_01v_ascending(d03); + + tmp = d03; + + d03 = _mm256_max_epu32(d02, d03); + d02 = 
_mm256_min_epu32(d02, tmp); + + sort_02v_merge_descending(d01, d02); + sort_01v_merge_descending(d03); + } + static INLINE void sort_03v_merge_ascending(__m256i& d01, __m256i& d02, __m256i& d03) { + __m256i tmp; + + tmp = d01; + + d01 = _mm256_min_epu32(d03, d01); + + d03 = _mm256_max_epu32(d03, tmp); + + sort_02v_merge_ascending(d01, d02); + sort_01v_merge_ascending(d03); + } + static INLINE void sort_03v_merge_descending(__m256i& d01, __m256i& d02, __m256i& d03) { + __m256i tmp; + + tmp = d01; + + d01 = _mm256_min_epu32(d03, d01); + + d03 = _mm256_max_epu32(d03, tmp); + + sort_02v_merge_descending(d01, d02); + sort_01v_merge_descending(d03); + } + static INLINE void sort_04v_ascending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04) { + __m256i tmp; + + sort_02v_ascending(d01, d02); + sort_02v_descending(d03, d04); + + tmp = d03; + + d03 = _mm256_max_epu32(d02, d03); + d02 = _mm256_min_epu32(d02, tmp); + + tmp = d04; + + d04 = _mm256_max_epu32(d01, d04); + d01 = _mm256_min_epu32(d01, tmp); + + sort_02v_merge_ascending(d01, d02); + sort_02v_merge_ascending(d03, d04); + } + static INLINE void sort_04v_descending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04) { + __m256i tmp; + + sort_02v_descending(d01, d02); + sort_02v_ascending(d03, d04); + + tmp = d03; + + d03 = _mm256_max_epu32(d02, d03); + d02 = _mm256_min_epu32(d02, tmp); + + tmp = d04; + + d04 = _mm256_max_epu32(d01, d04); + d01 = _mm256_min_epu32(d01, tmp); + + sort_02v_merge_descending(d01, d02); + sort_02v_merge_descending(d03, d04); + } + static INLINE void sort_04v_merge_ascending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04) { + __m256i tmp; + + tmp = d01; + + d01 = _mm256_min_epu32(d03, d01); + + d03 = _mm256_max_epu32(d03, tmp); + + tmp = d02; + + d02 = _mm256_min_epu32(d04, d02); + + d04 = _mm256_max_epu32(d04, tmp); + + sort_02v_merge_ascending(d01, d02); + sort_02v_merge_ascending(d03, d04); + } + static INLINE void sort_04v_merge_descending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04) { + __m256i tmp; + + tmp = d01; + + d01 = _mm256_min_epu32(d03, d01); + + d03 = _mm256_max_epu32(d03, tmp); + + tmp = d02; + + d02 = _mm256_min_epu32(d04, d02); + + d04 = _mm256_max_epu32(d04, tmp); + + sort_02v_merge_descending(d01, d02); + sort_02v_merge_descending(d03, d04); + } + static INLINE void sort_05v_ascending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05) { + __m256i tmp; + + sort_04v_ascending(d01, d02, d03, d04); + sort_01v_descending(d05); + + tmp = d05; + + d05 = _mm256_max_epu32(d04, d05); + d04 = _mm256_min_epu32(d04, tmp); + + sort_04v_merge_ascending(d01, d02, d03, d04); + sort_01v_merge_ascending(d05); + } + static INLINE void sort_05v_descending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05) { + __m256i tmp; + + sort_04v_descending(d01, d02, d03, d04); + sort_01v_ascending(d05); + + tmp = d05; + + d05 = _mm256_max_epu32(d04, d05); + d04 = _mm256_min_epu32(d04, tmp); + + sort_04v_merge_descending(d01, d02, d03, d04); + sort_01v_merge_descending(d05); + } + static INLINE void sort_05v_merge_ascending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05) { + __m256i tmp; + + tmp = d01; + + d01 = _mm256_min_epu32(d05, d01); + + d05 = _mm256_max_epu32(d05, tmp); + + sort_04v_merge_ascending(d01, d02, d03, d04); + sort_01v_merge_ascending(d05); + } + static INLINE void sort_05v_merge_descending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05) { + __m256i tmp; + + tmp = d01; + + d01 = _mm256_min_epu32(d05, 
d01); + + d05 = _mm256_max_epu32(d05, tmp); + + sort_04v_merge_descending(d01, d02, d03, d04); + sort_01v_merge_descending(d05); + } + static INLINE void sort_06v_ascending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06) { + __m256i tmp; + + sort_04v_ascending(d01, d02, d03, d04); + sort_02v_descending(d05, d06); + + tmp = d05; + + d05 = _mm256_max_epu32(d04, d05); + d04 = _mm256_min_epu32(d04, tmp); + + tmp = d06; + + d06 = _mm256_max_epu32(d03, d06); + d03 = _mm256_min_epu32(d03, tmp); + + sort_04v_merge_ascending(d01, d02, d03, d04); + sort_02v_merge_ascending(d05, d06); + } + static INLINE void sort_06v_descending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06) { + __m256i tmp; + + sort_04v_descending(d01, d02, d03, d04); + sort_02v_ascending(d05, d06); + + tmp = d05; + + d05 = _mm256_max_epu32(d04, d05); + d04 = _mm256_min_epu32(d04, tmp); + + tmp = d06; + + d06 = _mm256_max_epu32(d03, d06); + d03 = _mm256_min_epu32(d03, tmp); + + sort_04v_merge_descending(d01, d02, d03, d04); + sort_02v_merge_descending(d05, d06); + } + static INLINE void sort_06v_merge_ascending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06) { + __m256i tmp; + + tmp = d01; + + d01 = _mm256_min_epu32(d05, d01); + + d05 = _mm256_max_epu32(d05, tmp); + + tmp = d02; + + d02 = _mm256_min_epu32(d06, d02); + + d06 = _mm256_max_epu32(d06, tmp); + + sort_04v_merge_ascending(d01, d02, d03, d04); + sort_02v_merge_ascending(d05, d06); + } + static INLINE void sort_06v_merge_descending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06) { + __m256i tmp; + + tmp = d01; + + d01 = _mm256_min_epu32(d05, d01); + + d05 = _mm256_max_epu32(d05, tmp); + + tmp = d02; + + d02 = _mm256_min_epu32(d06, d02); + + d06 = _mm256_max_epu32(d06, tmp); + + sort_04v_merge_descending(d01, d02, d03, d04); + sort_02v_merge_descending(d05, d06); + } + static INLINE void sort_07v_ascending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07) { + __m256i tmp; + + sort_04v_ascending(d01, d02, d03, d04); + sort_03v_descending(d05, d06, d07); + + tmp = d05; + + d05 = _mm256_max_epu32(d04, d05); + d04 = _mm256_min_epu32(d04, tmp); + + tmp = d06; + + d06 = _mm256_max_epu32(d03, d06); + d03 = _mm256_min_epu32(d03, tmp); + + tmp = d07; + + d07 = _mm256_max_epu32(d02, d07); + d02 = _mm256_min_epu32(d02, tmp); + + sort_04v_merge_ascending(d01, d02, d03, d04); + sort_03v_merge_ascending(d05, d06, d07); + } + static INLINE void sort_07v_descending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07) { + __m256i tmp; + + sort_04v_descending(d01, d02, d03, d04); + sort_03v_ascending(d05, d06, d07); + + tmp = d05; + + d05 = _mm256_max_epu32(d04, d05); + d04 = _mm256_min_epu32(d04, tmp); + + tmp = d06; + + d06 = _mm256_max_epu32(d03, d06); + d03 = _mm256_min_epu32(d03, tmp); + + tmp = d07; + + d07 = _mm256_max_epu32(d02, d07); + d02 = _mm256_min_epu32(d02, tmp); + + sort_04v_merge_descending(d01, d02, d03, d04); + sort_03v_merge_descending(d05, d06, d07); + } + static INLINE void sort_07v_merge_ascending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07) { + __m256i tmp; + + tmp = d01; + + d01 = _mm256_min_epu32(d05, d01); + + d05 = _mm256_max_epu32(d05, tmp); + + tmp = d02; + + d02 = _mm256_min_epu32(d06, d02); + + d06 = _mm256_max_epu32(d06, tmp); + + tmp = d03; + + d03 = _mm256_min_epu32(d07, 
d03); + + d07 = _mm256_max_epu32(d07, tmp); + + sort_04v_merge_ascending(d01, d02, d03, d04); + sort_03v_merge_ascending(d05, d06, d07); + } + static INLINE void sort_07v_merge_descending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07) { + __m256i tmp; + + tmp = d01; + + d01 = _mm256_min_epu32(d05, d01); + + d05 = _mm256_max_epu32(d05, tmp); + + tmp = d02; + + d02 = _mm256_min_epu32(d06, d02); + + d06 = _mm256_max_epu32(d06, tmp); + + tmp = d03; + + d03 = _mm256_min_epu32(d07, d03); + + d07 = _mm256_max_epu32(d07, tmp); + + sort_04v_merge_descending(d01, d02, d03, d04); + sort_03v_merge_descending(d05, d06, d07); + } + static INLINE void sort_08v_ascending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08) { + __m256i tmp; + + sort_04v_ascending(d01, d02, d03, d04); + sort_04v_descending(d05, d06, d07, d08); + + tmp = d05; + + d05 = _mm256_max_epu32(d04, d05); + d04 = _mm256_min_epu32(d04, tmp); + + tmp = d06; + + d06 = _mm256_max_epu32(d03, d06); + d03 = _mm256_min_epu32(d03, tmp); + + tmp = d07; + + d07 = _mm256_max_epu32(d02, d07); + d02 = _mm256_min_epu32(d02, tmp); + + tmp = d08; + + d08 = _mm256_max_epu32(d01, d08); + d01 = _mm256_min_epu32(d01, tmp); + + sort_04v_merge_ascending(d01, d02, d03, d04); + sort_04v_merge_ascending(d05, d06, d07, d08); + } + static INLINE void sort_08v_descending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08) { + __m256i tmp; + + sort_04v_descending(d01, d02, d03, d04); + sort_04v_ascending(d05, d06, d07, d08); + + tmp = d05; + + d05 = _mm256_max_epu32(d04, d05); + d04 = _mm256_min_epu32(d04, tmp); + + tmp = d06; + + d06 = _mm256_max_epu32(d03, d06); + d03 = _mm256_min_epu32(d03, tmp); + + tmp = d07; + + d07 = _mm256_max_epu32(d02, d07); + d02 = _mm256_min_epu32(d02, tmp); + + tmp = d08; + + d08 = _mm256_max_epu32(d01, d08); + d01 = _mm256_min_epu32(d01, tmp); + + sort_04v_merge_descending(d01, d02, d03, d04); + sort_04v_merge_descending(d05, d06, d07, d08); + } + static INLINE void sort_08v_merge_ascending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08) { + __m256i tmp; + + tmp = d01; + + d01 = _mm256_min_epu32(d05, d01); + + d05 = _mm256_max_epu32(d05, tmp); + + tmp = d02; + + d02 = _mm256_min_epu32(d06, d02); + + d06 = _mm256_max_epu32(d06, tmp); + + tmp = d03; + + d03 = _mm256_min_epu32(d07, d03); + + d07 = _mm256_max_epu32(d07, tmp); + + tmp = d04; + + d04 = _mm256_min_epu32(d08, d04); + + d08 = _mm256_max_epu32(d08, tmp); + + sort_04v_merge_ascending(d01, d02, d03, d04); + sort_04v_merge_ascending(d05, d06, d07, d08); + } + static INLINE void sort_08v_merge_descending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08) { + __m256i tmp; + + tmp = d01; + + d01 = _mm256_min_epu32(d05, d01); + + d05 = _mm256_max_epu32(d05, tmp); + + tmp = d02; + + d02 = _mm256_min_epu32(d06, d02); + + d06 = _mm256_max_epu32(d06, tmp); + + tmp = d03; + + d03 = _mm256_min_epu32(d07, d03); + + d07 = _mm256_max_epu32(d07, tmp); + + tmp = d04; + + d04 = _mm256_min_epu32(d08, d04); + + d08 = _mm256_max_epu32(d08, tmp); + + sort_04v_merge_descending(d01, d02, d03, d04); + sort_04v_merge_descending(d05, d06, d07, d08); + } + static INLINE void sort_09v_ascending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08, __m256i& 
d09) { + __m256i tmp; + + sort_08v_ascending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_01v_descending(d09); + + tmp = d09; + + d09 = _mm256_max_epu32(d08, d09); + d08 = _mm256_min_epu32(d08, tmp); + + sort_08v_merge_ascending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_01v_merge_ascending(d09); + } + static INLINE void sort_09v_descending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08, __m256i& d09) { + __m256i tmp; + + sort_08v_descending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_01v_ascending(d09); + + tmp = d09; + + d09 = _mm256_max_epu32(d08, d09); + d08 = _mm256_min_epu32(d08, tmp); + + sort_08v_merge_descending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_01v_merge_descending(d09); + } + static INLINE void sort_10v_ascending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08, __m256i& d09, __m256i& d10) { + __m256i tmp; + + sort_08v_ascending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_02v_descending(d09, d10); + + tmp = d09; + + d09 = _mm256_max_epu32(d08, d09); + d08 = _mm256_min_epu32(d08, tmp); + + tmp = d10; + + d10 = _mm256_max_epu32(d07, d10); + d07 = _mm256_min_epu32(d07, tmp); + + sort_08v_merge_ascending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_02v_merge_ascending(d09, d10); + } + static INLINE void sort_10v_descending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08, __m256i& d09, __m256i& d10) { + __m256i tmp; + + sort_08v_descending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_02v_ascending(d09, d10); + + tmp = d09; + + d09 = _mm256_max_epu32(d08, d09); + d08 = _mm256_min_epu32(d08, tmp); + + tmp = d10; + + d10 = _mm256_max_epu32(d07, d10); + d07 = _mm256_min_epu32(d07, tmp); + + sort_08v_merge_descending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_02v_merge_descending(d09, d10); + } + static INLINE void sort_11v_ascending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08, __m256i& d09, __m256i& d10, __m256i& d11) { + __m256i tmp; + + sort_08v_ascending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_03v_descending(d09, d10, d11); + + tmp = d09; + + d09 = _mm256_max_epu32(d08, d09); + d08 = _mm256_min_epu32(d08, tmp); + + tmp = d10; + + d10 = _mm256_max_epu32(d07, d10); + d07 = _mm256_min_epu32(d07, tmp); + + tmp = d11; + + d11 = _mm256_max_epu32(d06, d11); + d06 = _mm256_min_epu32(d06, tmp); + + sort_08v_merge_ascending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_03v_merge_ascending(d09, d10, d11); + } + static INLINE void sort_11v_descending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08, __m256i& d09, __m256i& d10, __m256i& d11) { + __m256i tmp; + + sort_08v_descending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_03v_ascending(d09, d10, d11); + + tmp = d09; + + d09 = _mm256_max_epu32(d08, d09); + d08 = _mm256_min_epu32(d08, tmp); + + tmp = d10; + + d10 = _mm256_max_epu32(d07, d10); + d07 = _mm256_min_epu32(d07, tmp); + + tmp = d11; + + d11 = _mm256_max_epu32(d06, d11); + d06 = _mm256_min_epu32(d06, tmp); + + sort_08v_merge_descending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_03v_merge_descending(d09, d10, d11); + } + static INLINE void sort_12v_ascending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08, __m256i& d09, __m256i& d10, __m256i& d11, __m256i& 
d12) { + __m256i tmp; + + sort_08v_ascending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_04v_descending(d09, d10, d11, d12); + + tmp = d09; + + d09 = _mm256_max_epu32(d08, d09); + d08 = _mm256_min_epu32(d08, tmp); + + tmp = d10; + + d10 = _mm256_max_epu32(d07, d10); + d07 = _mm256_min_epu32(d07, tmp); + + tmp = d11; + + d11 = _mm256_max_epu32(d06, d11); + d06 = _mm256_min_epu32(d06, tmp); + + tmp = d12; + + d12 = _mm256_max_epu32(d05, d12); + d05 = _mm256_min_epu32(d05, tmp); + + sort_08v_merge_ascending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_04v_merge_ascending(d09, d10, d11, d12); + } + static INLINE void sort_12v_descending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08, __m256i& d09, __m256i& d10, __m256i& d11, __m256i& d12) { + __m256i tmp; + + sort_08v_descending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_04v_ascending(d09, d10, d11, d12); + + tmp = d09; + + d09 = _mm256_max_epu32(d08, d09); + d08 = _mm256_min_epu32(d08, tmp); + + tmp = d10; + + d10 = _mm256_max_epu32(d07, d10); + d07 = _mm256_min_epu32(d07, tmp); + + tmp = d11; + + d11 = _mm256_max_epu32(d06, d11); + d06 = _mm256_min_epu32(d06, tmp); + + tmp = d12; + + d12 = _mm256_max_epu32(d05, d12); + d05 = _mm256_min_epu32(d05, tmp); + + sort_08v_merge_descending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_04v_merge_descending(d09, d10, d11, d12); + } + static INLINE void sort_13v_ascending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08, __m256i& d09, __m256i& d10, __m256i& d11, __m256i& d12, __m256i& d13) { + __m256i tmp; + + sort_08v_ascending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_05v_descending(d09, d10, d11, d12, d13); + + tmp = d09; + + d09 = _mm256_max_epu32(d08, d09); + d08 = _mm256_min_epu32(d08, tmp); + + tmp = d10; + + d10 = _mm256_max_epu32(d07, d10); + d07 = _mm256_min_epu32(d07, tmp); + + tmp = d11; + + d11 = _mm256_max_epu32(d06, d11); + d06 = _mm256_min_epu32(d06, tmp); + + tmp = d12; + + d12 = _mm256_max_epu32(d05, d12); + d05 = _mm256_min_epu32(d05, tmp); + + tmp = d13; + + d13 = _mm256_max_epu32(d04, d13); + d04 = _mm256_min_epu32(d04, tmp); + + sort_08v_merge_ascending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_05v_merge_ascending(d09, d10, d11, d12, d13); + } + static INLINE void sort_13v_descending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08, __m256i& d09, __m256i& d10, __m256i& d11, __m256i& d12, __m256i& d13) { + __m256i tmp; + + sort_08v_descending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_05v_ascending(d09, d10, d11, d12, d13); + + tmp = d09; + + d09 = _mm256_max_epu32(d08, d09); + d08 = _mm256_min_epu32(d08, tmp); + + tmp = d10; + + d10 = _mm256_max_epu32(d07, d10); + d07 = _mm256_min_epu32(d07, tmp); + + tmp = d11; + + d11 = _mm256_max_epu32(d06, d11); + d06 = _mm256_min_epu32(d06, tmp); + + tmp = d12; + + d12 = _mm256_max_epu32(d05, d12); + d05 = _mm256_min_epu32(d05, tmp); + + tmp = d13; + + d13 = _mm256_max_epu32(d04, d13); + d04 = _mm256_min_epu32(d04, tmp); + + sort_08v_merge_descending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_05v_merge_descending(d09, d10, d11, d12, d13); + } + static INLINE void sort_14v_ascending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08, __m256i& d09, __m256i& d10, __m256i& d11, __m256i& d12, __m256i& d13, __m256i& d14) { + __m256i tmp; + + sort_08v_ascending(d01, d02, 
d03, d04, d05, d06, d07, d08); + sort_06v_descending(d09, d10, d11, d12, d13, d14); + + tmp = d09; + + d09 = _mm256_max_epu32(d08, d09); + d08 = _mm256_min_epu32(d08, tmp); + + tmp = d10; + + d10 = _mm256_max_epu32(d07, d10); + d07 = _mm256_min_epu32(d07, tmp); + + tmp = d11; + + d11 = _mm256_max_epu32(d06, d11); + d06 = _mm256_min_epu32(d06, tmp); + + tmp = d12; + + d12 = _mm256_max_epu32(d05, d12); + d05 = _mm256_min_epu32(d05, tmp); + + tmp = d13; + + d13 = _mm256_max_epu32(d04, d13); + d04 = _mm256_min_epu32(d04, tmp); + + tmp = d14; + + d14 = _mm256_max_epu32(d03, d14); + d03 = _mm256_min_epu32(d03, tmp); + + sort_08v_merge_ascending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_06v_merge_ascending(d09, d10, d11, d12, d13, d14); + } + static INLINE void sort_14v_descending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08, __m256i& d09, __m256i& d10, __m256i& d11, __m256i& d12, __m256i& d13, __m256i& d14) { + __m256i tmp; + + sort_08v_descending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_06v_ascending(d09, d10, d11, d12, d13, d14); + + tmp = d09; + + d09 = _mm256_max_epu32(d08, d09); + d08 = _mm256_min_epu32(d08, tmp); + + tmp = d10; + + d10 = _mm256_max_epu32(d07, d10); + d07 = _mm256_min_epu32(d07, tmp); + + tmp = d11; + + d11 = _mm256_max_epu32(d06, d11); + d06 = _mm256_min_epu32(d06, tmp); + + tmp = d12; + + d12 = _mm256_max_epu32(d05, d12); + d05 = _mm256_min_epu32(d05, tmp); + + tmp = d13; + + d13 = _mm256_max_epu32(d04, d13); + d04 = _mm256_min_epu32(d04, tmp); + + tmp = d14; + + d14 = _mm256_max_epu32(d03, d14); + d03 = _mm256_min_epu32(d03, tmp); + + sort_08v_merge_descending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_06v_merge_descending(d09, d10, d11, d12, d13, d14); + } + static INLINE void sort_15v_ascending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08, __m256i& d09, __m256i& d10, __m256i& d11, __m256i& d12, __m256i& d13, __m256i& d14, __m256i& d15) { + __m256i tmp; + + sort_08v_ascending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_07v_descending(d09, d10, d11, d12, d13, d14, d15); + + tmp = d09; + + d09 = _mm256_max_epu32(d08, d09); + d08 = _mm256_min_epu32(d08, tmp); + + tmp = d10; + + d10 = _mm256_max_epu32(d07, d10); + d07 = _mm256_min_epu32(d07, tmp); + + tmp = d11; + + d11 = _mm256_max_epu32(d06, d11); + d06 = _mm256_min_epu32(d06, tmp); + + tmp = d12; + + d12 = _mm256_max_epu32(d05, d12); + d05 = _mm256_min_epu32(d05, tmp); + + tmp = d13; + + d13 = _mm256_max_epu32(d04, d13); + d04 = _mm256_min_epu32(d04, tmp); + + tmp = d14; + + d14 = _mm256_max_epu32(d03, d14); + d03 = _mm256_min_epu32(d03, tmp); + + tmp = d15; + + d15 = _mm256_max_epu32(d02, d15); + d02 = _mm256_min_epu32(d02, tmp); + + sort_08v_merge_ascending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_07v_merge_ascending(d09, d10, d11, d12, d13, d14, d15); + } + static INLINE void sort_15v_descending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08, __m256i& d09, __m256i& d10, __m256i& d11, __m256i& d12, __m256i& d13, __m256i& d14, __m256i& d15) { + __m256i tmp; + + sort_08v_descending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_07v_ascending(d09, d10, d11, d12, d13, d14, d15); + + tmp = d09; + + d09 = _mm256_max_epu32(d08, d09); + d08 = _mm256_min_epu32(d08, tmp); + + tmp = d10; + + d10 = _mm256_max_epu32(d07, d10); + d07 = _mm256_min_epu32(d07, tmp); + + tmp = d11; + + d11 = _mm256_max_epu32(d06, 
d11); + d06 = _mm256_min_epu32(d06, tmp); + + tmp = d12; + + d12 = _mm256_max_epu32(d05, d12); + d05 = _mm256_min_epu32(d05, tmp); + + tmp = d13; + + d13 = _mm256_max_epu32(d04, d13); + d04 = _mm256_min_epu32(d04, tmp); + + tmp = d14; + + d14 = _mm256_max_epu32(d03, d14); + d03 = _mm256_min_epu32(d03, tmp); + + tmp = d15; + + d15 = _mm256_max_epu32(d02, d15); + d02 = _mm256_min_epu32(d02, tmp); + + sort_08v_merge_descending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_07v_merge_descending(d09, d10, d11, d12, d13, d14, d15); + } + static INLINE void sort_16v_ascending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08, __m256i& d09, __m256i& d10, __m256i& d11, __m256i& d12, __m256i& d13, __m256i& d14, __m256i& d15, __m256i& d16) { + __m256i tmp; + + sort_08v_ascending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_08v_descending(d09, d10, d11, d12, d13, d14, d15, d16); + + tmp = d09; + + d09 = _mm256_max_epu32(d08, d09); + d08 = _mm256_min_epu32(d08, tmp); + + tmp = d10; + + d10 = _mm256_max_epu32(d07, d10); + d07 = _mm256_min_epu32(d07, tmp); + + tmp = d11; + + d11 = _mm256_max_epu32(d06, d11); + d06 = _mm256_min_epu32(d06, tmp); + + tmp = d12; + + d12 = _mm256_max_epu32(d05, d12); + d05 = _mm256_min_epu32(d05, tmp); + + tmp = d13; + + d13 = _mm256_max_epu32(d04, d13); + d04 = _mm256_min_epu32(d04, tmp); + + tmp = d14; + + d14 = _mm256_max_epu32(d03, d14); + d03 = _mm256_min_epu32(d03, tmp); + + tmp = d15; + + d15 = _mm256_max_epu32(d02, d15); + d02 = _mm256_min_epu32(d02, tmp); + + tmp = d16; + + d16 = _mm256_max_epu32(d01, d16); + d01 = _mm256_min_epu32(d01, tmp); + + sort_08v_merge_ascending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_08v_merge_ascending(d09, d10, d11, d12, d13, d14, d15, d16); + } + static INLINE void sort_16v_descending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08, __m256i& d09, __m256i& d10, __m256i& d11, __m256i& d12, __m256i& d13, __m256i& d14, __m256i& d15, __m256i& d16) { + __m256i tmp; + + sort_08v_descending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_08v_ascending(d09, d10, d11, d12, d13, d14, d15, d16); + + tmp = d09; + + d09 = _mm256_max_epu32(d08, d09); + d08 = _mm256_min_epu32(d08, tmp); + + tmp = d10; + + d10 = _mm256_max_epu32(d07, d10); + d07 = _mm256_min_epu32(d07, tmp); + + tmp = d11; + + d11 = _mm256_max_epu32(d06, d11); + d06 = _mm256_min_epu32(d06, tmp); + + tmp = d12; + + d12 = _mm256_max_epu32(d05, d12); + d05 = _mm256_min_epu32(d05, tmp); + + tmp = d13; + + d13 = _mm256_max_epu32(d04, d13); + d04 = _mm256_min_epu32(d04, tmp); + + tmp = d14; + + d14 = _mm256_max_epu32(d03, d14); + d03 = _mm256_min_epu32(d03, tmp); + + tmp = d15; + + d15 = _mm256_max_epu32(d02, d15); + d02 = _mm256_min_epu32(d02, tmp); + + tmp = d16; + + d16 = _mm256_max_epu32(d01, d16); + d01 = _mm256_min_epu32(d01, tmp); + + sort_08v_merge_descending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_08v_merge_descending(d09, d10, d11, d12, d13, d14, d15, d16); + } + +static NOINLINE void sort_01v(uint32_t *ptr) { + __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0);; + sort_01v_ascending(d01); + _mm256_storeu_si256((__m256i *) ptr + 0, d01); +} + +static NOINLINE void sort_02v(uint32_t *ptr) { + __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0);; + __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1);; + sort_02v_ascending(d01, d02); + _mm256_storeu_si256((__m256i *) ptr + 0, d01); + _mm256_storeu_si256((__m256i *) ptr 
+ 1, d02); +} + +static NOINLINE void sort_03v(uint32_t *ptr) { + __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0);; + __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1);; + __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2);; + sort_03v_ascending(d01, d02, d03); + _mm256_storeu_si256((__m256i *) ptr + 0, d01); + _mm256_storeu_si256((__m256i *) ptr + 1, d02); + _mm256_storeu_si256((__m256i *) ptr + 2, d03); +} + +static NOINLINE void sort_04v(uint32_t *ptr) { + __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0);; + __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1);; + __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2);; + __m256i d04 = _mm256_lddqu_si256((__m256i const *) ptr + 3);; + sort_04v_ascending(d01, d02, d03, d04); + _mm256_storeu_si256((__m256i *) ptr + 0, d01); + _mm256_storeu_si256((__m256i *) ptr + 1, d02); + _mm256_storeu_si256((__m256i *) ptr + 2, d03); + _mm256_storeu_si256((__m256i *) ptr + 3, d04); +} + +static NOINLINE void sort_05v(uint32_t *ptr) { + __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0);; + __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1);; + __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2);; + __m256i d04 = _mm256_lddqu_si256((__m256i const *) ptr + 3);; + __m256i d05 = _mm256_lddqu_si256((__m256i const *) ptr + 4);; + sort_05v_ascending(d01, d02, d03, d04, d05); + _mm256_storeu_si256((__m256i *) ptr + 0, d01); + _mm256_storeu_si256((__m256i *) ptr + 1, d02); + _mm256_storeu_si256((__m256i *) ptr + 2, d03); + _mm256_storeu_si256((__m256i *) ptr + 3, d04); + _mm256_storeu_si256((__m256i *) ptr + 4, d05); +} + +static NOINLINE void sort_06v(uint32_t *ptr) { + __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0);; + __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1);; + __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2);; + __m256i d04 = _mm256_lddqu_si256((__m256i const *) ptr + 3);; + __m256i d05 = _mm256_lddqu_si256((__m256i const *) ptr + 4);; + __m256i d06 = _mm256_lddqu_si256((__m256i const *) ptr + 5);; + sort_06v_ascending(d01, d02, d03, d04, d05, d06); + _mm256_storeu_si256((__m256i *) ptr + 0, d01); + _mm256_storeu_si256((__m256i *) ptr + 1, d02); + _mm256_storeu_si256((__m256i *) ptr + 2, d03); + _mm256_storeu_si256((__m256i *) ptr + 3, d04); + _mm256_storeu_si256((__m256i *) ptr + 4, d05); + _mm256_storeu_si256((__m256i *) ptr + 5, d06); +} + +static NOINLINE void sort_07v(uint32_t *ptr) { + __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0);; + __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1);; + __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2);; + __m256i d04 = _mm256_lddqu_si256((__m256i const *) ptr + 3);; + __m256i d05 = _mm256_lddqu_si256((__m256i const *) ptr + 4);; + __m256i d06 = _mm256_lddqu_si256((__m256i const *) ptr + 5);; + __m256i d07 = _mm256_lddqu_si256((__m256i const *) ptr + 6);; + sort_07v_ascending(d01, d02, d03, d04, d05, d06, d07); + _mm256_storeu_si256((__m256i *) ptr + 0, d01); + _mm256_storeu_si256((__m256i *) ptr + 1, d02); + _mm256_storeu_si256((__m256i *) ptr + 2, d03); + _mm256_storeu_si256((__m256i *) ptr + 3, d04); + _mm256_storeu_si256((__m256i *) ptr + 4, d05); + _mm256_storeu_si256((__m256i *) ptr + 5, d06); + _mm256_storeu_si256((__m256i *) ptr + 6, d07); +} + +static NOINLINE void sort_08v(uint32_t *ptr) { + __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0);; + __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1);; + __m256i d03 = 
_mm256_lddqu_si256((__m256i const *) ptr + 2);; + __m256i d04 = _mm256_lddqu_si256((__m256i const *) ptr + 3);; + __m256i d05 = _mm256_lddqu_si256((__m256i const *) ptr + 4);; + __m256i d06 = _mm256_lddqu_si256((__m256i const *) ptr + 5);; + __m256i d07 = _mm256_lddqu_si256((__m256i const *) ptr + 6);; + __m256i d08 = _mm256_lddqu_si256((__m256i const *) ptr + 7);; + sort_08v_ascending(d01, d02, d03, d04, d05, d06, d07, d08); + _mm256_storeu_si256((__m256i *) ptr + 0, d01); + _mm256_storeu_si256((__m256i *) ptr + 1, d02); + _mm256_storeu_si256((__m256i *) ptr + 2, d03); + _mm256_storeu_si256((__m256i *) ptr + 3, d04); + _mm256_storeu_si256((__m256i *) ptr + 4, d05); + _mm256_storeu_si256((__m256i *) ptr + 5, d06); + _mm256_storeu_si256((__m256i *) ptr + 6, d07); + _mm256_storeu_si256((__m256i *) ptr + 7, d08); +} + +static NOINLINE void sort_09v(uint32_t *ptr) { + __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0);; + __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1);; + __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2);; + __m256i d04 = _mm256_lddqu_si256((__m256i const *) ptr + 3);; + __m256i d05 = _mm256_lddqu_si256((__m256i const *) ptr + 4);; + __m256i d06 = _mm256_lddqu_si256((__m256i const *) ptr + 5);; + __m256i d07 = _mm256_lddqu_si256((__m256i const *) ptr + 6);; + __m256i d08 = _mm256_lddqu_si256((__m256i const *) ptr + 7);; + __m256i d09 = _mm256_lddqu_si256((__m256i const *) ptr + 8);; + sort_09v_ascending(d01, d02, d03, d04, d05, d06, d07, d08, d09); + _mm256_storeu_si256((__m256i *) ptr + 0, d01); + _mm256_storeu_si256((__m256i *) ptr + 1, d02); + _mm256_storeu_si256((__m256i *) ptr + 2, d03); + _mm256_storeu_si256((__m256i *) ptr + 3, d04); + _mm256_storeu_si256((__m256i *) ptr + 4, d05); + _mm256_storeu_si256((__m256i *) ptr + 5, d06); + _mm256_storeu_si256((__m256i *) ptr + 6, d07); + _mm256_storeu_si256((__m256i *) ptr + 7, d08); + _mm256_storeu_si256((__m256i *) ptr + 8, d09); +} + +static NOINLINE void sort_10v(uint32_t *ptr) { + __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0);; + __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1);; + __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2);; + __m256i d04 = _mm256_lddqu_si256((__m256i const *) ptr + 3);; + __m256i d05 = _mm256_lddqu_si256((__m256i const *) ptr + 4);; + __m256i d06 = _mm256_lddqu_si256((__m256i const *) ptr + 5);; + __m256i d07 = _mm256_lddqu_si256((__m256i const *) ptr + 6);; + __m256i d08 = _mm256_lddqu_si256((__m256i const *) ptr + 7);; + __m256i d09 = _mm256_lddqu_si256((__m256i const *) ptr + 8);; + __m256i d10 = _mm256_lddqu_si256((__m256i const *) ptr + 9);; + sort_10v_ascending(d01, d02, d03, d04, d05, d06, d07, d08, d09, d10); + _mm256_storeu_si256((__m256i *) ptr + 0, d01); + _mm256_storeu_si256((__m256i *) ptr + 1, d02); + _mm256_storeu_si256((__m256i *) ptr + 2, d03); + _mm256_storeu_si256((__m256i *) ptr + 3, d04); + _mm256_storeu_si256((__m256i *) ptr + 4, d05); + _mm256_storeu_si256((__m256i *) ptr + 5, d06); + _mm256_storeu_si256((__m256i *) ptr + 6, d07); + _mm256_storeu_si256((__m256i *) ptr + 7, d08); + _mm256_storeu_si256((__m256i *) ptr + 8, d09); + _mm256_storeu_si256((__m256i *) ptr + 9, d10); +} + +static NOINLINE void sort_11v(uint32_t *ptr) { + __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0);; + __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1);; + __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2);; + __m256i d04 = _mm256_lddqu_si256((__m256i const *) ptr + 3);; + __m256i d05 = 
_mm256_lddqu_si256((__m256i const *) ptr + 4);; + __m256i d06 = _mm256_lddqu_si256((__m256i const *) ptr + 5);; + __m256i d07 = _mm256_lddqu_si256((__m256i const *) ptr + 6);; + __m256i d08 = _mm256_lddqu_si256((__m256i const *) ptr + 7);; + __m256i d09 = _mm256_lddqu_si256((__m256i const *) ptr + 8);; + __m256i d10 = _mm256_lddqu_si256((__m256i const *) ptr + 9);; + __m256i d11 = _mm256_lddqu_si256((__m256i const *) ptr + 10);; + sort_11v_ascending(d01, d02, d03, d04, d05, d06, d07, d08, d09, d10, d11); + _mm256_storeu_si256((__m256i *) ptr + 0, d01); + _mm256_storeu_si256((__m256i *) ptr + 1, d02); + _mm256_storeu_si256((__m256i *) ptr + 2, d03); + _mm256_storeu_si256((__m256i *) ptr + 3, d04); + _mm256_storeu_si256((__m256i *) ptr + 4, d05); + _mm256_storeu_si256((__m256i *) ptr + 5, d06); + _mm256_storeu_si256((__m256i *) ptr + 6, d07); + _mm256_storeu_si256((__m256i *) ptr + 7, d08); + _mm256_storeu_si256((__m256i *) ptr + 8, d09); + _mm256_storeu_si256((__m256i *) ptr + 9, d10); + _mm256_storeu_si256((__m256i *) ptr + 10, d11); +} + +static NOINLINE void sort_12v(uint32_t *ptr) { + __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0);; + __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1);; + __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2);; + __m256i d04 = _mm256_lddqu_si256((__m256i const *) ptr + 3);; + __m256i d05 = _mm256_lddqu_si256((__m256i const *) ptr + 4);; + __m256i d06 = _mm256_lddqu_si256((__m256i const *) ptr + 5);; + __m256i d07 = _mm256_lddqu_si256((__m256i const *) ptr + 6);; + __m256i d08 = _mm256_lddqu_si256((__m256i const *) ptr + 7);; + __m256i d09 = _mm256_lddqu_si256((__m256i const *) ptr + 8);; + __m256i d10 = _mm256_lddqu_si256((__m256i const *) ptr + 9);; + __m256i d11 = _mm256_lddqu_si256((__m256i const *) ptr + 10);; + __m256i d12 = _mm256_lddqu_si256((__m256i const *) ptr + 11);; + sort_12v_ascending(d01, d02, d03, d04, d05, d06, d07, d08, d09, d10, d11, d12); + _mm256_storeu_si256((__m256i *) ptr + 0, d01); + _mm256_storeu_si256((__m256i *) ptr + 1, d02); + _mm256_storeu_si256((__m256i *) ptr + 2, d03); + _mm256_storeu_si256((__m256i *) ptr + 3, d04); + _mm256_storeu_si256((__m256i *) ptr + 4, d05); + _mm256_storeu_si256((__m256i *) ptr + 5, d06); + _mm256_storeu_si256((__m256i *) ptr + 6, d07); + _mm256_storeu_si256((__m256i *) ptr + 7, d08); + _mm256_storeu_si256((__m256i *) ptr + 8, d09); + _mm256_storeu_si256((__m256i *) ptr + 9, d10); + _mm256_storeu_si256((__m256i *) ptr + 10, d11); + _mm256_storeu_si256((__m256i *) ptr + 11, d12); +} + +static NOINLINE void sort_13v(uint32_t *ptr) { + __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0);; + __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1);; + __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2);; + __m256i d04 = _mm256_lddqu_si256((__m256i const *) ptr + 3);; + __m256i d05 = _mm256_lddqu_si256((__m256i const *) ptr + 4);; + __m256i d06 = _mm256_lddqu_si256((__m256i const *) ptr + 5);; + __m256i d07 = _mm256_lddqu_si256((__m256i const *) ptr + 6);; + __m256i d08 = _mm256_lddqu_si256((__m256i const *) ptr + 7);; + __m256i d09 = _mm256_lddqu_si256((__m256i const *) ptr + 8);; + __m256i d10 = _mm256_lddqu_si256((__m256i const *) ptr + 9);; + __m256i d11 = _mm256_lddqu_si256((__m256i const *) ptr + 10);; + __m256i d12 = _mm256_lddqu_si256((__m256i const *) ptr + 11);; + __m256i d13 = _mm256_lddqu_si256((__m256i const *) ptr + 12);; + sort_13v_ascending(d01, d02, d03, d04, d05, d06, d07, d08, d09, d10, d11, d12, d13); + _mm256_storeu_si256((__m256i *) 
ptr + 0, d01); + _mm256_storeu_si256((__m256i *) ptr + 1, d02); + _mm256_storeu_si256((__m256i *) ptr + 2, d03); + _mm256_storeu_si256((__m256i *) ptr + 3, d04); + _mm256_storeu_si256((__m256i *) ptr + 4, d05); + _mm256_storeu_si256((__m256i *) ptr + 5, d06); + _mm256_storeu_si256((__m256i *) ptr + 6, d07); + _mm256_storeu_si256((__m256i *) ptr + 7, d08); + _mm256_storeu_si256((__m256i *) ptr + 8, d09); + _mm256_storeu_si256((__m256i *) ptr + 9, d10); + _mm256_storeu_si256((__m256i *) ptr + 10, d11); + _mm256_storeu_si256((__m256i *) ptr + 11, d12); + _mm256_storeu_si256((__m256i *) ptr + 12, d13); +} + +static NOINLINE void sort_14v(uint32_t *ptr) { + __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0);; + __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1);; + __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2);; + __m256i d04 = _mm256_lddqu_si256((__m256i const *) ptr + 3);; + __m256i d05 = _mm256_lddqu_si256((__m256i const *) ptr + 4);; + __m256i d06 = _mm256_lddqu_si256((__m256i const *) ptr + 5);; + __m256i d07 = _mm256_lddqu_si256((__m256i const *) ptr + 6);; + __m256i d08 = _mm256_lddqu_si256((__m256i const *) ptr + 7);; + __m256i d09 = _mm256_lddqu_si256((__m256i const *) ptr + 8);; + __m256i d10 = _mm256_lddqu_si256((__m256i const *) ptr + 9);; + __m256i d11 = _mm256_lddqu_si256((__m256i const *) ptr + 10);; + __m256i d12 = _mm256_lddqu_si256((__m256i const *) ptr + 11);; + __m256i d13 = _mm256_lddqu_si256((__m256i const *) ptr + 12);; + __m256i d14 = _mm256_lddqu_si256((__m256i const *) ptr + 13);; + sort_14v_ascending(d01, d02, d03, d04, d05, d06, d07, d08, d09, d10, d11, d12, d13, d14); + _mm256_storeu_si256((__m256i *) ptr + 0, d01); + _mm256_storeu_si256((__m256i *) ptr + 1, d02); + _mm256_storeu_si256((__m256i *) ptr + 2, d03); + _mm256_storeu_si256((__m256i *) ptr + 3, d04); + _mm256_storeu_si256((__m256i *) ptr + 4, d05); + _mm256_storeu_si256((__m256i *) ptr + 5, d06); + _mm256_storeu_si256((__m256i *) ptr + 6, d07); + _mm256_storeu_si256((__m256i *) ptr + 7, d08); + _mm256_storeu_si256((__m256i *) ptr + 8, d09); + _mm256_storeu_si256((__m256i *) ptr + 9, d10); + _mm256_storeu_si256((__m256i *) ptr + 10, d11); + _mm256_storeu_si256((__m256i *) ptr + 11, d12); + _mm256_storeu_si256((__m256i *) ptr + 12, d13); + _mm256_storeu_si256((__m256i *) ptr + 13, d14); +} + +static NOINLINE void sort_15v(uint32_t *ptr) { + __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0);; + __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1);; + __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2);; + __m256i d04 = _mm256_lddqu_si256((__m256i const *) ptr + 3);; + __m256i d05 = _mm256_lddqu_si256((__m256i const *) ptr + 4);; + __m256i d06 = _mm256_lddqu_si256((__m256i const *) ptr + 5);; + __m256i d07 = _mm256_lddqu_si256((__m256i const *) ptr + 6);; + __m256i d08 = _mm256_lddqu_si256((__m256i const *) ptr + 7);; + __m256i d09 = _mm256_lddqu_si256((__m256i const *) ptr + 8);; + __m256i d10 = _mm256_lddqu_si256((__m256i const *) ptr + 9);; + __m256i d11 = _mm256_lddqu_si256((__m256i const *) ptr + 10);; + __m256i d12 = _mm256_lddqu_si256((__m256i const *) ptr + 11);; + __m256i d13 = _mm256_lddqu_si256((__m256i const *) ptr + 12);; + __m256i d14 = _mm256_lddqu_si256((__m256i const *) ptr + 13);; + __m256i d15 = _mm256_lddqu_si256((__m256i const *) ptr + 14);; + sort_15v_ascending(d01, d02, d03, d04, d05, d06, d07, d08, d09, d10, d11, d12, d13, d14, d15); + _mm256_storeu_si256((__m256i *) ptr + 0, d01); + _mm256_storeu_si256((__m256i *) ptr + 1, d02); 
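+ // the sort above completed entirely in ymm registers; these unaligned stores write the sorted vectors back over the source buffer in place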
+ _mm256_storeu_si256((__m256i *) ptr + 2, d03); + _mm256_storeu_si256((__m256i *) ptr + 3, d04); + _mm256_storeu_si256((__m256i *) ptr + 4, d05); + _mm256_storeu_si256((__m256i *) ptr + 5, d06); + _mm256_storeu_si256((__m256i *) ptr + 6, d07); + _mm256_storeu_si256((__m256i *) ptr + 7, d08); + _mm256_storeu_si256((__m256i *) ptr + 8, d09); + _mm256_storeu_si256((__m256i *) ptr + 9, d10); + _mm256_storeu_si256((__m256i *) ptr + 10, d11); + _mm256_storeu_si256((__m256i *) ptr + 11, d12); + _mm256_storeu_si256((__m256i *) ptr + 12, d13); + _mm256_storeu_si256((__m256i *) ptr + 13, d14); + _mm256_storeu_si256((__m256i *) ptr + 14, d15); +} + +static NOINLINE void sort_16v(uint32_t *ptr) { + __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0);; + __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1);; + __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2);; + __m256i d04 = _mm256_lddqu_si256((__m256i const *) ptr + 3);; + __m256i d05 = _mm256_lddqu_si256((__m256i const *) ptr + 4);; + __m256i d06 = _mm256_lddqu_si256((__m256i const *) ptr + 5);; + __m256i d07 = _mm256_lddqu_si256((__m256i const *) ptr + 6);; + __m256i d08 = _mm256_lddqu_si256((__m256i const *) ptr + 7);; + __m256i d09 = _mm256_lddqu_si256((__m256i const *) ptr + 8);; + __m256i d10 = _mm256_lddqu_si256((__m256i const *) ptr + 9);; + __m256i d11 = _mm256_lddqu_si256((__m256i const *) ptr + 10);; + __m256i d12 = _mm256_lddqu_si256((__m256i const *) ptr + 11);; + __m256i d13 = _mm256_lddqu_si256((__m256i const *) ptr + 12);; + __m256i d14 = _mm256_lddqu_si256((__m256i const *) ptr + 13);; + __m256i d15 = _mm256_lddqu_si256((__m256i const *) ptr + 14);; + __m256i d16 = _mm256_lddqu_si256((__m256i const *) ptr + 15);; + sort_16v_ascending(d01, d02, d03, d04, d05, d06, d07, d08, d09, d10, d11, d12, d13, d14, d15, d16); + _mm256_storeu_si256((__m256i *) ptr + 0, d01); + _mm256_storeu_si256((__m256i *) ptr + 1, d02); + _mm256_storeu_si256((__m256i *) ptr + 2, d03); + _mm256_storeu_si256((__m256i *) ptr + 3, d04); + _mm256_storeu_si256((__m256i *) ptr + 4, d05); + _mm256_storeu_si256((__m256i *) ptr + 5, d06); + _mm256_storeu_si256((__m256i *) ptr + 6, d07); + _mm256_storeu_si256((__m256i *) ptr + 7, d08); + _mm256_storeu_si256((__m256i *) ptr + 8, d09); + _mm256_storeu_si256((__m256i *) ptr + 9, d10); + _mm256_storeu_si256((__m256i *) ptr + 10, d11); + _mm256_storeu_si256((__m256i *) ptr + 11, d12); + _mm256_storeu_si256((__m256i *) ptr + 12, d13); + _mm256_storeu_si256((__m256i *) ptr + 13, d14); + _mm256_storeu_si256((__m256i *) ptr + 14, d15); + _mm256_storeu_si256((__m256i *) ptr + 15, d16); +} + static void sort(uint32_t *ptr, size_t length) { + const int N = 8; + + switch(length / N) { + case 1: sort_01v(ptr); break; + case 2: sort_02v(ptr); break; + case 3: sort_03v(ptr); break; + case 4: sort_04v(ptr); break; + case 5: sort_05v(ptr); break; + case 6: sort_06v(ptr); break; + case 7: sort_07v(ptr); break; + case 8: sort_08v(ptr); break; + case 9: sort_09v(ptr); break; + case 10: sort_10v(ptr); break; + case 11: sort_11v(ptr); break; + case 12: sort_12v(ptr); break; + case 13: sort_13v(ptr); break; + case 14: sort_14v(ptr); break; + case 15: sort_15v(ptr); break; + case 16: sort_16v(ptr); break; + } +} +}; +} +} +#endif diff --git a/src/coreclr/src/gc/smallsort/bitonic_sort.AVX2.uint64_t.generated.h b/src/coreclr/src/gc/smallsort/bitonic_sort.AVX2.uint64_t.generated.h new file mode 100644 index 00000000000000..4edbed703eb26a --- /dev/null +++ 
b/src/coreclr/src/gc/smallsort/bitonic_sort.AVX2.uint64_t.generated.h @@ -0,0 +1,1540 @@ +///////////////////////////////////////////////////////////////////////////// +//// +// This file was auto-generated by a tool at 2020-05-31 19:46:17 +// +// It is recommended you DO NOT directly edit this file but instead edit +// the code-generator that generated this source file. +///////////////////////////////////////////////////////////////////////////// + +#ifndef BITONIC_SORT_AVX2_UINT64_T_H +#define BITONIC_SORT_AVX2_UINT64_T_H + +#include <immintrin.h> +#include "bitonic_sort.h" + +#ifdef _MSC_VER + // MSVC + #define INLINE __forceinline + #define NOINLINE __declspec(noinline) +#else + // GCC + Clang + #define INLINE __attribute__((always_inline)) + #define NOINLINE __attribute__((noinline)) +#endif + +#define i2d _mm256_castsi256_pd +#define d2i _mm256_castpd_si256 +#define i2s _mm256_castsi256_ps +#define s2i _mm256_castps_si256 +#define s2d _mm256_castps_pd +#define d2s _mm256_castpd_ps + +namespace gcsort { +namespace smallsort { +template<> struct bitonic<uint64_t> { +public: + + static INLINE void sort_01v_ascending(__m256i& d01) { + __m256i min, max, s, cmp; + __m256i topBit = _mm256_set1_epi64x(1LLU << 63); + + s = d2i(_mm256_shuffle_pd(i2d(d01), i2d(d01), 0x5)); + cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, s), _mm256_xor_si256(topBit, d01)); + min = d2i(_mm256_blendv_pd(i2d(s), i2d(d01), i2d(cmp))); + max = d2i(_mm256_blendv_pd(i2d(d01), i2d(s), i2d(cmp))); + d01 = d2i(_mm256_blend_pd(i2d(min), i2d(max), 0xA)); + + s = d2i(_mm256_permute4x64_pd(i2d(d01), 0x1B)); + cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, s), _mm256_xor_si256(topBit, d01)); + min = d2i(_mm256_blendv_pd(i2d(s), i2d(d01), i2d(cmp))); + max = d2i(_mm256_blendv_pd(i2d(d01), i2d(s), i2d(cmp))); + d01 = d2i(_mm256_blend_pd(i2d(min), i2d(max), 0xC)); + + s = d2i(_mm256_shuffle_pd(i2d(d01), i2d(d01), 0x5)); + cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, s), _mm256_xor_si256(topBit, d01)); + min = d2i(_mm256_blendv_pd(i2d(s), i2d(d01), i2d(cmp))); + max = d2i(_mm256_blendv_pd(i2d(d01), i2d(s), i2d(cmp))); + d01 = d2i(_mm256_blend_pd(i2d(min), i2d(max), 0xA)); +} + static INLINE void sort_01v_merge_ascending(__m256i& d01) { + __m256i min, max, s, cmp; + __m256i topBit = _mm256_set1_epi64x(1LLU << 63); + + s = d2i(_mm256_permute4x64_pd(i2d(d01), 0x4E)); + cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, s), _mm256_xor_si256(topBit, d01)); + min = d2i(_mm256_blendv_pd(i2d(s), i2d(d01), i2d(cmp))); + max = d2i(_mm256_blendv_pd(i2d(d01), i2d(s), i2d(cmp))); + d01 = d2i(_mm256_blend_pd(i2d(min), i2d(max), 0xC)); + + s = d2i(_mm256_shuffle_pd(i2d(d01), i2d(d01), 0x5)); + cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, s), _mm256_xor_si256(topBit, d01)); + min = d2i(_mm256_blendv_pd(i2d(s), i2d(d01), i2d(cmp))); + max = d2i(_mm256_blendv_pd(i2d(d01), i2d(s), i2d(cmp))); + d01 = d2i(_mm256_blend_pd(i2d(min), i2d(max), 0xA)); + } + static INLINE void sort_01v_descending(__m256i& d01) { + __m256i min, max, s, cmp; + __m256i topBit = _mm256_set1_epi64x(1LLU << 63); + + s = d2i(_mm256_shuffle_pd(i2d(d01), i2d(d01), 0x5)); + cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, s), _mm256_xor_si256(topBit, d01)); + min = d2i(_mm256_blendv_pd(i2d(s), i2d(d01), i2d(cmp))); + max = d2i(_mm256_blendv_pd(i2d(d01), i2d(s), i2d(cmp))); + d01 = d2i(_mm256_blend_pd(i2d(max), i2d(min), 0xA)); + + s = d2i(_mm256_permute4x64_pd(i2d(d01), 0x1B)); + cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, s), _mm256_xor_si256(topBit, d01)); + min =
d2i(_mm256_blendv_pd(i2d(s), i2d(d01), i2d(cmp))); + max = d2i(_mm256_blendv_pd(i2d(d01), i2d(s), i2d(cmp))); + d01 = d2i(_mm256_blend_pd(i2d(max), i2d(min), 0xC)); + + s = d2i(_mm256_shuffle_pd(i2d(d01), i2d(d01), 0x5)); + cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, s), _mm256_xor_si256(topBit, d01)); + min = d2i(_mm256_blendv_pd(i2d(s), i2d(d01), i2d(cmp))); + max = d2i(_mm256_blendv_pd(i2d(d01), i2d(s), i2d(cmp))); + d01 = d2i(_mm256_blend_pd(i2d(max), i2d(min), 0xA)); +} + static INLINE void sort_01v_merge_descending(__m256i& d01) { + __m256i min, max, s, cmp; + __m256i topBit = _mm256_set1_epi64x(1LLU << 63); + + s = d2i(_mm256_permute4x64_pd(i2d(d01), 0x4E)); + cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, s), _mm256_xor_si256(topBit, d01)); + min = d2i(_mm256_blendv_pd(i2d(s), i2d(d01), i2d(cmp))); + max = d2i(_mm256_blendv_pd(i2d(d01), i2d(s), i2d(cmp))); + d01 = d2i(_mm256_blend_pd(i2d(max), i2d(min), 0xC)); + + s = d2i(_mm256_shuffle_pd(i2d(d01), i2d(d01), 0x5)); + cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, s), _mm256_xor_si256(topBit, d01)); + min = d2i(_mm256_blendv_pd(i2d(s), i2d(d01), i2d(cmp))); + max = d2i(_mm256_blendv_pd(i2d(d01), i2d(s), i2d(cmp))); + d01 = d2i(_mm256_blend_pd(i2d(max), i2d(min), 0xA)); + } + static INLINE void sort_02v_ascending(__m256i& d01, __m256i& d02) { + __m256i tmp, cmp; + __m256i topBit = _mm256_set1_epi64x(1LLU << 63); + + sort_01v_ascending(d01); + sort_01v_descending(d02); + + tmp = d02; + cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d01), _mm256_xor_si256(topBit, d02)); + d02 = d2i(_mm256_blendv_pd(i2d(d02), i2d(d01), i2d(cmp))); + d01 = d2i(_mm256_blendv_pd(i2d(d01), i2d(tmp), i2d(cmp))); + + sort_01v_merge_ascending(d01); + sort_01v_merge_ascending(d02); + } + static INLINE void sort_02v_descending(__m256i& d01, __m256i& d02) { + __m256i tmp, cmp; + __m256i topBit = _mm256_set1_epi64x(1LLU << 63); + + sort_01v_descending(d01); + sort_01v_ascending(d02); + + tmp = d02; + cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d01), _mm256_xor_si256(topBit, d02)); + d02 = d2i(_mm256_blendv_pd(i2d(d02), i2d(d01), i2d(cmp))); + d01 = d2i(_mm256_blendv_pd(i2d(d01), i2d(tmp), i2d(cmp))); + + sort_01v_merge_descending(d01); + sort_01v_merge_descending(d02); + } + static INLINE void sort_02v_merge_ascending(__m256i& d01, __m256i& d02) { + __m256i tmp, cmp; + __m256i topBit = _mm256_set1_epi64x(1LLU << 63); + + tmp = d01; + cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d02), _mm256_xor_si256(topBit, d01)); + d01 = d2i(_mm256_blendv_pd(i2d(d02), i2d(d01), i2d(cmp))); + cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d02), _mm256_xor_si256(topBit, tmp)); + d02 = d2i(_mm256_blendv_pd(i2d(tmp), i2d(d02), i2d(cmp))); + + sort_01v_merge_ascending(d01); + sort_01v_merge_ascending(d02); + } + static INLINE void sort_02v_merge_descending(__m256i& d01, __m256i& d02) { + __m256i tmp, cmp; + __m256i topBit = _mm256_set1_epi64x(1LLU << 63); + + tmp = d01; + cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d02), _mm256_xor_si256(topBit, d01)); + d01 = d2i(_mm256_blendv_pd(i2d(d02), i2d(d01), i2d(cmp))); + cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d02), _mm256_xor_si256(topBit, tmp)); + d02 = d2i(_mm256_blendv_pd(i2d(tmp), i2d(d02), i2d(cmp))); + + sort_01v_merge_descending(d01); + sort_01v_merge_descending(d02); + } + static INLINE void sort_03v_ascending(__m256i& d01, __m256i& d02, __m256i& d03) { + __m256i tmp, cmp; + __m256i topBit = _mm256_set1_epi64x(1LLU << 63); + + sort_02v_ascending(d01, d02); + sort_01v_descending(d03); + 
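+ // AVX2 lacks an unsigned 64-bit compare, so each compare-exchange biases both operands with topBit (1LLU << 63): XOR-ing the sign bit maps unsigned order onto signed order, making _mm256_cmpgt_epi64 stand in for the missing _mm256_cmpgt_epu64 (e.g. 0xFFFFFFFFFFFFFFFF biases to 0x7FFF...F, which correctly compares greater than 1, biased to 0x8000...0001)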
+ tmp = d03; + cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d02), _mm256_xor_si256(topBit, d03)); + d03 = d2i(_mm256_blendv_pd(i2d(d03), i2d(d02), i2d(cmp))); + d02 = d2i(_mm256_blendv_pd(i2d(d02), i2d(tmp), i2d(cmp))); + + sort_02v_merge_ascending(d01, d02); + sort_01v_merge_ascending(d03); + } + static INLINE void sort_03v_descending(__m256i& d01, __m256i& d02, __m256i& d03) { + __m256i tmp, cmp; + __m256i topBit = _mm256_set1_epi64x(1LLU << 63); + + sort_02v_descending(d01, d02); + sort_01v_ascending(d03); + + tmp = d03; + cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d02), _mm256_xor_si256(topBit, d03)); + d03 = d2i(_mm256_blendv_pd(i2d(d03), i2d(d02), i2d(cmp))); + d02 = d2i(_mm256_blendv_pd(i2d(d02), i2d(tmp), i2d(cmp))); + + sort_02v_merge_descending(d01, d02); + sort_01v_merge_descending(d03); + } + static INLINE void sort_03v_merge_ascending(__m256i& d01, __m256i& d02, __m256i& d03) { + __m256i tmp, cmp; + __m256i topBit = _mm256_set1_epi64x(1LLU << 63); + + tmp = d01; + cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d03), _mm256_xor_si256(topBit, d01)); + d01 = d2i(_mm256_blendv_pd(i2d(d03), i2d(d01), i2d(cmp))); + cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d03), _mm256_xor_si256(topBit, tmp)); + d03 = d2i(_mm256_blendv_pd(i2d(tmp), i2d(d03), i2d(cmp))); + + sort_02v_merge_ascending(d01, d02); + sort_01v_merge_ascending(d03); + } + static INLINE void sort_03v_merge_descending(__m256i& d01, __m256i& d02, __m256i& d03) { + __m256i tmp, cmp; + __m256i topBit = _mm256_set1_epi64x(1LLU << 63); + + tmp = d01; + cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d03), _mm256_xor_si256(topBit, d01)); + d01 = d2i(_mm256_blendv_pd(i2d(d03), i2d(d01), i2d(cmp))); + cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d03), _mm256_xor_si256(topBit, tmp)); + d03 = d2i(_mm256_blendv_pd(i2d(tmp), i2d(d03), i2d(cmp))); + + sort_02v_merge_descending(d01, d02); + sort_01v_merge_descending(d03); + } + static INLINE void sort_04v_ascending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04) { + __m256i tmp, cmp; + __m256i topBit = _mm256_set1_epi64x(1LLU << 63); + + sort_02v_ascending(d01, d02); + sort_02v_descending(d03, d04); + + tmp = d03; + cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d02), _mm256_xor_si256(topBit, d03)); + d03 = d2i(_mm256_blendv_pd(i2d(d03), i2d(d02), i2d(cmp))); + d02 = d2i(_mm256_blendv_pd(i2d(d02), i2d(tmp), i2d(cmp))); + + tmp = d04; + cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d01), _mm256_xor_si256(topBit, d04)); + d04 = d2i(_mm256_blendv_pd(i2d(d04), i2d(d01), i2d(cmp))); + d01 = d2i(_mm256_blendv_pd(i2d(d01), i2d(tmp), i2d(cmp))); + + sort_02v_merge_ascending(d01, d02); + sort_02v_merge_ascending(d03, d04); + } + static INLINE void sort_04v_descending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04) { + __m256i tmp, cmp; + __m256i topBit = _mm256_set1_epi64x(1LLU << 63); + + sort_02v_descending(d01, d02); + sort_02v_ascending(d03, d04); + + tmp = d03; + cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d02), _mm256_xor_si256(topBit, d03)); + d03 = d2i(_mm256_blendv_pd(i2d(d03), i2d(d02), i2d(cmp))); + d02 = d2i(_mm256_blendv_pd(i2d(d02), i2d(tmp), i2d(cmp))); + + tmp = d04; + cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d01), _mm256_xor_si256(topBit, d04)); + d04 = d2i(_mm256_blendv_pd(i2d(d04), i2d(d01), i2d(cmp))); + d01 = d2i(_mm256_blendv_pd(i2d(d01), i2d(tmp), i2d(cmp))); + + sort_02v_merge_descending(d01, d02); + sort_02v_merge_descending(d03, d04); + } + static INLINE void sort_04v_merge_ascending(__m256i& d01, __m256i& 
d02, __m256i& d03, __m256i& d04) { + __m256i tmp, cmp; + __m256i topBit = _mm256_set1_epi64x(1LLU << 63); + + tmp = d01; + cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d03), _mm256_xor_si256(topBit, d01)); + d01 = d2i(_mm256_blendv_pd(i2d(d03), i2d(d01), i2d(cmp))); + cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d03), _mm256_xor_si256(topBit, tmp)); + d03 = d2i(_mm256_blendv_pd(i2d(tmp), i2d(d03), i2d(cmp))); + + tmp = d02; + cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d04), _mm256_xor_si256(topBit, d02)); + d02 = d2i(_mm256_blendv_pd(i2d(d04), i2d(d02), i2d(cmp))); + cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d04), _mm256_xor_si256(topBit, tmp)); + d04 = d2i(_mm256_blendv_pd(i2d(tmp), i2d(d04), i2d(cmp))); + + sort_02v_merge_ascending(d01, d02); + sort_02v_merge_ascending(d03, d04); + } + static INLINE void sort_04v_merge_descending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04) { + __m256i tmp, cmp; + __m256i topBit = _mm256_set1_epi64x(1LLU << 63); + + tmp = d01; + cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d03), _mm256_xor_si256(topBit, d01)); + d01 = d2i(_mm256_blendv_pd(i2d(d03), i2d(d01), i2d(cmp))); + cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d03), _mm256_xor_si256(topBit, tmp)); + d03 = d2i(_mm256_blendv_pd(i2d(tmp), i2d(d03), i2d(cmp))); + + tmp = d02; + cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d04), _mm256_xor_si256(topBit, d02)); + d02 = d2i(_mm256_blendv_pd(i2d(d04), i2d(d02), i2d(cmp))); + cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d04), _mm256_xor_si256(topBit, tmp)); + d04 = d2i(_mm256_blendv_pd(i2d(tmp), i2d(d04), i2d(cmp))); + + sort_02v_merge_descending(d01, d02); + sort_02v_merge_descending(d03, d04); + } + static INLINE void sort_05v_ascending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05) { + __m256i tmp, cmp; + __m256i topBit = _mm256_set1_epi64x(1LLU << 63); + + sort_04v_ascending(d01, d02, d03, d04); + sort_01v_descending(d05); + + tmp = d05; + cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d04), _mm256_xor_si256(topBit, d05)); + d05 = d2i(_mm256_blendv_pd(i2d(d05), i2d(d04), i2d(cmp))); + d04 = d2i(_mm256_blendv_pd(i2d(d04), i2d(tmp), i2d(cmp))); + + sort_04v_merge_ascending(d01, d02, d03, d04); + sort_01v_merge_ascending(d05); + } + static INLINE void sort_05v_descending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05) { + __m256i tmp, cmp; + __m256i topBit = _mm256_set1_epi64x(1LLU << 63); + + sort_04v_descending(d01, d02, d03, d04); + sort_01v_ascending(d05); + + tmp = d05; + cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d04), _mm256_xor_si256(topBit, d05)); + d05 = d2i(_mm256_blendv_pd(i2d(d05), i2d(d04), i2d(cmp))); + d04 = d2i(_mm256_blendv_pd(i2d(d04), i2d(tmp), i2d(cmp))); + + sort_04v_merge_descending(d01, d02, d03, d04); + sort_01v_merge_descending(d05); + } + static INLINE void sort_05v_merge_ascending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05) { + __m256i tmp, cmp; + __m256i topBit = _mm256_set1_epi64x(1LLU << 63); + + tmp = d01; + cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d05), _mm256_xor_si256(topBit, d01)); + d01 = d2i(_mm256_blendv_pd(i2d(d05), i2d(d01), i2d(cmp))); + cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d05), _mm256_xor_si256(topBit, tmp)); + d05 = d2i(_mm256_blendv_pd(i2d(tmp), i2d(d05), i2d(cmp))); + + sort_04v_merge_ascending(d01, d02, d03, d04); + sort_01v_merge_ascending(d05); + } + static INLINE void sort_05v_merge_descending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, 
__m256i& d05) { + __m256i tmp, cmp; + __m256i topBit = _mm256_set1_epi64x(1LLU << 63); + + tmp = d01; + cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d05), _mm256_xor_si256(topBit, d01)); + d01 = d2i(_mm256_blendv_pd(i2d(d05), i2d(d01), i2d(cmp))); + cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d05), _mm256_xor_si256(topBit, tmp)); + d05 = d2i(_mm256_blendv_pd(i2d(tmp), i2d(d05), i2d(cmp))); + + sort_04v_merge_descending(d01, d02, d03, d04); + sort_01v_merge_descending(d05); + } + static INLINE void sort_06v_ascending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06) { + __m256i tmp, cmp; + __m256i topBit = _mm256_set1_epi64x(1LLU << 63); + + sort_04v_ascending(d01, d02, d03, d04); + sort_02v_descending(d05, d06); + + tmp = d05; + cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d04), _mm256_xor_si256(topBit, d05)); + d05 = d2i(_mm256_blendv_pd(i2d(d05), i2d(d04), i2d(cmp))); + d04 = d2i(_mm256_blendv_pd(i2d(d04), i2d(tmp), i2d(cmp))); + + tmp = d06; + cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d03), _mm256_xor_si256(topBit, d06)); + d06 = d2i(_mm256_blendv_pd(i2d(d06), i2d(d03), i2d(cmp))); + d03 = d2i(_mm256_blendv_pd(i2d(d03), i2d(tmp), i2d(cmp))); + + sort_04v_merge_ascending(d01, d02, d03, d04); + sort_02v_merge_ascending(d05, d06); + } + static INLINE void sort_06v_descending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06) { + __m256i tmp, cmp; + __m256i topBit = _mm256_set1_epi64x(1LLU << 63); + + sort_04v_descending(d01, d02, d03, d04); + sort_02v_ascending(d05, d06); + + tmp = d05; + cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d04), _mm256_xor_si256(topBit, d05)); + d05 = d2i(_mm256_blendv_pd(i2d(d05), i2d(d04), i2d(cmp))); + d04 = d2i(_mm256_blendv_pd(i2d(d04), i2d(tmp), i2d(cmp))); + + tmp = d06; + cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d03), _mm256_xor_si256(topBit, d06)); + d06 = d2i(_mm256_blendv_pd(i2d(d06), i2d(d03), i2d(cmp))); + d03 = d2i(_mm256_blendv_pd(i2d(d03), i2d(tmp), i2d(cmp))); + + sort_04v_merge_descending(d01, d02, d03, d04); + sort_02v_merge_descending(d05, d06); + } + static INLINE void sort_06v_merge_ascending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06) { + __m256i tmp, cmp; + __m256i topBit = _mm256_set1_epi64x(1LLU << 63); + + tmp = d01; + cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d05), _mm256_xor_si256(topBit, d01)); + d01 = d2i(_mm256_blendv_pd(i2d(d05), i2d(d01), i2d(cmp))); + cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d05), _mm256_xor_si256(topBit, tmp)); + d05 = d2i(_mm256_blendv_pd(i2d(tmp), i2d(d05), i2d(cmp))); + + tmp = d02; + cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d06), _mm256_xor_si256(topBit, d02)); + d02 = d2i(_mm256_blendv_pd(i2d(d06), i2d(d02), i2d(cmp))); + cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d06), _mm256_xor_si256(topBit, tmp)); + d06 = d2i(_mm256_blendv_pd(i2d(tmp), i2d(d06), i2d(cmp))); + + sort_04v_merge_ascending(d01, d02, d03, d04); + sort_02v_merge_ascending(d05, d06); + } + static INLINE void sort_06v_merge_descending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06) { + __m256i tmp, cmp; + __m256i topBit = _mm256_set1_epi64x(1LLU << 63); + + tmp = d01; + cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d05), _mm256_xor_si256(topBit, d01)); + d01 = d2i(_mm256_blendv_pd(i2d(d05), i2d(d01), i2d(cmp))); + cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d05), _mm256_xor_si256(topBit, tmp)); + d05 = 
d2i(_mm256_blendv_pd(i2d(tmp), i2d(d05), i2d(cmp))); + + tmp = d02; + cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d06), _mm256_xor_si256(topBit, d02)); + d02 = d2i(_mm256_blendv_pd(i2d(d06), i2d(d02), i2d(cmp))); + cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d06), _mm256_xor_si256(topBit, tmp)); + d06 = d2i(_mm256_blendv_pd(i2d(tmp), i2d(d06), i2d(cmp))); + + sort_04v_merge_descending(d01, d02, d03, d04); + sort_02v_merge_descending(d05, d06); + } + static INLINE void sort_07v_ascending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07) { + __m256i tmp, cmp; + __m256i topBit = _mm256_set1_epi64x(1LLU << 63); + + sort_04v_ascending(d01, d02, d03, d04); + sort_03v_descending(d05, d06, d07); + + tmp = d05; + cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d04), _mm256_xor_si256(topBit, d05)); + d05 = d2i(_mm256_blendv_pd(i2d(d05), i2d(d04), i2d(cmp))); + d04 = d2i(_mm256_blendv_pd(i2d(d04), i2d(tmp), i2d(cmp))); + + tmp = d06; + cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d03), _mm256_xor_si256(topBit, d06)); + d06 = d2i(_mm256_blendv_pd(i2d(d06), i2d(d03), i2d(cmp))); + d03 = d2i(_mm256_blendv_pd(i2d(d03), i2d(tmp), i2d(cmp))); + + tmp = d07; + cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d02), _mm256_xor_si256(topBit, d07)); + d07 = d2i(_mm256_blendv_pd(i2d(d07), i2d(d02), i2d(cmp))); + d02 = d2i(_mm256_blendv_pd(i2d(d02), i2d(tmp), i2d(cmp))); + + sort_04v_merge_ascending(d01, d02, d03, d04); + sort_03v_merge_ascending(d05, d06, d07); + } + static INLINE void sort_07v_descending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07) { + __m256i tmp, cmp; + __m256i topBit = _mm256_set1_epi64x(1LLU << 63); + + sort_04v_descending(d01, d02, d03, d04); + sort_03v_ascending(d05, d06, d07); + + tmp = d05; + cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d04), _mm256_xor_si256(topBit, d05)); + d05 = d2i(_mm256_blendv_pd(i2d(d05), i2d(d04), i2d(cmp))); + d04 = d2i(_mm256_blendv_pd(i2d(d04), i2d(tmp), i2d(cmp))); + + tmp = d06; + cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d03), _mm256_xor_si256(topBit, d06)); + d06 = d2i(_mm256_blendv_pd(i2d(d06), i2d(d03), i2d(cmp))); + d03 = d2i(_mm256_blendv_pd(i2d(d03), i2d(tmp), i2d(cmp))); + + tmp = d07; + cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d02), _mm256_xor_si256(topBit, d07)); + d07 = d2i(_mm256_blendv_pd(i2d(d07), i2d(d02), i2d(cmp))); + d02 = d2i(_mm256_blendv_pd(i2d(d02), i2d(tmp), i2d(cmp))); + + sort_04v_merge_descending(d01, d02, d03, d04); + sort_03v_merge_descending(d05, d06, d07); + } + static INLINE void sort_07v_merge_ascending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07) { + __m256i tmp, cmp; + __m256i topBit = _mm256_set1_epi64x(1LLU << 63); + + tmp = d01; + cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d05), _mm256_xor_si256(topBit, d01)); + d01 = d2i(_mm256_blendv_pd(i2d(d05), i2d(d01), i2d(cmp))); + cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d05), _mm256_xor_si256(topBit, tmp)); + d05 = d2i(_mm256_blendv_pd(i2d(tmp), i2d(d05), i2d(cmp))); + + tmp = d02; + cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d06), _mm256_xor_si256(topBit, d02)); + d02 = d2i(_mm256_blendv_pd(i2d(d06), i2d(d02), i2d(cmp))); + cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d06), _mm256_xor_si256(topBit, tmp)); + d06 = d2i(_mm256_blendv_pd(i2d(tmp), i2d(d06), i2d(cmp))); + + tmp = d03; + cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d07), 
_mm256_xor_si256(topBit, d03)); + d03 = d2i(_mm256_blendv_pd(i2d(d07), i2d(d03), i2d(cmp))); + cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d07), _mm256_xor_si256(topBit, tmp)); + d07 = d2i(_mm256_blendv_pd(i2d(tmp), i2d(d07), i2d(cmp))); + + sort_04v_merge_ascending(d01, d02, d03, d04); + sort_03v_merge_ascending(d05, d06, d07); + } + static INLINE void sort_07v_merge_descending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07) { + __m256i tmp, cmp; + __m256i topBit = _mm256_set1_epi64x(1LLU << 63); + + tmp = d01; + cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d05), _mm256_xor_si256(topBit, d01)); + d01 = d2i(_mm256_blendv_pd(i2d(d05), i2d(d01), i2d(cmp))); + cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d05), _mm256_xor_si256(topBit, tmp)); + d05 = d2i(_mm256_blendv_pd(i2d(tmp), i2d(d05), i2d(cmp))); + + tmp = d02; + cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d06), _mm256_xor_si256(topBit, d02)); + d02 = d2i(_mm256_blendv_pd(i2d(d06), i2d(d02), i2d(cmp))); + cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d06), _mm256_xor_si256(topBit, tmp)); + d06 = d2i(_mm256_blendv_pd(i2d(tmp), i2d(d06), i2d(cmp))); + + tmp = d03; + cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d07), _mm256_xor_si256(topBit, d03)); + d03 = d2i(_mm256_blendv_pd(i2d(d07), i2d(d03), i2d(cmp))); + cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d07), _mm256_xor_si256(topBit, tmp)); + d07 = d2i(_mm256_blendv_pd(i2d(tmp), i2d(d07), i2d(cmp))); + + sort_04v_merge_descending(d01, d02, d03, d04); + sort_03v_merge_descending(d05, d06, d07); + } + static INLINE void sort_08v_ascending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08) { + __m256i tmp, cmp; + __m256i topBit = _mm256_set1_epi64x(1LLU << 63); + + sort_04v_ascending(d01, d02, d03, d04); + sort_04v_descending(d05, d06, d07, d08); + + tmp = d05; + cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d04), _mm256_xor_si256(topBit, d05)); + d05 = d2i(_mm256_blendv_pd(i2d(d05), i2d(d04), i2d(cmp))); + d04 = d2i(_mm256_blendv_pd(i2d(d04), i2d(tmp), i2d(cmp))); + + tmp = d06; + cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d03), _mm256_xor_si256(topBit, d06)); + d06 = d2i(_mm256_blendv_pd(i2d(d06), i2d(d03), i2d(cmp))); + d03 = d2i(_mm256_blendv_pd(i2d(d03), i2d(tmp), i2d(cmp))); + + tmp = d07; + cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d02), _mm256_xor_si256(topBit, d07)); + d07 = d2i(_mm256_blendv_pd(i2d(d07), i2d(d02), i2d(cmp))); + d02 = d2i(_mm256_blendv_pd(i2d(d02), i2d(tmp), i2d(cmp))); + + tmp = d08; + cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d01), _mm256_xor_si256(topBit, d08)); + d08 = d2i(_mm256_blendv_pd(i2d(d08), i2d(d01), i2d(cmp))); + d01 = d2i(_mm256_blendv_pd(i2d(d01), i2d(tmp), i2d(cmp))); + + sort_04v_merge_ascending(d01, d02, d03, d04); + sort_04v_merge_ascending(d05, d06, d07, d08); + } + static INLINE void sort_08v_descending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08) { + __m256i tmp, cmp; + __m256i topBit = _mm256_set1_epi64x(1LLU << 63); + + sort_04v_descending(d01, d02, d03, d04); + sort_04v_ascending(d05, d06, d07, d08); + + tmp = d05; + cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d04), _mm256_xor_si256(topBit, d05)); + d05 = d2i(_mm256_blendv_pd(i2d(d05), i2d(d04), i2d(cmp))); + d04 = d2i(_mm256_blendv_pd(i2d(d04), i2d(tmp), i2d(cmp))); + + tmp = d06; + cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d03), 
_mm256_xor_si256(topBit, d06)); + d06 = d2i(_mm256_blendv_pd(i2d(d06), i2d(d03), i2d(cmp))); + d03 = d2i(_mm256_blendv_pd(i2d(d03), i2d(tmp), i2d(cmp))); + + tmp = d07; + cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d02), _mm256_xor_si256(topBit, d07)); + d07 = d2i(_mm256_blendv_pd(i2d(d07), i2d(d02), i2d(cmp))); + d02 = d2i(_mm256_blendv_pd(i2d(d02), i2d(tmp), i2d(cmp))); + + tmp = d08; + cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d01), _mm256_xor_si256(topBit, d08)); + d08 = d2i(_mm256_blendv_pd(i2d(d08), i2d(d01), i2d(cmp))); + d01 = d2i(_mm256_blendv_pd(i2d(d01), i2d(tmp), i2d(cmp))); + + sort_04v_merge_descending(d01, d02, d03, d04); + sort_04v_merge_descending(d05, d06, d07, d08); + } + static INLINE void sort_08v_merge_ascending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08) { + __m256i tmp, cmp; + __m256i topBit = _mm256_set1_epi64x(1LLU << 63); + + tmp = d01; + cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d05), _mm256_xor_si256(topBit, d01)); + d01 = d2i(_mm256_blendv_pd(i2d(d05), i2d(d01), i2d(cmp))); + cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d05), _mm256_xor_si256(topBit, tmp)); + d05 = d2i(_mm256_blendv_pd(i2d(tmp), i2d(d05), i2d(cmp))); + + tmp = d02; + cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d06), _mm256_xor_si256(topBit, d02)); + d02 = d2i(_mm256_blendv_pd(i2d(d06), i2d(d02), i2d(cmp))); + cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d06), _mm256_xor_si256(topBit, tmp)); + d06 = d2i(_mm256_blendv_pd(i2d(tmp), i2d(d06), i2d(cmp))); + + tmp = d03; + cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d07), _mm256_xor_si256(topBit, d03)); + d03 = d2i(_mm256_blendv_pd(i2d(d07), i2d(d03), i2d(cmp))); + cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d07), _mm256_xor_si256(topBit, tmp)); + d07 = d2i(_mm256_blendv_pd(i2d(tmp), i2d(d07), i2d(cmp))); + + tmp = d04; + cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d08), _mm256_xor_si256(topBit, d04)); + d04 = d2i(_mm256_blendv_pd(i2d(d08), i2d(d04), i2d(cmp))); + cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d08), _mm256_xor_si256(topBit, tmp)); + d08 = d2i(_mm256_blendv_pd(i2d(tmp), i2d(d08), i2d(cmp))); + + sort_04v_merge_ascending(d01, d02, d03, d04); + sort_04v_merge_ascending(d05, d06, d07, d08); + } + static INLINE void sort_08v_merge_descending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08) { + __m256i tmp, cmp; + __m256i topBit = _mm256_set1_epi64x(1LLU << 63); + + tmp = d01; + cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d05), _mm256_xor_si256(topBit, d01)); + d01 = d2i(_mm256_blendv_pd(i2d(d05), i2d(d01), i2d(cmp))); + cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d05), _mm256_xor_si256(topBit, tmp)); + d05 = d2i(_mm256_blendv_pd(i2d(tmp), i2d(d05), i2d(cmp))); + + tmp = d02; + cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d06), _mm256_xor_si256(topBit, d02)); + d02 = d2i(_mm256_blendv_pd(i2d(d06), i2d(d02), i2d(cmp))); + cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d06), _mm256_xor_si256(topBit, tmp)); + d06 = d2i(_mm256_blendv_pd(i2d(tmp), i2d(d06), i2d(cmp))); + + tmp = d03; + cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d07), _mm256_xor_si256(topBit, d03)); + d03 = d2i(_mm256_blendv_pd(i2d(d07), i2d(d03), i2d(cmp))); + cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d07), _mm256_xor_si256(topBit, tmp)); + d07 = d2i(_mm256_blendv_pd(i2d(tmp), i2d(d07), i2d(cmp))); + + tmp = d04; + cmp = 
_mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d08), _mm256_xor_si256(topBit, d04)); + d04 = d2i(_mm256_blendv_pd(i2d(d08), i2d(d04), i2d(cmp))); + cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d08), _mm256_xor_si256(topBit, tmp)); + d08 = d2i(_mm256_blendv_pd(i2d(tmp), i2d(d08), i2d(cmp))); + + sort_04v_merge_descending(d01, d02, d03, d04); + sort_04v_merge_descending(d05, d06, d07, d08); + } + static INLINE void sort_09v_ascending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08, __m256i& d09) { + __m256i tmp, cmp; + __m256i topBit = _mm256_set1_epi64x(1LLU << 63); + + sort_08v_ascending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_01v_descending(d09); + + tmp = d09; + cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d08), _mm256_xor_si256(topBit, d09)); + d09 = d2i(_mm256_blendv_pd(i2d(d09), i2d(d08), i2d(cmp))); + d08 = d2i(_mm256_blendv_pd(i2d(d08), i2d(tmp), i2d(cmp))); + + sort_08v_merge_ascending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_01v_merge_ascending(d09); + } + static INLINE void sort_09v_descending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08, __m256i& d09) { + __m256i tmp, cmp; + __m256i topBit = _mm256_set1_epi64x(1LLU << 63); + + sort_08v_descending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_01v_ascending(d09); + + tmp = d09; + cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d08), _mm256_xor_si256(topBit, d09)); + d09 = d2i(_mm256_blendv_pd(i2d(d09), i2d(d08), i2d(cmp))); + d08 = d2i(_mm256_blendv_pd(i2d(d08), i2d(tmp), i2d(cmp))); + + sort_08v_merge_descending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_01v_merge_descending(d09); + } + static INLINE void sort_10v_ascending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08, __m256i& d09, __m256i& d10) { + __m256i tmp, cmp; + __m256i topBit = _mm256_set1_epi64x(1LLU << 63); + + sort_08v_ascending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_02v_descending(d09, d10); + + tmp = d09; + cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d08), _mm256_xor_si256(topBit, d09)); + d09 = d2i(_mm256_blendv_pd(i2d(d09), i2d(d08), i2d(cmp))); + d08 = d2i(_mm256_blendv_pd(i2d(d08), i2d(tmp), i2d(cmp))); + + tmp = d10; + cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d07), _mm256_xor_si256(topBit, d10)); + d10 = d2i(_mm256_blendv_pd(i2d(d10), i2d(d07), i2d(cmp))); + d07 = d2i(_mm256_blendv_pd(i2d(d07), i2d(tmp), i2d(cmp))); + + sort_08v_merge_ascending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_02v_merge_ascending(d09, d10); + } + static INLINE void sort_10v_descending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08, __m256i& d09, __m256i& d10) { + __m256i tmp, cmp; + __m256i topBit = _mm256_set1_epi64x(1LLU << 63); + + sort_08v_descending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_02v_ascending(d09, d10); + + tmp = d09; + cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d08), _mm256_xor_si256(topBit, d09)); + d09 = d2i(_mm256_blendv_pd(i2d(d09), i2d(d08), i2d(cmp))); + d08 = d2i(_mm256_blendv_pd(i2d(d08), i2d(tmp), i2d(cmp))); + + tmp = d10; + cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d07), _mm256_xor_si256(topBit, d10)); + d10 = d2i(_mm256_blendv_pd(i2d(d10), i2d(d07), i2d(cmp))); + d07 = d2i(_mm256_blendv_pd(i2d(d07), i2d(tmp), i2d(cmp))); + + sort_08v_merge_descending(d01, d02, d03, d04, d05, d06, d07, d08); + 
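
The exchanges above XOR both operands with the top bit (1LLU << 63) before every compare. AVX2's only 64-bit vector comparison, _mm256_cmpgt_epi64, is signed; flipping the sign bit first maps unsigned order onto signed order, the same device the uint64_t machine traits in vxsort.h use in get_cmpgt_mask. A minimal sketch of the trick, with hypothetical helper names:

#include <immintrin.h>
#include <cstdint>

// Hypothetical helpers, not part of this patch: emulate the unsigned 64-bit
// greater-than compare that AVX2 does not provide directly.
static inline __m256i flip_sign(__m256i v) {
    // XOR with 2^63 biases each lane so that unsigned order matches signed order.
    return _mm256_xor_si256(_mm256_set1_epi64x(1LLU << 63), v);
}

static inline __m256i cmpgt_epu64(__m256i a, __m256i b) {
    return _mm256_cmpgt_epi64(flip_sign(a), flip_sign(b)); // per-lane a > b, unsigned
}
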
sort_02v_merge_descending(d09, d10); + } + static INLINE void sort_11v_ascending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08, __m256i& d09, __m256i& d10, __m256i& d11) { + __m256i tmp, cmp; + __m256i topBit = _mm256_set1_epi64x(1LLU << 63); + + sort_08v_ascending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_03v_descending(d09, d10, d11); + + tmp = d09; + cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d08), _mm256_xor_si256(topBit, d09)); + d09 = d2i(_mm256_blendv_pd(i2d(d09), i2d(d08), i2d(cmp))); + d08 = d2i(_mm256_blendv_pd(i2d(d08), i2d(tmp), i2d(cmp))); + + tmp = d10; + cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d07), _mm256_xor_si256(topBit, d10)); + d10 = d2i(_mm256_blendv_pd(i2d(d10), i2d(d07), i2d(cmp))); + d07 = d2i(_mm256_blendv_pd(i2d(d07), i2d(tmp), i2d(cmp))); + + tmp = d11; + cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d06), _mm256_xor_si256(topBit, d11)); + d11 = d2i(_mm256_blendv_pd(i2d(d11), i2d(d06), i2d(cmp))); + d06 = d2i(_mm256_blendv_pd(i2d(d06), i2d(tmp), i2d(cmp))); + + sort_08v_merge_ascending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_03v_merge_ascending(d09, d10, d11); + } + static INLINE void sort_11v_descending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08, __m256i& d09, __m256i& d10, __m256i& d11) { + __m256i tmp, cmp; + __m256i topBit = _mm256_set1_epi64x(1LLU << 63); + + sort_08v_descending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_03v_ascending(d09, d10, d11); + + tmp = d09; + cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d08), _mm256_xor_si256(topBit, d09)); + d09 = d2i(_mm256_blendv_pd(i2d(d09), i2d(d08), i2d(cmp))); + d08 = d2i(_mm256_blendv_pd(i2d(d08), i2d(tmp), i2d(cmp))); + + tmp = d10; + cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d07), _mm256_xor_si256(topBit, d10)); + d10 = d2i(_mm256_blendv_pd(i2d(d10), i2d(d07), i2d(cmp))); + d07 = d2i(_mm256_blendv_pd(i2d(d07), i2d(tmp), i2d(cmp))); + + tmp = d11; + cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d06), _mm256_xor_si256(topBit, d11)); + d11 = d2i(_mm256_blendv_pd(i2d(d11), i2d(d06), i2d(cmp))); + d06 = d2i(_mm256_blendv_pd(i2d(d06), i2d(tmp), i2d(cmp))); + + sort_08v_merge_descending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_03v_merge_descending(d09, d10, d11); + } + static INLINE void sort_12v_ascending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08, __m256i& d09, __m256i& d10, __m256i& d11, __m256i& d12) { + __m256i tmp, cmp; + __m256i topBit = _mm256_set1_epi64x(1LLU << 63); + + sort_08v_ascending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_04v_descending(d09, d10, d11, d12); + + tmp = d09; + cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d08), _mm256_xor_si256(topBit, d09)); + d09 = d2i(_mm256_blendv_pd(i2d(d09), i2d(d08), i2d(cmp))); + d08 = d2i(_mm256_blendv_pd(i2d(d08), i2d(tmp), i2d(cmp))); + + tmp = d10; + cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d07), _mm256_xor_si256(topBit, d10)); + d10 = d2i(_mm256_blendv_pd(i2d(d10), i2d(d07), i2d(cmp))); + d07 = d2i(_mm256_blendv_pd(i2d(d07), i2d(tmp), i2d(cmp))); + + tmp = d11; + cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d06), _mm256_xor_si256(topBit, d11)); + d11 = d2i(_mm256_blendv_pd(i2d(d11), i2d(d06), i2d(cmp))); + d06 = d2i(_mm256_blendv_pd(i2d(d06), i2d(tmp), i2d(cmp))); + + tmp = d12; + cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d05), _mm256_xor_si256(topBit, d12)); + 
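
Each tmp/cmp/blendv group, like the one in flight here, is a single vectorized compare-exchange: afterwards every lane of the lower register holds the smaller element of its pair and the upper register the larger. The scalar equivalent of one such step, as a sketch:

#include <cstdint>
#include <utility>

// Scalar model of the tmp/cmp/blendv pattern: order one pair so that
// lo ends up with the smaller element and hi with the larger.
static inline void compare_exchange(int64_t& lo, int64_t& hi) {
    if (lo > hi) std::swap(lo, hi);
}
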
d12 = d2i(_mm256_blendv_pd(i2d(d12), i2d(d05), i2d(cmp))); + d05 = d2i(_mm256_blendv_pd(i2d(d05), i2d(tmp), i2d(cmp))); + + sort_08v_merge_ascending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_04v_merge_ascending(d09, d10, d11, d12); + } + static INLINE void sort_12v_descending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08, __m256i& d09, __m256i& d10, __m256i& d11, __m256i& d12) { + __m256i tmp, cmp; + __m256i topBit = _mm256_set1_epi64x(1LLU << 63); + + sort_08v_descending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_04v_ascending(d09, d10, d11, d12); + + tmp = d09; + cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d08), _mm256_xor_si256(topBit, d09)); + d09 = d2i(_mm256_blendv_pd(i2d(d09), i2d(d08), i2d(cmp))); + d08 = d2i(_mm256_blendv_pd(i2d(d08), i2d(tmp), i2d(cmp))); + + tmp = d10; + cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d07), _mm256_xor_si256(topBit, d10)); + d10 = d2i(_mm256_blendv_pd(i2d(d10), i2d(d07), i2d(cmp))); + d07 = d2i(_mm256_blendv_pd(i2d(d07), i2d(tmp), i2d(cmp))); + + tmp = d11; + cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d06), _mm256_xor_si256(topBit, d11)); + d11 = d2i(_mm256_blendv_pd(i2d(d11), i2d(d06), i2d(cmp))); + d06 = d2i(_mm256_blendv_pd(i2d(d06), i2d(tmp), i2d(cmp))); + + tmp = d12; + cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d05), _mm256_xor_si256(topBit, d12)); + d12 = d2i(_mm256_blendv_pd(i2d(d12), i2d(d05), i2d(cmp))); + d05 = d2i(_mm256_blendv_pd(i2d(d05), i2d(tmp), i2d(cmp))); + + sort_08v_merge_descending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_04v_merge_descending(d09, d10, d11, d12); + } + static INLINE void sort_13v_ascending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08, __m256i& d09, __m256i& d10, __m256i& d11, __m256i& d12, __m256i& d13) { + __m256i tmp, cmp; + __m256i topBit = _mm256_set1_epi64x(1LLU << 63); + + sort_08v_ascending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_05v_descending(d09, d10, d11, d12, d13); + + tmp = d09; + cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d08), _mm256_xor_si256(topBit, d09)); + d09 = d2i(_mm256_blendv_pd(i2d(d09), i2d(d08), i2d(cmp))); + d08 = d2i(_mm256_blendv_pd(i2d(d08), i2d(tmp), i2d(cmp))); + + tmp = d10; + cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d07), _mm256_xor_si256(topBit, d10)); + d10 = d2i(_mm256_blendv_pd(i2d(d10), i2d(d07), i2d(cmp))); + d07 = d2i(_mm256_blendv_pd(i2d(d07), i2d(tmp), i2d(cmp))); + + tmp = d11; + cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d06), _mm256_xor_si256(topBit, d11)); + d11 = d2i(_mm256_blendv_pd(i2d(d11), i2d(d06), i2d(cmp))); + d06 = d2i(_mm256_blendv_pd(i2d(d06), i2d(tmp), i2d(cmp))); + + tmp = d12; + cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d05), _mm256_xor_si256(topBit, d12)); + d12 = d2i(_mm256_blendv_pd(i2d(d12), i2d(d05), i2d(cmp))); + d05 = d2i(_mm256_blendv_pd(i2d(d05), i2d(tmp), i2d(cmp))); + + tmp = d13; + cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d04), _mm256_xor_si256(topBit, d13)); + d13 = d2i(_mm256_blendv_pd(i2d(d13), i2d(d04), i2d(cmp))); + d04 = d2i(_mm256_blendv_pd(i2d(d04), i2d(tmp), i2d(cmp))); + + sort_08v_merge_ascending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_05v_merge_ascending(d09, d10, d11, d12, d13); + } + static INLINE void sort_13v_descending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08, __m256i& d09, __m256i& d10, __m256i& d11, __m256i& d12, __m256i& d13) 
{ + __m256i tmp, cmp; + __m256i topBit = _mm256_set1_epi64x(1LLU << 63); + + sort_08v_descending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_05v_ascending(d09, d10, d11, d12, d13); + + tmp = d09; + cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d08), _mm256_xor_si256(topBit, d09)); + d09 = d2i(_mm256_blendv_pd(i2d(d09), i2d(d08), i2d(cmp))); + d08 = d2i(_mm256_blendv_pd(i2d(d08), i2d(tmp), i2d(cmp))); + + tmp = d10; + cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d07), _mm256_xor_si256(topBit, d10)); + d10 = d2i(_mm256_blendv_pd(i2d(d10), i2d(d07), i2d(cmp))); + d07 = d2i(_mm256_blendv_pd(i2d(d07), i2d(tmp), i2d(cmp))); + + tmp = d11; + cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d06), _mm256_xor_si256(topBit, d11)); + d11 = d2i(_mm256_blendv_pd(i2d(d11), i2d(d06), i2d(cmp))); + d06 = d2i(_mm256_blendv_pd(i2d(d06), i2d(tmp), i2d(cmp))); + + tmp = d12; + cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d05), _mm256_xor_si256(topBit, d12)); + d12 = d2i(_mm256_blendv_pd(i2d(d12), i2d(d05), i2d(cmp))); + d05 = d2i(_mm256_blendv_pd(i2d(d05), i2d(tmp), i2d(cmp))); + + tmp = d13; + cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d04), _mm256_xor_si256(topBit, d13)); + d13 = d2i(_mm256_blendv_pd(i2d(d13), i2d(d04), i2d(cmp))); + d04 = d2i(_mm256_blendv_pd(i2d(d04), i2d(tmp), i2d(cmp))); + + sort_08v_merge_descending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_05v_merge_descending(d09, d10, d11, d12, d13); + } + static INLINE void sort_14v_ascending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08, __m256i& d09, __m256i& d10, __m256i& d11, __m256i& d12, __m256i& d13, __m256i& d14) { + __m256i tmp, cmp; + __m256i topBit = _mm256_set1_epi64x(1LLU << 63); + + sort_08v_ascending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_06v_descending(d09, d10, d11, d12, d13, d14); + + tmp = d09; + cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d08), _mm256_xor_si256(topBit, d09)); + d09 = d2i(_mm256_blendv_pd(i2d(d09), i2d(d08), i2d(cmp))); + d08 = d2i(_mm256_blendv_pd(i2d(d08), i2d(tmp), i2d(cmp))); + + tmp = d10; + cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d07), _mm256_xor_si256(topBit, d10)); + d10 = d2i(_mm256_blendv_pd(i2d(d10), i2d(d07), i2d(cmp))); + d07 = d2i(_mm256_blendv_pd(i2d(d07), i2d(tmp), i2d(cmp))); + + tmp = d11; + cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d06), _mm256_xor_si256(topBit, d11)); + d11 = d2i(_mm256_blendv_pd(i2d(d11), i2d(d06), i2d(cmp))); + d06 = d2i(_mm256_blendv_pd(i2d(d06), i2d(tmp), i2d(cmp))); + + tmp = d12; + cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d05), _mm256_xor_si256(topBit, d12)); + d12 = d2i(_mm256_blendv_pd(i2d(d12), i2d(d05), i2d(cmp))); + d05 = d2i(_mm256_blendv_pd(i2d(d05), i2d(tmp), i2d(cmp))); + + tmp = d13; + cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d04), _mm256_xor_si256(topBit, d13)); + d13 = d2i(_mm256_blendv_pd(i2d(d13), i2d(d04), i2d(cmp))); + d04 = d2i(_mm256_blendv_pd(i2d(d04), i2d(tmp), i2d(cmp))); + + tmp = d14; + cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d03), _mm256_xor_si256(topBit, d14)); + d14 = d2i(_mm256_blendv_pd(i2d(d14), i2d(d03), i2d(cmp))); + d03 = d2i(_mm256_blendv_pd(i2d(d03), i2d(tmp), i2d(cmp))); + + sort_08v_merge_ascending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_06v_merge_ascending(d09, d10, d11, d12, d13, d14); + } + static INLINE void sort_14v_descending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08, __m256i& d09, __m256i& d10, 
__m256i& d11, __m256i& d12, __m256i& d13, __m256i& d14) { + __m256i tmp, cmp; + __m256i topBit = _mm256_set1_epi64x(1LLU << 63); + + sort_08v_descending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_06v_ascending(d09, d10, d11, d12, d13, d14); + + tmp = d09; + cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d08), _mm256_xor_si256(topBit, d09)); + d09 = d2i(_mm256_blendv_pd(i2d(d09), i2d(d08), i2d(cmp))); + d08 = d2i(_mm256_blendv_pd(i2d(d08), i2d(tmp), i2d(cmp))); + + tmp = d10; + cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d07), _mm256_xor_si256(topBit, d10)); + d10 = d2i(_mm256_blendv_pd(i2d(d10), i2d(d07), i2d(cmp))); + d07 = d2i(_mm256_blendv_pd(i2d(d07), i2d(tmp), i2d(cmp))); + + tmp = d11; + cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d06), _mm256_xor_si256(topBit, d11)); + d11 = d2i(_mm256_blendv_pd(i2d(d11), i2d(d06), i2d(cmp))); + d06 = d2i(_mm256_blendv_pd(i2d(d06), i2d(tmp), i2d(cmp))); + + tmp = d12; + cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d05), _mm256_xor_si256(topBit, d12)); + d12 = d2i(_mm256_blendv_pd(i2d(d12), i2d(d05), i2d(cmp))); + d05 = d2i(_mm256_blendv_pd(i2d(d05), i2d(tmp), i2d(cmp))); + + tmp = d13; + cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d04), _mm256_xor_si256(topBit, d13)); + d13 = d2i(_mm256_blendv_pd(i2d(d13), i2d(d04), i2d(cmp))); + d04 = d2i(_mm256_blendv_pd(i2d(d04), i2d(tmp), i2d(cmp))); + + tmp = d14; + cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d03), _mm256_xor_si256(topBit, d14)); + d14 = d2i(_mm256_blendv_pd(i2d(d14), i2d(d03), i2d(cmp))); + d03 = d2i(_mm256_blendv_pd(i2d(d03), i2d(tmp), i2d(cmp))); + + sort_08v_merge_descending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_06v_merge_descending(d09, d10, d11, d12, d13, d14); + } + static INLINE void sort_15v_ascending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08, __m256i& d09, __m256i& d10, __m256i& d11, __m256i& d12, __m256i& d13, __m256i& d14, __m256i& d15) { + __m256i tmp, cmp; + __m256i topBit = _mm256_set1_epi64x(1LLU << 63); + + sort_08v_ascending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_07v_descending(d09, d10, d11, d12, d13, d14, d15); + + tmp = d09; + cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d08), _mm256_xor_si256(topBit, d09)); + d09 = d2i(_mm256_blendv_pd(i2d(d09), i2d(d08), i2d(cmp))); + d08 = d2i(_mm256_blendv_pd(i2d(d08), i2d(tmp), i2d(cmp))); + + tmp = d10; + cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d07), _mm256_xor_si256(topBit, d10)); + d10 = d2i(_mm256_blendv_pd(i2d(d10), i2d(d07), i2d(cmp))); + d07 = d2i(_mm256_blendv_pd(i2d(d07), i2d(tmp), i2d(cmp))); + + tmp = d11; + cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d06), _mm256_xor_si256(topBit, d11)); + d11 = d2i(_mm256_blendv_pd(i2d(d11), i2d(d06), i2d(cmp))); + d06 = d2i(_mm256_blendv_pd(i2d(d06), i2d(tmp), i2d(cmp))); + + tmp = d12; + cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d05), _mm256_xor_si256(topBit, d12)); + d12 = d2i(_mm256_blendv_pd(i2d(d12), i2d(d05), i2d(cmp))); + d05 = d2i(_mm256_blendv_pd(i2d(d05), i2d(tmp), i2d(cmp))); + + tmp = d13; + cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d04), _mm256_xor_si256(topBit, d13)); + d13 = d2i(_mm256_blendv_pd(i2d(d13), i2d(d04), i2d(cmp))); + d04 = d2i(_mm256_blendv_pd(i2d(d04), i2d(tmp), i2d(cmp))); + + tmp = d14; + cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d03), _mm256_xor_si256(topBit, d14)); + d14 = d2i(_mm256_blendv_pd(i2d(d14), i2d(d03), i2d(cmp))); + d03 = d2i(_mm256_blendv_pd(i2d(d03), i2d(tmp), i2d(cmp))); + + 
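
The construction repeats at every size: sort the first half of the registers ascending, the second half descending, compare-exchange lanes across the two halves, then recursively merge each half. A compact scalar reference of that scheme for power-of-two lengths (a sketch of the underlying bitonic algorithm, not the generated code):

#include <cstddef>
#include <cstdint>
#include <utility>

// Merge a bitonic sequence of length n (power of two) into sorted order.
static void bitonic_merge(int64_t* a, size_t n, bool asc) {
    if (n < 2) return;
    size_t h = n / 2;
    for (size_t i = 0; i < h; i++)
        if ((a[i] > a[i + h]) == asc) std::swap(a[i], a[i + h]);
    bitonic_merge(a, h, asc);
    bitonic_merge(a + h, h, asc);
}

// Sort: an ascending half next to a descending half forms a bitonic
// sequence, which one merge pass per level turns into sorted output.
static void bitonic_sort_ref(int64_t* a, size_t n, bool asc = true) {
    if (n < 2) return;
    size_t h = n / 2;
    bitonic_sort_ref(a, h, true);
    bitonic_sort_ref(a + h, h, false);
    bitonic_merge(a, n, asc);
}
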
tmp = d15; + cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d02), _mm256_xor_si256(topBit, d15)); + d15 = d2i(_mm256_blendv_pd(i2d(d15), i2d(d02), i2d(cmp))); + d02 = d2i(_mm256_blendv_pd(i2d(d02), i2d(tmp), i2d(cmp))); + + sort_08v_merge_ascending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_07v_merge_ascending(d09, d10, d11, d12, d13, d14, d15); + } + static INLINE void sort_15v_descending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08, __m256i& d09, __m256i& d10, __m256i& d11, __m256i& d12, __m256i& d13, __m256i& d14, __m256i& d15) { + __m256i tmp, cmp; + __m256i topBit = _mm256_set1_epi64x(1LLU << 63); + + sort_08v_descending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_07v_ascending(d09, d10, d11, d12, d13, d14, d15); + + tmp = d09; + cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d08), _mm256_xor_si256(topBit, d09)); + d09 = d2i(_mm256_blendv_pd(i2d(d09), i2d(d08), i2d(cmp))); + d08 = d2i(_mm256_blendv_pd(i2d(d08), i2d(tmp), i2d(cmp))); + + tmp = d10; + cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d07), _mm256_xor_si256(topBit, d10)); + d10 = d2i(_mm256_blendv_pd(i2d(d10), i2d(d07), i2d(cmp))); + d07 = d2i(_mm256_blendv_pd(i2d(d07), i2d(tmp), i2d(cmp))); + + tmp = d11; + cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d06), _mm256_xor_si256(topBit, d11)); + d11 = d2i(_mm256_blendv_pd(i2d(d11), i2d(d06), i2d(cmp))); + d06 = d2i(_mm256_blendv_pd(i2d(d06), i2d(tmp), i2d(cmp))); + + tmp = d12; + cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d05), _mm256_xor_si256(topBit, d12)); + d12 = d2i(_mm256_blendv_pd(i2d(d12), i2d(d05), i2d(cmp))); + d05 = d2i(_mm256_blendv_pd(i2d(d05), i2d(tmp), i2d(cmp))); + + tmp = d13; + cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d04), _mm256_xor_si256(topBit, d13)); + d13 = d2i(_mm256_blendv_pd(i2d(d13), i2d(d04), i2d(cmp))); + d04 = d2i(_mm256_blendv_pd(i2d(d04), i2d(tmp), i2d(cmp))); + + tmp = d14; + cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d03), _mm256_xor_si256(topBit, d14)); + d14 = d2i(_mm256_blendv_pd(i2d(d14), i2d(d03), i2d(cmp))); + d03 = d2i(_mm256_blendv_pd(i2d(d03), i2d(tmp), i2d(cmp))); + + tmp = d15; + cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d02), _mm256_xor_si256(topBit, d15)); + d15 = d2i(_mm256_blendv_pd(i2d(d15), i2d(d02), i2d(cmp))); + d02 = d2i(_mm256_blendv_pd(i2d(d02), i2d(tmp), i2d(cmp))); + + sort_08v_merge_descending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_07v_merge_descending(d09, d10, d11, d12, d13, d14, d15); + } + static INLINE void sort_16v_ascending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08, __m256i& d09, __m256i& d10, __m256i& d11, __m256i& d12, __m256i& d13, __m256i& d14, __m256i& d15, __m256i& d16) { + __m256i tmp, cmp; + __m256i topBit = _mm256_set1_epi64x(1LLU << 63); + + sort_08v_ascending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_08v_descending(d09, d10, d11, d12, d13, d14, d15, d16); + + tmp = d09; + cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d08), _mm256_xor_si256(topBit, d09)); + d09 = d2i(_mm256_blendv_pd(i2d(d09), i2d(d08), i2d(cmp))); + d08 = d2i(_mm256_blendv_pd(i2d(d08), i2d(tmp), i2d(cmp))); + + tmp = d10; + cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d07), _mm256_xor_si256(topBit, d10)); + d10 = d2i(_mm256_blendv_pd(i2d(d10), i2d(d07), i2d(cmp))); + d07 = d2i(_mm256_blendv_pd(i2d(d07), i2d(tmp), i2d(cmp))); + + tmp = d11; + cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d06), _mm256_xor_si256(topBit, 
d11)); + d11 = d2i(_mm256_blendv_pd(i2d(d11), i2d(d06), i2d(cmp))); + d06 = d2i(_mm256_blendv_pd(i2d(d06), i2d(tmp), i2d(cmp))); + + tmp = d12; + cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d05), _mm256_xor_si256(topBit, d12)); + d12 = d2i(_mm256_blendv_pd(i2d(d12), i2d(d05), i2d(cmp))); + d05 = d2i(_mm256_blendv_pd(i2d(d05), i2d(tmp), i2d(cmp))); + + tmp = d13; + cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d04), _mm256_xor_si256(topBit, d13)); + d13 = d2i(_mm256_blendv_pd(i2d(d13), i2d(d04), i2d(cmp))); + d04 = d2i(_mm256_blendv_pd(i2d(d04), i2d(tmp), i2d(cmp))); + + tmp = d14; + cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d03), _mm256_xor_si256(topBit, d14)); + d14 = d2i(_mm256_blendv_pd(i2d(d14), i2d(d03), i2d(cmp))); + d03 = d2i(_mm256_blendv_pd(i2d(d03), i2d(tmp), i2d(cmp))); + + tmp = d15; + cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d02), _mm256_xor_si256(topBit, d15)); + d15 = d2i(_mm256_blendv_pd(i2d(d15), i2d(d02), i2d(cmp))); + d02 = d2i(_mm256_blendv_pd(i2d(d02), i2d(tmp), i2d(cmp))); + + tmp = d16; + cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d01), _mm256_xor_si256(topBit, d16)); + d16 = d2i(_mm256_blendv_pd(i2d(d16), i2d(d01), i2d(cmp))); + d01 = d2i(_mm256_blendv_pd(i2d(d01), i2d(tmp), i2d(cmp))); + + sort_08v_merge_ascending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_08v_merge_ascending(d09, d10, d11, d12, d13, d14, d15, d16); + } + static INLINE void sort_16v_descending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08, __m256i& d09, __m256i& d10, __m256i& d11, __m256i& d12, __m256i& d13, __m256i& d14, __m256i& d15, __m256i& d16) { + __m256i tmp, cmp; + __m256i topBit = _mm256_set1_epi64x(1LLU << 63); + + sort_08v_descending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_08v_ascending(d09, d10, d11, d12, d13, d14, d15, d16); + + tmp = d09; + cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d08), _mm256_xor_si256(topBit, d09)); + d09 = d2i(_mm256_blendv_pd(i2d(d09), i2d(d08), i2d(cmp))); + d08 = d2i(_mm256_blendv_pd(i2d(d08), i2d(tmp), i2d(cmp))); + + tmp = d10; + cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d07), _mm256_xor_si256(topBit, d10)); + d10 = d2i(_mm256_blendv_pd(i2d(d10), i2d(d07), i2d(cmp))); + d07 = d2i(_mm256_blendv_pd(i2d(d07), i2d(tmp), i2d(cmp))); + + tmp = d11; + cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d06), _mm256_xor_si256(topBit, d11)); + d11 = d2i(_mm256_blendv_pd(i2d(d11), i2d(d06), i2d(cmp))); + d06 = d2i(_mm256_blendv_pd(i2d(d06), i2d(tmp), i2d(cmp))); + + tmp = d12; + cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d05), _mm256_xor_si256(topBit, d12)); + d12 = d2i(_mm256_blendv_pd(i2d(d12), i2d(d05), i2d(cmp))); + d05 = d2i(_mm256_blendv_pd(i2d(d05), i2d(tmp), i2d(cmp))); + + tmp = d13; + cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d04), _mm256_xor_si256(topBit, d13)); + d13 = d2i(_mm256_blendv_pd(i2d(d13), i2d(d04), i2d(cmp))); + d04 = d2i(_mm256_blendv_pd(i2d(d04), i2d(tmp), i2d(cmp))); + + tmp = d14; + cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d03), _mm256_xor_si256(topBit, d14)); + d14 = d2i(_mm256_blendv_pd(i2d(d14), i2d(d03), i2d(cmp))); + d03 = d2i(_mm256_blendv_pd(i2d(d03), i2d(tmp), i2d(cmp))); + + tmp = d15; + cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d02), _mm256_xor_si256(topBit, d15)); + d15 = d2i(_mm256_blendv_pd(i2d(d15), i2d(d02), i2d(cmp))); + d02 = d2i(_mm256_blendv_pd(i2d(d02), i2d(tmp), i2d(cmp))); + + tmp = d16; + cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d01), 
_mm256_xor_si256(topBit, d16)); + d16 = d2i(_mm256_blendv_pd(i2d(d16), i2d(d01), i2d(cmp))); + d01 = d2i(_mm256_blendv_pd(i2d(d01), i2d(tmp), i2d(cmp))); + + sort_08v_merge_descending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_08v_merge_descending(d09, d10, d11, d12, d13, d14, d15, d16); + } + +static NOINLINE void sort_01v(uint64_t *ptr) { + __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0);; + sort_01v_ascending(d01); + _mm256_storeu_si256((__m256i *) ptr + 0, d01); +} + +static NOINLINE void sort_02v(uint64_t *ptr) { + __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0);; + __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1);; + sort_02v_ascending(d01, d02); + _mm256_storeu_si256((__m256i *) ptr + 0, d01); + _mm256_storeu_si256((__m256i *) ptr + 1, d02); +} + +static NOINLINE void sort_03v(uint64_t *ptr) { + __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0);; + __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1);; + __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2);; + sort_03v_ascending(d01, d02, d03); + _mm256_storeu_si256((__m256i *) ptr + 0, d01); + _mm256_storeu_si256((__m256i *) ptr + 1, d02); + _mm256_storeu_si256((__m256i *) ptr + 2, d03); +} + +static NOINLINE void sort_04v(uint64_t *ptr) { + __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0);; + __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1);; + __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2);; + __m256i d04 = _mm256_lddqu_si256((__m256i const *) ptr + 3);; + sort_04v_ascending(d01, d02, d03, d04); + _mm256_storeu_si256((__m256i *) ptr + 0, d01); + _mm256_storeu_si256((__m256i *) ptr + 1, d02); + _mm256_storeu_si256((__m256i *) ptr + 2, d03); + _mm256_storeu_si256((__m256i *) ptr + 3, d04); +} + +static NOINLINE void sort_05v(uint64_t *ptr) { + __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0);; + __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1);; + __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2);; + __m256i d04 = _mm256_lddqu_si256((__m256i const *) ptr + 3);; + __m256i d05 = _mm256_lddqu_si256((__m256i const *) ptr + 4);; + sort_05v_ascending(d01, d02, d03, d04, d05); + _mm256_storeu_si256((__m256i *) ptr + 0, d01); + _mm256_storeu_si256((__m256i *) ptr + 1, d02); + _mm256_storeu_si256((__m256i *) ptr + 2, d03); + _mm256_storeu_si256((__m256i *) ptr + 3, d04); + _mm256_storeu_si256((__m256i *) ptr + 4, d05); +} + +static NOINLINE void sort_06v(uint64_t *ptr) { + __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0);; + __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1);; + __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2);; + __m256i d04 = _mm256_lddqu_si256((__m256i const *) ptr + 3);; + __m256i d05 = _mm256_lddqu_si256((__m256i const *) ptr + 4);; + __m256i d06 = _mm256_lddqu_si256((__m256i const *) ptr + 5);; + sort_06v_ascending(d01, d02, d03, d04, d05, d06); + _mm256_storeu_si256((__m256i *) ptr + 0, d01); + _mm256_storeu_si256((__m256i *) ptr + 1, d02); + _mm256_storeu_si256((__m256i *) ptr + 2, d03); + _mm256_storeu_si256((__m256i *) ptr + 3, d04); + _mm256_storeu_si256((__m256i *) ptr + 4, d05); + _mm256_storeu_si256((__m256i *) ptr + 5, d06); +} + +static NOINLINE void sort_07v(uint64_t *ptr) { + __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0);; + __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1);; + __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2);; + __m256i d04 = _mm256_lddqu_si256((__m256i const *) ptr + 3);; + __m256i d05 = 
_mm256_lddqu_si256((__m256i const *) ptr + 4);; + __m256i d06 = _mm256_lddqu_si256((__m256i const *) ptr + 5);; + __m256i d07 = _mm256_lddqu_si256((__m256i const *) ptr + 6);; + sort_07v_ascending(d01, d02, d03, d04, d05, d06, d07); + _mm256_storeu_si256((__m256i *) ptr + 0, d01); + _mm256_storeu_si256((__m256i *) ptr + 1, d02); + _mm256_storeu_si256((__m256i *) ptr + 2, d03); + _mm256_storeu_si256((__m256i *) ptr + 3, d04); + _mm256_storeu_si256((__m256i *) ptr + 4, d05); + _mm256_storeu_si256((__m256i *) ptr + 5, d06); + _mm256_storeu_si256((__m256i *) ptr + 6, d07); +} + +static NOINLINE void sort_08v(uint64_t *ptr) { + __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0);; + __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1);; + __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2);; + __m256i d04 = _mm256_lddqu_si256((__m256i const *) ptr + 3);; + __m256i d05 = _mm256_lddqu_si256((__m256i const *) ptr + 4);; + __m256i d06 = _mm256_lddqu_si256((__m256i const *) ptr + 5);; + __m256i d07 = _mm256_lddqu_si256((__m256i const *) ptr + 6);; + __m256i d08 = _mm256_lddqu_si256((__m256i const *) ptr + 7);; + sort_08v_ascending(d01, d02, d03, d04, d05, d06, d07, d08); + _mm256_storeu_si256((__m256i *) ptr + 0, d01); + _mm256_storeu_si256((__m256i *) ptr + 1, d02); + _mm256_storeu_si256((__m256i *) ptr + 2, d03); + _mm256_storeu_si256((__m256i *) ptr + 3, d04); + _mm256_storeu_si256((__m256i *) ptr + 4, d05); + _mm256_storeu_si256((__m256i *) ptr + 5, d06); + _mm256_storeu_si256((__m256i *) ptr + 6, d07); + _mm256_storeu_si256((__m256i *) ptr + 7, d08); +} + +static NOINLINE void sort_09v(uint64_t *ptr) { + __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0);; + __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1);; + __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2);; + __m256i d04 = _mm256_lddqu_si256((__m256i const *) ptr + 3);; + __m256i d05 = _mm256_lddqu_si256((__m256i const *) ptr + 4);; + __m256i d06 = _mm256_lddqu_si256((__m256i const *) ptr + 5);; + __m256i d07 = _mm256_lddqu_si256((__m256i const *) ptr + 6);; + __m256i d08 = _mm256_lddqu_si256((__m256i const *) ptr + 7);; + __m256i d09 = _mm256_lddqu_si256((__m256i const *) ptr + 8);; + sort_09v_ascending(d01, d02, d03, d04, d05, d06, d07, d08, d09); + _mm256_storeu_si256((__m256i *) ptr + 0, d01); + _mm256_storeu_si256((__m256i *) ptr + 1, d02); + _mm256_storeu_si256((__m256i *) ptr + 2, d03); + _mm256_storeu_si256((__m256i *) ptr + 3, d04); + _mm256_storeu_si256((__m256i *) ptr + 4, d05); + _mm256_storeu_si256((__m256i *) ptr + 5, d06); + _mm256_storeu_si256((__m256i *) ptr + 6, d07); + _mm256_storeu_si256((__m256i *) ptr + 7, d08); + _mm256_storeu_si256((__m256i *) ptr + 8, d09); +} + +static NOINLINE void sort_10v(uint64_t *ptr) { + __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0);; + __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1);; + __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2);; + __m256i d04 = _mm256_lddqu_si256((__m256i const *) ptr + 3);; + __m256i d05 = _mm256_lddqu_si256((__m256i const *) ptr + 4);; + __m256i d06 = _mm256_lddqu_si256((__m256i const *) ptr + 5);; + __m256i d07 = _mm256_lddqu_si256((__m256i const *) ptr + 6);; + __m256i d08 = _mm256_lddqu_si256((__m256i const *) ptr + 7);; + __m256i d09 = _mm256_lddqu_si256((__m256i const *) ptr + 8);; + __m256i d10 = _mm256_lddqu_si256((__m256i const *) ptr + 9);; + sort_10v_ascending(d01, d02, d03, d04, d05, d06, d07, d08, d09, d10); + _mm256_storeu_si256((__m256i *) ptr + 0, d01); + 
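
Every sort_NNv entry point has the same shape as the one being stored back here: NN unaligned vector loads, one fully unrolled sorting network, NN unaligned stores. A hypothetical call site (assumes AVX2 hardware and that the generated header is included; note this snapshot's int64_t specialization takes a uint64_t pointer):

#include <cstdint>

// Hypothetical usage: 40 elements = 10 vectors of 4 lanes each, so the
// dispatcher routes to sort_10v. Supported lengths are multiples of 4,
// up to 16 vectors (64 elements).
void sort_small_block(uint64_t* data) {
    gcsort::smallsort::bitonic<int64_t>::sort(data, 40);
}
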
_mm256_storeu_si256((__m256i *) ptr + 1, d02); + _mm256_storeu_si256((__m256i *) ptr + 2, d03); + _mm256_storeu_si256((__m256i *) ptr + 3, d04); + _mm256_storeu_si256((__m256i *) ptr + 4, d05); + _mm256_storeu_si256((__m256i *) ptr + 5, d06); + _mm256_storeu_si256((__m256i *) ptr + 6, d07); + _mm256_storeu_si256((__m256i *) ptr + 7, d08); + _mm256_storeu_si256((__m256i *) ptr + 8, d09); + _mm256_storeu_si256((__m256i *) ptr + 9, d10); +} + +static NOINLINE void sort_11v(uint64_t *ptr) { + __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0);; + __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1);; + __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2);; + __m256i d04 = _mm256_lddqu_si256((__m256i const *) ptr + 3);; + __m256i d05 = _mm256_lddqu_si256((__m256i const *) ptr + 4);; + __m256i d06 = _mm256_lddqu_si256((__m256i const *) ptr + 5);; + __m256i d07 = _mm256_lddqu_si256((__m256i const *) ptr + 6);; + __m256i d08 = _mm256_lddqu_si256((__m256i const *) ptr + 7);; + __m256i d09 = _mm256_lddqu_si256((__m256i const *) ptr + 8);; + __m256i d10 = _mm256_lddqu_si256((__m256i const *) ptr + 9);; + __m256i d11 = _mm256_lddqu_si256((__m256i const *) ptr + 10);; + sort_11v_ascending(d01, d02, d03, d04, d05, d06, d07, d08, d09, d10, d11); + _mm256_storeu_si256((__m256i *) ptr + 0, d01); + _mm256_storeu_si256((__m256i *) ptr + 1, d02); + _mm256_storeu_si256((__m256i *) ptr + 2, d03); + _mm256_storeu_si256((__m256i *) ptr + 3, d04); + _mm256_storeu_si256((__m256i *) ptr + 4, d05); + _mm256_storeu_si256((__m256i *) ptr + 5, d06); + _mm256_storeu_si256((__m256i *) ptr + 6, d07); + _mm256_storeu_si256((__m256i *) ptr + 7, d08); + _mm256_storeu_si256((__m256i *) ptr + 8, d09); + _mm256_storeu_si256((__m256i *) ptr + 9, d10); + _mm256_storeu_si256((__m256i *) ptr + 10, d11); +} + +static NOINLINE void sort_12v(uint64_t *ptr) { + __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0);; + __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1);; + __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2);; + __m256i d04 = _mm256_lddqu_si256((__m256i const *) ptr + 3);; + __m256i d05 = _mm256_lddqu_si256((__m256i const *) ptr + 4);; + __m256i d06 = _mm256_lddqu_si256((__m256i const *) ptr + 5);; + __m256i d07 = _mm256_lddqu_si256((__m256i const *) ptr + 6);; + __m256i d08 = _mm256_lddqu_si256((__m256i const *) ptr + 7);; + __m256i d09 = _mm256_lddqu_si256((__m256i const *) ptr + 8);; + __m256i d10 = _mm256_lddqu_si256((__m256i const *) ptr + 9);; + __m256i d11 = _mm256_lddqu_si256((__m256i const *) ptr + 10);; + __m256i d12 = _mm256_lddqu_si256((__m256i const *) ptr + 11);; + sort_12v_ascending(d01, d02, d03, d04, d05, d06, d07, d08, d09, d10, d11, d12); + _mm256_storeu_si256((__m256i *) ptr + 0, d01); + _mm256_storeu_si256((__m256i *) ptr + 1, d02); + _mm256_storeu_si256((__m256i *) ptr + 2, d03); + _mm256_storeu_si256((__m256i *) ptr + 3, d04); + _mm256_storeu_si256((__m256i *) ptr + 4, d05); + _mm256_storeu_si256((__m256i *) ptr + 5, d06); + _mm256_storeu_si256((__m256i *) ptr + 6, d07); + _mm256_storeu_si256((__m256i *) ptr + 7, d08); + _mm256_storeu_si256((__m256i *) ptr + 8, d09); + _mm256_storeu_si256((__m256i *) ptr + 9, d10); + _mm256_storeu_si256((__m256i *) ptr + 10, d11); + _mm256_storeu_si256((__m256i *) ptr + 11, d12); +} + +static NOINLINE void sort_13v(uint64_t *ptr) { + __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0);; + __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1);; + __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2);; 
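
The loads above use _mm256_lddqu_si256 rather than _mm256_loadu_si256. Both are unaligned 256-bit loads and behave identically on current microarchitectures (lddqu's special handling of cache-line-splitting loads mattered on older cores), so together with the unaligned _mm256_storeu_si256 stores the wrappers impose no alignment requirement on ptr. A round-trip sketch:

#include <immintrin.h>
#include <cstdint>

// Unaligned load/store round trip: no alignment contract on src or dst.
void copy_four_lanes(const uint64_t* src, uint64_t* dst) {
    __m256i v = _mm256_lddqu_si256((const __m256i*)src);
    _mm256_storeu_si256((__m256i*)dst, v);
}
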
+ __m256i d04 = _mm256_lddqu_si256((__m256i const *) ptr + 3);; + __m256i d05 = _mm256_lddqu_si256((__m256i const *) ptr + 4);; + __m256i d06 = _mm256_lddqu_si256((__m256i const *) ptr + 5);; + __m256i d07 = _mm256_lddqu_si256((__m256i const *) ptr + 6);; + __m256i d08 = _mm256_lddqu_si256((__m256i const *) ptr + 7);; + __m256i d09 = _mm256_lddqu_si256((__m256i const *) ptr + 8);; + __m256i d10 = _mm256_lddqu_si256((__m256i const *) ptr + 9);; + __m256i d11 = _mm256_lddqu_si256((__m256i const *) ptr + 10);; + __m256i d12 = _mm256_lddqu_si256((__m256i const *) ptr + 11);; + __m256i d13 = _mm256_lddqu_si256((__m256i const *) ptr + 12);; + sort_13v_ascending(d01, d02, d03, d04, d05, d06, d07, d08, d09, d10, d11, d12, d13); + _mm256_storeu_si256((__m256i *) ptr + 0, d01); + _mm256_storeu_si256((__m256i *) ptr + 1, d02); + _mm256_storeu_si256((__m256i *) ptr + 2, d03); + _mm256_storeu_si256((__m256i *) ptr + 3, d04); + _mm256_storeu_si256((__m256i *) ptr + 4, d05); + _mm256_storeu_si256((__m256i *) ptr + 5, d06); + _mm256_storeu_si256((__m256i *) ptr + 6, d07); + _mm256_storeu_si256((__m256i *) ptr + 7, d08); + _mm256_storeu_si256((__m256i *) ptr + 8, d09); + _mm256_storeu_si256((__m256i *) ptr + 9, d10); + _mm256_storeu_si256((__m256i *) ptr + 10, d11); + _mm256_storeu_si256((__m256i *) ptr + 11, d12); + _mm256_storeu_si256((__m256i *) ptr + 12, d13); +} + +static NOINLINE void sort_14v(uint64_t *ptr) { + __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0);; + __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1);; + __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2);; + __m256i d04 = _mm256_lddqu_si256((__m256i const *) ptr + 3);; + __m256i d05 = _mm256_lddqu_si256((__m256i const *) ptr + 4);; + __m256i d06 = _mm256_lddqu_si256((__m256i const *) ptr + 5);; + __m256i d07 = _mm256_lddqu_si256((__m256i const *) ptr + 6);; + __m256i d08 = _mm256_lddqu_si256((__m256i const *) ptr + 7);; + __m256i d09 = _mm256_lddqu_si256((__m256i const *) ptr + 8);; + __m256i d10 = _mm256_lddqu_si256((__m256i const *) ptr + 9);; + __m256i d11 = _mm256_lddqu_si256((__m256i const *) ptr + 10);; + __m256i d12 = _mm256_lddqu_si256((__m256i const *) ptr + 11);; + __m256i d13 = _mm256_lddqu_si256((__m256i const *) ptr + 12);; + __m256i d14 = _mm256_lddqu_si256((__m256i const *) ptr + 13);; + sort_14v_ascending(d01, d02, d03, d04, d05, d06, d07, d08, d09, d10, d11, d12, d13, d14); + _mm256_storeu_si256((__m256i *) ptr + 0, d01); + _mm256_storeu_si256((__m256i *) ptr + 1, d02); + _mm256_storeu_si256((__m256i *) ptr + 2, d03); + _mm256_storeu_si256((__m256i *) ptr + 3, d04); + _mm256_storeu_si256((__m256i *) ptr + 4, d05); + _mm256_storeu_si256((__m256i *) ptr + 5, d06); + _mm256_storeu_si256((__m256i *) ptr + 6, d07); + _mm256_storeu_si256((__m256i *) ptr + 7, d08); + _mm256_storeu_si256((__m256i *) ptr + 8, d09); + _mm256_storeu_si256((__m256i *) ptr + 9, d10); + _mm256_storeu_si256((__m256i *) ptr + 10, d11); + _mm256_storeu_si256((__m256i *) ptr + 11, d12); + _mm256_storeu_si256((__m256i *) ptr + 12, d13); + _mm256_storeu_si256((__m256i *) ptr + 13, d14); +} + +static NOINLINE void sort_15v(uint64_t *ptr) { + __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0);; + __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1);; + __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2);; + __m256i d04 = _mm256_lddqu_si256((__m256i const *) ptr + 3);; + __m256i d05 = _mm256_lddqu_si256((__m256i const *) ptr + 4);; + __m256i d06 = _mm256_lddqu_si256((__m256i const *) ptr + 5);; + __m256i 
d07 = _mm256_lddqu_si256((__m256i const *) ptr + 6);; + __m256i d08 = _mm256_lddqu_si256((__m256i const *) ptr + 7);; + __m256i d09 = _mm256_lddqu_si256((__m256i const *) ptr + 8);; + __m256i d10 = _mm256_lddqu_si256((__m256i const *) ptr + 9);; + __m256i d11 = _mm256_lddqu_si256((__m256i const *) ptr + 10);; + __m256i d12 = _mm256_lddqu_si256((__m256i const *) ptr + 11);; + __m256i d13 = _mm256_lddqu_si256((__m256i const *) ptr + 12);; + __m256i d14 = _mm256_lddqu_si256((__m256i const *) ptr + 13);; + __m256i d15 = _mm256_lddqu_si256((__m256i const *) ptr + 14);; + sort_15v_ascending(d01, d02, d03, d04, d05, d06, d07, d08, d09, d10, d11, d12, d13, d14, d15); + _mm256_storeu_si256((__m256i *) ptr + 0, d01); + _mm256_storeu_si256((__m256i *) ptr + 1, d02); + _mm256_storeu_si256((__m256i *) ptr + 2, d03); + _mm256_storeu_si256((__m256i *) ptr + 3, d04); + _mm256_storeu_si256((__m256i *) ptr + 4, d05); + _mm256_storeu_si256((__m256i *) ptr + 5, d06); + _mm256_storeu_si256((__m256i *) ptr + 6, d07); + _mm256_storeu_si256((__m256i *) ptr + 7, d08); + _mm256_storeu_si256((__m256i *) ptr + 8, d09); + _mm256_storeu_si256((__m256i *) ptr + 9, d10); + _mm256_storeu_si256((__m256i *) ptr + 10, d11); + _mm256_storeu_si256((__m256i *) ptr + 11, d12); + _mm256_storeu_si256((__m256i *) ptr + 12, d13); + _mm256_storeu_si256((__m256i *) ptr + 13, d14); + _mm256_storeu_si256((__m256i *) ptr + 14, d15); +} + +static NOINLINE void sort_16v(uint64_t *ptr) { + __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0);; + __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1);; + __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2);; + __m256i d04 = _mm256_lddqu_si256((__m256i const *) ptr + 3);; + __m256i d05 = _mm256_lddqu_si256((__m256i const *) ptr + 4);; + __m256i d06 = _mm256_lddqu_si256((__m256i const *) ptr + 5);; + __m256i d07 = _mm256_lddqu_si256((__m256i const *) ptr + 6);; + __m256i d08 = _mm256_lddqu_si256((__m256i const *) ptr + 7);; + __m256i d09 = _mm256_lddqu_si256((__m256i const *) ptr + 8);; + __m256i d10 = _mm256_lddqu_si256((__m256i const *) ptr + 9);; + __m256i d11 = _mm256_lddqu_si256((__m256i const *) ptr + 10);; + __m256i d12 = _mm256_lddqu_si256((__m256i const *) ptr + 11);; + __m256i d13 = _mm256_lddqu_si256((__m256i const *) ptr + 12);; + __m256i d14 = _mm256_lddqu_si256((__m256i const *) ptr + 13);; + __m256i d15 = _mm256_lddqu_si256((__m256i const *) ptr + 14);; + __m256i d16 = _mm256_lddqu_si256((__m256i const *) ptr + 15);; + sort_16v_ascending(d01, d02, d03, d04, d05, d06, d07, d08, d09, d10, d11, d12, d13, d14, d15, d16); + _mm256_storeu_si256((__m256i *) ptr + 0, d01); + _mm256_storeu_si256((__m256i *) ptr + 1, d02); + _mm256_storeu_si256((__m256i *) ptr + 2, d03); + _mm256_storeu_si256((__m256i *) ptr + 3, d04); + _mm256_storeu_si256((__m256i *) ptr + 4, d05); + _mm256_storeu_si256((__m256i *) ptr + 5, d06); + _mm256_storeu_si256((__m256i *) ptr + 6, d07); + _mm256_storeu_si256((__m256i *) ptr + 7, d08); + _mm256_storeu_si256((__m256i *) ptr + 8, d09); + _mm256_storeu_si256((__m256i *) ptr + 9, d10); + _mm256_storeu_si256((__m256i *) ptr + 10, d11); + _mm256_storeu_si256((__m256i *) ptr + 11, d12); + _mm256_storeu_si256((__m256i *) ptr + 12, d13); + _mm256_storeu_si256((__m256i *) ptr + 13, d14); + _mm256_storeu_si256((__m256i *) ptr + 14, d15); + _mm256_storeu_si256((__m256i *) ptr + 15, d16); +} + static void sort(uint64_t *ptr, size_t length) { + const int N = 4; + + switch(length / N) { + case 1: sort_01v(ptr); break; + case 2: sort_02v(ptr); break; + case 3: 
+      case 4: sort_04v(ptr); break;
+      case 5: sort_05v(ptr); break;
+      case 6: sort_06v(ptr); break;
+      case 7: sort_07v(ptr); break;
+      case 8: sort_08v(ptr); break;
+      case 9: sort_09v(ptr); break;
+      case 10: sort_10v(ptr); break;
+      case 11: sort_11v(ptr); break;
+      case 12: sort_12v(ptr); break;
+      case 13: sort_13v(ptr); break;
+      case 14: sort_14v(ptr); break;
+      case 15: sort_15v(ptr); break;
+      case 16: sort_16v(ptr); break;
+    }
+}
+};
+}
+}
+#endif
diff --git a/src/coreclr/src/gc/bitonic_sort.h b/src/coreclr/src/gc/smallsort/bitonic_sort.h
similarity index 100%
rename from src/coreclr/src/gc/bitonic_sort.h
rename to src/coreclr/src/gc/smallsort/bitonic_sort.h
diff --git a/src/coreclr/src/gc/vxsort.cpp b/src/coreclr/src/gc/vxsort.cpp
index caf10215f0968b..c7953f02229de1 100644
--- a/src/coreclr/src/gc/vxsort.cpp
+++ b/src/coreclr/src/gc/vxsort.cpp
@@ -1,7 +1,9 @@
+#include "vxsort.h"
+#include
 namespace gcsort {

-alignas(128) const int8_t vxsort_partition_traits<int64_t>::perm_table[128] = {
+alignas(128) const int8_t perm_table_64[128] = {
     0, 1, 2, 3, 4, 5, 6, 7, // 0b0000 (0)
     2, 3, 4, 5, 6, 7, 0, 1, // 0b0001 (1)
     0, 1, 4, 5, 6, 7, 2, 3, // 0b0010 (2)
@@ -20,7 +22,7 @@ alignas(128) const int8_t vxsort_partition_traits<int64_t>::perm_table[128] = {
     0, 1, 2, 3, 4, 5, 6, 7, // 0b1111 (15)
 };

-alignas(2048) const int8_t vxsort_partition_traits<int32_t>::perm_table[2048] = {
+alignas(2048) const int8_t perm_table_32[2048] = {
     0, 1, 2, 3, 4, 5, 6, 7, // 0b00000000 (0)
     1, 2, 3, 4, 5, 6, 7, 0, // 0b00000001 (1)
     0, 2, 3, 4, 5, 6, 7, 1, // 0b00000010 (2)
@@ -278,5 +280,6 @@ alignas(2048) const int8_t vxsort_partition_traits<int32_t>::perm_table[2048] =
     0, 1, 2, 3, 4, 5, 6, 7, // 0b11111110 (254)
     0, 1, 2, 3, 4, 5, 6, 7, // 0b11111111 (255)
 };
+
 }
diff --git a/src/coreclr/src/gc/vxsort.h b/src/coreclr/src/gc/vxsort.h
index 27d1bfc8dac5a3..3f18979299339f 100644
--- a/src/coreclr/src/gc/vxsort.h
+++ b/src/coreclr/src/gc/vxsort.h
@@ -5,10 +5,34 @@
 #include
 #include

-#include "bitonic_sort.int64_t.generated.h"
-//#include
-//#include
-//#include
+#include "smallsort/bitonic_sort.AVX2.int64_t.generated.h"
+#include "smallsort/bitonic_sort.AVX2.uint64_t.generated.h"
+#include "smallsort/bitonic_sort.AVX2.double.generated.h"
+#include "smallsort/bitonic_sort.AVX2.float.generated.h"
+#include "smallsort/bitonic_sort.AVX2.int32_t.generated.h"
+#include "smallsort/bitonic_sort.AVX2.uint32_t.generated.h"
+
+#if _MSC_VER
+#ifdef _M_IX86
+#define ARCH_X86
+#endif
+#ifdef _M_X64
+#define ARCH_X64
+#endif
+#ifdef _M_ARM64
+#define ARCH_ARM
+#endif
+#else
+#ifdef __i386__
+#define ARCH_X86
+#endif
+#ifdef __amd64__
+#define ARCH_X64
+#endif
+#ifdef __arm__
+#define ARCH_ARM
+#endif
+#endif

 #ifdef _MSC_VER
 // MSVC
@@ -23,6 +47,14 @@
 #define NOINLINE __attribute__((noinline))
 #endif

+#define i2d _mm256_castsi256_pd
+#define d2i _mm256_castpd_si256
+#define i2s _mm256_castsi256_ps
+#define s2i _mm256_castps_si256
+#define s2d _mm256_castps_pd
+#define d2s _mm256_castpd_ps
+
+
 namespace gcsort {

 using gcsort::smallsort::bitonic;
@@ -53,25 +85,33 @@ struct alignment_hint {
     int right_align : 8;
 };

+enum vector_machine {
+    AVX2,
+    AVX512,
+    SVE,
+};

-template <typename T>
-//using Tv = __m256;
-struct vxsort_partition_traits {
+template <typename T, vector_machine M>
+struct vxsort_machine_traits {
  public:
-  //typedef T TV __attribute__ ((__vector_size__ (32)));
   typedef __m256 Tv;

   static Tv load_vec(Tv* ptr);
   static Tv store_vec(Tv* ptr, Tv v);
-  static __m256i get_perm(int mask);
+  //static __m256i get_perm(int mask);
+  static Tv partition_vector(Tv v, int mask);
   static Tv get_vec_pivot(T pivot);
   static uint32_t get_cmpgt_mask(Tv a, Tv b);
 };

+#ifdef ARCH_X64
+
+extern const int8_t perm_table_64[128];
+extern const int8_t perm_table_32[2048];
+
 template <>
-class vxsort_partition_traits<int64_t> {
+class vxsort_machine_traits<int64_t, AVX2> {
  private:
-  const static int8_t perm_table[128];
  public:
   typedef __m256i Tv;
@@ -83,25 +123,80 @@ class vxsort_partition_traits<int64_t> {
     _mm256_storeu_si256(ptr, v);
   }

-  static INLINE __m256i get_perm(int mask) {
+  static INLINE Tv partition_vector(Tv v, int mask) {
     assert(mask >= 0);
     assert(mask <= 15);
-    return _mm256_cvtepu8_epi32(_mm_loadu_si128((__m128i*)(perm_table + mask * 8)));
-    //return _mm256_cvtepu8_epi32(
-    //    _mm_cvtsi64_si128(*((int64_t*)perm_table + mask)));
+    return s2i(_mm256_permutevar8x32_ps(i2s(v), _mm256_cvtepu8_epi32(_mm_loadu_si128((__m128i*)(perm_table_64 + mask * 8)))));
   }
+
   static INLINE Tv get_vec_pivot(int64_t pivot) {
     return _mm256_set1_epi64x(pivot);
   }
   static INLINE uint32_t get_cmpgt_mask(Tv a, Tv b) {
-    return _mm256_movemask_pd(_mm256_castsi256_pd(_mm256_cmpgt_epi64(a, b)));
+    return _mm256_movemask_pd(i2d(_mm256_cmpgt_epi64(a, b)));
   }
 };

 template <>
-class vxsort_partition_traits<int32_t> {
-private:
-  const static int8_t perm_table[2048];
+class vxsort_machine_traits<uint64_t, AVX2> {
+  private:
+  public:
+  typedef __m256i Tv;
+
+  static INLINE Tv load_vec(Tv* p) {
+    return _mm256_lddqu_si256(p);
+  }
+
+  static INLINE void store_vec(Tv* ptr, Tv v) {
+    _mm256_storeu_si256(ptr, v);
+  }
+
+  static INLINE Tv partition_vector(Tv v, int mask) {
+    assert(mask >= 0);
+    assert(mask <= 15);
+    return s2i(_mm256_permutevar8x32_ps(i2s(v), _mm256_cvtepu8_epi32(_mm_loadu_si128((__m128i*)(perm_table_64 + mask * 8)))));
+  }
+  static INLINE Tv get_vec_pivot(int64_t pivot) {
+    return _mm256_set1_epi64x(pivot);
+  }
+  static INLINE uint32_t get_cmpgt_mask(Tv a, Tv b) {
+    __m256i top_bit = _mm256_set1_epi64x(1LLU << 63);
+    return _mm256_movemask_pd(i2d(_mm256_cmpgt_epi64(_mm256_xor_si256(top_bit, a), _mm256_xor_si256(top_bit, b))));
+  }
+};
+
+template <>
+class vxsort_machine_traits<double, AVX2> {
+  private:
+  public:
+  typedef __m256d Tv;
+
+  static INLINE Tv load_vec(Tv* p) {
+    return _mm256_loadu_pd((double *) p);
+  }
+
+  static INLINE void store_vec(Tv* ptr, Tv v) {
+    _mm256_storeu_pd((double *) ptr, v);
+  }
+
+  static INLINE Tv partition_vector(Tv v, int mask) {
+    assert(mask >= 0);
+    assert(mask <= 15);
+    return s2d(_mm256_permutevar8x32_ps(d2s(v), _mm256_cvtepu8_epi32(_mm_loadu_si128((__m128i*)(perm_table_64 + mask * 8)))));
+  }
+
+  static INLINE Tv get_vec_pivot(double pivot) {
+    return _mm256_set1_pd(pivot);
+  }
+  static INLINE uint32_t get_cmpgt_mask(Tv a, Tv b) {
+    /// 0x0E: Greater-than (ordered, signaling) \n
+    /// 0x1E: Greater-than (ordered, non-signaling)
+    return _mm256_movemask_pd(_mm256_cmp_pd(a, b, 0x0E));
+  }
+};
+
+template <>
+class vxsort_machine_traits<int32_t, AVX2> {
  public:
   typedef __m256i Tv;
   static INLINE Tv load_vec(Tv* p) {
@@ -112,28 +207,123 @@ class vxsort_partition_traits<int32_t> {
     _mm256_storeu_si256(ptr, v);
   }

-  static INLINE __m256i get_perm(int mask) {
-    assert(mask >= 0);
-    assert(mask <= 255);
-    return _mm256_cvtepu8_epi32(_mm_loadu_si128((__m128i*)(perm_table + mask * 8)));
+  static INLINE Tv partition_vector(Tv v, int mask) {
+    assert(mask >= 0);
+    assert(mask <= 255);
+    return s2i(_mm256_permutevar8x32_ps(i2s(v), _mm256_cvtepu8_epi32(_mm_loadu_si128((__m128i*)(perm_table_32 + mask * 8)))));
   }
-  static INLINE __m256i get_vec_pivot(int32_t pivot) {
+
+  static INLINE Tv get_vec_pivot(int32_t pivot) {
     return _mm256_set1_epi32(pivot);
   }
-  static INLINE 
uint32_t get_cmpgt_mask(__m256i a, __m256i b) { - return _mm256_movemask_ps(_mm256_castsi256_ps(_mm256_cmpgt_epi32(a, b))); + static INLINE uint32_t get_cmpgt_mask(Tv a, Tv b) { + return _mm256_movemask_ps(i2s(_mm256_cmpgt_epi32(a, b))); } }; +template <> +class vxsort_machine_traits { + public: + typedef __m256i Tv; + static INLINE Tv load_vec(Tv* p) { + return _mm256_lddqu_si256(p); + } + + static INLINE void store_vec(Tv* ptr, Tv v) { + _mm256_storeu_si256(ptr, v); + } + + static INLINE Tv partition_vector(Tv v, int mask) { + assert(mask >= 0); + assert(mask <= 255); + return s2i(_mm256_permutevar8x32_ps(i2s(v), _mm256_cvtepu8_epi32(_mm_loadu_si128((__m128i*)(perm_table_32 + mask * 8))))); + } + + static INLINE Tv get_vec_pivot(uint32_t pivot) { + return _mm256_set1_epi32(pivot); + } + static INLINE uint32_t get_cmpgt_mask(Tv a, Tv b) { + __m256i top_bit = _mm256_set1_epi32(1U << 31); + return _mm256_movemask_ps(i2s(_mm256_cmpgt_epi32(_mm256_xor_si256(top_bit, a), _mm256_xor_si256(top_bit, b)))); + } +}; + +template <> +class vxsort_machine_traits { + public: + typedef __m256 Tv; + static INLINE Tv load_vec(Tv* p) { + return _mm256_loadu_ps((float *)p); + } + + static INLINE void store_vec(Tv* ptr, Tv v) { + _mm256_storeu_ps((float *) ptr, v); + } + + static INLINE Tv partition_vector(Tv v, int mask) { + assert(mask >= 0); + assert(mask <= 255); + return _mm256_permutevar8x32_ps(v, _mm256_cvtepu8_epi32(_mm_loadu_si128((__m128i*)(perm_table_32 + mask * 8)))); + } + + static INLINE Tv get_vec_pivot(float pivot) { + return _mm256_set1_ps(pivot); + } + + static INLINE uint32_t get_cmpgt_mask(Tv a, Tv b) { + /// 0x0E: Greater-than (ordered, signaling) \n + /// 0x1E: Greater-than (ordered, non-signaling) + return _mm256_movemask_ps(_mm256_cmp_ps(a, b, 0x0E)); + } +}; + +#endif +#ifdef ARCH_ARM64 +#error "─│─│──╫▓▓▓╫──│─────│─│─│──────╫▓▓╫│──│─│" +#error "──│─▓███████▓─╫╫╫╫╫╫╫╫╫╫╫╫╫│▓███████╫──" +#error "───██████████████████████████████████▓─" +#error "│─████████████│─│─│─│─────▓███████████╫" +#error "╫█████────│───│─│───│─────│─│─│───╫████▓" +#error "│████│──│───│───│─│───│───────│─│─│▓███╫" +#error "─▓███│───────│─▓██───│╫██╫─│─│─│───▓███│" +#error "──███─│──────╫████▓───█████────────▓███─" +#error "──╫██──│─│──╫██████│─│██████─│─────▓██─│" +#error "│─│▓█││─│─││███▓▓██─│─██▓▓███─│─│──▓█─│─" +#error "────█│─│───███╫▓▓█▓│──█▓▓▓▓██▓─────▓█───" +#error "│─││█││───▓███╫██▓╫─│─▓▓█▓▓███─────▓█───" +#error "─│─╫█│─│─│████▓╫▓▓─────█▓╫████▓──│─▓█───" +#error "│─││█╫│─││███████─│██╫│▓███████─│─│██─│─" +#error "─│─│█▓╫╫─▓██████╫│─▓█│──▓██████│╫╫│██│─│" +#error "│─│─██│╫│▓█████╫│───▓───│▓█████╫╫╫╫█▓──" +#error "─│─│▓█╫││╫████╫│││╫██▓││││▓████│╫─▓█╫│─│" +#error "│─│─│██│││╫▓▓││╫╫╫╫╫▓╫╫╫╫╫│╫▓▓╫││╫██──│─" +#error "─│───▓██╫─────││││││─││││││────│▓██│────" +#error "│─│─│─▓██▓╫╫╫╫╫╫╫╫▓▓▓▓▓╫╫╫╫╫╫╫▓███│────" +#error "───────╫██████████▓▓▓▓▓██████████│────│" +#error "│─│─│───▓█████████╫─│─▓█████████│─│─│─│" +#error "─────────██████████──│█████████╫─│───││" +#error "│─│─│───│▓█╫███████││▓███████╫█││─│─│─│" +#error "───────│─██─╫██████▓─███████││█╫───│──│" +#error "│───│───│██─││█████▓─█████▓─│╫█╫│──────" +#error "─│─│───│─▓█──│─╫▓██│─▓██▓│─│─▓█│───────" +#error "│───│─│─│─██────│─│───│─────│██───│─│─│" +#error "─│─│───│─│▓██╫─│─│─────│─│─▓██││─│───│─│" +#error "│───────│─│██████████████████▓│─│─│─│─│" +#error "─│───│─│───│███████▓▓████████│─│───│──│" +#error "│─│───│─│─│─│██████╫─▓█████▓────│─│─│──" +#error "─────│─────╫│╫▓████▓─█████▓│╫╫───────│" +#error "│─│───│───╫─╫╫╫╫███╫╫╫██▓╫│╫╫╫│─│─────" +#endif + 
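Taken together, the specializations above reduce every element type to the same four primitives: load/store a vector, broadcast a pivot (get_vec_pivot), derive a greater-than bitmask (get_cmpgt_mask), and reorder a compared vector so that elements <= pivot land at the front (partition_vector). The snippet below is a self-contained illustration of one such partition step for four int64_t values; it is not code from this patch. perm_for_mask is a helper invented here that computes on the fly the permutation that the generated perm_table_64 caches; any permutation separating the two groups works, so its output need not match the table entries.

#include <immintrin.h>
#include <cstdint>
#include <cstdio>

// Build the 8x32-bit-lane permutation for a 4x int64_t vector from the 4-bit
// comparison mask: lanes whose mask bit is clear (<= pivot) are packed to the
// front, the rest to the back. perm_table_64 caches one such permutation per mask.
static __m256i perm_for_mask(int mask) {
    int32_t idx[8];
    int lo = 0, hi = 8;
    for (int lane = 0; lane < 4; lane++) {
        if (mask & (1 << lane)) { idx[--hi] = 2 * lane + 1; idx[--hi] = 2 * lane; }
        else                    { idx[lo++] = 2 * lane;     idx[lo++] = 2 * lane + 1; }
    }
    return _mm256_loadu_si256((const __m256i*)idx);
}

int main() {
    int64_t data[4] = { 5, 1, 9, 3 };
    __m256i v = _mm256_loadu_si256((const __m256i*)data);       // load_vec
    __m256i P = _mm256_set1_epi64x(4);                          // get_vec_pivot(4)
    int mask = _mm256_movemask_pd(
        _mm256_castsi256_pd(_mm256_cmpgt_epi64(v, P)));         // get_cmpgt_mask
    v = _mm256_castps_si256(_mm256_permutevar8x32_ps(
        _mm256_castsi256_ps(v), perm_for_mask(mask)));          // partition_vector
    _mm256_storeu_si256((__m256i*)data, v);                     // store_vec
    int greater = _mm_popcnt_u32((unsigned)mask);
    printf("<= pivot:");
    for (int i = 0; i < 4 - greater; i++) printf(" %lld", (long long)data[i]);
    printf("\n>  pivot:");
    for (int i = 4 - greater; i < 4; i++) printf(" %lld", (long long)data[i]);
    printf("\n");                                               // "1 3" and "9 5"
    return 0;
}

Built with, for example, gcc -mavx2 -mpopcnt, this prints the two groups (1 3 and 9 5). In the sorter itself the permuted vector is written to both ends of the partition area and the write pointers then advance by the two popcounts, which is what partition_block below does.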
-template <typename T, int Unroll>
+template <typename T, vector_machine M, int Unroll>
 class vxsort {
     static_assert(Unroll >= 1, "Unroll can be in the range 1..12");
     static_assert(Unroll <= 12, "Unroll can be in the range 1..12");

 private:
     //using Tv2 = Tp::Tv;
-    using Tp = vxsort_partition_traits<T>;
+    using Tp = vxsort_machine_traits<T, M>;
     typedef typename Tp::Tv TV;
     static const int ELEMENT_ALIGN = sizeof(T) - 1;
@@ -223,7 +413,6 @@ class vxsort {
         _endPtr = end;
     }

-
     T* _startPtr = nullptr;
     T* _endPtr = nullptr;
@@ -337,22 +526,19 @@ class vxsort {
             // * Calculate pre-alignment on the left
             // * See it would cause us an out-of bounds read
             // * Since we'd like to avoid that, we adjust for post-alignment
-            // * There are no branches since we do branch->arithmetic
-            auto preAlignedLeft = (T*) ((size_t)left & ~ALIGN_MASK);
+            // * No branches since we do branch->arithmetic
+            auto preAlignedLeft = reinterpret_cast<T*>(reinterpret_cast<size_t>(left) & ~ALIGN_MASK);
             auto cannotPreAlignLeft = (preAlignedLeft - _startPtr) >> 63;
             realignHint.left_align = (preAlignedLeft - left) + (N & cannotPreAlignLeft);
             assert(alignment_hint::is_aligned(left + realignHint.left_align));
         }

         if (realignHint.right_align == alignment_hint::REALIGN) {
+            // Same as above, but in addition:
             // right is pointing just PAST the last element we intend to partition
-            // (where we also store the pivot) So we calculate alignment based on
-            // right - 1, and YES: I am casting to ulong before doing the -1, this is
-            // intentional since the whole thing is either aligned to 32 bytes or not,
-            // so decrementing the POINTER value by 1 is sufficient for the alignment,
-            // an the JIT sucks at this anyway
-            auto preAlignedRight =
-                (T*) (((size_t)(right - 1) & ~ALIGN_MASK) + ALIGN);
+            // (it's pointing to where we will store the pivot!) So we calculate alignment based on
+            // right - 1
+            auto preAlignedRight = reinterpret_cast<T*>(((reinterpret_cast<size_t>(right) - 1) & ~ALIGN_MASK) + ALIGN);
             auto cannotPreAlignRight = (_endPtr - preAlignedRight) >> 63;
             realignHint.right_align = (preAlignedRight - right - (N & cannotPreAlignRight));
             assert(alignment_hint::is_aligned(right + realignHint.right_align));
@@ -385,7 +571,7 @@ class vxsort {
                                        T*& left,
                                        T*& right) {
         auto mask = Tp::get_cmpgt_mask(dataVec, P);
-        dataVec = _mm256_permutevar8x32_epi32(dataVec, Tp::get_perm(mask));
+        dataVec = Tp::partition_vector(dataVec, mask);
         Tp::store_vec(reinterpret_cast<TV*>(left), dataVec);
         Tp::store_vec(reinterpret_cast<TV*>(right), dataVec);
         auto popCount = -_mm_popcnt_u64(mask);
@@ -394,7 +580,7 @@ class vxsort {

     template <int InnerUnroll>
-    T* vectorized_partition(T* left, T* right, alignment_hint hint) {
+    T* vectorized_partition(T* const left, T* const right, const alignment_hint hint) {
         assert(right - left >= SMALL_SORT_THRESHOLD_ELEMENTS);
         assert((reinterpret_cast<size_t>(left) & ELEMENT_ALIGN) == 0);
         assert((reinterpret_cast<size_t>(right) & ELEMENT_ALIGN) == 0);
@@ -440,19 +626,16 @@ class vxsort {
         *right = std::numeric_limits<T>::Max();

         // Broadcast the selected pivot
-        const TV P = Tp::get_vec_pivot(pivot);//_mm256_set1_epi64x(pivot);
+        const TV P = Tp::get_vec_pivot(pivot);

         auto readLeft = left;
         auto readRight = right;
-        auto writeLeft = left;
-        auto writeRight = right - N;

         auto tmpStartLeft = _temp;
         auto tmpLeft = tmpStartLeft;
         auto tmpStartRight = _temp + PARTITION_TMP_SIZE_IN_ELEMENTS;
         auto tmpRight = tmpStartRight;
-
         tmpRight -= N;

         // the read heads always advance by 8 elements, or 32 bytes,
@@ -475,8 +658,8 @@ class vxsort {
         auto ltMask = Tp::get_cmpgt_mask(LT0, P);
         auto rtPopCount = max(_mm_popcnt_u32(rtMask), rightAlign);
         auto ltPopCount = _mm_popcnt_u32(ltMask);
-        RT0 = 
_mm256_permutevar8x32_epi32(RT0, Tp::get_perm(rtMask)); - LT0 = _mm256_permutevar8x32_epi32(LT0, Tp::get_perm(ltMask)); + RT0 = Tp::partition_vector(RT0, rtMask); + LT0 = Tp::partition_vector(LT0, ltMask); Tp::store_vec((TV*) tmpRight, RT0); Tp::store_vec((TV*) tmpLeft, LT0); @@ -510,6 +693,7 @@ class vxsort { align_right_scalar_uncommon(readRight, pivot, tmpLeft, tmpRight); tmpRight -= N; } + assert(((size_t)readLeft & ALIGN_MASK) == 0); assert(((size_t)readRight & ALIGN_MASK) == 0); @@ -538,6 +722,9 @@ class vxsort { readRightV -= InnerUnroll*2; TV* nextPtr; + auto writeLeft = left; + auto writeRight = right - N; + while (readLeftV < readRightV) { if (writeRight - ((T *) readRightV) < (2 * (InnerUnroll * N) - N)) { nextPtr = readRightV; From 6af96dd2e15246b9bafd64895f76b91df6f35248 Mon Sep 17 00:00:00 2001 From: Peter Sollich Date: Wed, 3 Jun 2020 15:57:55 +0200 Subject: [PATCH 06/31] Fixes for Linux compile, ended up disabling vxsort for Linux for now. --- src/coreclr/src/gc/gc.cpp | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/src/coreclr/src/gc/gc.cpp b/src/coreclr/src/gc/gc.cpp index 1d239fa1a63851..3b23dcf5739f74 100644 --- a/src/coreclr/src/gc/gc.cpp +++ b/src/coreclr/src/gc/gc.cpp @@ -19,16 +19,12 @@ #include "gcpriv.h" -#ifdef TARGET_AMD64 +#if defined(TARGET_AMD64) && defined(TARGET_WINDOWS) #define USE_VXSORT #else #define USE_INTROSORT #endif -#ifdef USE_VXSORT -#include "vxsort.h" -#endif - // We just needed a simple random number generator for testing. class gc_rand { @@ -2088,10 +2084,15 @@ namespace std public: static int64_t Max() { - return LLONG_MAX; + return 0x7fffffffffffffff; } }; } + +#ifdef USE_VXSORT +#include "vxsort.h" +#endif + void vxsort(uint8_t** low, uint8_t** high, unsigned int depth) { // auto sorter = gcsort::vxsort(); From af23cd2faf680f4e0e78b9c2dd26ded8127752be Mon Sep 17 00:00:00 2001 From: Peter Sollich Date: Mon, 8 Jun 2020 09:43:36 +0200 Subject: [PATCH 07/31] Experimenting with 32-bit sort - the variation that gathers mark list entries pertaining to the local heap by reading the mark lists from all the heaps appears to be too slow and scales very badly with increasing number of heaps. 
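The trick this and the following patches iterate on deserves a standalone statement before the diff. When every mark-list entry lies within a sub-4GB range starting at some base, each 8-byte pointer can be replaced, in the same storage, by its 4-byte offset from that base; the sort then moves half as much data and fits twice as many keys per vector. The round-trip is modeled below as a hedged, self-contained sketch: the function name is invented here and std::sort stands in for the vectorized sort.

#include <algorithm>
#include <cassert>
#include <cstdint>
#include <cstddef>

// Sort an array of pointers by compressing them to 32-bit offsets in place.
// Precondition: every list[i] lies in [low, low + 4GB).
void sort_pointers_as_offsets(uint8_t** list, size_t count, uint8_t* low)
{
    uint32_t* offsets = (uint32_t*)list;        // reuse the pointer array's storage
    for (size_t i = 0; i < count; i++)          // compress: 8 bytes -> 4 bytes
    {
        size_t offset = (size_t)(list[i] - low);
        assert((uint32_t)offset == offset);     // range precondition
        offsets[i] = (uint32_t)offset;
    }
    std::sort(offsets, offsets + count);        // stand-in for the AVX2 sort
    for (size_t i = count; i-- > 0; )           // decompress backwards: the 8-byte
        list[i] = low + offsets[i];             // store at i covers offsets 2i, 2i+1
}

The decompression loop must run backwards: rebuilding list[i] writes 8 bytes over the two 4-byte slots holding offsets[2i] and offsets[2i+1], which a forward pass would still need to read.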
---
 src/coreclr/src/gc/gc.cpp | 122 +++++++++++++++++++++++++++++++++--
 src/coreclr/src/gc/gcpriv.h | 5 ++
 src/coreclr/src/gc/gcsvr.cpp | 4 +-
 src/coreclr/src/gc/gcwks.cpp | 4 +-
 src/coreclr/src/gc/vxsort.h | 8 +--
 5 files changed, 129 insertions(+), 14 deletions(-)

diff --git a/src/coreclr/src/gc/gc.cpp b/src/coreclr/src/gc/gc.cpp
index 3b23dcf5739f74..edbf46e28f4e09 100644
--- a/src/coreclr/src/gc/gc.cpp
+++ b/src/coreclr/src/gc/gc.cpp
@@ -20,7 +20,7 @@
 #include "gcpriv.h"

 #if defined(TARGET_AMD64) && defined(TARGET_WINDOWS)
-#define USE_VXSORT
+#define USE_VXSORT32
 #else
 #define USE_INTROSORT
 #endif
@@ -2064,7 +2064,7 @@ uint8_t* tree_search (uint8_t* tree, uint8_t* old_address);

 #ifdef USE_INTROSORT
 #define _sort introsort::sort
-#elif defined(USE_VXSORT)
+#elif defined(USE_VXSORT64) || defined(USE_VXSORT32)
 #define _sort vxsort

 namespace std
 {
@@ -2078,6 +2078,18 @@ namespace std
         }
     };

+#ifdef USE_VXSORT32
+    template <>
+    class numeric_limits<uint32_t>
+    {
+    public:
+        static uint32_t Max()
+        {
+            return 0xffffffff;
+        }
+    };
+#endif //USE_VXSORT32
+#ifdef USE_VXSORT64
     template <>
     class numeric_limits<int64_t>
     {
@@ -2087,15 +2099,15 @@ namespace std
             return 0x7fffffffffffffff;
         }
     };
+#endif //USE_VXSORT64
 }

-#ifdef USE_VXSORT
 #include "vxsort.h"
-#endif

+#ifdef USE_VXSORT64
 void vxsort(uint8_t** low, uint8_t** high, unsigned int depth)
 {
-// auto sorter = gcsort::vxsort();
+    // auto sorter = gcsort::vxsort();
     auto sorter = gcsort::vxsort();
     sorter.sort((int64_t*)low, (int64_t*)high);
 #ifdef _DEBUG
@@ -2105,6 +2117,21 @@ void vxsort(uint8_t** low, uint8_t** high, unsigned int depth)
     }
 #endif
 }
+#elif defined(USE_VXSORT32)
+void vxsort(uint32_t* low, uint32_t* high, unsigned int depth)
+{
+    // auto sorter = gcsort::vxsort();
+    auto sorter = gcsort::vxsort();
+    sorter.sort(low, high);
+#ifdef _DEBUG
+    for (uint32_t* p = low; p < high; p++)
+    {
+        assert(p[0] <= p[1]);
+    }
+#endif
+}
+#endif
+
 #else //USE_INTROSORT
 #define _sort qsort1
 void qsort1(uint8_t** low, uint8_t** high, unsigned int depth);
@@ -8220,6 +8247,7 @@ inline static void swap_elements(uint8_t** i,uint8_t** j)

 #ifdef MULTIPLE_HEAPS
 #ifdef PARALLEL_MARK_LIST_SORT
+NOINLINE
 void gc_heap::sort_mark_list()
 {
     // if this heap had a mark list overflow, we don't do anything
@@ -8262,6 +8290,39 @@ void gc_heap::sort_mark_list()
         return;
     }

+#ifdef USE_VXSORT32
+    uint8_t* low = gc_low;
+    uint8_t* high = heap_segment_allocated (ephemeral_heap_segment);
+    size_t size = high - low;
+    assert((uint32_t)size == size);
+    uint8_t** mark_list = &g_mark_list_copy[heap_number * mark_list_size];
+    uint8_t** mark_list_end = &mark_list[mark_list_size - 1];
+    uint32_t* mark_list_32 = (uint32_t*)mark_list;
+    uint32_t* mark_list_curr_32 = mark_list_32;
+    uint32_t* mark_list_end_32 = (uint32_t*)mark_list_end;
+    for (int i = 0; i < n_heaps; i++)
+    {
+        gc_heap* hp = g_heaps[i];
+        uint8_t** end = hp->mark_list_index;
+        for (uint8_t** p = hp->mark_list; p < end; p++)
+        {
+            uint8_t* item = *p;
+            if (item < low || high <= item)
+                continue;
+            size_t offset = item - low;
+            assert((uint32_t)offset == offset);
+            if (mark_list_curr_32 < mark_list_end_32)
+                *mark_list_curr_32++ = (uint32_t)offset;
+            else
+                break;
+        }
+    }
+    mark_list_index_32 = mark_list_curr_32;
+    if (mark_list_curr_32 < mark_list_end_32)
+    {
+        _sort (mark_list_32, mark_list_curr_32 - 1, 0);
+    }
+#else
     dprintf (3, ("Sorting mark lists"));
     if (mark_list_index > mark_list)
         _sort (mark_list, mark_list_index - 1, 0);
@@ -8355,6 +8416,7 @@ void gc_heap::sort_mark_list()
 #undef predicate
 // printf("second phase of 
sort_mark_list for heap %d took %u cycles\n", this->heap_number, GetCycleCount32() - start); +#endif //USE_VXSORT32 } void gc_heap::append_to_mark_list(uint8_t **start, uint8_t **end) @@ -8367,6 +8429,34 @@ void gc_heap::append_to_mark_list(uint8_t **start, uint8_t **end) // printf("heap %d: appended %Id slots to mark_list\n", heap_number, slots_to_copy); } +#ifdef USE_VXSORT32 +void gc_heap::merge_mark_lists() +{ + // in case of mark list overflow, don't bother + if (mark_list_index > mark_list_end) + { + // printf("merge_mark_lists: overflow\n"); + return; + } + + // in this case we have a sorted list of 32 bit offsets from gc_low + uint32_t* mark_list_32 = (uint32_t*)&g_mark_list_copy[heap_number * mark_list_size]; + size_t element_count = mark_list_index_32 - mark_list_32; + uint8_t** mark_list = &g_mark_list[heap_number * mark_list_size]; + uint8_t** mark_list_end = &mark_list[mark_list_size - 1]; + if (element_count > mark_list_size) + { + mark_list_index = mark_list_end + 1; + return; + } + uint8_t* low = gc_low; + for (size_t i = 0; i < element_count; i++) + { + mark_list[i] = low + mark_list_32[i]; + } + mark_list_index = &mark_list[element_count]; +} +#else //!USE_VXSORT32 void gc_heap::merge_mark_lists() { uint8_t** source[MAX_SUPPORTED_CPUS]; @@ -8507,6 +8597,7 @@ void gc_heap::merge_mark_lists() } #endif //defined(_DEBUG) || defined(TRACE_GC) } +#endif //!USE_VXSORT32 #else //PARALLEL_MARK_LIST_SORT void gc_heap::combine_mark_lists() { @@ -10094,7 +10185,7 @@ gc_heap::init_semi_shared() #ifdef MARK_LIST #ifdef MULTIPLE_HEAPS - mark_list_size = min (1024*1024, max (8192, soh_segment_size/(2*10*32))); + mark_list_size = min (300*1024, max (8192, soh_segment_size/(2*10*32))); g_mark_list = make_mark_list (mark_list_size*n_heaps); min_balance_threshold = alloc_quantum_balance_units * CLR_SIZE * 2; @@ -21997,7 +22088,26 @@ void gc_heap::plan_phase (int condemned_gen_number) ) { #ifndef MULTIPLE_HEAPS +#ifdef USE_VXSORT32 + ptrdiff_t entry_count = mark_list_index - mark_list; + uint32_t* mark_list_32 = (uint32_t*)mark_list; + uint8_t* low = gc_low; + for (ptrdiff_t i = 0; i < entry_count; i++) + { + uint8_t* item = mark_list[i]; + size_t offset = item - low; + assert((uint32_t)offset == offset); + mark_list_32[i] = (uint32_t)offset; + } + _sort(&mark_list_32[0], &mark_list_32[entry_count-1], 0); + for (ptrdiff_t i = entry_count - 1; i >= 0; i--) + { + uint32_t offset = mark_list_32[i]; + mark_list[i] = low + offset; + } +#else //!USE_VXSORT32 _sort (&mark_list[0], mark_list_index-1, 0); +#endif //printf ("using mark list at GC #%d", dd_collection_count (dynamic_data_of (0))); //verify_qsort_array (&mark_list[0], mark_list_index-1); #endif //!MULTIPLE_HEAPS diff --git a/src/coreclr/src/gc/gcpriv.h b/src/coreclr/src/gc/gcpriv.h index e9ac7a7bfd15f4..3efdf02554b0d0 100644 --- a/src/coreclr/src/gc/gcpriv.h +++ b/src/coreclr/src/gc/gcpriv.h @@ -3734,6 +3734,11 @@ class gc_heap PER_HEAP uint8_t** mark_list_end; +#if defined(TARGET_AMD64) && defined(TARGET_WINDOWS) && defined(MULTIPLE_HEAPS) + PER_HEAP + uint32_t* mark_list_index_32; +#endif //defined(TARGET_AMD64) && defined(TARGET_WINDOWS) && defined(MULTIPLE_HEAPS) + PER_HEAP uint8_t** mark_list_index; diff --git a/src/coreclr/src/gc/gcsvr.cpp b/src/coreclr/src/gc/gcsvr.cpp index a6a17d64f7549f..dcf0945778b5f2 100644 --- a/src/coreclr/src/gc/gcsvr.cpp +++ b/src/coreclr/src/gc/gcsvr.cpp @@ -24,9 +24,9 @@ namespace SVR { #include "gcimpl.h" #include "gc.cpp" -#ifdef USE_VXSORT +#if defined(USE_VXSORT64) || defined(USE_VXSORT32) #include 
"vxsort.cpp" -#endif //USE_VXSORT +#endif //USE_VXSORT64 || USE_VXSORT32 } #endif // defined(FEATURE_SVR_GC) diff --git a/src/coreclr/src/gc/gcwks.cpp b/src/coreclr/src/gc/gcwks.cpp index 6529156dabf74f..8f58addcfe150b 100644 --- a/src/coreclr/src/gc/gcwks.cpp +++ b/src/coreclr/src/gc/gcwks.cpp @@ -24,8 +24,8 @@ namespace WKS { #include "gcimpl.h" #include "gc.cpp" -#ifdef USE_VXSORT +#if defined(USE_VXSORT64) || defined(USE_VXSORT32) #include "vxsort.cpp" -#endif //USE_VXSORT +#endif //USE_VXSORT64 || USE_VXSORT32 } diff --git a/src/coreclr/src/gc/vxsort.h b/src/coreclr/src/gc/vxsort.h index 3f18979299339f..7a0198eff7cd20 100644 --- a/src/coreclr/src/gc/vxsort.h +++ b/src/coreclr/src/gc/vxsort.h @@ -457,7 +457,7 @@ class vxsort { return readRight; } - void sort(T* left, T* right, + void realsort(T* left, T* right, alignment_hint realignHint, int depthLimit) { auto length = (size_t)(right - left + 1); @@ -561,8 +561,8 @@ class vxsort { _depth++; - sort(left, sep - 2, realignHint.realign_right(), depthLimit); - sort(sep, right, realignHint.realign_left(), depthLimit); + realsort(left, sep - 2, realignHint.realign_right(), depthLimit); + realsort(sep, right, realignHint.realign_left(), depthLimit); _depth--; } @@ -804,7 +804,7 @@ class vxsort { NOINLINE void sort(T* left, T* right) { reset(left, right); auto depthLimit = 2 * floor_log2_plus_one(right + 1 - left); - sort(left, right, alignment_hint(), depthLimit); + realsort(left, right, alignment_hint(), depthLimit); } }; From 4c03ee4abfa4794e53be3adc6094c95e2cb3bfa2 Mon Sep 17 00:00:00 2001 From: Peter Sollich Date: Mon, 8 Jun 2020 12:23:14 +0200 Subject: [PATCH 08/31] 32-bit sort - preserve failing case. --- src/coreclr/src/gc/gc.cpp | 151 +++++++++++++++++------------------ src/coreclr/src/gc/gcsvr.cpp | 4 +- src/coreclr/src/gc/gcwks.cpp | 4 +- 3 files changed, 79 insertions(+), 80 deletions(-) diff --git a/src/coreclr/src/gc/gc.cpp b/src/coreclr/src/gc/gc.cpp index edbf46e28f4e09..efc4f111c6270b 100644 --- a/src/coreclr/src/gc/gc.cpp +++ b/src/coreclr/src/gc/gc.cpp @@ -20,7 +20,7 @@ #include "gcpriv.h" #if defined(TARGET_AMD64) && defined(TARGET_WINDOWS) -#define USE_VXSORT32 +#define USE_VXSORT #else #define USE_INTROSORT #endif @@ -2064,7 +2064,7 @@ uint8_t* tree_search (uint8_t* tree, uint8_t* old_address); #ifdef USE_INTROSORT #define _sort introsort::sort -#elif defined(USE_VXSORT64) || defined(USE_VXSORT32) +#elif defined(USE_VXSORT) #define _sort vxsort namespace std { @@ -2077,8 +2077,6 @@ namespace std return _Ty(); } }; - -#ifdef USE_VXSORT32 template <> class numeric_limits { @@ -2088,8 +2086,6 @@ namespace std return 0xffffffff; } }; -#endif //USE_VXSORT64 -#ifdef USE_VXSORT64 template <> class numeric_limits { @@ -2099,12 +2095,11 @@ namespace std return 0x7fffffffffffffff; } }; -#endif //USE_VXSORT64 } #include "vxsort.h" -#ifdef USE_VXSORT64 +#ifdef USE_VXSORT void vxsort(uint8_t** low, uint8_t** high, unsigned int depth) { // auto sorter = gcsort::vxsort(); @@ -2117,7 +2112,6 @@ void vxsort(uint8_t** low, uint8_t** high, unsigned int depth) } #endif } -#elif defined(USE_VXSORT32) void vxsort(uint32_t* low, uint32_t* high, unsigned int depth) { // auto sorter = gcsort::vxsort(); @@ -2130,7 +2124,7 @@ void vxsort(uint32_t* low, uint32_t* high, unsigned int depth) } #endif } -#endif +#endif //USE_VXSORT #else //USE_INTROSORT #define _sort qsort1 @@ -8274,12 +8268,16 @@ void gc_heap::sort_mark_list() // compute total mark list size and total ephemeral size size_t total_mark_list_size = 0; size_t total_ephemeral_size = 0; + 
uint8_t* low = (uint8_t*)~0; + uint8_t* high = 0; for (int i = 0; i < n_heaps; i++) { gc_heap* hp = g_heaps[i]; size_t ephemeral_size = heap_segment_allocated (hp->ephemeral_heap_segment) - hp->gc_low; total_ephemeral_size += ephemeral_size; total_mark_list_size += (hp->mark_list_index - hp->mark_list); + low = min (low, hp->gc_low); + high = max (high, heap_segment_allocated (hp->ephemeral_heap_segment)); } // give up if this is not an ephemeral GC or the mark list size is unreasonably large @@ -8290,42 +8288,65 @@ void gc_heap::sort_mark_list() return; } -#ifdef USE_VXSORT32 - uint8_t* low = gc_low; - uint8_t* high = heap_segment_allocated (ephemeral_heap_segment); - size_t size = high - low; - assert((uint32_t)size == size); - uint8_t** mark_list = &g_mark_list_copy[heap_number * mark_list_size]; - uint8_t** mark_list_end = &mark_list[mark_list_size - 1]; - uint32_t* mark_list_32 = (uint32_t*)mark_list; - uint32_t* mark_list_curr_32 = mark_list_32; - uint32_t* mark_list_end_32 = (uint32_t*)mark_list_end; - for (int i = 0; i < n_heaps; i++) +#ifdef USE_VXSORT + // is the range small enough for a 32-bit sort? + ptrdiff_t range = high - low; + ptrdiff_t scaled_range = range >> 3; + if ((uint32_t)scaled_range == scaled_range) { - gc_heap* hp = g_heaps[i]; - uint8_t** end = hp->mark_list_index; - for (uint8_t** p = hp->mark_list; p < end; p++) + dprintf(3, ("Sorting mark lists as 32-bit offsets")); + + // first step: scale the pointers down to 32-bit offsets + uint8_t** mark_list = &g_mark_list_copy[heap_number * mark_list_size]; + uint8_t** mark_list_end = &mark_list[mark_list_size - 1]; + uint32_t* mark_list_32 = (uint32_t*)mark_list; + uint32_t* mark_list_curr_32 = mark_list_32; + uint32_t* mark_list_end_32 = (uint32_t*)mark_list_end; + uint8_t** end = this->mark_list_index; + for (uint8_t** p = this->mark_list; p < end; p++) { uint8_t* item = *p; - if (item < low || high <= item) - continue; - size_t offset = item - low; - assert((uint32_t)offset == offset); - if (mark_list_curr_32 < mark_list_end_32) - *mark_list_curr_32++ = (uint32_t)offset; - else - break; + ptrdiff_t scaled_item_offset = (item - low) >> 8; + assert((uint32_t)scaled_item_offset == scaled_item_offset); + assert(mark_list_curr_32 < mark_list_end_32); + *mark_list_curr_32++ = (uint32_t)scaled_item_offset; + } + + // sort the 32-bit offsets + if (mark_list_curr_32 > mark_list_32) + { + _sort(mark_list_32, mark_list_curr_32 - 1, 0); + } + +#ifdef _DEBUG + if (this->mark_list_index > this->mark_list) + { + _sort(this->mark_list, this->mark_list_index - 1, 0); } +#endif + + // scale the 32-bit offsets back to 64-bit pointers + mark_list_index = this->mark_list; + for (uint32_t* p = mark_list_32; p < mark_list_curr_32; p++) + { + ptrdiff_t scaled_item_offset = *p; + uint8_t* item = low + (scaled_item_offset << 3); + assert(*mark_list_index == item); + *mark_list_index++ = item; + } + assert(mark_list_index == this->mark_list_index); } - mark_list_index_32 = mark_list_curr_32; - if (mark_list_curr_32 < mark_list_end_32) + else { - _sort (mark_list_32, mark_list_curr_32 - 1, 0); + dprintf(3, ("Sorting mark lists")); + if (mark_list_index > mark_list) + _sort(mark_list, mark_list_index - 1, 0); } #else dprintf (3, ("Sorting mark lists")); if (mark_list_index > mark_list) _sort (mark_list, mark_list_index - 1, 0); +#endif //USE_VXSORT // printf("first phase of sort_mark_list for heap %d took %u cycles to sort %u entries\n", this->heap_number, GetCycleCount32() - start, mark_list_index - mark_list); // start = GetCycleCount32(); 
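The range check in the block above does slightly more than fit pointers into 32 bits: because mark-list entries are 8-byte aligned, (item - low) is always a multiple of 8, so shifting right by 3 before narrowing lets a 32-bit offset span a 32GB ephemeral range rather than 4GB, and the assert verifies the shift loses nothing. A hedged sketch of just the guard and compression step follows; try_compress is a name invented for illustration, not code from the patch.

#include <cassert>
#include <cstdint>
#include <cstddef>

// Returns false when the range is too large for scaled 32-bit offsets,
// in which case the caller keeps the plain 64-bit pointer sort.
bool try_compress(uint8_t** list, size_t count, uint8_t* low, uint8_t* high)
{
    ptrdiff_t range = high - low;
    ptrdiff_t scaled_range = range >> 3;          // 8-byte alignment assumed
    if ((uint32_t)scaled_range != scaled_range)
        return false;
    uint32_t* offsets = (uint32_t*)list;          // compress in place
    for (size_t i = 0; i < count; i++)
    {
        ptrdiff_t scaled = (list[i] - low) >> 3;
        assert(low + (scaled << 3) == list[i]);   // shift must be lossless
        offsets[i] = (uint32_t)scaled;
    }
    return true;
}

On failure the caller simply falls back to sorting the pointers at full width, which is exactly the else arm of the block above.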
@@ -8416,7 +8437,6 @@ void gc_heap::sort_mark_list() #undef predicate // printf("second phase of sort_mark_list for heap %d took %u cycles\n", this->heap_number, GetCycleCount32() - start); -#endif //USE_VXSORT32 } void gc_heap::append_to_mark_list(uint8_t **start, uint8_t **end) @@ -8429,34 +8449,6 @@ void gc_heap::append_to_mark_list(uint8_t **start, uint8_t **end) // printf("heap %d: appended %Id slots to mark_list\n", heap_number, slots_to_copy); } -#ifdef USE_VXSORT32 -void gc_heap::merge_mark_lists() -{ - // in case of mark list overflow, don't bother - if (mark_list_index > mark_list_end) - { - // printf("merge_mark_lists: overflow\n"); - return; - } - - // in this case we have a sorted list of 32 bit offsets from gc_low - uint32_t* mark_list_32 = (uint32_t*)&g_mark_list_copy[heap_number * mark_list_size]; - size_t element_count = mark_list_index_32 - mark_list_32; - uint8_t** mark_list = &g_mark_list[heap_number * mark_list_size]; - uint8_t** mark_list_end = &mark_list[mark_list_size - 1]; - if (element_count > mark_list_size) - { - mark_list_index = mark_list_end + 1; - return; - } - uint8_t* low = gc_low; - for (size_t i = 0; i < element_count; i++) - { - mark_list[i] = low + mark_list_32[i]; - } - mark_list_index = &mark_list[element_count]; -} -#else //!USE_VXSORT32 void gc_heap::merge_mark_lists() { uint8_t** source[MAX_SUPPORTED_CPUS]; @@ -8597,7 +8589,6 @@ void gc_heap::merge_mark_lists() } #endif //defined(_DEBUG) || defined(TRACE_GC) } -#endif //!USE_VXSORT32 #else //PARALLEL_MARK_LIST_SORT void gc_heap::combine_mark_lists() { @@ -22088,24 +22079,32 @@ void gc_heap::plan_phase (int condemned_gen_number) ) { #ifndef MULTIPLE_HEAPS -#ifdef USE_VXSORT32 +#ifdef USE_VXSORT ptrdiff_t entry_count = mark_list_index - mark_list; uint32_t* mark_list_32 = (uint32_t*)mark_list; uint8_t* low = gc_low; - for (ptrdiff_t i = 0; i < entry_count; i++) + ptrdiff_t range = heap_segment_allocated (ephemeral_heap_segment) - low; + if ((uint32_t)range == range) { - uint8_t* item = mark_list[i]; - size_t offset = item - low; - assert((uint32_t)offset == offset); - mark_list_32[i] = (uint32_t)offset; + for (ptrdiff_t i = 0; i < entry_count; i++) + { + uint8_t* item = mark_list[i]; + size_t offset = item - low; + assert((uint32_t)offset == offset); + mark_list_32[i] = (uint32_t)offset; + } + _sort(&mark_list_32[0], &mark_list_32[entry_count - 1], 0); + for (ptrdiff_t i = entry_count - 1; i >= 0; i--) + { + uint32_t offset = mark_list_32[i]; + mark_list[i] = low + offset; + } } - _sort(&mark_list_32[0], &mark_list_32[entry_count-1], 0); - for (ptrdiff_t i = entry_count - 1; i >= 0; i--) + else { - uint32_t offset = mark_list_32[i]; - mark_list[i] = low + offset; + _sort(&mark_list[0], mark_list_index - 1, 0); } -#else //!USE_VXSORT32 +#else //USE_VXSORT _sort (&mark_list[0], mark_list_index-1, 0); #endif //printf ("using mark list at GC #%d", dd_collection_count (dynamic_data_of (0))); diff --git a/src/coreclr/src/gc/gcsvr.cpp b/src/coreclr/src/gc/gcsvr.cpp index dcf0945778b5f2..a6a17d64f7549f 100644 --- a/src/coreclr/src/gc/gcsvr.cpp +++ b/src/coreclr/src/gc/gcsvr.cpp @@ -24,9 +24,9 @@ namespace SVR { #include "gcimpl.h" #include "gc.cpp" -#if defined(USE_VXSORT64) || defined(USE_VXSORT32) +#ifdef USE_VXSORT #include "vxsort.cpp" -#endif //USE_VXSORT64 || USE_VXSORT32 +#endif //USE_VXSORT } #endif // defined(FEATURE_SVR_GC) diff --git a/src/coreclr/src/gc/gcwks.cpp b/src/coreclr/src/gc/gcwks.cpp index 8f58addcfe150b..6529156dabf74f 100644 --- a/src/coreclr/src/gc/gcwks.cpp +++ 
b/src/coreclr/src/gc/gcwks.cpp @@ -24,8 +24,8 @@ namespace WKS { #include "gcimpl.h" #include "gc.cpp" -#if defined(USE_VXSORT64) || defined(USE_VXSORT32) +#ifdef USE_VXSORT #include "vxsort.cpp" -#endif //USE_VXSORT64 || USE_VXSORT32 +#endif //USE_VXSORT } From a6b2305458e9752e4792fc5c609f107d6cabe7ed Mon Sep 17 00:00:00 2001 From: Peter Sollich Date: Wed, 10 Jun 2020 10:47:15 +0200 Subject: [PATCH 09/31] Do the pointer compression/decompression in place, to improve performance, optionally write mark lists and associated information to binary files for further analysis. --- src/coreclr/src/gc/gc.cpp | 82 +++++++++++++++++++++++++++++---------- 1 file changed, 62 insertions(+), 20 deletions(-) diff --git a/src/coreclr/src/gc/gc.cpp b/src/coreclr/src/gc/gc.cpp index efc4f111c6270b..edcbca268466c0 100644 --- a/src/coreclr/src/gc/gc.cpp +++ b/src/coreclr/src/gc/gc.cpp @@ -8296,45 +8296,87 @@ void gc_heap::sort_mark_list() { dprintf(3, ("Sorting mark lists as 32-bit offsets")); + ptrdiff_t item_count = mark_list_index - mark_list; + +#define WRITE_SORT_DATA + +#if defined(_DEBUG) || defined(WRITE_SORT_DATA) + uint8_t** mark_list_copy = &g_mark_list_copy[heap_number * mark_list_size]; + uint8_t** mark_list_copy_index = &mark_list_copy[item_count]; +#endif // first step: scale the pointers down to 32-bit offsets - uint8_t** mark_list = &g_mark_list_copy[heap_number * mark_list_size]; - uint8_t** mark_list_end = &mark_list[mark_list_size - 1]; + uint8_t** mark_list = this->mark_list; uint32_t* mark_list_32 = (uint32_t*)mark_list; - uint32_t* mark_list_curr_32 = mark_list_32; - uint32_t* mark_list_end_32 = (uint32_t*)mark_list_end; - uint8_t** end = this->mark_list_index; - for (uint8_t** p = this->mark_list; p < end; p++) + for (ptrdiff_t i = 0; i < item_count; i++) { - uint8_t* item = *p; - ptrdiff_t scaled_item_offset = (item - low) >> 8; + uint8_t* item = mark_list[i]; + ptrdiff_t scaled_item_offset = (item - low) >> 3; assert((uint32_t)scaled_item_offset == scaled_item_offset); - assert(mark_list_curr_32 < mark_list_end_32); - *mark_list_curr_32++ = (uint32_t)scaled_item_offset; + assert((low + (scaled_item_offset << 3)) == item); + mark_list_32[i] = (uint32_t)scaled_item_offset; +#if defined(_DEBUG) || defined(WRITE_SORT_DATA) + mark_list_copy[i] = item; +#endif } // sort the 32-bit offsets - if (mark_list_curr_32 > mark_list_32) + if (item_count > 0) { - _sort(mark_list_32, mark_list_curr_32 - 1, 0); + ptrdiff_t start = get_cycle_count(); + + _sort(&mark_list_32[0], &mark_list_32[item_count - 1], 0); + + ptrdiff_t elapsed_cycles = get_cycle_count() - start; + int log2_item_count = index_of_highest_set_bit(item_count); + double elapsed_cyles_by_n_log_n = (double)elapsed_cycles / item_count / log2_item_count; + + printf("GC#%d: first phase of sort_mark_list for heap %d took %u cycles to sort %u entries (cost/(n*log2(n) = %5.2f)\n", settings.gc_index, this->heap_number, elapsed_cycles, item_count, elapsed_cyles_by_n_log_n); + +#ifdef WRITE_SORT_DATA + char file_name[256]; + sprintf_s(file_name, _countof(file_name), "sort_data_gc%d_heap%d", settings.gc_index, heap_number); + + FILE* f; + errno_t err = fopen_s(&f, file_name, "wb"); + + if (err == 0) + { + size_t magic = 'SDAT'; + if (fwrite(&magic, sizeof(magic), 1, f) != 1) + printf("fwrite failed\n"); + if (fwrite(&elapsed_cycles, sizeof(elapsed_cycles), 1, f) != 1) + printf("fwrite failed\n"); + if (fwrite(&low, sizeof(low), 1, f) != 1) + printf("fwrite failed\n"); + if (fwrite(&item_count, sizeof(item_count), 1, f) != 1) + printf("fwrite 
failed\n"); + if (fwrite(mark_list_copy, sizeof(mark_list_copy[0]), item_count, f) != item_count) + printf("fwrite failed\n"); + if (fwrite(&magic, sizeof(magic), 1, f) != 1) + printf("fwrite failed\n"); + if (fclose(f) != 0) + printf("fclose failed\n"); + } +#endif } #ifdef _DEBUG - if (this->mark_list_index > this->mark_list) + // in debug, sort the copy as well, so we can check we got the right result + if (mark_list_copy_index > mark_list_copy) { - _sort(this->mark_list, this->mark_list_index - 1, 0); + _sort(mark_list_copy, mark_list_copy_index - 1, 0); } #endif // scale the 32-bit offsets back to 64-bit pointers - mark_list_index = this->mark_list; - for (uint32_t* p = mark_list_32; p < mark_list_curr_32; p++) + // work backwards to avoid overwriting information that is still needed + for (ptrdiff_t i = item_count-1; i >= 0; i--) { - ptrdiff_t scaled_item_offset = *p; + ptrdiff_t scaled_item_offset = mark_list_32[i]; uint8_t* item = low + (scaled_item_offset << 3); - assert(*mark_list_index == item); - *mark_list_index++ = item; + assert (mark_list_copy[i] == item); + mark_list[i] = item; } - assert(mark_list_index == this->mark_list_index); } else { From 2f3d3302479c4d46f8626a9247c75330d95065a0 Mon Sep 17 00:00:00 2001 From: Peter Sollich Date: Thu, 18 Jun 2020 11:50:52 +0200 Subject: [PATCH 10/31] Introduce runtime check whether CPU supports AVX2 instruction set. --- src/coreclr/src/gc/env/gcenv.ee.h | 1 + src/coreclr/src/gc/gc.cpp | 197 +++++++++++---------- src/coreclr/src/gc/gcenv.ee.standalone.inl | 6 + src/coreclr/src/gc/gcinterface.ee.h | 11 ++ src/coreclr/src/gc/sample/gcenv.ee.cpp | 5 + src/coreclr/src/vm/gcenv.ee.cpp | 15 ++ src/coreclr/src/vm/gcenv.ee.h | 2 + 7 files changed, 145 insertions(+), 92 deletions(-) diff --git a/src/coreclr/src/gc/env/gcenv.ee.h b/src/coreclr/src/gc/env/gcenv.ee.h index fa4f2dcd765889..0212bf60164604 100644 --- a/src/coreclr/src/gc/env/gcenv.ee.h +++ b/src/coreclr/src/gc/env/gcenv.ee.h @@ -91,6 +91,7 @@ class GCToEEInterface static void VerifySyncTableEntry(); static void UpdateGCEventStatus(int publicLevel, int publicKeywords, int privateLevel, int privateKeywords); + static bool HasInstructionSet(InstructionSet requestedInstructionSet); }; #endif // __GCENV_EE_H__ diff --git a/src/coreclr/src/gc/gc.cpp b/src/coreclr/src/gc/gc.cpp index 0367946aaa2bdc..f36edba29e4eb9 100644 --- a/src/coreclr/src/gc/gc.cpp +++ b/src/coreclr/src/gc/gc.cpp @@ -8191,7 +8191,8 @@ void rqsort1( uint8_t* *low, uint8_t* *high) } } -#ifdef USE_INTROSORT +// vxsort uses introsort as a fallback if the AVX2 instruction set is not supported +#if defined(USE_INTROSORT) || defined(USE_VXSORT) class introsort { @@ -8367,106 +8368,112 @@ void gc_heap::sort_mark_list() } #ifdef USE_VXSORT - // is the range small enough for a 32-bit sort? - ptrdiff_t range = high - low; - ptrdiff_t scaled_range = range >> 3; - if ((uint32_t)scaled_range == scaled_range) + // runtime test if AVX2 is indeed available + if (GCToEEInterface::HasInstructionSet(kInstructionSetAVX2)) { - dprintf(3, ("Sorting mark lists as 32-bit offsets")); + // is the range small enough for a 32-bit sort? 
+ ptrdiff_t range = high - low; + ptrdiff_t scaled_range = range >> 3; + if ((uint32_t)scaled_range == scaled_range) + { + dprintf(3, ("Sorting mark lists as 32-bit offsets")); - ptrdiff_t item_count = mark_list_index - mark_list; + ptrdiff_t item_count = mark_list_index - mark_list; -#define WRITE_SORT_DATA +//#define WRITE_SORT_DATA #if defined(_DEBUG) || defined(WRITE_SORT_DATA) - uint8_t** mark_list_copy = &g_mark_list_copy[heap_number * mark_list_size]; - uint8_t** mark_list_copy_index = &mark_list_copy[item_count]; + uint8_t** mark_list_copy = &g_mark_list_copy[heap_number * mark_list_size]; + uint8_t** mark_list_copy_index = &mark_list_copy[item_count]; #endif - // first step: scale the pointers down to 32-bit offsets - uint8_t** mark_list = this->mark_list; - uint32_t* mark_list_32 = (uint32_t*)mark_list; - for (ptrdiff_t i = 0; i < item_count; i++) - { - uint8_t* item = mark_list[i]; - ptrdiff_t scaled_item_offset = (item - low) >> 3; - assert((uint32_t)scaled_item_offset == scaled_item_offset); - assert((low + (scaled_item_offset << 3)) == item); - mark_list_32[i] = (uint32_t)scaled_item_offset; + // first step: scale the pointers down to 32-bit offsets + uint8_t** mark_list = this->mark_list; + uint32_t* mark_list_32 = (uint32_t*)mark_list; + for (ptrdiff_t i = 0; i < item_count; i++) + { + uint8_t* item = mark_list[i]; + ptrdiff_t scaled_item_offset = (item - low) >> 3; + assert((uint32_t)scaled_item_offset == scaled_item_offset); + assert((low + (scaled_item_offset << 3)) == item); + mark_list_32[i] = (uint32_t)scaled_item_offset; #if defined(_DEBUG) || defined(WRITE_SORT_DATA) - mark_list_copy[i] = item; + mark_list_copy[i] = item; #endif - } + } - // sort the 32-bit offsets - if (item_count > 0) - { - ptrdiff_t start = get_cycle_count(); + // sort the 32-bit offsets + if (item_count > 0) + { + ptrdiff_t start = get_cycle_count(); - _sort(&mark_list_32[0], &mark_list_32[item_count - 1], 0); + _sort(&mark_list_32[0], &mark_list_32[item_count - 1], 0); - ptrdiff_t elapsed_cycles = get_cycle_count() - start; - int log2_item_count = index_of_highest_set_bit(item_count); - double elapsed_cyles_by_n_log_n = (double)elapsed_cycles / item_count / log2_item_count; + ptrdiff_t elapsed_cycles = get_cycle_count() - start; + int log2_item_count = index_of_highest_set_bit(item_count); + double elapsed_cyles_by_n_log_n = (double)elapsed_cycles / item_count / log2_item_count; - printf("GC#%d: first phase of sort_mark_list for heap %d took %u cycles to sort %u entries (cost/(n*log2(n) = %5.2f)\n", settings.gc_index, this->heap_number, elapsed_cycles, item_count, elapsed_cyles_by_n_log_n); + printf("GC#%d: first phase of sort_mark_list for heap %d took %u cycles to sort %u entries (cost/(n*log2(n) = %5.2f)\n", settings.gc_index, this->heap_number, elapsed_cycles, item_count, elapsed_cyles_by_n_log_n); #ifdef WRITE_SORT_DATA - char file_name[256]; - sprintf_s(file_name, _countof(file_name), "sort_data_gc%d_heap%d", settings.gc_index, heap_number); - - FILE* f; - errno_t err = fopen_s(&f, file_name, "wb"); - - if (err == 0) - { - size_t magic = 'SDAT'; - if (fwrite(&magic, sizeof(magic), 1, f) != 1) - printf("fwrite failed\n"); - if (fwrite(&elapsed_cycles, sizeof(elapsed_cycles), 1, f) != 1) - printf("fwrite failed\n"); - if (fwrite(&low, sizeof(low), 1, f) != 1) - printf("fwrite failed\n"); - if (fwrite(&item_count, sizeof(item_count), 1, f) != 1) - printf("fwrite failed\n"); - if (fwrite(mark_list_copy, sizeof(mark_list_copy[0]), item_count, f) != item_count) - printf("fwrite failed\n"); - if 
(fwrite(&magic, sizeof(magic), 1, f) != 1) - printf("fwrite failed\n"); - if (fclose(f) != 0) - printf("fclose failed\n"); - } + char file_name[256]; + sprintf_s(file_name, _countof(file_name), "sort_data_gc%d_heap%d", settings.gc_index, heap_number); + + FILE* f; + errno_t err = fopen_s(&f, file_name, "wb"); + + if (err == 0) + { + size_t magic = 'SDAT'; + if (fwrite(&magic, sizeof(magic), 1, f) != 1) + printf("fwrite failed\n"); + if (fwrite(&elapsed_cycles, sizeof(elapsed_cycles), 1, f) != 1) + printf("fwrite failed\n"); + if (fwrite(&low, sizeof(low), 1, f) != 1) + printf("fwrite failed\n"); + if (fwrite(&item_count, sizeof(item_count), 1, f) != 1) + printf("fwrite failed\n"); + if (fwrite(mark_list_copy, sizeof(mark_list_copy[0]), item_count, f) != item_count) + printf("fwrite failed\n"); + if (fwrite(&magic, sizeof(magic), 1, f) != 1) + printf("fwrite failed\n"); + if (fclose(f) != 0) + printf("fclose failed\n"); + } #endif - } + } #ifdef _DEBUG - // in debug, sort the copy as well, so we can check we got the right result - if (mark_list_copy_index > mark_list_copy) - { - _sort(mark_list_copy, mark_list_copy_index - 1, 0); - } + // in debug, sort the copy as well, so we can check we got the right result + if (mark_list_copy_index > mark_list_copy) + { + _sort(mark_list_copy, mark_list_copy_index - 1, 0); + } #endif - // scale the 32-bit offsets back to 64-bit pointers - // work backwards to avoid overwriting information that is still needed - for (ptrdiff_t i = item_count-1; i >= 0; i--) + // scale the 32-bit offsets back to 64-bit pointers + // work backwards to avoid overwriting information that is still needed + for (ptrdiff_t i = item_count - 1; i >= 0; i--) + { + ptrdiff_t scaled_item_offset = mark_list_32[i]; + uint8_t* item = low + (scaled_item_offset << 3); + assert(mark_list_copy[i] == item); + mark_list[i] = item; + } + } + else { - ptrdiff_t scaled_item_offset = mark_list_32[i]; - uint8_t* item = low + (scaled_item_offset << 3); - assert (mark_list_copy[i] == item); - mark_list[i] = item; + dprintf(3, ("Sorting mark lists")); + if (mark_list_index > mark_list) + _sort(mark_list, mark_list_index - 1, 0); } } else +#endif //USE_VXSORT { dprintf(3, ("Sorting mark lists")); if (mark_list_index > mark_list) - _sort(mark_list, mark_list_index - 1, 0); + introsort::sort(mark_list, mark_list_index - 1, 0); } -#else - dprintf (3, ("Sorting mark lists")); - if (mark_list_index > mark_list) - _sort (mark_list, mark_list_index - 1, 0); -#endif //USE_VXSORT // printf("first phase of sort_mark_list for heap %d took %u cycles to sort %u entries\n", this->heap_number, GetCycleCount32() - start, mark_list_index - mark_list); // start = GetCycleCount32(); @@ -22221,33 +22228,39 @@ void gc_heap::plan_phase (int condemned_gen_number) { #ifndef MULTIPLE_HEAPS #ifdef USE_VXSORT - ptrdiff_t entry_count = mark_list_index - mark_list; - uint32_t* mark_list_32 = (uint32_t*)mark_list; - uint8_t* low = gc_low; - ptrdiff_t range = heap_segment_allocated (ephemeral_heap_segment) - low; - if ((uint32_t)range == range) + if (GCToEEInterface::HasInstructionSet(kInstructionSetAVX2)) { - for (ptrdiff_t i = 0; i < entry_count; i++) + ptrdiff_t entry_count = mark_list_index - mark_list; + uint32_t* mark_list_32 = (uint32_t*)mark_list; + uint8_t* low = gc_low; + ptrdiff_t range = heap_segment_allocated(ephemeral_heap_segment) - low; + if ((uint32_t)range == range) { - uint8_t* item = mark_list[i]; - size_t offset = item - low; - assert((uint32_t)offset == offset); - mark_list_32[i] = (uint32_t)offset; + for 
(ptrdiff_t i = 0; i < entry_count; i++) + { + uint8_t* item = mark_list[i]; + size_t offset = item - low; + assert((uint32_t)offset == offset); + mark_list_32[i] = (uint32_t)offset; + } + _sort(&mark_list_32[0], &mark_list_32[entry_count - 1], 0); + for (ptrdiff_t i = entry_count - 1; i >= 0; i--) + { + uint32_t offset = mark_list_32[i]; + mark_list[i] = low + offset; + } } - _sort(&mark_list_32[0], &mark_list_32[entry_count - 1], 0); - for (ptrdiff_t i = entry_count - 1; i >= 0; i--) + else { - uint32_t offset = mark_list_32[i]; - mark_list[i] = low + offset; + _sort(&mark_list[0], mark_list_index - 1, 0); } } else +#endif //USE_VXSORT { - _sort(&mark_list[0], mark_list_index - 1, 0); + introsort::sort(&mark_list[0], mark_list_index - 1, 0); } -#else //USE_VXSORT - _sort (&mark_list[0], mark_list_index-1, 0); -#endif + //printf ("using mark list at GC #%d", dd_collection_count (dynamic_data_of (0))); //verify_qsort_array (&mark_list[0], mark_list_index-1); #endif //!MULTIPLE_HEAPS diff --git a/src/coreclr/src/gc/gcenv.ee.standalone.inl b/src/coreclr/src/gc/gcenv.ee.standalone.inl index b91d0c4d5b8915..0cd24cf2ae0673 100644 --- a/src/coreclr/src/gc/gcenv.ee.standalone.inl +++ b/src/coreclr/src/gc/gcenv.ee.standalone.inl @@ -295,4 +295,10 @@ inline void GCToEEInterface::UpdateGCEventStatus(int publicLevel, int publicKeyw #endif // __linux__ } +inline bool GCToEEInterface::HasInstructionSet(InstructionSet instructionSet) +{ + assert(g_theGCToCLR != nullptr); + return g_theGCToCLR->HasInstructionSet(instructionSet); +} + #endif // __GCTOENV_EE_STANDALONE_INL__ diff --git a/src/coreclr/src/gc/gcinterface.ee.h b/src/coreclr/src/gc/gcinterface.ee.h index bc9a0ab162c34d..35aee4a47f5996 100644 --- a/src/coreclr/src/gc/gcinterface.ee.h +++ b/src/coreclr/src/gc/gcinterface.ee.h @@ -21,6 +21,14 @@ enum EtwGCRootKind kEtwGCRootKindOther = 3, }; +enum InstructionSet +{ +#if defined(TARGET_X86) || defined(TARGET_AMD64) + kInstructionSetAVX2 = 0, + kInstructionSetAVX512 = 1, +#endif //defined(TARGET_X86) || defined(TARGET_AMD64) +}; + // This interface provides functions that the GC can use to fire events. // Events fired on this interface are split into two categories: "known" // events and "dynamic" events. 
Known events are events that are baked-in @@ -422,6 +430,9 @@ class IGCToCLR { virtual void UpdateGCEventStatus(int publicLevel, int publicKeywords, int privateLEvel, int privateKeywords) = 0; + + virtual + bool HasInstructionSet(InstructionSet requestedInstructionSet) = 0; }; #endif // _GCINTERFACE_EE_H_ diff --git a/src/coreclr/src/gc/sample/gcenv.ee.cpp b/src/coreclr/src/gc/sample/gcenv.ee.cpp index 6f5151ee1534cd..4b99a9e2714aa2 100644 --- a/src/coreclr/src/gc/sample/gcenv.ee.cpp +++ b/src/coreclr/src/gc/sample/gcenv.ee.cpp @@ -344,3 +344,8 @@ inline void GCToEEInterface::AnalyzeSurvivorsFinished(int condemnedGeneration) { } + +bool GCToEEInterface::HasInstructionSet(InstructionSet requestedInstructionSet) +{ + return false; +} diff --git a/src/coreclr/src/vm/gcenv.ee.cpp b/src/coreclr/src/vm/gcenv.ee.cpp index 7bb9fdde70a612..baf8c11a57d101 100644 --- a/src/coreclr/src/vm/gcenv.ee.cpp +++ b/src/coreclr/src/vm/gcenv.ee.cpp @@ -1654,3 +1654,18 @@ void GCToEEInterface::UpdateGCEventStatus(int currentPublicLevel, int currentPub } #endif // __linux__ && FEATURE_EVENT_TRACE } + +bool GCToEEInterface::HasInstructionSet(InstructionSet requestedInstructionSet) +{ +#if defined(TARGET_X86) || defined(TARGET_AMD64) + CORJIT_FLAGS cpuCompileFlags = ExecutionManager::GetEEJitManager()->GetCPUCompileFlags(); + switch (requestedInstructionSet) + { + case kInstructionSetAVX2: return cpuCompileFlags.IsSet(InstructionSet_AVX2); + case kInstructionSetAVX512: return false; + default: return false; + } +#else + return false; +#endif //defined(TARGET_X86) || defined(TARGET_AMD64) +} diff --git a/src/coreclr/src/vm/gcenv.ee.h b/src/coreclr/src/vm/gcenv.ee.h index 37f9dc9f68c4df..3132b455b22781 100644 --- a/src/coreclr/src/vm/gcenv.ee.h +++ b/src/coreclr/src/vm/gcenv.ee.h @@ -83,6 +83,8 @@ class GCToEEInterface : public IGCToCLR { void VerifySyncTableEntry(); void UpdateGCEventStatus(int publicLevel, int publicKeywords, int privateLevel, int privateKeywords); + + bool HasInstructionSet(InstructionSet requestedInstructionSet); }; } // namespace standalone From 912f9e89f9edf31b241887a6bc410a325ef1a92a Mon Sep 17 00:00:00 2001 From: Peter Sollich Date: Fri, 19 Jun 2020 17:22:50 +0200 Subject: [PATCH 11/31] Implement mark list growth. --- src/coreclr/src/gc/gc.cpp | 68 +++++++++++++++++++++++++++++++++++-- src/coreclr/src/gc/gcpriv.h | 13 ++++--- 2 files changed, 73 insertions(+), 8 deletions(-) diff --git a/src/coreclr/src/gc/gc.cpp b/src/coreclr/src/gc/gc.cpp index f36edba29e4eb9..3d4832f8976bc4 100644 --- a/src/coreclr/src/gc/gc.cpp +++ b/src/coreclr/src/gc/gc.cpp @@ -2166,6 +2166,7 @@ uint8_t** gc_heap::g_mark_list_copy; #endif //PARALLEL_MARK_LIST_SORT size_t gc_heap::mark_list_size; +bool gc_heap::mark_list_overflow; #endif //MARK_LIST seg_mapping* seg_mapping_table; @@ -8327,6 +8328,7 @@ void gc_heap::sort_mark_list() if (mark_list_index > mark_list_end) { // printf("sort_mark_list: overflow on heap %d\n", heap_number); + mark_list_overflow = true; return; } @@ -8363,7 +8365,8 @@ void gc_heap::sort_mark_list() if (settings.condemned_generation > 1 || total_mark_list_size > total_ephemeral_size/256) { mark_list_index = mark_list_end + 1; - // printf("sort_mark_list: overflow on heap %d\n", i); + // let's not count this as a mark list overflow + mark_list_overflow = false; return; } @@ -8373,6 +8376,7 @@ void gc_heap::sort_mark_list() { // is the range small enough for a 32-bit sort? 
ptrdiff_t range = high - low; + assert(sizeof(uint8_t*) == (1<<3)); ptrdiff_t scaled_range = range >> 3; if ((uint32_t)scaled_range == scaled_range) { @@ -8412,7 +8416,7 @@ void gc_heap::sort_mark_list() int log2_item_count = index_of_highest_set_bit(item_count); double elapsed_cyles_by_n_log_n = (double)elapsed_cycles / item_count / log2_item_count; - printf("GC#%d: first phase of sort_mark_list for heap %d took %u cycles to sort %u entries (cost/(n*log2(n) = %5.2f)\n", settings.gc_index, this->heap_number, elapsed_cycles, item_count, elapsed_cyles_by_n_log_n); +// printf("GC#%d: first phase of sort_mark_list for heap %d took %u cycles to sort %u entries (cost/(n*log2(n) = %5.2f)\n", settings.gc_index, this->heap_number, elapsed_cycles, item_count, elapsed_cyles_by_n_log_n); #ifdef WRITE_SORT_DATA char file_name[256]; @@ -8804,6 +8808,55 @@ void gc_heap::combine_mark_lists() } #endif // PARALLEL_MARK_LIST_SORT #endif //MULTIPLE_HEAPS + +void gc_heap::grow_mark_list() +{ + size_t new_mark_list_size = min(mark_list_size * 2, 1000 * 1024); + if (new_mark_list_size == mark_list_size) + return; + +#ifdef MULTIPLE_HEAPS + uint8_t** new_mark_list = make_mark_list(new_mark_list_size * n_heaps); + +#ifdef PARALLEL_MARK_LIST_SORT + uint8_t** new_mark_list_copy = make_mark_list(new_mark_list_size * n_heaps); +#ifdef BIT_MAP_SORT + memset(new_mark_list_copy, 0, new_mark_list_size * n_heaps * sizeof(new_mark_list_copy[0])); +#endif //BIT_MAP_SORT +#endif //PARALLEL_MARK_LIST_SORT + + if (new_mark_list != nullptr +#ifdef PARALLEL_MARK_LIST_SORT + && new_mark_list_copy != nullptr +#endif //PARALLEL_MARK_LIST_SORT + ) + { + delete[] g_mark_list; + g_mark_list = new_mark_list; +#ifdef PARALLEL_MARK_LIST_SORT + delete[] g_mark_list_copy; + g_mark_list_copy = new_mark_list_copy; +#endif //PARALLEL_MARK_LIST_SORT + mark_list_size = new_mark_list_size; + } + else + { + delete[] new_mark_list; +#ifdef PARALLEL_MARK_LIST_SORT + delete[] new_mark_list_copy; +#endif //PARALLEL_MARK_LIST_SORT + } + +#else //MULTIPLE_HEAPS + uint8_t** new_mark_list = make_mark_list(new_mark_list_size); + if (new_mark_list != nullptr) + { + delete[] mark_list; + g_mark_list = new_mark_list; + mark_list_size = new_mark_list_size; + } +#endif //MULTIPLE_HEAPS +} #endif //MARK_LIST class seg_free_spaces @@ -10315,7 +10368,7 @@ gc_heap::init_semi_shared() #ifdef MARK_LIST #ifdef MULTIPLE_HEAPS - mark_list_size = min (300*1024, max (8192, soh_segment_size/(2*10*32))); + mark_list_size = min (100*1024, max (8192, soh_segment_size/(2*10*32))); g_mark_list = make_mark_list (mark_list_size*n_heaps); min_balance_threshold = alloc_quantum_balance_units * CLR_SIZE * 2; @@ -22213,7 +22266,10 @@ void gc_heap::plan_phase (int condemned_gen_number) (mark_list_index - &mark_list[0]), ((mark_list_end - &mark_list[0])))); if (mark_list_index >= (mark_list_end + 1)) + { mark_list_index = mark_list_end + 1; + mark_list_overflow = true; + } #else dprintf (3, ("mark_list length: %Id", (mark_list_index - &mark_list[0]))); @@ -37151,6 +37207,12 @@ void gc_heap::do_post_gc() #else record_interesting_info_per_heap(); #endif //MULTIPLE_HEAPS + if (mark_list_overflow) + { + grow_mark_list(); + mark_list_overflow = false; + } + record_global_mechanisms(); #endif //GC_CONFIG_DRIVEN } diff --git a/src/coreclr/src/gc/gcpriv.h b/src/coreclr/src/gc/gcpriv.h index 93951a9b496987..6389a216097922 100644 --- a/src/coreclr/src/gc/gcpriv.h +++ b/src/coreclr/src/gc/gcpriv.h @@ -2895,6 +2895,11 @@ class gc_heap #endif #endif //MULTIPLE_HEAPS +#ifdef MARK_LIST + 
PER_HEAP_ISOLATED + void grow_mark_list(); +#endif //MARK_LIST + #ifdef BACKGROUND_GC PER_HEAP @@ -3759,13 +3764,11 @@ class gc_heap PER_HEAP_ISOLATED size_t mark_list_size; - PER_HEAP - uint8_t** mark_list_end; + PER_HEAP_ISOLATED + bool mark_list_overflow; -#if defined(TARGET_AMD64) && defined(TARGET_WINDOWS) && defined(MULTIPLE_HEAPS) PER_HEAP - uint32_t* mark_list_index_32; -#endif //defined(TARGET_AMD64) && defined(TARGET_WINDOWS) && defined(MULTIPLE_HEAPS) + uint8_t** mark_list_end; PER_HEAP uint8_t** mark_list_index; From 7e43b76bd6597adf950925fa8c18ef8130894644 Mon Sep 17 00:00:00 2001 From: Peter Sollich Date: Tue, 23 Jun 2020 11:41:41 +0200 Subject: [PATCH 12/31] Integrate new version including AVX512 code path. --- src/coreclr/src/gc/CMakeLists.txt | 3 +- src/coreclr/src/gc/defs.h | 45 + src/coreclr/src/gc/gc.cpp | 28 +- src/coreclr/src/gc/gcsvr.cpp | 6 +- src/coreclr/src/gc/gcwks.cpp | 6 +- .../{vxsort.cpp => machine_traits.avx2.cpp} | 7 +- src/coreclr/src/gc/machine_traits.avx2.h | 244 ++ src/coreclr/src/gc/machine_traits.avx512.h | 217 ++ src/coreclr/src/gc/machine_traits.h | 36 + src/coreclr/src/gc/smallsort/bitonic_gen.py | 491 ---- .../bitonic_sort.AVX2.double.generated.h | 1492 ---------- .../bitonic_sort.AVX2.float.generated.h | 1532 ---------- .../bitonic_sort.AVX2.int32_t.generated.h | 1532 ---------- .../bitonic_sort.AVX2.int64_t.generated.cpp | 26 + .../bitonic_sort.AVX2.int64_t.generated.h | 2461 ++++++++--------- .../bitonic_sort.AVX2.uint32_t.generated.cpp | 26 + .../bitonic_sort.AVX2.uint32_t.generated.h | 2345 ++++++++-------- .../bitonic_sort.AVX2.uint64_t.generated.h | 1540 ----------- .../bitonic_sort.AVX512.int64_t.generated.cpp | 26 + .../bitonic_sort.AVX512.int64_t.generated.h | 1344 +++++++++ ...bitonic_sort.AVX512.uint32_t.generated.cpp | 26 + .../bitonic_sort.AVX512.uint32_t.generated.h | 1384 +++++++++ src/coreclr/src/gc/smallsort/bitonic_sort.h | 9 +- src/coreclr/src/gc/smallsort/codegen/avx2.py | 485 ++++ .../src/gc/smallsort/codegen/avx512.py | 485 ++++ .../src/gc/smallsort/codegen/bitonic_gen.py | 105 + .../src/gc/smallsort/codegen/bitonic_isa.py | 67 + src/coreclr/src/gc/smallsort/codegen/utils.py | 19 + src/coreclr/src/gc/vxsort.h | 519 ++-- src/coreclr/src/gc/vxsort_targets_disable.h | 8 + .../src/gc/vxsort_targets_enable_avx2.h | 8 + .../src/gc/vxsort_targets_enable_avx512.h | 8 + 32 files changed, 7160 insertions(+), 9370 deletions(-) create mode 100644 src/coreclr/src/gc/defs.h rename src/coreclr/src/gc/{vxsort.cpp => machine_traits.avx2.cpp} (99%) create mode 100644 src/coreclr/src/gc/machine_traits.avx2.h create mode 100644 src/coreclr/src/gc/machine_traits.avx512.h create mode 100644 src/coreclr/src/gc/machine_traits.h delete mode 100644 src/coreclr/src/gc/smallsort/bitonic_gen.py delete mode 100644 src/coreclr/src/gc/smallsort/bitonic_sort.AVX2.double.generated.h delete mode 100644 src/coreclr/src/gc/smallsort/bitonic_sort.AVX2.float.generated.h delete mode 100644 src/coreclr/src/gc/smallsort/bitonic_sort.AVX2.int32_t.generated.h create mode 100644 src/coreclr/src/gc/smallsort/bitonic_sort.AVX2.int64_t.generated.cpp create mode 100644 src/coreclr/src/gc/smallsort/bitonic_sort.AVX2.uint32_t.generated.cpp delete mode 100644 src/coreclr/src/gc/smallsort/bitonic_sort.AVX2.uint64_t.generated.h create mode 100644 src/coreclr/src/gc/smallsort/bitonic_sort.AVX512.int64_t.generated.cpp create mode 100644 src/coreclr/src/gc/smallsort/bitonic_sort.AVX512.int64_t.generated.h create mode 100644 
src/coreclr/src/gc/smallsort/bitonic_sort.AVX512.uint32_t.generated.cpp create mode 100644 src/coreclr/src/gc/smallsort/bitonic_sort.AVX512.uint32_t.generated.h create mode 100644 src/coreclr/src/gc/smallsort/codegen/avx2.py create mode 100644 src/coreclr/src/gc/smallsort/codegen/avx512.py create mode 100644 src/coreclr/src/gc/smallsort/codegen/bitonic_gen.py create mode 100644 src/coreclr/src/gc/smallsort/codegen/bitonic_isa.py create mode 100644 src/coreclr/src/gc/smallsort/codegen/utils.py create mode 100644 src/coreclr/src/gc/vxsort_targets_disable.h create mode 100644 src/coreclr/src/gc/vxsort_targets_enable_avx2.h create mode 100644 src/coreclr/src/gc/vxsort_targets_enable_avx512.h diff --git a/src/coreclr/src/gc/CMakeLists.txt b/src/coreclr/src/gc/CMakeLists.txt index 6d9f87f4e1fd19..ba4758f8ca82de 100644 --- a/src/coreclr/src/gc/CMakeLists.txt +++ b/src/coreclr/src/gc/CMakeLists.txt @@ -36,7 +36,8 @@ if(CLR_CMAKE_HOST_UNIX) else() set ( GC_SOURCES ${GC_SOURCES} - windows/gcenv.windows.cpp) + windows/gcenv.windows.cpp +) endif(CLR_CMAKE_HOST_UNIX) if (CLR_CMAKE_TARGET_WIN32) diff --git a/src/coreclr/src/gc/defs.h b/src/coreclr/src/gc/defs.h new file mode 100644 index 00000000000000..8902ffe5e61f91 --- /dev/null +++ b/src/coreclr/src/gc/defs.h @@ -0,0 +1,45 @@ +#ifndef VXSORT_DEFS_H +#define VXSORT_DEFS_H + +#if _MSC_VER +#ifdef _M_X86 +#define ARCH_X86 +#endif +#ifdef _M_X64 +#define ARCH_X64 +#endif +#ifdef _M_ARM64 +#define ARCH_ARM +#endif +#else +#ifdef __i386__ +#define ARCH_X86 +#endif +#ifdef __amd64__ +#define ARCH_X64 +#endif +#ifdef __arm__ +#define ARCH_ARM +#endif +#endif + +#ifdef _MSC_VER +#ifdef __clang__ +#define mess_up_cmov() +#define INLINE __attribute__((always_inline)) +#define NOINLINE __attribute__((noinline)) +#else +// MSVC +#include +#define mess_up_cmov() _ReadBarrier(); +#define INLINE __forceinline +#define NOINLINE __declspec(noinline) +#endif +#else +// GCC + Clang +#define mess_up_cmov() +#define INLINE __attribute__((always_inline)) +#define NOINLINE __attribute__((noinline)) +#endif + +#endif // VXSORT_DEFS_H diff --git a/src/coreclr/src/gc/gc.cpp b/src/coreclr/src/gc/gc.cpp index 3d4832f8976bc4..eb32d8e9e2e650 100644 --- a/src/coreclr/src/gc/gc.cpp +++ b/src/coreclr/src/gc/gc.cpp @@ -2084,7 +2084,7 @@ uint8_t* tree_search (uint8_t* tree, uint8_t* old_address); #ifdef USE_INTROSORT #define _sort introsort::sort #elif defined(USE_VXSORT) -#define _sort vxsort +#define _sort do_vxsort namespace std { template @@ -2116,13 +2116,15 @@ namespace std }; } +#include "machine_traits.avx2.h" +#include "machine_traits.avx512.h" #include "vxsort.h" #ifdef USE_VXSORT -void vxsort(uint8_t** low, uint8_t** high, unsigned int depth) +void do_vxsort(uint8_t** low, uint8_t** high, unsigned int depth) { - // auto sorter = gcsort::vxsort(); - auto sorter = gcsort::vxsort(); +// auto sorter = vxsort::cvxsort(); + auto sorter = vxsort::cvxsort(); sorter.sort((int64_t*)low, (int64_t*)high); #ifdef _DEBUG for (uint8_t** p = low; p < high; p++) @@ -2131,10 +2133,10 @@ void vxsort(uint8_t** low, uint8_t** high, unsigned int depth) } #endif } -void vxsort(uint32_t* low, uint32_t* high, unsigned int depth) +void do_vxsort(uint32_t* low, uint32_t* high, unsigned int depth) { - // auto sorter = gcsort::vxsort(); - auto sorter = gcsort::vxsort(); +// auto sorter = vxsort::cvxsort(); + auto sorter = vxsort::cvxsort(); sorter.sort(low, high); #ifdef _DEBUG for (uint32_t* p = low; p < high; p++) @@ -8809,19 +8811,19 @@ void gc_heap::combine_mark_lists() #endif // 
PARALLEL_MARK_LIST_SORT #endif //MULTIPLE_HEAPS -void gc_heap::grow_mark_list() +void gc_heap::grow_mark_list () { - size_t new_mark_list_size = min(mark_list_size * 2, 1000 * 1024); + size_t new_mark_list_size = min (mark_list_size * 2, 1000 * 1024); if (new_mark_list_size == mark_list_size) return; #ifdef MULTIPLE_HEAPS - uint8_t** new_mark_list = make_mark_list(new_mark_list_size * n_heaps); + uint8_t** new_mark_list = make_mark_list (new_mark_list_size * n_heaps); #ifdef PARALLEL_MARK_LIST_SORT - uint8_t** new_mark_list_copy = make_mark_list(new_mark_list_size * n_heaps); + uint8_t** new_mark_list_copy = make_mark_list (new_mark_list_size * n_heaps); #ifdef BIT_MAP_SORT - memset(new_mark_list_copy, 0, new_mark_list_size * n_heaps * sizeof(new_mark_list_copy[0])); + memset (new_mark_list_copy, 0, new_mark_list_size * n_heaps * sizeof(new_mark_list_copy[0])); #endif //BIT_MAP_SORT #endif //PARALLEL_MARK_LIST_SORT @@ -8848,7 +8850,7 @@ void gc_heap::grow_mark_list() } #else //MULTIPLE_HEAPS - uint8_t** new_mark_list = make_mark_list(new_mark_list_size); + uint8_t** new_mark_list = make_mark_list (new_mark_list_size); if (new_mark_list != nullptr) { delete[] mark_list; diff --git a/src/coreclr/src/gc/gcsvr.cpp b/src/coreclr/src/gc/gcsvr.cpp index a6a17d64f7549f..30f0f8c7fe3976 100644 --- a/src/coreclr/src/gc/gcsvr.cpp +++ b/src/coreclr/src/gc/gcsvr.cpp @@ -25,7 +25,11 @@ namespace SVR { #include "gcimpl.h" #include "gc.cpp" #ifdef USE_VXSORT -#include "vxsort.cpp" +#include "machine_traits.avx2.cpp" +#include "smallsort/bitonic_sort.AVX2.uint32_t.generated.cpp" +#include "smallsort/bitonic_sort.AVX2.int64_t.generated.cpp" +#include "smallsort/bitonic_sort.AVX512.uint32_t.generated.cpp" +#include "smallsort/bitonic_sort.AVX512.int64_t.generated.cpp" #endif //USE_VXSORT } diff --git a/src/coreclr/src/gc/gcwks.cpp b/src/coreclr/src/gc/gcwks.cpp index 6529156dabf74f..5b32db8503e900 100644 --- a/src/coreclr/src/gc/gcwks.cpp +++ b/src/coreclr/src/gc/gcwks.cpp @@ -25,7 +25,11 @@ namespace WKS { #include "gcimpl.h" #include "gc.cpp" #ifdef USE_VXSORT -#include "vxsort.cpp" +#include "machine_traits.avx2.cpp" +#include "smallsort/bitonic_sort.AVX2.uint32_t.generated.cpp" +#include "smallsort/bitonic_sort.AVX2.int64_t.generated.cpp" +#include "smallsort/bitonic_sort.AVX512.uint32_t.generated.cpp" +#include "smallsort/bitonic_sort.AVX512.int64_t.generated.cpp" #endif //USE_VXSORT } diff --git a/src/coreclr/src/gc/vxsort.cpp b/src/coreclr/src/gc/machine_traits.avx2.cpp similarity index 99% rename from src/coreclr/src/gc/vxsort.cpp rename to src/coreclr/src/gc/machine_traits.avx2.cpp index c7953f02229de1..4b5a9219c1fb7a 100644 --- a/src/coreclr/src/gc/vxsort.cpp +++ b/src/coreclr/src/gc/machine_traits.avx2.cpp @@ -1,7 +1,8 @@ -#include "vxsort.h" -#include +//#include -namespace gcsort { +#include "machine_traits.avx2.h" + +namespace vxsort { alignas(128) const int8_t perm_table_64[128] = { 0, 1, 2, 3, 4, 5, 6, 7, // 0b0000 (0) diff --git a/src/coreclr/src/gc/machine_traits.avx2.h b/src/coreclr/src/gc/machine_traits.avx2.h new file mode 100644 index 00000000000000..2f0368d9e39999 --- /dev/null +++ b/src/coreclr/src/gc/machine_traits.avx2.h @@ -0,0 +1,244 @@ +// +// Created by dans on 6/1/20. 
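// This header supplies the AVX2 specializations of vxsort_machine_traits: the
// sort core in vxsort.h is written once against load_vec / store_vec /
// get_vec_pivot / get_cmpgt_mask / partition_vector (plus store_compress_vec
// where supported), and each element type / ISA pair fills those in with the
// matching intrinsics. A rough sketch of how the core consumes a traits
// specialization, assuming the template is parameterized on element type and
// vector_machine as machine_traits.h suggests (the real partition loop also
// handles alignment and writes to both ends of the output):

template <typename T, vxsort::vector_machine M>
void partition_vector_sketch(typename vxsort::vxsort_machine_traits<T, M>::TV* src, T pivot)
{
    using MT = vxsort::vxsort_machine_traits<T, M>;
    typename MT::TV data = MT::load_vec(src);            // one vector of keys
    typename MT::TV p    = MT::get_vec_pivot(pivot);     // broadcast the pivot
    typename MT::TMASK m = MT::get_cmpgt_mask(data, p);  // lanes where key > pivot
    // AVX2: shuffle smaller-than-pivot lanes to the front via perm_table_32/64;
    // AVX512 reports supports_compress_writes() and uses store_compress_vec instead.
    MT::store_vec(src, MT::partition_vector(data, (int)m));
}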
+// + +#ifndef VXSORT_MACHINE_TRAITS_AVX2_H +#define VXSORT_MACHINE_TRAITS_AVX2_H + +#include "vxsort_targets_enable_avx2.h" + +#include +//#include +#include + +#include "defs.h" +#include "machine_traits.h" + +#define i2d _mm256_castsi256_pd +#define d2i _mm256_castpd_si256 +#define i2s _mm256_castsi256_ps +#define s2i _mm256_castps_si256 +#define s2d _mm256_castps_pd +#define d2s _mm256_castpd_ps + +namespace vxsort { +extern const int8_t perm_table_64[128]; +extern const int8_t perm_table_32[2048]; + +void unsupported_operation() +{ + assert(!"operation is unsupported"); + GCToOSInterface::DebugBreak(); +} + +template <> +class vxsort_machine_traits { + public: + typedef __m256i TV; + typedef uint32_t TMASK; + + static constexpr bool supports_compress_writes() { return false; } + + static INLINE TV load_vec(TV* p) { + return _mm256_lddqu_si256(p); + } + + static INLINE void store_vec(TV* ptr, TV v) { + _mm256_storeu_si256(ptr, v); + } + + static void store_compress_vec(TV* ptr, TV v, TMASK mask) { unsupported_operation(); } + + static INLINE TV partition_vector(TV v, int mask) { + assert(mask >= 0); + assert(mask <= 255); + return s2i(_mm256_permutevar8x32_ps(i2s(v), _mm256_cvtepu8_epi32(_mm_loadu_si128((__m128i*)(perm_table_32 + mask * 8))))); + } + + static INLINE TV get_vec_pivot(int32_t pivot) { + return _mm256_set1_epi32(pivot); + } + static INLINE TMASK get_cmpgt_mask(TV a, TV b) { + return _mm256_movemask_ps(i2s(_mm256_cmpgt_epi32(a, b))); + } +}; + +template <> +class vxsort_machine_traits { + public: + typedef __m256i TV; + typedef uint32_t TMASK; + + static constexpr bool supports_compress_writes() { return false; } + + static INLINE TV load_vec(TV* p) { + return _mm256_lddqu_si256(p); + } + + static INLINE void store_vec(TV* ptr, TV v) { + _mm256_storeu_si256(ptr, v); + } + + static void store_compress_vec(TV* ptr, TV v, TMASK mask) { unsupported_operation(); } + + static INLINE TV partition_vector(TV v, int mask) { + assert(mask >= 0); + assert(mask <= 255); + return s2i(_mm256_permutevar8x32_ps(i2s(v), _mm256_cvtepu8_epi32(_mm_loadu_si128((__m128i*)(perm_table_32 + mask * 8))))); + } + + static INLINE TV get_vec_pivot(uint32_t pivot) { + return _mm256_set1_epi32(pivot); + } + static INLINE TMASK get_cmpgt_mask(TV a, TV b) { + __m256i top_bit = _mm256_set1_epi32(1U << 31); + return _mm256_movemask_ps(i2s(_mm256_cmpgt_epi32(_mm256_xor_si256(top_bit, a), _mm256_xor_si256(top_bit, b)))); + } +}; + +template <> +class vxsort_machine_traits { + public: + typedef __m256 TV; + typedef uint32_t TMASK; + + static constexpr bool supports_compress_writes() { return false; } + + static INLINE TV load_vec(TV* p) { + return _mm256_loadu_ps((float *)p); + } + + static INLINE void store_vec(TV* ptr, TV v) { + _mm256_storeu_ps((float *) ptr, v); + } + + static void store_compress_vec(TV* ptr, TV v, TMASK mask) { unsupported_operation(); } + + static INLINE TV partition_vector(TV v, int mask) { + assert(mask >= 0); + assert(mask <= 255); + return _mm256_permutevar8x32_ps(v, _mm256_cvtepu8_epi32(_mm_loadu_si128((__m128i*)(perm_table_32 + mask * 8)))); + } + + static INLINE TV get_vec_pivot(float pivot) { + return _mm256_set1_ps(pivot); + } + + static INLINE TMASK get_cmpgt_mask(TV a, TV b) { + /// 0x0E: Greater-than (ordered, signaling) \n + /// 0x1E: Greater-than (ordered, non-signaling) + return _mm256_movemask_ps(_mm256_cmp_ps(a, b, _CMP_GT_OS)); + } +}; + +template <> +class vxsort_machine_traits { + public: + typedef __m256i TV; + typedef uint32_t TMASK; + + static constexpr bool 
supports_compress_writes() { return false; } + + static INLINE TV load_vec(TV* p) { + return _mm256_lddqu_si256(p); + } + + static INLINE void store_vec(TV* ptr, TV v) { + _mm256_storeu_si256(ptr, v); + } + + static void store_compress_vec(TV* ptr, TV v, TMASK mask) { unsupported_operation(); } + + static INLINE TV partition_vector(TV v, int mask) { + assert(mask >= 0); + assert(mask <= 15); + return s2i(_mm256_permutevar8x32_ps(i2s(v), _mm256_cvtepu8_epi32(_mm_loadu_si128((__m128i*)(perm_table_64 + mask * 8))))); + } + + static INLINE TV get_vec_pivot(int64_t pivot) { + return _mm256_set1_epi64x(pivot); + } + static INLINE TMASK get_cmpgt_mask(TV a, TV b) { + return _mm256_movemask_pd(i2d(_mm256_cmpgt_epi64(a, b))); + } +}; + +template <> +class vxsort_machine_traits { + public: + typedef __m256i TV; + typedef uint32_t TMASK; + + static constexpr bool supports_compress_writes() { return false; } + + static INLINE TV load_vec(TV* p) { + return _mm256_lddqu_si256(p); + } + + static INLINE void store_vec(TV* ptr, TV v) { + _mm256_storeu_si256(ptr, v); + } + + static void store_compress_vec(TV* ptr, TV v, TMASK mask) { unsupported_operation(); } + + static INLINE TV partition_vector(TV v, int mask) { + assert(mask >= 0); + assert(mask <= 15); + return s2i(_mm256_permutevar8x32_ps(i2s(v), _mm256_cvtepu8_epi32(_mm_loadu_si128((__m128i*)(perm_table_64 + mask * 8))))); + } + static INLINE TV get_vec_pivot(int64_t pivot) { + return _mm256_set1_epi64x(pivot); + } + static INLINE TMASK get_cmpgt_mask(TV a, TV b) { + __m256i top_bit = _mm256_set1_epi64x(1LLU << 63); + return _mm256_movemask_pd(i2d(_mm256_cmpgt_epi64(_mm256_xor_si256(top_bit, a), _mm256_xor_si256(top_bit, b)))); + } +}; + +template <> +class vxsort_machine_traits { + public: + typedef __m256d TV; + typedef uint32_t TMASK; + + static constexpr bool supports_compress_writes() { return false; } + + static INLINE TV load_vec(TV* p) { + return _mm256_loadu_pd((double *) p); + } + + static INLINE void store_vec(TV* ptr, TV v) { + _mm256_storeu_pd((double *) ptr, v); + } + + static void store_compress_vec(TV* ptr, TV v, TMASK mask) { unsupported_operation(); } + + static INLINE TV partition_vector(TV v, int mask) { + assert(mask >= 0); + assert(mask <= 15); + return s2d(_mm256_permutevar8x32_ps(d2s(v), _mm256_cvtepu8_epi32(_mm_loadu_si128((__m128i*)(perm_table_64 + mask * 8))))); + } + + static INLINE TV get_vec_pivot(double pivot) { + return _mm256_set1_pd(pivot); + } + static INLINE TMASK get_cmpgt_mask(TV a, TV b) { + /// 0x0E: Greater-than (ordered, signaling) \n + /// 0x1E: Greater-than (ordered, non-signaling) + return _mm256_movemask_pd(_mm256_cmp_pd(a, b, _CMP_GT_OS)); + } +}; + +} + +#undef i2d +#undef d2i +#undef i2s +#undef s2i +#undef s2d +#undef d2s + +#include "vxsort_targets_disable.h" + + +#endif // VXSORT_VXSORT_AVX2_H diff --git a/src/coreclr/src/gc/machine_traits.avx512.h b/src/coreclr/src/gc/machine_traits.avx512.h new file mode 100644 index 00000000000000..fcf15c6974ef89 --- /dev/null +++ b/src/coreclr/src/gc/machine_traits.avx512.h @@ -0,0 +1,217 @@ +// +// Created by dans on 6/1/20. 
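// The AVX2 unsigned traits above XOR both comparands with the top bit before
// a *signed* compare, because AVX2 offers no unsigned 32/64-bit compare;
// flipping the sign bit maps unsigned order onto signed order (0 becomes
// INT64_MIN, UINT64_MAX becomes INT64_MAX). The AVX512 specializations in
// this header can use _mm512_cmp_epu32_mask / _mm512_cmp_epu64_mask directly.
// The same identity in scalar form, as a sketch:

#include <cstdint>

static inline bool unsigned_gt_via_signed(uint64_t a, uint64_t b)
{
    const uint64_t top = 1ULL << 63;
    // (a ^ top) > (b ^ top) as signed  <=>  a > b as unsigned
    return (int64_t)(a ^ top) > (int64_t)(b ^ top);
}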
+// + +#ifndef VXSORT_MACHINE_TRAITS_AVX512_H +#define VXSORT_MACHINE_TRAITS_AVX512_H + +#include "vxsort_targets_enable_avx512.h" + +#include +#include "defs.h" +#include "machine_traits.h" + +namespace vxsort { +template <> +class vxsort_machine_traits { + public: + typedef __m512i TV; + typedef __mmask16 TMASK; + + static constexpr bool supports_compress_writes() { return true; } + + static INLINE TV load_vec(TV* p) { + return _mm512_loadu_si512(p); + } + + static INLINE void store_vec(TV* ptr, TV v) { + _mm512_storeu_si512(ptr, v); + } + + // Will never be called + static INLINE TV partition_vector(TV v, int mask) { return v; } + + + static void store_compress_vec(TV *ptr, TV v, TMASK mask) { + _mm512_mask_compressstoreu_epi32(ptr, mask, v); + } + + static INLINE TV get_vec_pivot(int32_t pivot) { + return _mm512_set1_epi32(pivot); + } + + static INLINE TMASK get_cmpgt_mask(TV a, TV b) { + return _mm512_cmp_epi32_mask(a, b, _MM_CMPINT_GT); + } +}; + +template <> +class vxsort_machine_traits { + public: + typedef __m512i TV; + typedef __mmask16 TMASK; + + static constexpr bool supports_compress_writes() { return true; } + + static INLINE TV load_vec(TV* p) { + return _mm512_loadu_si512(p); + } + + static INLINE void store_vec(TV* ptr, TV v) { + _mm512_storeu_si512(ptr, v); + } + + // Will never be called + static INLINE TV partition_vector(TV v, int mask) { return v; } + + + static void store_compress_vec(TV *ptr, TV v, TMASK mask) { + _mm512_mask_compressstoreu_epi32(ptr, mask, v); + } + + static INLINE TV get_vec_pivot(uint32_t pivot) { + return _mm512_set1_epi32(pivot); + } + + static INLINE TMASK get_cmpgt_mask(TV a, TV b) { + return _mm512_cmp_epu32_mask(a, b, _MM_CMPINT_GT); + } +}; + +template <> +class vxsort_machine_traits { + public: + typedef __m512 TV; + typedef __mmask16 TMASK; + + static constexpr bool supports_compress_writes() { return true; } + + static INLINE TV load_vec(TV* p) { + return _mm512_loadu_ps(p); + } + + static INLINE void store_vec(TV* ptr, TV v) { + _mm512_storeu_ps(ptr, v); + } + + // Will never be called + static INLINE TV partition_vector(TV v, int mask) { return v; } + + + static void store_compress_vec(TV *ptr, TV v, TMASK mask) { + _mm512_mask_compressstoreu_ps(ptr, mask, v); + } + + static INLINE TV get_vec_pivot(float pivot) { + return _mm512_set1_ps(pivot); + } + + static INLINE TMASK get_cmpgt_mask(TV a, TV b) { + return _mm512_cmp_ps_mask(a, b, _CMP_GT_OS); + } +}; + +template <> +class vxsort_machine_traits { + public: + typedef __m512i TV; + typedef __mmask8 TMASK; + + static constexpr bool supports_compress_writes() { return true; } + + static INLINE TV load_vec(TV* p) { + return _mm512_loadu_si512(p); + } + + static INLINE void store_vec(TV* ptr, TV v) { + _mm512_storeu_si512(ptr, v); + } + + // Will never be called + static INLINE TV partition_vector(TV v, int mask) { return v; } + + + static void store_compress_vec(TV *ptr, TV v, TMASK mask) { + _mm512_mask_compressstoreu_epi64(ptr, mask, v); + } + + static INLINE TV get_vec_pivot(int64_t pivot) { + return _mm512_set1_epi64(pivot); + } + + static INLINE TMASK get_cmpgt_mask(TV a, TV b) { + return _mm512_cmp_epi64_mask(a, b, _MM_CMPINT_GT); + } +}; + +template <> +class vxsort_machine_traits { + public: + typedef __m512i TV; + typedef __mmask8 TMASK; + + static constexpr bool supports_compress_writes() { return true; } + + static INLINE TV load_vec(TV* p) { + return _mm512_loadu_si512(p); + } + + static INLINE void store_vec(TV* ptr, TV v) { + _mm512_storeu_si512(ptr, v); + } + + // Will 
never be called + static INLINE TV partition_vector(TV v, int mask) { return v; } + + + static void store_compress_vec(TV *ptr, TV v, TMASK mask) { + _mm512_mask_compressstoreu_epi64(ptr, mask, v); + } + + static INLINE TV get_vec_pivot(uint64_t pivot) { + return _mm512_set1_epi64(pivot); + } + + static INLINE TMASK get_cmpgt_mask(TV a, TV b) { + return _mm512_cmp_epu64_mask(a, b, _MM_CMPINT_GT); + } +}; + +template <> +class vxsort_machine_traits { + public: + typedef __m512d TV; + typedef __mmask8 TMASK; + + static constexpr bool supports_compress_writes() { return true; } + + static INLINE TV load_vec(TV* p) { + return _mm512_loadu_pd(p); + } + + static INLINE void store_vec(TV* ptr, TV v) { + _mm512_storeu_pd(ptr, v); + } + + // Will never be called + static INLINE TV partition_vector(TV v, int mask) { return v; } + + + static void store_compress_vec(TV *ptr, TV v, TMASK mask) { + _mm512_mask_compressstoreu_pd(ptr, mask, v); + } + + static INLINE TV get_vec_pivot(double pivot) { + return _mm512_set1_pd(pivot); + } + + static INLINE TMASK get_cmpgt_mask(TV a, TV b) { + return _mm512_cmp_pd_mask(a, b, _CMP_GT_OS); + } +}; + +} + +#include "vxsort_targets_disable.h" + +#endif // VXSORT_VXSORT_AVX512_H diff --git a/src/coreclr/src/gc/machine_traits.h b/src/coreclr/src/gc/machine_traits.h new file mode 100644 index 00000000000000..4a9288b68ed477 --- /dev/null +++ b/src/coreclr/src/gc/machine_traits.h @@ -0,0 +1,36 @@ +// +// Created by dans on 6/1/20. +// + +#ifndef VXSORT_MACHINE_TRAITS_H +#define VXSORT_MACHINE_TRAITS_H + +//#include + +namespace vxsort { + +enum vector_machine { + NONE, + AVX2, + AVX512, + SVE, +}; + +template +struct vxsort_machine_traits { + public: + typedef int TV; + typedef int TMASK; + + static constexpr bool supports_compress_writes(); + + static TV load_vec(TV* ptr); + static void store_vec(TV* ptr, TV v); + static void store_compress_vec(TV* ptr, TV v, TMASK mask); + static TV partition_vector(TV v, int mask); + static TV get_vec_pivot(T pivot); + static TMASK get_cmpgt_mask(TV a, TV b); +}; +} + +#endif // VXSORT_MACHINE_TRAITS_H diff --git a/src/coreclr/src/gc/smallsort/bitonic_gen.py b/src/coreclr/src/gc/smallsort/bitonic_gen.py deleted file mode 100644 index 047fbd5fa5bf89..00000000000000 --- a/src/coreclr/src/gc/smallsort/bitonic_gen.py +++ /dev/null @@ -1,491 +0,0 @@ -#!/usr/bin/env python3 -import argparse -from datetime import datetime -from enum import Enum - -max_bitonic_sort_verctors = 16 - - -def next_power_of_2(v): - v = v - 1 - v |= v >> 1 - v |= v >> 2 - v |= v >> 4 - v |= v >> 8 - v |= v >> 16 - v = v + 1 - return int(v) - - -largest_merge_variant_needed = next_power_of_2(max_bitonic_sort_verctors) / 2; - -## types to function suffix -bitonic_type_map = { - "int32_t": "__m256i", - "uint32_t": "__m256i", - "float": "__m256", - "int64_t": "__m256i", - "uint64_t": "__m256i", - "double": "__m256d", -} - -bitonic_size_map = { - "int32_t": 4, - "uint32_t": 4, - "float": 4, - "int64_t": 8, - "uint64_t": 8, - "double": 8, -} - -bitonic_types = bitonic_size_map.keys() - - -def i2d(v, t): - if t == "double": - return v - elif t == "float": - return f"s2d({v})" - return f"i2d({v})" - -def i2s(v, t): - if t == "double": - raise Exception("WTF") - elif t == "float": - return f"i2s({v})" - return v - - -def d2i(v, t): - if t == "double": - return v - elif t == "float": - return f"d2s({v})" - return f"d2i({v})" - -def s2i(v, t): - if t == "double": - raise Exception("WTF") - elif t == "float": - return f"s2i({v})" - return v - - - -def 
generate_param_list(start, numParams): - return str.join(", ", list(map(lambda p: f"d{p:02d}", range(start, start + numParams)))) - - -def generate_param_def_list(numParams, nativeType): - return str.join(", ", list(map(lambda p: f"{bitonic_type_map[nativeType]}& d{p:02d}", range(1, numParams + 1)))) - - -def generate_shuffle_X1(v, t): - if bitonic_size_map[t] == 4: - return i2s(f"_mm256_shuffle_epi32({s2i(v, t)}, 0xB1)", t) - elif bitonic_size_map[t] == 8: - return d2i(f"_mm256_shuffle_pd({i2d(v, t)}, {i2d(v, t)}, 0x5)", t) - - -def generate_shuffle_X2(v, t): - if bitonic_size_map[t] == 4: - return i2s(f"_mm256_shuffle_epi32({s2i(v, t)}, 0x4E)", t) - elif bitonic_size_map[t] == 8: - return d2i(f"_mm256_permute4x64_pd({i2d(v, t)}, 0x4E)", t) - - -def generate_shuffle_XR(v, t): - if bitonic_size_map[t] == 4: - return i2s(f"_mm256_shuffle_epi32({s2i(v, t)}, 0x1B)", t) - elif bitonic_size_map[t] == 8: - return d2i(f"_mm256_permute4x64_pd({i2d(v, t)}, 0x1B)", t) - - -def generate_blend_B1(v1, v2, t, ascending): - if bitonic_size_map[t] == 4: - if ascending: - return i2s(f"_mm256_blend_epi32({s2i(v1, t)}, {s2i(v2, t)}, 0xAA)", t) - else: - return i2s(f"_mm256_blend_epi32({s2i(v2, t)}, {s2i(v1, t)}, 0xAA)", t) - elif bitonic_size_map[t] == 8: - if ascending: - return d2i(f"_mm256_blend_pd({i2d(v1, t)}, {i2d(v2, t)}, 0xA)", t) - else: - return d2i(f"_mm256_blend_pd({i2d(v2, t)}, {i2d(v1, t)}, 0xA)", t) - - -def generate_blend_B2(v1, v2, t, ascending): - if bitonic_size_map[t] == 4: - if ascending: - return i2s(f"_mm256_blend_epi32({s2i(v1, t)}, {s2i(v2, t)}, 0xCC)", t) - else: - return i2s(f"_mm256_blend_epi32({s2i(v2, t)}, {s2i(v1, t)}, 0xCC)", t) - elif bitonic_size_map[t] == 8: - if ascending: - return d2i(f"_mm256_blend_pd({i2d(v1, t)}, {i2d(v2, t)}, 0xC)", t) - else: - return d2i(f"_mm256_blend_pd({i2d(v2, t)}, {i2d(v1, t)}, 0xC)", t) - - -def generate_blend_B4(v1, v2, t, ascending): - if bitonic_size_map[t] == 4: - if ascending: - return i2s(f"_mm256_blend_epi32({s2i(v1, t)}, {s2i(v2, t)}, 0xF0)", t) - else: - return i2s(f"_mm256_blend_epi32({s2i(v2, t)}, {s2i(v1, t)}, 0xF0)", t) - elif bitonic_size_map[t] == 8: - raise Exception("WTF") - - -def generate_cross(v, t): - if bitonic_size_map[t] == 4: - return d2i(f"_mm256_permute4x64_pd({i2d(v, t)}, 0x4E)", t) - elif bitonic_size_map[t] == 8: - raise Exception("WTF") - - -def generate_reverse(v, t): - if bitonic_size_map[t] == 4: - v = f"_mm256_shuffle_epi32({s2i(v, t)}, 0x1B)" - return d2i(f"_mm256_permute4x64_pd({i2d(v, 'int32_t')}, 0x4E)", t) - elif bitonic_size_map[t] == 8: - return d2i(f"_mm256_permute4x64_pd({i2d(v, t)}, 0x1B)", t) - - -def crappity_crap_crap(v1, v2, t): - if t == "int64_t": - return f"cmp = _mm256_cmpgt_epi64({v1}, {v2});" - elif t == "uint64_t": - return f"cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, {v1}), _mm256_xor_si256(topBit, {v2}));" - - return "" - - -def generate_min(v1, v2, t): - if t == "int32_t": - return f"_mm256_min_epi32({v1}, {v2})" - elif t == "uint32_t": - return f"_mm256_min_epu32({v1}, {v2})" - elif t == "float": - return f"_mm256_min_ps({v1}, {v2})" - elif t == "int64_t": - return d2i(f"_mm256_blendv_pd({i2d(v1, t)}, {i2d(v2, t)}, i2d(cmp))", t) - elif t == "uint64_t": - return d2i(f"_mm256_blendv_pd({i2d(v1, t)}, {i2d(v2, t)}, i2d(cmp))", t) - elif t == "double": - return f"_mm256_min_pd({v1}, {v2})" - - -def generate_max(v1, v2, t): - if t == "int32_t": - return f"_mm256_max_epi32({v1}, {v2})" - elif t == "uint32_t": - return f"_mm256_max_epu32({v1}, {v2})" - elif t == "float": - return 
f"_mm256_max_ps({v1}, {v2})" - elif t == "int64_t": - return d2i(f"_mm256_blendv_pd({i2d(v2, t)}, {i2d(v1, t)}, i2d(cmp))", t) - elif t == "uint64_t": - return d2i(f"_mm256_blendv_pd({i2d(v2, t)}, {i2d(v1, t)}, i2d(cmp))", t) - elif t == "double": - return f"_mm256_max_pd({v1}, {v2})" - - -def generate_1v_basic_sorters(f, type, ascending): - maybe_cmp = lambda: ", cmp" if (type == "int64_t" or type == "uint64_t") else "" - maybe_topbit = lambda: f"\n {bitonic_type_map[type]} topBit = _mm256_set1_epi64x(1LLU << 63);" if ( - type == "uint64_t") else "" - - suffix = "ascending" if ascending else "descending" - - s = f""" static INLINE void sort_01v_{suffix}({generate_param_def_list(1, type)}) {{ - {bitonic_type_map[type]} min, max, s{maybe_cmp()};{maybe_topbit()} - - s = {generate_shuffle_X1("d01", type)}; - {crappity_crap_crap("s", "d01", type)} - min = {generate_min("s", "d01", type)}; - max = {generate_max("s", "d01", type)}; - d01 = {generate_blend_B1("min", "max", type, ascending)}; - - s = {generate_shuffle_XR("d01", type)}; - {crappity_crap_crap("s", "d01", type)} - min = {generate_min("s", "d01", type)}; - max = {generate_max("s", "d01", type)}; - d01 = {generate_blend_B2("min", "max", type, ascending)}; - - s = {generate_shuffle_X1("d01", type)}; - {crappity_crap_crap("s", "d01", type)} - min = {generate_min("s", "d01", type)}; - max = {generate_max("s", "d01", type)}; - d01 = {generate_blend_B1("min", "max", type, ascending)};""" - - print(s, file=f) - - if bitonic_size_map[type] == 4: - s = f""" - s = {generate_reverse("d01", type)}; - min = {generate_min("s", "d01", type)}; - max = {generate_max("s", "d01", type)}; - d01 = {generate_blend_B4("min", "max", type, ascending)}; - - s = {generate_shuffle_X2("d01", type)}; - min = {generate_min("s", "d01", type)}; - max = {generate_max("s", "d01", type)}; - d01 = {generate_blend_B2("min", "max", type, ascending)}; - - s = {generate_shuffle_X1("d01", type)}; - min = {generate_min("s", "d01", type)}; - max = {generate_max("s", "d01", type)}; - d01 = {generate_blend_B1("min", "max", type, ascending)};""" - print(s, file=f) - print("}", file=f) - - -def generate_1v_merge_sorters(f, type, ascending): - maybe_cmp = lambda: ", cmp" if (type == "int64_t" or type == "uint64_t") else "" - maybe_topbit = lambda: f"\n {bitonic_type_map[type]} topBit = _mm256_set1_epi64x(1LLU << 63);" if ( - type == "uint64_t") else "" - - suffix = "ascending" if ascending else "descending" - - s = f""" static INLINE void sort_01v_merge_{suffix}({generate_param_def_list(1, type)}) {{ - {bitonic_type_map[type]} min, max, s{maybe_cmp()};{maybe_topbit()}""" - print(s, file=f) - - if bitonic_size_map[type] == 4: - s = f""" - s = {generate_cross("d01", type)}; - min = {generate_min("s", "d01", type)}; - max = {generate_max("s", "d01", type)}; - d01 = {generate_blend_B4("min", "max", type, ascending)};""" - print(s, file=f) - - s = f""" - s = {generate_shuffle_X2("d01", type)}; - {crappity_crap_crap("s", "d01", type)} - min = {generate_min("s", "d01", type)}; - max = {generate_max("s", "d01", type)}; - d01 = {generate_blend_B2("min", "max", type, ascending)}; - - s = {generate_shuffle_X1("d01", type)}; - {crappity_crap_crap("s", "d01", type)} - min = {generate_min("s", "d01", type)}; - max = {generate_max("s", "d01", type)}; - d01 = {generate_blend_B1("min", "max", type, ascending)};""" - - print(s, file=f) - print(" }", file=f) - - -def generate_1v_sorters(f, type, ascending): - generate_1v_basic_sorters(f, type, ascending) - generate_1v_merge_sorters(f, type, ascending) 
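// A note on the generator above: generate_min()/generate_max() emit packed
// min/max intrinsics for the 32-bit integer and floating-point types, but
// AVX2 has no 64-bit integer min/max, so for (u)int64_t the generator falls
// back to a compare plus two blends - which is why crappity_crap_crap()
// threads a `cmp` temporary through every 64-bit exchange. The emulation the
// generated code performs, written out by hand as a sketch:

#include <immintrin.h>

static inline void minmax_epi64(__m256i& lo, __m256i& hi)
{
    __m256i a = lo, b = hi;
    __m256i cmp = _mm256_cmpgt_epi64(a, b);          // per-lane a > b (signed)
    // blendv picks its second operand wherever the mask lane's top bit is set
    lo = _mm256_castpd_si256(_mm256_blendv_pd(
             _mm256_castsi256_pd(a), _mm256_castsi256_pd(b),
             _mm256_castsi256_pd(cmp)));             // min(a, b) per lane
    hi = _mm256_castpd_si256(_mm256_blendv_pd(
             _mm256_castsi256_pd(b), _mm256_castsi256_pd(a),
             _mm256_castsi256_pd(cmp)));             // max(a, b) per lane
}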
- - -def generate_compounded_sorters(f, width, type, ascending): - maybe_cmp = lambda: ", cmp" if (type == "int64_t" or type == "uint64_t") else "" - maybe_topbit = lambda: f"\n {bitonic_type_map[type]} topBit = _mm256_set1_epi64x(1LLU << 63);" if ( - type == "uint64_t") else "" - - w1 = int(next_power_of_2(width) / 2) - w2 = int(width - w1) - - suffix = "ascending" if ascending else "descending" - rev_suffix = "descending" if ascending else "ascending" - - s = f""" static INLINE void sort_{width:02d}v_{suffix}({generate_param_def_list(width, type)}) {{ - {bitonic_type_map[type]} tmp{maybe_cmp()};{maybe_topbit()} - - sort_{w1:02d}v_{suffix}({generate_param_list(1, w1)}); - sort_{w2:02d}v_{rev_suffix}({generate_param_list(w1 + 1, w2)});""" - - print(s, file=f) - - for r in range(w1 + 1, width + 1): - x = w1 + 1 - (r - w1) - s = f""" - tmp = d{r:02d}; - {crappity_crap_crap(f"d{x:02d}", f"d{r:02d}", type)} - d{r:02d} = {generate_max(f"d{x:02d}", f"d{r:02d}", type)}; - d{x:02d} = {generate_min(f"d{x:02d}", "tmp", type)};""" - - print(s, file=f) - - s = f""" - sort_{w1:02d}v_merge_{suffix}({generate_param_list(1, w1)}); - sort_{w2:02d}v_merge_{suffix}({generate_param_list(w1 + 1, w2)});""" - print(s, file=f) - print(" }", file=f) - - -def generate_compounded_mergers(f, width, type, ascending): - maybe_cmp = lambda: ", cmp" if (type == "int64_t" or type == "uint64_t") else "" - maybe_topbit = lambda: f"\n {bitonic_type_map[type]} topBit = _mm256_set1_epi64x(1LLU << 63);" if ( - type == "uint64_t") else "" - - w1 = int(next_power_of_2(width) / 2) - w2 = int(width - w1) - - suffix = "ascending" if ascending else "descending" - rev_suffix = "descending" if ascending else "ascending" - - s = f""" static INLINE void sort_{width:02d}v_merge_{suffix}({generate_param_def_list(width, type)}) {{ - {bitonic_type_map[type]} tmp{maybe_cmp()};{maybe_topbit()}""" - print(s, file=f) - - for r in range(w1 + 1, width + 1): - x = r - w1 - s = f""" - tmp = d{x:02d}; - {crappity_crap_crap(f"d{r:02d}", f"d{x:02d}", type)} - d{x:02d} = {generate_min(f"d{r:02d}", f"d{x:02d}", type)}; - {crappity_crap_crap(f"d{r:02d}", "tmp", type)} - d{r:02d} = {generate_max(f"d{r:02d}", "tmp", type)};""" - print(s, file=f) - - s = f""" - sort_{w1:02d}v_merge_{suffix}({generate_param_list(1, w1)}); - sort_{w2:02d}v_merge_{suffix}({generate_param_list(w1 + 1, w2)});""" - print(s, file=f) - print(" }", file=f) - - -def get_load_intrinsic(type, v, offset): - if type == "double": - return f"_mm256_loadu_pd(({type} const *) ((__m256d const *) {v} + {offset}))" - if type == "float": - return f"_mm256_loadu_ps(({type} const *) ((__m256 const *) {v} + {offset}))" - return f"_mm256_lddqu_si256((__m256i const *) {v} + {offset});" - - -def get_store_intrinsic(type, ptr, offset, value): - if type == "double": - return f"_mm256_storeu_pd(({type} *) ((__m256d *) {ptr} + {offset}), {value})" - if type == "float": - return f"_mm256_storeu_ps(({type} *) ((__m256 *) {ptr} + {offset}), {value})" - return f"_mm256_storeu_si256((__m256i *) {ptr} + {offset}, {value})" - - -def generate_entry_points(f, type): - for m in range(1, max_bitonic_sort_verctors + 1): - s = f""" -static NOINLINE void sort_{m:02d}v({type} *ptr) {{""" - print(s, file=f) - - for l in range(0, m): - s = f" {bitonic_type_map[type]} d{l + 1:02d} = {get_load_intrinsic(type, 'ptr', l)};" - print(s, file=f) - - s = f" sort_{m:02d}v_ascending({generate_param_list(1, m)});" - print(s, file=f) - - for l in range(0, m): - s = f" {get_store_intrinsic(type, 'ptr', l, f'd{l + 1:02d}')};" - print(s, 
file=f) - - print("}", file=f) - - -def generate_master_entry_point(f, type): - s = f""" static void sort({type} *ptr, size_t length) {{ - const int N = {int(32 / bitonic_size_map[type])}; - - switch(length / N) {{""" - print(s, file=f) - - for m in range(1, max_bitonic_sort_verctors + 1): - s = f" case {m}: sort_{m:02d}v(ptr); break;" - print(s, file=f) - print(" }", file=f) - print("}", file=f) - pass - - -def autogenerated_blabber(): - return f"""///////////////////////////////////////////////////////////////////////////// -//// -// This file was auto-generated by a tool at {datetime.now().strftime("%F %H:%M:%S")} -// -// It is recommended you DO NOT directly edit this file but instead edit -// the code-generator that generated this source file instead. -/////////////////////////////////////////////////////////////////////////////""" - - -def generate_per_type(f, type, opts): - s = f"""{autogenerated_blabber()} - -#ifndef BITONIC_SORT_{str(opts.vector_isa).upper()}_{type.upper()}_H -#define BITONIC_SORT_{str - (opts.vector_isa).upper()}_{type.upper()}_H - -#include -#include "bitonic_sort.h" - -#ifdef _MSC_VER - // MSVC - #define INLINE __forceinline - #define NOINLINE __declspec(noinline) -#else - // GCC + Clang - #define INLINE __attribute__((always_inline)) - #define NOINLINE __attribute__((noinline)) -#endif - -#define i2d _mm256_castsi256_pd -#define d2i _mm256_castpd_si256 -#define i2s _mm256_castsi256_ps -#define s2i _mm256_castps_si256 -#define s2d _mm256_castps_pd -#define d2s _mm256_castpd_ps - -namespace gcsort {{ -namespace smallsort {{ -template<> struct bitonic<{type}> {{ -public: -""" - print(s, file=f) - generate_1v_sorters(f, type, ascending=True) - generate_1v_sorters(f, type, ascending=False) - for width in range(2, max_bitonic_sort_verctors + 1): - generate_compounded_sorters(f, width, type, ascending=True) - generate_compounded_sorters(f, width, type, ascending=False) - if width <= largest_merge_variant_needed: - generate_compounded_mergers(f, width, type, ascending=True) - generate_compounded_mergers(f, width, type, ascending=False) - - generate_entry_points(f, type) - generate_master_entry_point(f, type) - print("};\n}\n}\n#endif", file=f) - - -class Language(Enum): - csharp = 'csharp' - cpp = 'cpp' - rust = 'rust' - - def __str__(self): - return self.value - -class VectorISA(Enum): - AVX2 = 'AVX2' - AVX512 = 'AVX512' - SVE = 'SVE' - - def __str__(self): - return self.value - - -def generate_all_types(): - parser = argparse.ArgumentParser() - parser.add_argument("--language", type=Language, choices=list(Language), help="select output language: csharp/cpp/rust") - parser.add_argument("--vector-isa", type=VectorISA, choices=list(VectorISA), help="select vector isa: AVX2/AVX512/SVE") - - opts = parser.parse_args() - - for type in bitonic_types: - with open(f"bitonic_sort.{opts.vector_isa}.{type}.generated.h", "w") as f: - generate_per_type(f, type, opts) - - -if __name__ == '__main__': - generate_all_types() diff --git a/src/coreclr/src/gc/smallsort/bitonic_sort.AVX2.double.generated.h b/src/coreclr/src/gc/smallsort/bitonic_sort.AVX2.double.generated.h deleted file mode 100644 index 4c9fa8e4a86571..00000000000000 --- a/src/coreclr/src/gc/smallsort/bitonic_sort.AVX2.double.generated.h +++ /dev/null @@ -1,1492 +0,0 @@ -///////////////////////////////////////////////////////////////////////////// -//// -// This file was auto-generated by a tool at 2020-05-31 19:46:17 -// -// It is recommended you DO NOT directly edit this file but instead edit -// the code-generator 
that generated this source file instead. -///////////////////////////////////////////////////////////////////////////// - -#ifndef BITONIC_SORT_AVX2_DOUBLE_H -#define BITONIC_SORT_AVX2_DOUBLE_H - -#include -#include "bitonic_sort.h" - -#ifdef _MSC_VER - // MSVC - #define INLINE __forceinline - #define NOINLINE __declspec(noinline) -#else - // GCC + Clang - #define INLINE __attribute__((always_inline)) - #define NOINLINE __attribute__((noinline)) -#endif - -#define i2d _mm256_castsi256_pd -#define d2i _mm256_castpd_si256 -#define i2s _mm256_castsi256_ps -#define s2i _mm256_castps_si256 -#define s2d _mm256_castps_pd -#define d2s _mm256_castpd_ps - -namespace gcsort { -namespace smallsort { -template<> struct bitonic { -public: - - static INLINE void sort_01v_ascending(__m256d& d01) { - __m256d min, max, s; - - s = _mm256_shuffle_pd(d01, d01, 0x5); - - min = _mm256_min_pd(s, d01); - max = _mm256_max_pd(s, d01); - d01 = _mm256_blend_pd(min, max, 0xA); - - s = _mm256_permute4x64_pd(d01, 0x1B); - - min = _mm256_min_pd(s, d01); - max = _mm256_max_pd(s, d01); - d01 = _mm256_blend_pd(min, max, 0xC); - - s = _mm256_shuffle_pd(d01, d01, 0x5); - - min = _mm256_min_pd(s, d01); - max = _mm256_max_pd(s, d01); - d01 = _mm256_blend_pd(min, max, 0xA); -} - static INLINE void sort_01v_merge_ascending(__m256d& d01) { - __m256d min, max, s; - - s = _mm256_permute4x64_pd(d01, 0x4E); - - min = _mm256_min_pd(s, d01); - max = _mm256_max_pd(s, d01); - d01 = _mm256_blend_pd(min, max, 0xC); - - s = _mm256_shuffle_pd(d01, d01, 0x5); - - min = _mm256_min_pd(s, d01); - max = _mm256_max_pd(s, d01); - d01 = _mm256_blend_pd(min, max, 0xA); - } - static INLINE void sort_01v_descending(__m256d& d01) { - __m256d min, max, s; - - s = _mm256_shuffle_pd(d01, d01, 0x5); - - min = _mm256_min_pd(s, d01); - max = _mm256_max_pd(s, d01); - d01 = _mm256_blend_pd(max, min, 0xA); - - s = _mm256_permute4x64_pd(d01, 0x1B); - - min = _mm256_min_pd(s, d01); - max = _mm256_max_pd(s, d01); - d01 = _mm256_blend_pd(max, min, 0xC); - - s = _mm256_shuffle_pd(d01, d01, 0x5); - - min = _mm256_min_pd(s, d01); - max = _mm256_max_pd(s, d01); - d01 = _mm256_blend_pd(max, min, 0xA); -} - static INLINE void sort_01v_merge_descending(__m256d& d01) { - __m256d min, max, s; - - s = _mm256_permute4x64_pd(d01, 0x4E); - - min = _mm256_min_pd(s, d01); - max = _mm256_max_pd(s, d01); - d01 = _mm256_blend_pd(max, min, 0xC); - - s = _mm256_shuffle_pd(d01, d01, 0x5); - - min = _mm256_min_pd(s, d01); - max = _mm256_max_pd(s, d01); - d01 = _mm256_blend_pd(max, min, 0xA); - } - static INLINE void sort_02v_ascending(__m256d& d01, __m256d& d02) { - __m256d tmp; - - sort_01v_ascending(d01); - sort_01v_descending(d02); - - tmp = d02; - - d02 = _mm256_max_pd(d01, d02); - d01 = _mm256_min_pd(d01, tmp); - - sort_01v_merge_ascending(d01); - sort_01v_merge_ascending(d02); - } - static INLINE void sort_02v_descending(__m256d& d01, __m256d& d02) { - __m256d tmp; - - sort_01v_descending(d01); - sort_01v_ascending(d02); - - tmp = d02; - - d02 = _mm256_max_pd(d01, d02); - d01 = _mm256_min_pd(d01, tmp); - - sort_01v_merge_descending(d01); - sort_01v_merge_descending(d02); - } - static INLINE void sort_02v_merge_ascending(__m256d& d01, __m256d& d02) { - __m256d tmp; - - tmp = d01; - - d01 = _mm256_min_pd(d02, d01); - - d02 = _mm256_max_pd(d02, tmp); - - sort_01v_merge_ascending(d01); - sort_01v_merge_ascending(d02); - } - static INLINE void sort_02v_merge_descending(__m256d& d01, __m256d& d02) { - __m256d tmp; - - tmp = d01; - - d01 = _mm256_min_pd(d02, d01); - - d02 = 
_mm256_max_pd(d02, tmp); - - sort_01v_merge_descending(d01); - sort_01v_merge_descending(d02); - } - static INLINE void sort_03v_ascending(__m256d& d01, __m256d& d02, __m256d& d03) { - __m256d tmp; - - sort_02v_ascending(d01, d02); - sort_01v_descending(d03); - - tmp = d03; - - d03 = _mm256_max_pd(d02, d03); - d02 = _mm256_min_pd(d02, tmp); - - sort_02v_merge_ascending(d01, d02); - sort_01v_merge_ascending(d03); - } - static INLINE void sort_03v_descending(__m256d& d01, __m256d& d02, __m256d& d03) { - __m256d tmp; - - sort_02v_descending(d01, d02); - sort_01v_ascending(d03); - - tmp = d03; - - d03 = _mm256_max_pd(d02, d03); - d02 = _mm256_min_pd(d02, tmp); - - sort_02v_merge_descending(d01, d02); - sort_01v_merge_descending(d03); - } - static INLINE void sort_03v_merge_ascending(__m256d& d01, __m256d& d02, __m256d& d03) { - __m256d tmp; - - tmp = d01; - - d01 = _mm256_min_pd(d03, d01); - - d03 = _mm256_max_pd(d03, tmp); - - sort_02v_merge_ascending(d01, d02); - sort_01v_merge_ascending(d03); - } - static INLINE void sort_03v_merge_descending(__m256d& d01, __m256d& d02, __m256d& d03) { - __m256d tmp; - - tmp = d01; - - d01 = _mm256_min_pd(d03, d01); - - d03 = _mm256_max_pd(d03, tmp); - - sort_02v_merge_descending(d01, d02); - sort_01v_merge_descending(d03); - } - static INLINE void sort_04v_ascending(__m256d& d01, __m256d& d02, __m256d& d03, __m256d& d04) { - __m256d tmp; - - sort_02v_ascending(d01, d02); - sort_02v_descending(d03, d04); - - tmp = d03; - - d03 = _mm256_max_pd(d02, d03); - d02 = _mm256_min_pd(d02, tmp); - - tmp = d04; - - d04 = _mm256_max_pd(d01, d04); - d01 = _mm256_min_pd(d01, tmp); - - sort_02v_merge_ascending(d01, d02); - sort_02v_merge_ascending(d03, d04); - } - static INLINE void sort_04v_descending(__m256d& d01, __m256d& d02, __m256d& d03, __m256d& d04) { - __m256d tmp; - - sort_02v_descending(d01, d02); - sort_02v_ascending(d03, d04); - - tmp = d03; - - d03 = _mm256_max_pd(d02, d03); - d02 = _mm256_min_pd(d02, tmp); - - tmp = d04; - - d04 = _mm256_max_pd(d01, d04); - d01 = _mm256_min_pd(d01, tmp); - - sort_02v_merge_descending(d01, d02); - sort_02v_merge_descending(d03, d04); - } - static INLINE void sort_04v_merge_ascending(__m256d& d01, __m256d& d02, __m256d& d03, __m256d& d04) { - __m256d tmp; - - tmp = d01; - - d01 = _mm256_min_pd(d03, d01); - - d03 = _mm256_max_pd(d03, tmp); - - tmp = d02; - - d02 = _mm256_min_pd(d04, d02); - - d04 = _mm256_max_pd(d04, tmp); - - sort_02v_merge_ascending(d01, d02); - sort_02v_merge_ascending(d03, d04); - } - static INLINE void sort_04v_merge_descending(__m256d& d01, __m256d& d02, __m256d& d03, __m256d& d04) { - __m256d tmp; - - tmp = d01; - - d01 = _mm256_min_pd(d03, d01); - - d03 = _mm256_max_pd(d03, tmp); - - tmp = d02; - - d02 = _mm256_min_pd(d04, d02); - - d04 = _mm256_max_pd(d04, tmp); - - sort_02v_merge_descending(d01, d02); - sort_02v_merge_descending(d03, d04); - } - static INLINE void sort_05v_ascending(__m256d& d01, __m256d& d02, __m256d& d03, __m256d& d04, __m256d& d05) { - __m256d tmp; - - sort_04v_ascending(d01, d02, d03, d04); - sort_01v_descending(d05); - - tmp = d05; - - d05 = _mm256_max_pd(d04, d05); - d04 = _mm256_min_pd(d04, tmp); - - sort_04v_merge_ascending(d01, d02, d03, d04); - sort_01v_merge_ascending(d05); - } - static INLINE void sort_05v_descending(__m256d& d01, __m256d& d02, __m256d& d03, __m256d& d04, __m256d& d05) { - __m256d tmp; - - sort_04v_descending(d01, d02, d03, d04); - sort_01v_ascending(d05); - - tmp = d05; - - d05 = _mm256_max_pd(d04, d05); - d04 = _mm256_min_pd(d04, tmp); - - 
sort_04v_merge_descending(d01, d02, d03, d04); - sort_01v_merge_descending(d05); - } - static INLINE void sort_05v_merge_ascending(__m256d& d01, __m256d& d02, __m256d& d03, __m256d& d04, __m256d& d05) { - __m256d tmp; - - tmp = d01; - - d01 = _mm256_min_pd(d05, d01); - - d05 = _mm256_max_pd(d05, tmp); - - sort_04v_merge_ascending(d01, d02, d03, d04); - sort_01v_merge_ascending(d05); - } - static INLINE void sort_05v_merge_descending(__m256d& d01, __m256d& d02, __m256d& d03, __m256d& d04, __m256d& d05) { - __m256d tmp; - - tmp = d01; - - d01 = _mm256_min_pd(d05, d01); - - d05 = _mm256_max_pd(d05, tmp); - - sort_04v_merge_descending(d01, d02, d03, d04); - sort_01v_merge_descending(d05); - } - static INLINE void sort_06v_ascending(__m256d& d01, __m256d& d02, __m256d& d03, __m256d& d04, __m256d& d05, __m256d& d06) { - __m256d tmp; - - sort_04v_ascending(d01, d02, d03, d04); - sort_02v_descending(d05, d06); - - tmp = d05; - - d05 = _mm256_max_pd(d04, d05); - d04 = _mm256_min_pd(d04, tmp); - - tmp = d06; - - d06 = _mm256_max_pd(d03, d06); - d03 = _mm256_min_pd(d03, tmp); - - sort_04v_merge_ascending(d01, d02, d03, d04); - sort_02v_merge_ascending(d05, d06); - } - static INLINE void sort_06v_descending(__m256d& d01, __m256d& d02, __m256d& d03, __m256d& d04, __m256d& d05, __m256d& d06) { - __m256d tmp; - - sort_04v_descending(d01, d02, d03, d04); - sort_02v_ascending(d05, d06); - - tmp = d05; - - d05 = _mm256_max_pd(d04, d05); - d04 = _mm256_min_pd(d04, tmp); - - tmp = d06; - - d06 = _mm256_max_pd(d03, d06); - d03 = _mm256_min_pd(d03, tmp); - - sort_04v_merge_descending(d01, d02, d03, d04); - sort_02v_merge_descending(d05, d06); - } - static INLINE void sort_06v_merge_ascending(__m256d& d01, __m256d& d02, __m256d& d03, __m256d& d04, __m256d& d05, __m256d& d06) { - __m256d tmp; - - tmp = d01; - - d01 = _mm256_min_pd(d05, d01); - - d05 = _mm256_max_pd(d05, tmp); - - tmp = d02; - - d02 = _mm256_min_pd(d06, d02); - - d06 = _mm256_max_pd(d06, tmp); - - sort_04v_merge_ascending(d01, d02, d03, d04); - sort_02v_merge_ascending(d05, d06); - } - static INLINE void sort_06v_merge_descending(__m256d& d01, __m256d& d02, __m256d& d03, __m256d& d04, __m256d& d05, __m256d& d06) { - __m256d tmp; - - tmp = d01; - - d01 = _mm256_min_pd(d05, d01); - - d05 = _mm256_max_pd(d05, tmp); - - tmp = d02; - - d02 = _mm256_min_pd(d06, d02); - - d06 = _mm256_max_pd(d06, tmp); - - sort_04v_merge_descending(d01, d02, d03, d04); - sort_02v_merge_descending(d05, d06); - } - static INLINE void sort_07v_ascending(__m256d& d01, __m256d& d02, __m256d& d03, __m256d& d04, __m256d& d05, __m256d& d06, __m256d& d07) { - __m256d tmp; - - sort_04v_ascending(d01, d02, d03, d04); - sort_03v_descending(d05, d06, d07); - - tmp = d05; - - d05 = _mm256_max_pd(d04, d05); - d04 = _mm256_min_pd(d04, tmp); - - tmp = d06; - - d06 = _mm256_max_pd(d03, d06); - d03 = _mm256_min_pd(d03, tmp); - - tmp = d07; - - d07 = _mm256_max_pd(d02, d07); - d02 = _mm256_min_pd(d02, tmp); - - sort_04v_merge_ascending(d01, d02, d03, d04); - sort_03v_merge_ascending(d05, d06, d07); - } - static INLINE void sort_07v_descending(__m256d& d01, __m256d& d02, __m256d& d03, __m256d& d04, __m256d& d05, __m256d& d06, __m256d& d07) { - __m256d tmp; - - sort_04v_descending(d01, d02, d03, d04); - sort_03v_ascending(d05, d06, d07); - - tmp = d05; - - d05 = _mm256_max_pd(d04, d05); - d04 = _mm256_min_pd(d04, tmp); - - tmp = d06; - - d06 = _mm256_max_pd(d03, d06); - d03 = _mm256_min_pd(d03, tmp); - - tmp = d07; - - d07 = _mm256_max_pd(d02, d07); - d02 = _mm256_min_pd(d02, tmp); - - 
sort_04v_merge_descending(d01, d02, d03, d04); - sort_03v_merge_descending(d05, d06, d07); - } - static INLINE void sort_07v_merge_ascending(__m256d& d01, __m256d& d02, __m256d& d03, __m256d& d04, __m256d& d05, __m256d& d06, __m256d& d07) { - __m256d tmp; - - tmp = d01; - - d01 = _mm256_min_pd(d05, d01); - - d05 = _mm256_max_pd(d05, tmp); - - tmp = d02; - - d02 = _mm256_min_pd(d06, d02); - - d06 = _mm256_max_pd(d06, tmp); - - tmp = d03; - - d03 = _mm256_min_pd(d07, d03); - - d07 = _mm256_max_pd(d07, tmp); - - sort_04v_merge_ascending(d01, d02, d03, d04); - sort_03v_merge_ascending(d05, d06, d07); - } - static INLINE void sort_07v_merge_descending(__m256d& d01, __m256d& d02, __m256d& d03, __m256d& d04, __m256d& d05, __m256d& d06, __m256d& d07) { - __m256d tmp; - - tmp = d01; - - d01 = _mm256_min_pd(d05, d01); - - d05 = _mm256_max_pd(d05, tmp); - - tmp = d02; - - d02 = _mm256_min_pd(d06, d02); - - d06 = _mm256_max_pd(d06, tmp); - - tmp = d03; - - d03 = _mm256_min_pd(d07, d03); - - d07 = _mm256_max_pd(d07, tmp); - - sort_04v_merge_descending(d01, d02, d03, d04); - sort_03v_merge_descending(d05, d06, d07); - } - static INLINE void sort_08v_ascending(__m256d& d01, __m256d& d02, __m256d& d03, __m256d& d04, __m256d& d05, __m256d& d06, __m256d& d07, __m256d& d08) { - __m256d tmp; - - sort_04v_ascending(d01, d02, d03, d04); - sort_04v_descending(d05, d06, d07, d08); - - tmp = d05; - - d05 = _mm256_max_pd(d04, d05); - d04 = _mm256_min_pd(d04, tmp); - - tmp = d06; - - d06 = _mm256_max_pd(d03, d06); - d03 = _mm256_min_pd(d03, tmp); - - tmp = d07; - - d07 = _mm256_max_pd(d02, d07); - d02 = _mm256_min_pd(d02, tmp); - - tmp = d08; - - d08 = _mm256_max_pd(d01, d08); - d01 = _mm256_min_pd(d01, tmp); - - sort_04v_merge_ascending(d01, d02, d03, d04); - sort_04v_merge_ascending(d05, d06, d07, d08); - } - static INLINE void sort_08v_descending(__m256d& d01, __m256d& d02, __m256d& d03, __m256d& d04, __m256d& d05, __m256d& d06, __m256d& d07, __m256d& d08) { - __m256d tmp; - - sort_04v_descending(d01, d02, d03, d04); - sort_04v_ascending(d05, d06, d07, d08); - - tmp = d05; - - d05 = _mm256_max_pd(d04, d05); - d04 = _mm256_min_pd(d04, tmp); - - tmp = d06; - - d06 = _mm256_max_pd(d03, d06); - d03 = _mm256_min_pd(d03, tmp); - - tmp = d07; - - d07 = _mm256_max_pd(d02, d07); - d02 = _mm256_min_pd(d02, tmp); - - tmp = d08; - - d08 = _mm256_max_pd(d01, d08); - d01 = _mm256_min_pd(d01, tmp); - - sort_04v_merge_descending(d01, d02, d03, d04); - sort_04v_merge_descending(d05, d06, d07, d08); - } - static INLINE void sort_08v_merge_ascending(__m256d& d01, __m256d& d02, __m256d& d03, __m256d& d04, __m256d& d05, __m256d& d06, __m256d& d07, __m256d& d08) { - __m256d tmp; - - tmp = d01; - - d01 = _mm256_min_pd(d05, d01); - - d05 = _mm256_max_pd(d05, tmp); - - tmp = d02; - - d02 = _mm256_min_pd(d06, d02); - - d06 = _mm256_max_pd(d06, tmp); - - tmp = d03; - - d03 = _mm256_min_pd(d07, d03); - - d07 = _mm256_max_pd(d07, tmp); - - tmp = d04; - - d04 = _mm256_min_pd(d08, d04); - - d08 = _mm256_max_pd(d08, tmp); - - sort_04v_merge_ascending(d01, d02, d03, d04); - sort_04v_merge_ascending(d05, d06, d07, d08); - } - static INLINE void sort_08v_merge_descending(__m256d& d01, __m256d& d02, __m256d& d03, __m256d& d04, __m256d& d05, __m256d& d06, __m256d& d07, __m256d& d08) { - __m256d tmp; - - tmp = d01; - - d01 = _mm256_min_pd(d05, d01); - - d05 = _mm256_max_pd(d05, tmp); - - tmp = d02; - - d02 = _mm256_min_pd(d06, d02); - - d06 = _mm256_max_pd(d06, tmp); - - tmp = d03; - - d03 = _mm256_min_pd(d07, d03); - - d07 = _mm256_max_pd(d07, tmp); 
- - tmp = d04; - - d04 = _mm256_min_pd(d08, d04); - - d08 = _mm256_max_pd(d08, tmp); - - sort_04v_merge_descending(d01, d02, d03, d04); - sort_04v_merge_descending(d05, d06, d07, d08); - } - static INLINE void sort_09v_ascending(__m256d& d01, __m256d& d02, __m256d& d03, __m256d& d04, __m256d& d05, __m256d& d06, __m256d& d07, __m256d& d08, __m256d& d09) { - __m256d tmp; - - sort_08v_ascending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_01v_descending(d09); - - tmp = d09; - - d09 = _mm256_max_pd(d08, d09); - d08 = _mm256_min_pd(d08, tmp); - - sort_08v_merge_ascending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_01v_merge_ascending(d09); - } - static INLINE void sort_09v_descending(__m256d& d01, __m256d& d02, __m256d& d03, __m256d& d04, __m256d& d05, __m256d& d06, __m256d& d07, __m256d& d08, __m256d& d09) { - __m256d tmp; - - sort_08v_descending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_01v_ascending(d09); - - tmp = d09; - - d09 = _mm256_max_pd(d08, d09); - d08 = _mm256_min_pd(d08, tmp); - - sort_08v_merge_descending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_01v_merge_descending(d09); - } - static INLINE void sort_10v_ascending(__m256d& d01, __m256d& d02, __m256d& d03, __m256d& d04, __m256d& d05, __m256d& d06, __m256d& d07, __m256d& d08, __m256d& d09, __m256d& d10) { - __m256d tmp; - - sort_08v_ascending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_02v_descending(d09, d10); - - tmp = d09; - - d09 = _mm256_max_pd(d08, d09); - d08 = _mm256_min_pd(d08, tmp); - - tmp = d10; - - d10 = _mm256_max_pd(d07, d10); - d07 = _mm256_min_pd(d07, tmp); - - sort_08v_merge_ascending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_02v_merge_ascending(d09, d10); - } - static INLINE void sort_10v_descending(__m256d& d01, __m256d& d02, __m256d& d03, __m256d& d04, __m256d& d05, __m256d& d06, __m256d& d07, __m256d& d08, __m256d& d09, __m256d& d10) { - __m256d tmp; - - sort_08v_descending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_02v_ascending(d09, d10); - - tmp = d09; - - d09 = _mm256_max_pd(d08, d09); - d08 = _mm256_min_pd(d08, tmp); - - tmp = d10; - - d10 = _mm256_max_pd(d07, d10); - d07 = _mm256_min_pd(d07, tmp); - - sort_08v_merge_descending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_02v_merge_descending(d09, d10); - } - static INLINE void sort_11v_ascending(__m256d& d01, __m256d& d02, __m256d& d03, __m256d& d04, __m256d& d05, __m256d& d06, __m256d& d07, __m256d& d08, __m256d& d09, __m256d& d10, __m256d& d11) { - __m256d tmp; - - sort_08v_ascending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_03v_descending(d09, d10, d11); - - tmp = d09; - - d09 = _mm256_max_pd(d08, d09); - d08 = _mm256_min_pd(d08, tmp); - - tmp = d10; - - d10 = _mm256_max_pd(d07, d10); - d07 = _mm256_min_pd(d07, tmp); - - tmp = d11; - - d11 = _mm256_max_pd(d06, d11); - d06 = _mm256_min_pd(d06, tmp); - - sort_08v_merge_ascending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_03v_merge_ascending(d09, d10, d11); - } - static INLINE void sort_11v_descending(__m256d& d01, __m256d& d02, __m256d& d03, __m256d& d04, __m256d& d05, __m256d& d06, __m256d& d07, __m256d& d08, __m256d& d09, __m256d& d10, __m256d& d11) { - __m256d tmp; - - sort_08v_descending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_03v_ascending(d09, d10, d11); - - tmp = d09; - - d09 = _mm256_max_pd(d08, d09); - d08 = _mm256_min_pd(d08, tmp); - - tmp = d10; - - d10 = _mm256_max_pd(d07, d10); - d07 = _mm256_min_pd(d07, tmp); - - tmp = d11; - - d11 = _mm256_max_pd(d06, d11); - d06 = _mm256_min_pd(d06, tmp); - - sort_08v_merge_descending(d01, d02, 
d03, d04, d05, d06, d07, d08); - sort_03v_merge_descending(d09, d10, d11); - } - static INLINE void sort_12v_ascending(__m256d& d01, __m256d& d02, __m256d& d03, __m256d& d04, __m256d& d05, __m256d& d06, __m256d& d07, __m256d& d08, __m256d& d09, __m256d& d10, __m256d& d11, __m256d& d12) { - __m256d tmp; - - sort_08v_ascending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_04v_descending(d09, d10, d11, d12); - - tmp = d09; - - d09 = _mm256_max_pd(d08, d09); - d08 = _mm256_min_pd(d08, tmp); - - tmp = d10; - - d10 = _mm256_max_pd(d07, d10); - d07 = _mm256_min_pd(d07, tmp); - - tmp = d11; - - d11 = _mm256_max_pd(d06, d11); - d06 = _mm256_min_pd(d06, tmp); - - tmp = d12; - - d12 = _mm256_max_pd(d05, d12); - d05 = _mm256_min_pd(d05, tmp); - - sort_08v_merge_ascending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_04v_merge_ascending(d09, d10, d11, d12); - } - static INLINE void sort_12v_descending(__m256d& d01, __m256d& d02, __m256d& d03, __m256d& d04, __m256d& d05, __m256d& d06, __m256d& d07, __m256d& d08, __m256d& d09, __m256d& d10, __m256d& d11, __m256d& d12) { - __m256d tmp; - - sort_08v_descending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_04v_ascending(d09, d10, d11, d12); - - tmp = d09; - - d09 = _mm256_max_pd(d08, d09); - d08 = _mm256_min_pd(d08, tmp); - - tmp = d10; - - d10 = _mm256_max_pd(d07, d10); - d07 = _mm256_min_pd(d07, tmp); - - tmp = d11; - - d11 = _mm256_max_pd(d06, d11); - d06 = _mm256_min_pd(d06, tmp); - - tmp = d12; - - d12 = _mm256_max_pd(d05, d12); - d05 = _mm256_min_pd(d05, tmp); - - sort_08v_merge_descending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_04v_merge_descending(d09, d10, d11, d12); - } - static INLINE void sort_13v_ascending(__m256d& d01, __m256d& d02, __m256d& d03, __m256d& d04, __m256d& d05, __m256d& d06, __m256d& d07, __m256d& d08, __m256d& d09, __m256d& d10, __m256d& d11, __m256d& d12, __m256d& d13) { - __m256d tmp; - - sort_08v_ascending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_05v_descending(d09, d10, d11, d12, d13); - - tmp = d09; - - d09 = _mm256_max_pd(d08, d09); - d08 = _mm256_min_pd(d08, tmp); - - tmp = d10; - - d10 = _mm256_max_pd(d07, d10); - d07 = _mm256_min_pd(d07, tmp); - - tmp = d11; - - d11 = _mm256_max_pd(d06, d11); - d06 = _mm256_min_pd(d06, tmp); - - tmp = d12; - - d12 = _mm256_max_pd(d05, d12); - d05 = _mm256_min_pd(d05, tmp); - - tmp = d13; - - d13 = _mm256_max_pd(d04, d13); - d04 = _mm256_min_pd(d04, tmp); - - sort_08v_merge_ascending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_05v_merge_ascending(d09, d10, d11, d12, d13); - } - static INLINE void sort_13v_descending(__m256d& d01, __m256d& d02, __m256d& d03, __m256d& d04, __m256d& d05, __m256d& d06, __m256d& d07, __m256d& d08, __m256d& d09, __m256d& d10, __m256d& d11, __m256d& d12, __m256d& d13) { - __m256d tmp; - - sort_08v_descending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_05v_ascending(d09, d10, d11, d12, d13); - - tmp = d09; - - d09 = _mm256_max_pd(d08, d09); - d08 = _mm256_min_pd(d08, tmp); - - tmp = d10; - - d10 = _mm256_max_pd(d07, d10); - d07 = _mm256_min_pd(d07, tmp); - - tmp = d11; - - d11 = _mm256_max_pd(d06, d11); - d06 = _mm256_min_pd(d06, tmp); - - tmp = d12; - - d12 = _mm256_max_pd(d05, d12); - d05 = _mm256_min_pd(d05, tmp); - - tmp = d13; - - d13 = _mm256_max_pd(d04, d13); - d04 = _mm256_min_pd(d04, tmp); - - sort_08v_merge_descending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_05v_merge_descending(d09, d10, d11, d12, d13); - } - static INLINE void sort_14v_ascending(__m256d& d01, __m256d& d02, __m256d& d03, __m256d& d04, __m256d& d05, 
__m256d& d06, __m256d& d07, __m256d& d08, __m256d& d09, __m256d& d10, __m256d& d11, __m256d& d12, __m256d& d13, __m256d& d14) { - __m256d tmp; - - sort_08v_ascending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_06v_descending(d09, d10, d11, d12, d13, d14); - - tmp = d09; - - d09 = _mm256_max_pd(d08, d09); - d08 = _mm256_min_pd(d08, tmp); - - tmp = d10; - - d10 = _mm256_max_pd(d07, d10); - d07 = _mm256_min_pd(d07, tmp); - - tmp = d11; - - d11 = _mm256_max_pd(d06, d11); - d06 = _mm256_min_pd(d06, tmp); - - tmp = d12; - - d12 = _mm256_max_pd(d05, d12); - d05 = _mm256_min_pd(d05, tmp); - - tmp = d13; - - d13 = _mm256_max_pd(d04, d13); - d04 = _mm256_min_pd(d04, tmp); - - tmp = d14; - - d14 = _mm256_max_pd(d03, d14); - d03 = _mm256_min_pd(d03, tmp); - - sort_08v_merge_ascending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_06v_merge_ascending(d09, d10, d11, d12, d13, d14); - } - static INLINE void sort_14v_descending(__m256d& d01, __m256d& d02, __m256d& d03, __m256d& d04, __m256d& d05, __m256d& d06, __m256d& d07, __m256d& d08, __m256d& d09, __m256d& d10, __m256d& d11, __m256d& d12, __m256d& d13, __m256d& d14) { - __m256d tmp; - - sort_08v_descending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_06v_ascending(d09, d10, d11, d12, d13, d14); - - tmp = d09; - - d09 = _mm256_max_pd(d08, d09); - d08 = _mm256_min_pd(d08, tmp); - - tmp = d10; - - d10 = _mm256_max_pd(d07, d10); - d07 = _mm256_min_pd(d07, tmp); - - tmp = d11; - - d11 = _mm256_max_pd(d06, d11); - d06 = _mm256_min_pd(d06, tmp); - - tmp = d12; - - d12 = _mm256_max_pd(d05, d12); - d05 = _mm256_min_pd(d05, tmp); - - tmp = d13; - - d13 = _mm256_max_pd(d04, d13); - d04 = _mm256_min_pd(d04, tmp); - - tmp = d14; - - d14 = _mm256_max_pd(d03, d14); - d03 = _mm256_min_pd(d03, tmp); - - sort_08v_merge_descending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_06v_merge_descending(d09, d10, d11, d12, d13, d14); - } - static INLINE void sort_15v_ascending(__m256d& d01, __m256d& d02, __m256d& d03, __m256d& d04, __m256d& d05, __m256d& d06, __m256d& d07, __m256d& d08, __m256d& d09, __m256d& d10, __m256d& d11, __m256d& d12, __m256d& d13, __m256d& d14, __m256d& d15) { - __m256d tmp; - - sort_08v_ascending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_07v_descending(d09, d10, d11, d12, d13, d14, d15); - - tmp = d09; - - d09 = _mm256_max_pd(d08, d09); - d08 = _mm256_min_pd(d08, tmp); - - tmp = d10; - - d10 = _mm256_max_pd(d07, d10); - d07 = _mm256_min_pd(d07, tmp); - - tmp = d11; - - d11 = _mm256_max_pd(d06, d11); - d06 = _mm256_min_pd(d06, tmp); - - tmp = d12; - - d12 = _mm256_max_pd(d05, d12); - d05 = _mm256_min_pd(d05, tmp); - - tmp = d13; - - d13 = _mm256_max_pd(d04, d13); - d04 = _mm256_min_pd(d04, tmp); - - tmp = d14; - - d14 = _mm256_max_pd(d03, d14); - d03 = _mm256_min_pd(d03, tmp); - - tmp = d15; - - d15 = _mm256_max_pd(d02, d15); - d02 = _mm256_min_pd(d02, tmp); - - sort_08v_merge_ascending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_07v_merge_ascending(d09, d10, d11, d12, d13, d14, d15); - } - static INLINE void sort_15v_descending(__m256d& d01, __m256d& d02, __m256d& d03, __m256d& d04, __m256d& d05, __m256d& d06, __m256d& d07, __m256d& d08, __m256d& d09, __m256d& d10, __m256d& d11, __m256d& d12, __m256d& d13, __m256d& d14, __m256d& d15) { - __m256d tmp; - - sort_08v_descending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_07v_ascending(d09, d10, d11, d12, d13, d14, d15); - - tmp = d09; - - d09 = _mm256_max_pd(d08, d09); - d08 = _mm256_min_pd(d08, tmp); - - tmp = d10; - - d10 = _mm256_max_pd(d07, d10); - d07 = _mm256_min_pd(d07, 
tmp); - - tmp = d11; - - d11 = _mm256_max_pd(d06, d11); - d06 = _mm256_min_pd(d06, tmp); - - tmp = d12; - - d12 = _mm256_max_pd(d05, d12); - d05 = _mm256_min_pd(d05, tmp); - - tmp = d13; - - d13 = _mm256_max_pd(d04, d13); - d04 = _mm256_min_pd(d04, tmp); - - tmp = d14; - - d14 = _mm256_max_pd(d03, d14); - d03 = _mm256_min_pd(d03, tmp); - - tmp = d15; - - d15 = _mm256_max_pd(d02, d15); - d02 = _mm256_min_pd(d02, tmp); - - sort_08v_merge_descending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_07v_merge_descending(d09, d10, d11, d12, d13, d14, d15); - } - static INLINE void sort_16v_ascending(__m256d& d01, __m256d& d02, __m256d& d03, __m256d& d04, __m256d& d05, __m256d& d06, __m256d& d07, __m256d& d08, __m256d& d09, __m256d& d10, __m256d& d11, __m256d& d12, __m256d& d13, __m256d& d14, __m256d& d15, __m256d& d16) { - __m256d tmp; - - sort_08v_ascending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_08v_descending(d09, d10, d11, d12, d13, d14, d15, d16); - - tmp = d09; - - d09 = _mm256_max_pd(d08, d09); - d08 = _mm256_min_pd(d08, tmp); - - tmp = d10; - - d10 = _mm256_max_pd(d07, d10); - d07 = _mm256_min_pd(d07, tmp); - - tmp = d11; - - d11 = _mm256_max_pd(d06, d11); - d06 = _mm256_min_pd(d06, tmp); - - tmp = d12; - - d12 = _mm256_max_pd(d05, d12); - d05 = _mm256_min_pd(d05, tmp); - - tmp = d13; - - d13 = _mm256_max_pd(d04, d13); - d04 = _mm256_min_pd(d04, tmp); - - tmp = d14; - - d14 = _mm256_max_pd(d03, d14); - d03 = _mm256_min_pd(d03, tmp); - - tmp = d15; - - d15 = _mm256_max_pd(d02, d15); - d02 = _mm256_min_pd(d02, tmp); - - tmp = d16; - - d16 = _mm256_max_pd(d01, d16); - d01 = _mm256_min_pd(d01, tmp); - - sort_08v_merge_ascending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_08v_merge_ascending(d09, d10, d11, d12, d13, d14, d15, d16); - } - static INLINE void sort_16v_descending(__m256d& d01, __m256d& d02, __m256d& d03, __m256d& d04, __m256d& d05, __m256d& d06, __m256d& d07, __m256d& d08, __m256d& d09, __m256d& d10, __m256d& d11, __m256d& d12, __m256d& d13, __m256d& d14, __m256d& d15, __m256d& d16) { - __m256d tmp; - - sort_08v_descending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_08v_ascending(d09, d10, d11, d12, d13, d14, d15, d16); - - tmp = d09; - - d09 = _mm256_max_pd(d08, d09); - d08 = _mm256_min_pd(d08, tmp); - - tmp = d10; - - d10 = _mm256_max_pd(d07, d10); - d07 = _mm256_min_pd(d07, tmp); - - tmp = d11; - - d11 = _mm256_max_pd(d06, d11); - d06 = _mm256_min_pd(d06, tmp); - - tmp = d12; - - d12 = _mm256_max_pd(d05, d12); - d05 = _mm256_min_pd(d05, tmp); - - tmp = d13; - - d13 = _mm256_max_pd(d04, d13); - d04 = _mm256_min_pd(d04, tmp); - - tmp = d14; - - d14 = _mm256_max_pd(d03, d14); - d03 = _mm256_min_pd(d03, tmp); - - tmp = d15; - - d15 = _mm256_max_pd(d02, d15); - d02 = _mm256_min_pd(d02, tmp); - - tmp = d16; - - d16 = _mm256_max_pd(d01, d16); - d01 = _mm256_min_pd(d01, tmp); - - sort_08v_merge_descending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_08v_merge_descending(d09, d10, d11, d12, d13, d14, d15, d16); - } - -static NOINLINE void sort_01v(double *ptr) { - __m256d d01 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 0)); - sort_01v_ascending(d01); - _mm256_storeu_pd((double *) ((__m256d *) ptr + 0), d01); -} - -static NOINLINE void sort_02v(double *ptr) { - __m256d d01 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 0)); - __m256d d02 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 1)); - sort_02v_ascending(d01, d02); - _mm256_storeu_pd((double *) ((__m256d *) ptr + 0), d01); - _mm256_storeu_pd((double *) ((__m256d *) ptr + 
1), d02); -} - -static NOINLINE void sort_03v(double *ptr) { - __m256d d01 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 0)); - __m256d d02 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 1)); - __m256d d03 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 2)); - sort_03v_ascending(d01, d02, d03); - _mm256_storeu_pd((double *) ((__m256d *) ptr + 0), d01); - _mm256_storeu_pd((double *) ((__m256d *) ptr + 1), d02); - _mm256_storeu_pd((double *) ((__m256d *) ptr + 2), d03); -} - -static NOINLINE void sort_04v(double *ptr) { - __m256d d01 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 0)); - __m256d d02 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 1)); - __m256d d03 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 2)); - __m256d d04 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 3)); - sort_04v_ascending(d01, d02, d03, d04); - _mm256_storeu_pd((double *) ((__m256d *) ptr + 0), d01); - _mm256_storeu_pd((double *) ((__m256d *) ptr + 1), d02); - _mm256_storeu_pd((double *) ((__m256d *) ptr + 2), d03); - _mm256_storeu_pd((double *) ((__m256d *) ptr + 3), d04); -} - -static NOINLINE void sort_05v(double *ptr) { - __m256d d01 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 0)); - __m256d d02 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 1)); - __m256d d03 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 2)); - __m256d d04 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 3)); - __m256d d05 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 4)); - sort_05v_ascending(d01, d02, d03, d04, d05); - _mm256_storeu_pd((double *) ((__m256d *) ptr + 0), d01); - _mm256_storeu_pd((double *) ((__m256d *) ptr + 1), d02); - _mm256_storeu_pd((double *) ((__m256d *) ptr + 2), d03); - _mm256_storeu_pd((double *) ((__m256d *) ptr + 3), d04); - _mm256_storeu_pd((double *) ((__m256d *) ptr + 4), d05); -} - -static NOINLINE void sort_06v(double *ptr) { - __m256d d01 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 0)); - __m256d d02 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 1)); - __m256d d03 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 2)); - __m256d d04 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 3)); - __m256d d05 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 4)); - __m256d d06 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 5)); - sort_06v_ascending(d01, d02, d03, d04, d05, d06); - _mm256_storeu_pd((double *) ((__m256d *) ptr + 0), d01); - _mm256_storeu_pd((double *) ((__m256d *) ptr + 1), d02); - _mm256_storeu_pd((double *) ((__m256d *) ptr + 2), d03); - _mm256_storeu_pd((double *) ((__m256d *) ptr + 3), d04); - _mm256_storeu_pd((double *) ((__m256d *) ptr + 4), d05); - _mm256_storeu_pd((double *) ((__m256d *) ptr + 5), d06); -} - -static NOINLINE void sort_07v(double *ptr) { - __m256d d01 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 0)); - __m256d d02 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 1)); - __m256d d03 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 2)); - __m256d d04 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 3)); - __m256d d05 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 4)); - __m256d d06 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 5)); - __m256d d07 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 6)); - sort_07v_ascending(d01, d02, d03, d04, d05, d06, d07); - 
_mm256_storeu_pd((double *) ((__m256d *) ptr + 0), d01); - _mm256_storeu_pd((double *) ((__m256d *) ptr + 1), d02); - _mm256_storeu_pd((double *) ((__m256d *) ptr + 2), d03); - _mm256_storeu_pd((double *) ((__m256d *) ptr + 3), d04); - _mm256_storeu_pd((double *) ((__m256d *) ptr + 4), d05); - _mm256_storeu_pd((double *) ((__m256d *) ptr + 5), d06); - _mm256_storeu_pd((double *) ((__m256d *) ptr + 6), d07); -} - -static NOINLINE void sort_08v(double *ptr) { - __m256d d01 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 0)); - __m256d d02 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 1)); - __m256d d03 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 2)); - __m256d d04 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 3)); - __m256d d05 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 4)); - __m256d d06 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 5)); - __m256d d07 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 6)); - __m256d d08 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 7)); - sort_08v_ascending(d01, d02, d03, d04, d05, d06, d07, d08); - _mm256_storeu_pd((double *) ((__m256d *) ptr + 0), d01); - _mm256_storeu_pd((double *) ((__m256d *) ptr + 1), d02); - _mm256_storeu_pd((double *) ((__m256d *) ptr + 2), d03); - _mm256_storeu_pd((double *) ((__m256d *) ptr + 3), d04); - _mm256_storeu_pd((double *) ((__m256d *) ptr + 4), d05); - _mm256_storeu_pd((double *) ((__m256d *) ptr + 5), d06); - _mm256_storeu_pd((double *) ((__m256d *) ptr + 6), d07); - _mm256_storeu_pd((double *) ((__m256d *) ptr + 7), d08); -} - -static NOINLINE void sort_09v(double *ptr) { - __m256d d01 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 0)); - __m256d d02 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 1)); - __m256d d03 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 2)); - __m256d d04 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 3)); - __m256d d05 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 4)); - __m256d d06 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 5)); - __m256d d07 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 6)); - __m256d d08 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 7)); - __m256d d09 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 8)); - sort_09v_ascending(d01, d02, d03, d04, d05, d06, d07, d08, d09); - _mm256_storeu_pd((double *) ((__m256d *) ptr + 0), d01); - _mm256_storeu_pd((double *) ((__m256d *) ptr + 1), d02); - _mm256_storeu_pd((double *) ((__m256d *) ptr + 2), d03); - _mm256_storeu_pd((double *) ((__m256d *) ptr + 3), d04); - _mm256_storeu_pd((double *) ((__m256d *) ptr + 4), d05); - _mm256_storeu_pd((double *) ((__m256d *) ptr + 5), d06); - _mm256_storeu_pd((double *) ((__m256d *) ptr + 6), d07); - _mm256_storeu_pd((double *) ((__m256d *) ptr + 7), d08); - _mm256_storeu_pd((double *) ((__m256d *) ptr + 8), d09); -} - -static NOINLINE void sort_10v(double *ptr) { - __m256d d01 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 0)); - __m256d d02 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 1)); - __m256d d03 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 2)); - __m256d d04 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 3)); - __m256d d05 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 4)); - __m256d d06 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 5)); - __m256d d07 = _mm256_loadu_pd((double 
const *) ((__m256d const *) ptr + 6)); - __m256d d08 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 7)); - __m256d d09 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 8)); - __m256d d10 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 9)); - sort_10v_ascending(d01, d02, d03, d04, d05, d06, d07, d08, d09, d10); - _mm256_storeu_pd((double *) ((__m256d *) ptr + 0), d01); - _mm256_storeu_pd((double *) ((__m256d *) ptr + 1), d02); - _mm256_storeu_pd((double *) ((__m256d *) ptr + 2), d03); - _mm256_storeu_pd((double *) ((__m256d *) ptr + 3), d04); - _mm256_storeu_pd((double *) ((__m256d *) ptr + 4), d05); - _mm256_storeu_pd((double *) ((__m256d *) ptr + 5), d06); - _mm256_storeu_pd((double *) ((__m256d *) ptr + 6), d07); - _mm256_storeu_pd((double *) ((__m256d *) ptr + 7), d08); - _mm256_storeu_pd((double *) ((__m256d *) ptr + 8), d09); - _mm256_storeu_pd((double *) ((__m256d *) ptr + 9), d10); -} - -static NOINLINE void sort_11v(double *ptr) { - __m256d d01 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 0)); - __m256d d02 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 1)); - __m256d d03 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 2)); - __m256d d04 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 3)); - __m256d d05 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 4)); - __m256d d06 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 5)); - __m256d d07 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 6)); - __m256d d08 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 7)); - __m256d d09 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 8)); - __m256d d10 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 9)); - __m256d d11 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 10)); - sort_11v_ascending(d01, d02, d03, d04, d05, d06, d07, d08, d09, d10, d11); - _mm256_storeu_pd((double *) ((__m256d *) ptr + 0), d01); - _mm256_storeu_pd((double *) ((__m256d *) ptr + 1), d02); - _mm256_storeu_pd((double *) ((__m256d *) ptr + 2), d03); - _mm256_storeu_pd((double *) ((__m256d *) ptr + 3), d04); - _mm256_storeu_pd((double *) ((__m256d *) ptr + 4), d05); - _mm256_storeu_pd((double *) ((__m256d *) ptr + 5), d06); - _mm256_storeu_pd((double *) ((__m256d *) ptr + 6), d07); - _mm256_storeu_pd((double *) ((__m256d *) ptr + 7), d08); - _mm256_storeu_pd((double *) ((__m256d *) ptr + 8), d09); - _mm256_storeu_pd((double *) ((__m256d *) ptr + 9), d10); - _mm256_storeu_pd((double *) ((__m256d *) ptr + 10), d11); -} - -static NOINLINE void sort_12v(double *ptr) { - __m256d d01 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 0)); - __m256d d02 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 1)); - __m256d d03 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 2)); - __m256d d04 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 3)); - __m256d d05 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 4)); - __m256d d06 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 5)); - __m256d d07 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 6)); - __m256d d08 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 7)); - __m256d d09 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 8)); - __m256d d10 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 9)); - __m256d d11 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 10)); - __m256d d12 = _mm256_loadu_pd((double const 
*) ((__m256d const *) ptr + 11)); - sort_12v_ascending(d01, d02, d03, d04, d05, d06, d07, d08, d09, d10, d11, d12); - _mm256_storeu_pd((double *) ((__m256d *) ptr + 0), d01); - _mm256_storeu_pd((double *) ((__m256d *) ptr + 1), d02); - _mm256_storeu_pd((double *) ((__m256d *) ptr + 2), d03); - _mm256_storeu_pd((double *) ((__m256d *) ptr + 3), d04); - _mm256_storeu_pd((double *) ((__m256d *) ptr + 4), d05); - _mm256_storeu_pd((double *) ((__m256d *) ptr + 5), d06); - _mm256_storeu_pd((double *) ((__m256d *) ptr + 6), d07); - _mm256_storeu_pd((double *) ((__m256d *) ptr + 7), d08); - _mm256_storeu_pd((double *) ((__m256d *) ptr + 8), d09); - _mm256_storeu_pd((double *) ((__m256d *) ptr + 9), d10); - _mm256_storeu_pd((double *) ((__m256d *) ptr + 10), d11); - _mm256_storeu_pd((double *) ((__m256d *) ptr + 11), d12); -} - -static NOINLINE void sort_13v(double *ptr) { - __m256d d01 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 0)); - __m256d d02 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 1)); - __m256d d03 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 2)); - __m256d d04 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 3)); - __m256d d05 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 4)); - __m256d d06 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 5)); - __m256d d07 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 6)); - __m256d d08 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 7)); - __m256d d09 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 8)); - __m256d d10 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 9)); - __m256d d11 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 10)); - __m256d d12 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 11)); - __m256d d13 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 12)); - sort_13v_ascending(d01, d02, d03, d04, d05, d06, d07, d08, d09, d10, d11, d12, d13); - _mm256_storeu_pd((double *) ((__m256d *) ptr + 0), d01); - _mm256_storeu_pd((double *) ((__m256d *) ptr + 1), d02); - _mm256_storeu_pd((double *) ((__m256d *) ptr + 2), d03); - _mm256_storeu_pd((double *) ((__m256d *) ptr + 3), d04); - _mm256_storeu_pd((double *) ((__m256d *) ptr + 4), d05); - _mm256_storeu_pd((double *) ((__m256d *) ptr + 5), d06); - _mm256_storeu_pd((double *) ((__m256d *) ptr + 6), d07); - _mm256_storeu_pd((double *) ((__m256d *) ptr + 7), d08); - _mm256_storeu_pd((double *) ((__m256d *) ptr + 8), d09); - _mm256_storeu_pd((double *) ((__m256d *) ptr + 9), d10); - _mm256_storeu_pd((double *) ((__m256d *) ptr + 10), d11); - _mm256_storeu_pd((double *) ((__m256d *) ptr + 11), d12); - _mm256_storeu_pd((double *) ((__m256d *) ptr + 12), d13); -} - -static NOINLINE void sort_14v(double *ptr) { - __m256d d01 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 0)); - __m256d d02 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 1)); - __m256d d03 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 2)); - __m256d d04 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 3)); - __m256d d05 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 4)); - __m256d d06 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 5)); - __m256d d07 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 6)); - __m256d d08 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 7)); - __m256d d09 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 8)); - __m256d d10 = 
_mm256_loadu_pd((double const *) ((__m256d const *) ptr + 9)); - __m256d d11 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 10)); - __m256d d12 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 11)); - __m256d d13 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 12)); - __m256d d14 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 13)); - sort_14v_ascending(d01, d02, d03, d04, d05, d06, d07, d08, d09, d10, d11, d12, d13, d14); - _mm256_storeu_pd((double *) ((__m256d *) ptr + 0), d01); - _mm256_storeu_pd((double *) ((__m256d *) ptr + 1), d02); - _mm256_storeu_pd((double *) ((__m256d *) ptr + 2), d03); - _mm256_storeu_pd((double *) ((__m256d *) ptr + 3), d04); - _mm256_storeu_pd((double *) ((__m256d *) ptr + 4), d05); - _mm256_storeu_pd((double *) ((__m256d *) ptr + 5), d06); - _mm256_storeu_pd((double *) ((__m256d *) ptr + 6), d07); - _mm256_storeu_pd((double *) ((__m256d *) ptr + 7), d08); - _mm256_storeu_pd((double *) ((__m256d *) ptr + 8), d09); - _mm256_storeu_pd((double *) ((__m256d *) ptr + 9), d10); - _mm256_storeu_pd((double *) ((__m256d *) ptr + 10), d11); - _mm256_storeu_pd((double *) ((__m256d *) ptr + 11), d12); - _mm256_storeu_pd((double *) ((__m256d *) ptr + 12), d13); - _mm256_storeu_pd((double *) ((__m256d *) ptr + 13), d14); -} - -static NOINLINE void sort_15v(double *ptr) { - __m256d d01 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 0)); - __m256d d02 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 1)); - __m256d d03 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 2)); - __m256d d04 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 3)); - __m256d d05 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 4)); - __m256d d06 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 5)); - __m256d d07 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 6)); - __m256d d08 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 7)); - __m256d d09 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 8)); - __m256d d10 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 9)); - __m256d d11 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 10)); - __m256d d12 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 11)); - __m256d d13 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 12)); - __m256d d14 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 13)); - __m256d d15 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 14)); - sort_15v_ascending(d01, d02, d03, d04, d05, d06, d07, d08, d09, d10, d11, d12, d13, d14, d15); - _mm256_storeu_pd((double *) ((__m256d *) ptr + 0), d01); - _mm256_storeu_pd((double *) ((__m256d *) ptr + 1), d02); - _mm256_storeu_pd((double *) ((__m256d *) ptr + 2), d03); - _mm256_storeu_pd((double *) ((__m256d *) ptr + 3), d04); - _mm256_storeu_pd((double *) ((__m256d *) ptr + 4), d05); - _mm256_storeu_pd((double *) ((__m256d *) ptr + 5), d06); - _mm256_storeu_pd((double *) ((__m256d *) ptr + 6), d07); - _mm256_storeu_pd((double *) ((__m256d *) ptr + 7), d08); - _mm256_storeu_pd((double *) ((__m256d *) ptr + 8), d09); - _mm256_storeu_pd((double *) ((__m256d *) ptr + 9), d10); - _mm256_storeu_pd((double *) ((__m256d *) ptr + 10), d11); - _mm256_storeu_pd((double *) ((__m256d *) ptr + 11), d12); - _mm256_storeu_pd((double *) ((__m256d *) ptr + 12), d13); - _mm256_storeu_pd((double *) ((__m256d *) ptr + 13), d14); - _mm256_storeu_pd((double *) ((__m256d *) ptr + 14), d15); -} - -static NOINLINE 
void sort_16v(double *ptr) { - __m256d d01 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 0)); - __m256d d02 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 1)); - __m256d d03 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 2)); - __m256d d04 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 3)); - __m256d d05 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 4)); - __m256d d06 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 5)); - __m256d d07 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 6)); - __m256d d08 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 7)); - __m256d d09 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 8)); - __m256d d10 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 9)); - __m256d d11 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 10)); - __m256d d12 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 11)); - __m256d d13 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 12)); - __m256d d14 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 13)); - __m256d d15 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 14)); - __m256d d16 = _mm256_loadu_pd((double const *) ((__m256d const *) ptr + 15)); - sort_16v_ascending(d01, d02, d03, d04, d05, d06, d07, d08, d09, d10, d11, d12, d13, d14, d15, d16); - _mm256_storeu_pd((double *) ((__m256d *) ptr + 0), d01); - _mm256_storeu_pd((double *) ((__m256d *) ptr + 1), d02); - _mm256_storeu_pd((double *) ((__m256d *) ptr + 2), d03); - _mm256_storeu_pd((double *) ((__m256d *) ptr + 3), d04); - _mm256_storeu_pd((double *) ((__m256d *) ptr + 4), d05); - _mm256_storeu_pd((double *) ((__m256d *) ptr + 5), d06); - _mm256_storeu_pd((double *) ((__m256d *) ptr + 6), d07); - _mm256_storeu_pd((double *) ((__m256d *) ptr + 7), d08); - _mm256_storeu_pd((double *) ((__m256d *) ptr + 8), d09); - _mm256_storeu_pd((double *) ((__m256d *) ptr + 9), d10); - _mm256_storeu_pd((double *) ((__m256d *) ptr + 10), d11); - _mm256_storeu_pd((double *) ((__m256d *) ptr + 11), d12); - _mm256_storeu_pd((double *) ((__m256d *) ptr + 12), d13); - _mm256_storeu_pd((double *) ((__m256d *) ptr + 13), d14); - _mm256_storeu_pd((double *) ((__m256d *) ptr + 14), d15); - _mm256_storeu_pd((double *) ((__m256d *) ptr + 15), d16); -} - static void sort(double *ptr, size_t length) { - const int N = 4; - - switch(length / N) { - case 1: sort_01v(ptr); break; - case 2: sort_02v(ptr); break; - case 3: sort_03v(ptr); break; - case 4: sort_04v(ptr); break; - case 5: sort_05v(ptr); break; - case 6: sort_06v(ptr); break; - case 7: sort_07v(ptr); break; - case 8: sort_08v(ptr); break; - case 9: sort_09v(ptr); break; - case 10: sort_10v(ptr); break; - case 11: sort_11v(ptr); break; - case 12: sort_12v(ptr); break; - case 13: sort_13v(ptr); break; - case 14: sort_14v(ptr); break; - case 15: sort_15v(ptr); break; - case 16: sort_16v(ptr); break; - } -} -}; -} -} -#endif
diff --git a/src/coreclr/src/gc/smallsort/bitonic_sort.AVX2.float.generated.h b/src/coreclr/src/gc/smallsort/bitonic_sort.AVX2.float.generated.h
deleted file mode 100644
index 358727f582990c..00000000000000
--- a/src/coreclr/src/gc/smallsort/bitonic_sort.AVX2.float.generated.h
+++ /dev/null
@@ -1,1532 +0,0 @@
-///////////////////////////////////////////////////////////////////////////// -//// -// This file was auto-generated by a tool at 2020-05-31 19:46:17 -// -// It is recommended you DO NOT directly edit this file but instead edit -// the code-generator 
that generated this source file. -///////////////////////////////////////////////////////////////////////////// - -#ifndef BITONIC_SORT_AVX2_FLOAT_H -#define BITONIC_SORT_AVX2_FLOAT_H - -#include <immintrin.h> -#include "bitonic_sort.h" - -#ifdef _MSC_VER - // MSVC - #define INLINE __forceinline - #define NOINLINE __declspec(noinline) -#else - // GCC + Clang - #define INLINE __attribute__((always_inline)) - #define NOINLINE __attribute__((noinline)) -#endif - -#define i2d _mm256_castsi256_pd -#define d2i _mm256_castpd_si256 -#define i2s _mm256_castsi256_ps -#define s2i _mm256_castps_si256 -#define s2d _mm256_castps_pd -#define d2s _mm256_castpd_ps - -namespace gcsort { -namespace smallsort { -template<> struct bitonic<float> { -public: - - static INLINE void sort_01v_ascending(__m256& d01) { - __m256 min, max, s; - - s = i2s(_mm256_shuffle_epi32(s2i(d01), 0xB1)); - - min = _mm256_min_ps(s, d01); - max = _mm256_max_ps(s, d01); - d01 = i2s(_mm256_blend_epi32(s2i(min), s2i(max), 0xAA)); - - s = i2s(_mm256_shuffle_epi32(s2i(d01), 0x1B)); - - min = _mm256_min_ps(s, d01); - max = _mm256_max_ps(s, d01); - d01 = i2s(_mm256_blend_epi32(s2i(min), s2i(max), 0xCC)); - - s = i2s(_mm256_shuffle_epi32(s2i(d01), 0xB1)); - - min = _mm256_min_ps(s, d01); - max = _mm256_max_ps(s, d01); - d01 = i2s(_mm256_blend_epi32(s2i(min), s2i(max), 0xAA)); - - s = d2s(_mm256_permute4x64_pd(i2d(_mm256_shuffle_epi32(s2i(d01), 0x1B)), 0x4E)); - min = _mm256_min_ps(s, d01); - max = _mm256_max_ps(s, d01); - d01 = i2s(_mm256_blend_epi32(s2i(min), s2i(max), 0xF0)); - - s = i2s(_mm256_shuffle_epi32(s2i(d01), 0x4E)); - min = _mm256_min_ps(s, d01); - max = _mm256_max_ps(s, d01); - d01 = i2s(_mm256_blend_epi32(s2i(min), s2i(max), 0xCC)); - - s = i2s(_mm256_shuffle_epi32(s2i(d01), 0xB1)); - min = _mm256_min_ps(s, d01); - max = _mm256_max_ps(s, d01); - d01 = i2s(_mm256_blend_epi32(s2i(min), s2i(max), 0xAA)); -} - static INLINE void sort_01v_merge_ascending(__m256& d01) { - __m256 min, max, s; - - s = d2s(_mm256_permute4x64_pd(s2d(d01), 0x4E)); - min = _mm256_min_ps(s, d01); - max = _mm256_max_ps(s, d01); - d01 = i2s(_mm256_blend_epi32(s2i(min), s2i(max), 0xF0)); - - s = i2s(_mm256_shuffle_epi32(s2i(d01), 0x4E)); - - min = _mm256_min_ps(s, d01); - max = _mm256_max_ps(s, d01); - d01 = i2s(_mm256_blend_epi32(s2i(min), s2i(max), 0xCC)); - - s = i2s(_mm256_shuffle_epi32(s2i(d01), 0xB1)); - - min = _mm256_min_ps(s, d01); - max = _mm256_max_ps(s, d01); - d01 = i2s(_mm256_blend_epi32(s2i(min), s2i(max), 0xAA)); - } - static INLINE void sort_01v_descending(__m256& d01) { - __m256 min, max, s; - - s = i2s(_mm256_shuffle_epi32(s2i(d01), 0xB1)); - - min = _mm256_min_ps(s, d01); - max = _mm256_max_ps(s, d01); - d01 = i2s(_mm256_blend_epi32(s2i(max), s2i(min), 0xAA)); - - s = i2s(_mm256_shuffle_epi32(s2i(d01), 0x1B)); - - min = _mm256_min_ps(s, d01); - max = _mm256_max_ps(s, d01); - d01 = i2s(_mm256_blend_epi32(s2i(max), s2i(min), 0xCC)); - - s = i2s(_mm256_shuffle_epi32(s2i(d01), 0xB1)); - - min = _mm256_min_ps(s, d01); - max = _mm256_max_ps(s, d01); - d01 = i2s(_mm256_blend_epi32(s2i(max), s2i(min), 0xAA)); - - s = d2s(_mm256_permute4x64_pd(i2d(_mm256_shuffle_epi32(s2i(d01), 0x1B)), 0x4E)); - min = _mm256_min_ps(s, d01); - max = _mm256_max_ps(s, d01); - d01 = i2s(_mm256_blend_epi32(s2i(max), s2i(min), 0xF0)); - - s = i2s(_mm256_shuffle_epi32(s2i(d01), 0x4E)); - min = _mm256_min_ps(s, d01); - max = _mm256_max_ps(s, d01); - d01 = i2s(_mm256_blend_epi32(s2i(max), s2i(min), 0xCC)); - - s = i2s(_mm256_shuffle_epi32(s2i(d01), 0xB1)); - min = _mm256_min_ps(s, d01);
- max = _mm256_max_ps(s, d01); - d01 = i2s(_mm256_blend_epi32(s2i(max), s2i(min), 0xAA)); -} - static INLINE void sort_01v_merge_descending(__m256& d01) { - __m256 min, max, s; - - s = d2s(_mm256_permute4x64_pd(s2d(d01), 0x4E)); - min = _mm256_min_ps(s, d01); - max = _mm256_max_ps(s, d01); - d01 = i2s(_mm256_blend_epi32(s2i(max), s2i(min), 0xF0)); - - s = i2s(_mm256_shuffle_epi32(s2i(d01), 0x4E)); - - min = _mm256_min_ps(s, d01); - max = _mm256_max_ps(s, d01); - d01 = i2s(_mm256_blend_epi32(s2i(max), s2i(min), 0xCC)); - - s = i2s(_mm256_shuffle_epi32(s2i(d01), 0xB1)); - - min = _mm256_min_ps(s, d01); - max = _mm256_max_ps(s, d01); - d01 = i2s(_mm256_blend_epi32(s2i(max), s2i(min), 0xAA)); - } - static INLINE void sort_02v_ascending(__m256& d01, __m256& d02) { - __m256 tmp; - - sort_01v_ascending(d01); - sort_01v_descending(d02); - - tmp = d02; - - d02 = _mm256_max_ps(d01, d02); - d01 = _mm256_min_ps(d01, tmp); - - sort_01v_merge_ascending(d01); - sort_01v_merge_ascending(d02); - } - static INLINE void sort_02v_descending(__m256& d01, __m256& d02) { - __m256 tmp; - - sort_01v_descending(d01); - sort_01v_ascending(d02); - - tmp = d02; - - d02 = _mm256_max_ps(d01, d02); - d01 = _mm256_min_ps(d01, tmp); - - sort_01v_merge_descending(d01); - sort_01v_merge_descending(d02); - } - static INLINE void sort_02v_merge_ascending(__m256& d01, __m256& d02) { - __m256 tmp; - - tmp = d01; - - d01 = _mm256_min_ps(d02, d01); - - d02 = _mm256_max_ps(d02, tmp); - - sort_01v_merge_ascending(d01); - sort_01v_merge_ascending(d02); - } - static INLINE void sort_02v_merge_descending(__m256& d01, __m256& d02) { - __m256 tmp; - - tmp = d01; - - d01 = _mm256_min_ps(d02, d01); - - d02 = _mm256_max_ps(d02, tmp); - - sort_01v_merge_descending(d01); - sort_01v_merge_descending(d02); - } - static INLINE void sort_03v_ascending(__m256& d01, __m256& d02, __m256& d03) { - __m256 tmp; - - sort_02v_ascending(d01, d02); - sort_01v_descending(d03); - - tmp = d03; - - d03 = _mm256_max_ps(d02, d03); - d02 = _mm256_min_ps(d02, tmp); - - sort_02v_merge_ascending(d01, d02); - sort_01v_merge_ascending(d03); - } - static INLINE void sort_03v_descending(__m256& d01, __m256& d02, __m256& d03) { - __m256 tmp; - - sort_02v_descending(d01, d02); - sort_01v_ascending(d03); - - tmp = d03; - - d03 = _mm256_max_ps(d02, d03); - d02 = _mm256_min_ps(d02, tmp); - - sort_02v_merge_descending(d01, d02); - sort_01v_merge_descending(d03); - } - static INLINE void sort_03v_merge_ascending(__m256& d01, __m256& d02, __m256& d03) { - __m256 tmp; - - tmp = d01; - - d01 = _mm256_min_ps(d03, d01); - - d03 = _mm256_max_ps(d03, tmp); - - sort_02v_merge_ascending(d01, d02); - sort_01v_merge_ascending(d03); - } - static INLINE void sort_03v_merge_descending(__m256& d01, __m256& d02, __m256& d03) { - __m256 tmp; - - tmp = d01; - - d01 = _mm256_min_ps(d03, d01); - - d03 = _mm256_max_ps(d03, tmp); - - sort_02v_merge_descending(d01, d02); - sort_01v_merge_descending(d03); - } - static INLINE void sort_04v_ascending(__m256& d01, __m256& d02, __m256& d03, __m256& d04) { - __m256 tmp; - - sort_02v_ascending(d01, d02); - sort_02v_descending(d03, d04); - - tmp = d03; - - d03 = _mm256_max_ps(d02, d03); - d02 = _mm256_min_ps(d02, tmp); - - tmp = d04; - - d04 = _mm256_max_ps(d01, d04); - d01 = _mm256_min_ps(d01, tmp); - - sort_02v_merge_ascending(d01, d02); - sort_02v_merge_ascending(d03, d04); - } - static INLINE void sort_04v_descending(__m256& d01, __m256& d02, __m256& d03, __m256& d04) { - __m256 tmp; - - sort_02v_descending(d01, d02); - sort_02v_ascending(d03, d04); - - 
tmp = d03; - - d03 = _mm256_max_ps(d02, d03); - d02 = _mm256_min_ps(d02, tmp); - - tmp = d04; - - d04 = _mm256_max_ps(d01, d04); - d01 = _mm256_min_ps(d01, tmp); - - sort_02v_merge_descending(d01, d02); - sort_02v_merge_descending(d03, d04); - } - static INLINE void sort_04v_merge_ascending(__m256& d01, __m256& d02, __m256& d03, __m256& d04) { - __m256 tmp; - - tmp = d01; - - d01 = _mm256_min_ps(d03, d01); - - d03 = _mm256_max_ps(d03, tmp); - - tmp = d02; - - d02 = _mm256_min_ps(d04, d02); - - d04 = _mm256_max_ps(d04, tmp); - - sort_02v_merge_ascending(d01, d02); - sort_02v_merge_ascending(d03, d04); - } - static INLINE void sort_04v_merge_descending(__m256& d01, __m256& d02, __m256& d03, __m256& d04) { - __m256 tmp; - - tmp = d01; - - d01 = _mm256_min_ps(d03, d01); - - d03 = _mm256_max_ps(d03, tmp); - - tmp = d02; - - d02 = _mm256_min_ps(d04, d02); - - d04 = _mm256_max_ps(d04, tmp); - - sort_02v_merge_descending(d01, d02); - sort_02v_merge_descending(d03, d04); - } - static INLINE void sort_05v_ascending(__m256& d01, __m256& d02, __m256& d03, __m256& d04, __m256& d05) { - __m256 tmp; - - sort_04v_ascending(d01, d02, d03, d04); - sort_01v_descending(d05); - - tmp = d05; - - d05 = _mm256_max_ps(d04, d05); - d04 = _mm256_min_ps(d04, tmp); - - sort_04v_merge_ascending(d01, d02, d03, d04); - sort_01v_merge_ascending(d05); - } - static INLINE void sort_05v_descending(__m256& d01, __m256& d02, __m256& d03, __m256& d04, __m256& d05) { - __m256 tmp; - - sort_04v_descending(d01, d02, d03, d04); - sort_01v_ascending(d05); - - tmp = d05; - - d05 = _mm256_max_ps(d04, d05); - d04 = _mm256_min_ps(d04, tmp); - - sort_04v_merge_descending(d01, d02, d03, d04); - sort_01v_merge_descending(d05); - } - static INLINE void sort_05v_merge_ascending(__m256& d01, __m256& d02, __m256& d03, __m256& d04, __m256& d05) { - __m256 tmp; - - tmp = d01; - - d01 = _mm256_min_ps(d05, d01); - - d05 = _mm256_max_ps(d05, tmp); - - sort_04v_merge_ascending(d01, d02, d03, d04); - sort_01v_merge_ascending(d05); - } - static INLINE void sort_05v_merge_descending(__m256& d01, __m256& d02, __m256& d03, __m256& d04, __m256& d05) { - __m256 tmp; - - tmp = d01; - - d01 = _mm256_min_ps(d05, d01); - - d05 = _mm256_max_ps(d05, tmp); - - sort_04v_merge_descending(d01, d02, d03, d04); - sort_01v_merge_descending(d05); - } - static INLINE void sort_06v_ascending(__m256& d01, __m256& d02, __m256& d03, __m256& d04, __m256& d05, __m256& d06) { - __m256 tmp; - - sort_04v_ascending(d01, d02, d03, d04); - sort_02v_descending(d05, d06); - - tmp = d05; - - d05 = _mm256_max_ps(d04, d05); - d04 = _mm256_min_ps(d04, tmp); - - tmp = d06; - - d06 = _mm256_max_ps(d03, d06); - d03 = _mm256_min_ps(d03, tmp); - - sort_04v_merge_ascending(d01, d02, d03, d04); - sort_02v_merge_ascending(d05, d06); - } - static INLINE void sort_06v_descending(__m256& d01, __m256& d02, __m256& d03, __m256& d04, __m256& d05, __m256& d06) { - __m256 tmp; - - sort_04v_descending(d01, d02, d03, d04); - sort_02v_ascending(d05, d06); - - tmp = d05; - - d05 = _mm256_max_ps(d04, d05); - d04 = _mm256_min_ps(d04, tmp); - - tmp = d06; - - d06 = _mm256_max_ps(d03, d06); - d03 = _mm256_min_ps(d03, tmp); - - sort_04v_merge_descending(d01, d02, d03, d04); - sort_02v_merge_descending(d05, d06); - } - static INLINE void sort_06v_merge_ascending(__m256& d01, __m256& d02, __m256& d03, __m256& d04, __m256& d05, __m256& d06) { - __m256 tmp; - - tmp = d01; - - d01 = _mm256_min_ps(d05, d01); - - d05 = _mm256_max_ps(d05, tmp); - - tmp = d02; - - d02 = _mm256_min_ps(d06, d02); - - d06 = 
_mm256_max_ps(d06, tmp); - - sort_04v_merge_ascending(d01, d02, d03, d04); - sort_02v_merge_ascending(d05, d06); - } - static INLINE void sort_06v_merge_descending(__m256& d01, __m256& d02, __m256& d03, __m256& d04, __m256& d05, __m256& d06) { - __m256 tmp; - - tmp = d01; - - d01 = _mm256_min_ps(d05, d01); - - d05 = _mm256_max_ps(d05, tmp); - - tmp = d02; - - d02 = _mm256_min_ps(d06, d02); - - d06 = _mm256_max_ps(d06, tmp); - - sort_04v_merge_descending(d01, d02, d03, d04); - sort_02v_merge_descending(d05, d06); - } - static INLINE void sort_07v_ascending(__m256& d01, __m256& d02, __m256& d03, __m256& d04, __m256& d05, __m256& d06, __m256& d07) { - __m256 tmp; - - sort_04v_ascending(d01, d02, d03, d04); - sort_03v_descending(d05, d06, d07); - - tmp = d05; - - d05 = _mm256_max_ps(d04, d05); - d04 = _mm256_min_ps(d04, tmp); - - tmp = d06; - - d06 = _mm256_max_ps(d03, d06); - d03 = _mm256_min_ps(d03, tmp); - - tmp = d07; - - d07 = _mm256_max_ps(d02, d07); - d02 = _mm256_min_ps(d02, tmp); - - sort_04v_merge_ascending(d01, d02, d03, d04); - sort_03v_merge_ascending(d05, d06, d07); - } - static INLINE void sort_07v_descending(__m256& d01, __m256& d02, __m256& d03, __m256& d04, __m256& d05, __m256& d06, __m256& d07) { - __m256 tmp; - - sort_04v_descending(d01, d02, d03, d04); - sort_03v_ascending(d05, d06, d07); - - tmp = d05; - - d05 = _mm256_max_ps(d04, d05); - d04 = _mm256_min_ps(d04, tmp); - - tmp = d06; - - d06 = _mm256_max_ps(d03, d06); - d03 = _mm256_min_ps(d03, tmp); - - tmp = d07; - - d07 = _mm256_max_ps(d02, d07); - d02 = _mm256_min_ps(d02, tmp); - - sort_04v_merge_descending(d01, d02, d03, d04); - sort_03v_merge_descending(d05, d06, d07); - } - static INLINE void sort_07v_merge_ascending(__m256& d01, __m256& d02, __m256& d03, __m256& d04, __m256& d05, __m256& d06, __m256& d07) { - __m256 tmp; - - tmp = d01; - - d01 = _mm256_min_ps(d05, d01); - - d05 = _mm256_max_ps(d05, tmp); - - tmp = d02; - - d02 = _mm256_min_ps(d06, d02); - - d06 = _mm256_max_ps(d06, tmp); - - tmp = d03; - - d03 = _mm256_min_ps(d07, d03); - - d07 = _mm256_max_ps(d07, tmp); - - sort_04v_merge_ascending(d01, d02, d03, d04); - sort_03v_merge_ascending(d05, d06, d07); - } - static INLINE void sort_07v_merge_descending(__m256& d01, __m256& d02, __m256& d03, __m256& d04, __m256& d05, __m256& d06, __m256& d07) { - __m256 tmp; - - tmp = d01; - - d01 = _mm256_min_ps(d05, d01); - - d05 = _mm256_max_ps(d05, tmp); - - tmp = d02; - - d02 = _mm256_min_ps(d06, d02); - - d06 = _mm256_max_ps(d06, tmp); - - tmp = d03; - - d03 = _mm256_min_ps(d07, d03); - - d07 = _mm256_max_ps(d07, tmp); - - sort_04v_merge_descending(d01, d02, d03, d04); - sort_03v_merge_descending(d05, d06, d07); - } - static INLINE void sort_08v_ascending(__m256& d01, __m256& d02, __m256& d03, __m256& d04, __m256& d05, __m256& d06, __m256& d07, __m256& d08) { - __m256 tmp; - - sort_04v_ascending(d01, d02, d03, d04); - sort_04v_descending(d05, d06, d07, d08); - - tmp = d05; - - d05 = _mm256_max_ps(d04, d05); - d04 = _mm256_min_ps(d04, tmp); - - tmp = d06; - - d06 = _mm256_max_ps(d03, d06); - d03 = _mm256_min_ps(d03, tmp); - - tmp = d07; - - d07 = _mm256_max_ps(d02, d07); - d02 = _mm256_min_ps(d02, tmp); - - tmp = d08; - - d08 = _mm256_max_ps(d01, d08); - d01 = _mm256_min_ps(d01, tmp); - - sort_04v_merge_ascending(d01, d02, d03, d04); - sort_04v_merge_ascending(d05, d06, d07, d08); - } - static INLINE void sort_08v_descending(__m256& d01, __m256& d02, __m256& d03, __m256& d04, __m256& d05, __m256& d06, __m256& d07, __m256& d08) { - __m256 tmp; - - 
sort_04v_descending(d01, d02, d03, d04); - sort_04v_ascending(d05, d06, d07, d08); - - tmp = d05; - - d05 = _mm256_max_ps(d04, d05); - d04 = _mm256_min_ps(d04, tmp); - - tmp = d06; - - d06 = _mm256_max_ps(d03, d06); - d03 = _mm256_min_ps(d03, tmp); - - tmp = d07; - - d07 = _mm256_max_ps(d02, d07); - d02 = _mm256_min_ps(d02, tmp); - - tmp = d08; - - d08 = _mm256_max_ps(d01, d08); - d01 = _mm256_min_ps(d01, tmp); - - sort_04v_merge_descending(d01, d02, d03, d04); - sort_04v_merge_descending(d05, d06, d07, d08); - } - static INLINE void sort_08v_merge_ascending(__m256& d01, __m256& d02, __m256& d03, __m256& d04, __m256& d05, __m256& d06, __m256& d07, __m256& d08) { - __m256 tmp; - - tmp = d01; - - d01 = _mm256_min_ps(d05, d01); - - d05 = _mm256_max_ps(d05, tmp); - - tmp = d02; - - d02 = _mm256_min_ps(d06, d02); - - d06 = _mm256_max_ps(d06, tmp); - - tmp = d03; - - d03 = _mm256_min_ps(d07, d03); - - d07 = _mm256_max_ps(d07, tmp); - - tmp = d04; - - d04 = _mm256_min_ps(d08, d04); - - d08 = _mm256_max_ps(d08, tmp); - - sort_04v_merge_ascending(d01, d02, d03, d04); - sort_04v_merge_ascending(d05, d06, d07, d08); - } - static INLINE void sort_08v_merge_descending(__m256& d01, __m256& d02, __m256& d03, __m256& d04, __m256& d05, __m256& d06, __m256& d07, __m256& d08) { - __m256 tmp; - - tmp = d01; - - d01 = _mm256_min_ps(d05, d01); - - d05 = _mm256_max_ps(d05, tmp); - - tmp = d02; - - d02 = _mm256_min_ps(d06, d02); - - d06 = _mm256_max_ps(d06, tmp); - - tmp = d03; - - d03 = _mm256_min_ps(d07, d03); - - d07 = _mm256_max_ps(d07, tmp); - - tmp = d04; - - d04 = _mm256_min_ps(d08, d04); - - d08 = _mm256_max_ps(d08, tmp); - - sort_04v_merge_descending(d01, d02, d03, d04); - sort_04v_merge_descending(d05, d06, d07, d08); - } - static INLINE void sort_09v_ascending(__m256& d01, __m256& d02, __m256& d03, __m256& d04, __m256& d05, __m256& d06, __m256& d07, __m256& d08, __m256& d09) { - __m256 tmp; - - sort_08v_ascending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_01v_descending(d09); - - tmp = d09; - - d09 = _mm256_max_ps(d08, d09); - d08 = _mm256_min_ps(d08, tmp); - - sort_08v_merge_ascending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_01v_merge_ascending(d09); - } - static INLINE void sort_09v_descending(__m256& d01, __m256& d02, __m256& d03, __m256& d04, __m256& d05, __m256& d06, __m256& d07, __m256& d08, __m256& d09) { - __m256 tmp; - - sort_08v_descending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_01v_ascending(d09); - - tmp = d09; - - d09 = _mm256_max_ps(d08, d09); - d08 = _mm256_min_ps(d08, tmp); - - sort_08v_merge_descending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_01v_merge_descending(d09); - } - static INLINE void sort_10v_ascending(__m256& d01, __m256& d02, __m256& d03, __m256& d04, __m256& d05, __m256& d06, __m256& d07, __m256& d08, __m256& d09, __m256& d10) { - __m256 tmp; - - sort_08v_ascending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_02v_descending(d09, d10); - - tmp = d09; - - d09 = _mm256_max_ps(d08, d09); - d08 = _mm256_min_ps(d08, tmp); - - tmp = d10; - - d10 = _mm256_max_ps(d07, d10); - d07 = _mm256_min_ps(d07, tmp); - - sort_08v_merge_ascending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_02v_merge_ascending(d09, d10); - } - static INLINE void sort_10v_descending(__m256& d01, __m256& d02, __m256& d03, __m256& d04, __m256& d05, __m256& d06, __m256& d07, __m256& d08, __m256& d09, __m256& d10) { - __m256 tmp; - - sort_08v_descending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_02v_ascending(d09, d10); - - tmp = d09; - - d09 = _mm256_max_ps(d08, d09); - d08 = 
_mm256_min_ps(d08, tmp); - - tmp = d10; - - d10 = _mm256_max_ps(d07, d10); - d07 = _mm256_min_ps(d07, tmp); - - sort_08v_merge_descending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_02v_merge_descending(d09, d10); - } - static INLINE void sort_11v_ascending(__m256& d01, __m256& d02, __m256& d03, __m256& d04, __m256& d05, __m256& d06, __m256& d07, __m256& d08, __m256& d09, __m256& d10, __m256& d11) { - __m256 tmp; - - sort_08v_ascending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_03v_descending(d09, d10, d11); - - tmp = d09; - - d09 = _mm256_max_ps(d08, d09); - d08 = _mm256_min_ps(d08, tmp); - - tmp = d10; - - d10 = _mm256_max_ps(d07, d10); - d07 = _mm256_min_ps(d07, tmp); - - tmp = d11; - - d11 = _mm256_max_ps(d06, d11); - d06 = _mm256_min_ps(d06, tmp); - - sort_08v_merge_ascending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_03v_merge_ascending(d09, d10, d11); - } - static INLINE void sort_11v_descending(__m256& d01, __m256& d02, __m256& d03, __m256& d04, __m256& d05, __m256& d06, __m256& d07, __m256& d08, __m256& d09, __m256& d10, __m256& d11) { - __m256 tmp; - - sort_08v_descending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_03v_ascending(d09, d10, d11); - - tmp = d09; - - d09 = _mm256_max_ps(d08, d09); - d08 = _mm256_min_ps(d08, tmp); - - tmp = d10; - - d10 = _mm256_max_ps(d07, d10); - d07 = _mm256_min_ps(d07, tmp); - - tmp = d11; - - d11 = _mm256_max_ps(d06, d11); - d06 = _mm256_min_ps(d06, tmp); - - sort_08v_merge_descending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_03v_merge_descending(d09, d10, d11); - } - static INLINE void sort_12v_ascending(__m256& d01, __m256& d02, __m256& d03, __m256& d04, __m256& d05, __m256& d06, __m256& d07, __m256& d08, __m256& d09, __m256& d10, __m256& d11, __m256& d12) { - __m256 tmp; - - sort_08v_ascending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_04v_descending(d09, d10, d11, d12); - - tmp = d09; - - d09 = _mm256_max_ps(d08, d09); - d08 = _mm256_min_ps(d08, tmp); - - tmp = d10; - - d10 = _mm256_max_ps(d07, d10); - d07 = _mm256_min_ps(d07, tmp); - - tmp = d11; - - d11 = _mm256_max_ps(d06, d11); - d06 = _mm256_min_ps(d06, tmp); - - tmp = d12; - - d12 = _mm256_max_ps(d05, d12); - d05 = _mm256_min_ps(d05, tmp); - - sort_08v_merge_ascending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_04v_merge_ascending(d09, d10, d11, d12); - } - static INLINE void sort_12v_descending(__m256& d01, __m256& d02, __m256& d03, __m256& d04, __m256& d05, __m256& d06, __m256& d07, __m256& d08, __m256& d09, __m256& d10, __m256& d11, __m256& d12) { - __m256 tmp; - - sort_08v_descending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_04v_ascending(d09, d10, d11, d12); - - tmp = d09; - - d09 = _mm256_max_ps(d08, d09); - d08 = _mm256_min_ps(d08, tmp); - - tmp = d10; - - d10 = _mm256_max_ps(d07, d10); - d07 = _mm256_min_ps(d07, tmp); - - tmp = d11; - - d11 = _mm256_max_ps(d06, d11); - d06 = _mm256_min_ps(d06, tmp); - - tmp = d12; - - d12 = _mm256_max_ps(d05, d12); - d05 = _mm256_min_ps(d05, tmp); - - sort_08v_merge_descending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_04v_merge_descending(d09, d10, d11, d12); - } - static INLINE void sort_13v_ascending(__m256& d01, __m256& d02, __m256& d03, __m256& d04, __m256& d05, __m256& d06, __m256& d07, __m256& d08, __m256& d09, __m256& d10, __m256& d11, __m256& d12, __m256& d13) { - __m256 tmp; - - sort_08v_ascending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_05v_descending(d09, d10, d11, d12, d13); - - tmp = d09; - - d09 = _mm256_max_ps(d08, d09); - d08 = _mm256_min_ps(d08, tmp); - - tmp = d10; - - d10 = 
_mm256_max_ps(d07, d10); - d07 = _mm256_min_ps(d07, tmp); - - tmp = d11; - - d11 = _mm256_max_ps(d06, d11); - d06 = _mm256_min_ps(d06, tmp); - - tmp = d12; - - d12 = _mm256_max_ps(d05, d12); - d05 = _mm256_min_ps(d05, tmp); - - tmp = d13; - - d13 = _mm256_max_ps(d04, d13); - d04 = _mm256_min_ps(d04, tmp); - - sort_08v_merge_ascending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_05v_merge_ascending(d09, d10, d11, d12, d13); - } - static INLINE void sort_13v_descending(__m256& d01, __m256& d02, __m256& d03, __m256& d04, __m256& d05, __m256& d06, __m256& d07, __m256& d08, __m256& d09, __m256& d10, __m256& d11, __m256& d12, __m256& d13) { - __m256 tmp; - - sort_08v_descending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_05v_ascending(d09, d10, d11, d12, d13); - - tmp = d09; - - d09 = _mm256_max_ps(d08, d09); - d08 = _mm256_min_ps(d08, tmp); - - tmp = d10; - - d10 = _mm256_max_ps(d07, d10); - d07 = _mm256_min_ps(d07, tmp); - - tmp = d11; - - d11 = _mm256_max_ps(d06, d11); - d06 = _mm256_min_ps(d06, tmp); - - tmp = d12; - - d12 = _mm256_max_ps(d05, d12); - d05 = _mm256_min_ps(d05, tmp); - - tmp = d13; - - d13 = _mm256_max_ps(d04, d13); - d04 = _mm256_min_ps(d04, tmp); - - sort_08v_merge_descending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_05v_merge_descending(d09, d10, d11, d12, d13); - } - static INLINE void sort_14v_ascending(__m256& d01, __m256& d02, __m256& d03, __m256& d04, __m256& d05, __m256& d06, __m256& d07, __m256& d08, __m256& d09, __m256& d10, __m256& d11, __m256& d12, __m256& d13, __m256& d14) { - __m256 tmp; - - sort_08v_ascending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_06v_descending(d09, d10, d11, d12, d13, d14); - - tmp = d09; - - d09 = _mm256_max_ps(d08, d09); - d08 = _mm256_min_ps(d08, tmp); - - tmp = d10; - - d10 = _mm256_max_ps(d07, d10); - d07 = _mm256_min_ps(d07, tmp); - - tmp = d11; - - d11 = _mm256_max_ps(d06, d11); - d06 = _mm256_min_ps(d06, tmp); - - tmp = d12; - - d12 = _mm256_max_ps(d05, d12); - d05 = _mm256_min_ps(d05, tmp); - - tmp = d13; - - d13 = _mm256_max_ps(d04, d13); - d04 = _mm256_min_ps(d04, tmp); - - tmp = d14; - - d14 = _mm256_max_ps(d03, d14); - d03 = _mm256_min_ps(d03, tmp); - - sort_08v_merge_ascending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_06v_merge_ascending(d09, d10, d11, d12, d13, d14); - } - static INLINE void sort_14v_descending(__m256& d01, __m256& d02, __m256& d03, __m256& d04, __m256& d05, __m256& d06, __m256& d07, __m256& d08, __m256& d09, __m256& d10, __m256& d11, __m256& d12, __m256& d13, __m256& d14) { - __m256 tmp; - - sort_08v_descending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_06v_ascending(d09, d10, d11, d12, d13, d14); - - tmp = d09; - - d09 = _mm256_max_ps(d08, d09); - d08 = _mm256_min_ps(d08, tmp); - - tmp = d10; - - d10 = _mm256_max_ps(d07, d10); - d07 = _mm256_min_ps(d07, tmp); - - tmp = d11; - - d11 = _mm256_max_ps(d06, d11); - d06 = _mm256_min_ps(d06, tmp); - - tmp = d12; - - d12 = _mm256_max_ps(d05, d12); - d05 = _mm256_min_ps(d05, tmp); - - tmp = d13; - - d13 = _mm256_max_ps(d04, d13); - d04 = _mm256_min_ps(d04, tmp); - - tmp = d14; - - d14 = _mm256_max_ps(d03, d14); - d03 = _mm256_min_ps(d03, tmp); - - sort_08v_merge_descending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_06v_merge_descending(d09, d10, d11, d12, d13, d14); - } - static INLINE void sort_15v_ascending(__m256& d01, __m256& d02, __m256& d03, __m256& d04, __m256& d05, __m256& d06, __m256& d07, __m256& d08, __m256& d09, __m256& d10, __m256& d11, __m256& d12, __m256& d13, __m256& d14, __m256& d15) { - __m256 tmp; - - 
sort_08v_ascending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_07v_descending(d09, d10, d11, d12, d13, d14, d15); - - tmp = d09; - - d09 = _mm256_max_ps(d08, d09); - d08 = _mm256_min_ps(d08, tmp); - - tmp = d10; - - d10 = _mm256_max_ps(d07, d10); - d07 = _mm256_min_ps(d07, tmp); - - tmp = d11; - - d11 = _mm256_max_ps(d06, d11); - d06 = _mm256_min_ps(d06, tmp); - - tmp = d12; - - d12 = _mm256_max_ps(d05, d12); - d05 = _mm256_min_ps(d05, tmp); - - tmp = d13; - - d13 = _mm256_max_ps(d04, d13); - d04 = _mm256_min_ps(d04, tmp); - - tmp = d14; - - d14 = _mm256_max_ps(d03, d14); - d03 = _mm256_min_ps(d03, tmp); - - tmp = d15; - - d15 = _mm256_max_ps(d02, d15); - d02 = _mm256_min_ps(d02, tmp); - - sort_08v_merge_ascending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_07v_merge_ascending(d09, d10, d11, d12, d13, d14, d15); - } - static INLINE void sort_15v_descending(__m256& d01, __m256& d02, __m256& d03, __m256& d04, __m256& d05, __m256& d06, __m256& d07, __m256& d08, __m256& d09, __m256& d10, __m256& d11, __m256& d12, __m256& d13, __m256& d14, __m256& d15) { - __m256 tmp; - - sort_08v_descending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_07v_ascending(d09, d10, d11, d12, d13, d14, d15); - - tmp = d09; - - d09 = _mm256_max_ps(d08, d09); - d08 = _mm256_min_ps(d08, tmp); - - tmp = d10; - - d10 = _mm256_max_ps(d07, d10); - d07 = _mm256_min_ps(d07, tmp); - - tmp = d11; - - d11 = _mm256_max_ps(d06, d11); - d06 = _mm256_min_ps(d06, tmp); - - tmp = d12; - - d12 = _mm256_max_ps(d05, d12); - d05 = _mm256_min_ps(d05, tmp); - - tmp = d13; - - d13 = _mm256_max_ps(d04, d13); - d04 = _mm256_min_ps(d04, tmp); - - tmp = d14; - - d14 = _mm256_max_ps(d03, d14); - d03 = _mm256_min_ps(d03, tmp); - - tmp = d15; - - d15 = _mm256_max_ps(d02, d15); - d02 = _mm256_min_ps(d02, tmp); - - sort_08v_merge_descending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_07v_merge_descending(d09, d10, d11, d12, d13, d14, d15); - } - static INLINE void sort_16v_ascending(__m256& d01, __m256& d02, __m256& d03, __m256& d04, __m256& d05, __m256& d06, __m256& d07, __m256& d08, __m256& d09, __m256& d10, __m256& d11, __m256& d12, __m256& d13, __m256& d14, __m256& d15, __m256& d16) { - __m256 tmp; - - sort_08v_ascending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_08v_descending(d09, d10, d11, d12, d13, d14, d15, d16); - - tmp = d09; - - d09 = _mm256_max_ps(d08, d09); - d08 = _mm256_min_ps(d08, tmp); - - tmp = d10; - - d10 = _mm256_max_ps(d07, d10); - d07 = _mm256_min_ps(d07, tmp); - - tmp = d11; - - d11 = _mm256_max_ps(d06, d11); - d06 = _mm256_min_ps(d06, tmp); - - tmp = d12; - - d12 = _mm256_max_ps(d05, d12); - d05 = _mm256_min_ps(d05, tmp); - - tmp = d13; - - d13 = _mm256_max_ps(d04, d13); - d04 = _mm256_min_ps(d04, tmp); - - tmp = d14; - - d14 = _mm256_max_ps(d03, d14); - d03 = _mm256_min_ps(d03, tmp); - - tmp = d15; - - d15 = _mm256_max_ps(d02, d15); - d02 = _mm256_min_ps(d02, tmp); - - tmp = d16; - - d16 = _mm256_max_ps(d01, d16); - d01 = _mm256_min_ps(d01, tmp); - - sort_08v_merge_ascending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_08v_merge_ascending(d09, d10, d11, d12, d13, d14, d15, d16); - } - static INLINE void sort_16v_descending(__m256& d01, __m256& d02, __m256& d03, __m256& d04, __m256& d05, __m256& d06, __m256& d07, __m256& d08, __m256& d09, __m256& d10, __m256& d11, __m256& d12, __m256& d13, __m256& d14, __m256& d15, __m256& d16) { - __m256 tmp; - - sort_08v_descending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_08v_ascending(d09, d10, d11, d12, d13, d14, d15, d16); - - tmp = d09; - - d09 = 
_mm256_max_ps(d08, d09); - d08 = _mm256_min_ps(d08, tmp); - - tmp = d10; - - d10 = _mm256_max_ps(d07, d10); - d07 = _mm256_min_ps(d07, tmp); - - tmp = d11; - - d11 = _mm256_max_ps(d06, d11); - d06 = _mm256_min_ps(d06, tmp); - - tmp = d12; - - d12 = _mm256_max_ps(d05, d12); - d05 = _mm256_min_ps(d05, tmp); - - tmp = d13; - - d13 = _mm256_max_ps(d04, d13); - d04 = _mm256_min_ps(d04, tmp); - - tmp = d14; - - d14 = _mm256_max_ps(d03, d14); - d03 = _mm256_min_ps(d03, tmp); - - tmp = d15; - - d15 = _mm256_max_ps(d02, d15); - d02 = _mm256_min_ps(d02, tmp); - - tmp = d16; - - d16 = _mm256_max_ps(d01, d16); - d01 = _mm256_min_ps(d01, tmp); - - sort_08v_merge_descending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_08v_merge_descending(d09, d10, d11, d12, d13, d14, d15, d16); - } - -static NOINLINE void sort_01v(float *ptr) { - __m256 d01 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 0)); - sort_01v_ascending(d01); - _mm256_storeu_ps((float *) ((__m256 *) ptr + 0), d01); -} - -static NOINLINE void sort_02v(float *ptr) { - __m256 d01 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 0)); - __m256 d02 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 1)); - sort_02v_ascending(d01, d02); - _mm256_storeu_ps((float *) ((__m256 *) ptr + 0), d01); - _mm256_storeu_ps((float *) ((__m256 *) ptr + 1), d02); -} - -static NOINLINE void sort_03v(float *ptr) { - __m256 d01 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 0)); - __m256 d02 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 1)); - __m256 d03 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 2)); - sort_03v_ascending(d01, d02, d03); - _mm256_storeu_ps((float *) ((__m256 *) ptr + 0), d01); - _mm256_storeu_ps((float *) ((__m256 *) ptr + 1), d02); - _mm256_storeu_ps((float *) ((__m256 *) ptr + 2), d03); -} - -static NOINLINE void sort_04v(float *ptr) { - __m256 d01 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 0)); - __m256 d02 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 1)); - __m256 d03 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 2)); - __m256 d04 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 3)); - sort_04v_ascending(d01, d02, d03, d04); - _mm256_storeu_ps((float *) ((__m256 *) ptr + 0), d01); - _mm256_storeu_ps((float *) ((__m256 *) ptr + 1), d02); - _mm256_storeu_ps((float *) ((__m256 *) ptr + 2), d03); - _mm256_storeu_ps((float *) ((__m256 *) ptr + 3), d04); -} - -static NOINLINE void sort_05v(float *ptr) { - __m256 d01 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 0)); - __m256 d02 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 1)); - __m256 d03 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 2)); - __m256 d04 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 3)); - __m256 d05 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 4)); - sort_05v_ascending(d01, d02, d03, d04, d05); - _mm256_storeu_ps((float *) ((__m256 *) ptr + 0), d01); - _mm256_storeu_ps((float *) ((__m256 *) ptr + 1), d02); - _mm256_storeu_ps((float *) ((__m256 *) ptr + 2), d03); - _mm256_storeu_ps((float *) ((__m256 *) ptr + 3), d04); - _mm256_storeu_ps((float *) ((__m256 *) ptr + 4), d05); -} - -static NOINLINE void sort_06v(float *ptr) { - __m256 d01 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 0)); - __m256 d02 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 1)); - __m256 d03 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 2)); - __m256 d04 = _mm256_loadu_ps((float const *) ((__m256 const *) 
ptr + 3)); - __m256 d05 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 4)); - __m256 d06 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 5)); - sort_06v_ascending(d01, d02, d03, d04, d05, d06); - _mm256_storeu_ps((float *) ((__m256 *) ptr + 0), d01); - _mm256_storeu_ps((float *) ((__m256 *) ptr + 1), d02); - _mm256_storeu_ps((float *) ((__m256 *) ptr + 2), d03); - _mm256_storeu_ps((float *) ((__m256 *) ptr + 3), d04); - _mm256_storeu_ps((float *) ((__m256 *) ptr + 4), d05); - _mm256_storeu_ps((float *) ((__m256 *) ptr + 5), d06); -} - -static NOINLINE void sort_07v(float *ptr) { - __m256 d01 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 0)); - __m256 d02 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 1)); - __m256 d03 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 2)); - __m256 d04 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 3)); - __m256 d05 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 4)); - __m256 d06 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 5)); - __m256 d07 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 6)); - sort_07v_ascending(d01, d02, d03, d04, d05, d06, d07); - _mm256_storeu_ps((float *) ((__m256 *) ptr + 0), d01); - _mm256_storeu_ps((float *) ((__m256 *) ptr + 1), d02); - _mm256_storeu_ps((float *) ((__m256 *) ptr + 2), d03); - _mm256_storeu_ps((float *) ((__m256 *) ptr + 3), d04); - _mm256_storeu_ps((float *) ((__m256 *) ptr + 4), d05); - _mm256_storeu_ps((float *) ((__m256 *) ptr + 5), d06); - _mm256_storeu_ps((float *) ((__m256 *) ptr + 6), d07); -} - -static NOINLINE void sort_08v(float *ptr) { - __m256 d01 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 0)); - __m256 d02 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 1)); - __m256 d03 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 2)); - __m256 d04 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 3)); - __m256 d05 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 4)); - __m256 d06 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 5)); - __m256 d07 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 6)); - __m256 d08 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 7)); - sort_08v_ascending(d01, d02, d03, d04, d05, d06, d07, d08); - _mm256_storeu_ps((float *) ((__m256 *) ptr + 0), d01); - _mm256_storeu_ps((float *) ((__m256 *) ptr + 1), d02); - _mm256_storeu_ps((float *) ((__m256 *) ptr + 2), d03); - _mm256_storeu_ps((float *) ((__m256 *) ptr + 3), d04); - _mm256_storeu_ps((float *) ((__m256 *) ptr + 4), d05); - _mm256_storeu_ps((float *) ((__m256 *) ptr + 5), d06); - _mm256_storeu_ps((float *) ((__m256 *) ptr + 6), d07); - _mm256_storeu_ps((float *) ((__m256 *) ptr + 7), d08); -} - -static NOINLINE void sort_09v(float *ptr) { - __m256 d01 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 0)); - __m256 d02 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 1)); - __m256 d03 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 2)); - __m256 d04 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 3)); - __m256 d05 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 4)); - __m256 d06 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 5)); - __m256 d07 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 6)); - __m256 d08 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 7)); - __m256 d09 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 8)); - sort_09v_ascending(d01, d02, d03, d04, d05, 
d06, d07, d08, d09); - _mm256_storeu_ps((float *) ((__m256 *) ptr + 0), d01); - _mm256_storeu_ps((float *) ((__m256 *) ptr + 1), d02); - _mm256_storeu_ps((float *) ((__m256 *) ptr + 2), d03); - _mm256_storeu_ps((float *) ((__m256 *) ptr + 3), d04); - _mm256_storeu_ps((float *) ((__m256 *) ptr + 4), d05); - _mm256_storeu_ps((float *) ((__m256 *) ptr + 5), d06); - _mm256_storeu_ps((float *) ((__m256 *) ptr + 6), d07); - _mm256_storeu_ps((float *) ((__m256 *) ptr + 7), d08); - _mm256_storeu_ps((float *) ((__m256 *) ptr + 8), d09); -} - -static NOINLINE void sort_10v(float *ptr) { - __m256 d01 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 0)); - __m256 d02 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 1)); - __m256 d03 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 2)); - __m256 d04 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 3)); - __m256 d05 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 4)); - __m256 d06 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 5)); - __m256 d07 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 6)); - __m256 d08 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 7)); - __m256 d09 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 8)); - __m256 d10 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 9)); - sort_10v_ascending(d01, d02, d03, d04, d05, d06, d07, d08, d09, d10); - _mm256_storeu_ps((float *) ((__m256 *) ptr + 0), d01); - _mm256_storeu_ps((float *) ((__m256 *) ptr + 1), d02); - _mm256_storeu_ps((float *) ((__m256 *) ptr + 2), d03); - _mm256_storeu_ps((float *) ((__m256 *) ptr + 3), d04); - _mm256_storeu_ps((float *) ((__m256 *) ptr + 4), d05); - _mm256_storeu_ps((float *) ((__m256 *) ptr + 5), d06); - _mm256_storeu_ps((float *) ((__m256 *) ptr + 6), d07); - _mm256_storeu_ps((float *) ((__m256 *) ptr + 7), d08); - _mm256_storeu_ps((float *) ((__m256 *) ptr + 8), d09); - _mm256_storeu_ps((float *) ((__m256 *) ptr + 9), d10); -} - -static NOINLINE void sort_11v(float *ptr) { - __m256 d01 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 0)); - __m256 d02 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 1)); - __m256 d03 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 2)); - __m256 d04 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 3)); - __m256 d05 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 4)); - __m256 d06 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 5)); - __m256 d07 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 6)); - __m256 d08 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 7)); - __m256 d09 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 8)); - __m256 d10 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 9)); - __m256 d11 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 10)); - sort_11v_ascending(d01, d02, d03, d04, d05, d06, d07, d08, d09, d10, d11); - _mm256_storeu_ps((float *) ((__m256 *) ptr + 0), d01); - _mm256_storeu_ps((float *) ((__m256 *) ptr + 1), d02); - _mm256_storeu_ps((float *) ((__m256 *) ptr + 2), d03); - _mm256_storeu_ps((float *) ((__m256 *) ptr + 3), d04); - _mm256_storeu_ps((float *) ((__m256 *) ptr + 4), d05); - _mm256_storeu_ps((float *) ((__m256 *) ptr + 5), d06); - _mm256_storeu_ps((float *) ((__m256 *) ptr + 6), d07); - _mm256_storeu_ps((float *) ((__m256 *) ptr + 7), d08); - _mm256_storeu_ps((float *) ((__m256 *) ptr + 8), d09); - _mm256_storeu_ps((float *) ((__m256 *) ptr + 9), d10); - _mm256_storeu_ps((float *) 
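// [Editor's note] Stepping back from the unrolled kernels: the recursion they
// hard-code is the textbook bitonic sort, i.e. sort one half in the opposite
// direction so the whole becomes bitonic, then merge. A scalar reference for
// orientation only (assumes n is a power of two; not part of the patch):
static void bitonic_merge_ref(float* a, int n, bool asc) {
    if (n < 2) return;
    for (int i = 0; i < n / 2; ++i) {
        if ((a[i] > a[i + n / 2]) == asc) {          // pair out of order for `asc`
            float t = a[i]; a[i] = a[i + n / 2]; a[i + n / 2] = t;
        }
    }
    bitonic_merge_ref(a, n / 2, asc);                // both halves stay bitonic
    bitonic_merge_ref(a + n / 2, n / 2, asc);
}
static void bitonic_sort_ref(float* a, int n, bool asc) {
    if (n < 2) return;
    bitonic_sort_ref(a, n / 2, !asc);                // opposite-direction half...
    bitonic_sort_ref(a + n / 2, n / 2, asc);         // ...makes the whole bitonic
    bitonic_merge_ref(a, n, asc);
}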
((__m256 *) ptr + 10), d11); -} - -static NOINLINE void sort_12v(float *ptr) { - __m256 d01 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 0)); - __m256 d02 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 1)); - __m256 d03 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 2)); - __m256 d04 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 3)); - __m256 d05 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 4)); - __m256 d06 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 5)); - __m256 d07 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 6)); - __m256 d08 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 7)); - __m256 d09 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 8)); - __m256 d10 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 9)); - __m256 d11 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 10)); - __m256 d12 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 11)); - sort_12v_ascending(d01, d02, d03, d04, d05, d06, d07, d08, d09, d10, d11, d12); - _mm256_storeu_ps((float *) ((__m256 *) ptr + 0), d01); - _mm256_storeu_ps((float *) ((__m256 *) ptr + 1), d02); - _mm256_storeu_ps((float *) ((__m256 *) ptr + 2), d03); - _mm256_storeu_ps((float *) ((__m256 *) ptr + 3), d04); - _mm256_storeu_ps((float *) ((__m256 *) ptr + 4), d05); - _mm256_storeu_ps((float *) ((__m256 *) ptr + 5), d06); - _mm256_storeu_ps((float *) ((__m256 *) ptr + 6), d07); - _mm256_storeu_ps((float *) ((__m256 *) ptr + 7), d08); - _mm256_storeu_ps((float *) ((__m256 *) ptr + 8), d09); - _mm256_storeu_ps((float *) ((__m256 *) ptr + 9), d10); - _mm256_storeu_ps((float *) ((__m256 *) ptr + 10), d11); - _mm256_storeu_ps((float *) ((__m256 *) ptr + 11), d12); -} - -static NOINLINE void sort_13v(float *ptr) { - __m256 d01 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 0)); - __m256 d02 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 1)); - __m256 d03 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 2)); - __m256 d04 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 3)); - __m256 d05 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 4)); - __m256 d06 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 5)); - __m256 d07 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 6)); - __m256 d08 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 7)); - __m256 d09 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 8)); - __m256 d10 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 9)); - __m256 d11 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 10)); - __m256 d12 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 11)); - __m256 d13 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 12)); - sort_13v_ascending(d01, d02, d03, d04, d05, d06, d07, d08, d09, d10, d11, d12, d13); - _mm256_storeu_ps((float *) ((__m256 *) ptr + 0), d01); - _mm256_storeu_ps((float *) ((__m256 *) ptr + 1), d02); - _mm256_storeu_ps((float *) ((__m256 *) ptr + 2), d03); - _mm256_storeu_ps((float *) ((__m256 *) ptr + 3), d04); - _mm256_storeu_ps((float *) ((__m256 *) ptr + 4), d05); - _mm256_storeu_ps((float *) ((__m256 *) ptr + 5), d06); - _mm256_storeu_ps((float *) ((__m256 *) ptr + 6), d07); - _mm256_storeu_ps((float *) ((__m256 *) ptr + 7), d08); - _mm256_storeu_ps((float *) ((__m256 *) ptr + 8), d09); - _mm256_storeu_ps((float *) ((__m256 *) ptr + 9), d10); - _mm256_storeu_ps((float *) ((__m256 *) ptr + 10), d11); - _mm256_storeu_ps((float *) ((__m256 
*) ptr + 11), d12); - _mm256_storeu_ps((float *) ((__m256 *) ptr + 12), d13); -} - -static NOINLINE void sort_14v(float *ptr) { - __m256 d01 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 0)); - __m256 d02 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 1)); - __m256 d03 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 2)); - __m256 d04 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 3)); - __m256 d05 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 4)); - __m256 d06 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 5)); - __m256 d07 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 6)); - __m256 d08 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 7)); - __m256 d09 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 8)); - __m256 d10 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 9)); - __m256 d11 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 10)); - __m256 d12 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 11)); - __m256 d13 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 12)); - __m256 d14 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 13)); - sort_14v_ascending(d01, d02, d03, d04, d05, d06, d07, d08, d09, d10, d11, d12, d13, d14); - _mm256_storeu_ps((float *) ((__m256 *) ptr + 0), d01); - _mm256_storeu_ps((float *) ((__m256 *) ptr + 1), d02); - _mm256_storeu_ps((float *) ((__m256 *) ptr + 2), d03); - _mm256_storeu_ps((float *) ((__m256 *) ptr + 3), d04); - _mm256_storeu_ps((float *) ((__m256 *) ptr + 4), d05); - _mm256_storeu_ps((float *) ((__m256 *) ptr + 5), d06); - _mm256_storeu_ps((float *) ((__m256 *) ptr + 6), d07); - _mm256_storeu_ps((float *) ((__m256 *) ptr + 7), d08); - _mm256_storeu_ps((float *) ((__m256 *) ptr + 8), d09); - _mm256_storeu_ps((float *) ((__m256 *) ptr + 9), d10); - _mm256_storeu_ps((float *) ((__m256 *) ptr + 10), d11); - _mm256_storeu_ps((float *) ((__m256 *) ptr + 11), d12); - _mm256_storeu_ps((float *) ((__m256 *) ptr + 12), d13); - _mm256_storeu_ps((float *) ((__m256 *) ptr + 13), d14); -} - -static NOINLINE void sort_15v(float *ptr) { - __m256 d01 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 0)); - __m256 d02 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 1)); - __m256 d03 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 2)); - __m256 d04 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 3)); - __m256 d05 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 4)); - __m256 d06 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 5)); - __m256 d07 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 6)); - __m256 d08 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 7)); - __m256 d09 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 8)); - __m256 d10 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 9)); - __m256 d11 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 10)); - __m256 d12 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 11)); - __m256 d13 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 12)); - __m256 d14 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 13)); - __m256 d15 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 14)); - sort_15v_ascending(d01, d02, d03, d04, d05, d06, d07, d08, d09, d10, d11, d12, d13, d14, d15); - _mm256_storeu_ps((float *) ((__m256 *) ptr + 0), d01); - _mm256_storeu_ps((float *) ((__m256 *) ptr + 1), d02); - _mm256_storeu_ps((float *) ((__m256 *) ptr + 2), d03); - 
_mm256_storeu_ps((float *) ((__m256 *) ptr + 3), d04); - _mm256_storeu_ps((float *) ((__m256 *) ptr + 4), d05); - _mm256_storeu_ps((float *) ((__m256 *) ptr + 5), d06); - _mm256_storeu_ps((float *) ((__m256 *) ptr + 6), d07); - _mm256_storeu_ps((float *) ((__m256 *) ptr + 7), d08); - _mm256_storeu_ps((float *) ((__m256 *) ptr + 8), d09); - _mm256_storeu_ps((float *) ((__m256 *) ptr + 9), d10); - _mm256_storeu_ps((float *) ((__m256 *) ptr + 10), d11); - _mm256_storeu_ps((float *) ((__m256 *) ptr + 11), d12); - _mm256_storeu_ps((float *) ((__m256 *) ptr + 12), d13); - _mm256_storeu_ps((float *) ((__m256 *) ptr + 13), d14); - _mm256_storeu_ps((float *) ((__m256 *) ptr + 14), d15); -} - -static NOINLINE void sort_16v(float *ptr) { - __m256 d01 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 0)); - __m256 d02 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 1)); - __m256 d03 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 2)); - __m256 d04 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 3)); - __m256 d05 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 4)); - __m256 d06 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 5)); - __m256 d07 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 6)); - __m256 d08 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 7)); - __m256 d09 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 8)); - __m256 d10 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 9)); - __m256 d11 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 10)); - __m256 d12 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 11)); - __m256 d13 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 12)); - __m256 d14 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 13)); - __m256 d15 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 14)); - __m256 d16 = _mm256_loadu_ps((float const *) ((__m256 const *) ptr + 15)); - sort_16v_ascending(d01, d02, d03, d04, d05, d06, d07, d08, d09, d10, d11, d12, d13, d14, d15, d16); - _mm256_storeu_ps((float *) ((__m256 *) ptr + 0), d01); - _mm256_storeu_ps((float *) ((__m256 *) ptr + 1), d02); - _mm256_storeu_ps((float *) ((__m256 *) ptr + 2), d03); - _mm256_storeu_ps((float *) ((__m256 *) ptr + 3), d04); - _mm256_storeu_ps((float *) ((__m256 *) ptr + 4), d05); - _mm256_storeu_ps((float *) ((__m256 *) ptr + 5), d06); - _mm256_storeu_ps((float *) ((__m256 *) ptr + 6), d07); - _mm256_storeu_ps((float *) ((__m256 *) ptr + 7), d08); - _mm256_storeu_ps((float *) ((__m256 *) ptr + 8), d09); - _mm256_storeu_ps((float *) ((__m256 *) ptr + 9), d10); - _mm256_storeu_ps((float *) ((__m256 *) ptr + 10), d11); - _mm256_storeu_ps((float *) ((__m256 *) ptr + 11), d12); - _mm256_storeu_ps((float *) ((__m256 *) ptr + 12), d13); - _mm256_storeu_ps((float *) ((__m256 *) ptr + 13), d14); - _mm256_storeu_ps((float *) ((__m256 *) ptr + 14), d15); - _mm256_storeu_ps((float *) ((__m256 *) ptr + 15), d16); -} - static void sort(float *ptr, size_t length) { - const int N = 8; - - switch(length / N) { - case 1: sort_01v(ptr); break; - case 2: sort_02v(ptr); break; - case 3: sort_03v(ptr); break; - case 4: sort_04v(ptr); break; - case 5: sort_05v(ptr); break; - case 6: sort_06v(ptr); break; - case 7: sort_07v(ptr); break; - case 8: sort_08v(ptr); break; - case 9: sort_09v(ptr); break; - case 10: sort_10v(ptr); break; - case 11: sort_11v(ptr); break; - case 12: sort_12v(ptr); break; - case 13: sort_13v(ptr); break; - case 14: sort_14v(ptr); break; - case 15: sort_15v(ptr); 
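// [Editor's note] The surrounding dispatch keys on length / N with N = 8
// floats per __m256, so it only fully sorts lengths that are multiples of 8
// between 8 and 128; other lengths either fall through untouched or leave a
// tail unsorted. Presumably the caller enforces that contract; a defensive
// wrapper might look like this (name and shape are mine):
static bool try_small_sort(float* p, size_t len) {
    const size_t kLane = 8;                                // floats per vector
    if (len < kLane || len > 16 * kLane || len % kLane != 0)
        return false;                                      // not a supported shape
    gcsort::smallsort::bitonic<float>::sort(p, len);
    return true;
}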
break;
-        case 16: sort_16v(ptr); break;
-    }
-}
-};
-}
-}
-#endif
diff --git a/src/coreclr/src/gc/smallsort/bitonic_sort.AVX2.int32_t.generated.h b/src/coreclr/src/gc/smallsort/bitonic_sort.AVX2.int32_t.generated.h
deleted file mode 100644
index 308bae8fa32694..00000000000000
--- a/src/coreclr/src/gc/smallsort/bitonic_sort.AVX2.int32_t.generated.h
+++ /dev/null
@@ -1,1532 +0,0 @@
-/////////////////////////////////////////////////////////////////////////////
-////
-// This file was auto-generated by a tool at 2020-05-31 19:46:17
-//
-// It is recommended you DO NOT directly edit this file but instead edit
-// the code-generator that generated this source file instead.
-/////////////////////////////////////////////////////////////////////////////
-
-#ifndef BITONIC_SORT_AVX2_INT32_T_H
-#define BITONIC_SORT_AVX2_INT32_T_H
-
-#include <immintrin.h>
-#include "bitonic_sort.h"
-
-#ifdef _MSC_VER
-    // MSVC
-    #define INLINE __forceinline
-    #define NOINLINE __declspec(noinline)
-#else
-    // GCC + Clang
-    #define INLINE __attribute__((always_inline))
-    #define NOINLINE __attribute__((noinline))
-#endif
-
-#define i2d _mm256_castsi256_pd
-#define d2i _mm256_castpd_si256
-#define i2s _mm256_castsi256_ps
-#define s2i _mm256_castps_si256
-#define s2d _mm256_castps_pd
-#define d2s _mm256_castpd_ps
-
-namespace gcsort {
-namespace smallsort {
-template<> struct bitonic<int32_t> {
-public:
-
-    static INLINE void sort_01v_ascending(__m256i& d01) {
-        __m256i min, max, s;
-
-        s = _mm256_shuffle_epi32(d01, 0xB1);
-
-        min = _mm256_min_epi32(s, d01);
-        max = _mm256_max_epi32(s, d01);
-        d01 = _mm256_blend_epi32(min, max, 0xAA);
-
-        s = _mm256_shuffle_epi32(d01, 0x1B);
-
-        min = _mm256_min_epi32(s, d01);
-        max = _mm256_max_epi32(s, d01);
-        d01 = _mm256_blend_epi32(min, max, 0xCC);
-
-        s = _mm256_shuffle_epi32(d01, 0xB1);
-
-        min = _mm256_min_epi32(s, d01);
-        max = _mm256_max_epi32(s, d01);
-        d01 = _mm256_blend_epi32(min, max, 0xAA);
-
-        s = d2i(_mm256_permute4x64_pd(i2d(_mm256_shuffle_epi32(d01, 0x1B)), 0x4E));
-        min = _mm256_min_epi32(s, d01);
-        max = _mm256_max_epi32(s, d01);
-        d01 = _mm256_blend_epi32(min, max, 0xF0);
-
-        s = _mm256_shuffle_epi32(d01, 0x4E);
-        min = _mm256_min_epi32(s, d01);
-        max = _mm256_max_epi32(s, d01);
-        d01 = _mm256_blend_epi32(min, max, 0xCC);
-
-        s = _mm256_shuffle_epi32(d01, 0xB1);
-        min = _mm256_min_epi32(s, d01);
-        max = _mm256_max_epi32(s, d01);
-        d01 = _mm256_blend_epi32(min, max, 0xAA);
-    }
-    static INLINE void sort_01v_merge_ascending(__m256i& d01) {
-        __m256i min, max, s;
-
-        s = d2i(_mm256_permute4x64_pd(i2d(d01), 0x4E));
-        min = _mm256_min_epi32(s, d01);
-        max = _mm256_max_epi32(s, d01);
-        d01 = _mm256_blend_epi32(min, max, 0xF0);
-
-        s = _mm256_shuffle_epi32(d01, 0x4E);
-
-        min = _mm256_min_epi32(s, d01);
-        max = _mm256_max_epi32(s, d01);
-        d01 = _mm256_blend_epi32(min, max, 0xCC);
-
-        s = _mm256_shuffle_epi32(d01, 0xB1);
-
-        min = _mm256_min_epi32(s, d01);
-        max = _mm256_max_epi32(s, d01);
-        d01 = _mm256_blend_epi32(min, max, 0xAA);
-    }
-    static INLINE void sort_01v_descending(__m256i& d01) {
-        __m256i min, max, s;
-
-        s = _mm256_shuffle_epi32(d01, 0xB1);
-
-        min = _mm256_min_epi32(s, d01);
-        max = _mm256_max_epi32(s, d01);
-        d01 = _mm256_blend_epi32(max, min, 0xAA);
-
-        s = _mm256_shuffle_epi32(d01, 0x1B);
-
-        min = _mm256_min_epi32(s, d01);
-        max = _mm256_max_epi32(s, d01);
-        d01 = _mm256_blend_epi32(max, min, 0xCC);
-
-        s = _mm256_shuffle_epi32(d01, 0xB1);
-
-        min = _mm256_min_epi32(s, d01);
-        max = _mm256_max_epi32(s, d01);
-        d01 = _mm256_blend_epi32(max, min, 0xAA);
-
-        s =
d2i(_mm256_permute4x64_pd(i2d(_mm256_shuffle_epi32(d01, 0x1B)), 0x4E)); - min = _mm256_min_epi32(s, d01); - max = _mm256_max_epi32(s, d01); - d01 = _mm256_blend_epi32(max, min, 0xF0); - - s = _mm256_shuffle_epi32(d01, 0x4E); - min = _mm256_min_epi32(s, d01); - max = _mm256_max_epi32(s, d01); - d01 = _mm256_blend_epi32(max, min, 0xCC); - - s = _mm256_shuffle_epi32(d01, 0xB1); - min = _mm256_min_epi32(s, d01); - max = _mm256_max_epi32(s, d01); - d01 = _mm256_blend_epi32(max, min, 0xAA); -} - static INLINE void sort_01v_merge_descending(__m256i& d01) { - __m256i min, max, s; - - s = d2i(_mm256_permute4x64_pd(i2d(d01), 0x4E)); - min = _mm256_min_epi32(s, d01); - max = _mm256_max_epi32(s, d01); - d01 = _mm256_blend_epi32(max, min, 0xF0); - - s = _mm256_shuffle_epi32(d01, 0x4E); - - min = _mm256_min_epi32(s, d01); - max = _mm256_max_epi32(s, d01); - d01 = _mm256_blend_epi32(max, min, 0xCC); - - s = _mm256_shuffle_epi32(d01, 0xB1); - - min = _mm256_min_epi32(s, d01); - max = _mm256_max_epi32(s, d01); - d01 = _mm256_blend_epi32(max, min, 0xAA); - } - static INLINE void sort_02v_ascending(__m256i& d01, __m256i& d02) { - __m256i tmp; - - sort_01v_ascending(d01); - sort_01v_descending(d02); - - tmp = d02; - - d02 = _mm256_max_epi32(d01, d02); - d01 = _mm256_min_epi32(d01, tmp); - - sort_01v_merge_ascending(d01); - sort_01v_merge_ascending(d02); - } - static INLINE void sort_02v_descending(__m256i& d01, __m256i& d02) { - __m256i tmp; - - sort_01v_descending(d01); - sort_01v_ascending(d02); - - tmp = d02; - - d02 = _mm256_max_epi32(d01, d02); - d01 = _mm256_min_epi32(d01, tmp); - - sort_01v_merge_descending(d01); - sort_01v_merge_descending(d02); - } - static INLINE void sort_02v_merge_ascending(__m256i& d01, __m256i& d02) { - __m256i tmp; - - tmp = d01; - - d01 = _mm256_min_epi32(d02, d01); - - d02 = _mm256_max_epi32(d02, tmp); - - sort_01v_merge_ascending(d01); - sort_01v_merge_ascending(d02); - } - static INLINE void sort_02v_merge_descending(__m256i& d01, __m256i& d02) { - __m256i tmp; - - tmp = d01; - - d01 = _mm256_min_epi32(d02, d01); - - d02 = _mm256_max_epi32(d02, tmp); - - sort_01v_merge_descending(d01); - sort_01v_merge_descending(d02); - } - static INLINE void sort_03v_ascending(__m256i& d01, __m256i& d02, __m256i& d03) { - __m256i tmp; - - sort_02v_ascending(d01, d02); - sort_01v_descending(d03); - - tmp = d03; - - d03 = _mm256_max_epi32(d02, d03); - d02 = _mm256_min_epi32(d02, tmp); - - sort_02v_merge_ascending(d01, d02); - sort_01v_merge_ascending(d03); - } - static INLINE void sort_03v_descending(__m256i& d01, __m256i& d02, __m256i& d03) { - __m256i tmp; - - sort_02v_descending(d01, d02); - sort_01v_ascending(d03); - - tmp = d03; - - d03 = _mm256_max_epi32(d02, d03); - d02 = _mm256_min_epi32(d02, tmp); - - sort_02v_merge_descending(d01, d02); - sort_01v_merge_descending(d03); - } - static INLINE void sort_03v_merge_ascending(__m256i& d01, __m256i& d02, __m256i& d03) { - __m256i tmp; - - tmp = d01; - - d01 = _mm256_min_epi32(d03, d01); - - d03 = _mm256_max_epi32(d03, tmp); - - sort_02v_merge_ascending(d01, d02); - sort_01v_merge_ascending(d03); - } - static INLINE void sort_03v_merge_descending(__m256i& d01, __m256i& d02, __m256i& d03) { - __m256i tmp; - - tmp = d01; - - d01 = _mm256_min_epi32(d03, d01); - - d03 = _mm256_max_epi32(d03, tmp); - - sort_02v_merge_descending(d01, d02); - sort_01v_merge_descending(d03); - } - static INLINE void sort_04v_ascending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04) { - __m256i tmp; - - sort_02v_ascending(d01, d02); - 
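// [Editor's note] Decoding the shuffle immediates used throughout these
// networks: 0xB1 swaps adjacent elements (1,0,3,2) within each 128-bit lane,
// 0x1B reverses each 128-bit lane (3,2,1,0), 0x4E swaps the two 64-bit halves
// of each lane (2,3,0,1), and _mm256_permute4x64_pd(..., 0x4E) swaps the
// 128-bit lanes themselves. In the blends, a set mask bit selects the second
// operand, so _mm256_blend_epi32(min, max, 0xAA) routes the larger element of
// every adjacent pair into the odd lane (the descending variants just swap
// the operand order). For example, the first stage of sort_01v_ascending is
// eight independent 2-element sorters:
//
//     s   = _mm256_shuffle_epi32(d01, 0xB1);     // each element's neighbor
//     min = _mm256_min_epi32(s, d01);
//     max = _mm256_max_epi32(s, d01);
//     d01 = _mm256_blend_epi32(min, max, 0xAA);  // odd lanes take the max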
sort_02v_descending(d03, d04); - - tmp = d03; - - d03 = _mm256_max_epi32(d02, d03); - d02 = _mm256_min_epi32(d02, tmp); - - tmp = d04; - - d04 = _mm256_max_epi32(d01, d04); - d01 = _mm256_min_epi32(d01, tmp); - - sort_02v_merge_ascending(d01, d02); - sort_02v_merge_ascending(d03, d04); - } - static INLINE void sort_04v_descending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04) { - __m256i tmp; - - sort_02v_descending(d01, d02); - sort_02v_ascending(d03, d04); - - tmp = d03; - - d03 = _mm256_max_epi32(d02, d03); - d02 = _mm256_min_epi32(d02, tmp); - - tmp = d04; - - d04 = _mm256_max_epi32(d01, d04); - d01 = _mm256_min_epi32(d01, tmp); - - sort_02v_merge_descending(d01, d02); - sort_02v_merge_descending(d03, d04); - } - static INLINE void sort_04v_merge_ascending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04) { - __m256i tmp; - - tmp = d01; - - d01 = _mm256_min_epi32(d03, d01); - - d03 = _mm256_max_epi32(d03, tmp); - - tmp = d02; - - d02 = _mm256_min_epi32(d04, d02); - - d04 = _mm256_max_epi32(d04, tmp); - - sort_02v_merge_ascending(d01, d02); - sort_02v_merge_ascending(d03, d04); - } - static INLINE void sort_04v_merge_descending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04) { - __m256i tmp; - - tmp = d01; - - d01 = _mm256_min_epi32(d03, d01); - - d03 = _mm256_max_epi32(d03, tmp); - - tmp = d02; - - d02 = _mm256_min_epi32(d04, d02); - - d04 = _mm256_max_epi32(d04, tmp); - - sort_02v_merge_descending(d01, d02); - sort_02v_merge_descending(d03, d04); - } - static INLINE void sort_05v_ascending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05) { - __m256i tmp; - - sort_04v_ascending(d01, d02, d03, d04); - sort_01v_descending(d05); - - tmp = d05; - - d05 = _mm256_max_epi32(d04, d05); - d04 = _mm256_min_epi32(d04, tmp); - - sort_04v_merge_ascending(d01, d02, d03, d04); - sort_01v_merge_ascending(d05); - } - static INLINE void sort_05v_descending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05) { - __m256i tmp; - - sort_04v_descending(d01, d02, d03, d04); - sort_01v_ascending(d05); - - tmp = d05; - - d05 = _mm256_max_epi32(d04, d05); - d04 = _mm256_min_epi32(d04, tmp); - - sort_04v_merge_descending(d01, d02, d03, d04); - sort_01v_merge_descending(d05); - } - static INLINE void sort_05v_merge_ascending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05) { - __m256i tmp; - - tmp = d01; - - d01 = _mm256_min_epi32(d05, d01); - - d05 = _mm256_max_epi32(d05, tmp); - - sort_04v_merge_ascending(d01, d02, d03, d04); - sort_01v_merge_ascending(d05); - } - static INLINE void sort_05v_merge_descending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05) { - __m256i tmp; - - tmp = d01; - - d01 = _mm256_min_epi32(d05, d01); - - d05 = _mm256_max_epi32(d05, tmp); - - sort_04v_merge_descending(d01, d02, d03, d04); - sort_01v_merge_descending(d05); - } - static INLINE void sort_06v_ascending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06) { - __m256i tmp; - - sort_04v_ascending(d01, d02, d03, d04); - sort_02v_descending(d05, d06); - - tmp = d05; - - d05 = _mm256_max_epi32(d04, d05); - d04 = _mm256_min_epi32(d04, tmp); - - tmp = d06; - - d06 = _mm256_max_epi32(d03, d06); - d03 = _mm256_min_epi32(d03, tmp); - - sort_04v_merge_ascending(d01, d02, d03, d04); - sort_02v_merge_ascending(d05, d06); - } - static INLINE void sort_06v_descending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06) { - __m256i tmp; - - sort_04v_descending(d01, d02, d03, 
d04); - sort_02v_ascending(d05, d06); - - tmp = d05; - - d05 = _mm256_max_epi32(d04, d05); - d04 = _mm256_min_epi32(d04, tmp); - - tmp = d06; - - d06 = _mm256_max_epi32(d03, d06); - d03 = _mm256_min_epi32(d03, tmp); - - sort_04v_merge_descending(d01, d02, d03, d04); - sort_02v_merge_descending(d05, d06); - } - static INLINE void sort_06v_merge_ascending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06) { - __m256i tmp; - - tmp = d01; - - d01 = _mm256_min_epi32(d05, d01); - - d05 = _mm256_max_epi32(d05, tmp); - - tmp = d02; - - d02 = _mm256_min_epi32(d06, d02); - - d06 = _mm256_max_epi32(d06, tmp); - - sort_04v_merge_ascending(d01, d02, d03, d04); - sort_02v_merge_ascending(d05, d06); - } - static INLINE void sort_06v_merge_descending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06) { - __m256i tmp; - - tmp = d01; - - d01 = _mm256_min_epi32(d05, d01); - - d05 = _mm256_max_epi32(d05, tmp); - - tmp = d02; - - d02 = _mm256_min_epi32(d06, d02); - - d06 = _mm256_max_epi32(d06, tmp); - - sort_04v_merge_descending(d01, d02, d03, d04); - sort_02v_merge_descending(d05, d06); - } - static INLINE void sort_07v_ascending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07) { - __m256i tmp; - - sort_04v_ascending(d01, d02, d03, d04); - sort_03v_descending(d05, d06, d07); - - tmp = d05; - - d05 = _mm256_max_epi32(d04, d05); - d04 = _mm256_min_epi32(d04, tmp); - - tmp = d06; - - d06 = _mm256_max_epi32(d03, d06); - d03 = _mm256_min_epi32(d03, tmp); - - tmp = d07; - - d07 = _mm256_max_epi32(d02, d07); - d02 = _mm256_min_epi32(d02, tmp); - - sort_04v_merge_ascending(d01, d02, d03, d04); - sort_03v_merge_ascending(d05, d06, d07); - } - static INLINE void sort_07v_descending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07) { - __m256i tmp; - - sort_04v_descending(d01, d02, d03, d04); - sort_03v_ascending(d05, d06, d07); - - tmp = d05; - - d05 = _mm256_max_epi32(d04, d05); - d04 = _mm256_min_epi32(d04, tmp); - - tmp = d06; - - d06 = _mm256_max_epi32(d03, d06); - d03 = _mm256_min_epi32(d03, tmp); - - tmp = d07; - - d07 = _mm256_max_epi32(d02, d07); - d02 = _mm256_min_epi32(d02, tmp); - - sort_04v_merge_descending(d01, d02, d03, d04); - sort_03v_merge_descending(d05, d06, d07); - } - static INLINE void sort_07v_merge_ascending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07) { - __m256i tmp; - - tmp = d01; - - d01 = _mm256_min_epi32(d05, d01); - - d05 = _mm256_max_epi32(d05, tmp); - - tmp = d02; - - d02 = _mm256_min_epi32(d06, d02); - - d06 = _mm256_max_epi32(d06, tmp); - - tmp = d03; - - d03 = _mm256_min_epi32(d07, d03); - - d07 = _mm256_max_epi32(d07, tmp); - - sort_04v_merge_ascending(d01, d02, d03, d04); - sort_03v_merge_ascending(d05, d06, d07); - } - static INLINE void sort_07v_merge_descending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07) { - __m256i tmp; - - tmp = d01; - - d01 = _mm256_min_epi32(d05, d01); - - d05 = _mm256_max_epi32(d05, tmp); - - tmp = d02; - - d02 = _mm256_min_epi32(d06, d02); - - d06 = _mm256_max_epi32(d06, tmp); - - tmp = d03; - - d03 = _mm256_min_epi32(d07, d03); - - d07 = _mm256_max_epi32(d07, tmp); - - sort_04v_merge_descending(d01, d02, d03, d04); - sort_03v_merge_descending(d05, d06, d07); - } - static INLINE void sort_08v_ascending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, 
__m256i& d07, __m256i& d08) { - __m256i tmp; - - sort_04v_ascending(d01, d02, d03, d04); - sort_04v_descending(d05, d06, d07, d08); - - tmp = d05; - - d05 = _mm256_max_epi32(d04, d05); - d04 = _mm256_min_epi32(d04, tmp); - - tmp = d06; - - d06 = _mm256_max_epi32(d03, d06); - d03 = _mm256_min_epi32(d03, tmp); - - tmp = d07; - - d07 = _mm256_max_epi32(d02, d07); - d02 = _mm256_min_epi32(d02, tmp); - - tmp = d08; - - d08 = _mm256_max_epi32(d01, d08); - d01 = _mm256_min_epi32(d01, tmp); - - sort_04v_merge_ascending(d01, d02, d03, d04); - sort_04v_merge_ascending(d05, d06, d07, d08); - } - static INLINE void sort_08v_descending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08) { - __m256i tmp; - - sort_04v_descending(d01, d02, d03, d04); - sort_04v_ascending(d05, d06, d07, d08); - - tmp = d05; - - d05 = _mm256_max_epi32(d04, d05); - d04 = _mm256_min_epi32(d04, tmp); - - tmp = d06; - - d06 = _mm256_max_epi32(d03, d06); - d03 = _mm256_min_epi32(d03, tmp); - - tmp = d07; - - d07 = _mm256_max_epi32(d02, d07); - d02 = _mm256_min_epi32(d02, tmp); - - tmp = d08; - - d08 = _mm256_max_epi32(d01, d08); - d01 = _mm256_min_epi32(d01, tmp); - - sort_04v_merge_descending(d01, d02, d03, d04); - sort_04v_merge_descending(d05, d06, d07, d08); - } - static INLINE void sort_08v_merge_ascending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08) { - __m256i tmp; - - tmp = d01; - - d01 = _mm256_min_epi32(d05, d01); - - d05 = _mm256_max_epi32(d05, tmp); - - tmp = d02; - - d02 = _mm256_min_epi32(d06, d02); - - d06 = _mm256_max_epi32(d06, tmp); - - tmp = d03; - - d03 = _mm256_min_epi32(d07, d03); - - d07 = _mm256_max_epi32(d07, tmp); - - tmp = d04; - - d04 = _mm256_min_epi32(d08, d04); - - d08 = _mm256_max_epi32(d08, tmp); - - sort_04v_merge_ascending(d01, d02, d03, d04); - sort_04v_merge_ascending(d05, d06, d07, d08); - } - static INLINE void sort_08v_merge_descending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08) { - __m256i tmp; - - tmp = d01; - - d01 = _mm256_min_epi32(d05, d01); - - d05 = _mm256_max_epi32(d05, tmp); - - tmp = d02; - - d02 = _mm256_min_epi32(d06, d02); - - d06 = _mm256_max_epi32(d06, tmp); - - tmp = d03; - - d03 = _mm256_min_epi32(d07, d03); - - d07 = _mm256_max_epi32(d07, tmp); - - tmp = d04; - - d04 = _mm256_min_epi32(d08, d04); - - d08 = _mm256_max_epi32(d08, tmp); - - sort_04v_merge_descending(d01, d02, d03, d04); - sort_04v_merge_descending(d05, d06, d07, d08); - } - static INLINE void sort_09v_ascending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08, __m256i& d09) { - __m256i tmp; - - sort_08v_ascending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_01v_descending(d09); - - tmp = d09; - - d09 = _mm256_max_epi32(d08, d09); - d08 = _mm256_min_epi32(d08, tmp); - - sort_08v_merge_ascending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_01v_merge_ascending(d09); - } - static INLINE void sort_09v_descending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08, __m256i& d09) { - __m256i tmp; - - sort_08v_descending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_01v_ascending(d09); - - tmp = d09; - - d09 = _mm256_max_epi32(d08, d09); - d08 = _mm256_min_epi32(d08, tmp); - - sort_08v_merge_descending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_01v_merge_descending(d09); - } - static INLINE 
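// [Editor's note] sort_08v_merge_* above is the register-level image of the
// scalar bitonic_merge_ref sketched earlier: compare-exchange d01..d04
// against d05..d08 in matching positions (no mirroring, because both halves
// are already sorted the same way), then merge each 4-vector half on its own.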
void sort_10v_ascending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08, __m256i& d09, __m256i& d10) { - __m256i tmp; - - sort_08v_ascending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_02v_descending(d09, d10); - - tmp = d09; - - d09 = _mm256_max_epi32(d08, d09); - d08 = _mm256_min_epi32(d08, tmp); - - tmp = d10; - - d10 = _mm256_max_epi32(d07, d10); - d07 = _mm256_min_epi32(d07, tmp); - - sort_08v_merge_ascending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_02v_merge_ascending(d09, d10); - } - static INLINE void sort_10v_descending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08, __m256i& d09, __m256i& d10) { - __m256i tmp; - - sort_08v_descending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_02v_ascending(d09, d10); - - tmp = d09; - - d09 = _mm256_max_epi32(d08, d09); - d08 = _mm256_min_epi32(d08, tmp); - - tmp = d10; - - d10 = _mm256_max_epi32(d07, d10); - d07 = _mm256_min_epi32(d07, tmp); - - sort_08v_merge_descending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_02v_merge_descending(d09, d10); - } - static INLINE void sort_11v_ascending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08, __m256i& d09, __m256i& d10, __m256i& d11) { - __m256i tmp; - - sort_08v_ascending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_03v_descending(d09, d10, d11); - - tmp = d09; - - d09 = _mm256_max_epi32(d08, d09); - d08 = _mm256_min_epi32(d08, tmp); - - tmp = d10; - - d10 = _mm256_max_epi32(d07, d10); - d07 = _mm256_min_epi32(d07, tmp); - - tmp = d11; - - d11 = _mm256_max_epi32(d06, d11); - d06 = _mm256_min_epi32(d06, tmp); - - sort_08v_merge_ascending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_03v_merge_ascending(d09, d10, d11); - } - static INLINE void sort_11v_descending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08, __m256i& d09, __m256i& d10, __m256i& d11) { - __m256i tmp; - - sort_08v_descending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_03v_ascending(d09, d10, d11); - - tmp = d09; - - d09 = _mm256_max_epi32(d08, d09); - d08 = _mm256_min_epi32(d08, tmp); - - tmp = d10; - - d10 = _mm256_max_epi32(d07, d10); - d07 = _mm256_min_epi32(d07, tmp); - - tmp = d11; - - d11 = _mm256_max_epi32(d06, d11); - d06 = _mm256_min_epi32(d06, tmp); - - sort_08v_merge_descending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_03v_merge_descending(d09, d10, d11); - } - static INLINE void sort_12v_ascending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08, __m256i& d09, __m256i& d10, __m256i& d11, __m256i& d12) { - __m256i tmp; - - sort_08v_ascending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_04v_descending(d09, d10, d11, d12); - - tmp = d09; - - d09 = _mm256_max_epi32(d08, d09); - d08 = _mm256_min_epi32(d08, tmp); - - tmp = d10; - - d10 = _mm256_max_epi32(d07, d10); - d07 = _mm256_min_epi32(d07, tmp); - - tmp = d11; - - d11 = _mm256_max_epi32(d06, d11); - d06 = _mm256_min_epi32(d06, tmp); - - tmp = d12; - - d12 = _mm256_max_epi32(d05, d12); - d05 = _mm256_min_epi32(d05, tmp); - - sort_08v_merge_ascending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_04v_merge_ascending(d09, d10, d11, d12); - } - static INLINE void sort_12v_descending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08, __m256i& d09, __m256i& d10, __m256i& 
d11, __m256i& d12) { - __m256i tmp; - - sort_08v_descending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_04v_ascending(d09, d10, d11, d12); - - tmp = d09; - - d09 = _mm256_max_epi32(d08, d09); - d08 = _mm256_min_epi32(d08, tmp); - - tmp = d10; - - d10 = _mm256_max_epi32(d07, d10); - d07 = _mm256_min_epi32(d07, tmp); - - tmp = d11; - - d11 = _mm256_max_epi32(d06, d11); - d06 = _mm256_min_epi32(d06, tmp); - - tmp = d12; - - d12 = _mm256_max_epi32(d05, d12); - d05 = _mm256_min_epi32(d05, tmp); - - sort_08v_merge_descending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_04v_merge_descending(d09, d10, d11, d12); - } - static INLINE void sort_13v_ascending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08, __m256i& d09, __m256i& d10, __m256i& d11, __m256i& d12, __m256i& d13) { - __m256i tmp; - - sort_08v_ascending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_05v_descending(d09, d10, d11, d12, d13); - - tmp = d09; - - d09 = _mm256_max_epi32(d08, d09); - d08 = _mm256_min_epi32(d08, tmp); - - tmp = d10; - - d10 = _mm256_max_epi32(d07, d10); - d07 = _mm256_min_epi32(d07, tmp); - - tmp = d11; - - d11 = _mm256_max_epi32(d06, d11); - d06 = _mm256_min_epi32(d06, tmp); - - tmp = d12; - - d12 = _mm256_max_epi32(d05, d12); - d05 = _mm256_min_epi32(d05, tmp); - - tmp = d13; - - d13 = _mm256_max_epi32(d04, d13); - d04 = _mm256_min_epi32(d04, tmp); - - sort_08v_merge_ascending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_05v_merge_ascending(d09, d10, d11, d12, d13); - } - static INLINE void sort_13v_descending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08, __m256i& d09, __m256i& d10, __m256i& d11, __m256i& d12, __m256i& d13) { - __m256i tmp; - - sort_08v_descending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_05v_ascending(d09, d10, d11, d12, d13); - - tmp = d09; - - d09 = _mm256_max_epi32(d08, d09); - d08 = _mm256_min_epi32(d08, tmp); - - tmp = d10; - - d10 = _mm256_max_epi32(d07, d10); - d07 = _mm256_min_epi32(d07, tmp); - - tmp = d11; - - d11 = _mm256_max_epi32(d06, d11); - d06 = _mm256_min_epi32(d06, tmp); - - tmp = d12; - - d12 = _mm256_max_epi32(d05, d12); - d05 = _mm256_min_epi32(d05, tmp); - - tmp = d13; - - d13 = _mm256_max_epi32(d04, d13); - d04 = _mm256_min_epi32(d04, tmp); - - sort_08v_merge_descending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_05v_merge_descending(d09, d10, d11, d12, d13); - } - static INLINE void sort_14v_ascending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08, __m256i& d09, __m256i& d10, __m256i& d11, __m256i& d12, __m256i& d13, __m256i& d14) { - __m256i tmp; - - sort_08v_ascending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_06v_descending(d09, d10, d11, d12, d13, d14); - - tmp = d09; - - d09 = _mm256_max_epi32(d08, d09); - d08 = _mm256_min_epi32(d08, tmp); - - tmp = d10; - - d10 = _mm256_max_epi32(d07, d10); - d07 = _mm256_min_epi32(d07, tmp); - - tmp = d11; - - d11 = _mm256_max_epi32(d06, d11); - d06 = _mm256_min_epi32(d06, tmp); - - tmp = d12; - - d12 = _mm256_max_epi32(d05, d12); - d05 = _mm256_min_epi32(d05, tmp); - - tmp = d13; - - d13 = _mm256_max_epi32(d04, d13); - d04 = _mm256_min_epi32(d04, tmp); - - tmp = d14; - - d14 = _mm256_max_epi32(d03, d14); - d03 = _mm256_min_epi32(d03, tmp); - - sort_08v_merge_ascending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_06v_merge_ascending(d09, d10, d11, d12, d13, d14); - } - static INLINE void 
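// [Editor's note] These int32_t kernels get their compare-exchange for free
// from _mm256_min_epi32/_mm256_max_epi32. AVX2 has no 64-bit equivalents,
// which is why the int64_t flavor elsewhere in this patch series synthesizes
// min/max from a compare plus blends (there via _mm256_blendv_pd and the
// i2d/d2i casts). An equivalent formulation with the byte-wise blend, shown
// only as a sketch:
static inline void minmax_epi64(__m256i& lo, __m256i& hi) {
    __m256i cmp = _mm256_cmpgt_epi64(lo, hi);        // all-ones where lo > hi
    __m256i t   = _mm256_blendv_epi8(lo, hi, cmp);   // pick hi where lo > hi
    hi          = _mm256_blendv_epi8(hi, lo, cmp);   // pick lo where lo > hi
    lo          = t;                                 // lo now holds the minima
}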
sort_14v_descending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08, __m256i& d09, __m256i& d10, __m256i& d11, __m256i& d12, __m256i& d13, __m256i& d14) { - __m256i tmp; - - sort_08v_descending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_06v_ascending(d09, d10, d11, d12, d13, d14); - - tmp = d09; - - d09 = _mm256_max_epi32(d08, d09); - d08 = _mm256_min_epi32(d08, tmp); - - tmp = d10; - - d10 = _mm256_max_epi32(d07, d10); - d07 = _mm256_min_epi32(d07, tmp); - - tmp = d11; - - d11 = _mm256_max_epi32(d06, d11); - d06 = _mm256_min_epi32(d06, tmp); - - tmp = d12; - - d12 = _mm256_max_epi32(d05, d12); - d05 = _mm256_min_epi32(d05, tmp); - - tmp = d13; - - d13 = _mm256_max_epi32(d04, d13); - d04 = _mm256_min_epi32(d04, tmp); - - tmp = d14; - - d14 = _mm256_max_epi32(d03, d14); - d03 = _mm256_min_epi32(d03, tmp); - - sort_08v_merge_descending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_06v_merge_descending(d09, d10, d11, d12, d13, d14); - } - static INLINE void sort_15v_ascending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08, __m256i& d09, __m256i& d10, __m256i& d11, __m256i& d12, __m256i& d13, __m256i& d14, __m256i& d15) { - __m256i tmp; - - sort_08v_ascending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_07v_descending(d09, d10, d11, d12, d13, d14, d15); - - tmp = d09; - - d09 = _mm256_max_epi32(d08, d09); - d08 = _mm256_min_epi32(d08, tmp); - - tmp = d10; - - d10 = _mm256_max_epi32(d07, d10); - d07 = _mm256_min_epi32(d07, tmp); - - tmp = d11; - - d11 = _mm256_max_epi32(d06, d11); - d06 = _mm256_min_epi32(d06, tmp); - - tmp = d12; - - d12 = _mm256_max_epi32(d05, d12); - d05 = _mm256_min_epi32(d05, tmp); - - tmp = d13; - - d13 = _mm256_max_epi32(d04, d13); - d04 = _mm256_min_epi32(d04, tmp); - - tmp = d14; - - d14 = _mm256_max_epi32(d03, d14); - d03 = _mm256_min_epi32(d03, tmp); - - tmp = d15; - - d15 = _mm256_max_epi32(d02, d15); - d02 = _mm256_min_epi32(d02, tmp); - - sort_08v_merge_ascending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_07v_merge_ascending(d09, d10, d11, d12, d13, d14, d15); - } - static INLINE void sort_15v_descending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08, __m256i& d09, __m256i& d10, __m256i& d11, __m256i& d12, __m256i& d13, __m256i& d14, __m256i& d15) { - __m256i tmp; - - sort_08v_descending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_07v_ascending(d09, d10, d11, d12, d13, d14, d15); - - tmp = d09; - - d09 = _mm256_max_epi32(d08, d09); - d08 = _mm256_min_epi32(d08, tmp); - - tmp = d10; - - d10 = _mm256_max_epi32(d07, d10); - d07 = _mm256_min_epi32(d07, tmp); - - tmp = d11; - - d11 = _mm256_max_epi32(d06, d11); - d06 = _mm256_min_epi32(d06, tmp); - - tmp = d12; - - d12 = _mm256_max_epi32(d05, d12); - d05 = _mm256_min_epi32(d05, tmp); - - tmp = d13; - - d13 = _mm256_max_epi32(d04, d13); - d04 = _mm256_min_epi32(d04, tmp); - - tmp = d14; - - d14 = _mm256_max_epi32(d03, d14); - d03 = _mm256_min_epi32(d03, tmp); - - tmp = d15; - - d15 = _mm256_max_epi32(d02, d15); - d02 = _mm256_min_epi32(d02, tmp); - - sort_08v_merge_descending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_07v_merge_descending(d09, d10, d11, d12, d13, d14, d15); - } - static INLINE void sort_16v_ascending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08, __m256i& d09, __m256i& d10, __m256i& d11, __m256i& d12, __m256i& d13, __m256i& d14, 
__m256i& d15, __m256i& d16) { - __m256i tmp; - - sort_08v_ascending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_08v_descending(d09, d10, d11, d12, d13, d14, d15, d16); - - tmp = d09; - - d09 = _mm256_max_epi32(d08, d09); - d08 = _mm256_min_epi32(d08, tmp); - - tmp = d10; - - d10 = _mm256_max_epi32(d07, d10); - d07 = _mm256_min_epi32(d07, tmp); - - tmp = d11; - - d11 = _mm256_max_epi32(d06, d11); - d06 = _mm256_min_epi32(d06, tmp); - - tmp = d12; - - d12 = _mm256_max_epi32(d05, d12); - d05 = _mm256_min_epi32(d05, tmp); - - tmp = d13; - - d13 = _mm256_max_epi32(d04, d13); - d04 = _mm256_min_epi32(d04, tmp); - - tmp = d14; - - d14 = _mm256_max_epi32(d03, d14); - d03 = _mm256_min_epi32(d03, tmp); - - tmp = d15; - - d15 = _mm256_max_epi32(d02, d15); - d02 = _mm256_min_epi32(d02, tmp); - - tmp = d16; - - d16 = _mm256_max_epi32(d01, d16); - d01 = _mm256_min_epi32(d01, tmp); - - sort_08v_merge_ascending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_08v_merge_ascending(d09, d10, d11, d12, d13, d14, d15, d16); - } - static INLINE void sort_16v_descending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08, __m256i& d09, __m256i& d10, __m256i& d11, __m256i& d12, __m256i& d13, __m256i& d14, __m256i& d15, __m256i& d16) { - __m256i tmp; - - sort_08v_descending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_08v_ascending(d09, d10, d11, d12, d13, d14, d15, d16); - - tmp = d09; - - d09 = _mm256_max_epi32(d08, d09); - d08 = _mm256_min_epi32(d08, tmp); - - tmp = d10; - - d10 = _mm256_max_epi32(d07, d10); - d07 = _mm256_min_epi32(d07, tmp); - - tmp = d11; - - d11 = _mm256_max_epi32(d06, d11); - d06 = _mm256_min_epi32(d06, tmp); - - tmp = d12; - - d12 = _mm256_max_epi32(d05, d12); - d05 = _mm256_min_epi32(d05, tmp); - - tmp = d13; - - d13 = _mm256_max_epi32(d04, d13); - d04 = _mm256_min_epi32(d04, tmp); - - tmp = d14; - - d14 = _mm256_max_epi32(d03, d14); - d03 = _mm256_min_epi32(d03, tmp); - - tmp = d15; - - d15 = _mm256_max_epi32(d02, d15); - d02 = _mm256_min_epi32(d02, tmp); - - tmp = d16; - - d16 = _mm256_max_epi32(d01, d16); - d01 = _mm256_min_epi32(d01, tmp); - - sort_08v_merge_descending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_08v_merge_descending(d09, d10, d11, d12, d13, d14, d15, d16); - } - -static NOINLINE void sort_01v(int32_t *ptr) { - __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0);; - sort_01v_ascending(d01); - _mm256_storeu_si256((__m256i *) ptr + 0, d01); -} - -static NOINLINE void sort_02v(int32_t *ptr) { - __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0);; - __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1);; - sort_02v_ascending(d01, d02); - _mm256_storeu_si256((__m256i *) ptr + 0, d01); - _mm256_storeu_si256((__m256i *) ptr + 1, d02); -} - -static NOINLINE void sort_03v(int32_t *ptr) { - __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0);; - __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1);; - __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2);; - sort_03v_ascending(d01, d02, d03); - _mm256_storeu_si256((__m256i *) ptr + 0, d01); - _mm256_storeu_si256((__m256i *) ptr + 1, d02); - _mm256_storeu_si256((__m256i *) ptr + 2, d03); -} - -static NOINLINE void sort_04v(int32_t *ptr) { - __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0);; - __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1);; - __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2);; - __m256i d04 = _mm256_lddqu_si256((__m256i const *) ptr + 3);; - sort_04v_ascending(d01, 
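// [Editor's note] The loaders below use _mm256_lddqu_si256, which on AVX2-era
// hardware behaves like _mm256_loadu_si256 (the lddqu distinction only
// mattered on pre-AVX microarchitectures); stores use the plain unaligned
// _mm256_storeu_si256. The doubled ";;" is an artifact of the code generator
// and is reproduced verbatim by the diff; it is not corruption introduced
// here.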
d02, d03, d04); - _mm256_storeu_si256((__m256i *) ptr + 0, d01); - _mm256_storeu_si256((__m256i *) ptr + 1, d02); - _mm256_storeu_si256((__m256i *) ptr + 2, d03); - _mm256_storeu_si256((__m256i *) ptr + 3, d04); -} - -static NOINLINE void sort_05v(int32_t *ptr) { - __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0);; - __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1);; - __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2);; - __m256i d04 = _mm256_lddqu_si256((__m256i const *) ptr + 3);; - __m256i d05 = _mm256_lddqu_si256((__m256i const *) ptr + 4);; - sort_05v_ascending(d01, d02, d03, d04, d05); - _mm256_storeu_si256((__m256i *) ptr + 0, d01); - _mm256_storeu_si256((__m256i *) ptr + 1, d02); - _mm256_storeu_si256((__m256i *) ptr + 2, d03); - _mm256_storeu_si256((__m256i *) ptr + 3, d04); - _mm256_storeu_si256((__m256i *) ptr + 4, d05); -} - -static NOINLINE void sort_06v(int32_t *ptr) { - __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0);; - __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1);; - __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2);; - __m256i d04 = _mm256_lddqu_si256((__m256i const *) ptr + 3);; - __m256i d05 = _mm256_lddqu_si256((__m256i const *) ptr + 4);; - __m256i d06 = _mm256_lddqu_si256((__m256i const *) ptr + 5);; - sort_06v_ascending(d01, d02, d03, d04, d05, d06); - _mm256_storeu_si256((__m256i *) ptr + 0, d01); - _mm256_storeu_si256((__m256i *) ptr + 1, d02); - _mm256_storeu_si256((__m256i *) ptr + 2, d03); - _mm256_storeu_si256((__m256i *) ptr + 3, d04); - _mm256_storeu_si256((__m256i *) ptr + 4, d05); - _mm256_storeu_si256((__m256i *) ptr + 5, d06); -} - -static NOINLINE void sort_07v(int32_t *ptr) { - __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0);; - __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1);; - __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2);; - __m256i d04 = _mm256_lddqu_si256((__m256i const *) ptr + 3);; - __m256i d05 = _mm256_lddqu_si256((__m256i const *) ptr + 4);; - __m256i d06 = _mm256_lddqu_si256((__m256i const *) ptr + 5);; - __m256i d07 = _mm256_lddqu_si256((__m256i const *) ptr + 6);; - sort_07v_ascending(d01, d02, d03, d04, d05, d06, d07); - _mm256_storeu_si256((__m256i *) ptr + 0, d01); - _mm256_storeu_si256((__m256i *) ptr + 1, d02); - _mm256_storeu_si256((__m256i *) ptr + 2, d03); - _mm256_storeu_si256((__m256i *) ptr + 3, d04); - _mm256_storeu_si256((__m256i *) ptr + 4, d05); - _mm256_storeu_si256((__m256i *) ptr + 5, d06); - _mm256_storeu_si256((__m256i *) ptr + 6, d07); -} - -static NOINLINE void sort_08v(int32_t *ptr) { - __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0);; - __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1);; - __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2);; - __m256i d04 = _mm256_lddqu_si256((__m256i const *) ptr + 3);; - __m256i d05 = _mm256_lddqu_si256((__m256i const *) ptr + 4);; - __m256i d06 = _mm256_lddqu_si256((__m256i const *) ptr + 5);; - __m256i d07 = _mm256_lddqu_si256((__m256i const *) ptr + 6);; - __m256i d08 = _mm256_lddqu_si256((__m256i const *) ptr + 7);; - sort_08v_ascending(d01, d02, d03, d04, d05, d06, d07, d08); - _mm256_storeu_si256((__m256i *) ptr + 0, d01); - _mm256_storeu_si256((__m256i *) ptr + 1, d02); - _mm256_storeu_si256((__m256i *) ptr + 2, d03); - _mm256_storeu_si256((__m256i *) ptr + 3, d04); - _mm256_storeu_si256((__m256i *) ptr + 4, d05); - _mm256_storeu_si256((__m256i *) ptr + 5, d06); - _mm256_storeu_si256((__m256i *) ptr + 6, d07); - 
_mm256_storeu_si256((__m256i *) ptr + 7, d08); -} - -static NOINLINE void sort_09v(int32_t *ptr) { - __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0);; - __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1);; - __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2);; - __m256i d04 = _mm256_lddqu_si256((__m256i const *) ptr + 3);; - __m256i d05 = _mm256_lddqu_si256((__m256i const *) ptr + 4);; - __m256i d06 = _mm256_lddqu_si256((__m256i const *) ptr + 5);; - __m256i d07 = _mm256_lddqu_si256((__m256i const *) ptr + 6);; - __m256i d08 = _mm256_lddqu_si256((__m256i const *) ptr + 7);; - __m256i d09 = _mm256_lddqu_si256((__m256i const *) ptr + 8);; - sort_09v_ascending(d01, d02, d03, d04, d05, d06, d07, d08, d09); - _mm256_storeu_si256((__m256i *) ptr + 0, d01); - _mm256_storeu_si256((__m256i *) ptr + 1, d02); - _mm256_storeu_si256((__m256i *) ptr + 2, d03); - _mm256_storeu_si256((__m256i *) ptr + 3, d04); - _mm256_storeu_si256((__m256i *) ptr + 4, d05); - _mm256_storeu_si256((__m256i *) ptr + 5, d06); - _mm256_storeu_si256((__m256i *) ptr + 6, d07); - _mm256_storeu_si256((__m256i *) ptr + 7, d08); - _mm256_storeu_si256((__m256i *) ptr + 8, d09); -} - -static NOINLINE void sort_10v(int32_t *ptr) { - __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0);; - __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1);; - __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2);; - __m256i d04 = _mm256_lddqu_si256((__m256i const *) ptr + 3);; - __m256i d05 = _mm256_lddqu_si256((__m256i const *) ptr + 4);; - __m256i d06 = _mm256_lddqu_si256((__m256i const *) ptr + 5);; - __m256i d07 = _mm256_lddqu_si256((__m256i const *) ptr + 6);; - __m256i d08 = _mm256_lddqu_si256((__m256i const *) ptr + 7);; - __m256i d09 = _mm256_lddqu_si256((__m256i const *) ptr + 8);; - __m256i d10 = _mm256_lddqu_si256((__m256i const *) ptr + 9);; - sort_10v_ascending(d01, d02, d03, d04, d05, d06, d07, d08, d09, d10); - _mm256_storeu_si256((__m256i *) ptr + 0, d01); - _mm256_storeu_si256((__m256i *) ptr + 1, d02); - _mm256_storeu_si256((__m256i *) ptr + 2, d03); - _mm256_storeu_si256((__m256i *) ptr + 3, d04); - _mm256_storeu_si256((__m256i *) ptr + 4, d05); - _mm256_storeu_si256((__m256i *) ptr + 5, d06); - _mm256_storeu_si256((__m256i *) ptr + 6, d07); - _mm256_storeu_si256((__m256i *) ptr + 7, d08); - _mm256_storeu_si256((__m256i *) ptr + 8, d09); - _mm256_storeu_si256((__m256i *) ptr + 9, d10); -} - -static NOINLINE void sort_11v(int32_t *ptr) { - __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0);; - __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1);; - __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2);; - __m256i d04 = _mm256_lddqu_si256((__m256i const *) ptr + 3);; - __m256i d05 = _mm256_lddqu_si256((__m256i const *) ptr + 4);; - __m256i d06 = _mm256_lddqu_si256((__m256i const *) ptr + 5);; - __m256i d07 = _mm256_lddqu_si256((__m256i const *) ptr + 6);; - __m256i d08 = _mm256_lddqu_si256((__m256i const *) ptr + 7);; - __m256i d09 = _mm256_lddqu_si256((__m256i const *) ptr + 8);; - __m256i d10 = _mm256_lddqu_si256((__m256i const *) ptr + 9);; - __m256i d11 = _mm256_lddqu_si256((__m256i const *) ptr + 10);; - sort_11v_ascending(d01, d02, d03, d04, d05, d06, d07, d08, d09, d10, d11); - _mm256_storeu_si256((__m256i *) ptr + 0, d01); - _mm256_storeu_si256((__m256i *) ptr + 1, d02); - _mm256_storeu_si256((__m256i *) ptr + 2, d03); - _mm256_storeu_si256((__m256i *) ptr + 3, d04); - _mm256_storeu_si256((__m256i *) ptr + 4, d05); - _mm256_storeu_si256((__m256i *) ptr 
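// [Editor's usage sketch] Invoking the small-sort directly, assuming a buffer
// whose length is a multiple of 8 and at most 128 int32_t elements:
//
//     int32_t keys[32] = { /* unsorted values */ };
//     gcsort::smallsort::bitonic<int32_t>::sort(keys, 32);  // 32/8 = 4 -> sort_04v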
+ 5, d06); - _mm256_storeu_si256((__m256i *) ptr + 6, d07); - _mm256_storeu_si256((__m256i *) ptr + 7, d08); - _mm256_storeu_si256((__m256i *) ptr + 8, d09); - _mm256_storeu_si256((__m256i *) ptr + 9, d10); - _mm256_storeu_si256((__m256i *) ptr + 10, d11); -} - -static NOINLINE void sort_12v(int32_t *ptr) { - __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0);; - __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1);; - __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2);; - __m256i d04 = _mm256_lddqu_si256((__m256i const *) ptr + 3);; - __m256i d05 = _mm256_lddqu_si256((__m256i const *) ptr + 4);; - __m256i d06 = _mm256_lddqu_si256((__m256i const *) ptr + 5);; - __m256i d07 = _mm256_lddqu_si256((__m256i const *) ptr + 6);; - __m256i d08 = _mm256_lddqu_si256((__m256i const *) ptr + 7);; - __m256i d09 = _mm256_lddqu_si256((__m256i const *) ptr + 8);; - __m256i d10 = _mm256_lddqu_si256((__m256i const *) ptr + 9);; - __m256i d11 = _mm256_lddqu_si256((__m256i const *) ptr + 10);; - __m256i d12 = _mm256_lddqu_si256((__m256i const *) ptr + 11);; - sort_12v_ascending(d01, d02, d03, d04, d05, d06, d07, d08, d09, d10, d11, d12); - _mm256_storeu_si256((__m256i *) ptr + 0, d01); - _mm256_storeu_si256((__m256i *) ptr + 1, d02); - _mm256_storeu_si256((__m256i *) ptr + 2, d03); - _mm256_storeu_si256((__m256i *) ptr + 3, d04); - _mm256_storeu_si256((__m256i *) ptr + 4, d05); - _mm256_storeu_si256((__m256i *) ptr + 5, d06); - _mm256_storeu_si256((__m256i *) ptr + 6, d07); - _mm256_storeu_si256((__m256i *) ptr + 7, d08); - _mm256_storeu_si256((__m256i *) ptr + 8, d09); - _mm256_storeu_si256((__m256i *) ptr + 9, d10); - _mm256_storeu_si256((__m256i *) ptr + 10, d11); - _mm256_storeu_si256((__m256i *) ptr + 11, d12); -} - -static NOINLINE void sort_13v(int32_t *ptr) { - __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0);; - __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1);; - __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2);; - __m256i d04 = _mm256_lddqu_si256((__m256i const *) ptr + 3);; - __m256i d05 = _mm256_lddqu_si256((__m256i const *) ptr + 4);; - __m256i d06 = _mm256_lddqu_si256((__m256i const *) ptr + 5);; - __m256i d07 = _mm256_lddqu_si256((__m256i const *) ptr + 6);; - __m256i d08 = _mm256_lddqu_si256((__m256i const *) ptr + 7);; - __m256i d09 = _mm256_lddqu_si256((__m256i const *) ptr + 8);; - __m256i d10 = _mm256_lddqu_si256((__m256i const *) ptr + 9);; - __m256i d11 = _mm256_lddqu_si256((__m256i const *) ptr + 10);; - __m256i d12 = _mm256_lddqu_si256((__m256i const *) ptr + 11);; - __m256i d13 = _mm256_lddqu_si256((__m256i const *) ptr + 12);; - sort_13v_ascending(d01, d02, d03, d04, d05, d06, d07, d08, d09, d10, d11, d12, d13); - _mm256_storeu_si256((__m256i *) ptr + 0, d01); - _mm256_storeu_si256((__m256i *) ptr + 1, d02); - _mm256_storeu_si256((__m256i *) ptr + 2, d03); - _mm256_storeu_si256((__m256i *) ptr + 3, d04); - _mm256_storeu_si256((__m256i *) ptr + 4, d05); - _mm256_storeu_si256((__m256i *) ptr + 5, d06); - _mm256_storeu_si256((__m256i *) ptr + 6, d07); - _mm256_storeu_si256((__m256i *) ptr + 7, d08); - _mm256_storeu_si256((__m256i *) ptr + 8, d09); - _mm256_storeu_si256((__m256i *) ptr + 9, d10); - _mm256_storeu_si256((__m256i *) ptr + 10, d11); - _mm256_storeu_si256((__m256i *) ptr + 11, d12); - _mm256_storeu_si256((__m256i *) ptr + 12, d13); -} - -static NOINLINE void sort_14v(int32_t *ptr) { - __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0);; - __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1);; - __m256i 
d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2);; - __m256i d04 = _mm256_lddqu_si256((__m256i const *) ptr + 3);; - __m256i d05 = _mm256_lddqu_si256((__m256i const *) ptr + 4);; - __m256i d06 = _mm256_lddqu_si256((__m256i const *) ptr + 5);; - __m256i d07 = _mm256_lddqu_si256((__m256i const *) ptr + 6);; - __m256i d08 = _mm256_lddqu_si256((__m256i const *) ptr + 7);; - __m256i d09 = _mm256_lddqu_si256((__m256i const *) ptr + 8);; - __m256i d10 = _mm256_lddqu_si256((__m256i const *) ptr + 9);; - __m256i d11 = _mm256_lddqu_si256((__m256i const *) ptr + 10);; - __m256i d12 = _mm256_lddqu_si256((__m256i const *) ptr + 11);; - __m256i d13 = _mm256_lddqu_si256((__m256i const *) ptr + 12);; - __m256i d14 = _mm256_lddqu_si256((__m256i const *) ptr + 13);; - sort_14v_ascending(d01, d02, d03, d04, d05, d06, d07, d08, d09, d10, d11, d12, d13, d14); - _mm256_storeu_si256((__m256i *) ptr + 0, d01); - _mm256_storeu_si256((__m256i *) ptr + 1, d02); - _mm256_storeu_si256((__m256i *) ptr + 2, d03); - _mm256_storeu_si256((__m256i *) ptr + 3, d04); - _mm256_storeu_si256((__m256i *) ptr + 4, d05); - _mm256_storeu_si256((__m256i *) ptr + 5, d06); - _mm256_storeu_si256((__m256i *) ptr + 6, d07); - _mm256_storeu_si256((__m256i *) ptr + 7, d08); - _mm256_storeu_si256((__m256i *) ptr + 8, d09); - _mm256_storeu_si256((__m256i *) ptr + 9, d10); - _mm256_storeu_si256((__m256i *) ptr + 10, d11); - _mm256_storeu_si256((__m256i *) ptr + 11, d12); - _mm256_storeu_si256((__m256i *) ptr + 12, d13); - _mm256_storeu_si256((__m256i *) ptr + 13, d14); -} - -static NOINLINE void sort_15v(int32_t *ptr) { - __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0);; - __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1);; - __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2);; - __m256i d04 = _mm256_lddqu_si256((__m256i const *) ptr + 3);; - __m256i d05 = _mm256_lddqu_si256((__m256i const *) ptr + 4);; - __m256i d06 = _mm256_lddqu_si256((__m256i const *) ptr + 5);; - __m256i d07 = _mm256_lddqu_si256((__m256i const *) ptr + 6);; - __m256i d08 = _mm256_lddqu_si256((__m256i const *) ptr + 7);; - __m256i d09 = _mm256_lddqu_si256((__m256i const *) ptr + 8);; - __m256i d10 = _mm256_lddqu_si256((__m256i const *) ptr + 9);; - __m256i d11 = _mm256_lddqu_si256((__m256i const *) ptr + 10);; - __m256i d12 = _mm256_lddqu_si256((__m256i const *) ptr + 11);; - __m256i d13 = _mm256_lddqu_si256((__m256i const *) ptr + 12);; - __m256i d14 = _mm256_lddqu_si256((__m256i const *) ptr + 13);; - __m256i d15 = _mm256_lddqu_si256((__m256i const *) ptr + 14);; - sort_15v_ascending(d01, d02, d03, d04, d05, d06, d07, d08, d09, d10, d11, d12, d13, d14, d15); - _mm256_storeu_si256((__m256i *) ptr + 0, d01); - _mm256_storeu_si256((__m256i *) ptr + 1, d02); - _mm256_storeu_si256((__m256i *) ptr + 2, d03); - _mm256_storeu_si256((__m256i *) ptr + 3, d04); - _mm256_storeu_si256((__m256i *) ptr + 4, d05); - _mm256_storeu_si256((__m256i *) ptr + 5, d06); - _mm256_storeu_si256((__m256i *) ptr + 6, d07); - _mm256_storeu_si256((__m256i *) ptr + 7, d08); - _mm256_storeu_si256((__m256i *) ptr + 8, d09); - _mm256_storeu_si256((__m256i *) ptr + 9, d10); - _mm256_storeu_si256((__m256i *) ptr + 10, d11); - _mm256_storeu_si256((__m256i *) ptr + 11, d12); - _mm256_storeu_si256((__m256i *) ptr + 12, d13); - _mm256_storeu_si256((__m256i *) ptr + 13, d14); - _mm256_storeu_si256((__m256i *) ptr + 14, d15); -} - -static NOINLINE void sort_16v(int32_t *ptr) { - __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0);; - __m256i d02 = 
_mm256_lddqu_si256((__m256i const *) ptr + 1);;
-    __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2);;
-    __m256i d04 = _mm256_lddqu_si256((__m256i const *) ptr + 3);;
-    __m256i d05 = _mm256_lddqu_si256((__m256i const *) ptr + 4);;
-    __m256i d06 = _mm256_lddqu_si256((__m256i const *) ptr + 5);;
-    __m256i d07 = _mm256_lddqu_si256((__m256i const *) ptr + 6);;
-    __m256i d08 = _mm256_lddqu_si256((__m256i const *) ptr + 7);;
-    __m256i d09 = _mm256_lddqu_si256((__m256i const *) ptr + 8);;
-    __m256i d10 = _mm256_lddqu_si256((__m256i const *) ptr + 9);;
-    __m256i d11 = _mm256_lddqu_si256((__m256i const *) ptr + 10);;
-    __m256i d12 = _mm256_lddqu_si256((__m256i const *) ptr + 11);;
-    __m256i d13 = _mm256_lddqu_si256((__m256i const *) ptr + 12);;
-    __m256i d14 = _mm256_lddqu_si256((__m256i const *) ptr + 13);;
-    __m256i d15 = _mm256_lddqu_si256((__m256i const *) ptr + 14);;
-    __m256i d16 = _mm256_lddqu_si256((__m256i const *) ptr + 15);;
-    sort_16v_ascending(d01, d02, d03, d04, d05, d06, d07, d08, d09, d10, d11, d12, d13, d14, d15, d16);
-    _mm256_storeu_si256((__m256i *) ptr + 0, d01);
-    _mm256_storeu_si256((__m256i *) ptr + 1, d02);
-    _mm256_storeu_si256((__m256i *) ptr + 2, d03);
-    _mm256_storeu_si256((__m256i *) ptr + 3, d04);
-    _mm256_storeu_si256((__m256i *) ptr + 4, d05);
-    _mm256_storeu_si256((__m256i *) ptr + 5, d06);
-    _mm256_storeu_si256((__m256i *) ptr + 6, d07);
-    _mm256_storeu_si256((__m256i *) ptr + 7, d08);
-    _mm256_storeu_si256((__m256i *) ptr + 8, d09);
-    _mm256_storeu_si256((__m256i *) ptr + 9, d10);
-    _mm256_storeu_si256((__m256i *) ptr + 10, d11);
-    _mm256_storeu_si256((__m256i *) ptr + 11, d12);
-    _mm256_storeu_si256((__m256i *) ptr + 12, d13);
-    _mm256_storeu_si256((__m256i *) ptr + 13, d14);
-    _mm256_storeu_si256((__m256i *) ptr + 14, d15);
-    _mm256_storeu_si256((__m256i *) ptr + 15, d16);
-}
- static void sort(int32_t *ptr, size_t length) {
-    const int N = 8;
-
-    switch(length / N) {
-        case 1: sort_01v(ptr); break;
-        case 2: sort_02v(ptr); break;
-        case 3: sort_03v(ptr); break;
-        case 4: sort_04v(ptr); break;
-        case 5: sort_05v(ptr); break;
-        case 6: sort_06v(ptr); break;
-        case 7: sort_07v(ptr); break;
-        case 8: sort_08v(ptr); break;
-        case 9: sort_09v(ptr); break;
-        case 10: sort_10v(ptr); break;
-        case 11: sort_11v(ptr); break;
-        case 12: sort_12v(ptr); break;
-        case 13: sort_13v(ptr); break;
-        case 14: sort_14v(ptr); break;
-        case 15: sort_15v(ptr); break;
-        case 16: sort_16v(ptr); break;
-    }
-}
-};
-}
-}
-#endif
diff --git a/src/coreclr/src/gc/smallsort/bitonic_sort.AVX2.int64_t.generated.cpp b/src/coreclr/src/gc/smallsort/bitonic_sort.AVX2.int64_t.generated.cpp
new file mode 100644
index 00000000000000..615d4162e62b6f
--- /dev/null
+++ b/src/coreclr/src/gc/smallsort/bitonic_sort.AVX2.int64_t.generated.cpp
@@ -0,0 +1,26 @@
+#include "bitonic_sort.AVX2.int64_t.generated.h"
+
+using namespace vxsort;
+
+void vxsort::smallsort::bitonic::sort(int64_t *ptr, size_t length) {
+    const int N = 4;
+
+    switch(length / N) {
+        case 1: sort_01v(ptr); break;
+        case 2: sort_02v(ptr); break;
+        case 3: sort_03v(ptr); break;
+        case 4: sort_04v(ptr); break;
+        case 5: sort_05v(ptr); break;
+        case 6: sort_06v(ptr); break;
+        case 7: sort_07v(ptr); break;
+        case 8: sort_08v(ptr); break;
+        case 9: sort_09v(ptr); break;
+        case 10: sort_10v(ptr); break;
+        case 11: sort_11v(ptr); break;
+        case 12: sort_12v(ptr); break;
+        case 13: sort_13v(ptr); break;
+        case 14: sort_14v(ptr); break;
+        case 15: sort_15v(ptr); break;
+        case 16: sort_16v(ptr); break;
+    }
+}
diff --git a/src/coreclr/src/gc/smallsort/bitonic_sort.AVX2.int64_t.generated.h b/src/coreclr/src/gc/smallsort/bitonic_sort.AVX2.int64_t.generated.h
index 782b44d673914b..bc42ebc91f58ca 100644
--- a/src/coreclr/src/gc/smallsort/bitonic_sort.AVX2.int64_t.generated.h
+++ b/src/coreclr/src/gc/smallsort/bitonic_sort.AVX2.int64_t.generated.h
@@ -1,6 +1,6 @@
 /////////////////////////////////////////////////////////////////////////////
 ////
-// This file was auto-generated by a tool at 2020-05-31 19:46:17
+// This file was auto-generated by a tool at 2020-06-22 05:27:48
 //
 // It is recommended you DO NOT directly edit this file but instead edit
 // the code-generator that generated this source file instead.
@@ -9,19 +9,18 @@
 #ifndef BITONIC_SORT_AVX2_INT64_T_H
 #define BITONIC_SORT_AVX2_INT64_T_H
-#include
-#include "bitonic_sort.h"
-
-#ifdef _MSC_VER
- // MSVC
- #define INLINE __forceinline
- #define NOINLINE __declspec(noinline)
+#ifdef __GNUC__
+#ifdef __clang__
+#pragma clang attribute push (__attribute__((target("avx2"))), apply_to = any(function))
 #else
- // GCC + Clang
- #define INLINE __attribute__((always_inline))
- #define NOINLINE __attribute__((noinline))
+#pragma GCC push_options
+#pragma GCC target("avx2")
+#endif
 #endif
+#include
+#include "bitonic_sort.h"
+
 #define i2d _mm256_castsi256_pd
 #define d2i _mm256_castpd_si256
 #define i2s _mm256_castsi256_ps
@@ -29,1464 +28,1460 @@
 #define s2d _mm256_castps_pd
 #define d2s _mm256_castpd_ps
-namespace gcsort {
+namespace vxsort {
 namespace smallsort {
-template<> struct bitonic {
+template<> struct bitonic {
 public:
 static INLINE void sort_01v_ascending(__m256i& d01) {
-    __m256i min, max, s, cmp;
-
-    s = d2i(_mm256_shuffle_pd(i2d(d01), i2d(d01), 0x5));
-    cmp = _mm256_cmpgt_epi64(s, d01);
-    min = d2i(_mm256_blendv_pd(i2d(s), i2d(d01), i2d(cmp)));
-    max = d2i(_mm256_blendv_pd(i2d(d01), i2d(s), i2d(cmp)));
-    d01 = d2i(_mm256_blend_pd(i2d(min), i2d(max), 0xA));
-
-    s = d2i(_mm256_permute4x64_pd(i2d(d01), 0x1B));
-    cmp = _mm256_cmpgt_epi64(s, d01);
-    min = d2i(_mm256_blendv_pd(i2d(s), i2d(d01), i2d(cmp)));
-    max = d2i(_mm256_blendv_pd(i2d(d01), i2d(s), i2d(cmp)));
-    d01 = d2i(_mm256_blend_pd(i2d(min), i2d(max), 0xC));
-
-    s = d2i(_mm256_shuffle_pd(i2d(d01), i2d(d01), 0x5));
-    cmp = _mm256_cmpgt_epi64(s, d01);
-    min = d2i(_mm256_blendv_pd(i2d(s), i2d(d01), i2d(cmp)));
-    max = d2i(_mm256_blendv_pd(i2d(d01), i2d(s), i2d(cmp)));
-    d01 = d2i(_mm256_blend_pd(i2d(min), i2d(max), 0xA));
+  __m256i min, max, s, cmp;
+
+  s = d2i(_mm256_shuffle_pd(i2d(d01), i2d(d01), 0x5));
+  cmp = _mm256_cmpgt_epi64(s, d01);
+  min = d2i(_mm256_blendv_pd(i2d(s), i2d(d01), i2d(cmp)));
+  max = d2i(_mm256_blendv_pd(i2d(d01), i2d(s), i2d(cmp)));
+  d01 = d2i(_mm256_blend_pd(i2d(min), i2d(max), 0xA));
+
+  s = d2i(_mm256_permute4x64_pd(i2d(d01), 0x1B));
+  cmp = _mm256_cmpgt_epi64(s, d01);
+  min = d2i(_mm256_blendv_pd(i2d(s), i2d(d01), i2d(cmp)));
+  max = d2i(_mm256_blendv_pd(i2d(d01), i2d(s), i2d(cmp)));
+  d01 = d2i(_mm256_blend_pd(i2d(min), i2d(max), 0xC));
+
+  s = d2i(_mm256_shuffle_pd(i2d(d01), i2d(d01), 0x5));
+  cmp = _mm256_cmpgt_epi64(s, d01);
+  min = d2i(_mm256_blendv_pd(i2d(s), i2d(d01), i2d(cmp)));
+  max = d2i(_mm256_blendv_pd(i2d(d01), i2d(s), i2d(cmp)));
+  d01 = d2i(_mm256_blend_pd(i2d(min), i2d(max), 0xA));
 }
 static INLINE void sort_01v_merge_ascending(__m256i& d01) {
-    __m256i min, max, s, cmp;
-
-    s = d2i(_mm256_permute4x64_pd(i2d(d01), 0x4E));
-    cmp = _mm256_cmpgt_epi64(s, d01);
-    min = d2i(_mm256_blendv_pd(i2d(s), i2d(d01), i2d(cmp)));
-    max = d2i(_mm256_blendv_pd(i2d(d01),
i2d(s), i2d(cmp))); - d01 = d2i(_mm256_blend_pd(i2d(min), i2d(max), 0xC)); - - s = d2i(_mm256_shuffle_pd(i2d(d01), i2d(d01), 0x5)); - cmp = _mm256_cmpgt_epi64(s, d01); - min = d2i(_mm256_blendv_pd(i2d(s), i2d(d01), i2d(cmp))); - max = d2i(_mm256_blendv_pd(i2d(d01), i2d(s), i2d(cmp))); - d01 = d2i(_mm256_blend_pd(i2d(min), i2d(max), 0xA)); + __m256i min, max, s, cmp; + + s = d2i(_mm256_permute4x64_pd(i2d(d01), 0x4E)); + cmp = _mm256_cmpgt_epi64(s, d01); + min = d2i(_mm256_blendv_pd(i2d(s), i2d(d01), i2d(cmp))); + max = d2i(_mm256_blendv_pd(i2d(d01), i2d(s), i2d(cmp))); + d01 = d2i(_mm256_blend_pd(i2d(min), i2d(max), 0xC)); + + s = d2i(_mm256_shuffle_pd(i2d(d01), i2d(d01), 0x5)); + cmp = _mm256_cmpgt_epi64(s, d01); + min = d2i(_mm256_blendv_pd(i2d(s), i2d(d01), i2d(cmp))); + max = d2i(_mm256_blendv_pd(i2d(d01), i2d(s), i2d(cmp))); + d01 = d2i(_mm256_blend_pd(i2d(min), i2d(max), 0xA)); } static INLINE void sort_01v_descending(__m256i& d01) { - __m256i min, max, s, cmp; - - s = d2i(_mm256_shuffle_pd(i2d(d01), i2d(d01), 0x5)); - cmp = _mm256_cmpgt_epi64(s, d01); - min = d2i(_mm256_blendv_pd(i2d(s), i2d(d01), i2d(cmp))); - max = d2i(_mm256_blendv_pd(i2d(d01), i2d(s), i2d(cmp))); - d01 = d2i(_mm256_blend_pd(i2d(max), i2d(min), 0xA)); - - s = d2i(_mm256_permute4x64_pd(i2d(d01), 0x1B)); - cmp = _mm256_cmpgt_epi64(s, d01); - min = d2i(_mm256_blendv_pd(i2d(s), i2d(d01), i2d(cmp))); - max = d2i(_mm256_blendv_pd(i2d(d01), i2d(s), i2d(cmp))); - d01 = d2i(_mm256_blend_pd(i2d(max), i2d(min), 0xC)); - - s = d2i(_mm256_shuffle_pd(i2d(d01), i2d(d01), 0x5)); - cmp = _mm256_cmpgt_epi64(s, d01); - min = d2i(_mm256_blendv_pd(i2d(s), i2d(d01), i2d(cmp))); - max = d2i(_mm256_blendv_pd(i2d(d01), i2d(s), i2d(cmp))); - d01 = d2i(_mm256_blend_pd(i2d(max), i2d(min), 0xA)); + __m256i min, max, s, cmp; + + s = d2i(_mm256_shuffle_pd(i2d(d01), i2d(d01), 0x5)); + cmp = _mm256_cmpgt_epi64(s, d01); + min = d2i(_mm256_blendv_pd(i2d(s), i2d(d01), i2d(cmp))); + max = d2i(_mm256_blendv_pd(i2d(d01), i2d(s), i2d(cmp))); + d01 = d2i(_mm256_blend_pd(i2d(max), i2d(min), 0xA)); + + s = d2i(_mm256_permute4x64_pd(i2d(d01), 0x1B)); + cmp = _mm256_cmpgt_epi64(s, d01); + min = d2i(_mm256_blendv_pd(i2d(s), i2d(d01), i2d(cmp))); + max = d2i(_mm256_blendv_pd(i2d(d01), i2d(s), i2d(cmp))); + d01 = d2i(_mm256_blend_pd(i2d(max), i2d(min), 0xC)); + + s = d2i(_mm256_shuffle_pd(i2d(d01), i2d(d01), 0x5)); + cmp = _mm256_cmpgt_epi64(s, d01); + min = d2i(_mm256_blendv_pd(i2d(s), i2d(d01), i2d(cmp))); + max = d2i(_mm256_blendv_pd(i2d(d01), i2d(s), i2d(cmp))); + d01 = d2i(_mm256_blend_pd(i2d(max), i2d(min), 0xA)); } static INLINE void sort_01v_merge_descending(__m256i& d01) { - __m256i min, max, s, cmp; - - s = d2i(_mm256_permute4x64_pd(i2d(d01), 0x4E)); - cmp = _mm256_cmpgt_epi64(s, d01); - min = d2i(_mm256_blendv_pd(i2d(s), i2d(d01), i2d(cmp))); - max = d2i(_mm256_blendv_pd(i2d(d01), i2d(s), i2d(cmp))); - d01 = d2i(_mm256_blend_pd(i2d(max), i2d(min), 0xC)); - - s = d2i(_mm256_shuffle_pd(i2d(d01), i2d(d01), 0x5)); - cmp = _mm256_cmpgt_epi64(s, d01); - min = d2i(_mm256_blendv_pd(i2d(s), i2d(d01), i2d(cmp))); - max = d2i(_mm256_blendv_pd(i2d(d01), i2d(s), i2d(cmp))); - d01 = d2i(_mm256_blend_pd(i2d(max), i2d(min), 0xA)); + __m256i min, max, s, cmp; + + s = d2i(_mm256_permute4x64_pd(i2d(d01), 0x4E)); + cmp = _mm256_cmpgt_epi64(s, d01); + min = d2i(_mm256_blendv_pd(i2d(s), i2d(d01), i2d(cmp))); + max = d2i(_mm256_blendv_pd(i2d(d01), i2d(s), i2d(cmp))); + d01 = d2i(_mm256_blend_pd(i2d(max), i2d(min), 0xC)); + + s = d2i(_mm256_shuffle_pd(i2d(d01), i2d(d01), 
0x5)); + cmp = _mm256_cmpgt_epi64(s, d01); + min = d2i(_mm256_blendv_pd(i2d(s), i2d(d01), i2d(cmp))); + max = d2i(_mm256_blendv_pd(i2d(d01), i2d(s), i2d(cmp))); + d01 = d2i(_mm256_blend_pd(i2d(max), i2d(min), 0xA)); } static INLINE void sort_02v_ascending(__m256i& d01, __m256i& d02) { - __m256i tmp, cmp; + __m256i tmp, cmp; - sort_01v_ascending(d01); - sort_01v_descending(d02); + sort_01v_ascending(d01); + sort_01v_descending(d02); - tmp = d02; - cmp = _mm256_cmpgt_epi64(d01, d02); - d02 = d2i(_mm256_blendv_pd(i2d(d02), i2d(d01), i2d(cmp))); - d01 = d2i(_mm256_blendv_pd(i2d(d01), i2d(tmp), i2d(cmp))); + tmp = d02; + cmp = _mm256_cmpgt_epi64(d01, d02); + d02 = d2i(_mm256_blendv_pd(i2d(d02), i2d(d01), i2d(cmp))); + d01 = d2i(_mm256_blendv_pd(i2d(d01), i2d(tmp), i2d(cmp))); - sort_01v_merge_ascending(d01); - sort_01v_merge_ascending(d02); + sort_01v_merge_ascending(d01); + sort_01v_merge_ascending(d02); } static INLINE void sort_02v_descending(__m256i& d01, __m256i& d02) { - __m256i tmp, cmp; + __m256i tmp, cmp; - sort_01v_descending(d01); - sort_01v_ascending(d02); + sort_01v_descending(d01); + sort_01v_ascending(d02); - tmp = d02; - cmp = _mm256_cmpgt_epi64(d01, d02); - d02 = d2i(_mm256_blendv_pd(i2d(d02), i2d(d01), i2d(cmp))); - d01 = d2i(_mm256_blendv_pd(i2d(d01), i2d(tmp), i2d(cmp))); + tmp = d02; + cmp = _mm256_cmpgt_epi64(d01, d02); + d02 = d2i(_mm256_blendv_pd(i2d(d02), i2d(d01), i2d(cmp))); + d01 = d2i(_mm256_blendv_pd(i2d(d01), i2d(tmp), i2d(cmp))); - sort_01v_merge_descending(d01); - sort_01v_merge_descending(d02); + sort_01v_merge_descending(d01); + sort_01v_merge_descending(d02); } static INLINE void sort_02v_merge_ascending(__m256i& d01, __m256i& d02) { - __m256i tmp, cmp; + __m256i tmp, cmp; - tmp = d01; - cmp = _mm256_cmpgt_epi64(d02, d01); - d01 = d2i(_mm256_blendv_pd(i2d(d02), i2d(d01), i2d(cmp))); - cmp = _mm256_cmpgt_epi64(d02, tmp); - d02 = d2i(_mm256_blendv_pd(i2d(tmp), i2d(d02), i2d(cmp))); + tmp = d01; + cmp = _mm256_cmpgt_epi64(d02, d01); + d01 = d2i(_mm256_blendv_pd(i2d(d02), i2d(d01), i2d(cmp))); + cmp = _mm256_cmpgt_epi64(d02, tmp); + d02 = d2i(_mm256_blendv_pd(i2d(tmp), i2d(d02), i2d(cmp))); - sort_01v_merge_ascending(d01); - sort_01v_merge_ascending(d02); + sort_01v_merge_ascending(d01); + sort_01v_merge_ascending(d02); } static INLINE void sort_02v_merge_descending(__m256i& d01, __m256i& d02) { - __m256i tmp, cmp; + __m256i tmp, cmp; - tmp = d01; - cmp = _mm256_cmpgt_epi64(d02, d01); - d01 = d2i(_mm256_blendv_pd(i2d(d02), i2d(d01), i2d(cmp))); - cmp = _mm256_cmpgt_epi64(d02, tmp); - d02 = d2i(_mm256_blendv_pd(i2d(tmp), i2d(d02), i2d(cmp))); + tmp = d01; + cmp = _mm256_cmpgt_epi64(d02, d01); + d01 = d2i(_mm256_blendv_pd(i2d(d02), i2d(d01), i2d(cmp))); + cmp = _mm256_cmpgt_epi64(d02, tmp); + d02 = d2i(_mm256_blendv_pd(i2d(tmp), i2d(d02), i2d(cmp))); - sort_01v_merge_descending(d01); - sort_01v_merge_descending(d02); + sort_01v_merge_descending(d01); + sort_01v_merge_descending(d02); } static INLINE void sort_03v_ascending(__m256i& d01, __m256i& d02, __m256i& d03) { - __m256i tmp, cmp; + __m256i tmp, cmp; - sort_02v_ascending(d01, d02); - sort_01v_descending(d03); + sort_02v_ascending(d01, d02); + sort_01v_descending(d03); - tmp = d03; - cmp = _mm256_cmpgt_epi64(d02, d03); - d03 = d2i(_mm256_blendv_pd(i2d(d03), i2d(d02), i2d(cmp))); - d02 = d2i(_mm256_blendv_pd(i2d(d02), i2d(tmp), i2d(cmp))); + tmp = d03; + cmp = _mm256_cmpgt_epi64(d02, d03); + d03 = d2i(_mm256_blendv_pd(i2d(d03), i2d(d02), i2d(cmp))); + d02 = d2i(_mm256_blendv_pd(i2d(d02), i2d(tmp), i2d(cmp))); - 
sort_02v_merge_ascending(d01, d02); - sort_01v_merge_ascending(d03); + sort_02v_merge_ascending(d01, d02); + sort_01v_merge_ascending(d03); } static INLINE void sort_03v_descending(__m256i& d01, __m256i& d02, __m256i& d03) { - __m256i tmp, cmp; + __m256i tmp, cmp; - sort_02v_descending(d01, d02); - sort_01v_ascending(d03); + sort_02v_descending(d01, d02); + sort_01v_ascending(d03); - tmp = d03; - cmp = _mm256_cmpgt_epi64(d02, d03); - d03 = d2i(_mm256_blendv_pd(i2d(d03), i2d(d02), i2d(cmp))); - d02 = d2i(_mm256_blendv_pd(i2d(d02), i2d(tmp), i2d(cmp))); + tmp = d03; + cmp = _mm256_cmpgt_epi64(d02, d03); + d03 = d2i(_mm256_blendv_pd(i2d(d03), i2d(d02), i2d(cmp))); + d02 = d2i(_mm256_blendv_pd(i2d(d02), i2d(tmp), i2d(cmp))); - sort_02v_merge_descending(d01, d02); - sort_01v_merge_descending(d03); + sort_02v_merge_descending(d01, d02); + sort_01v_merge_descending(d03); } static INLINE void sort_03v_merge_ascending(__m256i& d01, __m256i& d02, __m256i& d03) { - __m256i tmp, cmp; + __m256i tmp, cmp; - tmp = d01; - cmp = _mm256_cmpgt_epi64(d03, d01); - d01 = d2i(_mm256_blendv_pd(i2d(d03), i2d(d01), i2d(cmp))); - cmp = _mm256_cmpgt_epi64(d03, tmp); - d03 = d2i(_mm256_blendv_pd(i2d(tmp), i2d(d03), i2d(cmp))); + tmp = d01; + cmp = _mm256_cmpgt_epi64(d03, d01); + d01 = d2i(_mm256_blendv_pd(i2d(d03), i2d(d01), i2d(cmp))); + cmp = _mm256_cmpgt_epi64(d03, tmp); + d03 = d2i(_mm256_blendv_pd(i2d(tmp), i2d(d03), i2d(cmp))); - sort_02v_merge_ascending(d01, d02); - sort_01v_merge_ascending(d03); + sort_02v_merge_ascending(d01, d02); + sort_01v_merge_ascending(d03); } static INLINE void sort_03v_merge_descending(__m256i& d01, __m256i& d02, __m256i& d03) { - __m256i tmp, cmp; + __m256i tmp, cmp; - tmp = d01; - cmp = _mm256_cmpgt_epi64(d03, d01); - d01 = d2i(_mm256_blendv_pd(i2d(d03), i2d(d01), i2d(cmp))); - cmp = _mm256_cmpgt_epi64(d03, tmp); - d03 = d2i(_mm256_blendv_pd(i2d(tmp), i2d(d03), i2d(cmp))); + tmp = d01; + cmp = _mm256_cmpgt_epi64(d03, d01); + d01 = d2i(_mm256_blendv_pd(i2d(d03), i2d(d01), i2d(cmp))); + cmp = _mm256_cmpgt_epi64(d03, tmp); + d03 = d2i(_mm256_blendv_pd(i2d(tmp), i2d(d03), i2d(cmp))); - sort_02v_merge_descending(d01, d02); - sort_01v_merge_descending(d03); + sort_02v_merge_descending(d01, d02); + sort_01v_merge_descending(d03); } static INLINE void sort_04v_ascending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04) { - __m256i tmp, cmp; + __m256i tmp, cmp; - sort_02v_ascending(d01, d02); - sort_02v_descending(d03, d04); + sort_02v_ascending(d01, d02); + sort_02v_descending(d03, d04); - tmp = d03; - cmp = _mm256_cmpgt_epi64(d02, d03); - d03 = d2i(_mm256_blendv_pd(i2d(d03), i2d(d02), i2d(cmp))); - d02 = d2i(_mm256_blendv_pd(i2d(d02), i2d(tmp), i2d(cmp))); + tmp = d03; + cmp = _mm256_cmpgt_epi64(d02, d03); + d03 = d2i(_mm256_blendv_pd(i2d(d03), i2d(d02), i2d(cmp))); + d02 = d2i(_mm256_blendv_pd(i2d(d02), i2d(tmp), i2d(cmp))); - tmp = d04; - cmp = _mm256_cmpgt_epi64(d01, d04); - d04 = d2i(_mm256_blendv_pd(i2d(d04), i2d(d01), i2d(cmp))); - d01 = d2i(_mm256_blendv_pd(i2d(d01), i2d(tmp), i2d(cmp))); + tmp = d04; + cmp = _mm256_cmpgt_epi64(d01, d04); + d04 = d2i(_mm256_blendv_pd(i2d(d04), i2d(d01), i2d(cmp))); + d01 = d2i(_mm256_blendv_pd(i2d(d01), i2d(tmp), i2d(cmp))); - sort_02v_merge_ascending(d01, d02); - sort_02v_merge_ascending(d03, d04); + sort_02v_merge_ascending(d01, d02); + sort_02v_merge_ascending(d03, d04); } static INLINE void sort_04v_descending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04) { - __m256i tmp, cmp; + __m256i tmp, cmp; - sort_02v_descending(d01, d02); 
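[Note on the idiom repeated throughout this hunk: the hunk itself only re-indents the generated kernels under the renamed vxsort namespace; the underlying pattern is unchanged. AVX2 has no 64-bit integer min/max instruction, so every compare-exchange above is synthesized from a signed compare (_mm256_cmpgt_epi64) plus a variable blend; and because _mm256_blendv_pd exists only in the double domain, every operand is routed through the d2i/i2d bit-casts. A minimal standalone sketch of that primitive, with a hypothetical helper name, illustration only and not part of the patch:

    #include <immintrin.h>

    // Compare-exchange two vectors of four int64_t lanes: afterwards,
    // lane-wise, a holds the smaller value and b the larger one. This is
    // the pattern the generated sort_NNv_* bodies unroll over and over.
    static inline void coex64(__m256i& a, __m256i& b) {
        __m256i tmp = b;                         // preserve b before blending
        __m256i cmp = _mm256_cmpgt_epi64(a, b);  // all-ones lanes where a > b
        // blendv_pd picks its second operand where the mask sign bit is set
        b = _mm256_castpd_si256(_mm256_blendv_pd(       // b := max(a, b)
                _mm256_castsi256_pd(b), _mm256_castsi256_pd(a),
                _mm256_castsi256_pd(cmp)));
        a = _mm256_castpd_si256(_mm256_blendv_pd(       // a := min(a, b)
                _mm256_castsi256_pd(a), _mm256_castsi256_pd(tmp),
                _mm256_castsi256_pd(cmp)));
    }

The intra-vector steps (the sort_01v_* functions) apply the same compare-and-blend after shuffling a vector against itself with _mm256_shuffle_pd or _mm256_permute4x64_pd, then recombine the min and max lanes with an immediate _mm256_blend_pd.]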
- sort_02v_ascending(d03, d04); + sort_02v_descending(d01, d02); + sort_02v_ascending(d03, d04); - tmp = d03; - cmp = _mm256_cmpgt_epi64(d02, d03); - d03 = d2i(_mm256_blendv_pd(i2d(d03), i2d(d02), i2d(cmp))); - d02 = d2i(_mm256_blendv_pd(i2d(d02), i2d(tmp), i2d(cmp))); + tmp = d03; + cmp = _mm256_cmpgt_epi64(d02, d03); + d03 = d2i(_mm256_blendv_pd(i2d(d03), i2d(d02), i2d(cmp))); + d02 = d2i(_mm256_blendv_pd(i2d(d02), i2d(tmp), i2d(cmp))); - tmp = d04; - cmp = _mm256_cmpgt_epi64(d01, d04); - d04 = d2i(_mm256_blendv_pd(i2d(d04), i2d(d01), i2d(cmp))); - d01 = d2i(_mm256_blendv_pd(i2d(d01), i2d(tmp), i2d(cmp))); + tmp = d04; + cmp = _mm256_cmpgt_epi64(d01, d04); + d04 = d2i(_mm256_blendv_pd(i2d(d04), i2d(d01), i2d(cmp))); + d01 = d2i(_mm256_blendv_pd(i2d(d01), i2d(tmp), i2d(cmp))); - sort_02v_merge_descending(d01, d02); - sort_02v_merge_descending(d03, d04); + sort_02v_merge_descending(d01, d02); + sort_02v_merge_descending(d03, d04); } static INLINE void sort_04v_merge_ascending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04) { - __m256i tmp, cmp; - - tmp = d01; - cmp = _mm256_cmpgt_epi64(d03, d01); - d01 = d2i(_mm256_blendv_pd(i2d(d03), i2d(d01), i2d(cmp))); - cmp = _mm256_cmpgt_epi64(d03, tmp); - d03 = d2i(_mm256_blendv_pd(i2d(tmp), i2d(d03), i2d(cmp))); - - tmp = d02; - cmp = _mm256_cmpgt_epi64(d04, d02); - d02 = d2i(_mm256_blendv_pd(i2d(d04), i2d(d02), i2d(cmp))); - cmp = _mm256_cmpgt_epi64(d04, tmp); - d04 = d2i(_mm256_blendv_pd(i2d(tmp), i2d(d04), i2d(cmp))); - - sort_02v_merge_ascending(d01, d02); - sort_02v_merge_ascending(d03, d04); + __m256i tmp, cmp; + + tmp = d01; + cmp = _mm256_cmpgt_epi64(d03, d01); + d01 = d2i(_mm256_blendv_pd(i2d(d03), i2d(d01), i2d(cmp))); + cmp = _mm256_cmpgt_epi64(d03, tmp); + d03 = d2i(_mm256_blendv_pd(i2d(tmp), i2d(d03), i2d(cmp))); + + tmp = d02; + cmp = _mm256_cmpgt_epi64(d04, d02); + d02 = d2i(_mm256_blendv_pd(i2d(d04), i2d(d02), i2d(cmp))); + cmp = _mm256_cmpgt_epi64(d04, tmp); + d04 = d2i(_mm256_blendv_pd(i2d(tmp), i2d(d04), i2d(cmp))); + + sort_02v_merge_ascending(d01, d02); + sort_02v_merge_ascending(d03, d04); } static INLINE void sort_04v_merge_descending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04) { - __m256i tmp, cmp; - - tmp = d01; - cmp = _mm256_cmpgt_epi64(d03, d01); - d01 = d2i(_mm256_blendv_pd(i2d(d03), i2d(d01), i2d(cmp))); - cmp = _mm256_cmpgt_epi64(d03, tmp); - d03 = d2i(_mm256_blendv_pd(i2d(tmp), i2d(d03), i2d(cmp))); - - tmp = d02; - cmp = _mm256_cmpgt_epi64(d04, d02); - d02 = d2i(_mm256_blendv_pd(i2d(d04), i2d(d02), i2d(cmp))); - cmp = _mm256_cmpgt_epi64(d04, tmp); - d04 = d2i(_mm256_blendv_pd(i2d(tmp), i2d(d04), i2d(cmp))); - - sort_02v_merge_descending(d01, d02); - sort_02v_merge_descending(d03, d04); + __m256i tmp, cmp; + + tmp = d01; + cmp = _mm256_cmpgt_epi64(d03, d01); + d01 = d2i(_mm256_blendv_pd(i2d(d03), i2d(d01), i2d(cmp))); + cmp = _mm256_cmpgt_epi64(d03, tmp); + d03 = d2i(_mm256_blendv_pd(i2d(tmp), i2d(d03), i2d(cmp))); + + tmp = d02; + cmp = _mm256_cmpgt_epi64(d04, d02); + d02 = d2i(_mm256_blendv_pd(i2d(d04), i2d(d02), i2d(cmp))); + cmp = _mm256_cmpgt_epi64(d04, tmp); + d04 = d2i(_mm256_blendv_pd(i2d(tmp), i2d(d04), i2d(cmp))); + + sort_02v_merge_descending(d01, d02); + sort_02v_merge_descending(d03, d04); } static INLINE void sort_05v_ascending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05) { - __m256i tmp, cmp; + __m256i tmp, cmp; - sort_04v_ascending(d01, d02, d03, d04); - sort_01v_descending(d05); + sort_04v_ascending(d01, d02, d03, d04); + sort_01v_descending(d05); - tmp = 
d05; - cmp = _mm256_cmpgt_epi64(d04, d05); - d05 = d2i(_mm256_blendv_pd(i2d(d05), i2d(d04), i2d(cmp))); - d04 = d2i(_mm256_blendv_pd(i2d(d04), i2d(tmp), i2d(cmp))); + tmp = d05; + cmp = _mm256_cmpgt_epi64(d04, d05); + d05 = d2i(_mm256_blendv_pd(i2d(d05), i2d(d04), i2d(cmp))); + d04 = d2i(_mm256_blendv_pd(i2d(d04), i2d(tmp), i2d(cmp))); - sort_04v_merge_ascending(d01, d02, d03, d04); - sort_01v_merge_ascending(d05); + sort_04v_merge_ascending(d01, d02, d03, d04); + sort_01v_merge_ascending(d05); } static INLINE void sort_05v_descending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05) { - __m256i tmp, cmp; + __m256i tmp, cmp; - sort_04v_descending(d01, d02, d03, d04); - sort_01v_ascending(d05); + sort_04v_descending(d01, d02, d03, d04); + sort_01v_ascending(d05); - tmp = d05; - cmp = _mm256_cmpgt_epi64(d04, d05); - d05 = d2i(_mm256_blendv_pd(i2d(d05), i2d(d04), i2d(cmp))); - d04 = d2i(_mm256_blendv_pd(i2d(d04), i2d(tmp), i2d(cmp))); + tmp = d05; + cmp = _mm256_cmpgt_epi64(d04, d05); + d05 = d2i(_mm256_blendv_pd(i2d(d05), i2d(d04), i2d(cmp))); + d04 = d2i(_mm256_blendv_pd(i2d(d04), i2d(tmp), i2d(cmp))); - sort_04v_merge_descending(d01, d02, d03, d04); - sort_01v_merge_descending(d05); + sort_04v_merge_descending(d01, d02, d03, d04); + sort_01v_merge_descending(d05); } static INLINE void sort_05v_merge_ascending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05) { - __m256i tmp, cmp; + __m256i tmp, cmp; - tmp = d01; - cmp = _mm256_cmpgt_epi64(d05, d01); - d01 = d2i(_mm256_blendv_pd(i2d(d05), i2d(d01), i2d(cmp))); - cmp = _mm256_cmpgt_epi64(d05, tmp); - d05 = d2i(_mm256_blendv_pd(i2d(tmp), i2d(d05), i2d(cmp))); + tmp = d01; + cmp = _mm256_cmpgt_epi64(d05, d01); + d01 = d2i(_mm256_blendv_pd(i2d(d05), i2d(d01), i2d(cmp))); + cmp = _mm256_cmpgt_epi64(d05, tmp); + d05 = d2i(_mm256_blendv_pd(i2d(tmp), i2d(d05), i2d(cmp))); - sort_04v_merge_ascending(d01, d02, d03, d04); - sort_01v_merge_ascending(d05); + sort_04v_merge_ascending(d01, d02, d03, d04); + sort_01v_merge_ascending(d05); } static INLINE void sort_05v_merge_descending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05) { - __m256i tmp, cmp; + __m256i tmp, cmp; - tmp = d01; - cmp = _mm256_cmpgt_epi64(d05, d01); - d01 = d2i(_mm256_blendv_pd(i2d(d05), i2d(d01), i2d(cmp))); - cmp = _mm256_cmpgt_epi64(d05, tmp); - d05 = d2i(_mm256_blendv_pd(i2d(tmp), i2d(d05), i2d(cmp))); + tmp = d01; + cmp = _mm256_cmpgt_epi64(d05, d01); + d01 = d2i(_mm256_blendv_pd(i2d(d05), i2d(d01), i2d(cmp))); + cmp = _mm256_cmpgt_epi64(d05, tmp); + d05 = d2i(_mm256_blendv_pd(i2d(tmp), i2d(d05), i2d(cmp))); - sort_04v_merge_descending(d01, d02, d03, d04); - sort_01v_merge_descending(d05); + sort_04v_merge_descending(d01, d02, d03, d04); + sort_01v_merge_descending(d05); } static INLINE void sort_06v_ascending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06) { - __m256i tmp, cmp; + __m256i tmp, cmp; - sort_04v_ascending(d01, d02, d03, d04); - sort_02v_descending(d05, d06); + sort_04v_ascending(d01, d02, d03, d04); + sort_02v_descending(d05, d06); - tmp = d05; - cmp = _mm256_cmpgt_epi64(d04, d05); - d05 = d2i(_mm256_blendv_pd(i2d(d05), i2d(d04), i2d(cmp))); - d04 = d2i(_mm256_blendv_pd(i2d(d04), i2d(tmp), i2d(cmp))); + tmp = d05; + cmp = _mm256_cmpgt_epi64(d04, d05); + d05 = d2i(_mm256_blendv_pd(i2d(d05), i2d(d04), i2d(cmp))); + d04 = d2i(_mm256_blendv_pd(i2d(d04), i2d(tmp), i2d(cmp))); - tmp = d06; - cmp = _mm256_cmpgt_epi64(d03, d06); - d06 = d2i(_mm256_blendv_pd(i2d(d06), 
i2d(d03), i2d(cmp))); - d03 = d2i(_mm256_blendv_pd(i2d(d03), i2d(tmp), i2d(cmp))); + tmp = d06; + cmp = _mm256_cmpgt_epi64(d03, d06); + d06 = d2i(_mm256_blendv_pd(i2d(d06), i2d(d03), i2d(cmp))); + d03 = d2i(_mm256_blendv_pd(i2d(d03), i2d(tmp), i2d(cmp))); - sort_04v_merge_ascending(d01, d02, d03, d04); - sort_02v_merge_ascending(d05, d06); + sort_04v_merge_ascending(d01, d02, d03, d04); + sort_02v_merge_ascending(d05, d06); } static INLINE void sort_06v_descending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06) { - __m256i tmp, cmp; + __m256i tmp, cmp; - sort_04v_descending(d01, d02, d03, d04); - sort_02v_ascending(d05, d06); + sort_04v_descending(d01, d02, d03, d04); + sort_02v_ascending(d05, d06); - tmp = d05; - cmp = _mm256_cmpgt_epi64(d04, d05); - d05 = d2i(_mm256_blendv_pd(i2d(d05), i2d(d04), i2d(cmp))); - d04 = d2i(_mm256_blendv_pd(i2d(d04), i2d(tmp), i2d(cmp))); + tmp = d05; + cmp = _mm256_cmpgt_epi64(d04, d05); + d05 = d2i(_mm256_blendv_pd(i2d(d05), i2d(d04), i2d(cmp))); + d04 = d2i(_mm256_blendv_pd(i2d(d04), i2d(tmp), i2d(cmp))); - tmp = d06; - cmp = _mm256_cmpgt_epi64(d03, d06); - d06 = d2i(_mm256_blendv_pd(i2d(d06), i2d(d03), i2d(cmp))); - d03 = d2i(_mm256_blendv_pd(i2d(d03), i2d(tmp), i2d(cmp))); + tmp = d06; + cmp = _mm256_cmpgt_epi64(d03, d06); + d06 = d2i(_mm256_blendv_pd(i2d(d06), i2d(d03), i2d(cmp))); + d03 = d2i(_mm256_blendv_pd(i2d(d03), i2d(tmp), i2d(cmp))); - sort_04v_merge_descending(d01, d02, d03, d04); - sort_02v_merge_descending(d05, d06); + sort_04v_merge_descending(d01, d02, d03, d04); + sort_02v_merge_descending(d05, d06); } static INLINE void sort_06v_merge_ascending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06) { - __m256i tmp, cmp; - - tmp = d01; - cmp = _mm256_cmpgt_epi64(d05, d01); - d01 = d2i(_mm256_blendv_pd(i2d(d05), i2d(d01), i2d(cmp))); - cmp = _mm256_cmpgt_epi64(d05, tmp); - d05 = d2i(_mm256_blendv_pd(i2d(tmp), i2d(d05), i2d(cmp))); - - tmp = d02; - cmp = _mm256_cmpgt_epi64(d06, d02); - d02 = d2i(_mm256_blendv_pd(i2d(d06), i2d(d02), i2d(cmp))); - cmp = _mm256_cmpgt_epi64(d06, tmp); - d06 = d2i(_mm256_blendv_pd(i2d(tmp), i2d(d06), i2d(cmp))); - - sort_04v_merge_ascending(d01, d02, d03, d04); - sort_02v_merge_ascending(d05, d06); + __m256i tmp, cmp; + + tmp = d01; + cmp = _mm256_cmpgt_epi64(d05, d01); + d01 = d2i(_mm256_blendv_pd(i2d(d05), i2d(d01), i2d(cmp))); + cmp = _mm256_cmpgt_epi64(d05, tmp); + d05 = d2i(_mm256_blendv_pd(i2d(tmp), i2d(d05), i2d(cmp))); + + tmp = d02; + cmp = _mm256_cmpgt_epi64(d06, d02); + d02 = d2i(_mm256_blendv_pd(i2d(d06), i2d(d02), i2d(cmp))); + cmp = _mm256_cmpgt_epi64(d06, tmp); + d06 = d2i(_mm256_blendv_pd(i2d(tmp), i2d(d06), i2d(cmp))); + + sort_04v_merge_ascending(d01, d02, d03, d04); + sort_02v_merge_ascending(d05, d06); } static INLINE void sort_06v_merge_descending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06) { - __m256i tmp, cmp; - - tmp = d01; - cmp = _mm256_cmpgt_epi64(d05, d01); - d01 = d2i(_mm256_blendv_pd(i2d(d05), i2d(d01), i2d(cmp))); - cmp = _mm256_cmpgt_epi64(d05, tmp); - d05 = d2i(_mm256_blendv_pd(i2d(tmp), i2d(d05), i2d(cmp))); - - tmp = d02; - cmp = _mm256_cmpgt_epi64(d06, d02); - d02 = d2i(_mm256_blendv_pd(i2d(d06), i2d(d02), i2d(cmp))); - cmp = _mm256_cmpgt_epi64(d06, tmp); - d06 = d2i(_mm256_blendv_pd(i2d(tmp), i2d(d06), i2d(cmp))); - - sort_04v_merge_descending(d01, d02, d03, d04); - sort_02v_merge_descending(d05, d06); + __m256i tmp, cmp; + + tmp = d01; + cmp = _mm256_cmpgt_epi64(d05, d01); 
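[Zooming out, each sort_NNv_ascending/sort_NNv_descending pair above instantiates the classic bitonic network: sort the first half of the registers in one direction and the second half in the other, compare-exchange across the two halves, then bitonic-merge each half recursively (the sort_NNv_merge_* functions). A scalar sketch of the recursion, for orientation only — the patch unrolls it completely over __m256i registers, and its first cross step pairs mirrored positions across the boundary, an equivalent formulation:

    #include <algorithm>
    #include <cstddef>
    #include <cstdint>

    // Textbook bitonic sort for power-of-two n; asc selects the direction.
    static void bitonic_merge(int64_t* a, size_t n, bool asc) {
        if (n < 2) return;
        size_t h = n / 2;
        for (size_t i = 0; i < h; i++)      // cross compare-exchange
            if ((a[i] > a[i + h]) == asc)
                std::swap(a[i], a[i + h]);
        bitonic_merge(a, h, asc);           // each half is now bitonic
        bitonic_merge(a + h, h, asc);
    }

    static void bitonic_sort(int64_t* a, size_t n, bool asc) {
        if (n < 2) return;
        size_t h = n / 2;
        bitonic_sort(a, h, true);           // first half ascending
        bitonic_sort(a + h, h, false);      // second half descending
        bitonic_merge(a, n, asc);           // whole array is bitonic; merge
    }

The entry point in the new bitonic_sort.AVX2.int64_t.generated.cpp earlier in this patch dispatches on length / N with N = 4 (int64_t lanes per __m256i), so the unrolled kernels cover 1 to 16 registers, i.e. sorts of up to 64 elements in multiples of 4. The header rework above also explains the pragmas: GCC and Clang will not emit AVX2 instructions unless the code carries an avx2 target attribute, so the header pushes #pragma GCC target("avx2") (or the clang attribute-push equivalent) around all of its functions, something MSVC does not require.]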
+ d01 = d2i(_mm256_blendv_pd(i2d(d05), i2d(d01), i2d(cmp))); + cmp = _mm256_cmpgt_epi64(d05, tmp); + d05 = d2i(_mm256_blendv_pd(i2d(tmp), i2d(d05), i2d(cmp))); + + tmp = d02; + cmp = _mm256_cmpgt_epi64(d06, d02); + d02 = d2i(_mm256_blendv_pd(i2d(d06), i2d(d02), i2d(cmp))); + cmp = _mm256_cmpgt_epi64(d06, tmp); + d06 = d2i(_mm256_blendv_pd(i2d(tmp), i2d(d06), i2d(cmp))); + + sort_04v_merge_descending(d01, d02, d03, d04); + sort_02v_merge_descending(d05, d06); } static INLINE void sort_07v_ascending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07) { - __m256i tmp, cmp; + __m256i tmp, cmp; - sort_04v_ascending(d01, d02, d03, d04); - sort_03v_descending(d05, d06, d07); + sort_04v_ascending(d01, d02, d03, d04); + sort_03v_descending(d05, d06, d07); - tmp = d05; - cmp = _mm256_cmpgt_epi64(d04, d05); - d05 = d2i(_mm256_blendv_pd(i2d(d05), i2d(d04), i2d(cmp))); - d04 = d2i(_mm256_blendv_pd(i2d(d04), i2d(tmp), i2d(cmp))); + tmp = d05; + cmp = _mm256_cmpgt_epi64(d04, d05); + d05 = d2i(_mm256_blendv_pd(i2d(d05), i2d(d04), i2d(cmp))); + d04 = d2i(_mm256_blendv_pd(i2d(d04), i2d(tmp), i2d(cmp))); - tmp = d06; - cmp = _mm256_cmpgt_epi64(d03, d06); - d06 = d2i(_mm256_blendv_pd(i2d(d06), i2d(d03), i2d(cmp))); - d03 = d2i(_mm256_blendv_pd(i2d(d03), i2d(tmp), i2d(cmp))); + tmp = d06; + cmp = _mm256_cmpgt_epi64(d03, d06); + d06 = d2i(_mm256_blendv_pd(i2d(d06), i2d(d03), i2d(cmp))); + d03 = d2i(_mm256_blendv_pd(i2d(d03), i2d(tmp), i2d(cmp))); - tmp = d07; - cmp = _mm256_cmpgt_epi64(d02, d07); - d07 = d2i(_mm256_blendv_pd(i2d(d07), i2d(d02), i2d(cmp))); - d02 = d2i(_mm256_blendv_pd(i2d(d02), i2d(tmp), i2d(cmp))); + tmp = d07; + cmp = _mm256_cmpgt_epi64(d02, d07); + d07 = d2i(_mm256_blendv_pd(i2d(d07), i2d(d02), i2d(cmp))); + d02 = d2i(_mm256_blendv_pd(i2d(d02), i2d(tmp), i2d(cmp))); - sort_04v_merge_ascending(d01, d02, d03, d04); - sort_03v_merge_ascending(d05, d06, d07); + sort_04v_merge_ascending(d01, d02, d03, d04); + sort_03v_merge_ascending(d05, d06, d07); } static INLINE void sort_07v_descending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07) { - __m256i tmp, cmp; + __m256i tmp, cmp; - sort_04v_descending(d01, d02, d03, d04); - sort_03v_ascending(d05, d06, d07); + sort_04v_descending(d01, d02, d03, d04); + sort_03v_ascending(d05, d06, d07); - tmp = d05; - cmp = _mm256_cmpgt_epi64(d04, d05); - d05 = d2i(_mm256_blendv_pd(i2d(d05), i2d(d04), i2d(cmp))); - d04 = d2i(_mm256_blendv_pd(i2d(d04), i2d(tmp), i2d(cmp))); + tmp = d05; + cmp = _mm256_cmpgt_epi64(d04, d05); + d05 = d2i(_mm256_blendv_pd(i2d(d05), i2d(d04), i2d(cmp))); + d04 = d2i(_mm256_blendv_pd(i2d(d04), i2d(tmp), i2d(cmp))); - tmp = d06; - cmp = _mm256_cmpgt_epi64(d03, d06); - d06 = d2i(_mm256_blendv_pd(i2d(d06), i2d(d03), i2d(cmp))); - d03 = d2i(_mm256_blendv_pd(i2d(d03), i2d(tmp), i2d(cmp))); + tmp = d06; + cmp = _mm256_cmpgt_epi64(d03, d06); + d06 = d2i(_mm256_blendv_pd(i2d(d06), i2d(d03), i2d(cmp))); + d03 = d2i(_mm256_blendv_pd(i2d(d03), i2d(tmp), i2d(cmp))); - tmp = d07; - cmp = _mm256_cmpgt_epi64(d02, d07); - d07 = d2i(_mm256_blendv_pd(i2d(d07), i2d(d02), i2d(cmp))); - d02 = d2i(_mm256_blendv_pd(i2d(d02), i2d(tmp), i2d(cmp))); + tmp = d07; + cmp = _mm256_cmpgt_epi64(d02, d07); + d07 = d2i(_mm256_blendv_pd(i2d(d07), i2d(d02), i2d(cmp))); + d02 = d2i(_mm256_blendv_pd(i2d(d02), i2d(tmp), i2d(cmp))); - sort_04v_merge_descending(d01, d02, d03, d04); - sort_03v_merge_descending(d05, d06, d07); + sort_04v_merge_descending(d01, d02, d03, d04); + 
sort_03v_merge_descending(d05, d06, d07); } static INLINE void sort_07v_merge_ascending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07) { - __m256i tmp, cmp; - - tmp = d01; - cmp = _mm256_cmpgt_epi64(d05, d01); - d01 = d2i(_mm256_blendv_pd(i2d(d05), i2d(d01), i2d(cmp))); - cmp = _mm256_cmpgt_epi64(d05, tmp); - d05 = d2i(_mm256_blendv_pd(i2d(tmp), i2d(d05), i2d(cmp))); - - tmp = d02; - cmp = _mm256_cmpgt_epi64(d06, d02); - d02 = d2i(_mm256_blendv_pd(i2d(d06), i2d(d02), i2d(cmp))); - cmp = _mm256_cmpgt_epi64(d06, tmp); - d06 = d2i(_mm256_blendv_pd(i2d(tmp), i2d(d06), i2d(cmp))); - - tmp = d03; - cmp = _mm256_cmpgt_epi64(d07, d03); - d03 = d2i(_mm256_blendv_pd(i2d(d07), i2d(d03), i2d(cmp))); - cmp = _mm256_cmpgt_epi64(d07, tmp); - d07 = d2i(_mm256_blendv_pd(i2d(tmp), i2d(d07), i2d(cmp))); - - sort_04v_merge_ascending(d01, d02, d03, d04); - sort_03v_merge_ascending(d05, d06, d07); + __m256i tmp, cmp; + + tmp = d01; + cmp = _mm256_cmpgt_epi64(d05, d01); + d01 = d2i(_mm256_blendv_pd(i2d(d05), i2d(d01), i2d(cmp))); + cmp = _mm256_cmpgt_epi64(d05, tmp); + d05 = d2i(_mm256_blendv_pd(i2d(tmp), i2d(d05), i2d(cmp))); + + tmp = d02; + cmp = _mm256_cmpgt_epi64(d06, d02); + d02 = d2i(_mm256_blendv_pd(i2d(d06), i2d(d02), i2d(cmp))); + cmp = _mm256_cmpgt_epi64(d06, tmp); + d06 = d2i(_mm256_blendv_pd(i2d(tmp), i2d(d06), i2d(cmp))); + + tmp = d03; + cmp = _mm256_cmpgt_epi64(d07, d03); + d03 = d2i(_mm256_blendv_pd(i2d(d07), i2d(d03), i2d(cmp))); + cmp = _mm256_cmpgt_epi64(d07, tmp); + d07 = d2i(_mm256_blendv_pd(i2d(tmp), i2d(d07), i2d(cmp))); + + sort_04v_merge_ascending(d01, d02, d03, d04); + sort_03v_merge_ascending(d05, d06, d07); } static INLINE void sort_07v_merge_descending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07) { - __m256i tmp, cmp; - - tmp = d01; - cmp = _mm256_cmpgt_epi64(d05, d01); - d01 = d2i(_mm256_blendv_pd(i2d(d05), i2d(d01), i2d(cmp))); - cmp = _mm256_cmpgt_epi64(d05, tmp); - d05 = d2i(_mm256_blendv_pd(i2d(tmp), i2d(d05), i2d(cmp))); - - tmp = d02; - cmp = _mm256_cmpgt_epi64(d06, d02); - d02 = d2i(_mm256_blendv_pd(i2d(d06), i2d(d02), i2d(cmp))); - cmp = _mm256_cmpgt_epi64(d06, tmp); - d06 = d2i(_mm256_blendv_pd(i2d(tmp), i2d(d06), i2d(cmp))); - - tmp = d03; - cmp = _mm256_cmpgt_epi64(d07, d03); - d03 = d2i(_mm256_blendv_pd(i2d(d07), i2d(d03), i2d(cmp))); - cmp = _mm256_cmpgt_epi64(d07, tmp); - d07 = d2i(_mm256_blendv_pd(i2d(tmp), i2d(d07), i2d(cmp))); - - sort_04v_merge_descending(d01, d02, d03, d04); - sort_03v_merge_descending(d05, d06, d07); + __m256i tmp, cmp; + + tmp = d01; + cmp = _mm256_cmpgt_epi64(d05, d01); + d01 = d2i(_mm256_blendv_pd(i2d(d05), i2d(d01), i2d(cmp))); + cmp = _mm256_cmpgt_epi64(d05, tmp); + d05 = d2i(_mm256_blendv_pd(i2d(tmp), i2d(d05), i2d(cmp))); + + tmp = d02; + cmp = _mm256_cmpgt_epi64(d06, d02); + d02 = d2i(_mm256_blendv_pd(i2d(d06), i2d(d02), i2d(cmp))); + cmp = _mm256_cmpgt_epi64(d06, tmp); + d06 = d2i(_mm256_blendv_pd(i2d(tmp), i2d(d06), i2d(cmp))); + + tmp = d03; + cmp = _mm256_cmpgt_epi64(d07, d03); + d03 = d2i(_mm256_blendv_pd(i2d(d07), i2d(d03), i2d(cmp))); + cmp = _mm256_cmpgt_epi64(d07, tmp); + d07 = d2i(_mm256_blendv_pd(i2d(tmp), i2d(d07), i2d(cmp))); + + sort_04v_merge_descending(d01, d02, d03, d04); + sort_03v_merge_descending(d05, d06, d07); } static INLINE void sort_08v_ascending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08) { - __m256i tmp, cmp; + __m256i tmp, cmp; - 
sort_04v_ascending(d01, d02, d03, d04); - sort_04v_descending(d05, d06, d07, d08); + sort_04v_ascending(d01, d02, d03, d04); + sort_04v_descending(d05, d06, d07, d08); - tmp = d05; - cmp = _mm256_cmpgt_epi64(d04, d05); - d05 = d2i(_mm256_blendv_pd(i2d(d05), i2d(d04), i2d(cmp))); - d04 = d2i(_mm256_blendv_pd(i2d(d04), i2d(tmp), i2d(cmp))); + tmp = d05; + cmp = _mm256_cmpgt_epi64(d04, d05); + d05 = d2i(_mm256_blendv_pd(i2d(d05), i2d(d04), i2d(cmp))); + d04 = d2i(_mm256_blendv_pd(i2d(d04), i2d(tmp), i2d(cmp))); - tmp = d06; - cmp = _mm256_cmpgt_epi64(d03, d06); - d06 = d2i(_mm256_blendv_pd(i2d(d06), i2d(d03), i2d(cmp))); - d03 = d2i(_mm256_blendv_pd(i2d(d03), i2d(tmp), i2d(cmp))); + tmp = d06; + cmp = _mm256_cmpgt_epi64(d03, d06); + d06 = d2i(_mm256_blendv_pd(i2d(d06), i2d(d03), i2d(cmp))); + d03 = d2i(_mm256_blendv_pd(i2d(d03), i2d(tmp), i2d(cmp))); - tmp = d07; - cmp = _mm256_cmpgt_epi64(d02, d07); - d07 = d2i(_mm256_blendv_pd(i2d(d07), i2d(d02), i2d(cmp))); - d02 = d2i(_mm256_blendv_pd(i2d(d02), i2d(tmp), i2d(cmp))); + tmp = d07; + cmp = _mm256_cmpgt_epi64(d02, d07); + d07 = d2i(_mm256_blendv_pd(i2d(d07), i2d(d02), i2d(cmp))); + d02 = d2i(_mm256_blendv_pd(i2d(d02), i2d(tmp), i2d(cmp))); - tmp = d08; - cmp = _mm256_cmpgt_epi64(d01, d08); - d08 = d2i(_mm256_blendv_pd(i2d(d08), i2d(d01), i2d(cmp))); - d01 = d2i(_mm256_blendv_pd(i2d(d01), i2d(tmp), i2d(cmp))); + tmp = d08; + cmp = _mm256_cmpgt_epi64(d01, d08); + d08 = d2i(_mm256_blendv_pd(i2d(d08), i2d(d01), i2d(cmp))); + d01 = d2i(_mm256_blendv_pd(i2d(d01), i2d(tmp), i2d(cmp))); - sort_04v_merge_ascending(d01, d02, d03, d04); - sort_04v_merge_ascending(d05, d06, d07, d08); + sort_04v_merge_ascending(d01, d02, d03, d04); + sort_04v_merge_ascending(d05, d06, d07, d08); } static INLINE void sort_08v_descending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08) { - __m256i tmp, cmp; + __m256i tmp, cmp; - sort_04v_descending(d01, d02, d03, d04); - sort_04v_ascending(d05, d06, d07, d08); + sort_04v_descending(d01, d02, d03, d04); + sort_04v_ascending(d05, d06, d07, d08); - tmp = d05; - cmp = _mm256_cmpgt_epi64(d04, d05); - d05 = d2i(_mm256_blendv_pd(i2d(d05), i2d(d04), i2d(cmp))); - d04 = d2i(_mm256_blendv_pd(i2d(d04), i2d(tmp), i2d(cmp))); + tmp = d05; + cmp = _mm256_cmpgt_epi64(d04, d05); + d05 = d2i(_mm256_blendv_pd(i2d(d05), i2d(d04), i2d(cmp))); + d04 = d2i(_mm256_blendv_pd(i2d(d04), i2d(tmp), i2d(cmp))); - tmp = d06; - cmp = _mm256_cmpgt_epi64(d03, d06); - d06 = d2i(_mm256_blendv_pd(i2d(d06), i2d(d03), i2d(cmp))); - d03 = d2i(_mm256_blendv_pd(i2d(d03), i2d(tmp), i2d(cmp))); + tmp = d06; + cmp = _mm256_cmpgt_epi64(d03, d06); + d06 = d2i(_mm256_blendv_pd(i2d(d06), i2d(d03), i2d(cmp))); + d03 = d2i(_mm256_blendv_pd(i2d(d03), i2d(tmp), i2d(cmp))); - tmp = d07; - cmp = _mm256_cmpgt_epi64(d02, d07); - d07 = d2i(_mm256_blendv_pd(i2d(d07), i2d(d02), i2d(cmp))); - d02 = d2i(_mm256_blendv_pd(i2d(d02), i2d(tmp), i2d(cmp))); + tmp = d07; + cmp = _mm256_cmpgt_epi64(d02, d07); + d07 = d2i(_mm256_blendv_pd(i2d(d07), i2d(d02), i2d(cmp))); + d02 = d2i(_mm256_blendv_pd(i2d(d02), i2d(tmp), i2d(cmp))); - tmp = d08; - cmp = _mm256_cmpgt_epi64(d01, d08); - d08 = d2i(_mm256_blendv_pd(i2d(d08), i2d(d01), i2d(cmp))); - d01 = d2i(_mm256_blendv_pd(i2d(d01), i2d(tmp), i2d(cmp))); + tmp = d08; + cmp = _mm256_cmpgt_epi64(d01, d08); + d08 = d2i(_mm256_blendv_pd(i2d(d08), i2d(d01), i2d(cmp))); + d01 = d2i(_mm256_blendv_pd(i2d(d01), i2d(tmp), i2d(cmp))); - sort_04v_merge_descending(d01, d02, d03, d04); - 
sort_04v_merge_descending(d05, d06, d07, d08); + sort_04v_merge_descending(d01, d02, d03, d04); + sort_04v_merge_descending(d05, d06, d07, d08); } static INLINE void sort_08v_merge_ascending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08) { - __m256i tmp, cmp; - - tmp = d01; - cmp = _mm256_cmpgt_epi64(d05, d01); - d01 = d2i(_mm256_blendv_pd(i2d(d05), i2d(d01), i2d(cmp))); - cmp = _mm256_cmpgt_epi64(d05, tmp); - d05 = d2i(_mm256_blendv_pd(i2d(tmp), i2d(d05), i2d(cmp))); - - tmp = d02; - cmp = _mm256_cmpgt_epi64(d06, d02); - d02 = d2i(_mm256_blendv_pd(i2d(d06), i2d(d02), i2d(cmp))); - cmp = _mm256_cmpgt_epi64(d06, tmp); - d06 = d2i(_mm256_blendv_pd(i2d(tmp), i2d(d06), i2d(cmp))); - - tmp = d03; - cmp = _mm256_cmpgt_epi64(d07, d03); - d03 = d2i(_mm256_blendv_pd(i2d(d07), i2d(d03), i2d(cmp))); - cmp = _mm256_cmpgt_epi64(d07, tmp); - d07 = d2i(_mm256_blendv_pd(i2d(tmp), i2d(d07), i2d(cmp))); - - tmp = d04; - cmp = _mm256_cmpgt_epi64(d08, d04); - d04 = d2i(_mm256_blendv_pd(i2d(d08), i2d(d04), i2d(cmp))); - cmp = _mm256_cmpgt_epi64(d08, tmp); - d08 = d2i(_mm256_blendv_pd(i2d(tmp), i2d(d08), i2d(cmp))); - - sort_04v_merge_ascending(d01, d02, d03, d04); - sort_04v_merge_ascending(d05, d06, d07, d08); + __m256i tmp, cmp; + + tmp = d01; + cmp = _mm256_cmpgt_epi64(d05, d01); + d01 = d2i(_mm256_blendv_pd(i2d(d05), i2d(d01), i2d(cmp))); + cmp = _mm256_cmpgt_epi64(d05, tmp); + d05 = d2i(_mm256_blendv_pd(i2d(tmp), i2d(d05), i2d(cmp))); + + tmp = d02; + cmp = _mm256_cmpgt_epi64(d06, d02); + d02 = d2i(_mm256_blendv_pd(i2d(d06), i2d(d02), i2d(cmp))); + cmp = _mm256_cmpgt_epi64(d06, tmp); + d06 = d2i(_mm256_blendv_pd(i2d(tmp), i2d(d06), i2d(cmp))); + + tmp = d03; + cmp = _mm256_cmpgt_epi64(d07, d03); + d03 = d2i(_mm256_blendv_pd(i2d(d07), i2d(d03), i2d(cmp))); + cmp = _mm256_cmpgt_epi64(d07, tmp); + d07 = d2i(_mm256_blendv_pd(i2d(tmp), i2d(d07), i2d(cmp))); + + tmp = d04; + cmp = _mm256_cmpgt_epi64(d08, d04); + d04 = d2i(_mm256_blendv_pd(i2d(d08), i2d(d04), i2d(cmp))); + cmp = _mm256_cmpgt_epi64(d08, tmp); + d08 = d2i(_mm256_blendv_pd(i2d(tmp), i2d(d08), i2d(cmp))); + + sort_04v_merge_ascending(d01, d02, d03, d04); + sort_04v_merge_ascending(d05, d06, d07, d08); } static INLINE void sort_08v_merge_descending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08) { - __m256i tmp, cmp; - - tmp = d01; - cmp = _mm256_cmpgt_epi64(d05, d01); - d01 = d2i(_mm256_blendv_pd(i2d(d05), i2d(d01), i2d(cmp))); - cmp = _mm256_cmpgt_epi64(d05, tmp); - d05 = d2i(_mm256_blendv_pd(i2d(tmp), i2d(d05), i2d(cmp))); - - tmp = d02; - cmp = _mm256_cmpgt_epi64(d06, d02); - d02 = d2i(_mm256_blendv_pd(i2d(d06), i2d(d02), i2d(cmp))); - cmp = _mm256_cmpgt_epi64(d06, tmp); - d06 = d2i(_mm256_blendv_pd(i2d(tmp), i2d(d06), i2d(cmp))); - - tmp = d03; - cmp = _mm256_cmpgt_epi64(d07, d03); - d03 = d2i(_mm256_blendv_pd(i2d(d07), i2d(d03), i2d(cmp))); - cmp = _mm256_cmpgt_epi64(d07, tmp); - d07 = d2i(_mm256_blendv_pd(i2d(tmp), i2d(d07), i2d(cmp))); - - tmp = d04; - cmp = _mm256_cmpgt_epi64(d08, d04); - d04 = d2i(_mm256_blendv_pd(i2d(d08), i2d(d04), i2d(cmp))); - cmp = _mm256_cmpgt_epi64(d08, tmp); - d08 = d2i(_mm256_blendv_pd(i2d(tmp), i2d(d08), i2d(cmp))); - - sort_04v_merge_descending(d01, d02, d03, d04); - sort_04v_merge_descending(d05, d06, d07, d08); + __m256i tmp, cmp; + + tmp = d01; + cmp = _mm256_cmpgt_epi64(d05, d01); + d01 = d2i(_mm256_blendv_pd(i2d(d05), i2d(d01), i2d(cmp))); + cmp = _mm256_cmpgt_epi64(d05, tmp); + 
d05 = d2i(_mm256_blendv_pd(i2d(tmp), i2d(d05), i2d(cmp))); + + tmp = d02; + cmp = _mm256_cmpgt_epi64(d06, d02); + d02 = d2i(_mm256_blendv_pd(i2d(d06), i2d(d02), i2d(cmp))); + cmp = _mm256_cmpgt_epi64(d06, tmp); + d06 = d2i(_mm256_blendv_pd(i2d(tmp), i2d(d06), i2d(cmp))); + + tmp = d03; + cmp = _mm256_cmpgt_epi64(d07, d03); + d03 = d2i(_mm256_blendv_pd(i2d(d07), i2d(d03), i2d(cmp))); + cmp = _mm256_cmpgt_epi64(d07, tmp); + d07 = d2i(_mm256_blendv_pd(i2d(tmp), i2d(d07), i2d(cmp))); + + tmp = d04; + cmp = _mm256_cmpgt_epi64(d08, d04); + d04 = d2i(_mm256_blendv_pd(i2d(d08), i2d(d04), i2d(cmp))); + cmp = _mm256_cmpgt_epi64(d08, tmp); + d08 = d2i(_mm256_blendv_pd(i2d(tmp), i2d(d08), i2d(cmp))); + + sort_04v_merge_descending(d01, d02, d03, d04); + sort_04v_merge_descending(d05, d06, d07, d08); } static INLINE void sort_09v_ascending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08, __m256i& d09) { - __m256i tmp, cmp; + __m256i tmp, cmp; - sort_08v_ascending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_01v_descending(d09); + sort_08v_ascending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_01v_descending(d09); - tmp = d09; - cmp = _mm256_cmpgt_epi64(d08, d09); - d09 = d2i(_mm256_blendv_pd(i2d(d09), i2d(d08), i2d(cmp))); - d08 = d2i(_mm256_blendv_pd(i2d(d08), i2d(tmp), i2d(cmp))); + tmp = d09; + cmp = _mm256_cmpgt_epi64(d08, d09); + d09 = d2i(_mm256_blendv_pd(i2d(d09), i2d(d08), i2d(cmp))); + d08 = d2i(_mm256_blendv_pd(i2d(d08), i2d(tmp), i2d(cmp))); - sort_08v_merge_ascending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_01v_merge_ascending(d09); + sort_08v_merge_ascending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_01v_merge_ascending(d09); } static INLINE void sort_09v_descending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08, __m256i& d09) { - __m256i tmp, cmp; + __m256i tmp, cmp; - sort_08v_descending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_01v_ascending(d09); + sort_08v_descending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_01v_ascending(d09); - tmp = d09; - cmp = _mm256_cmpgt_epi64(d08, d09); - d09 = d2i(_mm256_blendv_pd(i2d(d09), i2d(d08), i2d(cmp))); - d08 = d2i(_mm256_blendv_pd(i2d(d08), i2d(tmp), i2d(cmp))); + tmp = d09; + cmp = _mm256_cmpgt_epi64(d08, d09); + d09 = d2i(_mm256_blendv_pd(i2d(d09), i2d(d08), i2d(cmp))); + d08 = d2i(_mm256_blendv_pd(i2d(d08), i2d(tmp), i2d(cmp))); - sort_08v_merge_descending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_01v_merge_descending(d09); + sort_08v_merge_descending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_01v_merge_descending(d09); } static INLINE void sort_10v_ascending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08, __m256i& d09, __m256i& d10) { - __m256i tmp, cmp; + __m256i tmp, cmp; - sort_08v_ascending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_02v_descending(d09, d10); + sort_08v_ascending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_02v_descending(d09, d10); - tmp = d09; - cmp = _mm256_cmpgt_epi64(d08, d09); - d09 = d2i(_mm256_blendv_pd(i2d(d09), i2d(d08), i2d(cmp))); - d08 = d2i(_mm256_blendv_pd(i2d(d08), i2d(tmp), i2d(cmp))); + tmp = d09; + cmp = _mm256_cmpgt_epi64(d08, d09); + d09 = d2i(_mm256_blendv_pd(i2d(d09), i2d(d08), i2d(cmp))); + d08 = d2i(_mm256_blendv_pd(i2d(d08), i2d(tmp), i2d(cmp))); - tmp = d10; - cmp = _mm256_cmpgt_epi64(d07, d10); - d10 = d2i(_mm256_blendv_pd(i2d(d10), i2d(d07), i2d(cmp))); - d07 = 
d2i(_mm256_blendv_pd(i2d(d07), i2d(tmp), i2d(cmp))); + tmp = d10; + cmp = _mm256_cmpgt_epi64(d07, d10); + d10 = d2i(_mm256_blendv_pd(i2d(d10), i2d(d07), i2d(cmp))); + d07 = d2i(_mm256_blendv_pd(i2d(d07), i2d(tmp), i2d(cmp))); - sort_08v_merge_ascending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_02v_merge_ascending(d09, d10); + sort_08v_merge_ascending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_02v_merge_ascending(d09, d10); } static INLINE void sort_10v_descending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08, __m256i& d09, __m256i& d10) { - __m256i tmp, cmp; + __m256i tmp, cmp; - sort_08v_descending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_02v_ascending(d09, d10); + sort_08v_descending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_02v_ascending(d09, d10); - tmp = d09; - cmp = _mm256_cmpgt_epi64(d08, d09); - d09 = d2i(_mm256_blendv_pd(i2d(d09), i2d(d08), i2d(cmp))); - d08 = d2i(_mm256_blendv_pd(i2d(d08), i2d(tmp), i2d(cmp))); + tmp = d09; + cmp = _mm256_cmpgt_epi64(d08, d09); + d09 = d2i(_mm256_blendv_pd(i2d(d09), i2d(d08), i2d(cmp))); + d08 = d2i(_mm256_blendv_pd(i2d(d08), i2d(tmp), i2d(cmp))); - tmp = d10; - cmp = _mm256_cmpgt_epi64(d07, d10); - d10 = d2i(_mm256_blendv_pd(i2d(d10), i2d(d07), i2d(cmp))); - d07 = d2i(_mm256_blendv_pd(i2d(d07), i2d(tmp), i2d(cmp))); + tmp = d10; + cmp = _mm256_cmpgt_epi64(d07, d10); + d10 = d2i(_mm256_blendv_pd(i2d(d10), i2d(d07), i2d(cmp))); + d07 = d2i(_mm256_blendv_pd(i2d(d07), i2d(tmp), i2d(cmp))); - sort_08v_merge_descending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_02v_merge_descending(d09, d10); + sort_08v_merge_descending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_02v_merge_descending(d09, d10); } static INLINE void sort_11v_ascending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08, __m256i& d09, __m256i& d10, __m256i& d11) { - __m256i tmp, cmp; + __m256i tmp, cmp; - sort_08v_ascending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_03v_descending(d09, d10, d11); + sort_08v_ascending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_03v_descending(d09, d10, d11); - tmp = d09; - cmp = _mm256_cmpgt_epi64(d08, d09); - d09 = d2i(_mm256_blendv_pd(i2d(d09), i2d(d08), i2d(cmp))); - d08 = d2i(_mm256_blendv_pd(i2d(d08), i2d(tmp), i2d(cmp))); + tmp = d09; + cmp = _mm256_cmpgt_epi64(d08, d09); + d09 = d2i(_mm256_blendv_pd(i2d(d09), i2d(d08), i2d(cmp))); + d08 = d2i(_mm256_blendv_pd(i2d(d08), i2d(tmp), i2d(cmp))); - tmp = d10; - cmp = _mm256_cmpgt_epi64(d07, d10); - d10 = d2i(_mm256_blendv_pd(i2d(d10), i2d(d07), i2d(cmp))); - d07 = d2i(_mm256_blendv_pd(i2d(d07), i2d(tmp), i2d(cmp))); + tmp = d10; + cmp = _mm256_cmpgt_epi64(d07, d10); + d10 = d2i(_mm256_blendv_pd(i2d(d10), i2d(d07), i2d(cmp))); + d07 = d2i(_mm256_blendv_pd(i2d(d07), i2d(tmp), i2d(cmp))); - tmp = d11; - cmp = _mm256_cmpgt_epi64(d06, d11); - d11 = d2i(_mm256_blendv_pd(i2d(d11), i2d(d06), i2d(cmp))); - d06 = d2i(_mm256_blendv_pd(i2d(d06), i2d(tmp), i2d(cmp))); + tmp = d11; + cmp = _mm256_cmpgt_epi64(d06, d11); + d11 = d2i(_mm256_blendv_pd(i2d(d11), i2d(d06), i2d(cmp))); + d06 = d2i(_mm256_blendv_pd(i2d(d06), i2d(tmp), i2d(cmp))); - sort_08v_merge_ascending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_03v_merge_ascending(d09, d10, d11); + sort_08v_merge_ascending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_03v_merge_ascending(d09, d10, d11); } static INLINE void sort_11v_descending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, 
__m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08, __m256i& d09, __m256i& d10, __m256i& d11) { - __m256i tmp, cmp; + __m256i tmp, cmp; - sort_08v_descending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_03v_ascending(d09, d10, d11); + sort_08v_descending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_03v_ascending(d09, d10, d11); - tmp = d09; - cmp = _mm256_cmpgt_epi64(d08, d09); - d09 = d2i(_mm256_blendv_pd(i2d(d09), i2d(d08), i2d(cmp))); - d08 = d2i(_mm256_blendv_pd(i2d(d08), i2d(tmp), i2d(cmp))); + tmp = d09; + cmp = _mm256_cmpgt_epi64(d08, d09); + d09 = d2i(_mm256_blendv_pd(i2d(d09), i2d(d08), i2d(cmp))); + d08 = d2i(_mm256_blendv_pd(i2d(d08), i2d(tmp), i2d(cmp))); - tmp = d10; - cmp = _mm256_cmpgt_epi64(d07, d10); - d10 = d2i(_mm256_blendv_pd(i2d(d10), i2d(d07), i2d(cmp))); - d07 = d2i(_mm256_blendv_pd(i2d(d07), i2d(tmp), i2d(cmp))); + tmp = d10; + cmp = _mm256_cmpgt_epi64(d07, d10); + d10 = d2i(_mm256_blendv_pd(i2d(d10), i2d(d07), i2d(cmp))); + d07 = d2i(_mm256_blendv_pd(i2d(d07), i2d(tmp), i2d(cmp))); - tmp = d11; - cmp = _mm256_cmpgt_epi64(d06, d11); - d11 = d2i(_mm256_blendv_pd(i2d(d11), i2d(d06), i2d(cmp))); - d06 = d2i(_mm256_blendv_pd(i2d(d06), i2d(tmp), i2d(cmp))); + tmp = d11; + cmp = _mm256_cmpgt_epi64(d06, d11); + d11 = d2i(_mm256_blendv_pd(i2d(d11), i2d(d06), i2d(cmp))); + d06 = d2i(_mm256_blendv_pd(i2d(d06), i2d(tmp), i2d(cmp))); - sort_08v_merge_descending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_03v_merge_descending(d09, d10, d11); + sort_08v_merge_descending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_03v_merge_descending(d09, d10, d11); } static INLINE void sort_12v_ascending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08, __m256i& d09, __m256i& d10, __m256i& d11, __m256i& d12) { - __m256i tmp, cmp; + __m256i tmp, cmp; - sort_08v_ascending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_04v_descending(d09, d10, d11, d12); + sort_08v_ascending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_04v_descending(d09, d10, d11, d12); - tmp = d09; - cmp = _mm256_cmpgt_epi64(d08, d09); - d09 = d2i(_mm256_blendv_pd(i2d(d09), i2d(d08), i2d(cmp))); - d08 = d2i(_mm256_blendv_pd(i2d(d08), i2d(tmp), i2d(cmp))); + tmp = d09; + cmp = _mm256_cmpgt_epi64(d08, d09); + d09 = d2i(_mm256_blendv_pd(i2d(d09), i2d(d08), i2d(cmp))); + d08 = d2i(_mm256_blendv_pd(i2d(d08), i2d(tmp), i2d(cmp))); - tmp = d10; - cmp = _mm256_cmpgt_epi64(d07, d10); - d10 = d2i(_mm256_blendv_pd(i2d(d10), i2d(d07), i2d(cmp))); - d07 = d2i(_mm256_blendv_pd(i2d(d07), i2d(tmp), i2d(cmp))); + tmp = d10; + cmp = _mm256_cmpgt_epi64(d07, d10); + d10 = d2i(_mm256_blendv_pd(i2d(d10), i2d(d07), i2d(cmp))); + d07 = d2i(_mm256_blendv_pd(i2d(d07), i2d(tmp), i2d(cmp))); - tmp = d11; - cmp = _mm256_cmpgt_epi64(d06, d11); - d11 = d2i(_mm256_blendv_pd(i2d(d11), i2d(d06), i2d(cmp))); - d06 = d2i(_mm256_blendv_pd(i2d(d06), i2d(tmp), i2d(cmp))); + tmp = d11; + cmp = _mm256_cmpgt_epi64(d06, d11); + d11 = d2i(_mm256_blendv_pd(i2d(d11), i2d(d06), i2d(cmp))); + d06 = d2i(_mm256_blendv_pd(i2d(d06), i2d(tmp), i2d(cmp))); - tmp = d12; - cmp = _mm256_cmpgt_epi64(d05, d12); - d12 = d2i(_mm256_blendv_pd(i2d(d12), i2d(d05), i2d(cmp))); - d05 = d2i(_mm256_blendv_pd(i2d(d05), i2d(tmp), i2d(cmp))); + tmp = d12; + cmp = _mm256_cmpgt_epi64(d05, d12); + d12 = d2i(_mm256_blendv_pd(i2d(d12), i2d(d05), i2d(cmp))); + d05 = d2i(_mm256_blendv_pd(i2d(d05), i2d(tmp), i2d(cmp))); - sort_08v_merge_ascending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_04v_merge_ascending(d09, d10, 
d11, d12); + sort_08v_merge_ascending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_04v_merge_ascending(d09, d10, d11, d12); } static INLINE void sort_12v_descending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08, __m256i& d09, __m256i& d10, __m256i& d11, __m256i& d12) { - __m256i tmp, cmp; + __m256i tmp, cmp; - sort_08v_descending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_04v_ascending(d09, d10, d11, d12); + sort_08v_descending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_04v_ascending(d09, d10, d11, d12); - tmp = d09; - cmp = _mm256_cmpgt_epi64(d08, d09); - d09 = d2i(_mm256_blendv_pd(i2d(d09), i2d(d08), i2d(cmp))); - d08 = d2i(_mm256_blendv_pd(i2d(d08), i2d(tmp), i2d(cmp))); + tmp = d09; + cmp = _mm256_cmpgt_epi64(d08, d09); + d09 = d2i(_mm256_blendv_pd(i2d(d09), i2d(d08), i2d(cmp))); + d08 = d2i(_mm256_blendv_pd(i2d(d08), i2d(tmp), i2d(cmp))); - tmp = d10; - cmp = _mm256_cmpgt_epi64(d07, d10); - d10 = d2i(_mm256_blendv_pd(i2d(d10), i2d(d07), i2d(cmp))); - d07 = d2i(_mm256_blendv_pd(i2d(d07), i2d(tmp), i2d(cmp))); + tmp = d10; + cmp = _mm256_cmpgt_epi64(d07, d10); + d10 = d2i(_mm256_blendv_pd(i2d(d10), i2d(d07), i2d(cmp))); + d07 = d2i(_mm256_blendv_pd(i2d(d07), i2d(tmp), i2d(cmp))); - tmp = d11; - cmp = _mm256_cmpgt_epi64(d06, d11); - d11 = d2i(_mm256_blendv_pd(i2d(d11), i2d(d06), i2d(cmp))); - d06 = d2i(_mm256_blendv_pd(i2d(d06), i2d(tmp), i2d(cmp))); + tmp = d11; + cmp = _mm256_cmpgt_epi64(d06, d11); + d11 = d2i(_mm256_blendv_pd(i2d(d11), i2d(d06), i2d(cmp))); + d06 = d2i(_mm256_blendv_pd(i2d(d06), i2d(tmp), i2d(cmp))); - tmp = d12; - cmp = _mm256_cmpgt_epi64(d05, d12); - d12 = d2i(_mm256_blendv_pd(i2d(d12), i2d(d05), i2d(cmp))); - d05 = d2i(_mm256_blendv_pd(i2d(d05), i2d(tmp), i2d(cmp))); + tmp = d12; + cmp = _mm256_cmpgt_epi64(d05, d12); + d12 = d2i(_mm256_blendv_pd(i2d(d12), i2d(d05), i2d(cmp))); + d05 = d2i(_mm256_blendv_pd(i2d(d05), i2d(tmp), i2d(cmp))); - sort_08v_merge_descending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_04v_merge_descending(d09, d10, d11, d12); + sort_08v_merge_descending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_04v_merge_descending(d09, d10, d11, d12); } static INLINE void sort_13v_ascending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08, __m256i& d09, __m256i& d10, __m256i& d11, __m256i& d12, __m256i& d13) { - __m256i tmp, cmp; - - sort_08v_ascending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_05v_descending(d09, d10, d11, d12, d13); - - tmp = d09; - cmp = _mm256_cmpgt_epi64(d08, d09); - d09 = d2i(_mm256_blendv_pd(i2d(d09), i2d(d08), i2d(cmp))); - d08 = d2i(_mm256_blendv_pd(i2d(d08), i2d(tmp), i2d(cmp))); - - tmp = d10; - cmp = _mm256_cmpgt_epi64(d07, d10); - d10 = d2i(_mm256_blendv_pd(i2d(d10), i2d(d07), i2d(cmp))); - d07 = d2i(_mm256_blendv_pd(i2d(d07), i2d(tmp), i2d(cmp))); - - tmp = d11; - cmp = _mm256_cmpgt_epi64(d06, d11); - d11 = d2i(_mm256_blendv_pd(i2d(d11), i2d(d06), i2d(cmp))); - d06 = d2i(_mm256_blendv_pd(i2d(d06), i2d(tmp), i2d(cmp))); - - tmp = d12; - cmp = _mm256_cmpgt_epi64(d05, d12); - d12 = d2i(_mm256_blendv_pd(i2d(d12), i2d(d05), i2d(cmp))); - d05 = d2i(_mm256_blendv_pd(i2d(d05), i2d(tmp), i2d(cmp))); - - tmp = d13; - cmp = _mm256_cmpgt_epi64(d04, d13); - d13 = d2i(_mm256_blendv_pd(i2d(d13), i2d(d04), i2d(cmp))); - d04 = d2i(_mm256_blendv_pd(i2d(d04), i2d(tmp), i2d(cmp))); - - sort_08v_merge_ascending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_05v_merge_ascending(d09, d10, d11, 
d12, d13); + __m256i tmp, cmp; + + sort_08v_ascending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_05v_descending(d09, d10, d11, d12, d13); + + tmp = d09; + cmp = _mm256_cmpgt_epi64(d08, d09); + d09 = d2i(_mm256_blendv_pd(i2d(d09), i2d(d08), i2d(cmp))); + d08 = d2i(_mm256_blendv_pd(i2d(d08), i2d(tmp), i2d(cmp))); + + tmp = d10; + cmp = _mm256_cmpgt_epi64(d07, d10); + d10 = d2i(_mm256_blendv_pd(i2d(d10), i2d(d07), i2d(cmp))); + d07 = d2i(_mm256_blendv_pd(i2d(d07), i2d(tmp), i2d(cmp))); + + tmp = d11; + cmp = _mm256_cmpgt_epi64(d06, d11); + d11 = d2i(_mm256_blendv_pd(i2d(d11), i2d(d06), i2d(cmp))); + d06 = d2i(_mm256_blendv_pd(i2d(d06), i2d(tmp), i2d(cmp))); + + tmp = d12; + cmp = _mm256_cmpgt_epi64(d05, d12); + d12 = d2i(_mm256_blendv_pd(i2d(d12), i2d(d05), i2d(cmp))); + d05 = d2i(_mm256_blendv_pd(i2d(d05), i2d(tmp), i2d(cmp))); + + tmp = d13; + cmp = _mm256_cmpgt_epi64(d04, d13); + d13 = d2i(_mm256_blendv_pd(i2d(d13), i2d(d04), i2d(cmp))); + d04 = d2i(_mm256_blendv_pd(i2d(d04), i2d(tmp), i2d(cmp))); + + sort_08v_merge_ascending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_05v_merge_ascending(d09, d10, d11, d12, d13); } static INLINE void sort_13v_descending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08, __m256i& d09, __m256i& d10, __m256i& d11, __m256i& d12, __m256i& d13) { - __m256i tmp, cmp; - - sort_08v_descending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_05v_ascending(d09, d10, d11, d12, d13); - - tmp = d09; - cmp = _mm256_cmpgt_epi64(d08, d09); - d09 = d2i(_mm256_blendv_pd(i2d(d09), i2d(d08), i2d(cmp))); - d08 = d2i(_mm256_blendv_pd(i2d(d08), i2d(tmp), i2d(cmp))); - - tmp = d10; - cmp = _mm256_cmpgt_epi64(d07, d10); - d10 = d2i(_mm256_blendv_pd(i2d(d10), i2d(d07), i2d(cmp))); - d07 = d2i(_mm256_blendv_pd(i2d(d07), i2d(tmp), i2d(cmp))); - - tmp = d11; - cmp = _mm256_cmpgt_epi64(d06, d11); - d11 = d2i(_mm256_blendv_pd(i2d(d11), i2d(d06), i2d(cmp))); - d06 = d2i(_mm256_blendv_pd(i2d(d06), i2d(tmp), i2d(cmp))); - - tmp = d12; - cmp = _mm256_cmpgt_epi64(d05, d12); - d12 = d2i(_mm256_blendv_pd(i2d(d12), i2d(d05), i2d(cmp))); - d05 = d2i(_mm256_blendv_pd(i2d(d05), i2d(tmp), i2d(cmp))); - - tmp = d13; - cmp = _mm256_cmpgt_epi64(d04, d13); - d13 = d2i(_mm256_blendv_pd(i2d(d13), i2d(d04), i2d(cmp))); - d04 = d2i(_mm256_blendv_pd(i2d(d04), i2d(tmp), i2d(cmp))); - - sort_08v_merge_descending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_05v_merge_descending(d09, d10, d11, d12, d13); + __m256i tmp, cmp; + + sort_08v_descending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_05v_ascending(d09, d10, d11, d12, d13); + + tmp = d09; + cmp = _mm256_cmpgt_epi64(d08, d09); + d09 = d2i(_mm256_blendv_pd(i2d(d09), i2d(d08), i2d(cmp))); + d08 = d2i(_mm256_blendv_pd(i2d(d08), i2d(tmp), i2d(cmp))); + + tmp = d10; + cmp = _mm256_cmpgt_epi64(d07, d10); + d10 = d2i(_mm256_blendv_pd(i2d(d10), i2d(d07), i2d(cmp))); + d07 = d2i(_mm256_blendv_pd(i2d(d07), i2d(tmp), i2d(cmp))); + + tmp = d11; + cmp = _mm256_cmpgt_epi64(d06, d11); + d11 = d2i(_mm256_blendv_pd(i2d(d11), i2d(d06), i2d(cmp))); + d06 = d2i(_mm256_blendv_pd(i2d(d06), i2d(tmp), i2d(cmp))); + + tmp = d12; + cmp = _mm256_cmpgt_epi64(d05, d12); + d12 = d2i(_mm256_blendv_pd(i2d(d12), i2d(d05), i2d(cmp))); + d05 = d2i(_mm256_blendv_pd(i2d(d05), i2d(tmp), i2d(cmp))); + + tmp = d13; + cmp = _mm256_cmpgt_epi64(d04, d13); + d13 = d2i(_mm256_blendv_pd(i2d(d13), i2d(d04), i2d(cmp))); + d04 = d2i(_mm256_blendv_pd(i2d(d04), i2d(tmp), i2d(cmp))); + + sort_08v_merge_descending(d01, d02, d03, d04, 
d05, d06, d07, d08); + sort_05v_merge_descending(d09, d10, d11, d12, d13); } static INLINE void sort_14v_ascending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08, __m256i& d09, __m256i& d10, __m256i& d11, __m256i& d12, __m256i& d13, __m256i& d14) { - __m256i tmp, cmp; - - sort_08v_ascending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_06v_descending(d09, d10, d11, d12, d13, d14); - - tmp = d09; - cmp = _mm256_cmpgt_epi64(d08, d09); - d09 = d2i(_mm256_blendv_pd(i2d(d09), i2d(d08), i2d(cmp))); - d08 = d2i(_mm256_blendv_pd(i2d(d08), i2d(tmp), i2d(cmp))); - - tmp = d10; - cmp = _mm256_cmpgt_epi64(d07, d10); - d10 = d2i(_mm256_blendv_pd(i2d(d10), i2d(d07), i2d(cmp))); - d07 = d2i(_mm256_blendv_pd(i2d(d07), i2d(tmp), i2d(cmp))); - - tmp = d11; - cmp = _mm256_cmpgt_epi64(d06, d11); - d11 = d2i(_mm256_blendv_pd(i2d(d11), i2d(d06), i2d(cmp))); - d06 = d2i(_mm256_blendv_pd(i2d(d06), i2d(tmp), i2d(cmp))); - - tmp = d12; - cmp = _mm256_cmpgt_epi64(d05, d12); - d12 = d2i(_mm256_blendv_pd(i2d(d12), i2d(d05), i2d(cmp))); - d05 = d2i(_mm256_blendv_pd(i2d(d05), i2d(tmp), i2d(cmp))); - - tmp = d13; - cmp = _mm256_cmpgt_epi64(d04, d13); - d13 = d2i(_mm256_blendv_pd(i2d(d13), i2d(d04), i2d(cmp))); - d04 = d2i(_mm256_blendv_pd(i2d(d04), i2d(tmp), i2d(cmp))); - - tmp = d14; - cmp = _mm256_cmpgt_epi64(d03, d14); - d14 = d2i(_mm256_blendv_pd(i2d(d14), i2d(d03), i2d(cmp))); - d03 = d2i(_mm256_blendv_pd(i2d(d03), i2d(tmp), i2d(cmp))); - - sort_08v_merge_ascending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_06v_merge_ascending(d09, d10, d11, d12, d13, d14); + __m256i tmp, cmp; + + sort_08v_ascending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_06v_descending(d09, d10, d11, d12, d13, d14); + + tmp = d09; + cmp = _mm256_cmpgt_epi64(d08, d09); + d09 = d2i(_mm256_blendv_pd(i2d(d09), i2d(d08), i2d(cmp))); + d08 = d2i(_mm256_blendv_pd(i2d(d08), i2d(tmp), i2d(cmp))); + + tmp = d10; + cmp = _mm256_cmpgt_epi64(d07, d10); + d10 = d2i(_mm256_blendv_pd(i2d(d10), i2d(d07), i2d(cmp))); + d07 = d2i(_mm256_blendv_pd(i2d(d07), i2d(tmp), i2d(cmp))); + + tmp = d11; + cmp = _mm256_cmpgt_epi64(d06, d11); + d11 = d2i(_mm256_blendv_pd(i2d(d11), i2d(d06), i2d(cmp))); + d06 = d2i(_mm256_blendv_pd(i2d(d06), i2d(tmp), i2d(cmp))); + + tmp = d12; + cmp = _mm256_cmpgt_epi64(d05, d12); + d12 = d2i(_mm256_blendv_pd(i2d(d12), i2d(d05), i2d(cmp))); + d05 = d2i(_mm256_blendv_pd(i2d(d05), i2d(tmp), i2d(cmp))); + + tmp = d13; + cmp = _mm256_cmpgt_epi64(d04, d13); + d13 = d2i(_mm256_blendv_pd(i2d(d13), i2d(d04), i2d(cmp))); + d04 = d2i(_mm256_blendv_pd(i2d(d04), i2d(tmp), i2d(cmp))); + + tmp = d14; + cmp = _mm256_cmpgt_epi64(d03, d14); + d14 = d2i(_mm256_blendv_pd(i2d(d14), i2d(d03), i2d(cmp))); + d03 = d2i(_mm256_blendv_pd(i2d(d03), i2d(tmp), i2d(cmp))); + + sort_08v_merge_ascending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_06v_merge_ascending(d09, d10, d11, d12, d13, d14); } static INLINE void sort_14v_descending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08, __m256i& d09, __m256i& d10, __m256i& d11, __m256i& d12, __m256i& d13, __m256i& d14) { - __m256i tmp, cmp; - - sort_08v_descending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_06v_ascending(d09, d10, d11, d12, d13, d14); - - tmp = d09; - cmp = _mm256_cmpgt_epi64(d08, d09); - d09 = d2i(_mm256_blendv_pd(i2d(d09), i2d(d08), i2d(cmp))); - d08 = d2i(_mm256_blendv_pd(i2d(d08), i2d(tmp), i2d(cmp))); - - tmp = d10; - cmp = _mm256_cmpgt_epi64(d07, d10); - d10 = 
d2i(_mm256_blendv_pd(i2d(d10), i2d(d07), i2d(cmp))); - d07 = d2i(_mm256_blendv_pd(i2d(d07), i2d(tmp), i2d(cmp))); - - tmp = d11; - cmp = _mm256_cmpgt_epi64(d06, d11); - d11 = d2i(_mm256_blendv_pd(i2d(d11), i2d(d06), i2d(cmp))); - d06 = d2i(_mm256_blendv_pd(i2d(d06), i2d(tmp), i2d(cmp))); - - tmp = d12; - cmp = _mm256_cmpgt_epi64(d05, d12); - d12 = d2i(_mm256_blendv_pd(i2d(d12), i2d(d05), i2d(cmp))); - d05 = d2i(_mm256_blendv_pd(i2d(d05), i2d(tmp), i2d(cmp))); - - tmp = d13; - cmp = _mm256_cmpgt_epi64(d04, d13); - d13 = d2i(_mm256_blendv_pd(i2d(d13), i2d(d04), i2d(cmp))); - d04 = d2i(_mm256_blendv_pd(i2d(d04), i2d(tmp), i2d(cmp))); - - tmp = d14; - cmp = _mm256_cmpgt_epi64(d03, d14); - d14 = d2i(_mm256_blendv_pd(i2d(d14), i2d(d03), i2d(cmp))); - d03 = d2i(_mm256_blendv_pd(i2d(d03), i2d(tmp), i2d(cmp))); - - sort_08v_merge_descending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_06v_merge_descending(d09, d10, d11, d12, d13, d14); + __m256i tmp, cmp; + + sort_08v_descending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_06v_ascending(d09, d10, d11, d12, d13, d14); + + tmp = d09; + cmp = _mm256_cmpgt_epi64(d08, d09); + d09 = d2i(_mm256_blendv_pd(i2d(d09), i2d(d08), i2d(cmp))); + d08 = d2i(_mm256_blendv_pd(i2d(d08), i2d(tmp), i2d(cmp))); + + tmp = d10; + cmp = _mm256_cmpgt_epi64(d07, d10); + d10 = d2i(_mm256_blendv_pd(i2d(d10), i2d(d07), i2d(cmp))); + d07 = d2i(_mm256_blendv_pd(i2d(d07), i2d(tmp), i2d(cmp))); + + tmp = d11; + cmp = _mm256_cmpgt_epi64(d06, d11); + d11 = d2i(_mm256_blendv_pd(i2d(d11), i2d(d06), i2d(cmp))); + d06 = d2i(_mm256_blendv_pd(i2d(d06), i2d(tmp), i2d(cmp))); + + tmp = d12; + cmp = _mm256_cmpgt_epi64(d05, d12); + d12 = d2i(_mm256_blendv_pd(i2d(d12), i2d(d05), i2d(cmp))); + d05 = d2i(_mm256_blendv_pd(i2d(d05), i2d(tmp), i2d(cmp))); + + tmp = d13; + cmp = _mm256_cmpgt_epi64(d04, d13); + d13 = d2i(_mm256_blendv_pd(i2d(d13), i2d(d04), i2d(cmp))); + d04 = d2i(_mm256_blendv_pd(i2d(d04), i2d(tmp), i2d(cmp))); + + tmp = d14; + cmp = _mm256_cmpgt_epi64(d03, d14); + d14 = d2i(_mm256_blendv_pd(i2d(d14), i2d(d03), i2d(cmp))); + d03 = d2i(_mm256_blendv_pd(i2d(d03), i2d(tmp), i2d(cmp))); + + sort_08v_merge_descending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_06v_merge_descending(d09, d10, d11, d12, d13, d14); } static INLINE void sort_15v_ascending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08, __m256i& d09, __m256i& d10, __m256i& d11, __m256i& d12, __m256i& d13, __m256i& d14, __m256i& d15) { - __m256i tmp, cmp; - - sort_08v_ascending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_07v_descending(d09, d10, d11, d12, d13, d14, d15); - - tmp = d09; - cmp = _mm256_cmpgt_epi64(d08, d09); - d09 = d2i(_mm256_blendv_pd(i2d(d09), i2d(d08), i2d(cmp))); - d08 = d2i(_mm256_blendv_pd(i2d(d08), i2d(tmp), i2d(cmp))); - - tmp = d10; - cmp = _mm256_cmpgt_epi64(d07, d10); - d10 = d2i(_mm256_blendv_pd(i2d(d10), i2d(d07), i2d(cmp))); - d07 = d2i(_mm256_blendv_pd(i2d(d07), i2d(tmp), i2d(cmp))); - - tmp = d11; - cmp = _mm256_cmpgt_epi64(d06, d11); - d11 = d2i(_mm256_blendv_pd(i2d(d11), i2d(d06), i2d(cmp))); - d06 = d2i(_mm256_blendv_pd(i2d(d06), i2d(tmp), i2d(cmp))); - - tmp = d12; - cmp = _mm256_cmpgt_epi64(d05, d12); - d12 = d2i(_mm256_blendv_pd(i2d(d12), i2d(d05), i2d(cmp))); - d05 = d2i(_mm256_blendv_pd(i2d(d05), i2d(tmp), i2d(cmp))); - - tmp = d13; - cmp = _mm256_cmpgt_epi64(d04, d13); - d13 = d2i(_mm256_blendv_pd(i2d(d13), i2d(d04), i2d(cmp))); - d04 = d2i(_mm256_blendv_pd(i2d(d04), i2d(tmp), i2d(cmp))); - - tmp = d14; - cmp = 
_mm256_cmpgt_epi64(d03, d14); - d14 = d2i(_mm256_blendv_pd(i2d(d14), i2d(d03), i2d(cmp))); - d03 = d2i(_mm256_blendv_pd(i2d(d03), i2d(tmp), i2d(cmp))); - - tmp = d15; - cmp = _mm256_cmpgt_epi64(d02, d15); - d15 = d2i(_mm256_blendv_pd(i2d(d15), i2d(d02), i2d(cmp))); - d02 = d2i(_mm256_blendv_pd(i2d(d02), i2d(tmp), i2d(cmp))); - - sort_08v_merge_ascending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_07v_merge_ascending(d09, d10, d11, d12, d13, d14, d15); + __m256i tmp, cmp; + + sort_08v_ascending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_07v_descending(d09, d10, d11, d12, d13, d14, d15); + + tmp = d09; + cmp = _mm256_cmpgt_epi64(d08, d09); + d09 = d2i(_mm256_blendv_pd(i2d(d09), i2d(d08), i2d(cmp))); + d08 = d2i(_mm256_blendv_pd(i2d(d08), i2d(tmp), i2d(cmp))); + + tmp = d10; + cmp = _mm256_cmpgt_epi64(d07, d10); + d10 = d2i(_mm256_blendv_pd(i2d(d10), i2d(d07), i2d(cmp))); + d07 = d2i(_mm256_blendv_pd(i2d(d07), i2d(tmp), i2d(cmp))); + + tmp = d11; + cmp = _mm256_cmpgt_epi64(d06, d11); + d11 = d2i(_mm256_blendv_pd(i2d(d11), i2d(d06), i2d(cmp))); + d06 = d2i(_mm256_blendv_pd(i2d(d06), i2d(tmp), i2d(cmp))); + + tmp = d12; + cmp = _mm256_cmpgt_epi64(d05, d12); + d12 = d2i(_mm256_blendv_pd(i2d(d12), i2d(d05), i2d(cmp))); + d05 = d2i(_mm256_blendv_pd(i2d(d05), i2d(tmp), i2d(cmp))); + + tmp = d13; + cmp = _mm256_cmpgt_epi64(d04, d13); + d13 = d2i(_mm256_blendv_pd(i2d(d13), i2d(d04), i2d(cmp))); + d04 = d2i(_mm256_blendv_pd(i2d(d04), i2d(tmp), i2d(cmp))); + + tmp = d14; + cmp = _mm256_cmpgt_epi64(d03, d14); + d14 = d2i(_mm256_blendv_pd(i2d(d14), i2d(d03), i2d(cmp))); + d03 = d2i(_mm256_blendv_pd(i2d(d03), i2d(tmp), i2d(cmp))); + + tmp = d15; + cmp = _mm256_cmpgt_epi64(d02, d15); + d15 = d2i(_mm256_blendv_pd(i2d(d15), i2d(d02), i2d(cmp))); + d02 = d2i(_mm256_blendv_pd(i2d(d02), i2d(tmp), i2d(cmp))); + + sort_08v_merge_ascending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_07v_merge_ascending(d09, d10, d11, d12, d13, d14, d15); } static INLINE void sort_15v_descending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08, __m256i& d09, __m256i& d10, __m256i& d11, __m256i& d12, __m256i& d13, __m256i& d14, __m256i& d15) { - __m256i tmp, cmp; - - sort_08v_descending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_07v_ascending(d09, d10, d11, d12, d13, d14, d15); - - tmp = d09; - cmp = _mm256_cmpgt_epi64(d08, d09); - d09 = d2i(_mm256_blendv_pd(i2d(d09), i2d(d08), i2d(cmp))); - d08 = d2i(_mm256_blendv_pd(i2d(d08), i2d(tmp), i2d(cmp))); - - tmp = d10; - cmp = _mm256_cmpgt_epi64(d07, d10); - d10 = d2i(_mm256_blendv_pd(i2d(d10), i2d(d07), i2d(cmp))); - d07 = d2i(_mm256_blendv_pd(i2d(d07), i2d(tmp), i2d(cmp))); - - tmp = d11; - cmp = _mm256_cmpgt_epi64(d06, d11); - d11 = d2i(_mm256_blendv_pd(i2d(d11), i2d(d06), i2d(cmp))); - d06 = d2i(_mm256_blendv_pd(i2d(d06), i2d(tmp), i2d(cmp))); - - tmp = d12; - cmp = _mm256_cmpgt_epi64(d05, d12); - d12 = d2i(_mm256_blendv_pd(i2d(d12), i2d(d05), i2d(cmp))); - d05 = d2i(_mm256_blendv_pd(i2d(d05), i2d(tmp), i2d(cmp))); - - tmp = d13; - cmp = _mm256_cmpgt_epi64(d04, d13); - d13 = d2i(_mm256_blendv_pd(i2d(d13), i2d(d04), i2d(cmp))); - d04 = d2i(_mm256_blendv_pd(i2d(d04), i2d(tmp), i2d(cmp))); - - tmp = d14; - cmp = _mm256_cmpgt_epi64(d03, d14); - d14 = d2i(_mm256_blendv_pd(i2d(d14), i2d(d03), i2d(cmp))); - d03 = d2i(_mm256_blendv_pd(i2d(d03), i2d(tmp), i2d(cmp))); - - tmp = d15; - cmp = _mm256_cmpgt_epi64(d02, d15); - d15 = d2i(_mm256_blendv_pd(i2d(d15), i2d(d02), i2d(cmp))); - d02 = 
d2i(_mm256_blendv_pd(i2d(d02), i2d(tmp), i2d(cmp))); - - sort_08v_merge_descending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_07v_merge_descending(d09, d10, d11, d12, d13, d14, d15); + __m256i tmp, cmp; + + sort_08v_descending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_07v_ascending(d09, d10, d11, d12, d13, d14, d15); + + tmp = d09; + cmp = _mm256_cmpgt_epi64(d08, d09); + d09 = d2i(_mm256_blendv_pd(i2d(d09), i2d(d08), i2d(cmp))); + d08 = d2i(_mm256_blendv_pd(i2d(d08), i2d(tmp), i2d(cmp))); + + tmp = d10; + cmp = _mm256_cmpgt_epi64(d07, d10); + d10 = d2i(_mm256_blendv_pd(i2d(d10), i2d(d07), i2d(cmp))); + d07 = d2i(_mm256_blendv_pd(i2d(d07), i2d(tmp), i2d(cmp))); + + tmp = d11; + cmp = _mm256_cmpgt_epi64(d06, d11); + d11 = d2i(_mm256_blendv_pd(i2d(d11), i2d(d06), i2d(cmp))); + d06 = d2i(_mm256_blendv_pd(i2d(d06), i2d(tmp), i2d(cmp))); + + tmp = d12; + cmp = _mm256_cmpgt_epi64(d05, d12); + d12 = d2i(_mm256_blendv_pd(i2d(d12), i2d(d05), i2d(cmp))); + d05 = d2i(_mm256_blendv_pd(i2d(d05), i2d(tmp), i2d(cmp))); + + tmp = d13; + cmp = _mm256_cmpgt_epi64(d04, d13); + d13 = d2i(_mm256_blendv_pd(i2d(d13), i2d(d04), i2d(cmp))); + d04 = d2i(_mm256_blendv_pd(i2d(d04), i2d(tmp), i2d(cmp))); + + tmp = d14; + cmp = _mm256_cmpgt_epi64(d03, d14); + d14 = d2i(_mm256_blendv_pd(i2d(d14), i2d(d03), i2d(cmp))); + d03 = d2i(_mm256_blendv_pd(i2d(d03), i2d(tmp), i2d(cmp))); + + tmp = d15; + cmp = _mm256_cmpgt_epi64(d02, d15); + d15 = d2i(_mm256_blendv_pd(i2d(d15), i2d(d02), i2d(cmp))); + d02 = d2i(_mm256_blendv_pd(i2d(d02), i2d(tmp), i2d(cmp))); + + sort_08v_merge_descending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_07v_merge_descending(d09, d10, d11, d12, d13, d14, d15); } static INLINE void sort_16v_ascending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08, __m256i& d09, __m256i& d10, __m256i& d11, __m256i& d12, __m256i& d13, __m256i& d14, __m256i& d15, __m256i& d16) { - __m256i tmp, cmp; - - sort_08v_ascending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_08v_descending(d09, d10, d11, d12, d13, d14, d15, d16); - - tmp = d09; - cmp = _mm256_cmpgt_epi64(d08, d09); - d09 = d2i(_mm256_blendv_pd(i2d(d09), i2d(d08), i2d(cmp))); - d08 = d2i(_mm256_blendv_pd(i2d(d08), i2d(tmp), i2d(cmp))); - - tmp = d10; - cmp = _mm256_cmpgt_epi64(d07, d10); - d10 = d2i(_mm256_blendv_pd(i2d(d10), i2d(d07), i2d(cmp))); - d07 = d2i(_mm256_blendv_pd(i2d(d07), i2d(tmp), i2d(cmp))); - - tmp = d11; - cmp = _mm256_cmpgt_epi64(d06, d11); - d11 = d2i(_mm256_blendv_pd(i2d(d11), i2d(d06), i2d(cmp))); - d06 = d2i(_mm256_blendv_pd(i2d(d06), i2d(tmp), i2d(cmp))); - - tmp = d12; - cmp = _mm256_cmpgt_epi64(d05, d12); - d12 = d2i(_mm256_blendv_pd(i2d(d12), i2d(d05), i2d(cmp))); - d05 = d2i(_mm256_blendv_pd(i2d(d05), i2d(tmp), i2d(cmp))); - - tmp = d13; - cmp = _mm256_cmpgt_epi64(d04, d13); - d13 = d2i(_mm256_blendv_pd(i2d(d13), i2d(d04), i2d(cmp))); - d04 = d2i(_mm256_blendv_pd(i2d(d04), i2d(tmp), i2d(cmp))); - - tmp = d14; - cmp = _mm256_cmpgt_epi64(d03, d14); - d14 = d2i(_mm256_blendv_pd(i2d(d14), i2d(d03), i2d(cmp))); - d03 = d2i(_mm256_blendv_pd(i2d(d03), i2d(tmp), i2d(cmp))); - - tmp = d15; - cmp = _mm256_cmpgt_epi64(d02, d15); - d15 = d2i(_mm256_blendv_pd(i2d(d15), i2d(d02), i2d(cmp))); - d02 = d2i(_mm256_blendv_pd(i2d(d02), i2d(tmp), i2d(cmp))); - - tmp = d16; - cmp = _mm256_cmpgt_epi64(d01, d16); - d16 = d2i(_mm256_blendv_pd(i2d(d16), i2d(d01), i2d(cmp))); - d01 = d2i(_mm256_blendv_pd(i2d(d01), i2d(tmp), i2d(cmp))); - - sort_08v_merge_ascending(d01, d02, d03, d04, 
d05, d06, d07, d08); - sort_08v_merge_ascending(d09, d10, d11, d12, d13, d14, d15, d16); + __m256i tmp, cmp; + + sort_08v_ascending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_08v_descending(d09, d10, d11, d12, d13, d14, d15, d16); + + tmp = d09; + cmp = _mm256_cmpgt_epi64(d08, d09); + d09 = d2i(_mm256_blendv_pd(i2d(d09), i2d(d08), i2d(cmp))); + d08 = d2i(_mm256_blendv_pd(i2d(d08), i2d(tmp), i2d(cmp))); + + tmp = d10; + cmp = _mm256_cmpgt_epi64(d07, d10); + d10 = d2i(_mm256_blendv_pd(i2d(d10), i2d(d07), i2d(cmp))); + d07 = d2i(_mm256_blendv_pd(i2d(d07), i2d(tmp), i2d(cmp))); + + tmp = d11; + cmp = _mm256_cmpgt_epi64(d06, d11); + d11 = d2i(_mm256_blendv_pd(i2d(d11), i2d(d06), i2d(cmp))); + d06 = d2i(_mm256_blendv_pd(i2d(d06), i2d(tmp), i2d(cmp))); + + tmp = d12; + cmp = _mm256_cmpgt_epi64(d05, d12); + d12 = d2i(_mm256_blendv_pd(i2d(d12), i2d(d05), i2d(cmp))); + d05 = d2i(_mm256_blendv_pd(i2d(d05), i2d(tmp), i2d(cmp))); + + tmp = d13; + cmp = _mm256_cmpgt_epi64(d04, d13); + d13 = d2i(_mm256_blendv_pd(i2d(d13), i2d(d04), i2d(cmp))); + d04 = d2i(_mm256_blendv_pd(i2d(d04), i2d(tmp), i2d(cmp))); + + tmp = d14; + cmp = _mm256_cmpgt_epi64(d03, d14); + d14 = d2i(_mm256_blendv_pd(i2d(d14), i2d(d03), i2d(cmp))); + d03 = d2i(_mm256_blendv_pd(i2d(d03), i2d(tmp), i2d(cmp))); + + tmp = d15; + cmp = _mm256_cmpgt_epi64(d02, d15); + d15 = d2i(_mm256_blendv_pd(i2d(d15), i2d(d02), i2d(cmp))); + d02 = d2i(_mm256_blendv_pd(i2d(d02), i2d(tmp), i2d(cmp))); + + tmp = d16; + cmp = _mm256_cmpgt_epi64(d01, d16); + d16 = d2i(_mm256_blendv_pd(i2d(d16), i2d(d01), i2d(cmp))); + d01 = d2i(_mm256_blendv_pd(i2d(d01), i2d(tmp), i2d(cmp))); + + sort_08v_merge_ascending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_08v_merge_ascending(d09, d10, d11, d12, d13, d14, d15, d16); } static INLINE void sort_16v_descending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08, __m256i& d09, __m256i& d10, __m256i& d11, __m256i& d12, __m256i& d13, __m256i& d14, __m256i& d15, __m256i& d16) { - __m256i tmp, cmp; - - sort_08v_descending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_08v_ascending(d09, d10, d11, d12, d13, d14, d15, d16); - - tmp = d09; - cmp = _mm256_cmpgt_epi64(d08, d09); - d09 = d2i(_mm256_blendv_pd(i2d(d09), i2d(d08), i2d(cmp))); - d08 = d2i(_mm256_blendv_pd(i2d(d08), i2d(tmp), i2d(cmp))); - - tmp = d10; - cmp = _mm256_cmpgt_epi64(d07, d10); - d10 = d2i(_mm256_blendv_pd(i2d(d10), i2d(d07), i2d(cmp))); - d07 = d2i(_mm256_blendv_pd(i2d(d07), i2d(tmp), i2d(cmp))); - - tmp = d11; - cmp = _mm256_cmpgt_epi64(d06, d11); - d11 = d2i(_mm256_blendv_pd(i2d(d11), i2d(d06), i2d(cmp))); - d06 = d2i(_mm256_blendv_pd(i2d(d06), i2d(tmp), i2d(cmp))); - - tmp = d12; - cmp = _mm256_cmpgt_epi64(d05, d12); - d12 = d2i(_mm256_blendv_pd(i2d(d12), i2d(d05), i2d(cmp))); - d05 = d2i(_mm256_blendv_pd(i2d(d05), i2d(tmp), i2d(cmp))); - - tmp = d13; - cmp = _mm256_cmpgt_epi64(d04, d13); - d13 = d2i(_mm256_blendv_pd(i2d(d13), i2d(d04), i2d(cmp))); - d04 = d2i(_mm256_blendv_pd(i2d(d04), i2d(tmp), i2d(cmp))); - - tmp = d14; - cmp = _mm256_cmpgt_epi64(d03, d14); - d14 = d2i(_mm256_blendv_pd(i2d(d14), i2d(d03), i2d(cmp))); - d03 = d2i(_mm256_blendv_pd(i2d(d03), i2d(tmp), i2d(cmp))); - - tmp = d15; - cmp = _mm256_cmpgt_epi64(d02, d15); - d15 = d2i(_mm256_blendv_pd(i2d(d15), i2d(d02), i2d(cmp))); - d02 = d2i(_mm256_blendv_pd(i2d(d02), i2d(tmp), i2d(cmp))); - - tmp = d16; - cmp = _mm256_cmpgt_epi64(d01, d16); - d16 = d2i(_mm256_blendv_pd(i2d(d16), i2d(d01), i2d(cmp))); - d01 = 
d2i(_mm256_blendv_pd(i2d(d01), i2d(tmp), i2d(cmp))); - - sort_08v_merge_descending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_08v_merge_descending(d09, d10, d11, d12, d13, d14, d15, d16); + __m256i tmp, cmp; + + sort_08v_descending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_08v_ascending(d09, d10, d11, d12, d13, d14, d15, d16); + + tmp = d09; + cmp = _mm256_cmpgt_epi64(d08, d09); + d09 = d2i(_mm256_blendv_pd(i2d(d09), i2d(d08), i2d(cmp))); + d08 = d2i(_mm256_blendv_pd(i2d(d08), i2d(tmp), i2d(cmp))); + + tmp = d10; + cmp = _mm256_cmpgt_epi64(d07, d10); + d10 = d2i(_mm256_blendv_pd(i2d(d10), i2d(d07), i2d(cmp))); + d07 = d2i(_mm256_blendv_pd(i2d(d07), i2d(tmp), i2d(cmp))); + + tmp = d11; + cmp = _mm256_cmpgt_epi64(d06, d11); + d11 = d2i(_mm256_blendv_pd(i2d(d11), i2d(d06), i2d(cmp))); + d06 = d2i(_mm256_blendv_pd(i2d(d06), i2d(tmp), i2d(cmp))); + + tmp = d12; + cmp = _mm256_cmpgt_epi64(d05, d12); + d12 = d2i(_mm256_blendv_pd(i2d(d12), i2d(d05), i2d(cmp))); + d05 = d2i(_mm256_blendv_pd(i2d(d05), i2d(tmp), i2d(cmp))); + + tmp = d13; + cmp = _mm256_cmpgt_epi64(d04, d13); + d13 = d2i(_mm256_blendv_pd(i2d(d13), i2d(d04), i2d(cmp))); + d04 = d2i(_mm256_blendv_pd(i2d(d04), i2d(tmp), i2d(cmp))); + + tmp = d14; + cmp = _mm256_cmpgt_epi64(d03, d14); + d14 = d2i(_mm256_blendv_pd(i2d(d14), i2d(d03), i2d(cmp))); + d03 = d2i(_mm256_blendv_pd(i2d(d03), i2d(tmp), i2d(cmp))); + + tmp = d15; + cmp = _mm256_cmpgt_epi64(d02, d15); + d15 = d2i(_mm256_blendv_pd(i2d(d15), i2d(d02), i2d(cmp))); + d02 = d2i(_mm256_blendv_pd(i2d(d02), i2d(tmp), i2d(cmp))); + + tmp = d16; + cmp = _mm256_cmpgt_epi64(d01, d16); + d16 = d2i(_mm256_blendv_pd(i2d(d16), i2d(d01), i2d(cmp))); + d01 = d2i(_mm256_blendv_pd(i2d(d01), i2d(tmp), i2d(cmp))); + + sort_08v_merge_descending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_08v_merge_descending(d09, d10, d11, d12, d13, d14, d15, d16); } -static NOINLINE void sort_01v(int64_t *ptr) { - __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0);; - sort_01v_ascending(d01); - _mm256_storeu_si256((__m256i *) ptr + 0, d01); + static NOINLINE void sort_01v(int64_t *ptr) { + __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0);; + sort_01v_ascending(d01); + _mm256_storeu_si256((__m256i *) ptr + 0, d01); } -static NOINLINE void sort_02v(int64_t *ptr) { - __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0);; - __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1);; - sort_02v_ascending(d01, d02); - _mm256_storeu_si256((__m256i *) ptr + 0, d01); - _mm256_storeu_si256((__m256i *) ptr + 1, d02); + static NOINLINE void sort_02v(int64_t *ptr) { + __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0);; + __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1);; + sort_02v_ascending(d01, d02); + _mm256_storeu_si256((__m256i *) ptr + 0, d01); + _mm256_storeu_si256((__m256i *) ptr + 1, d02); } -static NOINLINE void sort_03v(int64_t *ptr) { - __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0);; - __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1);; - __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2);; - sort_03v_ascending(d01, d02, d03); - _mm256_storeu_si256((__m256i *) ptr + 0, d01); - _mm256_storeu_si256((__m256i *) ptr + 1, d02); - _mm256_storeu_si256((__m256i *) ptr + 2, d03); + static NOINLINE void sort_03v(int64_t *ptr) { + __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0);; + __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1);; + __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2);; + 
sort_03v_ascending(d01, d02, d03); + _mm256_storeu_si256((__m256i *) ptr + 0, d01); + _mm256_storeu_si256((__m256i *) ptr + 1, d02); + _mm256_storeu_si256((__m256i *) ptr + 2, d03); } -static NOINLINE void sort_04v(int64_t *ptr) { - __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0);; - __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1);; - __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2);; - __m256i d04 = _mm256_lddqu_si256((__m256i const *) ptr + 3);; - sort_04v_ascending(d01, d02, d03, d04); - _mm256_storeu_si256((__m256i *) ptr + 0, d01); - _mm256_storeu_si256((__m256i *) ptr + 1, d02); - _mm256_storeu_si256((__m256i *) ptr + 2, d03); - _mm256_storeu_si256((__m256i *) ptr + 3, d04); + static NOINLINE void sort_04v(int64_t *ptr) { + __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0);; + __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1);; + __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2);; + __m256i d04 = _mm256_lddqu_si256((__m256i const *) ptr + 3);; + sort_04v_ascending(d01, d02, d03, d04); + _mm256_storeu_si256((__m256i *) ptr + 0, d01); + _mm256_storeu_si256((__m256i *) ptr + 1, d02); + _mm256_storeu_si256((__m256i *) ptr + 2, d03); + _mm256_storeu_si256((__m256i *) ptr + 3, d04); } -static NOINLINE void sort_05v(int64_t *ptr) { - __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0);; - __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1);; - __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2);; - __m256i d04 = _mm256_lddqu_si256((__m256i const *) ptr + 3);; - __m256i d05 = _mm256_lddqu_si256((__m256i const *) ptr + 4);; - sort_05v_ascending(d01, d02, d03, d04, d05); - _mm256_storeu_si256((__m256i *) ptr + 0, d01); - _mm256_storeu_si256((__m256i *) ptr + 1, d02); - _mm256_storeu_si256((__m256i *) ptr + 2, d03); - _mm256_storeu_si256((__m256i *) ptr + 3, d04); - _mm256_storeu_si256((__m256i *) ptr + 4, d05); + static NOINLINE void sort_05v(int64_t *ptr) { + __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0);; + __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1);; + __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2);; + __m256i d04 = _mm256_lddqu_si256((__m256i const *) ptr + 3);; + __m256i d05 = _mm256_lddqu_si256((__m256i const *) ptr + 4);; + sort_05v_ascending(d01, d02, d03, d04, d05); + _mm256_storeu_si256((__m256i *) ptr + 0, d01); + _mm256_storeu_si256((__m256i *) ptr + 1, d02); + _mm256_storeu_si256((__m256i *) ptr + 2, d03); + _mm256_storeu_si256((__m256i *) ptr + 3, d04); + _mm256_storeu_si256((__m256i *) ptr + 4, d05); } -static NOINLINE void sort_06v(int64_t *ptr) { - __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0);; - __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1);; - __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2);; - __m256i d04 = _mm256_lddqu_si256((__m256i const *) ptr + 3);; - __m256i d05 = _mm256_lddqu_si256((__m256i const *) ptr + 4);; - __m256i d06 = _mm256_lddqu_si256((__m256i const *) ptr + 5);; - sort_06v_ascending(d01, d02, d03, d04, d05, d06); - _mm256_storeu_si256((__m256i *) ptr + 0, d01); - _mm256_storeu_si256((__m256i *) ptr + 1, d02); - _mm256_storeu_si256((__m256i *) ptr + 2, d03); - _mm256_storeu_si256((__m256i *) ptr + 3, d04); - _mm256_storeu_si256((__m256i *) ptr + 4, d05); - _mm256_storeu_si256((__m256i *) ptr + 5, d06); + static NOINLINE void sort_06v(int64_t *ptr) { + __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0);; + __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1);; + __m256i 
d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2);; + __m256i d04 = _mm256_lddqu_si256((__m256i const *) ptr + 3);; + __m256i d05 = _mm256_lddqu_si256((__m256i const *) ptr + 4);; + __m256i d06 = _mm256_lddqu_si256((__m256i const *) ptr + 5);; + sort_06v_ascending(d01, d02, d03, d04, d05, d06); + _mm256_storeu_si256((__m256i *) ptr + 0, d01); + _mm256_storeu_si256((__m256i *) ptr + 1, d02); + _mm256_storeu_si256((__m256i *) ptr + 2, d03); + _mm256_storeu_si256((__m256i *) ptr + 3, d04); + _mm256_storeu_si256((__m256i *) ptr + 4, d05); + _mm256_storeu_si256((__m256i *) ptr + 5, d06); } -static NOINLINE void sort_07v(int64_t *ptr) { - __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0);; - __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1);; - __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2);; - __m256i d04 = _mm256_lddqu_si256((__m256i const *) ptr + 3);; - __m256i d05 = _mm256_lddqu_si256((__m256i const *) ptr + 4);; - __m256i d06 = _mm256_lddqu_si256((__m256i const *) ptr + 5);; - __m256i d07 = _mm256_lddqu_si256((__m256i const *) ptr + 6);; - sort_07v_ascending(d01, d02, d03, d04, d05, d06, d07); - _mm256_storeu_si256((__m256i *) ptr + 0, d01); - _mm256_storeu_si256((__m256i *) ptr + 1, d02); - _mm256_storeu_si256((__m256i *) ptr + 2, d03); - _mm256_storeu_si256((__m256i *) ptr + 3, d04); - _mm256_storeu_si256((__m256i *) ptr + 4, d05); - _mm256_storeu_si256((__m256i *) ptr + 5, d06); - _mm256_storeu_si256((__m256i *) ptr + 6, d07); + static NOINLINE void sort_07v(int64_t *ptr) { + __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0);; + __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1);; + __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2);; + __m256i d04 = _mm256_lddqu_si256((__m256i const *) ptr + 3);; + __m256i d05 = _mm256_lddqu_si256((__m256i const *) ptr + 4);; + __m256i d06 = _mm256_lddqu_si256((__m256i const *) ptr + 5);; + __m256i d07 = _mm256_lddqu_si256((__m256i const *) ptr + 6);; + sort_07v_ascending(d01, d02, d03, d04, d05, d06, d07); + _mm256_storeu_si256((__m256i *) ptr + 0, d01); + _mm256_storeu_si256((__m256i *) ptr + 1, d02); + _mm256_storeu_si256((__m256i *) ptr + 2, d03); + _mm256_storeu_si256((__m256i *) ptr + 3, d04); + _mm256_storeu_si256((__m256i *) ptr + 4, d05); + _mm256_storeu_si256((__m256i *) ptr + 5, d06); + _mm256_storeu_si256((__m256i *) ptr + 6, d07); } -static NOINLINE void sort_08v(int64_t *ptr) { - __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0);; - __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1);; - __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2);; - __m256i d04 = _mm256_lddqu_si256((__m256i const *) ptr + 3);; - __m256i d05 = _mm256_lddqu_si256((__m256i const *) ptr + 4);; - __m256i d06 = _mm256_lddqu_si256((__m256i const *) ptr + 5);; - __m256i d07 = _mm256_lddqu_si256((__m256i const *) ptr + 6);; - __m256i d08 = _mm256_lddqu_si256((__m256i const *) ptr + 7);; - sort_08v_ascending(d01, d02, d03, d04, d05, d06, d07, d08); - _mm256_storeu_si256((__m256i *) ptr + 0, d01); - _mm256_storeu_si256((__m256i *) ptr + 1, d02); - _mm256_storeu_si256((__m256i *) ptr + 2, d03); - _mm256_storeu_si256((__m256i *) ptr + 3, d04); - _mm256_storeu_si256((__m256i *) ptr + 4, d05); - _mm256_storeu_si256((__m256i *) ptr + 5, d06); - _mm256_storeu_si256((__m256i *) ptr + 6, d07); - _mm256_storeu_si256((__m256i *) ptr + 7, d08); + static NOINLINE void sort_08v(int64_t *ptr) { + __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0);; + __m256i d02 = 
_mm256_lddqu_si256((__m256i const *) ptr + 1);; + __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2);; + __m256i d04 = _mm256_lddqu_si256((__m256i const *) ptr + 3);; + __m256i d05 = _mm256_lddqu_si256((__m256i const *) ptr + 4);; + __m256i d06 = _mm256_lddqu_si256((__m256i const *) ptr + 5);; + __m256i d07 = _mm256_lddqu_si256((__m256i const *) ptr + 6);; + __m256i d08 = _mm256_lddqu_si256((__m256i const *) ptr + 7);; + sort_08v_ascending(d01, d02, d03, d04, d05, d06, d07, d08); + _mm256_storeu_si256((__m256i *) ptr + 0, d01); + _mm256_storeu_si256((__m256i *) ptr + 1, d02); + _mm256_storeu_si256((__m256i *) ptr + 2, d03); + _mm256_storeu_si256((__m256i *) ptr + 3, d04); + _mm256_storeu_si256((__m256i *) ptr + 4, d05); + _mm256_storeu_si256((__m256i *) ptr + 5, d06); + _mm256_storeu_si256((__m256i *) ptr + 6, d07); + _mm256_storeu_si256((__m256i *) ptr + 7, d08); } -static NOINLINE void sort_09v(int64_t *ptr) { - __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0);; - __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1);; - __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2);; - __m256i d04 = _mm256_lddqu_si256((__m256i const *) ptr + 3);; - __m256i d05 = _mm256_lddqu_si256((__m256i const *) ptr + 4);; - __m256i d06 = _mm256_lddqu_si256((__m256i const *) ptr + 5);; - __m256i d07 = _mm256_lddqu_si256((__m256i const *) ptr + 6);; - __m256i d08 = _mm256_lddqu_si256((__m256i const *) ptr + 7);; - __m256i d09 = _mm256_lddqu_si256((__m256i const *) ptr + 8);; - sort_09v_ascending(d01, d02, d03, d04, d05, d06, d07, d08, d09); - _mm256_storeu_si256((__m256i *) ptr + 0, d01); - _mm256_storeu_si256((__m256i *) ptr + 1, d02); - _mm256_storeu_si256((__m256i *) ptr + 2, d03); - _mm256_storeu_si256((__m256i *) ptr + 3, d04); - _mm256_storeu_si256((__m256i *) ptr + 4, d05); - _mm256_storeu_si256((__m256i *) ptr + 5, d06); - _mm256_storeu_si256((__m256i *) ptr + 6, d07); - _mm256_storeu_si256((__m256i *) ptr + 7, d08); - _mm256_storeu_si256((__m256i *) ptr + 8, d09); + static NOINLINE void sort_09v(int64_t *ptr) { + __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0);; + __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1);; + __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2);; + __m256i d04 = _mm256_lddqu_si256((__m256i const *) ptr + 3);; + __m256i d05 = _mm256_lddqu_si256((__m256i const *) ptr + 4);; + __m256i d06 = _mm256_lddqu_si256((__m256i const *) ptr + 5);; + __m256i d07 = _mm256_lddqu_si256((__m256i const *) ptr + 6);; + __m256i d08 = _mm256_lddqu_si256((__m256i const *) ptr + 7);; + __m256i d09 = _mm256_lddqu_si256((__m256i const *) ptr + 8);; + sort_09v_ascending(d01, d02, d03, d04, d05, d06, d07, d08, d09); + _mm256_storeu_si256((__m256i *) ptr + 0, d01); + _mm256_storeu_si256((__m256i *) ptr + 1, d02); + _mm256_storeu_si256((__m256i *) ptr + 2, d03); + _mm256_storeu_si256((__m256i *) ptr + 3, d04); + _mm256_storeu_si256((__m256i *) ptr + 4, d05); + _mm256_storeu_si256((__m256i *) ptr + 5, d06); + _mm256_storeu_si256((__m256i *) ptr + 6, d07); + _mm256_storeu_si256((__m256i *) ptr + 7, d08); + _mm256_storeu_si256((__m256i *) ptr + 8, d09); } -static NOINLINE void sort_10v(int64_t *ptr) { - __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0);; - __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1);; - __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2);; - __m256i d04 = _mm256_lddqu_si256((__m256i const *) ptr + 3);; - __m256i d05 = _mm256_lddqu_si256((__m256i const *) ptr + 4);; - __m256i d06 = 
_mm256_lddqu_si256((__m256i const *) ptr + 5);; - __m256i d07 = _mm256_lddqu_si256((__m256i const *) ptr + 6);; - __m256i d08 = _mm256_lddqu_si256((__m256i const *) ptr + 7);; - __m256i d09 = _mm256_lddqu_si256((__m256i const *) ptr + 8);; - __m256i d10 = _mm256_lddqu_si256((__m256i const *) ptr + 9);; - sort_10v_ascending(d01, d02, d03, d04, d05, d06, d07, d08, d09, d10); - _mm256_storeu_si256((__m256i *) ptr + 0, d01); - _mm256_storeu_si256((__m256i *) ptr + 1, d02); - _mm256_storeu_si256((__m256i *) ptr + 2, d03); - _mm256_storeu_si256((__m256i *) ptr + 3, d04); - _mm256_storeu_si256((__m256i *) ptr + 4, d05); - _mm256_storeu_si256((__m256i *) ptr + 5, d06); - _mm256_storeu_si256((__m256i *) ptr + 6, d07); - _mm256_storeu_si256((__m256i *) ptr + 7, d08); - _mm256_storeu_si256((__m256i *) ptr + 8, d09); - _mm256_storeu_si256((__m256i *) ptr + 9, d10); + static NOINLINE void sort_10v(int64_t *ptr) { + __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0);; + __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1);; + __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2);; + __m256i d04 = _mm256_lddqu_si256((__m256i const *) ptr + 3);; + __m256i d05 = _mm256_lddqu_si256((__m256i const *) ptr + 4);; + __m256i d06 = _mm256_lddqu_si256((__m256i const *) ptr + 5);; + __m256i d07 = _mm256_lddqu_si256((__m256i const *) ptr + 6);; + __m256i d08 = _mm256_lddqu_si256((__m256i const *) ptr + 7);; + __m256i d09 = _mm256_lddqu_si256((__m256i const *) ptr + 8);; + __m256i d10 = _mm256_lddqu_si256((__m256i const *) ptr + 9);; + sort_10v_ascending(d01, d02, d03, d04, d05, d06, d07, d08, d09, d10); + _mm256_storeu_si256((__m256i *) ptr + 0, d01); + _mm256_storeu_si256((__m256i *) ptr + 1, d02); + _mm256_storeu_si256((__m256i *) ptr + 2, d03); + _mm256_storeu_si256((__m256i *) ptr + 3, d04); + _mm256_storeu_si256((__m256i *) ptr + 4, d05); + _mm256_storeu_si256((__m256i *) ptr + 5, d06); + _mm256_storeu_si256((__m256i *) ptr + 6, d07); + _mm256_storeu_si256((__m256i *) ptr + 7, d08); + _mm256_storeu_si256((__m256i *) ptr + 8, d09); + _mm256_storeu_si256((__m256i *) ptr + 9, d10); } -static NOINLINE void sort_11v(int64_t *ptr) { - __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0);; - __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1);; - __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2);; - __m256i d04 = _mm256_lddqu_si256((__m256i const *) ptr + 3);; - __m256i d05 = _mm256_lddqu_si256((__m256i const *) ptr + 4);; - __m256i d06 = _mm256_lddqu_si256((__m256i const *) ptr + 5);; - __m256i d07 = _mm256_lddqu_si256((__m256i const *) ptr + 6);; - __m256i d08 = _mm256_lddqu_si256((__m256i const *) ptr + 7);; - __m256i d09 = _mm256_lddqu_si256((__m256i const *) ptr + 8);; - __m256i d10 = _mm256_lddqu_si256((__m256i const *) ptr + 9);; - __m256i d11 = _mm256_lddqu_si256((__m256i const *) ptr + 10);; - sort_11v_ascending(d01, d02, d03, d04, d05, d06, d07, d08, d09, d10, d11); - _mm256_storeu_si256((__m256i *) ptr + 0, d01); - _mm256_storeu_si256((__m256i *) ptr + 1, d02); - _mm256_storeu_si256((__m256i *) ptr + 2, d03); - _mm256_storeu_si256((__m256i *) ptr + 3, d04); - _mm256_storeu_si256((__m256i *) ptr + 4, d05); - _mm256_storeu_si256((__m256i *) ptr + 5, d06); - _mm256_storeu_si256((__m256i *) ptr + 6, d07); - _mm256_storeu_si256((__m256i *) ptr + 7, d08); - _mm256_storeu_si256((__m256i *) ptr + 8, d09); - _mm256_storeu_si256((__m256i *) ptr + 9, d10); - _mm256_storeu_si256((__m256i *) ptr + 10, d11); + static NOINLINE void sort_11v(int64_t *ptr) { + __m256i d01 = 
_mm256_lddqu_si256((__m256i const *) ptr + 0);; + __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1);; + __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2);; + __m256i d04 = _mm256_lddqu_si256((__m256i const *) ptr + 3);; + __m256i d05 = _mm256_lddqu_si256((__m256i const *) ptr + 4);; + __m256i d06 = _mm256_lddqu_si256((__m256i const *) ptr + 5);; + __m256i d07 = _mm256_lddqu_si256((__m256i const *) ptr + 6);; + __m256i d08 = _mm256_lddqu_si256((__m256i const *) ptr + 7);; + __m256i d09 = _mm256_lddqu_si256((__m256i const *) ptr + 8);; + __m256i d10 = _mm256_lddqu_si256((__m256i const *) ptr + 9);; + __m256i d11 = _mm256_lddqu_si256((__m256i const *) ptr + 10);; + sort_11v_ascending(d01, d02, d03, d04, d05, d06, d07, d08, d09, d10, d11); + _mm256_storeu_si256((__m256i *) ptr + 0, d01); + _mm256_storeu_si256((__m256i *) ptr + 1, d02); + _mm256_storeu_si256((__m256i *) ptr + 2, d03); + _mm256_storeu_si256((__m256i *) ptr + 3, d04); + _mm256_storeu_si256((__m256i *) ptr + 4, d05); + _mm256_storeu_si256((__m256i *) ptr + 5, d06); + _mm256_storeu_si256((__m256i *) ptr + 6, d07); + _mm256_storeu_si256((__m256i *) ptr + 7, d08); + _mm256_storeu_si256((__m256i *) ptr + 8, d09); + _mm256_storeu_si256((__m256i *) ptr + 9, d10); + _mm256_storeu_si256((__m256i *) ptr + 10, d11); } -static NOINLINE void sort_12v(int64_t *ptr) { - __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0);; - __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1);; - __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2);; - __m256i d04 = _mm256_lddqu_si256((__m256i const *) ptr + 3);; - __m256i d05 = _mm256_lddqu_si256((__m256i const *) ptr + 4);; - __m256i d06 = _mm256_lddqu_si256((__m256i const *) ptr + 5);; - __m256i d07 = _mm256_lddqu_si256((__m256i const *) ptr + 6);; - __m256i d08 = _mm256_lddqu_si256((__m256i const *) ptr + 7);; - __m256i d09 = _mm256_lddqu_si256((__m256i const *) ptr + 8);; - __m256i d10 = _mm256_lddqu_si256((__m256i const *) ptr + 9);; - __m256i d11 = _mm256_lddqu_si256((__m256i const *) ptr + 10);; - __m256i d12 = _mm256_lddqu_si256((__m256i const *) ptr + 11);; - sort_12v_ascending(d01, d02, d03, d04, d05, d06, d07, d08, d09, d10, d11, d12); - _mm256_storeu_si256((__m256i *) ptr + 0, d01); - _mm256_storeu_si256((__m256i *) ptr + 1, d02); - _mm256_storeu_si256((__m256i *) ptr + 2, d03); - _mm256_storeu_si256((__m256i *) ptr + 3, d04); - _mm256_storeu_si256((__m256i *) ptr + 4, d05); - _mm256_storeu_si256((__m256i *) ptr + 5, d06); - _mm256_storeu_si256((__m256i *) ptr + 6, d07); - _mm256_storeu_si256((__m256i *) ptr + 7, d08); - _mm256_storeu_si256((__m256i *) ptr + 8, d09); - _mm256_storeu_si256((__m256i *) ptr + 9, d10); - _mm256_storeu_si256((__m256i *) ptr + 10, d11); - _mm256_storeu_si256((__m256i *) ptr + 11, d12); + static NOINLINE void sort_12v(int64_t *ptr) { + __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0);; + __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1);; + __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2);; + __m256i d04 = _mm256_lddqu_si256((__m256i const *) ptr + 3);; + __m256i d05 = _mm256_lddqu_si256((__m256i const *) ptr + 4);; + __m256i d06 = _mm256_lddqu_si256((__m256i const *) ptr + 5);; + __m256i d07 = _mm256_lddqu_si256((__m256i const *) ptr + 6);; + __m256i d08 = _mm256_lddqu_si256((__m256i const *) ptr + 7);; + __m256i d09 = _mm256_lddqu_si256((__m256i const *) ptr + 8);; + __m256i d10 = _mm256_lddqu_si256((__m256i const *) ptr + 9);; + __m256i d11 = _mm256_lddqu_si256((__m256i const *) ptr + 10);; + 
__m256i d12 = _mm256_lddqu_si256((__m256i const *) ptr + 11);; + sort_12v_ascending(d01, d02, d03, d04, d05, d06, d07, d08, d09, d10, d11, d12); + _mm256_storeu_si256((__m256i *) ptr + 0, d01); + _mm256_storeu_si256((__m256i *) ptr + 1, d02); + _mm256_storeu_si256((__m256i *) ptr + 2, d03); + _mm256_storeu_si256((__m256i *) ptr + 3, d04); + _mm256_storeu_si256((__m256i *) ptr + 4, d05); + _mm256_storeu_si256((__m256i *) ptr + 5, d06); + _mm256_storeu_si256((__m256i *) ptr + 6, d07); + _mm256_storeu_si256((__m256i *) ptr + 7, d08); + _mm256_storeu_si256((__m256i *) ptr + 8, d09); + _mm256_storeu_si256((__m256i *) ptr + 9, d10); + _mm256_storeu_si256((__m256i *) ptr + 10, d11); + _mm256_storeu_si256((__m256i *) ptr + 11, d12); } -static NOINLINE void sort_13v(int64_t *ptr) { - __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0);; - __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1);; - __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2);; - __m256i d04 = _mm256_lddqu_si256((__m256i const *) ptr + 3);; - __m256i d05 = _mm256_lddqu_si256((__m256i const *) ptr + 4);; - __m256i d06 = _mm256_lddqu_si256((__m256i const *) ptr + 5);; - __m256i d07 = _mm256_lddqu_si256((__m256i const *) ptr + 6);; - __m256i d08 = _mm256_lddqu_si256((__m256i const *) ptr + 7);; - __m256i d09 = _mm256_lddqu_si256((__m256i const *) ptr + 8);; - __m256i d10 = _mm256_lddqu_si256((__m256i const *) ptr + 9);; - __m256i d11 = _mm256_lddqu_si256((__m256i const *) ptr + 10);; - __m256i d12 = _mm256_lddqu_si256((__m256i const *) ptr + 11);; - __m256i d13 = _mm256_lddqu_si256((__m256i const *) ptr + 12);; - sort_13v_ascending(d01, d02, d03, d04, d05, d06, d07, d08, d09, d10, d11, d12, d13); - _mm256_storeu_si256((__m256i *) ptr + 0, d01); - _mm256_storeu_si256((__m256i *) ptr + 1, d02); - _mm256_storeu_si256((__m256i *) ptr + 2, d03); - _mm256_storeu_si256((__m256i *) ptr + 3, d04); - _mm256_storeu_si256((__m256i *) ptr + 4, d05); - _mm256_storeu_si256((__m256i *) ptr + 5, d06); - _mm256_storeu_si256((__m256i *) ptr + 6, d07); - _mm256_storeu_si256((__m256i *) ptr + 7, d08); - _mm256_storeu_si256((__m256i *) ptr + 8, d09); - _mm256_storeu_si256((__m256i *) ptr + 9, d10); - _mm256_storeu_si256((__m256i *) ptr + 10, d11); - _mm256_storeu_si256((__m256i *) ptr + 11, d12); - _mm256_storeu_si256((__m256i *) ptr + 12, d13); + static NOINLINE void sort_13v(int64_t *ptr) { + __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0);; + __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1);; + __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2);; + __m256i d04 = _mm256_lddqu_si256((__m256i const *) ptr + 3);; + __m256i d05 = _mm256_lddqu_si256((__m256i const *) ptr + 4);; + __m256i d06 = _mm256_lddqu_si256((__m256i const *) ptr + 5);; + __m256i d07 = _mm256_lddqu_si256((__m256i const *) ptr + 6);; + __m256i d08 = _mm256_lddqu_si256((__m256i const *) ptr + 7);; + __m256i d09 = _mm256_lddqu_si256((__m256i const *) ptr + 8);; + __m256i d10 = _mm256_lddqu_si256((__m256i const *) ptr + 9);; + __m256i d11 = _mm256_lddqu_si256((__m256i const *) ptr + 10);; + __m256i d12 = _mm256_lddqu_si256((__m256i const *) ptr + 11);; + __m256i d13 = _mm256_lddqu_si256((__m256i const *) ptr + 12);; + sort_13v_ascending(d01, d02, d03, d04, d05, d06, d07, d08, d09, d10, d11, d12, d13); + _mm256_storeu_si256((__m256i *) ptr + 0, d01); + _mm256_storeu_si256((__m256i *) ptr + 1, d02); + _mm256_storeu_si256((__m256i *) ptr + 2, d03); + _mm256_storeu_si256((__m256i *) ptr + 3, d04); + _mm256_storeu_si256((__m256i *) ptr + 4, 
d05); + _mm256_storeu_si256((__m256i *) ptr + 5, d06); + _mm256_storeu_si256((__m256i *) ptr + 6, d07); + _mm256_storeu_si256((__m256i *) ptr + 7, d08); + _mm256_storeu_si256((__m256i *) ptr + 8, d09); + _mm256_storeu_si256((__m256i *) ptr + 9, d10); + _mm256_storeu_si256((__m256i *) ptr + 10, d11); + _mm256_storeu_si256((__m256i *) ptr + 11, d12); + _mm256_storeu_si256((__m256i *) ptr + 12, d13); } -static NOINLINE void sort_14v(int64_t *ptr) { - __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0);; - __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1);; - __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2);; - __m256i d04 = _mm256_lddqu_si256((__m256i const *) ptr + 3);; - __m256i d05 = _mm256_lddqu_si256((__m256i const *) ptr + 4);; - __m256i d06 = _mm256_lddqu_si256((__m256i const *) ptr + 5);; - __m256i d07 = _mm256_lddqu_si256((__m256i const *) ptr + 6);; - __m256i d08 = _mm256_lddqu_si256((__m256i const *) ptr + 7);; - __m256i d09 = _mm256_lddqu_si256((__m256i const *) ptr + 8);; - __m256i d10 = _mm256_lddqu_si256((__m256i const *) ptr + 9);; - __m256i d11 = _mm256_lddqu_si256((__m256i const *) ptr + 10);; - __m256i d12 = _mm256_lddqu_si256((__m256i const *) ptr + 11);; - __m256i d13 = _mm256_lddqu_si256((__m256i const *) ptr + 12);; - __m256i d14 = _mm256_lddqu_si256((__m256i const *) ptr + 13);; - sort_14v_ascending(d01, d02, d03, d04, d05, d06, d07, d08, d09, d10, d11, d12, d13, d14); - _mm256_storeu_si256((__m256i *) ptr + 0, d01); - _mm256_storeu_si256((__m256i *) ptr + 1, d02); - _mm256_storeu_si256((__m256i *) ptr + 2, d03); - _mm256_storeu_si256((__m256i *) ptr + 3, d04); - _mm256_storeu_si256((__m256i *) ptr + 4, d05); - _mm256_storeu_si256((__m256i *) ptr + 5, d06); - _mm256_storeu_si256((__m256i *) ptr + 6, d07); - _mm256_storeu_si256((__m256i *) ptr + 7, d08); - _mm256_storeu_si256((__m256i *) ptr + 8, d09); - _mm256_storeu_si256((__m256i *) ptr + 9, d10); - _mm256_storeu_si256((__m256i *) ptr + 10, d11); - _mm256_storeu_si256((__m256i *) ptr + 11, d12); - _mm256_storeu_si256((__m256i *) ptr + 12, d13); - _mm256_storeu_si256((__m256i *) ptr + 13, d14); + static NOINLINE void sort_14v(int64_t *ptr) { + __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0);; + __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1);; + __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2);; + __m256i d04 = _mm256_lddqu_si256((__m256i const *) ptr + 3);; + __m256i d05 = _mm256_lddqu_si256((__m256i const *) ptr + 4);; + __m256i d06 = _mm256_lddqu_si256((__m256i const *) ptr + 5);; + __m256i d07 = _mm256_lddqu_si256((__m256i const *) ptr + 6);; + __m256i d08 = _mm256_lddqu_si256((__m256i const *) ptr + 7);; + __m256i d09 = _mm256_lddqu_si256((__m256i const *) ptr + 8);; + __m256i d10 = _mm256_lddqu_si256((__m256i const *) ptr + 9);; + __m256i d11 = _mm256_lddqu_si256((__m256i const *) ptr + 10);; + __m256i d12 = _mm256_lddqu_si256((__m256i const *) ptr + 11);; + __m256i d13 = _mm256_lddqu_si256((__m256i const *) ptr + 12);; + __m256i d14 = _mm256_lddqu_si256((__m256i const *) ptr + 13);; + sort_14v_ascending(d01, d02, d03, d04, d05, d06, d07, d08, d09, d10, d11, d12, d13, d14); + _mm256_storeu_si256((__m256i *) ptr + 0, d01); + _mm256_storeu_si256((__m256i *) ptr + 1, d02); + _mm256_storeu_si256((__m256i *) ptr + 2, d03); + _mm256_storeu_si256((__m256i *) ptr + 3, d04); + _mm256_storeu_si256((__m256i *) ptr + 4, d05); + _mm256_storeu_si256((__m256i *) ptr + 5, d06); + _mm256_storeu_si256((__m256i *) ptr + 6, d07); + _mm256_storeu_si256((__m256i *) ptr + 
7, d08); + _mm256_storeu_si256((__m256i *) ptr + 8, d09); + _mm256_storeu_si256((__m256i *) ptr + 9, d10); + _mm256_storeu_si256((__m256i *) ptr + 10, d11); + _mm256_storeu_si256((__m256i *) ptr + 11, d12); + _mm256_storeu_si256((__m256i *) ptr + 12, d13); + _mm256_storeu_si256((__m256i *) ptr + 13, d14); } -static NOINLINE void sort_15v(int64_t *ptr) { - __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0);; - __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1);; - __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2);; - __m256i d04 = _mm256_lddqu_si256((__m256i const *) ptr + 3);; - __m256i d05 = _mm256_lddqu_si256((__m256i const *) ptr + 4);; - __m256i d06 = _mm256_lddqu_si256((__m256i const *) ptr + 5);; - __m256i d07 = _mm256_lddqu_si256((__m256i const *) ptr + 6);; - __m256i d08 = _mm256_lddqu_si256((__m256i const *) ptr + 7);; - __m256i d09 = _mm256_lddqu_si256((__m256i const *) ptr + 8);; - __m256i d10 = _mm256_lddqu_si256((__m256i const *) ptr + 9);; - __m256i d11 = _mm256_lddqu_si256((__m256i const *) ptr + 10);; - __m256i d12 = _mm256_lddqu_si256((__m256i const *) ptr + 11);; - __m256i d13 = _mm256_lddqu_si256((__m256i const *) ptr + 12);; - __m256i d14 = _mm256_lddqu_si256((__m256i const *) ptr + 13);; - __m256i d15 = _mm256_lddqu_si256((__m256i const *) ptr + 14);; - sort_15v_ascending(d01, d02, d03, d04, d05, d06, d07, d08, d09, d10, d11, d12, d13, d14, d15); - _mm256_storeu_si256((__m256i *) ptr + 0, d01); - _mm256_storeu_si256((__m256i *) ptr + 1, d02); - _mm256_storeu_si256((__m256i *) ptr + 2, d03); - _mm256_storeu_si256((__m256i *) ptr + 3, d04); - _mm256_storeu_si256((__m256i *) ptr + 4, d05); - _mm256_storeu_si256((__m256i *) ptr + 5, d06); - _mm256_storeu_si256((__m256i *) ptr + 6, d07); - _mm256_storeu_si256((__m256i *) ptr + 7, d08); - _mm256_storeu_si256((__m256i *) ptr + 8, d09); - _mm256_storeu_si256((__m256i *) ptr + 9, d10); - _mm256_storeu_si256((__m256i *) ptr + 10, d11); - _mm256_storeu_si256((__m256i *) ptr + 11, d12); - _mm256_storeu_si256((__m256i *) ptr + 12, d13); - _mm256_storeu_si256((__m256i *) ptr + 13, d14); - _mm256_storeu_si256((__m256i *) ptr + 14, d15); + static NOINLINE void sort_15v(int64_t *ptr) { + __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0);; + __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1);; + __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2);; + __m256i d04 = _mm256_lddqu_si256((__m256i const *) ptr + 3);; + __m256i d05 = _mm256_lddqu_si256((__m256i const *) ptr + 4);; + __m256i d06 = _mm256_lddqu_si256((__m256i const *) ptr + 5);; + __m256i d07 = _mm256_lddqu_si256((__m256i const *) ptr + 6);; + __m256i d08 = _mm256_lddqu_si256((__m256i const *) ptr + 7);; + __m256i d09 = _mm256_lddqu_si256((__m256i const *) ptr + 8);; + __m256i d10 = _mm256_lddqu_si256((__m256i const *) ptr + 9);; + __m256i d11 = _mm256_lddqu_si256((__m256i const *) ptr + 10);; + __m256i d12 = _mm256_lddqu_si256((__m256i const *) ptr + 11);; + __m256i d13 = _mm256_lddqu_si256((__m256i const *) ptr + 12);; + __m256i d14 = _mm256_lddqu_si256((__m256i const *) ptr + 13);; + __m256i d15 = _mm256_lddqu_si256((__m256i const *) ptr + 14);; + sort_15v_ascending(d01, d02, d03, d04, d05, d06, d07, d08, d09, d10, d11, d12, d13, d14, d15); + _mm256_storeu_si256((__m256i *) ptr + 0, d01); + _mm256_storeu_si256((__m256i *) ptr + 1, d02); + _mm256_storeu_si256((__m256i *) ptr + 2, d03); + _mm256_storeu_si256((__m256i *) ptr + 3, d04); + _mm256_storeu_si256((__m256i *) ptr + 4, d05); + _mm256_storeu_si256((__m256i *) ptr + 
5, d06); + _mm256_storeu_si256((__m256i *) ptr + 6, d07); + _mm256_storeu_si256((__m256i *) ptr + 7, d08); + _mm256_storeu_si256((__m256i *) ptr + 8, d09); + _mm256_storeu_si256((__m256i *) ptr + 9, d10); + _mm256_storeu_si256((__m256i *) ptr + 10, d11); + _mm256_storeu_si256((__m256i *) ptr + 11, d12); + _mm256_storeu_si256((__m256i *) ptr + 12, d13); + _mm256_storeu_si256((__m256i *) ptr + 13, d14); + _mm256_storeu_si256((__m256i *) ptr + 14, d15); } -static NOINLINE void sort_16v(int64_t *ptr) { - __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0);; - __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1);; - __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2);; - __m256i d04 = _mm256_lddqu_si256((__m256i const *) ptr + 3);; - __m256i d05 = _mm256_lddqu_si256((__m256i const *) ptr + 4);; - __m256i d06 = _mm256_lddqu_si256((__m256i const *) ptr + 5);; - __m256i d07 = _mm256_lddqu_si256((__m256i const *) ptr + 6);; - __m256i d08 = _mm256_lddqu_si256((__m256i const *) ptr + 7);; - __m256i d09 = _mm256_lddqu_si256((__m256i const *) ptr + 8);; - __m256i d10 = _mm256_lddqu_si256((__m256i const *) ptr + 9);; - __m256i d11 = _mm256_lddqu_si256((__m256i const *) ptr + 10);; - __m256i d12 = _mm256_lddqu_si256((__m256i const *) ptr + 11);; - __m256i d13 = _mm256_lddqu_si256((__m256i const *) ptr + 12);; - __m256i d14 = _mm256_lddqu_si256((__m256i const *) ptr + 13);; - __m256i d15 = _mm256_lddqu_si256((__m256i const *) ptr + 14);; - __m256i d16 = _mm256_lddqu_si256((__m256i const *) ptr + 15);; - sort_16v_ascending(d01, d02, d03, d04, d05, d06, d07, d08, d09, d10, d11, d12, d13, d14, d15, d16); - _mm256_storeu_si256((__m256i *) ptr + 0, d01); - _mm256_storeu_si256((__m256i *) ptr + 1, d02); - _mm256_storeu_si256((__m256i *) ptr + 2, d03); - _mm256_storeu_si256((__m256i *) ptr + 3, d04); - _mm256_storeu_si256((__m256i *) ptr + 4, d05); - _mm256_storeu_si256((__m256i *) ptr + 5, d06); - _mm256_storeu_si256((__m256i *) ptr + 6, d07); - _mm256_storeu_si256((__m256i *) ptr + 7, d08); - _mm256_storeu_si256((__m256i *) ptr + 8, d09); - _mm256_storeu_si256((__m256i *) ptr + 9, d10); - _mm256_storeu_si256((__m256i *) ptr + 10, d11); - _mm256_storeu_si256((__m256i *) ptr + 11, d12); - _mm256_storeu_si256((__m256i *) ptr + 12, d13); - _mm256_storeu_si256((__m256i *) ptr + 13, d14); - _mm256_storeu_si256((__m256i *) ptr + 14, d15); - _mm256_storeu_si256((__m256i *) ptr + 15, d16); -} - static void sort(int64_t *ptr, size_t length) { - const int N = 4; - - switch(length / N) { - case 1: sort_01v(ptr); break; - case 2: sort_02v(ptr); break; - case 3: sort_03v(ptr); break; - case 4: sort_04v(ptr); break; - case 5: sort_05v(ptr); break; - case 6: sort_06v(ptr); break; - case 7: sort_07v(ptr); break; - case 8: sort_08v(ptr); break; - case 9: sort_09v(ptr); break; - case 10: sort_10v(ptr); break; - case 11: sort_11v(ptr); break; - case 12: sort_12v(ptr); break; - case 13: sort_13v(ptr); break; - case 14: sort_14v(ptr); break; - case 15: sort_15v(ptr); break; - case 16: sort_16v(ptr); break; - } + static NOINLINE void sort_16v(int64_t *ptr) { + __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0);; + __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1);; + __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2);; + __m256i d04 = _mm256_lddqu_si256((__m256i const *) ptr + 3);; + __m256i d05 = _mm256_lddqu_si256((__m256i const *) ptr + 4);; + __m256i d06 = _mm256_lddqu_si256((__m256i const *) ptr + 5);; + __m256i d07 = _mm256_lddqu_si256((__m256i const *) ptr + 6);; + __m256i 
d08 = _mm256_lddqu_si256((__m256i const *) ptr + 7);; + __m256i d09 = _mm256_lddqu_si256((__m256i const *) ptr + 8);; + __m256i d10 = _mm256_lddqu_si256((__m256i const *) ptr + 9);; + __m256i d11 = _mm256_lddqu_si256((__m256i const *) ptr + 10);; + __m256i d12 = _mm256_lddqu_si256((__m256i const *) ptr + 11);; + __m256i d13 = _mm256_lddqu_si256((__m256i const *) ptr + 12);; + __m256i d14 = _mm256_lddqu_si256((__m256i const *) ptr + 13);; + __m256i d15 = _mm256_lddqu_si256((__m256i const *) ptr + 14);; + __m256i d16 = _mm256_lddqu_si256((__m256i const *) ptr + 15);; + sort_16v_ascending(d01, d02, d03, d04, d05, d06, d07, d08, d09, d10, d11, d12, d13, d14, d15, d16); + _mm256_storeu_si256((__m256i *) ptr + 0, d01); + _mm256_storeu_si256((__m256i *) ptr + 1, d02); + _mm256_storeu_si256((__m256i *) ptr + 2, d03); + _mm256_storeu_si256((__m256i *) ptr + 3, d04); + _mm256_storeu_si256((__m256i *) ptr + 4, d05); + _mm256_storeu_si256((__m256i *) ptr + 5, d06); + _mm256_storeu_si256((__m256i *) ptr + 6, d07); + _mm256_storeu_si256((__m256i *) ptr + 7, d08); + _mm256_storeu_si256((__m256i *) ptr + 8, d09); + _mm256_storeu_si256((__m256i *) ptr + 9, d10); + _mm256_storeu_si256((__m256i *) ptr + 10, d11); + _mm256_storeu_si256((__m256i *) ptr + 11, d12); + _mm256_storeu_si256((__m256i *) ptr + 12, d13); + _mm256_storeu_si256((__m256i *) ptr + 13, d14); + _mm256_storeu_si256((__m256i *) ptr + 14, d15); + _mm256_storeu_si256((__m256i *) ptr + 15, d16); } + static void sort(int64_t *ptr, size_t length); + }; } } + +#undef i2d +#undef d2i +#undef i2s +#undef s2i +#undef s2d +#undef d2s + +#ifdef __GNUC__ +#ifdef __clang__ +#pragma clang attribute pop +#else +#pragma GCC pop_options +#endif +#endif #endif + diff --git a/src/coreclr/src/gc/smallsort/bitonic_sort.AVX2.uint32_t.generated.cpp b/src/coreclr/src/gc/smallsort/bitonic_sort.AVX2.uint32_t.generated.cpp new file mode 100644 index 00000000000000..93d974cc2edf78 --- /dev/null +++ b/src/coreclr/src/gc/smallsort/bitonic_sort.AVX2.uint32_t.generated.cpp @@ -0,0 +1,26 @@ +#include "bitonic_sort.AVX2.uint32_t.generated.h" + +using namespace vxsort; + +void vxsort::smallsort::bitonic<uint32_t, AVX2>::sort(uint32_t *ptr, size_t length) { + const int N = 8; + + switch(length / N) { + case 1: sort_01v(ptr); break; + case 2: sort_02v(ptr); break; + case 3: sort_03v(ptr); break; + case 4: sort_04v(ptr); break; + case 5: sort_05v(ptr); break; + case 6: sort_06v(ptr); break; + case 7: sort_07v(ptr); break; + case 8: sort_08v(ptr); break; + case 9: sort_09v(ptr); break; + case 10: sort_10v(ptr); break; + case 11: sort_11v(ptr); break; + case 12: sort_12v(ptr); break; + case 13: sort_13v(ptr); break; + case 14: sort_14v(ptr); break; + case 15: sort_15v(ptr); break; + case 16: sort_16v(ptr); break; + } +} diff --git a/src/coreclr/src/gc/smallsort/bitonic_sort.AVX2.uint32_t.generated.h b/src/coreclr/src/gc/smallsort/bitonic_sort.AVX2.uint32_t.generated.h index cf1d35a3a90f14..33a2d668474d03 100644 --- a/src/coreclr/src/gc/smallsort/bitonic_sort.AVX2.uint32_t.generated.h +++ b/src/coreclr/src/gc/smallsort/bitonic_sort.AVX2.uint32_t.generated.h @@ -1,6 +1,6 @@ ///////////////////////////////////////////////////////////////////////////// //// -// This file was auto-generated by a tool at 2020-05-31 19:46:17 +// This file was auto-generated by a tool at 2020-06-22 05:27:48 // // It is recommended you DO NOT directly edit this file but instead edit // the code-generator that generated this source file instead.
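A note on the two compare-exchange idioms this patch relies on. AVX2 has no 64-bit integer min/max instructions, so every exchange in the int64_t kernels above is synthesized from _mm256_cmpgt_epi64 plus _mm256_blendv_pd: the compare yields an all-ones or all-zero mask per 64-bit lane, and the variable double-precision blend keys off each lane's sign bit, so the i2d/d2i casts only reinterpret bits to reach the blend unit and never treat the keys as doubles. The uint32_t kernels in the hunk that follows can use the native _mm256_min_epu32/_mm256_max_epu32 pair instead. A minimal standalone sketch of both idioms (the helper names are mine, not from the patch; build with -mavx2 or inside a target("avx2") region like the one introduced below):

    #include <immintrin.h>

    // 64-bit signed compare-exchange: afterwards lo holds the lane-wise
    // minimum and hi the maximum. AVX2 lacks _mm256_min_epi64, so min/max
    // are built from a greater-than compare plus a variable blend.
    static inline void cmp_xchg_epi64(__m256i& lo, __m256i& hi) {
        __m256i tmp = lo;
        __m256i cmp = _mm256_cmpgt_epi64(lo, hi);  // all-ones where lo > hi
        lo = _mm256_castpd_si256(_mm256_blendv_pd(     // lo := min(lo, hi)
            _mm256_castsi256_pd(lo), _mm256_castsi256_pd(hi), _mm256_castsi256_pd(cmp)));
        hi = _mm256_castpd_si256(_mm256_blendv_pd(     // hi := max(old lo, hi)
            _mm256_castsi256_pd(hi), _mm256_castsi256_pd(tmp), _mm256_castsi256_pd(cmp)));
    }

    // 32-bit unsigned compare-exchange: native min/max exists, so a single
    // instruction per result replaces the compare/blend pair.
    static inline void cmp_xchg_epu32(__m256i& lo, __m256i& hi) {
        __m256i mn = _mm256_min_epu32(lo, hi);
        __m256i mx = _mm256_max_epu32(lo, hi);
        lo = mn;
        hi = mx;
    }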
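The dispatch contract is easy to miss: sort(ptr, length) switches on length / N, where N is the number of elements in one __m256i (4 for int64_t above, 8 for uint32_t in the new .cpp), and the switch has no default case, so the integer division silently drops any remainder. The entry point therefore handles only lengths that are exact multiples of N, from N up to 16 * N elements; everything else is the caller's responsibility. Note also that the patch moves this dispatcher out of the header into a generated .cpp, leaving only a declaration behind. A caller-side sketch of that contract (the guard and the scalar fallback are illustrative, not from the patch; it assumes the generated header is on the include path and spells the specialization bitonic<uint32_t, AVX2> as the generated .cpp does):

    #include <algorithm>
    #include <cstdint>
    #include <cstddef>
    #include "bitonic_sort.AVX2.uint32_t.generated.h"

    using namespace vxsort;

    void small_sort(uint32_t* ptr, size_t length) {
        const size_t N = 8;  // uint32_t lanes per 256-bit vector
        if (length >= N && length <= 16 * N && length % N == 0) {
            // In range for the generated network: 1..16 whole vectors.
            smallsort::bitonic<uint32_t, AVX2>::sort(ptr, length);
        } else {
            // Hypothetical fallback for sizes the network does not cover.
            std::sort(ptr, ptr + length);
        }
    }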
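The hunk below also swaps the per-compiler INLINE/NOINLINE macro block (presumably relocated to a shared header elsewhere in this series) for function-target pragmas: under Clang, #pragma clang attribute push applies __attribute__((target("avx2"))) to every function until the matching pop (visible above at the end of the int64_t header), and under GCC, push_options/target("avx2") has the same effect. That way only these translation units may emit AVX2 encodings while the rest of the GC is built for the baseline ISA; MSVC needs no equivalent because it accepts AVX2 intrinsics without an /arch switch. Runtime CPU-feature detection still has to gate any call into this code, which these hunks do not show. A self-contained sketch of the idiom:

    // Compiles with plain gcc/clang, no -mavx2 flag required.
    #ifdef __GNUC__
    #ifdef __clang__
    #pragma clang attribute push (__attribute__((target("avx2"))), apply_to = any(function))
    #else
    #pragma GCC push_options
    #pragma GCC target("avx2")
    #endif
    #endif

    #include <immintrin.h>

    // Any function defined inside the push/pop region may use AVX2.
    static inline __m256i add4x64(__m256i a, __m256i b) {
        return _mm256_add_epi64(a, b);
    }

    #ifdef __GNUC__
    #ifdef __clang__
    #pragma clang attribute pop
    #else
    #pragma GCC pop_options
    #endif
    #endif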
@@ -9,19 +9,18 @@ #ifndef BITONIC_SORT_AVX2_UINT32_T_H #define BITONIC_SORT_AVX2_UINT32_T_H -#include <immintrin.h> -#include "bitonic_sort.h" - -#ifdef _MSC_VER - // MSVC - #define INLINE __forceinline - #define NOINLINE __declspec(noinline) +#ifdef __GNUC__ +#ifdef __clang__ +#pragma clang attribute push (__attribute__((target("avx2"))), apply_to = any(function)) #else - // GCC + Clang - #define INLINE __attribute__((always_inline)) - #define NOINLINE __attribute__((noinline)) +#pragma GCC push_options +#pragma GCC target("avx2") +#endif #endif +#include <immintrin.h> +#include "bitonic_sort.h" + #define i2d _mm256_castsi256_pd #define d2i _mm256_castpd_si256 #define i2s _mm256_castsi256_ps @@ -29,1504 +28,1500 @@ #define s2d _mm256_castps_pd #define d2s _mm256_castpd_ps -namespace gcsort { +namespace vxsort { namespace smallsort { -template<> struct bitonic<uint32_t> { +template<> struct bitonic<uint32_t, vector_machine::AVX2> { public: static INLINE void sort_01v_ascending(__m256i& d01) { - __m256i min, max, s; - - s = _mm256_shuffle_epi32(d01, 0xB1); - - min = _mm256_min_epu32(s, d01); - max = _mm256_max_epu32(s, d01); - d01 = _mm256_blend_epi32(min, max, 0xAA); - - s = _mm256_shuffle_epi32(d01, 0x1B); - - min = _mm256_min_epu32(s, d01); - max = _mm256_max_epu32(s, d01); - d01 = _mm256_blend_epi32(min, max, 0xCC); - - s = _mm256_shuffle_epi32(d01, 0xB1); - - min = _mm256_min_epu32(s, d01); - max = _mm256_max_epu32(s, d01); - d01 = _mm256_blend_epi32(min, max, 0xAA); - - s = d2i(_mm256_permute4x64_pd(i2d(_mm256_shuffle_epi32(d01, 0x1B)), 0x4E)); - min = _mm256_min_epu32(s, d01); - max = _mm256_max_epu32(s, d01); - d01 = _mm256_blend_epi32(min, max, 0xF0); - - s = _mm256_shuffle_epi32(d01, 0x4E); - min = _mm256_min_epu32(s, d01); - max = _mm256_max_epu32(s, d01); - d01 = _mm256_blend_epi32(min, max, 0xCC); - - s = _mm256_shuffle_epi32(d01, 0xB1); - min = _mm256_min_epu32(s, d01); - max = _mm256_max_epu32(s, d01); - d01 = _mm256_blend_epi32(min, max, 0xAA); + __m256i min, max, s; + + s = _mm256_shuffle_epi32(d01, 0xB1); + + min = _mm256_min_epu32(s, d01); + max = _mm256_max_epu32(s, d01); + d01 = _mm256_blend_epi32(min, max, 0xAA); + + s = _mm256_shuffle_epi32(d01, 0x1B); + + min = _mm256_min_epu32(s, d01); + max = _mm256_max_epu32(s, d01); + d01 = _mm256_blend_epi32(min, max, 0xCC); + + s = _mm256_shuffle_epi32(d01, 0xB1); + + min = _mm256_min_epu32(s, d01); + max = _mm256_max_epu32(s, d01); + d01 = _mm256_blend_epi32(min, max, 0xAA); + + s = d2i(_mm256_permute4x64_pd(i2d(_mm256_shuffle_epi32(d01, 0x1B)), 0x4E)); + min = _mm256_min_epu32(s, d01); + max = _mm256_max_epu32(s, d01); + d01 = _mm256_blend_epi32(min, max, 0xF0); + + s = _mm256_shuffle_epi32(d01, 0x4E); + min = _mm256_min_epu32(s, d01); + max = _mm256_max_epu32(s, d01); + d01 = _mm256_blend_epi32(min, max, 0xCC); + + s = _mm256_shuffle_epi32(d01, 0xB1); + min = _mm256_min_epu32(s, d01); + max = _mm256_max_epu32(s, d01); + d01 = _mm256_blend_epi32(min, max, 0xAA); } static INLINE void sort_01v_merge_ascending(__m256i& d01) { - __m256i min, max, s; - - s = d2i(_mm256_permute4x64_pd(i2d(d01), 0x4E)); - min = _mm256_min_epu32(s, d01); - max = _mm256_max_epu32(s, d01); - d01 = _mm256_blend_epi32(min, max, 0xF0); - - s = _mm256_shuffle_epi32(d01, 0x4E); - - min = _mm256_min_epu32(s, d01); - max = _mm256_max_epu32(s, d01); - d01 = _mm256_blend_epi32(min, max, 0xCC); - - s = _mm256_shuffle_epi32(d01, 0xB1); - - min = _mm256_min_epu32(s, d01); - max = _mm256_max_epu32(s, d01); - d01 = _mm256_blend_epi32(min, max, 0xAA); + __m256i min, max, s; + + s = d2i(_mm256_permute4x64_pd(i2d(d01), 0x4E)); + min = 
_mm256_min_epu32(s, d01); + max = _mm256_max_epu32(s, d01); + d01 = _mm256_blend_epi32(min, max, 0xF0); + + s = _mm256_shuffle_epi32(d01, 0x4E); + + min = _mm256_min_epu32(s, d01); + max = _mm256_max_epu32(s, d01); + d01 = _mm256_blend_epi32(min, max, 0xCC); + + s = _mm256_shuffle_epi32(d01, 0xB1); + + min = _mm256_min_epu32(s, d01); + max = _mm256_max_epu32(s, d01); + d01 = _mm256_blend_epi32(min, max, 0xAA); } static INLINE void sort_01v_descending(__m256i& d01) { - __m256i min, max, s; - - s = _mm256_shuffle_epi32(d01, 0xB1); - - min = _mm256_min_epu32(s, d01); - max = _mm256_max_epu32(s, d01); - d01 = _mm256_blend_epi32(max, min, 0xAA); - - s = _mm256_shuffle_epi32(d01, 0x1B); - - min = _mm256_min_epu32(s, d01); - max = _mm256_max_epu32(s, d01); - d01 = _mm256_blend_epi32(max, min, 0xCC); - - s = _mm256_shuffle_epi32(d01, 0xB1); - - min = _mm256_min_epu32(s, d01); - max = _mm256_max_epu32(s, d01); - d01 = _mm256_blend_epi32(max, min, 0xAA); - - s = d2i(_mm256_permute4x64_pd(i2d(_mm256_shuffle_epi32(d01, 0x1B)), 0x4E)); - min = _mm256_min_epu32(s, d01); - max = _mm256_max_epu32(s, d01); - d01 = _mm256_blend_epi32(max, min, 0xF0); - - s = _mm256_shuffle_epi32(d01, 0x4E); - min = _mm256_min_epu32(s, d01); - max = _mm256_max_epu32(s, d01); - d01 = _mm256_blend_epi32(max, min, 0xCC); - - s = _mm256_shuffle_epi32(d01, 0xB1); - min = _mm256_min_epu32(s, d01); - max = _mm256_max_epu32(s, d01); - d01 = _mm256_blend_epi32(max, min, 0xAA); + __m256i min, max, s; + + s = _mm256_shuffle_epi32(d01, 0xB1); + + min = _mm256_min_epu32(s, d01); + max = _mm256_max_epu32(s, d01); + d01 = _mm256_blend_epi32(max, min, 0xAA); + + s = _mm256_shuffle_epi32(d01, 0x1B); + + min = _mm256_min_epu32(s, d01); + max = _mm256_max_epu32(s, d01); + d01 = _mm256_blend_epi32(max, min, 0xCC); + + s = _mm256_shuffle_epi32(d01, 0xB1); + + min = _mm256_min_epu32(s, d01); + max = _mm256_max_epu32(s, d01); + d01 = _mm256_blend_epi32(max, min, 0xAA); + + s = d2i(_mm256_permute4x64_pd(i2d(_mm256_shuffle_epi32(d01, 0x1B)), 0x4E)); + min = _mm256_min_epu32(s, d01); + max = _mm256_max_epu32(s, d01); + d01 = _mm256_blend_epi32(max, min, 0xF0); + + s = _mm256_shuffle_epi32(d01, 0x4E); + min = _mm256_min_epu32(s, d01); + max = _mm256_max_epu32(s, d01); + d01 = _mm256_blend_epi32(max, min, 0xCC); + + s = _mm256_shuffle_epi32(d01, 0xB1); + min = _mm256_min_epu32(s, d01); + max = _mm256_max_epu32(s, d01); + d01 = _mm256_blend_epi32(max, min, 0xAA); } static INLINE void sort_01v_merge_descending(__m256i& d01) { - __m256i min, max, s; - - s = d2i(_mm256_permute4x64_pd(i2d(d01), 0x4E)); - min = _mm256_min_epu32(s, d01); - max = _mm256_max_epu32(s, d01); - d01 = _mm256_blend_epi32(max, min, 0xF0); - - s = _mm256_shuffle_epi32(d01, 0x4E); - - min = _mm256_min_epu32(s, d01); - max = _mm256_max_epu32(s, d01); - d01 = _mm256_blend_epi32(max, min, 0xCC); - - s = _mm256_shuffle_epi32(d01, 0xB1); - - min = _mm256_min_epu32(s, d01); - max = _mm256_max_epu32(s, d01); - d01 = _mm256_blend_epi32(max, min, 0xAA); + __m256i min, max, s; + + s = d2i(_mm256_permute4x64_pd(i2d(d01), 0x4E)); + min = _mm256_min_epu32(s, d01); + max = _mm256_max_epu32(s, d01); + d01 = _mm256_blend_epi32(max, min, 0xF0); + + s = _mm256_shuffle_epi32(d01, 0x4E); + + min = _mm256_min_epu32(s, d01); + max = _mm256_max_epu32(s, d01); + d01 = _mm256_blend_epi32(max, min, 0xCC); + + s = _mm256_shuffle_epi32(d01, 0xB1); + + min = _mm256_min_epu32(s, d01); + max = _mm256_max_epu32(s, d01); + d01 = _mm256_blend_epi32(max, min, 0xAA); } static INLINE void sort_02v_ascending(__m256i& d01, 
__m256i& d02) { - __m256i tmp; + __m256i tmp; - sort_01v_ascending(d01); - sort_01v_descending(d02); + sort_01v_ascending(d01); + sort_01v_descending(d02); - tmp = d02; - - d02 = _mm256_max_epu32(d01, d02); - d01 = _mm256_min_epu32(d01, tmp); + tmp = d02; + + d02 = _mm256_max_epu32(d01, d02); + d01 = _mm256_min_epu32(d01, tmp); - sort_01v_merge_ascending(d01); - sort_01v_merge_ascending(d02); + sort_01v_merge_ascending(d01); + sort_01v_merge_ascending(d02); } static INLINE void sort_02v_descending(__m256i& d01, __m256i& d02) { - __m256i tmp; + __m256i tmp; - sort_01v_descending(d01); - sort_01v_ascending(d02); + sort_01v_descending(d01); + sort_01v_ascending(d02); - tmp = d02; - - d02 = _mm256_max_epu32(d01, d02); - d01 = _mm256_min_epu32(d01, tmp); + tmp = d02; + + d02 = _mm256_max_epu32(d01, d02); + d01 = _mm256_min_epu32(d01, tmp); - sort_01v_merge_descending(d01); - sort_01v_merge_descending(d02); + sort_01v_merge_descending(d01); + sort_01v_merge_descending(d02); } static INLINE void sort_02v_merge_ascending(__m256i& d01, __m256i& d02) { - __m256i tmp; + __m256i tmp; - tmp = d01; - - d01 = _mm256_min_epu32(d02, d01); - - d02 = _mm256_max_epu32(d02, tmp); + tmp = d01; + + d01 = _mm256_min_epu32(d02, d01); + + d02 = _mm256_max_epu32(d02, tmp); - sort_01v_merge_ascending(d01); - sort_01v_merge_ascending(d02); + sort_01v_merge_ascending(d01); + sort_01v_merge_ascending(d02); } static INLINE void sort_02v_merge_descending(__m256i& d01, __m256i& d02) { - __m256i tmp; + __m256i tmp; - tmp = d01; - - d01 = _mm256_min_epu32(d02, d01); - - d02 = _mm256_max_epu32(d02, tmp); + tmp = d01; + + d01 = _mm256_min_epu32(d02, d01); + + d02 = _mm256_max_epu32(d02, tmp); - sort_01v_merge_descending(d01); - sort_01v_merge_descending(d02); + sort_01v_merge_descending(d01); + sort_01v_merge_descending(d02); } static INLINE void sort_03v_ascending(__m256i& d01, __m256i& d02, __m256i& d03) { - __m256i tmp; + __m256i tmp; - sort_02v_ascending(d01, d02); - sort_01v_descending(d03); + sort_02v_ascending(d01, d02); + sort_01v_descending(d03); - tmp = d03; - - d03 = _mm256_max_epu32(d02, d03); - d02 = _mm256_min_epu32(d02, tmp); + tmp = d03; + + d03 = _mm256_max_epu32(d02, d03); + d02 = _mm256_min_epu32(d02, tmp); - sort_02v_merge_ascending(d01, d02); - sort_01v_merge_ascending(d03); + sort_02v_merge_ascending(d01, d02); + sort_01v_merge_ascending(d03); } static INLINE void sort_03v_descending(__m256i& d01, __m256i& d02, __m256i& d03) { - __m256i tmp; + __m256i tmp; - sort_02v_descending(d01, d02); - sort_01v_ascending(d03); + sort_02v_descending(d01, d02); + sort_01v_ascending(d03); - tmp = d03; - - d03 = _mm256_max_epu32(d02, d03); - d02 = _mm256_min_epu32(d02, tmp); + tmp = d03; + + d03 = _mm256_max_epu32(d02, d03); + d02 = _mm256_min_epu32(d02, tmp); - sort_02v_merge_descending(d01, d02); - sort_01v_merge_descending(d03); + sort_02v_merge_descending(d01, d02); + sort_01v_merge_descending(d03); } static INLINE void sort_03v_merge_ascending(__m256i& d01, __m256i& d02, __m256i& d03) { - __m256i tmp; + __m256i tmp; - tmp = d01; - - d01 = _mm256_min_epu32(d03, d01); - - d03 = _mm256_max_epu32(d03, tmp); + tmp = d01; + + d01 = _mm256_min_epu32(d03, d01); + + d03 = _mm256_max_epu32(d03, tmp); - sort_02v_merge_ascending(d01, d02); - sort_01v_merge_ascending(d03); + sort_02v_merge_ascending(d01, d02); + sort_01v_merge_ascending(d03); } static INLINE void sort_03v_merge_descending(__m256i& d01, __m256i& d02, __m256i& d03) { - __m256i tmp; + __m256i tmp; - tmp = d01; - - d01 = _mm256_min_epu32(d03, d01); - - d03 = 
_mm256_max_epu32(d03, tmp); + tmp = d01; + + d01 = _mm256_min_epu32(d03, d01); + + d03 = _mm256_max_epu32(d03, tmp); - sort_02v_merge_descending(d01, d02); - sort_01v_merge_descending(d03); + sort_02v_merge_descending(d01, d02); + sort_01v_merge_descending(d03); } static INLINE void sort_04v_ascending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04) { - __m256i tmp; + __m256i tmp; - sort_02v_ascending(d01, d02); - sort_02v_descending(d03, d04); + sort_02v_ascending(d01, d02); + sort_02v_descending(d03, d04); - tmp = d03; - - d03 = _mm256_max_epu32(d02, d03); - d02 = _mm256_min_epu32(d02, tmp); + tmp = d03; + + d03 = _mm256_max_epu32(d02, d03); + d02 = _mm256_min_epu32(d02, tmp); - tmp = d04; - - d04 = _mm256_max_epu32(d01, d04); - d01 = _mm256_min_epu32(d01, tmp); + tmp = d04; + + d04 = _mm256_max_epu32(d01, d04); + d01 = _mm256_min_epu32(d01, tmp); - sort_02v_merge_ascending(d01, d02); - sort_02v_merge_ascending(d03, d04); + sort_02v_merge_ascending(d01, d02); + sort_02v_merge_ascending(d03, d04); } static INLINE void sort_04v_descending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04) { - __m256i tmp; + __m256i tmp; - sort_02v_descending(d01, d02); - sort_02v_ascending(d03, d04); + sort_02v_descending(d01, d02); + sort_02v_ascending(d03, d04); - tmp = d03; - - d03 = _mm256_max_epu32(d02, d03); - d02 = _mm256_min_epu32(d02, tmp); + tmp = d03; + + d03 = _mm256_max_epu32(d02, d03); + d02 = _mm256_min_epu32(d02, tmp); - tmp = d04; - - d04 = _mm256_max_epu32(d01, d04); - d01 = _mm256_min_epu32(d01, tmp); + tmp = d04; + + d04 = _mm256_max_epu32(d01, d04); + d01 = _mm256_min_epu32(d01, tmp); - sort_02v_merge_descending(d01, d02); - sort_02v_merge_descending(d03, d04); + sort_02v_merge_descending(d01, d02); + sort_02v_merge_descending(d03, d04); } static INLINE void sort_04v_merge_ascending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04) { - __m256i tmp; + __m256i tmp; - tmp = d01; - - d01 = _mm256_min_epu32(d03, d01); - - d03 = _mm256_max_epu32(d03, tmp); + tmp = d01; + + d01 = _mm256_min_epu32(d03, d01); + + d03 = _mm256_max_epu32(d03, tmp); - tmp = d02; - - d02 = _mm256_min_epu32(d04, d02); - - d04 = _mm256_max_epu32(d04, tmp); + tmp = d02; + + d02 = _mm256_min_epu32(d04, d02); + + d04 = _mm256_max_epu32(d04, tmp); - sort_02v_merge_ascending(d01, d02); - sort_02v_merge_ascending(d03, d04); + sort_02v_merge_ascending(d01, d02); + sort_02v_merge_ascending(d03, d04); } static INLINE void sort_04v_merge_descending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04) { - __m256i tmp; + __m256i tmp; - tmp = d01; - - d01 = _mm256_min_epu32(d03, d01); - - d03 = _mm256_max_epu32(d03, tmp); + tmp = d01; + + d01 = _mm256_min_epu32(d03, d01); + + d03 = _mm256_max_epu32(d03, tmp); - tmp = d02; - - d02 = _mm256_min_epu32(d04, d02); - - d04 = _mm256_max_epu32(d04, tmp); + tmp = d02; + + d02 = _mm256_min_epu32(d04, d02); + + d04 = _mm256_max_epu32(d04, tmp); - sort_02v_merge_descending(d01, d02); - sort_02v_merge_descending(d03, d04); + sort_02v_merge_descending(d01, d02); + sort_02v_merge_descending(d03, d04); } static INLINE void sort_05v_ascending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05) { - __m256i tmp; + __m256i tmp; - sort_04v_ascending(d01, d02, d03, d04); - sort_01v_descending(d05); + sort_04v_ascending(d01, d02, d03, d04); + sort_01v_descending(d05); - tmp = d05; - - d05 = _mm256_max_epu32(d04, d05); - d04 = _mm256_min_epu32(d04, tmp); + tmp = d05; + + d05 = _mm256_max_epu32(d04, d05); + d04 = _mm256_min_epu32(d04, tmp); - 
sort_04v_merge_ascending(d01, d02, d03, d04); - sort_01v_merge_ascending(d05); + sort_04v_merge_ascending(d01, d02, d03, d04); + sort_01v_merge_ascending(d05); } static INLINE void sort_05v_descending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05) { - __m256i tmp; + __m256i tmp; - sort_04v_descending(d01, d02, d03, d04); - sort_01v_ascending(d05); + sort_04v_descending(d01, d02, d03, d04); + sort_01v_ascending(d05); - tmp = d05; - - d05 = _mm256_max_epu32(d04, d05); - d04 = _mm256_min_epu32(d04, tmp); + tmp = d05; + + d05 = _mm256_max_epu32(d04, d05); + d04 = _mm256_min_epu32(d04, tmp); - sort_04v_merge_descending(d01, d02, d03, d04); - sort_01v_merge_descending(d05); + sort_04v_merge_descending(d01, d02, d03, d04); + sort_01v_merge_descending(d05); } static INLINE void sort_05v_merge_ascending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05) { - __m256i tmp; + __m256i tmp; - tmp = d01; - - d01 = _mm256_min_epu32(d05, d01); - - d05 = _mm256_max_epu32(d05, tmp); + tmp = d01; + + d01 = _mm256_min_epu32(d05, d01); + + d05 = _mm256_max_epu32(d05, tmp); - sort_04v_merge_ascending(d01, d02, d03, d04); - sort_01v_merge_ascending(d05); + sort_04v_merge_ascending(d01, d02, d03, d04); + sort_01v_merge_ascending(d05); } static INLINE void sort_05v_merge_descending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05) { - __m256i tmp; + __m256i tmp; - tmp = d01; - - d01 = _mm256_min_epu32(d05, d01); - - d05 = _mm256_max_epu32(d05, tmp); + tmp = d01; + + d01 = _mm256_min_epu32(d05, d01); + + d05 = _mm256_max_epu32(d05, tmp); - sort_04v_merge_descending(d01, d02, d03, d04); - sort_01v_merge_descending(d05); + sort_04v_merge_descending(d01, d02, d03, d04); + sort_01v_merge_descending(d05); } static INLINE void sort_06v_ascending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06) { - __m256i tmp; + __m256i tmp; - sort_04v_ascending(d01, d02, d03, d04); - sort_02v_descending(d05, d06); + sort_04v_ascending(d01, d02, d03, d04); + sort_02v_descending(d05, d06); - tmp = d05; - - d05 = _mm256_max_epu32(d04, d05); - d04 = _mm256_min_epu32(d04, tmp); + tmp = d05; + + d05 = _mm256_max_epu32(d04, d05); + d04 = _mm256_min_epu32(d04, tmp); - tmp = d06; - - d06 = _mm256_max_epu32(d03, d06); - d03 = _mm256_min_epu32(d03, tmp); + tmp = d06; + + d06 = _mm256_max_epu32(d03, d06); + d03 = _mm256_min_epu32(d03, tmp); - sort_04v_merge_ascending(d01, d02, d03, d04); - sort_02v_merge_ascending(d05, d06); + sort_04v_merge_ascending(d01, d02, d03, d04); + sort_02v_merge_ascending(d05, d06); } static INLINE void sort_06v_descending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06) { - __m256i tmp; + __m256i tmp; - sort_04v_descending(d01, d02, d03, d04); - sort_02v_ascending(d05, d06); + sort_04v_descending(d01, d02, d03, d04); + sort_02v_ascending(d05, d06); - tmp = d05; - - d05 = _mm256_max_epu32(d04, d05); - d04 = _mm256_min_epu32(d04, tmp); + tmp = d05; + + d05 = _mm256_max_epu32(d04, d05); + d04 = _mm256_min_epu32(d04, tmp); - tmp = d06; - - d06 = _mm256_max_epu32(d03, d06); - d03 = _mm256_min_epu32(d03, tmp); + tmp = d06; + + d06 = _mm256_max_epu32(d03, d06); + d03 = _mm256_min_epu32(d03, tmp); - sort_04v_merge_descending(d01, d02, d03, d04); - sort_02v_merge_descending(d05, d06); + sort_04v_merge_descending(d01, d02, d03, d04); + sort_02v_merge_descending(d05, d06); } static INLINE void sort_06v_merge_ascending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06) { - 
__m256i tmp; + __m256i tmp; - tmp = d01; - - d01 = _mm256_min_epu32(d05, d01); - - d05 = _mm256_max_epu32(d05, tmp); + tmp = d01; + + d01 = _mm256_min_epu32(d05, d01); + + d05 = _mm256_max_epu32(d05, tmp); - tmp = d02; - - d02 = _mm256_min_epu32(d06, d02); - - d06 = _mm256_max_epu32(d06, tmp); + tmp = d02; + + d02 = _mm256_min_epu32(d06, d02); + + d06 = _mm256_max_epu32(d06, tmp); - sort_04v_merge_ascending(d01, d02, d03, d04); - sort_02v_merge_ascending(d05, d06); + sort_04v_merge_ascending(d01, d02, d03, d04); + sort_02v_merge_ascending(d05, d06); } static INLINE void sort_06v_merge_descending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06) { - __m256i tmp; + __m256i tmp; - tmp = d01; - - d01 = _mm256_min_epu32(d05, d01); - - d05 = _mm256_max_epu32(d05, tmp); + tmp = d01; + + d01 = _mm256_min_epu32(d05, d01); + + d05 = _mm256_max_epu32(d05, tmp); - tmp = d02; - - d02 = _mm256_min_epu32(d06, d02); - - d06 = _mm256_max_epu32(d06, tmp); + tmp = d02; + + d02 = _mm256_min_epu32(d06, d02); + + d06 = _mm256_max_epu32(d06, tmp); - sort_04v_merge_descending(d01, d02, d03, d04); - sort_02v_merge_descending(d05, d06); + sort_04v_merge_descending(d01, d02, d03, d04); + sort_02v_merge_descending(d05, d06); } static INLINE void sort_07v_ascending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07) { - __m256i tmp; + __m256i tmp; - sort_04v_ascending(d01, d02, d03, d04); - sort_03v_descending(d05, d06, d07); + sort_04v_ascending(d01, d02, d03, d04); + sort_03v_descending(d05, d06, d07); - tmp = d05; - - d05 = _mm256_max_epu32(d04, d05); - d04 = _mm256_min_epu32(d04, tmp); + tmp = d05; + + d05 = _mm256_max_epu32(d04, d05); + d04 = _mm256_min_epu32(d04, tmp); - tmp = d06; - - d06 = _mm256_max_epu32(d03, d06); - d03 = _mm256_min_epu32(d03, tmp); + tmp = d06; + + d06 = _mm256_max_epu32(d03, d06); + d03 = _mm256_min_epu32(d03, tmp); - tmp = d07; - - d07 = _mm256_max_epu32(d02, d07); - d02 = _mm256_min_epu32(d02, tmp); + tmp = d07; + + d07 = _mm256_max_epu32(d02, d07); + d02 = _mm256_min_epu32(d02, tmp); - sort_04v_merge_ascending(d01, d02, d03, d04); - sort_03v_merge_ascending(d05, d06, d07); + sort_04v_merge_ascending(d01, d02, d03, d04); + sort_03v_merge_ascending(d05, d06, d07); } static INLINE void sort_07v_descending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07) { - __m256i tmp; + __m256i tmp; - sort_04v_descending(d01, d02, d03, d04); - sort_03v_ascending(d05, d06, d07); + sort_04v_descending(d01, d02, d03, d04); + sort_03v_ascending(d05, d06, d07); - tmp = d05; - - d05 = _mm256_max_epu32(d04, d05); - d04 = _mm256_min_epu32(d04, tmp); + tmp = d05; + + d05 = _mm256_max_epu32(d04, d05); + d04 = _mm256_min_epu32(d04, tmp); - tmp = d06; - - d06 = _mm256_max_epu32(d03, d06); - d03 = _mm256_min_epu32(d03, tmp); + tmp = d06; + + d06 = _mm256_max_epu32(d03, d06); + d03 = _mm256_min_epu32(d03, tmp); - tmp = d07; - - d07 = _mm256_max_epu32(d02, d07); - d02 = _mm256_min_epu32(d02, tmp); + tmp = d07; + + d07 = _mm256_max_epu32(d02, d07); + d02 = _mm256_min_epu32(d02, tmp); - sort_04v_merge_descending(d01, d02, d03, d04); - sort_03v_merge_descending(d05, d06, d07); + sort_04v_merge_descending(d01, d02, d03, d04); + sort_03v_merge_descending(d05, d06, d07); } static INLINE void sort_07v_merge_ascending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07) { - __m256i tmp; + __m256i tmp; - tmp = d01; - - d01 = _mm256_min_epu32(d05, d01); - - 
d05 = _mm256_max_epu32(d05, tmp); + tmp = d01; + + d01 = _mm256_min_epu32(d05, d01); + + d05 = _mm256_max_epu32(d05, tmp); - tmp = d02; - - d02 = _mm256_min_epu32(d06, d02); - - d06 = _mm256_max_epu32(d06, tmp); + tmp = d02; + + d02 = _mm256_min_epu32(d06, d02); + + d06 = _mm256_max_epu32(d06, tmp); - tmp = d03; - - d03 = _mm256_min_epu32(d07, d03); - - d07 = _mm256_max_epu32(d07, tmp); + tmp = d03; + + d03 = _mm256_min_epu32(d07, d03); + + d07 = _mm256_max_epu32(d07, tmp); - sort_04v_merge_ascending(d01, d02, d03, d04); - sort_03v_merge_ascending(d05, d06, d07); + sort_04v_merge_ascending(d01, d02, d03, d04); + sort_03v_merge_ascending(d05, d06, d07); } static INLINE void sort_07v_merge_descending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07) { - __m256i tmp; + __m256i tmp; - tmp = d01; - - d01 = _mm256_min_epu32(d05, d01); - - d05 = _mm256_max_epu32(d05, tmp); + tmp = d01; + + d01 = _mm256_min_epu32(d05, d01); + + d05 = _mm256_max_epu32(d05, tmp); - tmp = d02; - - d02 = _mm256_min_epu32(d06, d02); - - d06 = _mm256_max_epu32(d06, tmp); + tmp = d02; + + d02 = _mm256_min_epu32(d06, d02); + + d06 = _mm256_max_epu32(d06, tmp); - tmp = d03; - - d03 = _mm256_min_epu32(d07, d03); - - d07 = _mm256_max_epu32(d07, tmp); + tmp = d03; + + d03 = _mm256_min_epu32(d07, d03); + + d07 = _mm256_max_epu32(d07, tmp); - sort_04v_merge_descending(d01, d02, d03, d04); - sort_03v_merge_descending(d05, d06, d07); + sort_04v_merge_descending(d01, d02, d03, d04); + sort_03v_merge_descending(d05, d06, d07); } static INLINE void sort_08v_ascending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08) { - __m256i tmp; + __m256i tmp; - sort_04v_ascending(d01, d02, d03, d04); - sort_04v_descending(d05, d06, d07, d08); + sort_04v_ascending(d01, d02, d03, d04); + sort_04v_descending(d05, d06, d07, d08); - tmp = d05; - - d05 = _mm256_max_epu32(d04, d05); - d04 = _mm256_min_epu32(d04, tmp); + tmp = d05; + + d05 = _mm256_max_epu32(d04, d05); + d04 = _mm256_min_epu32(d04, tmp); - tmp = d06; - - d06 = _mm256_max_epu32(d03, d06); - d03 = _mm256_min_epu32(d03, tmp); + tmp = d06; + + d06 = _mm256_max_epu32(d03, d06); + d03 = _mm256_min_epu32(d03, tmp); - tmp = d07; - - d07 = _mm256_max_epu32(d02, d07); - d02 = _mm256_min_epu32(d02, tmp); + tmp = d07; + + d07 = _mm256_max_epu32(d02, d07); + d02 = _mm256_min_epu32(d02, tmp); - tmp = d08; - - d08 = _mm256_max_epu32(d01, d08); - d01 = _mm256_min_epu32(d01, tmp); + tmp = d08; + + d08 = _mm256_max_epu32(d01, d08); + d01 = _mm256_min_epu32(d01, tmp); - sort_04v_merge_ascending(d01, d02, d03, d04); - sort_04v_merge_ascending(d05, d06, d07, d08); + sort_04v_merge_ascending(d01, d02, d03, d04); + sort_04v_merge_ascending(d05, d06, d07, d08); } static INLINE void sort_08v_descending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08) { - __m256i tmp; + __m256i tmp; - sort_04v_descending(d01, d02, d03, d04); - sort_04v_ascending(d05, d06, d07, d08); + sort_04v_descending(d01, d02, d03, d04); + sort_04v_ascending(d05, d06, d07, d08); - tmp = d05; - - d05 = _mm256_max_epu32(d04, d05); - d04 = _mm256_min_epu32(d04, tmp); + tmp = d05; + + d05 = _mm256_max_epu32(d04, d05); + d04 = _mm256_min_epu32(d04, tmp); - tmp = d06; - - d06 = _mm256_max_epu32(d03, d06); - d03 = _mm256_min_epu32(d03, tmp); + tmp = d06; + + d06 = _mm256_max_epu32(d03, d06); + d03 = _mm256_min_epu32(d03, tmp); - tmp = d07; - - d07 = _mm256_max_epu32(d02, d07); 
- d02 = _mm256_min_epu32(d02, tmp); + tmp = d07; + + d07 = _mm256_max_epu32(d02, d07); + d02 = _mm256_min_epu32(d02, tmp); - tmp = d08; - - d08 = _mm256_max_epu32(d01, d08); - d01 = _mm256_min_epu32(d01, tmp); + tmp = d08; + + d08 = _mm256_max_epu32(d01, d08); + d01 = _mm256_min_epu32(d01, tmp); - sort_04v_merge_descending(d01, d02, d03, d04); - sort_04v_merge_descending(d05, d06, d07, d08); + sort_04v_merge_descending(d01, d02, d03, d04); + sort_04v_merge_descending(d05, d06, d07, d08); } static INLINE void sort_08v_merge_ascending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08) { - __m256i tmp; + __m256i tmp; - tmp = d01; - - d01 = _mm256_min_epu32(d05, d01); - - d05 = _mm256_max_epu32(d05, tmp); + tmp = d01; + + d01 = _mm256_min_epu32(d05, d01); + + d05 = _mm256_max_epu32(d05, tmp); - tmp = d02; - - d02 = _mm256_min_epu32(d06, d02); - - d06 = _mm256_max_epu32(d06, tmp); + tmp = d02; + + d02 = _mm256_min_epu32(d06, d02); + + d06 = _mm256_max_epu32(d06, tmp); - tmp = d03; - - d03 = _mm256_min_epu32(d07, d03); - - d07 = _mm256_max_epu32(d07, tmp); + tmp = d03; + + d03 = _mm256_min_epu32(d07, d03); + + d07 = _mm256_max_epu32(d07, tmp); - tmp = d04; - - d04 = _mm256_min_epu32(d08, d04); - - d08 = _mm256_max_epu32(d08, tmp); + tmp = d04; + + d04 = _mm256_min_epu32(d08, d04); + + d08 = _mm256_max_epu32(d08, tmp); - sort_04v_merge_ascending(d01, d02, d03, d04); - sort_04v_merge_ascending(d05, d06, d07, d08); + sort_04v_merge_ascending(d01, d02, d03, d04); + sort_04v_merge_ascending(d05, d06, d07, d08); } static INLINE void sort_08v_merge_descending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08) { - __m256i tmp; + __m256i tmp; - tmp = d01; - - d01 = _mm256_min_epu32(d05, d01); - - d05 = _mm256_max_epu32(d05, tmp); + tmp = d01; + + d01 = _mm256_min_epu32(d05, d01); + + d05 = _mm256_max_epu32(d05, tmp); - tmp = d02; - - d02 = _mm256_min_epu32(d06, d02); - - d06 = _mm256_max_epu32(d06, tmp); + tmp = d02; + + d02 = _mm256_min_epu32(d06, d02); + + d06 = _mm256_max_epu32(d06, tmp); - tmp = d03; - - d03 = _mm256_min_epu32(d07, d03); - - d07 = _mm256_max_epu32(d07, tmp); + tmp = d03; + + d03 = _mm256_min_epu32(d07, d03); + + d07 = _mm256_max_epu32(d07, tmp); - tmp = d04; - - d04 = _mm256_min_epu32(d08, d04); - - d08 = _mm256_max_epu32(d08, tmp); + tmp = d04; + + d04 = _mm256_min_epu32(d08, d04); + + d08 = _mm256_max_epu32(d08, tmp); - sort_04v_merge_descending(d01, d02, d03, d04); - sort_04v_merge_descending(d05, d06, d07, d08); + sort_04v_merge_descending(d01, d02, d03, d04); + sort_04v_merge_descending(d05, d06, d07, d08); } static INLINE void sort_09v_ascending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08, __m256i& d09) { - __m256i tmp; + __m256i tmp; - sort_08v_ascending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_01v_descending(d09); + sort_08v_ascending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_01v_descending(d09); - tmp = d09; - - d09 = _mm256_max_epu32(d08, d09); - d08 = _mm256_min_epu32(d08, tmp); + tmp = d09; + + d09 = _mm256_max_epu32(d08, d09); + d08 = _mm256_min_epu32(d08, tmp); - sort_08v_merge_ascending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_01v_merge_ascending(d09); + sort_08v_merge_ascending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_01v_merge_ascending(d09); } static INLINE void sort_09v_descending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, 
__m256i& d06, __m256i& d07, __m256i& d08, __m256i& d09) { - __m256i tmp; + __m256i tmp; - sort_08v_descending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_01v_ascending(d09); + sort_08v_descending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_01v_ascending(d09); - tmp = d09; - - d09 = _mm256_max_epu32(d08, d09); - d08 = _mm256_min_epu32(d08, tmp); + tmp = d09; + + d09 = _mm256_max_epu32(d08, d09); + d08 = _mm256_min_epu32(d08, tmp); - sort_08v_merge_descending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_01v_merge_descending(d09); + sort_08v_merge_descending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_01v_merge_descending(d09); } static INLINE void sort_10v_ascending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08, __m256i& d09, __m256i& d10) { - __m256i tmp; + __m256i tmp; - sort_08v_ascending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_02v_descending(d09, d10); + sort_08v_ascending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_02v_descending(d09, d10); - tmp = d09; - - d09 = _mm256_max_epu32(d08, d09); - d08 = _mm256_min_epu32(d08, tmp); + tmp = d09; + + d09 = _mm256_max_epu32(d08, d09); + d08 = _mm256_min_epu32(d08, tmp); - tmp = d10; - - d10 = _mm256_max_epu32(d07, d10); - d07 = _mm256_min_epu32(d07, tmp); + tmp = d10; + + d10 = _mm256_max_epu32(d07, d10); + d07 = _mm256_min_epu32(d07, tmp); - sort_08v_merge_ascending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_02v_merge_ascending(d09, d10); + sort_08v_merge_ascending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_02v_merge_ascending(d09, d10); } static INLINE void sort_10v_descending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08, __m256i& d09, __m256i& d10) { - __m256i tmp; + __m256i tmp; - sort_08v_descending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_02v_ascending(d09, d10); + sort_08v_descending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_02v_ascending(d09, d10); - tmp = d09; - - d09 = _mm256_max_epu32(d08, d09); - d08 = _mm256_min_epu32(d08, tmp); + tmp = d09; + + d09 = _mm256_max_epu32(d08, d09); + d08 = _mm256_min_epu32(d08, tmp); - tmp = d10; - - d10 = _mm256_max_epu32(d07, d10); - d07 = _mm256_min_epu32(d07, tmp); + tmp = d10; + + d10 = _mm256_max_epu32(d07, d10); + d07 = _mm256_min_epu32(d07, tmp); - sort_08v_merge_descending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_02v_merge_descending(d09, d10); + sort_08v_merge_descending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_02v_merge_descending(d09, d10); } static INLINE void sort_11v_ascending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08, __m256i& d09, __m256i& d10, __m256i& d11) { - __m256i tmp; + __m256i tmp; - sort_08v_ascending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_03v_descending(d09, d10, d11); + sort_08v_ascending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_03v_descending(d09, d10, d11); - tmp = d09; - - d09 = _mm256_max_epu32(d08, d09); - d08 = _mm256_min_epu32(d08, tmp); + tmp = d09; + + d09 = _mm256_max_epu32(d08, d09); + d08 = _mm256_min_epu32(d08, tmp); - tmp = d10; - - d10 = _mm256_max_epu32(d07, d10); - d07 = _mm256_min_epu32(d07, tmp); + tmp = d10; + + d10 = _mm256_max_epu32(d07, d10); + d07 = _mm256_min_epu32(d07, tmp); - tmp = d11; - - d11 = _mm256_max_epu32(d06, d11); - d06 = _mm256_min_epu32(d06, tmp); + tmp = d11; + + d11 = _mm256_max_epu32(d06, d11); + d06 = _mm256_min_epu32(d06, tmp); - 
sort_08v_merge_ascending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_03v_merge_ascending(d09, d10, d11); + sort_08v_merge_ascending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_03v_merge_ascending(d09, d10, d11); } static INLINE void sort_11v_descending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08, __m256i& d09, __m256i& d10, __m256i& d11) { - __m256i tmp; + __m256i tmp; - sort_08v_descending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_03v_ascending(d09, d10, d11); + sort_08v_descending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_03v_ascending(d09, d10, d11); - tmp = d09; - - d09 = _mm256_max_epu32(d08, d09); - d08 = _mm256_min_epu32(d08, tmp); + tmp = d09; + + d09 = _mm256_max_epu32(d08, d09); + d08 = _mm256_min_epu32(d08, tmp); - tmp = d10; - - d10 = _mm256_max_epu32(d07, d10); - d07 = _mm256_min_epu32(d07, tmp); + tmp = d10; + + d10 = _mm256_max_epu32(d07, d10); + d07 = _mm256_min_epu32(d07, tmp); - tmp = d11; - - d11 = _mm256_max_epu32(d06, d11); - d06 = _mm256_min_epu32(d06, tmp); + tmp = d11; + + d11 = _mm256_max_epu32(d06, d11); + d06 = _mm256_min_epu32(d06, tmp); - sort_08v_merge_descending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_03v_merge_descending(d09, d10, d11); + sort_08v_merge_descending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_03v_merge_descending(d09, d10, d11); } static INLINE void sort_12v_ascending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08, __m256i& d09, __m256i& d10, __m256i& d11, __m256i& d12) { - __m256i tmp; + __m256i tmp; - sort_08v_ascending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_04v_descending(d09, d10, d11, d12); + sort_08v_ascending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_04v_descending(d09, d10, d11, d12); - tmp = d09; - - d09 = _mm256_max_epu32(d08, d09); - d08 = _mm256_min_epu32(d08, tmp); + tmp = d09; + + d09 = _mm256_max_epu32(d08, d09); + d08 = _mm256_min_epu32(d08, tmp); - tmp = d10; - - d10 = _mm256_max_epu32(d07, d10); - d07 = _mm256_min_epu32(d07, tmp); + tmp = d10; + + d10 = _mm256_max_epu32(d07, d10); + d07 = _mm256_min_epu32(d07, tmp); - tmp = d11; - - d11 = _mm256_max_epu32(d06, d11); - d06 = _mm256_min_epu32(d06, tmp); + tmp = d11; + + d11 = _mm256_max_epu32(d06, d11); + d06 = _mm256_min_epu32(d06, tmp); - tmp = d12; - - d12 = _mm256_max_epu32(d05, d12); - d05 = _mm256_min_epu32(d05, tmp); + tmp = d12; + + d12 = _mm256_max_epu32(d05, d12); + d05 = _mm256_min_epu32(d05, tmp); - sort_08v_merge_ascending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_04v_merge_ascending(d09, d10, d11, d12); + sort_08v_merge_ascending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_04v_merge_ascending(d09, d10, d11, d12); } static INLINE void sort_12v_descending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08, __m256i& d09, __m256i& d10, __m256i& d11, __m256i& d12) { - __m256i tmp; + __m256i tmp; - sort_08v_descending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_04v_ascending(d09, d10, d11, d12); + sort_08v_descending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_04v_ascending(d09, d10, d11, d12); - tmp = d09; - - d09 = _mm256_max_epu32(d08, d09); - d08 = _mm256_min_epu32(d08, tmp); + tmp = d09; + + d09 = _mm256_max_epu32(d08, d09); + d08 = _mm256_min_epu32(d08, tmp); - tmp = d10; - - d10 = _mm256_max_epu32(d07, d10); - d07 = _mm256_min_epu32(d07, tmp); + tmp = d10; + + d10 = _mm256_max_epu32(d07, d10); + d07 = 
_mm256_min_epu32(d07, tmp); - tmp = d11; - - d11 = _mm256_max_epu32(d06, d11); - d06 = _mm256_min_epu32(d06, tmp); + tmp = d11; + + d11 = _mm256_max_epu32(d06, d11); + d06 = _mm256_min_epu32(d06, tmp); - tmp = d12; - - d12 = _mm256_max_epu32(d05, d12); - d05 = _mm256_min_epu32(d05, tmp); + tmp = d12; + + d12 = _mm256_max_epu32(d05, d12); + d05 = _mm256_min_epu32(d05, tmp); - sort_08v_merge_descending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_04v_merge_descending(d09, d10, d11, d12); + sort_08v_merge_descending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_04v_merge_descending(d09, d10, d11, d12); } static INLINE void sort_13v_ascending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08, __m256i& d09, __m256i& d10, __m256i& d11, __m256i& d12, __m256i& d13) { - __m256i tmp; + __m256i tmp; - sort_08v_ascending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_05v_descending(d09, d10, d11, d12, d13); + sort_08v_ascending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_05v_descending(d09, d10, d11, d12, d13); - tmp = d09; - - d09 = _mm256_max_epu32(d08, d09); - d08 = _mm256_min_epu32(d08, tmp); + tmp = d09; + + d09 = _mm256_max_epu32(d08, d09); + d08 = _mm256_min_epu32(d08, tmp); - tmp = d10; - - d10 = _mm256_max_epu32(d07, d10); - d07 = _mm256_min_epu32(d07, tmp); + tmp = d10; + + d10 = _mm256_max_epu32(d07, d10); + d07 = _mm256_min_epu32(d07, tmp); - tmp = d11; - - d11 = _mm256_max_epu32(d06, d11); - d06 = _mm256_min_epu32(d06, tmp); + tmp = d11; + + d11 = _mm256_max_epu32(d06, d11); + d06 = _mm256_min_epu32(d06, tmp); - tmp = d12; - - d12 = _mm256_max_epu32(d05, d12); - d05 = _mm256_min_epu32(d05, tmp); + tmp = d12; + + d12 = _mm256_max_epu32(d05, d12); + d05 = _mm256_min_epu32(d05, tmp); - tmp = d13; - - d13 = _mm256_max_epu32(d04, d13); - d04 = _mm256_min_epu32(d04, tmp); + tmp = d13; + + d13 = _mm256_max_epu32(d04, d13); + d04 = _mm256_min_epu32(d04, tmp); - sort_08v_merge_ascending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_05v_merge_ascending(d09, d10, d11, d12, d13); + sort_08v_merge_ascending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_05v_merge_ascending(d09, d10, d11, d12, d13); } static INLINE void sort_13v_descending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08, __m256i& d09, __m256i& d10, __m256i& d11, __m256i& d12, __m256i& d13) { - __m256i tmp; + __m256i tmp; - sort_08v_descending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_05v_ascending(d09, d10, d11, d12, d13); + sort_08v_descending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_05v_ascending(d09, d10, d11, d12, d13); - tmp = d09; - - d09 = _mm256_max_epu32(d08, d09); - d08 = _mm256_min_epu32(d08, tmp); + tmp = d09; + + d09 = _mm256_max_epu32(d08, d09); + d08 = _mm256_min_epu32(d08, tmp); - tmp = d10; - - d10 = _mm256_max_epu32(d07, d10); - d07 = _mm256_min_epu32(d07, tmp); + tmp = d10; + + d10 = _mm256_max_epu32(d07, d10); + d07 = _mm256_min_epu32(d07, tmp); - tmp = d11; - - d11 = _mm256_max_epu32(d06, d11); - d06 = _mm256_min_epu32(d06, tmp); + tmp = d11; + + d11 = _mm256_max_epu32(d06, d11); + d06 = _mm256_min_epu32(d06, tmp); - tmp = d12; - - d12 = _mm256_max_epu32(d05, d12); - d05 = _mm256_min_epu32(d05, tmp); + tmp = d12; + + d12 = _mm256_max_epu32(d05, d12); + d05 = _mm256_min_epu32(d05, tmp); - tmp = d13; - - d13 = _mm256_max_epu32(d04, d13); - d04 = _mm256_min_epu32(d04, tmp); + tmp = d13; + + d13 = _mm256_max_epu32(d04, d13); + d04 = _mm256_min_epu32(d04, tmp); - 
sort_08v_merge_descending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_05v_merge_descending(d09, d10, d11, d12, d13); + sort_08v_merge_descending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_05v_merge_descending(d09, d10, d11, d12, d13); } static INLINE void sort_14v_ascending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08, __m256i& d09, __m256i& d10, __m256i& d11, __m256i& d12, __m256i& d13, __m256i& d14) { - __m256i tmp; + __m256i tmp; - sort_08v_ascending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_06v_descending(d09, d10, d11, d12, d13, d14); + sort_08v_ascending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_06v_descending(d09, d10, d11, d12, d13, d14); - tmp = d09; - - d09 = _mm256_max_epu32(d08, d09); - d08 = _mm256_min_epu32(d08, tmp); + tmp = d09; + + d09 = _mm256_max_epu32(d08, d09); + d08 = _mm256_min_epu32(d08, tmp); - tmp = d10; - - d10 = _mm256_max_epu32(d07, d10); - d07 = _mm256_min_epu32(d07, tmp); + tmp = d10; + + d10 = _mm256_max_epu32(d07, d10); + d07 = _mm256_min_epu32(d07, tmp); - tmp = d11; - - d11 = _mm256_max_epu32(d06, d11); - d06 = _mm256_min_epu32(d06, tmp); + tmp = d11; + + d11 = _mm256_max_epu32(d06, d11); + d06 = _mm256_min_epu32(d06, tmp); - tmp = d12; - - d12 = _mm256_max_epu32(d05, d12); - d05 = _mm256_min_epu32(d05, tmp); + tmp = d12; + + d12 = _mm256_max_epu32(d05, d12); + d05 = _mm256_min_epu32(d05, tmp); - tmp = d13; - - d13 = _mm256_max_epu32(d04, d13); - d04 = _mm256_min_epu32(d04, tmp); + tmp = d13; + + d13 = _mm256_max_epu32(d04, d13); + d04 = _mm256_min_epu32(d04, tmp); - tmp = d14; - - d14 = _mm256_max_epu32(d03, d14); - d03 = _mm256_min_epu32(d03, tmp); + tmp = d14; + + d14 = _mm256_max_epu32(d03, d14); + d03 = _mm256_min_epu32(d03, tmp); - sort_08v_merge_ascending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_06v_merge_ascending(d09, d10, d11, d12, d13, d14); + sort_08v_merge_ascending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_06v_merge_ascending(d09, d10, d11, d12, d13, d14); } static INLINE void sort_14v_descending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08, __m256i& d09, __m256i& d10, __m256i& d11, __m256i& d12, __m256i& d13, __m256i& d14) { - __m256i tmp; + __m256i tmp; - sort_08v_descending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_06v_ascending(d09, d10, d11, d12, d13, d14); + sort_08v_descending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_06v_ascending(d09, d10, d11, d12, d13, d14); - tmp = d09; - - d09 = _mm256_max_epu32(d08, d09); - d08 = _mm256_min_epu32(d08, tmp); + tmp = d09; + + d09 = _mm256_max_epu32(d08, d09); + d08 = _mm256_min_epu32(d08, tmp); - tmp = d10; - - d10 = _mm256_max_epu32(d07, d10); - d07 = _mm256_min_epu32(d07, tmp); + tmp = d10; + + d10 = _mm256_max_epu32(d07, d10); + d07 = _mm256_min_epu32(d07, tmp); - tmp = d11; - - d11 = _mm256_max_epu32(d06, d11); - d06 = _mm256_min_epu32(d06, tmp); + tmp = d11; + + d11 = _mm256_max_epu32(d06, d11); + d06 = _mm256_min_epu32(d06, tmp); - tmp = d12; - - d12 = _mm256_max_epu32(d05, d12); - d05 = _mm256_min_epu32(d05, tmp); + tmp = d12; + + d12 = _mm256_max_epu32(d05, d12); + d05 = _mm256_min_epu32(d05, tmp); - tmp = d13; - - d13 = _mm256_max_epu32(d04, d13); - d04 = _mm256_min_epu32(d04, tmp); + tmp = d13; + + d13 = _mm256_max_epu32(d04, d13); + d04 = _mm256_min_epu32(d04, tmp); - tmp = d14; - - d14 = _mm256_max_epu32(d03, d14); - d03 = _mm256_min_epu32(d03, tmp); + tmp = d14; + + d14 = _mm256_max_epu32(d03, d14); + d03 = 
_mm256_min_epu32(d03, tmp); - sort_08v_merge_descending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_06v_merge_descending(d09, d10, d11, d12, d13, d14); + sort_08v_merge_descending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_06v_merge_descending(d09, d10, d11, d12, d13, d14); } static INLINE void sort_15v_ascending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08, __m256i& d09, __m256i& d10, __m256i& d11, __m256i& d12, __m256i& d13, __m256i& d14, __m256i& d15) { - __m256i tmp; + __m256i tmp; - sort_08v_ascending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_07v_descending(d09, d10, d11, d12, d13, d14, d15); + sort_08v_ascending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_07v_descending(d09, d10, d11, d12, d13, d14, d15); - tmp = d09; - - d09 = _mm256_max_epu32(d08, d09); - d08 = _mm256_min_epu32(d08, tmp); + tmp = d09; + + d09 = _mm256_max_epu32(d08, d09); + d08 = _mm256_min_epu32(d08, tmp); - tmp = d10; - - d10 = _mm256_max_epu32(d07, d10); - d07 = _mm256_min_epu32(d07, tmp); + tmp = d10; + + d10 = _mm256_max_epu32(d07, d10); + d07 = _mm256_min_epu32(d07, tmp); - tmp = d11; - - d11 = _mm256_max_epu32(d06, d11); - d06 = _mm256_min_epu32(d06, tmp); + tmp = d11; + + d11 = _mm256_max_epu32(d06, d11); + d06 = _mm256_min_epu32(d06, tmp); - tmp = d12; - - d12 = _mm256_max_epu32(d05, d12); - d05 = _mm256_min_epu32(d05, tmp); + tmp = d12; + + d12 = _mm256_max_epu32(d05, d12); + d05 = _mm256_min_epu32(d05, tmp); - tmp = d13; - - d13 = _mm256_max_epu32(d04, d13); - d04 = _mm256_min_epu32(d04, tmp); + tmp = d13; + + d13 = _mm256_max_epu32(d04, d13); + d04 = _mm256_min_epu32(d04, tmp); - tmp = d14; - - d14 = _mm256_max_epu32(d03, d14); - d03 = _mm256_min_epu32(d03, tmp); + tmp = d14; + + d14 = _mm256_max_epu32(d03, d14); + d03 = _mm256_min_epu32(d03, tmp); - tmp = d15; - - d15 = _mm256_max_epu32(d02, d15); - d02 = _mm256_min_epu32(d02, tmp); + tmp = d15; + + d15 = _mm256_max_epu32(d02, d15); + d02 = _mm256_min_epu32(d02, tmp); - sort_08v_merge_ascending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_07v_merge_ascending(d09, d10, d11, d12, d13, d14, d15); + sort_08v_merge_ascending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_07v_merge_ascending(d09, d10, d11, d12, d13, d14, d15); } static INLINE void sort_15v_descending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08, __m256i& d09, __m256i& d10, __m256i& d11, __m256i& d12, __m256i& d13, __m256i& d14, __m256i& d15) { - __m256i tmp; + __m256i tmp; - sort_08v_descending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_07v_ascending(d09, d10, d11, d12, d13, d14, d15); + sort_08v_descending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_07v_ascending(d09, d10, d11, d12, d13, d14, d15); - tmp = d09; - - d09 = _mm256_max_epu32(d08, d09); - d08 = _mm256_min_epu32(d08, tmp); + tmp = d09; + + d09 = _mm256_max_epu32(d08, d09); + d08 = _mm256_min_epu32(d08, tmp); - tmp = d10; - - d10 = _mm256_max_epu32(d07, d10); - d07 = _mm256_min_epu32(d07, tmp); + tmp = d10; + + d10 = _mm256_max_epu32(d07, d10); + d07 = _mm256_min_epu32(d07, tmp); - tmp = d11; - - d11 = _mm256_max_epu32(d06, d11); - d06 = _mm256_min_epu32(d06, tmp); + tmp = d11; + + d11 = _mm256_max_epu32(d06, d11); + d06 = _mm256_min_epu32(d06, tmp); - tmp = d12; - - d12 = _mm256_max_epu32(d05, d12); - d05 = _mm256_min_epu32(d05, tmp); + tmp = d12; + + d12 = _mm256_max_epu32(d05, d12); + d05 = _mm256_min_epu32(d05, tmp); - tmp = d13; - - d13 = _mm256_max_epu32(d04, 
d13); - d04 = _mm256_min_epu32(d04, tmp); + tmp = d13; + + d13 = _mm256_max_epu32(d04, d13); + d04 = _mm256_min_epu32(d04, tmp); - tmp = d14; - - d14 = _mm256_max_epu32(d03, d14); - d03 = _mm256_min_epu32(d03, tmp); + tmp = d14; + + d14 = _mm256_max_epu32(d03, d14); + d03 = _mm256_min_epu32(d03, tmp); - tmp = d15; - - d15 = _mm256_max_epu32(d02, d15); - d02 = _mm256_min_epu32(d02, tmp); + tmp = d15; + + d15 = _mm256_max_epu32(d02, d15); + d02 = _mm256_min_epu32(d02, tmp); - sort_08v_merge_descending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_07v_merge_descending(d09, d10, d11, d12, d13, d14, d15); + sort_08v_merge_descending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_07v_merge_descending(d09, d10, d11, d12, d13, d14, d15); } static INLINE void sort_16v_ascending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08, __m256i& d09, __m256i& d10, __m256i& d11, __m256i& d12, __m256i& d13, __m256i& d14, __m256i& d15, __m256i& d16) { - __m256i tmp; + __m256i tmp; - sort_08v_ascending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_08v_descending(d09, d10, d11, d12, d13, d14, d15, d16); + sort_08v_ascending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_08v_descending(d09, d10, d11, d12, d13, d14, d15, d16); - tmp = d09; - - d09 = _mm256_max_epu32(d08, d09); - d08 = _mm256_min_epu32(d08, tmp); + tmp = d09; + + d09 = _mm256_max_epu32(d08, d09); + d08 = _mm256_min_epu32(d08, tmp); - tmp = d10; - - d10 = _mm256_max_epu32(d07, d10); - d07 = _mm256_min_epu32(d07, tmp); + tmp = d10; + + d10 = _mm256_max_epu32(d07, d10); + d07 = _mm256_min_epu32(d07, tmp); - tmp = d11; - - d11 = _mm256_max_epu32(d06, d11); - d06 = _mm256_min_epu32(d06, tmp); + tmp = d11; + + d11 = _mm256_max_epu32(d06, d11); + d06 = _mm256_min_epu32(d06, tmp); - tmp = d12; - - d12 = _mm256_max_epu32(d05, d12); - d05 = _mm256_min_epu32(d05, tmp); + tmp = d12; + + d12 = _mm256_max_epu32(d05, d12); + d05 = _mm256_min_epu32(d05, tmp); - tmp = d13; - - d13 = _mm256_max_epu32(d04, d13); - d04 = _mm256_min_epu32(d04, tmp); + tmp = d13; + + d13 = _mm256_max_epu32(d04, d13); + d04 = _mm256_min_epu32(d04, tmp); - tmp = d14; - - d14 = _mm256_max_epu32(d03, d14); - d03 = _mm256_min_epu32(d03, tmp); + tmp = d14; + + d14 = _mm256_max_epu32(d03, d14); + d03 = _mm256_min_epu32(d03, tmp); - tmp = d15; - - d15 = _mm256_max_epu32(d02, d15); - d02 = _mm256_min_epu32(d02, tmp); + tmp = d15; + + d15 = _mm256_max_epu32(d02, d15); + d02 = _mm256_min_epu32(d02, tmp); - tmp = d16; - - d16 = _mm256_max_epu32(d01, d16); - d01 = _mm256_min_epu32(d01, tmp); + tmp = d16; + + d16 = _mm256_max_epu32(d01, d16); + d01 = _mm256_min_epu32(d01, tmp); - sort_08v_merge_ascending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_08v_merge_ascending(d09, d10, d11, d12, d13, d14, d15, d16); + sort_08v_merge_ascending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_08v_merge_ascending(d09, d10, d11, d12, d13, d14, d15, d16); } static INLINE void sort_16v_descending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08, __m256i& d09, __m256i& d10, __m256i& d11, __m256i& d12, __m256i& d13, __m256i& d14, __m256i& d15, __m256i& d16) { - __m256i tmp; + __m256i tmp; - sort_08v_descending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_08v_ascending(d09, d10, d11, d12, d13, d14, d15, d16); + sort_08v_descending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_08v_ascending(d09, d10, d11, d12, d13, d14, d15, d16); - tmp = d09; - - d09 = _mm256_max_epu32(d08, d09); - 
d08 = _mm256_min_epu32(d08, tmp); + tmp = d09; + + d09 = _mm256_max_epu32(d08, d09); + d08 = _mm256_min_epu32(d08, tmp); - tmp = d10; - - d10 = _mm256_max_epu32(d07, d10); - d07 = _mm256_min_epu32(d07, tmp); + tmp = d10; + + d10 = _mm256_max_epu32(d07, d10); + d07 = _mm256_min_epu32(d07, tmp); - tmp = d11; - - d11 = _mm256_max_epu32(d06, d11); - d06 = _mm256_min_epu32(d06, tmp); + tmp = d11; + + d11 = _mm256_max_epu32(d06, d11); + d06 = _mm256_min_epu32(d06, tmp); - tmp = d12; - - d12 = _mm256_max_epu32(d05, d12); - d05 = _mm256_min_epu32(d05, tmp); + tmp = d12; + + d12 = _mm256_max_epu32(d05, d12); + d05 = _mm256_min_epu32(d05, tmp); - tmp = d13; - - d13 = _mm256_max_epu32(d04, d13); - d04 = _mm256_min_epu32(d04, tmp); + tmp = d13; + + d13 = _mm256_max_epu32(d04, d13); + d04 = _mm256_min_epu32(d04, tmp); - tmp = d14; - - d14 = _mm256_max_epu32(d03, d14); - d03 = _mm256_min_epu32(d03, tmp); + tmp = d14; + + d14 = _mm256_max_epu32(d03, d14); + d03 = _mm256_min_epu32(d03, tmp); - tmp = d15; - - d15 = _mm256_max_epu32(d02, d15); - d02 = _mm256_min_epu32(d02, tmp); + tmp = d15; + + d15 = _mm256_max_epu32(d02, d15); + d02 = _mm256_min_epu32(d02, tmp); - tmp = d16; - - d16 = _mm256_max_epu32(d01, d16); - d01 = _mm256_min_epu32(d01, tmp); + tmp = d16; + + d16 = _mm256_max_epu32(d01, d16); + d01 = _mm256_min_epu32(d01, tmp); - sort_08v_merge_descending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_08v_merge_descending(d09, d10, d11, d12, d13, d14, d15, d16); + sort_08v_merge_descending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_08v_merge_descending(d09, d10, d11, d12, d13, d14, d15, d16); } -static NOINLINE void sort_01v(uint32_t *ptr) { - __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0);; - sort_01v_ascending(d01); - _mm256_storeu_si256((__m256i *) ptr + 0, d01); + static NOINLINE void sort_01v(uint32_t *ptr) { + __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0);; + sort_01v_ascending(d01); + _mm256_storeu_si256((__m256i *) ptr + 0, d01); } -static NOINLINE void sort_02v(uint32_t *ptr) { - __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0);; - __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1);; - sort_02v_ascending(d01, d02); - _mm256_storeu_si256((__m256i *) ptr + 0, d01); - _mm256_storeu_si256((__m256i *) ptr + 1, d02); + static NOINLINE void sort_02v(uint32_t *ptr) { + __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0);; + __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1);; + sort_02v_ascending(d01, d02); + _mm256_storeu_si256((__m256i *) ptr + 0, d01); + _mm256_storeu_si256((__m256i *) ptr + 1, d02); } -static NOINLINE void sort_03v(uint32_t *ptr) { - __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0);; - __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1);; - __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2);; - sort_03v_ascending(d01, d02, d03); - _mm256_storeu_si256((__m256i *) ptr + 0, d01); - _mm256_storeu_si256((__m256i *) ptr + 1, d02); - _mm256_storeu_si256((__m256i *) ptr + 2, d03); + static NOINLINE void sort_03v(uint32_t *ptr) { + __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0);; + __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1);; + __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2);; + sort_03v_ascending(d01, d02, d03); + _mm256_storeu_si256((__m256i *) ptr + 0, d01); + _mm256_storeu_si256((__m256i *) ptr + 1, d02); + _mm256_storeu_si256((__m256i *) ptr + 2, d03); } -static NOINLINE void sort_04v(uint32_t *ptr) { - __m256i d01 = _mm256_lddqu_si256((__m256i 
const *) ptr + 0);; - __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1);; - __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2);; - __m256i d04 = _mm256_lddqu_si256((__m256i const *) ptr + 3);; - sort_04v_ascending(d01, d02, d03, d04); - _mm256_storeu_si256((__m256i *) ptr + 0, d01); - _mm256_storeu_si256((__m256i *) ptr + 1, d02); - _mm256_storeu_si256((__m256i *) ptr + 2, d03); - _mm256_storeu_si256((__m256i *) ptr + 3, d04); + static NOINLINE void sort_04v(uint32_t *ptr) { + __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0);; + __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1);; + __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2);; + __m256i d04 = _mm256_lddqu_si256((__m256i const *) ptr + 3);; + sort_04v_ascending(d01, d02, d03, d04); + _mm256_storeu_si256((__m256i *) ptr + 0, d01); + _mm256_storeu_si256((__m256i *) ptr + 1, d02); + _mm256_storeu_si256((__m256i *) ptr + 2, d03); + _mm256_storeu_si256((__m256i *) ptr + 3, d04); } -static NOINLINE void sort_05v(uint32_t *ptr) { - __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0);; - __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1);; - __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2);; - __m256i d04 = _mm256_lddqu_si256((__m256i const *) ptr + 3);; - __m256i d05 = _mm256_lddqu_si256((__m256i const *) ptr + 4);; - sort_05v_ascending(d01, d02, d03, d04, d05); - _mm256_storeu_si256((__m256i *) ptr + 0, d01); - _mm256_storeu_si256((__m256i *) ptr + 1, d02); - _mm256_storeu_si256((__m256i *) ptr + 2, d03); - _mm256_storeu_si256((__m256i *) ptr + 3, d04); - _mm256_storeu_si256((__m256i *) ptr + 4, d05); + static NOINLINE void sort_05v(uint32_t *ptr) { + __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0);; + __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1);; + __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2);; + __m256i d04 = _mm256_lddqu_si256((__m256i const *) ptr + 3);; + __m256i d05 = _mm256_lddqu_si256((__m256i const *) ptr + 4);; + sort_05v_ascending(d01, d02, d03, d04, d05); + _mm256_storeu_si256((__m256i *) ptr + 0, d01); + _mm256_storeu_si256((__m256i *) ptr + 1, d02); + _mm256_storeu_si256((__m256i *) ptr + 2, d03); + _mm256_storeu_si256((__m256i *) ptr + 3, d04); + _mm256_storeu_si256((__m256i *) ptr + 4, d05); } -static NOINLINE void sort_06v(uint32_t *ptr) { - __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0);; - __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1);; - __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2);; - __m256i d04 = _mm256_lddqu_si256((__m256i const *) ptr + 3);; - __m256i d05 = _mm256_lddqu_si256((__m256i const *) ptr + 4);; - __m256i d06 = _mm256_lddqu_si256((__m256i const *) ptr + 5);; - sort_06v_ascending(d01, d02, d03, d04, d05, d06); - _mm256_storeu_si256((__m256i *) ptr + 0, d01); - _mm256_storeu_si256((__m256i *) ptr + 1, d02); - _mm256_storeu_si256((__m256i *) ptr + 2, d03); - _mm256_storeu_si256((__m256i *) ptr + 3, d04); - _mm256_storeu_si256((__m256i *) ptr + 4, d05); - _mm256_storeu_si256((__m256i *) ptr + 5, d06); + static NOINLINE void sort_06v(uint32_t *ptr) { + __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0);; + __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1);; + __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2);; + __m256i d04 = _mm256_lddqu_si256((__m256i const *) ptr + 3);; + __m256i d05 = _mm256_lddqu_si256((__m256i const *) ptr + 4);; + __m256i d06 = _mm256_lddqu_si256((__m256i const *) ptr + 5);; + sort_06v_ascending(d01, 
d02, d03, d04, d05, d06); + _mm256_storeu_si256((__m256i *) ptr + 0, d01); + _mm256_storeu_si256((__m256i *) ptr + 1, d02); + _mm256_storeu_si256((__m256i *) ptr + 2, d03); + _mm256_storeu_si256((__m256i *) ptr + 3, d04); + _mm256_storeu_si256((__m256i *) ptr + 4, d05); + _mm256_storeu_si256((__m256i *) ptr + 5, d06); } -static NOINLINE void sort_07v(uint32_t *ptr) { - __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0);; - __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1);; - __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2);; - __m256i d04 = _mm256_lddqu_si256((__m256i const *) ptr + 3);; - __m256i d05 = _mm256_lddqu_si256((__m256i const *) ptr + 4);; - __m256i d06 = _mm256_lddqu_si256((__m256i const *) ptr + 5);; - __m256i d07 = _mm256_lddqu_si256((__m256i const *) ptr + 6);; - sort_07v_ascending(d01, d02, d03, d04, d05, d06, d07); - _mm256_storeu_si256((__m256i *) ptr + 0, d01); - _mm256_storeu_si256((__m256i *) ptr + 1, d02); - _mm256_storeu_si256((__m256i *) ptr + 2, d03); - _mm256_storeu_si256((__m256i *) ptr + 3, d04); - _mm256_storeu_si256((__m256i *) ptr + 4, d05); - _mm256_storeu_si256((__m256i *) ptr + 5, d06); - _mm256_storeu_si256((__m256i *) ptr + 6, d07); + static NOINLINE void sort_07v(uint32_t *ptr) { + __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0);; + __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1);; + __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2);; + __m256i d04 = _mm256_lddqu_si256((__m256i const *) ptr + 3);; + __m256i d05 = _mm256_lddqu_si256((__m256i const *) ptr + 4);; + __m256i d06 = _mm256_lddqu_si256((__m256i const *) ptr + 5);; + __m256i d07 = _mm256_lddqu_si256((__m256i const *) ptr + 6);; + sort_07v_ascending(d01, d02, d03, d04, d05, d06, d07); + _mm256_storeu_si256((__m256i *) ptr + 0, d01); + _mm256_storeu_si256((__m256i *) ptr + 1, d02); + _mm256_storeu_si256((__m256i *) ptr + 2, d03); + _mm256_storeu_si256((__m256i *) ptr + 3, d04); + _mm256_storeu_si256((__m256i *) ptr + 4, d05); + _mm256_storeu_si256((__m256i *) ptr + 5, d06); + _mm256_storeu_si256((__m256i *) ptr + 6, d07); } -static NOINLINE void sort_08v(uint32_t *ptr) { - __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0);; - __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1);; - __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2);; - __m256i d04 = _mm256_lddqu_si256((__m256i const *) ptr + 3);; - __m256i d05 = _mm256_lddqu_si256((__m256i const *) ptr + 4);; - __m256i d06 = _mm256_lddqu_si256((__m256i const *) ptr + 5);; - __m256i d07 = _mm256_lddqu_si256((__m256i const *) ptr + 6);; - __m256i d08 = _mm256_lddqu_si256((__m256i const *) ptr + 7);; - sort_08v_ascending(d01, d02, d03, d04, d05, d06, d07, d08); - _mm256_storeu_si256((__m256i *) ptr + 0, d01); - _mm256_storeu_si256((__m256i *) ptr + 1, d02); - _mm256_storeu_si256((__m256i *) ptr + 2, d03); - _mm256_storeu_si256((__m256i *) ptr + 3, d04); - _mm256_storeu_si256((__m256i *) ptr + 4, d05); - _mm256_storeu_si256((__m256i *) ptr + 5, d06); - _mm256_storeu_si256((__m256i *) ptr + 6, d07); - _mm256_storeu_si256((__m256i *) ptr + 7, d08); + static NOINLINE void sort_08v(uint32_t *ptr) { + __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0);; + __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1);; + __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2);; + __m256i d04 = _mm256_lddqu_si256((__m256i const *) ptr + 3);; + __m256i d05 = _mm256_lddqu_si256((__m256i const *) ptr + 4);; + __m256i d06 = _mm256_lddqu_si256((__m256i const *) 
ptr + 5);; + __m256i d07 = _mm256_lddqu_si256((__m256i const *) ptr + 6);; + __m256i d08 = _mm256_lddqu_si256((__m256i const *) ptr + 7);; + sort_08v_ascending(d01, d02, d03, d04, d05, d06, d07, d08); + _mm256_storeu_si256((__m256i *) ptr + 0, d01); + _mm256_storeu_si256((__m256i *) ptr + 1, d02); + _mm256_storeu_si256((__m256i *) ptr + 2, d03); + _mm256_storeu_si256((__m256i *) ptr + 3, d04); + _mm256_storeu_si256((__m256i *) ptr + 4, d05); + _mm256_storeu_si256((__m256i *) ptr + 5, d06); + _mm256_storeu_si256((__m256i *) ptr + 6, d07); + _mm256_storeu_si256((__m256i *) ptr + 7, d08); } -static NOINLINE void sort_09v(uint32_t *ptr) { - __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0);; - __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1);; - __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2);; - __m256i d04 = _mm256_lddqu_si256((__m256i const *) ptr + 3);; - __m256i d05 = _mm256_lddqu_si256((__m256i const *) ptr + 4);; - __m256i d06 = _mm256_lddqu_si256((__m256i const *) ptr + 5);; - __m256i d07 = _mm256_lddqu_si256((__m256i const *) ptr + 6);; - __m256i d08 = _mm256_lddqu_si256((__m256i const *) ptr + 7);; - __m256i d09 = _mm256_lddqu_si256((__m256i const *) ptr + 8);; - sort_09v_ascending(d01, d02, d03, d04, d05, d06, d07, d08, d09); - _mm256_storeu_si256((__m256i *) ptr + 0, d01); - _mm256_storeu_si256((__m256i *) ptr + 1, d02); - _mm256_storeu_si256((__m256i *) ptr + 2, d03); - _mm256_storeu_si256((__m256i *) ptr + 3, d04); - _mm256_storeu_si256((__m256i *) ptr + 4, d05); - _mm256_storeu_si256((__m256i *) ptr + 5, d06); - _mm256_storeu_si256((__m256i *) ptr + 6, d07); - _mm256_storeu_si256((__m256i *) ptr + 7, d08); - _mm256_storeu_si256((__m256i *) ptr + 8, d09); + static NOINLINE void sort_09v(uint32_t *ptr) { + __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0);; + __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1);; + __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2);; + __m256i d04 = _mm256_lddqu_si256((__m256i const *) ptr + 3);; + __m256i d05 = _mm256_lddqu_si256((__m256i const *) ptr + 4);; + __m256i d06 = _mm256_lddqu_si256((__m256i const *) ptr + 5);; + __m256i d07 = _mm256_lddqu_si256((__m256i const *) ptr + 6);; + __m256i d08 = _mm256_lddqu_si256((__m256i const *) ptr + 7);; + __m256i d09 = _mm256_lddqu_si256((__m256i const *) ptr + 8);; + sort_09v_ascending(d01, d02, d03, d04, d05, d06, d07, d08, d09); + _mm256_storeu_si256((__m256i *) ptr + 0, d01); + _mm256_storeu_si256((__m256i *) ptr + 1, d02); + _mm256_storeu_si256((__m256i *) ptr + 2, d03); + _mm256_storeu_si256((__m256i *) ptr + 3, d04); + _mm256_storeu_si256((__m256i *) ptr + 4, d05); + _mm256_storeu_si256((__m256i *) ptr + 5, d06); + _mm256_storeu_si256((__m256i *) ptr + 6, d07); + _mm256_storeu_si256((__m256i *) ptr + 7, d08); + _mm256_storeu_si256((__m256i *) ptr + 8, d09); } -static NOINLINE void sort_10v(uint32_t *ptr) { - __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0);; - __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1);; - __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2);; - __m256i d04 = _mm256_lddqu_si256((__m256i const *) ptr + 3);; - __m256i d05 = _mm256_lddqu_si256((__m256i const *) ptr + 4);; - __m256i d06 = _mm256_lddqu_si256((__m256i const *) ptr + 5);; - __m256i d07 = _mm256_lddqu_si256((__m256i const *) ptr + 6);; - __m256i d08 = _mm256_lddqu_si256((__m256i const *) ptr + 7);; - __m256i d09 = _mm256_lddqu_si256((__m256i const *) ptr + 8);; - __m256i d10 = _mm256_lddqu_si256((__m256i const *) ptr + 9);; - 
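// Reader's note (not part of the generated file): every sort_NNv kernel in
// this hunk has the same three-step shape — NN unaligned 8-lane loads via
// _mm256_lddqu_si256, one in-register bitonic network (sort_NNv_ascending),
// then NN unaligned stores via _mm256_storeu_si256. The -/+ churn on these
// kernels is re-indentation only: the function bodies are unchanged and move
// inside the enclosing bitonic struct specialization.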
sort_10v_ascending(d01, d02, d03, d04, d05, d06, d07, d08, d09, d10); - _mm256_storeu_si256((__m256i *) ptr + 0, d01); - _mm256_storeu_si256((__m256i *) ptr + 1, d02); - _mm256_storeu_si256((__m256i *) ptr + 2, d03); - _mm256_storeu_si256((__m256i *) ptr + 3, d04); - _mm256_storeu_si256((__m256i *) ptr + 4, d05); - _mm256_storeu_si256((__m256i *) ptr + 5, d06); - _mm256_storeu_si256((__m256i *) ptr + 6, d07); - _mm256_storeu_si256((__m256i *) ptr + 7, d08); - _mm256_storeu_si256((__m256i *) ptr + 8, d09); - _mm256_storeu_si256((__m256i *) ptr + 9, d10); + static NOINLINE void sort_10v(uint32_t *ptr) { + __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0);; + __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1);; + __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2);; + __m256i d04 = _mm256_lddqu_si256((__m256i const *) ptr + 3);; + __m256i d05 = _mm256_lddqu_si256((__m256i const *) ptr + 4);; + __m256i d06 = _mm256_lddqu_si256((__m256i const *) ptr + 5);; + __m256i d07 = _mm256_lddqu_si256((__m256i const *) ptr + 6);; + __m256i d08 = _mm256_lddqu_si256((__m256i const *) ptr + 7);; + __m256i d09 = _mm256_lddqu_si256((__m256i const *) ptr + 8);; + __m256i d10 = _mm256_lddqu_si256((__m256i const *) ptr + 9);; + sort_10v_ascending(d01, d02, d03, d04, d05, d06, d07, d08, d09, d10); + _mm256_storeu_si256((__m256i *) ptr + 0, d01); + _mm256_storeu_si256((__m256i *) ptr + 1, d02); + _mm256_storeu_si256((__m256i *) ptr + 2, d03); + _mm256_storeu_si256((__m256i *) ptr + 3, d04); + _mm256_storeu_si256((__m256i *) ptr + 4, d05); + _mm256_storeu_si256((__m256i *) ptr + 5, d06); + _mm256_storeu_si256((__m256i *) ptr + 6, d07); + _mm256_storeu_si256((__m256i *) ptr + 7, d08); + _mm256_storeu_si256((__m256i *) ptr + 8, d09); + _mm256_storeu_si256((__m256i *) ptr + 9, d10); } -static NOINLINE void sort_11v(uint32_t *ptr) { - __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0);; - __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1);; - __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2);; - __m256i d04 = _mm256_lddqu_si256((__m256i const *) ptr + 3);; - __m256i d05 = _mm256_lddqu_si256((__m256i const *) ptr + 4);; - __m256i d06 = _mm256_lddqu_si256((__m256i const *) ptr + 5);; - __m256i d07 = _mm256_lddqu_si256((__m256i const *) ptr + 6);; - __m256i d08 = _mm256_lddqu_si256((__m256i const *) ptr + 7);; - __m256i d09 = _mm256_lddqu_si256((__m256i const *) ptr + 8);; - __m256i d10 = _mm256_lddqu_si256((__m256i const *) ptr + 9);; - __m256i d11 = _mm256_lddqu_si256((__m256i const *) ptr + 10);; - sort_11v_ascending(d01, d02, d03, d04, d05, d06, d07, d08, d09, d10, d11); - _mm256_storeu_si256((__m256i *) ptr + 0, d01); - _mm256_storeu_si256((__m256i *) ptr + 1, d02); - _mm256_storeu_si256((__m256i *) ptr + 2, d03); - _mm256_storeu_si256((__m256i *) ptr + 3, d04); - _mm256_storeu_si256((__m256i *) ptr + 4, d05); - _mm256_storeu_si256((__m256i *) ptr + 5, d06); - _mm256_storeu_si256((__m256i *) ptr + 6, d07); - _mm256_storeu_si256((__m256i *) ptr + 7, d08); - _mm256_storeu_si256((__m256i *) ptr + 8, d09); - _mm256_storeu_si256((__m256i *) ptr + 9, d10); - _mm256_storeu_si256((__m256i *) ptr + 10, d11); + static NOINLINE void sort_11v(uint32_t *ptr) { + __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0);; + __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1);; + __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2);; + __m256i d04 = _mm256_lddqu_si256((__m256i const *) ptr + 3);; + __m256i d05 = _mm256_lddqu_si256((__m256i const *) ptr + 4);; + 
__m256i d06 = _mm256_lddqu_si256((__m256i const *) ptr + 5);; + __m256i d07 = _mm256_lddqu_si256((__m256i const *) ptr + 6);; + __m256i d08 = _mm256_lddqu_si256((__m256i const *) ptr + 7);; + __m256i d09 = _mm256_lddqu_si256((__m256i const *) ptr + 8);; + __m256i d10 = _mm256_lddqu_si256((__m256i const *) ptr + 9);; + __m256i d11 = _mm256_lddqu_si256((__m256i const *) ptr + 10);; + sort_11v_ascending(d01, d02, d03, d04, d05, d06, d07, d08, d09, d10, d11); + _mm256_storeu_si256((__m256i *) ptr + 0, d01); + _mm256_storeu_si256((__m256i *) ptr + 1, d02); + _mm256_storeu_si256((__m256i *) ptr + 2, d03); + _mm256_storeu_si256((__m256i *) ptr + 3, d04); + _mm256_storeu_si256((__m256i *) ptr + 4, d05); + _mm256_storeu_si256((__m256i *) ptr + 5, d06); + _mm256_storeu_si256((__m256i *) ptr + 6, d07); + _mm256_storeu_si256((__m256i *) ptr + 7, d08); + _mm256_storeu_si256((__m256i *) ptr + 8, d09); + _mm256_storeu_si256((__m256i *) ptr + 9, d10); + _mm256_storeu_si256((__m256i *) ptr + 10, d11); } -static NOINLINE void sort_12v(uint32_t *ptr) { - __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0);; - __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1);; - __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2);; - __m256i d04 = _mm256_lddqu_si256((__m256i const *) ptr + 3);; - __m256i d05 = _mm256_lddqu_si256((__m256i const *) ptr + 4);; - __m256i d06 = _mm256_lddqu_si256((__m256i const *) ptr + 5);; - __m256i d07 = _mm256_lddqu_si256((__m256i const *) ptr + 6);; - __m256i d08 = _mm256_lddqu_si256((__m256i const *) ptr + 7);; - __m256i d09 = _mm256_lddqu_si256((__m256i const *) ptr + 8);; - __m256i d10 = _mm256_lddqu_si256((__m256i const *) ptr + 9);; - __m256i d11 = _mm256_lddqu_si256((__m256i const *) ptr + 10);; - __m256i d12 = _mm256_lddqu_si256((__m256i const *) ptr + 11);; - sort_12v_ascending(d01, d02, d03, d04, d05, d06, d07, d08, d09, d10, d11, d12); - _mm256_storeu_si256((__m256i *) ptr + 0, d01); - _mm256_storeu_si256((__m256i *) ptr + 1, d02); - _mm256_storeu_si256((__m256i *) ptr + 2, d03); - _mm256_storeu_si256((__m256i *) ptr + 3, d04); - _mm256_storeu_si256((__m256i *) ptr + 4, d05); - _mm256_storeu_si256((__m256i *) ptr + 5, d06); - _mm256_storeu_si256((__m256i *) ptr + 6, d07); - _mm256_storeu_si256((__m256i *) ptr + 7, d08); - _mm256_storeu_si256((__m256i *) ptr + 8, d09); - _mm256_storeu_si256((__m256i *) ptr + 9, d10); - _mm256_storeu_si256((__m256i *) ptr + 10, d11); - _mm256_storeu_si256((__m256i *) ptr + 11, d12); + static NOINLINE void sort_12v(uint32_t *ptr) { + __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0);; + __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1);; + __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2);; + __m256i d04 = _mm256_lddqu_si256((__m256i const *) ptr + 3);; + __m256i d05 = _mm256_lddqu_si256((__m256i const *) ptr + 4);; + __m256i d06 = _mm256_lddqu_si256((__m256i const *) ptr + 5);; + __m256i d07 = _mm256_lddqu_si256((__m256i const *) ptr + 6);; + __m256i d08 = _mm256_lddqu_si256((__m256i const *) ptr + 7);; + __m256i d09 = _mm256_lddqu_si256((__m256i const *) ptr + 8);; + __m256i d10 = _mm256_lddqu_si256((__m256i const *) ptr + 9);; + __m256i d11 = _mm256_lddqu_si256((__m256i const *) ptr + 10);; + __m256i d12 = _mm256_lddqu_si256((__m256i const *) ptr + 11);; + sort_12v_ascending(d01, d02, d03, d04, d05, d06, d07, d08, d09, d10, d11, d12); + _mm256_storeu_si256((__m256i *) ptr + 0, d01); + _mm256_storeu_si256((__m256i *) ptr + 1, d02); + _mm256_storeu_si256((__m256i *) ptr + 2, d03); + 
_mm256_storeu_si256((__m256i *) ptr + 3, d04); + _mm256_storeu_si256((__m256i *) ptr + 4, d05); + _mm256_storeu_si256((__m256i *) ptr + 5, d06); + _mm256_storeu_si256((__m256i *) ptr + 6, d07); + _mm256_storeu_si256((__m256i *) ptr + 7, d08); + _mm256_storeu_si256((__m256i *) ptr + 8, d09); + _mm256_storeu_si256((__m256i *) ptr + 9, d10); + _mm256_storeu_si256((__m256i *) ptr + 10, d11); + _mm256_storeu_si256((__m256i *) ptr + 11, d12); } -static NOINLINE void sort_13v(uint32_t *ptr) { - __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0);; - __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1);; - __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2);; - __m256i d04 = _mm256_lddqu_si256((__m256i const *) ptr + 3);; - __m256i d05 = _mm256_lddqu_si256((__m256i const *) ptr + 4);; - __m256i d06 = _mm256_lddqu_si256((__m256i const *) ptr + 5);; - __m256i d07 = _mm256_lddqu_si256((__m256i const *) ptr + 6);; - __m256i d08 = _mm256_lddqu_si256((__m256i const *) ptr + 7);; - __m256i d09 = _mm256_lddqu_si256((__m256i const *) ptr + 8);; - __m256i d10 = _mm256_lddqu_si256((__m256i const *) ptr + 9);; - __m256i d11 = _mm256_lddqu_si256((__m256i const *) ptr + 10);; - __m256i d12 = _mm256_lddqu_si256((__m256i const *) ptr + 11);; - __m256i d13 = _mm256_lddqu_si256((__m256i const *) ptr + 12);; - sort_13v_ascending(d01, d02, d03, d04, d05, d06, d07, d08, d09, d10, d11, d12, d13); - _mm256_storeu_si256((__m256i *) ptr + 0, d01); - _mm256_storeu_si256((__m256i *) ptr + 1, d02); - _mm256_storeu_si256((__m256i *) ptr + 2, d03); - _mm256_storeu_si256((__m256i *) ptr + 3, d04); - _mm256_storeu_si256((__m256i *) ptr + 4, d05); - _mm256_storeu_si256((__m256i *) ptr + 5, d06); - _mm256_storeu_si256((__m256i *) ptr + 6, d07); - _mm256_storeu_si256((__m256i *) ptr + 7, d08); - _mm256_storeu_si256((__m256i *) ptr + 8, d09); - _mm256_storeu_si256((__m256i *) ptr + 9, d10); - _mm256_storeu_si256((__m256i *) ptr + 10, d11); - _mm256_storeu_si256((__m256i *) ptr + 11, d12); - _mm256_storeu_si256((__m256i *) ptr + 12, d13); + static NOINLINE void sort_13v(uint32_t *ptr) { + __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0);; + __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1);; + __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2);; + __m256i d04 = _mm256_lddqu_si256((__m256i const *) ptr + 3);; + __m256i d05 = _mm256_lddqu_si256((__m256i const *) ptr + 4);; + __m256i d06 = _mm256_lddqu_si256((__m256i const *) ptr + 5);; + __m256i d07 = _mm256_lddqu_si256((__m256i const *) ptr + 6);; + __m256i d08 = _mm256_lddqu_si256((__m256i const *) ptr + 7);; + __m256i d09 = _mm256_lddqu_si256((__m256i const *) ptr + 8);; + __m256i d10 = _mm256_lddqu_si256((__m256i const *) ptr + 9);; + __m256i d11 = _mm256_lddqu_si256((__m256i const *) ptr + 10);; + __m256i d12 = _mm256_lddqu_si256((__m256i const *) ptr + 11);; + __m256i d13 = _mm256_lddqu_si256((__m256i const *) ptr + 12);; + sort_13v_ascending(d01, d02, d03, d04, d05, d06, d07, d08, d09, d10, d11, d12, d13); + _mm256_storeu_si256((__m256i *) ptr + 0, d01); + _mm256_storeu_si256((__m256i *) ptr + 1, d02); + _mm256_storeu_si256((__m256i *) ptr + 2, d03); + _mm256_storeu_si256((__m256i *) ptr + 3, d04); + _mm256_storeu_si256((__m256i *) ptr + 4, d05); + _mm256_storeu_si256((__m256i *) ptr + 5, d06); + _mm256_storeu_si256((__m256i *) ptr + 6, d07); + _mm256_storeu_si256((__m256i *) ptr + 7, d08); + _mm256_storeu_si256((__m256i *) ptr + 8, d09); + _mm256_storeu_si256((__m256i *) ptr + 9, d10); + _mm256_storeu_si256((__m256i *) ptr + 
10, d11); + _mm256_storeu_si256((__m256i *) ptr + 11, d12); + _mm256_storeu_si256((__m256i *) ptr + 12, d13); } -static NOINLINE void sort_14v(uint32_t *ptr) { - __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0);; - __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1);; - __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2);; - __m256i d04 = _mm256_lddqu_si256((__m256i const *) ptr + 3);; - __m256i d05 = _mm256_lddqu_si256((__m256i const *) ptr + 4);; - __m256i d06 = _mm256_lddqu_si256((__m256i const *) ptr + 5);; - __m256i d07 = _mm256_lddqu_si256((__m256i const *) ptr + 6);; - __m256i d08 = _mm256_lddqu_si256((__m256i const *) ptr + 7);; - __m256i d09 = _mm256_lddqu_si256((__m256i const *) ptr + 8);; - __m256i d10 = _mm256_lddqu_si256((__m256i const *) ptr + 9);; - __m256i d11 = _mm256_lddqu_si256((__m256i const *) ptr + 10);; - __m256i d12 = _mm256_lddqu_si256((__m256i const *) ptr + 11);; - __m256i d13 = _mm256_lddqu_si256((__m256i const *) ptr + 12);; - __m256i d14 = _mm256_lddqu_si256((__m256i const *) ptr + 13);; - sort_14v_ascending(d01, d02, d03, d04, d05, d06, d07, d08, d09, d10, d11, d12, d13, d14); - _mm256_storeu_si256((__m256i *) ptr + 0, d01); - _mm256_storeu_si256((__m256i *) ptr + 1, d02); - _mm256_storeu_si256((__m256i *) ptr + 2, d03); - _mm256_storeu_si256((__m256i *) ptr + 3, d04); - _mm256_storeu_si256((__m256i *) ptr + 4, d05); - _mm256_storeu_si256((__m256i *) ptr + 5, d06); - _mm256_storeu_si256((__m256i *) ptr + 6, d07); - _mm256_storeu_si256((__m256i *) ptr + 7, d08); - _mm256_storeu_si256((__m256i *) ptr + 8, d09); - _mm256_storeu_si256((__m256i *) ptr + 9, d10); - _mm256_storeu_si256((__m256i *) ptr + 10, d11); - _mm256_storeu_si256((__m256i *) ptr + 11, d12); - _mm256_storeu_si256((__m256i *) ptr + 12, d13); - _mm256_storeu_si256((__m256i *) ptr + 13, d14); + static NOINLINE void sort_14v(uint32_t *ptr) { + __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0);; + __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1);; + __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2);; + __m256i d04 = _mm256_lddqu_si256((__m256i const *) ptr + 3);; + __m256i d05 = _mm256_lddqu_si256((__m256i const *) ptr + 4);; + __m256i d06 = _mm256_lddqu_si256((__m256i const *) ptr + 5);; + __m256i d07 = _mm256_lddqu_si256((__m256i const *) ptr + 6);; + __m256i d08 = _mm256_lddqu_si256((__m256i const *) ptr + 7);; + __m256i d09 = _mm256_lddqu_si256((__m256i const *) ptr + 8);; + __m256i d10 = _mm256_lddqu_si256((__m256i const *) ptr + 9);; + __m256i d11 = _mm256_lddqu_si256((__m256i const *) ptr + 10);; + __m256i d12 = _mm256_lddqu_si256((__m256i const *) ptr + 11);; + __m256i d13 = _mm256_lddqu_si256((__m256i const *) ptr + 12);; + __m256i d14 = _mm256_lddqu_si256((__m256i const *) ptr + 13);; + sort_14v_ascending(d01, d02, d03, d04, d05, d06, d07, d08, d09, d10, d11, d12, d13, d14); + _mm256_storeu_si256((__m256i *) ptr + 0, d01); + _mm256_storeu_si256((__m256i *) ptr + 1, d02); + _mm256_storeu_si256((__m256i *) ptr + 2, d03); + _mm256_storeu_si256((__m256i *) ptr + 3, d04); + _mm256_storeu_si256((__m256i *) ptr + 4, d05); + _mm256_storeu_si256((__m256i *) ptr + 5, d06); + _mm256_storeu_si256((__m256i *) ptr + 6, d07); + _mm256_storeu_si256((__m256i *) ptr + 7, d08); + _mm256_storeu_si256((__m256i *) ptr + 8, d09); + _mm256_storeu_si256((__m256i *) ptr + 9, d10); + _mm256_storeu_si256((__m256i *) ptr + 10, d11); + _mm256_storeu_si256((__m256i *) ptr + 11, d12); + _mm256_storeu_si256((__m256i *) ptr + 12, d13); + _mm256_storeu_si256((__m256i 
*) ptr + 13, d14); } -static NOINLINE void sort_15v(uint32_t *ptr) { - __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0);; - __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1);; - __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2);; - __m256i d04 = _mm256_lddqu_si256((__m256i const *) ptr + 3);; - __m256i d05 = _mm256_lddqu_si256((__m256i const *) ptr + 4);; - __m256i d06 = _mm256_lddqu_si256((__m256i const *) ptr + 5);; - __m256i d07 = _mm256_lddqu_si256((__m256i const *) ptr + 6);; - __m256i d08 = _mm256_lddqu_si256((__m256i const *) ptr + 7);; - __m256i d09 = _mm256_lddqu_si256((__m256i const *) ptr + 8);; - __m256i d10 = _mm256_lddqu_si256((__m256i const *) ptr + 9);; - __m256i d11 = _mm256_lddqu_si256((__m256i const *) ptr + 10);; - __m256i d12 = _mm256_lddqu_si256((__m256i const *) ptr + 11);; - __m256i d13 = _mm256_lddqu_si256((__m256i const *) ptr + 12);; - __m256i d14 = _mm256_lddqu_si256((__m256i const *) ptr + 13);; - __m256i d15 = _mm256_lddqu_si256((__m256i const *) ptr + 14);; - sort_15v_ascending(d01, d02, d03, d04, d05, d06, d07, d08, d09, d10, d11, d12, d13, d14, d15); - _mm256_storeu_si256((__m256i *) ptr + 0, d01); - _mm256_storeu_si256((__m256i *) ptr + 1, d02); - _mm256_storeu_si256((__m256i *) ptr + 2, d03); - _mm256_storeu_si256((__m256i *) ptr + 3, d04); - _mm256_storeu_si256((__m256i *) ptr + 4, d05); - _mm256_storeu_si256((__m256i *) ptr + 5, d06); - _mm256_storeu_si256((__m256i *) ptr + 6, d07); - _mm256_storeu_si256((__m256i *) ptr + 7, d08); - _mm256_storeu_si256((__m256i *) ptr + 8, d09); - _mm256_storeu_si256((__m256i *) ptr + 9, d10); - _mm256_storeu_si256((__m256i *) ptr + 10, d11); - _mm256_storeu_si256((__m256i *) ptr + 11, d12); - _mm256_storeu_si256((__m256i *) ptr + 12, d13); - _mm256_storeu_si256((__m256i *) ptr + 13, d14); - _mm256_storeu_si256((__m256i *) ptr + 14, d15); + static NOINLINE void sort_15v(uint32_t *ptr) { + __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0);; + __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1);; + __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2);; + __m256i d04 = _mm256_lddqu_si256((__m256i const *) ptr + 3);; + __m256i d05 = _mm256_lddqu_si256((__m256i const *) ptr + 4);; + __m256i d06 = _mm256_lddqu_si256((__m256i const *) ptr + 5);; + __m256i d07 = _mm256_lddqu_si256((__m256i const *) ptr + 6);; + __m256i d08 = _mm256_lddqu_si256((__m256i const *) ptr + 7);; + __m256i d09 = _mm256_lddqu_si256((__m256i const *) ptr + 8);; + __m256i d10 = _mm256_lddqu_si256((__m256i const *) ptr + 9);; + __m256i d11 = _mm256_lddqu_si256((__m256i const *) ptr + 10);; + __m256i d12 = _mm256_lddqu_si256((__m256i const *) ptr + 11);; + __m256i d13 = _mm256_lddqu_si256((__m256i const *) ptr + 12);; + __m256i d14 = _mm256_lddqu_si256((__m256i const *) ptr + 13);; + __m256i d15 = _mm256_lddqu_si256((__m256i const *) ptr + 14);; + sort_15v_ascending(d01, d02, d03, d04, d05, d06, d07, d08, d09, d10, d11, d12, d13, d14, d15); + _mm256_storeu_si256((__m256i *) ptr + 0, d01); + _mm256_storeu_si256((__m256i *) ptr + 1, d02); + _mm256_storeu_si256((__m256i *) ptr + 2, d03); + _mm256_storeu_si256((__m256i *) ptr + 3, d04); + _mm256_storeu_si256((__m256i *) ptr + 4, d05); + _mm256_storeu_si256((__m256i *) ptr + 5, d06); + _mm256_storeu_si256((__m256i *) ptr + 6, d07); + _mm256_storeu_si256((__m256i *) ptr + 7, d08); + _mm256_storeu_si256((__m256i *) ptr + 8, d09); + _mm256_storeu_si256((__m256i *) ptr + 9, d10); + _mm256_storeu_si256((__m256i *) ptr + 10, d11); + _mm256_storeu_si256((__m256i 
*) ptr + 11, d12); + _mm256_storeu_si256((__m256i *) ptr + 12, d13); + _mm256_storeu_si256((__m256i *) ptr + 13, d14); + _mm256_storeu_si256((__m256i *) ptr + 14, d15); } -static NOINLINE void sort_16v(uint32_t *ptr) { - __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0);; - __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1);; - __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2);; - __m256i d04 = _mm256_lddqu_si256((__m256i const *) ptr + 3);; - __m256i d05 = _mm256_lddqu_si256((__m256i const *) ptr + 4);; - __m256i d06 = _mm256_lddqu_si256((__m256i const *) ptr + 5);; - __m256i d07 = _mm256_lddqu_si256((__m256i const *) ptr + 6);; - __m256i d08 = _mm256_lddqu_si256((__m256i const *) ptr + 7);; - __m256i d09 = _mm256_lddqu_si256((__m256i const *) ptr + 8);; - __m256i d10 = _mm256_lddqu_si256((__m256i const *) ptr + 9);; - __m256i d11 = _mm256_lddqu_si256((__m256i const *) ptr + 10);; - __m256i d12 = _mm256_lddqu_si256((__m256i const *) ptr + 11);; - __m256i d13 = _mm256_lddqu_si256((__m256i const *) ptr + 12);; - __m256i d14 = _mm256_lddqu_si256((__m256i const *) ptr + 13);; - __m256i d15 = _mm256_lddqu_si256((__m256i const *) ptr + 14);; - __m256i d16 = _mm256_lddqu_si256((__m256i const *) ptr + 15);; - sort_16v_ascending(d01, d02, d03, d04, d05, d06, d07, d08, d09, d10, d11, d12, d13, d14, d15, d16); - _mm256_storeu_si256((__m256i *) ptr + 0, d01); - _mm256_storeu_si256((__m256i *) ptr + 1, d02); - _mm256_storeu_si256((__m256i *) ptr + 2, d03); - _mm256_storeu_si256((__m256i *) ptr + 3, d04); - _mm256_storeu_si256((__m256i *) ptr + 4, d05); - _mm256_storeu_si256((__m256i *) ptr + 5, d06); - _mm256_storeu_si256((__m256i *) ptr + 6, d07); - _mm256_storeu_si256((__m256i *) ptr + 7, d08); - _mm256_storeu_si256((__m256i *) ptr + 8, d09); - _mm256_storeu_si256((__m256i *) ptr + 9, d10); - _mm256_storeu_si256((__m256i *) ptr + 10, d11); - _mm256_storeu_si256((__m256i *) ptr + 11, d12); - _mm256_storeu_si256((__m256i *) ptr + 12, d13); - _mm256_storeu_si256((__m256i *) ptr + 13, d14); - _mm256_storeu_si256((__m256i *) ptr + 14, d15); - _mm256_storeu_si256((__m256i *) ptr + 15, d16); -} - static void sort(uint32_t *ptr, size_t length) { - const int N = 8; - - switch(length / N) { - case 1: sort_01v(ptr); break; - case 2: sort_02v(ptr); break; - case 3: sort_03v(ptr); break; - case 4: sort_04v(ptr); break; - case 5: sort_05v(ptr); break; - case 6: sort_06v(ptr); break; - case 7: sort_07v(ptr); break; - case 8: sort_08v(ptr); break; - case 9: sort_09v(ptr); break; - case 10: sort_10v(ptr); break; - case 11: sort_11v(ptr); break; - case 12: sort_12v(ptr); break; - case 13: sort_13v(ptr); break; - case 14: sort_14v(ptr); break; - case 15: sort_15v(ptr); break; - case 16: sort_16v(ptr); break; - } + static NOINLINE void sort_16v(uint32_t *ptr) { + __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0);; + __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1);; + __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2);; + __m256i d04 = _mm256_lddqu_si256((__m256i const *) ptr + 3);; + __m256i d05 = _mm256_lddqu_si256((__m256i const *) ptr + 4);; + __m256i d06 = _mm256_lddqu_si256((__m256i const *) ptr + 5);; + __m256i d07 = _mm256_lddqu_si256((__m256i const *) ptr + 6);; + __m256i d08 = _mm256_lddqu_si256((__m256i const *) ptr + 7);; + __m256i d09 = _mm256_lddqu_si256((__m256i const *) ptr + 8);; + __m256i d10 = _mm256_lddqu_si256((__m256i const *) ptr + 9);; + __m256i d11 = _mm256_lddqu_si256((__m256i const *) ptr + 10);; + __m256i d12 = 
_mm256_lddqu_si256((__m256i const *) ptr + 11);; + __m256i d13 = _mm256_lddqu_si256((__m256i const *) ptr + 12);; + __m256i d14 = _mm256_lddqu_si256((__m256i const *) ptr + 13);; + __m256i d15 = _mm256_lddqu_si256((__m256i const *) ptr + 14);; + __m256i d16 = _mm256_lddqu_si256((__m256i const *) ptr + 15);; + sort_16v_ascending(d01, d02, d03, d04, d05, d06, d07, d08, d09, d10, d11, d12, d13, d14, d15, d16); + _mm256_storeu_si256((__m256i *) ptr + 0, d01); + _mm256_storeu_si256((__m256i *) ptr + 1, d02); + _mm256_storeu_si256((__m256i *) ptr + 2, d03); + _mm256_storeu_si256((__m256i *) ptr + 3, d04); + _mm256_storeu_si256((__m256i *) ptr + 4, d05); + _mm256_storeu_si256((__m256i *) ptr + 5, d06); + _mm256_storeu_si256((__m256i *) ptr + 6, d07); + _mm256_storeu_si256((__m256i *) ptr + 7, d08); + _mm256_storeu_si256((__m256i *) ptr + 8, d09); + _mm256_storeu_si256((__m256i *) ptr + 9, d10); + _mm256_storeu_si256((__m256i *) ptr + 10, d11); + _mm256_storeu_si256((__m256i *) ptr + 11, d12); + _mm256_storeu_si256((__m256i *) ptr + 12, d13); + _mm256_storeu_si256((__m256i *) ptr + 13, d14); + _mm256_storeu_si256((__m256i *) ptr + 14, d15); + _mm256_storeu_si256((__m256i *) ptr + 15, d16); } + static void sort(uint32_t *ptr, size_t length); + }; } } + +#undef i2d +#undef d2i +#undef i2s +#undef s2i +#undef s2d +#undef d2s + +#ifdef __GNUC__ +#ifdef __clang__ +#pragma clang attribute pop +#else +#pragma GCC pop_options +#endif #endif +#endif + diff --git a/src/coreclr/src/gc/smallsort/bitonic_sort.AVX2.uint64_t.generated.h b/src/coreclr/src/gc/smallsort/bitonic_sort.AVX2.uint64_t.generated.h deleted file mode 100644 index 4edbed703eb26a..00000000000000 --- a/src/coreclr/src/gc/smallsort/bitonic_sort.AVX2.uint64_t.generated.h +++ /dev/null @@ -1,1540 +0,0 @@ -///////////////////////////////////////////////////////////////////////////// -//// -// This file was auto-generated by a tool at 2020-05-31 19:46:17 -// -// It is recommended you DO NOT directly edit this file but instead edit -// the code-generator that generated this source file instead. 
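// Context for the deletion below (a minimal explanatory sketch, not file
// content): the uint64_t variant is structurally identical to the int64_t
// one, except that AVX2 has no unsigned 64-bit compare. Every
// compare-exchange therefore XORs both operands with the top bit before
// comparing, which maps unsigned order onto the signed _mm256_cmpgt_epi64.
// Illustrated self-contained below; the helper name cmpgt_epu64 is chosen
// here for exposition and does not appear in the patch:
//
//   #include <immintrin.h>
//
//   static inline __m256i cmpgt_epu64(__m256i a, __m256i b) {
//       // a > b (unsigned)  <=>  (a ^ 2^63) > (b ^ 2^63) (signed)
//       const __m256i topBit = _mm256_set1_epi64x(1LLU << 63);
//       return _mm256_cmpgt_epi64(_mm256_xor_si256(a, topBit),
//                                 _mm256_xor_si256(b, topBit));
//   }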
-///////////////////////////////////////////////////////////////////////////// - -#ifndef BITONIC_SORT_AVX2_UINT64_T_H -#define BITONIC_SORT_AVX2_UINT64_T_H - -#include -#include "bitonic_sort.h" - -#ifdef _MSC_VER - // MSVC - #define INLINE __forceinline - #define NOINLINE __declspec(noinline) -#else - // GCC + Clang - #define INLINE __attribute__((always_inline)) - #define NOINLINE __attribute__((noinline)) -#endif - -#define i2d _mm256_castsi256_pd -#define d2i _mm256_castpd_si256 -#define i2s _mm256_castsi256_ps -#define s2i _mm256_castps_si256 -#define s2d _mm256_castps_pd -#define d2s _mm256_castpd_ps - -namespace gcsort { -namespace smallsort { -template<> struct bitonic { -public: - - static INLINE void sort_01v_ascending(__m256i& d01) { - __m256i min, max, s, cmp; - __m256i topBit = _mm256_set1_epi64x(1LLU << 63); - - s = d2i(_mm256_shuffle_pd(i2d(d01), i2d(d01), 0x5)); - cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, s), _mm256_xor_si256(topBit, d01)); - min = d2i(_mm256_blendv_pd(i2d(s), i2d(d01), i2d(cmp))); - max = d2i(_mm256_blendv_pd(i2d(d01), i2d(s), i2d(cmp))); - d01 = d2i(_mm256_blend_pd(i2d(min), i2d(max), 0xA)); - - s = d2i(_mm256_permute4x64_pd(i2d(d01), 0x1B)); - cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, s), _mm256_xor_si256(topBit, d01)); - min = d2i(_mm256_blendv_pd(i2d(s), i2d(d01), i2d(cmp))); - max = d2i(_mm256_blendv_pd(i2d(d01), i2d(s), i2d(cmp))); - d01 = d2i(_mm256_blend_pd(i2d(min), i2d(max), 0xC)); - - s = d2i(_mm256_shuffle_pd(i2d(d01), i2d(d01), 0x5)); - cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, s), _mm256_xor_si256(topBit, d01)); - min = d2i(_mm256_blendv_pd(i2d(s), i2d(d01), i2d(cmp))); - max = d2i(_mm256_blendv_pd(i2d(d01), i2d(s), i2d(cmp))); - d01 = d2i(_mm256_blend_pd(i2d(min), i2d(max), 0xA)); -} - static INLINE void sort_01v_merge_ascending(__m256i& d01) { - __m256i min, max, s, cmp; - __m256i topBit = _mm256_set1_epi64x(1LLU << 63); - - s = d2i(_mm256_permute4x64_pd(i2d(d01), 0x4E)); - cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, s), _mm256_xor_si256(topBit, d01)); - min = d2i(_mm256_blendv_pd(i2d(s), i2d(d01), i2d(cmp))); - max = d2i(_mm256_blendv_pd(i2d(d01), i2d(s), i2d(cmp))); - d01 = d2i(_mm256_blend_pd(i2d(min), i2d(max), 0xC)); - - s = d2i(_mm256_shuffle_pd(i2d(d01), i2d(d01), 0x5)); - cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, s), _mm256_xor_si256(topBit, d01)); - min = d2i(_mm256_blendv_pd(i2d(s), i2d(d01), i2d(cmp))); - max = d2i(_mm256_blendv_pd(i2d(d01), i2d(s), i2d(cmp))); - d01 = d2i(_mm256_blend_pd(i2d(min), i2d(max), 0xA)); - } - static INLINE void sort_01v_descending(__m256i& d01) { - __m256i min, max, s, cmp; - __m256i topBit = _mm256_set1_epi64x(1LLU << 63); - - s = d2i(_mm256_shuffle_pd(i2d(d01), i2d(d01), 0x5)); - cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, s), _mm256_xor_si256(topBit, d01)); - min = d2i(_mm256_blendv_pd(i2d(s), i2d(d01), i2d(cmp))); - max = d2i(_mm256_blendv_pd(i2d(d01), i2d(s), i2d(cmp))); - d01 = d2i(_mm256_blend_pd(i2d(max), i2d(min), 0xA)); - - s = d2i(_mm256_permute4x64_pd(i2d(d01), 0x1B)); - cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, s), _mm256_xor_si256(topBit, d01)); - min = d2i(_mm256_blendv_pd(i2d(s), i2d(d01), i2d(cmp))); - max = d2i(_mm256_blendv_pd(i2d(d01), i2d(s), i2d(cmp))); - d01 = d2i(_mm256_blend_pd(i2d(max), i2d(min), 0xC)); - - s = d2i(_mm256_shuffle_pd(i2d(d01), i2d(d01), 0x5)); - cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, s), _mm256_xor_si256(topBit, d01)); - min = d2i(_mm256_blendv_pd(i2d(s), i2d(d01), i2d(cmp))); - max = 
d2i(_mm256_blendv_pd(i2d(d01), i2d(s), i2d(cmp))); - d01 = d2i(_mm256_blend_pd(i2d(max), i2d(min), 0xA)); -} - static INLINE void sort_01v_merge_descending(__m256i& d01) { - __m256i min, max, s, cmp; - __m256i topBit = _mm256_set1_epi64x(1LLU << 63); - - s = d2i(_mm256_permute4x64_pd(i2d(d01), 0x4E)); - cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, s), _mm256_xor_si256(topBit, d01)); - min = d2i(_mm256_blendv_pd(i2d(s), i2d(d01), i2d(cmp))); - max = d2i(_mm256_blendv_pd(i2d(d01), i2d(s), i2d(cmp))); - d01 = d2i(_mm256_blend_pd(i2d(max), i2d(min), 0xC)); - - s = d2i(_mm256_shuffle_pd(i2d(d01), i2d(d01), 0x5)); - cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, s), _mm256_xor_si256(topBit, d01)); - min = d2i(_mm256_blendv_pd(i2d(s), i2d(d01), i2d(cmp))); - max = d2i(_mm256_blendv_pd(i2d(d01), i2d(s), i2d(cmp))); - d01 = d2i(_mm256_blend_pd(i2d(max), i2d(min), 0xA)); - } - static INLINE void sort_02v_ascending(__m256i& d01, __m256i& d02) { - __m256i tmp, cmp; - __m256i topBit = _mm256_set1_epi64x(1LLU << 63); - - sort_01v_ascending(d01); - sort_01v_descending(d02); - - tmp = d02; - cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d01), _mm256_xor_si256(topBit, d02)); - d02 = d2i(_mm256_blendv_pd(i2d(d02), i2d(d01), i2d(cmp))); - d01 = d2i(_mm256_blendv_pd(i2d(d01), i2d(tmp), i2d(cmp))); - - sort_01v_merge_ascending(d01); - sort_01v_merge_ascending(d02); - } - static INLINE void sort_02v_descending(__m256i& d01, __m256i& d02) { - __m256i tmp, cmp; - __m256i topBit = _mm256_set1_epi64x(1LLU << 63); - - sort_01v_descending(d01); - sort_01v_ascending(d02); - - tmp = d02; - cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d01), _mm256_xor_si256(topBit, d02)); - d02 = d2i(_mm256_blendv_pd(i2d(d02), i2d(d01), i2d(cmp))); - d01 = d2i(_mm256_blendv_pd(i2d(d01), i2d(tmp), i2d(cmp))); - - sort_01v_merge_descending(d01); - sort_01v_merge_descending(d02); - } - static INLINE void sort_02v_merge_ascending(__m256i& d01, __m256i& d02) { - __m256i tmp, cmp; - __m256i topBit = _mm256_set1_epi64x(1LLU << 63); - - tmp = d01; - cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d02), _mm256_xor_si256(topBit, d01)); - d01 = d2i(_mm256_blendv_pd(i2d(d02), i2d(d01), i2d(cmp))); - cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d02), _mm256_xor_si256(topBit, tmp)); - d02 = d2i(_mm256_blendv_pd(i2d(tmp), i2d(d02), i2d(cmp))); - - sort_01v_merge_ascending(d01); - sort_01v_merge_ascending(d02); - } - static INLINE void sort_02v_merge_descending(__m256i& d01, __m256i& d02) { - __m256i tmp, cmp; - __m256i topBit = _mm256_set1_epi64x(1LLU << 63); - - tmp = d01; - cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d02), _mm256_xor_si256(topBit, d01)); - d01 = d2i(_mm256_blendv_pd(i2d(d02), i2d(d01), i2d(cmp))); - cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d02), _mm256_xor_si256(topBit, tmp)); - d02 = d2i(_mm256_blendv_pd(i2d(tmp), i2d(d02), i2d(cmp))); - - sort_01v_merge_descending(d01); - sort_01v_merge_descending(d02); - } - static INLINE void sort_03v_ascending(__m256i& d01, __m256i& d02, __m256i& d03) { - __m256i tmp, cmp; - __m256i topBit = _mm256_set1_epi64x(1LLU << 63); - - sort_02v_ascending(d01, d02); - sort_01v_descending(d03); - - tmp = d03; - cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d02), _mm256_xor_si256(topBit, d03)); - d03 = d2i(_mm256_blendv_pd(i2d(d03), i2d(d02), i2d(cmp))); - d02 = d2i(_mm256_blendv_pd(i2d(d02), i2d(tmp), i2d(cmp))); - - sort_02v_merge_ascending(d01, d02); - sort_01v_merge_ascending(d03); - } - static INLINE void sort_03v_descending(__m256i& d01, __m256i& d02, 
__m256i& d03) { - __m256i tmp, cmp; - __m256i topBit = _mm256_set1_epi64x(1LLU << 63); - - sort_02v_descending(d01, d02); - sort_01v_ascending(d03); - - tmp = d03; - cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d02), _mm256_xor_si256(topBit, d03)); - d03 = d2i(_mm256_blendv_pd(i2d(d03), i2d(d02), i2d(cmp))); - d02 = d2i(_mm256_blendv_pd(i2d(d02), i2d(tmp), i2d(cmp))); - - sort_02v_merge_descending(d01, d02); - sort_01v_merge_descending(d03); - } - static INLINE void sort_03v_merge_ascending(__m256i& d01, __m256i& d02, __m256i& d03) { - __m256i tmp, cmp; - __m256i topBit = _mm256_set1_epi64x(1LLU << 63); - - tmp = d01; - cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d03), _mm256_xor_si256(topBit, d01)); - d01 = d2i(_mm256_blendv_pd(i2d(d03), i2d(d01), i2d(cmp))); - cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d03), _mm256_xor_si256(topBit, tmp)); - d03 = d2i(_mm256_blendv_pd(i2d(tmp), i2d(d03), i2d(cmp))); - - sort_02v_merge_ascending(d01, d02); - sort_01v_merge_ascending(d03); - } - static INLINE void sort_03v_merge_descending(__m256i& d01, __m256i& d02, __m256i& d03) { - __m256i tmp, cmp; - __m256i topBit = _mm256_set1_epi64x(1LLU << 63); - - tmp = d01; - cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d03), _mm256_xor_si256(topBit, d01)); - d01 = d2i(_mm256_blendv_pd(i2d(d03), i2d(d01), i2d(cmp))); - cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d03), _mm256_xor_si256(topBit, tmp)); - d03 = d2i(_mm256_blendv_pd(i2d(tmp), i2d(d03), i2d(cmp))); - - sort_02v_merge_descending(d01, d02); - sort_01v_merge_descending(d03); - } - static INLINE void sort_04v_ascending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04) { - __m256i tmp, cmp; - __m256i topBit = _mm256_set1_epi64x(1LLU << 63); - - sort_02v_ascending(d01, d02); - sort_02v_descending(d03, d04); - - tmp = d03; - cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d02), _mm256_xor_si256(topBit, d03)); - d03 = d2i(_mm256_blendv_pd(i2d(d03), i2d(d02), i2d(cmp))); - d02 = d2i(_mm256_blendv_pd(i2d(d02), i2d(tmp), i2d(cmp))); - - tmp = d04; - cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d01), _mm256_xor_si256(topBit, d04)); - d04 = d2i(_mm256_blendv_pd(i2d(d04), i2d(d01), i2d(cmp))); - d01 = d2i(_mm256_blendv_pd(i2d(d01), i2d(tmp), i2d(cmp))); - - sort_02v_merge_ascending(d01, d02); - sort_02v_merge_ascending(d03, d04); - } - static INLINE void sort_04v_descending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04) { - __m256i tmp, cmp; - __m256i topBit = _mm256_set1_epi64x(1LLU << 63); - - sort_02v_descending(d01, d02); - sort_02v_ascending(d03, d04); - - tmp = d03; - cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d02), _mm256_xor_si256(topBit, d03)); - d03 = d2i(_mm256_blendv_pd(i2d(d03), i2d(d02), i2d(cmp))); - d02 = d2i(_mm256_blendv_pd(i2d(d02), i2d(tmp), i2d(cmp))); - - tmp = d04; - cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d01), _mm256_xor_si256(topBit, d04)); - d04 = d2i(_mm256_blendv_pd(i2d(d04), i2d(d01), i2d(cmp))); - d01 = d2i(_mm256_blendv_pd(i2d(d01), i2d(tmp), i2d(cmp))); - - sort_02v_merge_descending(d01, d02); - sort_02v_merge_descending(d03, d04); - } - static INLINE void sort_04v_merge_ascending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04) { - __m256i tmp, cmp; - __m256i topBit = _mm256_set1_epi64x(1LLU << 63); - - tmp = d01; - cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d03), _mm256_xor_si256(topBit, d01)); - d01 = d2i(_mm256_blendv_pd(i2d(d03), i2d(d01), i2d(cmp))); - cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d03), _mm256_xor_si256(topBit, tmp)); - d03 = 
d2i(_mm256_blendv_pd(i2d(tmp), i2d(d03), i2d(cmp))); - - tmp = d02; - cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d04), _mm256_xor_si256(topBit, d02)); - d02 = d2i(_mm256_blendv_pd(i2d(d04), i2d(d02), i2d(cmp))); - cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d04), _mm256_xor_si256(topBit, tmp)); - d04 = d2i(_mm256_blendv_pd(i2d(tmp), i2d(d04), i2d(cmp))); - - sort_02v_merge_ascending(d01, d02); - sort_02v_merge_ascending(d03, d04); - } - static INLINE void sort_04v_merge_descending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04) { - __m256i tmp, cmp; - __m256i topBit = _mm256_set1_epi64x(1LLU << 63); - - tmp = d01; - cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d03), _mm256_xor_si256(topBit, d01)); - d01 = d2i(_mm256_blendv_pd(i2d(d03), i2d(d01), i2d(cmp))); - cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d03), _mm256_xor_si256(topBit, tmp)); - d03 = d2i(_mm256_blendv_pd(i2d(tmp), i2d(d03), i2d(cmp))); - - tmp = d02; - cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d04), _mm256_xor_si256(topBit, d02)); - d02 = d2i(_mm256_blendv_pd(i2d(d04), i2d(d02), i2d(cmp))); - cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d04), _mm256_xor_si256(topBit, tmp)); - d04 = d2i(_mm256_blendv_pd(i2d(tmp), i2d(d04), i2d(cmp))); - - sort_02v_merge_descending(d01, d02); - sort_02v_merge_descending(d03, d04); - } - static INLINE void sort_05v_ascending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05) { - __m256i tmp, cmp; - __m256i topBit = _mm256_set1_epi64x(1LLU << 63); - - sort_04v_ascending(d01, d02, d03, d04); - sort_01v_descending(d05); - - tmp = d05; - cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d04), _mm256_xor_si256(topBit, d05)); - d05 = d2i(_mm256_blendv_pd(i2d(d05), i2d(d04), i2d(cmp))); - d04 = d2i(_mm256_blendv_pd(i2d(d04), i2d(tmp), i2d(cmp))); - - sort_04v_merge_ascending(d01, d02, d03, d04); - sort_01v_merge_ascending(d05); - } - static INLINE void sort_05v_descending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05) { - __m256i tmp, cmp; - __m256i topBit = _mm256_set1_epi64x(1LLU << 63); - - sort_04v_descending(d01, d02, d03, d04); - sort_01v_ascending(d05); - - tmp = d05; - cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d04), _mm256_xor_si256(topBit, d05)); - d05 = d2i(_mm256_blendv_pd(i2d(d05), i2d(d04), i2d(cmp))); - d04 = d2i(_mm256_blendv_pd(i2d(d04), i2d(tmp), i2d(cmp))); - - sort_04v_merge_descending(d01, d02, d03, d04); - sort_01v_merge_descending(d05); - } - static INLINE void sort_05v_merge_ascending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05) { - __m256i tmp, cmp; - __m256i topBit = _mm256_set1_epi64x(1LLU << 63); - - tmp = d01; - cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d05), _mm256_xor_si256(topBit, d01)); - d01 = d2i(_mm256_blendv_pd(i2d(d05), i2d(d01), i2d(cmp))); - cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d05), _mm256_xor_si256(topBit, tmp)); - d05 = d2i(_mm256_blendv_pd(i2d(tmp), i2d(d05), i2d(cmp))); - - sort_04v_merge_ascending(d01, d02, d03, d04); - sort_01v_merge_ascending(d05); - } - static INLINE void sort_05v_merge_descending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05) { - __m256i tmp, cmp; - __m256i topBit = _mm256_set1_epi64x(1LLU << 63); - - tmp = d01; - cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d05), _mm256_xor_si256(topBit, d01)); - d01 = d2i(_mm256_blendv_pd(i2d(d05), i2d(d01), i2d(cmp))); - cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d05), _mm256_xor_si256(topBit, tmp)); - d05 = 
d2i(_mm256_blendv_pd(i2d(tmp), i2d(d05), i2d(cmp))); - - sort_04v_merge_descending(d01, d02, d03, d04); - sort_01v_merge_descending(d05); - } - static INLINE void sort_06v_ascending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06) { - __m256i tmp, cmp; - __m256i topBit = _mm256_set1_epi64x(1LLU << 63); - - sort_04v_ascending(d01, d02, d03, d04); - sort_02v_descending(d05, d06); - - tmp = d05; - cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d04), _mm256_xor_si256(topBit, d05)); - d05 = d2i(_mm256_blendv_pd(i2d(d05), i2d(d04), i2d(cmp))); - d04 = d2i(_mm256_blendv_pd(i2d(d04), i2d(tmp), i2d(cmp))); - - tmp = d06; - cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d03), _mm256_xor_si256(topBit, d06)); - d06 = d2i(_mm256_blendv_pd(i2d(d06), i2d(d03), i2d(cmp))); - d03 = d2i(_mm256_blendv_pd(i2d(d03), i2d(tmp), i2d(cmp))); - - sort_04v_merge_ascending(d01, d02, d03, d04); - sort_02v_merge_ascending(d05, d06); - } - static INLINE void sort_06v_descending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06) { - __m256i tmp, cmp; - __m256i topBit = _mm256_set1_epi64x(1LLU << 63); - - sort_04v_descending(d01, d02, d03, d04); - sort_02v_ascending(d05, d06); - - tmp = d05; - cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d04), _mm256_xor_si256(topBit, d05)); - d05 = d2i(_mm256_blendv_pd(i2d(d05), i2d(d04), i2d(cmp))); - d04 = d2i(_mm256_blendv_pd(i2d(d04), i2d(tmp), i2d(cmp))); - - tmp = d06; - cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d03), _mm256_xor_si256(topBit, d06)); - d06 = d2i(_mm256_blendv_pd(i2d(d06), i2d(d03), i2d(cmp))); - d03 = d2i(_mm256_blendv_pd(i2d(d03), i2d(tmp), i2d(cmp))); - - sort_04v_merge_descending(d01, d02, d03, d04); - sort_02v_merge_descending(d05, d06); - } - static INLINE void sort_06v_merge_ascending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06) { - __m256i tmp, cmp; - __m256i topBit = _mm256_set1_epi64x(1LLU << 63); - - tmp = d01; - cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d05), _mm256_xor_si256(topBit, d01)); - d01 = d2i(_mm256_blendv_pd(i2d(d05), i2d(d01), i2d(cmp))); - cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d05), _mm256_xor_si256(topBit, tmp)); - d05 = d2i(_mm256_blendv_pd(i2d(tmp), i2d(d05), i2d(cmp))); - - tmp = d02; - cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d06), _mm256_xor_si256(topBit, d02)); - d02 = d2i(_mm256_blendv_pd(i2d(d06), i2d(d02), i2d(cmp))); - cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d06), _mm256_xor_si256(topBit, tmp)); - d06 = d2i(_mm256_blendv_pd(i2d(tmp), i2d(d06), i2d(cmp))); - - sort_04v_merge_ascending(d01, d02, d03, d04); - sort_02v_merge_ascending(d05, d06); - } - static INLINE void sort_06v_merge_descending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06) { - __m256i tmp, cmp; - __m256i topBit = _mm256_set1_epi64x(1LLU << 63); - - tmp = d01; - cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d05), _mm256_xor_si256(topBit, d01)); - d01 = d2i(_mm256_blendv_pd(i2d(d05), i2d(d01), i2d(cmp))); - cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d05), _mm256_xor_si256(topBit, tmp)); - d05 = d2i(_mm256_blendv_pd(i2d(tmp), i2d(d05), i2d(cmp))); - - tmp = d02; - cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d06), _mm256_xor_si256(topBit, d02)); - d02 = d2i(_mm256_blendv_pd(i2d(d06), i2d(d02), i2d(cmp))); - cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d06), _mm256_xor_si256(topBit, tmp)); - d06 = d2i(_mm256_blendv_pd(i2d(tmp), i2d(d06), i2d(cmp))); - 
- sort_04v_merge_descending(d01, d02, d03, d04); - sort_02v_merge_descending(d05, d06); - } - static INLINE void sort_07v_ascending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07) { - __m256i tmp, cmp; - __m256i topBit = _mm256_set1_epi64x(1LLU << 63); - - sort_04v_ascending(d01, d02, d03, d04); - sort_03v_descending(d05, d06, d07); - - tmp = d05; - cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d04), _mm256_xor_si256(topBit, d05)); - d05 = d2i(_mm256_blendv_pd(i2d(d05), i2d(d04), i2d(cmp))); - d04 = d2i(_mm256_blendv_pd(i2d(d04), i2d(tmp), i2d(cmp))); - - tmp = d06; - cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d03), _mm256_xor_si256(topBit, d06)); - d06 = d2i(_mm256_blendv_pd(i2d(d06), i2d(d03), i2d(cmp))); - d03 = d2i(_mm256_blendv_pd(i2d(d03), i2d(tmp), i2d(cmp))); - - tmp = d07; - cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d02), _mm256_xor_si256(topBit, d07)); - d07 = d2i(_mm256_blendv_pd(i2d(d07), i2d(d02), i2d(cmp))); - d02 = d2i(_mm256_blendv_pd(i2d(d02), i2d(tmp), i2d(cmp))); - - sort_04v_merge_ascending(d01, d02, d03, d04); - sort_03v_merge_ascending(d05, d06, d07); - } - static INLINE void sort_07v_descending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07) { - __m256i tmp, cmp; - __m256i topBit = _mm256_set1_epi64x(1LLU << 63); - - sort_04v_descending(d01, d02, d03, d04); - sort_03v_ascending(d05, d06, d07); - - tmp = d05; - cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d04), _mm256_xor_si256(topBit, d05)); - d05 = d2i(_mm256_blendv_pd(i2d(d05), i2d(d04), i2d(cmp))); - d04 = d2i(_mm256_blendv_pd(i2d(d04), i2d(tmp), i2d(cmp))); - - tmp = d06; - cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d03), _mm256_xor_si256(topBit, d06)); - d06 = d2i(_mm256_blendv_pd(i2d(d06), i2d(d03), i2d(cmp))); - d03 = d2i(_mm256_blendv_pd(i2d(d03), i2d(tmp), i2d(cmp))); - - tmp = d07; - cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d02), _mm256_xor_si256(topBit, d07)); - d07 = d2i(_mm256_blendv_pd(i2d(d07), i2d(d02), i2d(cmp))); - d02 = d2i(_mm256_blendv_pd(i2d(d02), i2d(tmp), i2d(cmp))); - - sort_04v_merge_descending(d01, d02, d03, d04); - sort_03v_merge_descending(d05, d06, d07); - } - static INLINE void sort_07v_merge_ascending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07) { - __m256i tmp, cmp; - __m256i topBit = _mm256_set1_epi64x(1LLU << 63); - - tmp = d01; - cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d05), _mm256_xor_si256(topBit, d01)); - d01 = d2i(_mm256_blendv_pd(i2d(d05), i2d(d01), i2d(cmp))); - cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d05), _mm256_xor_si256(topBit, tmp)); - d05 = d2i(_mm256_blendv_pd(i2d(tmp), i2d(d05), i2d(cmp))); - - tmp = d02; - cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d06), _mm256_xor_si256(topBit, d02)); - d02 = d2i(_mm256_blendv_pd(i2d(d06), i2d(d02), i2d(cmp))); - cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d06), _mm256_xor_si256(topBit, tmp)); - d06 = d2i(_mm256_blendv_pd(i2d(tmp), i2d(d06), i2d(cmp))); - - tmp = d03; - cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d07), _mm256_xor_si256(topBit, d03)); - d03 = d2i(_mm256_blendv_pd(i2d(d07), i2d(d03), i2d(cmp))); - cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d07), _mm256_xor_si256(topBit, tmp)); - d07 = d2i(_mm256_blendv_pd(i2d(tmp), i2d(d07), i2d(cmp))); - - sort_04v_merge_ascending(d01, d02, d03, d04); - sort_03v_merge_ascending(d05, d06, d07); - } - static INLINE void 
sort_07v_merge_descending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07) { - __m256i tmp, cmp; - __m256i topBit = _mm256_set1_epi64x(1LLU << 63); - - tmp = d01; - cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d05), _mm256_xor_si256(topBit, d01)); - d01 = d2i(_mm256_blendv_pd(i2d(d05), i2d(d01), i2d(cmp))); - cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d05), _mm256_xor_si256(topBit, tmp)); - d05 = d2i(_mm256_blendv_pd(i2d(tmp), i2d(d05), i2d(cmp))); - - tmp = d02; - cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d06), _mm256_xor_si256(topBit, d02)); - d02 = d2i(_mm256_blendv_pd(i2d(d06), i2d(d02), i2d(cmp))); - cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d06), _mm256_xor_si256(topBit, tmp)); - d06 = d2i(_mm256_blendv_pd(i2d(tmp), i2d(d06), i2d(cmp))); - - tmp = d03; - cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d07), _mm256_xor_si256(topBit, d03)); - d03 = d2i(_mm256_blendv_pd(i2d(d07), i2d(d03), i2d(cmp))); - cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d07), _mm256_xor_si256(topBit, tmp)); - d07 = d2i(_mm256_blendv_pd(i2d(tmp), i2d(d07), i2d(cmp))); - - sort_04v_merge_descending(d01, d02, d03, d04); - sort_03v_merge_descending(d05, d06, d07); - } - static INLINE void sort_08v_ascending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08) { - __m256i tmp, cmp; - __m256i topBit = _mm256_set1_epi64x(1LLU << 63); - - sort_04v_ascending(d01, d02, d03, d04); - sort_04v_descending(d05, d06, d07, d08); - - tmp = d05; - cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d04), _mm256_xor_si256(topBit, d05)); - d05 = d2i(_mm256_blendv_pd(i2d(d05), i2d(d04), i2d(cmp))); - d04 = d2i(_mm256_blendv_pd(i2d(d04), i2d(tmp), i2d(cmp))); - - tmp = d06; - cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d03), _mm256_xor_si256(topBit, d06)); - d06 = d2i(_mm256_blendv_pd(i2d(d06), i2d(d03), i2d(cmp))); - d03 = d2i(_mm256_blendv_pd(i2d(d03), i2d(tmp), i2d(cmp))); - - tmp = d07; - cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d02), _mm256_xor_si256(topBit, d07)); - d07 = d2i(_mm256_blendv_pd(i2d(d07), i2d(d02), i2d(cmp))); - d02 = d2i(_mm256_blendv_pd(i2d(d02), i2d(tmp), i2d(cmp))); - - tmp = d08; - cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d01), _mm256_xor_si256(topBit, d08)); - d08 = d2i(_mm256_blendv_pd(i2d(d08), i2d(d01), i2d(cmp))); - d01 = d2i(_mm256_blendv_pd(i2d(d01), i2d(tmp), i2d(cmp))); - - sort_04v_merge_ascending(d01, d02, d03, d04); - sort_04v_merge_ascending(d05, d06, d07, d08); - } - static INLINE void sort_08v_descending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08) { - __m256i tmp, cmp; - __m256i topBit = _mm256_set1_epi64x(1LLU << 63); - - sort_04v_descending(d01, d02, d03, d04); - sort_04v_ascending(d05, d06, d07, d08); - - tmp = d05; - cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d04), _mm256_xor_si256(topBit, d05)); - d05 = d2i(_mm256_blendv_pd(i2d(d05), i2d(d04), i2d(cmp))); - d04 = d2i(_mm256_blendv_pd(i2d(d04), i2d(tmp), i2d(cmp))); - - tmp = d06; - cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d03), _mm256_xor_si256(topBit, d06)); - d06 = d2i(_mm256_blendv_pd(i2d(d06), i2d(d03), i2d(cmp))); - d03 = d2i(_mm256_blendv_pd(i2d(d03), i2d(tmp), i2d(cmp))); - - tmp = d07; - cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d02), _mm256_xor_si256(topBit, d07)); - d07 = d2i(_mm256_blendv_pd(i2d(d07), i2d(d02), i2d(cmp))); - d02 = d2i(_mm256_blendv_pd(i2d(d02), i2d(tmp), 
i2d(cmp))); - - tmp = d08; - cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d01), _mm256_xor_si256(topBit, d08)); - d08 = d2i(_mm256_blendv_pd(i2d(d08), i2d(d01), i2d(cmp))); - d01 = d2i(_mm256_blendv_pd(i2d(d01), i2d(tmp), i2d(cmp))); - - sort_04v_merge_descending(d01, d02, d03, d04); - sort_04v_merge_descending(d05, d06, d07, d08); - } - static INLINE void sort_08v_merge_ascending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08) { - __m256i tmp, cmp; - __m256i topBit = _mm256_set1_epi64x(1LLU << 63); - - tmp = d01; - cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d05), _mm256_xor_si256(topBit, d01)); - d01 = d2i(_mm256_blendv_pd(i2d(d05), i2d(d01), i2d(cmp))); - cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d05), _mm256_xor_si256(topBit, tmp)); - d05 = d2i(_mm256_blendv_pd(i2d(tmp), i2d(d05), i2d(cmp))); - - tmp = d02; - cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d06), _mm256_xor_si256(topBit, d02)); - d02 = d2i(_mm256_blendv_pd(i2d(d06), i2d(d02), i2d(cmp))); - cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d06), _mm256_xor_si256(topBit, tmp)); - d06 = d2i(_mm256_blendv_pd(i2d(tmp), i2d(d06), i2d(cmp))); - - tmp = d03; - cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d07), _mm256_xor_si256(topBit, d03)); - d03 = d2i(_mm256_blendv_pd(i2d(d07), i2d(d03), i2d(cmp))); - cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d07), _mm256_xor_si256(topBit, tmp)); - d07 = d2i(_mm256_blendv_pd(i2d(tmp), i2d(d07), i2d(cmp))); - - tmp = d04; - cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d08), _mm256_xor_si256(topBit, d04)); - d04 = d2i(_mm256_blendv_pd(i2d(d08), i2d(d04), i2d(cmp))); - cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d08), _mm256_xor_si256(topBit, tmp)); - d08 = d2i(_mm256_blendv_pd(i2d(tmp), i2d(d08), i2d(cmp))); - - sort_04v_merge_ascending(d01, d02, d03, d04); - sort_04v_merge_ascending(d05, d06, d07, d08); - } - static INLINE void sort_08v_merge_descending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08) { - __m256i tmp, cmp; - __m256i topBit = _mm256_set1_epi64x(1LLU << 63); - - tmp = d01; - cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d05), _mm256_xor_si256(topBit, d01)); - d01 = d2i(_mm256_blendv_pd(i2d(d05), i2d(d01), i2d(cmp))); - cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d05), _mm256_xor_si256(topBit, tmp)); - d05 = d2i(_mm256_blendv_pd(i2d(tmp), i2d(d05), i2d(cmp))); - - tmp = d02; - cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d06), _mm256_xor_si256(topBit, d02)); - d02 = d2i(_mm256_blendv_pd(i2d(d06), i2d(d02), i2d(cmp))); - cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d06), _mm256_xor_si256(topBit, tmp)); - d06 = d2i(_mm256_blendv_pd(i2d(tmp), i2d(d06), i2d(cmp))); - - tmp = d03; - cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d07), _mm256_xor_si256(topBit, d03)); - d03 = d2i(_mm256_blendv_pd(i2d(d07), i2d(d03), i2d(cmp))); - cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d07), _mm256_xor_si256(topBit, tmp)); - d07 = d2i(_mm256_blendv_pd(i2d(tmp), i2d(d07), i2d(cmp))); - - tmp = d04; - cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d08), _mm256_xor_si256(topBit, d04)); - d04 = d2i(_mm256_blendv_pd(i2d(d08), i2d(d04), i2d(cmp))); - cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d08), _mm256_xor_si256(topBit, tmp)); - d08 = d2i(_mm256_blendv_pd(i2d(tmp), i2d(d08), i2d(cmp))); - - sort_04v_merge_descending(d01, d02, d03, d04); - sort_04v_merge_descending(d05, d06, d07, d08); - } - 
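// Reader's note (not part of the generated file): sort_09v and the larger
// kernels below all decompose as a bitonic merge of "8 + k" vectors — sort
// the first eight ascending and the remaining k descending, compare-exchange
// the two halves across the fold (d08<->d09, d07<->d10, ...), then merge
// each half with sort_08v_merge_* and sort_0kv_merge_* — the standard
// bitonic merge step, carried out four 64-bit lanes at a time.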
static INLINE void sort_09v_ascending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08, __m256i& d09) { - __m256i tmp, cmp; - __m256i topBit = _mm256_set1_epi64x(1LLU << 63); - - sort_08v_ascending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_01v_descending(d09); - - tmp = d09; - cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d08), _mm256_xor_si256(topBit, d09)); - d09 = d2i(_mm256_blendv_pd(i2d(d09), i2d(d08), i2d(cmp))); - d08 = d2i(_mm256_blendv_pd(i2d(d08), i2d(tmp), i2d(cmp))); - - sort_08v_merge_ascending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_01v_merge_ascending(d09); - } - static INLINE void sort_09v_descending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08, __m256i& d09) { - __m256i tmp, cmp; - __m256i topBit = _mm256_set1_epi64x(1LLU << 63); - - sort_08v_descending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_01v_ascending(d09); - - tmp = d09; - cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d08), _mm256_xor_si256(topBit, d09)); - d09 = d2i(_mm256_blendv_pd(i2d(d09), i2d(d08), i2d(cmp))); - d08 = d2i(_mm256_blendv_pd(i2d(d08), i2d(tmp), i2d(cmp))); - - sort_08v_merge_descending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_01v_merge_descending(d09); - } - static INLINE void sort_10v_ascending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08, __m256i& d09, __m256i& d10) { - __m256i tmp, cmp; - __m256i topBit = _mm256_set1_epi64x(1LLU << 63); - - sort_08v_ascending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_02v_descending(d09, d10); - - tmp = d09; - cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d08), _mm256_xor_si256(topBit, d09)); - d09 = d2i(_mm256_blendv_pd(i2d(d09), i2d(d08), i2d(cmp))); - d08 = d2i(_mm256_blendv_pd(i2d(d08), i2d(tmp), i2d(cmp))); - - tmp = d10; - cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d07), _mm256_xor_si256(topBit, d10)); - d10 = d2i(_mm256_blendv_pd(i2d(d10), i2d(d07), i2d(cmp))); - d07 = d2i(_mm256_blendv_pd(i2d(d07), i2d(tmp), i2d(cmp))); - - sort_08v_merge_ascending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_02v_merge_ascending(d09, d10); - } - static INLINE void sort_10v_descending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08, __m256i& d09, __m256i& d10) { - __m256i tmp, cmp; - __m256i topBit = _mm256_set1_epi64x(1LLU << 63); - - sort_08v_descending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_02v_ascending(d09, d10); - - tmp = d09; - cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d08), _mm256_xor_si256(topBit, d09)); - d09 = d2i(_mm256_blendv_pd(i2d(d09), i2d(d08), i2d(cmp))); - d08 = d2i(_mm256_blendv_pd(i2d(d08), i2d(tmp), i2d(cmp))); - - tmp = d10; - cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d07), _mm256_xor_si256(topBit, d10)); - d10 = d2i(_mm256_blendv_pd(i2d(d10), i2d(d07), i2d(cmp))); - d07 = d2i(_mm256_blendv_pd(i2d(d07), i2d(tmp), i2d(cmp))); - - sort_08v_merge_descending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_02v_merge_descending(d09, d10); - } - static INLINE void sort_11v_ascending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08, __m256i& d09, __m256i& d10, __m256i& d11) { - __m256i tmp, cmp; - __m256i topBit = _mm256_set1_epi64x(1LLU << 63); - - sort_08v_ascending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_03v_descending(d09, d10, d11); - - tmp = d09; - 
cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d08), _mm256_xor_si256(topBit, d09)); - d09 = d2i(_mm256_blendv_pd(i2d(d09), i2d(d08), i2d(cmp))); - d08 = d2i(_mm256_blendv_pd(i2d(d08), i2d(tmp), i2d(cmp))); - - tmp = d10; - cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d07), _mm256_xor_si256(topBit, d10)); - d10 = d2i(_mm256_blendv_pd(i2d(d10), i2d(d07), i2d(cmp))); - d07 = d2i(_mm256_blendv_pd(i2d(d07), i2d(tmp), i2d(cmp))); - - tmp = d11; - cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d06), _mm256_xor_si256(topBit, d11)); - d11 = d2i(_mm256_blendv_pd(i2d(d11), i2d(d06), i2d(cmp))); - d06 = d2i(_mm256_blendv_pd(i2d(d06), i2d(tmp), i2d(cmp))); - - sort_08v_merge_ascending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_03v_merge_ascending(d09, d10, d11); - } - static INLINE void sort_11v_descending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08, __m256i& d09, __m256i& d10, __m256i& d11) { - __m256i tmp, cmp; - __m256i topBit = _mm256_set1_epi64x(1LLU << 63); - - sort_08v_descending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_03v_ascending(d09, d10, d11); - - tmp = d09; - cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d08), _mm256_xor_si256(topBit, d09)); - d09 = d2i(_mm256_blendv_pd(i2d(d09), i2d(d08), i2d(cmp))); - d08 = d2i(_mm256_blendv_pd(i2d(d08), i2d(tmp), i2d(cmp))); - - tmp = d10; - cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d07), _mm256_xor_si256(topBit, d10)); - d10 = d2i(_mm256_blendv_pd(i2d(d10), i2d(d07), i2d(cmp))); - d07 = d2i(_mm256_blendv_pd(i2d(d07), i2d(tmp), i2d(cmp))); - - tmp = d11; - cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d06), _mm256_xor_si256(topBit, d11)); - d11 = d2i(_mm256_blendv_pd(i2d(d11), i2d(d06), i2d(cmp))); - d06 = d2i(_mm256_blendv_pd(i2d(d06), i2d(tmp), i2d(cmp))); - - sort_08v_merge_descending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_03v_merge_descending(d09, d10, d11); - } - static INLINE void sort_12v_ascending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08, __m256i& d09, __m256i& d10, __m256i& d11, __m256i& d12) { - __m256i tmp, cmp; - __m256i topBit = _mm256_set1_epi64x(1LLU << 63); - - sort_08v_ascending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_04v_descending(d09, d10, d11, d12); - - tmp = d09; - cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d08), _mm256_xor_si256(topBit, d09)); - d09 = d2i(_mm256_blendv_pd(i2d(d09), i2d(d08), i2d(cmp))); - d08 = d2i(_mm256_blendv_pd(i2d(d08), i2d(tmp), i2d(cmp))); - - tmp = d10; - cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d07), _mm256_xor_si256(topBit, d10)); - d10 = d2i(_mm256_blendv_pd(i2d(d10), i2d(d07), i2d(cmp))); - d07 = d2i(_mm256_blendv_pd(i2d(d07), i2d(tmp), i2d(cmp))); - - tmp = d11; - cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d06), _mm256_xor_si256(topBit, d11)); - d11 = d2i(_mm256_blendv_pd(i2d(d11), i2d(d06), i2d(cmp))); - d06 = d2i(_mm256_blendv_pd(i2d(d06), i2d(tmp), i2d(cmp))); - - tmp = d12; - cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d05), _mm256_xor_si256(topBit, d12)); - d12 = d2i(_mm256_blendv_pd(i2d(d12), i2d(d05), i2d(cmp))); - d05 = d2i(_mm256_blendv_pd(i2d(d05), i2d(tmp), i2d(cmp))); - - sort_08v_merge_ascending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_04v_merge_ascending(d09, d10, d11, d12); - } - static INLINE void sort_12v_descending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08, __m256i& d09, __m256i& d10, 
__m256i& d11, __m256i& d12) { - __m256i tmp, cmp; - __m256i topBit = _mm256_set1_epi64x(1LLU << 63); - - sort_08v_descending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_04v_ascending(d09, d10, d11, d12); - - tmp = d09; - cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d08), _mm256_xor_si256(topBit, d09)); - d09 = d2i(_mm256_blendv_pd(i2d(d09), i2d(d08), i2d(cmp))); - d08 = d2i(_mm256_blendv_pd(i2d(d08), i2d(tmp), i2d(cmp))); - - tmp = d10; - cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d07), _mm256_xor_si256(topBit, d10)); - d10 = d2i(_mm256_blendv_pd(i2d(d10), i2d(d07), i2d(cmp))); - d07 = d2i(_mm256_blendv_pd(i2d(d07), i2d(tmp), i2d(cmp))); - - tmp = d11; - cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d06), _mm256_xor_si256(topBit, d11)); - d11 = d2i(_mm256_blendv_pd(i2d(d11), i2d(d06), i2d(cmp))); - d06 = d2i(_mm256_blendv_pd(i2d(d06), i2d(tmp), i2d(cmp))); - - tmp = d12; - cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d05), _mm256_xor_si256(topBit, d12)); - d12 = d2i(_mm256_blendv_pd(i2d(d12), i2d(d05), i2d(cmp))); - d05 = d2i(_mm256_blendv_pd(i2d(d05), i2d(tmp), i2d(cmp))); - - sort_08v_merge_descending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_04v_merge_descending(d09, d10, d11, d12); - } - static INLINE void sort_13v_ascending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08, __m256i& d09, __m256i& d10, __m256i& d11, __m256i& d12, __m256i& d13) { - __m256i tmp, cmp; - __m256i topBit = _mm256_set1_epi64x(1LLU << 63); - - sort_08v_ascending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_05v_descending(d09, d10, d11, d12, d13); - - tmp = d09; - cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d08), _mm256_xor_si256(topBit, d09)); - d09 = d2i(_mm256_blendv_pd(i2d(d09), i2d(d08), i2d(cmp))); - d08 = d2i(_mm256_blendv_pd(i2d(d08), i2d(tmp), i2d(cmp))); - - tmp = d10; - cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d07), _mm256_xor_si256(topBit, d10)); - d10 = d2i(_mm256_blendv_pd(i2d(d10), i2d(d07), i2d(cmp))); - d07 = d2i(_mm256_blendv_pd(i2d(d07), i2d(tmp), i2d(cmp))); - - tmp = d11; - cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d06), _mm256_xor_si256(topBit, d11)); - d11 = d2i(_mm256_blendv_pd(i2d(d11), i2d(d06), i2d(cmp))); - d06 = d2i(_mm256_blendv_pd(i2d(d06), i2d(tmp), i2d(cmp))); - - tmp = d12; - cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d05), _mm256_xor_si256(topBit, d12)); - d12 = d2i(_mm256_blendv_pd(i2d(d12), i2d(d05), i2d(cmp))); - d05 = d2i(_mm256_blendv_pd(i2d(d05), i2d(tmp), i2d(cmp))); - - tmp = d13; - cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d04), _mm256_xor_si256(topBit, d13)); - d13 = d2i(_mm256_blendv_pd(i2d(d13), i2d(d04), i2d(cmp))); - d04 = d2i(_mm256_blendv_pd(i2d(d04), i2d(tmp), i2d(cmp))); - - sort_08v_merge_ascending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_05v_merge_ascending(d09, d10, d11, d12, d13); - } - static INLINE void sort_13v_descending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08, __m256i& d09, __m256i& d10, __m256i& d11, __m256i& d12, __m256i& d13) { - __m256i tmp, cmp; - __m256i topBit = _mm256_set1_epi64x(1LLU << 63); - - sort_08v_descending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_05v_ascending(d09, d10, d11, d12, d13); - - tmp = d09; - cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d08), _mm256_xor_si256(topBit, d09)); - d09 = d2i(_mm256_blendv_pd(i2d(d09), i2d(d08), i2d(cmp))); - d08 = d2i(_mm256_blendv_pd(i2d(d08), i2d(tmp), i2d(cmp))); - - tmp = 
d10; - cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d07), _mm256_xor_si256(topBit, d10)); - d10 = d2i(_mm256_blendv_pd(i2d(d10), i2d(d07), i2d(cmp))); - d07 = d2i(_mm256_blendv_pd(i2d(d07), i2d(tmp), i2d(cmp))); - - tmp = d11; - cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d06), _mm256_xor_si256(topBit, d11)); - d11 = d2i(_mm256_blendv_pd(i2d(d11), i2d(d06), i2d(cmp))); - d06 = d2i(_mm256_blendv_pd(i2d(d06), i2d(tmp), i2d(cmp))); - - tmp = d12; - cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d05), _mm256_xor_si256(topBit, d12)); - d12 = d2i(_mm256_blendv_pd(i2d(d12), i2d(d05), i2d(cmp))); - d05 = d2i(_mm256_blendv_pd(i2d(d05), i2d(tmp), i2d(cmp))); - - tmp = d13; - cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d04), _mm256_xor_si256(topBit, d13)); - d13 = d2i(_mm256_blendv_pd(i2d(d13), i2d(d04), i2d(cmp))); - d04 = d2i(_mm256_blendv_pd(i2d(d04), i2d(tmp), i2d(cmp))); - - sort_08v_merge_descending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_05v_merge_descending(d09, d10, d11, d12, d13); - } - static INLINE void sort_14v_ascending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08, __m256i& d09, __m256i& d10, __m256i& d11, __m256i& d12, __m256i& d13, __m256i& d14) { - __m256i tmp, cmp; - __m256i topBit = _mm256_set1_epi64x(1LLU << 63); - - sort_08v_ascending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_06v_descending(d09, d10, d11, d12, d13, d14); - - tmp = d09; - cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d08), _mm256_xor_si256(topBit, d09)); - d09 = d2i(_mm256_blendv_pd(i2d(d09), i2d(d08), i2d(cmp))); - d08 = d2i(_mm256_blendv_pd(i2d(d08), i2d(tmp), i2d(cmp))); - - tmp = d10; - cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d07), _mm256_xor_si256(topBit, d10)); - d10 = d2i(_mm256_blendv_pd(i2d(d10), i2d(d07), i2d(cmp))); - d07 = d2i(_mm256_blendv_pd(i2d(d07), i2d(tmp), i2d(cmp))); - - tmp = d11; - cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d06), _mm256_xor_si256(topBit, d11)); - d11 = d2i(_mm256_blendv_pd(i2d(d11), i2d(d06), i2d(cmp))); - d06 = d2i(_mm256_blendv_pd(i2d(d06), i2d(tmp), i2d(cmp))); - - tmp = d12; - cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d05), _mm256_xor_si256(topBit, d12)); - d12 = d2i(_mm256_blendv_pd(i2d(d12), i2d(d05), i2d(cmp))); - d05 = d2i(_mm256_blendv_pd(i2d(d05), i2d(tmp), i2d(cmp))); - - tmp = d13; - cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d04), _mm256_xor_si256(topBit, d13)); - d13 = d2i(_mm256_blendv_pd(i2d(d13), i2d(d04), i2d(cmp))); - d04 = d2i(_mm256_blendv_pd(i2d(d04), i2d(tmp), i2d(cmp))); - - tmp = d14; - cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d03), _mm256_xor_si256(topBit, d14)); - d14 = d2i(_mm256_blendv_pd(i2d(d14), i2d(d03), i2d(cmp))); - d03 = d2i(_mm256_blendv_pd(i2d(d03), i2d(tmp), i2d(cmp))); - - sort_08v_merge_ascending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_06v_merge_ascending(d09, d10, d11, d12, d13, d14); - } - static INLINE void sort_14v_descending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08, __m256i& d09, __m256i& d10, __m256i& d11, __m256i& d12, __m256i& d13, __m256i& d14) { - __m256i tmp, cmp; - __m256i topBit = _mm256_set1_epi64x(1LLU << 63); - - sort_08v_descending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_06v_ascending(d09, d10, d11, d12, d13, d14); - - tmp = d09; - cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d08), _mm256_xor_si256(topBit, d09)); - d09 = d2i(_mm256_blendv_pd(i2d(d09), i2d(d08), i2d(cmp))); - d08 = 
d2i(_mm256_blendv_pd(i2d(d08), i2d(tmp), i2d(cmp))); - - tmp = d10; - cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d07), _mm256_xor_si256(topBit, d10)); - d10 = d2i(_mm256_blendv_pd(i2d(d10), i2d(d07), i2d(cmp))); - d07 = d2i(_mm256_blendv_pd(i2d(d07), i2d(tmp), i2d(cmp))); - - tmp = d11; - cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d06), _mm256_xor_si256(topBit, d11)); - d11 = d2i(_mm256_blendv_pd(i2d(d11), i2d(d06), i2d(cmp))); - d06 = d2i(_mm256_blendv_pd(i2d(d06), i2d(tmp), i2d(cmp))); - - tmp = d12; - cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d05), _mm256_xor_si256(topBit, d12)); - d12 = d2i(_mm256_blendv_pd(i2d(d12), i2d(d05), i2d(cmp))); - d05 = d2i(_mm256_blendv_pd(i2d(d05), i2d(tmp), i2d(cmp))); - - tmp = d13; - cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d04), _mm256_xor_si256(topBit, d13)); - d13 = d2i(_mm256_blendv_pd(i2d(d13), i2d(d04), i2d(cmp))); - d04 = d2i(_mm256_blendv_pd(i2d(d04), i2d(tmp), i2d(cmp))); - - tmp = d14; - cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d03), _mm256_xor_si256(topBit, d14)); - d14 = d2i(_mm256_blendv_pd(i2d(d14), i2d(d03), i2d(cmp))); - d03 = d2i(_mm256_blendv_pd(i2d(d03), i2d(tmp), i2d(cmp))); - - sort_08v_merge_descending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_06v_merge_descending(d09, d10, d11, d12, d13, d14); - } - static INLINE void sort_15v_ascending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08, __m256i& d09, __m256i& d10, __m256i& d11, __m256i& d12, __m256i& d13, __m256i& d14, __m256i& d15) { - __m256i tmp, cmp; - __m256i topBit = _mm256_set1_epi64x(1LLU << 63); - - sort_08v_ascending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_07v_descending(d09, d10, d11, d12, d13, d14, d15); - - tmp = d09; - cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d08), _mm256_xor_si256(topBit, d09)); - d09 = d2i(_mm256_blendv_pd(i2d(d09), i2d(d08), i2d(cmp))); - d08 = d2i(_mm256_blendv_pd(i2d(d08), i2d(tmp), i2d(cmp))); - - tmp = d10; - cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d07), _mm256_xor_si256(topBit, d10)); - d10 = d2i(_mm256_blendv_pd(i2d(d10), i2d(d07), i2d(cmp))); - d07 = d2i(_mm256_blendv_pd(i2d(d07), i2d(tmp), i2d(cmp))); - - tmp = d11; - cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d06), _mm256_xor_si256(topBit, d11)); - d11 = d2i(_mm256_blendv_pd(i2d(d11), i2d(d06), i2d(cmp))); - d06 = d2i(_mm256_blendv_pd(i2d(d06), i2d(tmp), i2d(cmp))); - - tmp = d12; - cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d05), _mm256_xor_si256(topBit, d12)); - d12 = d2i(_mm256_blendv_pd(i2d(d12), i2d(d05), i2d(cmp))); - d05 = d2i(_mm256_blendv_pd(i2d(d05), i2d(tmp), i2d(cmp))); - - tmp = d13; - cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d04), _mm256_xor_si256(topBit, d13)); - d13 = d2i(_mm256_blendv_pd(i2d(d13), i2d(d04), i2d(cmp))); - d04 = d2i(_mm256_blendv_pd(i2d(d04), i2d(tmp), i2d(cmp))); - - tmp = d14; - cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d03), _mm256_xor_si256(topBit, d14)); - d14 = d2i(_mm256_blendv_pd(i2d(d14), i2d(d03), i2d(cmp))); - d03 = d2i(_mm256_blendv_pd(i2d(d03), i2d(tmp), i2d(cmp))); - - tmp = d15; - cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d02), _mm256_xor_si256(topBit, d15)); - d15 = d2i(_mm256_blendv_pd(i2d(d15), i2d(d02), i2d(cmp))); - d02 = d2i(_mm256_blendv_pd(i2d(d02), i2d(tmp), i2d(cmp))); - - sort_08v_merge_ascending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_07v_merge_ascending(d09, d10, d11, d12, d13, d14, d15); - } - static INLINE void sort_15v_descending(__m256i& d01, 
__m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08, __m256i& d09, __m256i& d10, __m256i& d11, __m256i& d12, __m256i& d13, __m256i& d14, __m256i& d15) { - __m256i tmp, cmp; - __m256i topBit = _mm256_set1_epi64x(1LLU << 63); - - sort_08v_descending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_07v_ascending(d09, d10, d11, d12, d13, d14, d15); - - tmp = d09; - cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d08), _mm256_xor_si256(topBit, d09)); - d09 = d2i(_mm256_blendv_pd(i2d(d09), i2d(d08), i2d(cmp))); - d08 = d2i(_mm256_blendv_pd(i2d(d08), i2d(tmp), i2d(cmp))); - - tmp = d10; - cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d07), _mm256_xor_si256(topBit, d10)); - d10 = d2i(_mm256_blendv_pd(i2d(d10), i2d(d07), i2d(cmp))); - d07 = d2i(_mm256_blendv_pd(i2d(d07), i2d(tmp), i2d(cmp))); - - tmp = d11; - cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d06), _mm256_xor_si256(topBit, d11)); - d11 = d2i(_mm256_blendv_pd(i2d(d11), i2d(d06), i2d(cmp))); - d06 = d2i(_mm256_blendv_pd(i2d(d06), i2d(tmp), i2d(cmp))); - - tmp = d12; - cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d05), _mm256_xor_si256(topBit, d12)); - d12 = d2i(_mm256_blendv_pd(i2d(d12), i2d(d05), i2d(cmp))); - d05 = d2i(_mm256_blendv_pd(i2d(d05), i2d(tmp), i2d(cmp))); - - tmp = d13; - cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d04), _mm256_xor_si256(topBit, d13)); - d13 = d2i(_mm256_blendv_pd(i2d(d13), i2d(d04), i2d(cmp))); - d04 = d2i(_mm256_blendv_pd(i2d(d04), i2d(tmp), i2d(cmp))); - - tmp = d14; - cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d03), _mm256_xor_si256(topBit, d14)); - d14 = d2i(_mm256_blendv_pd(i2d(d14), i2d(d03), i2d(cmp))); - d03 = d2i(_mm256_blendv_pd(i2d(d03), i2d(tmp), i2d(cmp))); - - tmp = d15; - cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d02), _mm256_xor_si256(topBit, d15)); - d15 = d2i(_mm256_blendv_pd(i2d(d15), i2d(d02), i2d(cmp))); - d02 = d2i(_mm256_blendv_pd(i2d(d02), i2d(tmp), i2d(cmp))); - - sort_08v_merge_descending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_07v_merge_descending(d09, d10, d11, d12, d13, d14, d15); - } - static INLINE void sort_16v_ascending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08, __m256i& d09, __m256i& d10, __m256i& d11, __m256i& d12, __m256i& d13, __m256i& d14, __m256i& d15, __m256i& d16) { - __m256i tmp, cmp; - __m256i topBit = _mm256_set1_epi64x(1LLU << 63); - - sort_08v_ascending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_08v_descending(d09, d10, d11, d12, d13, d14, d15, d16); - - tmp = d09; - cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d08), _mm256_xor_si256(topBit, d09)); - d09 = d2i(_mm256_blendv_pd(i2d(d09), i2d(d08), i2d(cmp))); - d08 = d2i(_mm256_blendv_pd(i2d(d08), i2d(tmp), i2d(cmp))); - - tmp = d10; - cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d07), _mm256_xor_si256(topBit, d10)); - d10 = d2i(_mm256_blendv_pd(i2d(d10), i2d(d07), i2d(cmp))); - d07 = d2i(_mm256_blendv_pd(i2d(d07), i2d(tmp), i2d(cmp))); - - tmp = d11; - cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d06), _mm256_xor_si256(topBit, d11)); - d11 = d2i(_mm256_blendv_pd(i2d(d11), i2d(d06), i2d(cmp))); - d06 = d2i(_mm256_blendv_pd(i2d(d06), i2d(tmp), i2d(cmp))); - - tmp = d12; - cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d05), _mm256_xor_si256(topBit, d12)); - d12 = d2i(_mm256_blendv_pd(i2d(d12), i2d(d05), i2d(cmp))); - d05 = d2i(_mm256_blendv_pd(i2d(d05), i2d(tmp), i2d(cmp))); - - tmp = d13; - cmp = 
_mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d04), _mm256_xor_si256(topBit, d13)); - d13 = d2i(_mm256_blendv_pd(i2d(d13), i2d(d04), i2d(cmp))); - d04 = d2i(_mm256_blendv_pd(i2d(d04), i2d(tmp), i2d(cmp))); - - tmp = d14; - cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d03), _mm256_xor_si256(topBit, d14)); - d14 = d2i(_mm256_blendv_pd(i2d(d14), i2d(d03), i2d(cmp))); - d03 = d2i(_mm256_blendv_pd(i2d(d03), i2d(tmp), i2d(cmp))); - - tmp = d15; - cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d02), _mm256_xor_si256(topBit, d15)); - d15 = d2i(_mm256_blendv_pd(i2d(d15), i2d(d02), i2d(cmp))); - d02 = d2i(_mm256_blendv_pd(i2d(d02), i2d(tmp), i2d(cmp))); - - tmp = d16; - cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d01), _mm256_xor_si256(topBit, d16)); - d16 = d2i(_mm256_blendv_pd(i2d(d16), i2d(d01), i2d(cmp))); - d01 = d2i(_mm256_blendv_pd(i2d(d01), i2d(tmp), i2d(cmp))); - - sort_08v_merge_ascending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_08v_merge_ascending(d09, d10, d11, d12, d13, d14, d15, d16); - } - static INLINE void sort_16v_descending(__m256i& d01, __m256i& d02, __m256i& d03, __m256i& d04, __m256i& d05, __m256i& d06, __m256i& d07, __m256i& d08, __m256i& d09, __m256i& d10, __m256i& d11, __m256i& d12, __m256i& d13, __m256i& d14, __m256i& d15, __m256i& d16) { - __m256i tmp, cmp; - __m256i topBit = _mm256_set1_epi64x(1LLU << 63); - - sort_08v_descending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_08v_ascending(d09, d10, d11, d12, d13, d14, d15, d16); - - tmp = d09; - cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d08), _mm256_xor_si256(topBit, d09)); - d09 = d2i(_mm256_blendv_pd(i2d(d09), i2d(d08), i2d(cmp))); - d08 = d2i(_mm256_blendv_pd(i2d(d08), i2d(tmp), i2d(cmp))); - - tmp = d10; - cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d07), _mm256_xor_si256(topBit, d10)); - d10 = d2i(_mm256_blendv_pd(i2d(d10), i2d(d07), i2d(cmp))); - d07 = d2i(_mm256_blendv_pd(i2d(d07), i2d(tmp), i2d(cmp))); - - tmp = d11; - cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d06), _mm256_xor_si256(topBit, d11)); - d11 = d2i(_mm256_blendv_pd(i2d(d11), i2d(d06), i2d(cmp))); - d06 = d2i(_mm256_blendv_pd(i2d(d06), i2d(tmp), i2d(cmp))); - - tmp = d12; - cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d05), _mm256_xor_si256(topBit, d12)); - d12 = d2i(_mm256_blendv_pd(i2d(d12), i2d(d05), i2d(cmp))); - d05 = d2i(_mm256_blendv_pd(i2d(d05), i2d(tmp), i2d(cmp))); - - tmp = d13; - cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d04), _mm256_xor_si256(topBit, d13)); - d13 = d2i(_mm256_blendv_pd(i2d(d13), i2d(d04), i2d(cmp))); - d04 = d2i(_mm256_blendv_pd(i2d(d04), i2d(tmp), i2d(cmp))); - - tmp = d14; - cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d03), _mm256_xor_si256(topBit, d14)); - d14 = d2i(_mm256_blendv_pd(i2d(d14), i2d(d03), i2d(cmp))); - d03 = d2i(_mm256_blendv_pd(i2d(d03), i2d(tmp), i2d(cmp))); - - tmp = d15; - cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d02), _mm256_xor_si256(topBit, d15)); - d15 = d2i(_mm256_blendv_pd(i2d(d15), i2d(d02), i2d(cmp))); - d02 = d2i(_mm256_blendv_pd(i2d(d02), i2d(tmp), i2d(cmp))); - - tmp = d16; - cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, d01), _mm256_xor_si256(topBit, d16)); - d16 = d2i(_mm256_blendv_pd(i2d(d16), i2d(d01), i2d(cmp))); - d01 = d2i(_mm256_blendv_pd(i2d(d01), i2d(tmp), i2d(cmp))); - - sort_08v_merge_descending(d01, d02, d03, d04, d05, d06, d07, d08); - sort_08v_merge_descending(d09, d10, d11, d12, d13, d14, d15, d16); - } - -static NOINLINE void sort_01v(uint64_t *ptr) { - __m256i d01 = _mm256_lddqu_si256((__m256i const *) 
ptr + 0);; - sort_01v_ascending(d01); - _mm256_storeu_si256((__m256i *) ptr + 0, d01); -} - -static NOINLINE void sort_02v(uint64_t *ptr) { - __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0);; - __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1);; - sort_02v_ascending(d01, d02); - _mm256_storeu_si256((__m256i *) ptr + 0, d01); - _mm256_storeu_si256((__m256i *) ptr + 1, d02); -} - -static NOINLINE void sort_03v(uint64_t *ptr) { - __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0);; - __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1);; - __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2);; - sort_03v_ascending(d01, d02, d03); - _mm256_storeu_si256((__m256i *) ptr + 0, d01); - _mm256_storeu_si256((__m256i *) ptr + 1, d02); - _mm256_storeu_si256((__m256i *) ptr + 2, d03); -} - -static NOINLINE void sort_04v(uint64_t *ptr) { - __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0);; - __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1);; - __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2);; - __m256i d04 = _mm256_lddqu_si256((__m256i const *) ptr + 3);; - sort_04v_ascending(d01, d02, d03, d04); - _mm256_storeu_si256((__m256i *) ptr + 0, d01); - _mm256_storeu_si256((__m256i *) ptr + 1, d02); - _mm256_storeu_si256((__m256i *) ptr + 2, d03); - _mm256_storeu_si256((__m256i *) ptr + 3, d04); -} - -static NOINLINE void sort_05v(uint64_t *ptr) { - __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0);; - __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1);; - __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2);; - __m256i d04 = _mm256_lddqu_si256((__m256i const *) ptr + 3);; - __m256i d05 = _mm256_lddqu_si256((__m256i const *) ptr + 4);; - sort_05v_ascending(d01, d02, d03, d04, d05); - _mm256_storeu_si256((__m256i *) ptr + 0, d01); - _mm256_storeu_si256((__m256i *) ptr + 1, d02); - _mm256_storeu_si256((__m256i *) ptr + 2, d03); - _mm256_storeu_si256((__m256i *) ptr + 3, d04); - _mm256_storeu_si256((__m256i *) ptr + 4, d05); -} - -static NOINLINE void sort_06v(uint64_t *ptr) { - __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0);; - __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1);; - __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2);; - __m256i d04 = _mm256_lddqu_si256((__m256i const *) ptr + 3);; - __m256i d05 = _mm256_lddqu_si256((__m256i const *) ptr + 4);; - __m256i d06 = _mm256_lddqu_si256((__m256i const *) ptr + 5);; - sort_06v_ascending(d01, d02, d03, d04, d05, d06); - _mm256_storeu_si256((__m256i *) ptr + 0, d01); - _mm256_storeu_si256((__m256i *) ptr + 1, d02); - _mm256_storeu_si256((__m256i *) ptr + 2, d03); - _mm256_storeu_si256((__m256i *) ptr + 3, d04); - _mm256_storeu_si256((__m256i *) ptr + 4, d05); - _mm256_storeu_si256((__m256i *) ptr + 5, d06); -} - -static NOINLINE void sort_07v(uint64_t *ptr) { - __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0);; - __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1);; - __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2);; - __m256i d04 = _mm256_lddqu_si256((__m256i const *) ptr + 3);; - __m256i d05 = _mm256_lddqu_si256((__m256i const *) ptr + 4);; - __m256i d06 = _mm256_lddqu_si256((__m256i const *) ptr + 5);; - __m256i d07 = _mm256_lddqu_si256((__m256i const *) ptr + 6);; - sort_07v_ascending(d01, d02, d03, d04, d05, d06, d07); - _mm256_storeu_si256((__m256i *) ptr + 0, d01); - _mm256_storeu_si256((__m256i *) ptr + 1, d02); - _mm256_storeu_si256((__m256i *) ptr + 2, d03); - 
_mm256_storeu_si256((__m256i *) ptr + 3, d04); - _mm256_storeu_si256((__m256i *) ptr + 4, d05); - _mm256_storeu_si256((__m256i *) ptr + 5, d06); - _mm256_storeu_si256((__m256i *) ptr + 6, d07); -} - -static NOINLINE void sort_08v(uint64_t *ptr) { - __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0);; - __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1);; - __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2);; - __m256i d04 = _mm256_lddqu_si256((__m256i const *) ptr + 3);; - __m256i d05 = _mm256_lddqu_si256((__m256i const *) ptr + 4);; - __m256i d06 = _mm256_lddqu_si256((__m256i const *) ptr + 5);; - __m256i d07 = _mm256_lddqu_si256((__m256i const *) ptr + 6);; - __m256i d08 = _mm256_lddqu_si256((__m256i const *) ptr + 7);; - sort_08v_ascending(d01, d02, d03, d04, d05, d06, d07, d08); - _mm256_storeu_si256((__m256i *) ptr + 0, d01); - _mm256_storeu_si256((__m256i *) ptr + 1, d02); - _mm256_storeu_si256((__m256i *) ptr + 2, d03); - _mm256_storeu_si256((__m256i *) ptr + 3, d04); - _mm256_storeu_si256((__m256i *) ptr + 4, d05); - _mm256_storeu_si256((__m256i *) ptr + 5, d06); - _mm256_storeu_si256((__m256i *) ptr + 6, d07); - _mm256_storeu_si256((__m256i *) ptr + 7, d08); -} - -static NOINLINE void sort_09v(uint64_t *ptr) { - __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0);; - __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1);; - __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2);; - __m256i d04 = _mm256_lddqu_si256((__m256i const *) ptr + 3);; - __m256i d05 = _mm256_lddqu_si256((__m256i const *) ptr + 4);; - __m256i d06 = _mm256_lddqu_si256((__m256i const *) ptr + 5);; - __m256i d07 = _mm256_lddqu_si256((__m256i const *) ptr + 6);; - __m256i d08 = _mm256_lddqu_si256((__m256i const *) ptr + 7);; - __m256i d09 = _mm256_lddqu_si256((__m256i const *) ptr + 8);; - sort_09v_ascending(d01, d02, d03, d04, d05, d06, d07, d08, d09); - _mm256_storeu_si256((__m256i *) ptr + 0, d01); - _mm256_storeu_si256((__m256i *) ptr + 1, d02); - _mm256_storeu_si256((__m256i *) ptr + 2, d03); - _mm256_storeu_si256((__m256i *) ptr + 3, d04); - _mm256_storeu_si256((__m256i *) ptr + 4, d05); - _mm256_storeu_si256((__m256i *) ptr + 5, d06); - _mm256_storeu_si256((__m256i *) ptr + 6, d07); - _mm256_storeu_si256((__m256i *) ptr + 7, d08); - _mm256_storeu_si256((__m256i *) ptr + 8, d09); -} - -static NOINLINE void sort_10v(uint64_t *ptr) { - __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0);; - __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1);; - __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2);; - __m256i d04 = _mm256_lddqu_si256((__m256i const *) ptr + 3);; - __m256i d05 = _mm256_lddqu_si256((__m256i const *) ptr + 4);; - __m256i d06 = _mm256_lddqu_si256((__m256i const *) ptr + 5);; - __m256i d07 = _mm256_lddqu_si256((__m256i const *) ptr + 6);; - __m256i d08 = _mm256_lddqu_si256((__m256i const *) ptr + 7);; - __m256i d09 = _mm256_lddqu_si256((__m256i const *) ptr + 8);; - __m256i d10 = _mm256_lddqu_si256((__m256i const *) ptr + 9);; - sort_10v_ascending(d01, d02, d03, d04, d05, d06, d07, d08, d09, d10); - _mm256_storeu_si256((__m256i *) ptr + 0, d01); - _mm256_storeu_si256((__m256i *) ptr + 1, d02); - _mm256_storeu_si256((__m256i *) ptr + 2, d03); - _mm256_storeu_si256((__m256i *) ptr + 3, d04); - _mm256_storeu_si256((__m256i *) ptr + 4, d05); - _mm256_storeu_si256((__m256i *) ptr + 5, d06); - _mm256_storeu_si256((__m256i *) ptr + 6, d07); - _mm256_storeu_si256((__m256i *) ptr + 7, d08); - _mm256_storeu_si256((__m256i *) ptr + 8, 
d09); - _mm256_storeu_si256((__m256i *) ptr + 9, d10); -} - -static NOINLINE void sort_11v(uint64_t *ptr) { - __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0);; - __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1);; - __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2);; - __m256i d04 = _mm256_lddqu_si256((__m256i const *) ptr + 3);; - __m256i d05 = _mm256_lddqu_si256((__m256i const *) ptr + 4);; - __m256i d06 = _mm256_lddqu_si256((__m256i const *) ptr + 5);; - __m256i d07 = _mm256_lddqu_si256((__m256i const *) ptr + 6);; - __m256i d08 = _mm256_lddqu_si256((__m256i const *) ptr + 7);; - __m256i d09 = _mm256_lddqu_si256((__m256i const *) ptr + 8);; - __m256i d10 = _mm256_lddqu_si256((__m256i const *) ptr + 9);; - __m256i d11 = _mm256_lddqu_si256((__m256i const *) ptr + 10);; - sort_11v_ascending(d01, d02, d03, d04, d05, d06, d07, d08, d09, d10, d11); - _mm256_storeu_si256((__m256i *) ptr + 0, d01); - _mm256_storeu_si256((__m256i *) ptr + 1, d02); - _mm256_storeu_si256((__m256i *) ptr + 2, d03); - _mm256_storeu_si256((__m256i *) ptr + 3, d04); - _mm256_storeu_si256((__m256i *) ptr + 4, d05); - _mm256_storeu_si256((__m256i *) ptr + 5, d06); - _mm256_storeu_si256((__m256i *) ptr + 6, d07); - _mm256_storeu_si256((__m256i *) ptr + 7, d08); - _mm256_storeu_si256((__m256i *) ptr + 8, d09); - _mm256_storeu_si256((__m256i *) ptr + 9, d10); - _mm256_storeu_si256((__m256i *) ptr + 10, d11); -} - -static NOINLINE void sort_12v(uint64_t *ptr) { - __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0);; - __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1);; - __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2);; - __m256i d04 = _mm256_lddqu_si256((__m256i const *) ptr + 3);; - __m256i d05 = _mm256_lddqu_si256((__m256i const *) ptr + 4);; - __m256i d06 = _mm256_lddqu_si256((__m256i const *) ptr + 5);; - __m256i d07 = _mm256_lddqu_si256((__m256i const *) ptr + 6);; - __m256i d08 = _mm256_lddqu_si256((__m256i const *) ptr + 7);; - __m256i d09 = _mm256_lddqu_si256((__m256i const *) ptr + 8);; - __m256i d10 = _mm256_lddqu_si256((__m256i const *) ptr + 9);; - __m256i d11 = _mm256_lddqu_si256((__m256i const *) ptr + 10);; - __m256i d12 = _mm256_lddqu_si256((__m256i const *) ptr + 11);; - sort_12v_ascending(d01, d02, d03, d04, d05, d06, d07, d08, d09, d10, d11, d12); - _mm256_storeu_si256((__m256i *) ptr + 0, d01); - _mm256_storeu_si256((__m256i *) ptr + 1, d02); - _mm256_storeu_si256((__m256i *) ptr + 2, d03); - _mm256_storeu_si256((__m256i *) ptr + 3, d04); - _mm256_storeu_si256((__m256i *) ptr + 4, d05); - _mm256_storeu_si256((__m256i *) ptr + 5, d06); - _mm256_storeu_si256((__m256i *) ptr + 6, d07); - _mm256_storeu_si256((__m256i *) ptr + 7, d08); - _mm256_storeu_si256((__m256i *) ptr + 8, d09); - _mm256_storeu_si256((__m256i *) ptr + 9, d10); - _mm256_storeu_si256((__m256i *) ptr + 10, d11); - _mm256_storeu_si256((__m256i *) ptr + 11, d12); -} - -static NOINLINE void sort_13v(uint64_t *ptr) { - __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0);; - __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1);; - __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2);; - __m256i d04 = _mm256_lddqu_si256((__m256i const *) ptr + 3);; - __m256i d05 = _mm256_lddqu_si256((__m256i const *) ptr + 4);; - __m256i d06 = _mm256_lddqu_si256((__m256i const *) ptr + 5);; - __m256i d07 = _mm256_lddqu_si256((__m256i const *) ptr + 6);; - __m256i d08 = _mm256_lddqu_si256((__m256i const *) ptr + 7);; - __m256i d09 = _mm256_lddqu_si256((__m256i const *) ptr + 8);; 
- __m256i d10 = _mm256_lddqu_si256((__m256i const *) ptr + 9);; - __m256i d11 = _mm256_lddqu_si256((__m256i const *) ptr + 10);; - __m256i d12 = _mm256_lddqu_si256((__m256i const *) ptr + 11);; - __m256i d13 = _mm256_lddqu_si256((__m256i const *) ptr + 12);; - sort_13v_ascending(d01, d02, d03, d04, d05, d06, d07, d08, d09, d10, d11, d12, d13); - _mm256_storeu_si256((__m256i *) ptr + 0, d01); - _mm256_storeu_si256((__m256i *) ptr + 1, d02); - _mm256_storeu_si256((__m256i *) ptr + 2, d03); - _mm256_storeu_si256((__m256i *) ptr + 3, d04); - _mm256_storeu_si256((__m256i *) ptr + 4, d05); - _mm256_storeu_si256((__m256i *) ptr + 5, d06); - _mm256_storeu_si256((__m256i *) ptr + 6, d07); - _mm256_storeu_si256((__m256i *) ptr + 7, d08); - _mm256_storeu_si256((__m256i *) ptr + 8, d09); - _mm256_storeu_si256((__m256i *) ptr + 9, d10); - _mm256_storeu_si256((__m256i *) ptr + 10, d11); - _mm256_storeu_si256((__m256i *) ptr + 11, d12); - _mm256_storeu_si256((__m256i *) ptr + 12, d13); -} - -static NOINLINE void sort_14v(uint64_t *ptr) { - __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0);; - __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1);; - __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2);; - __m256i d04 = _mm256_lddqu_si256((__m256i const *) ptr + 3);; - __m256i d05 = _mm256_lddqu_si256((__m256i const *) ptr + 4);; - __m256i d06 = _mm256_lddqu_si256((__m256i const *) ptr + 5);; - __m256i d07 = _mm256_lddqu_si256((__m256i const *) ptr + 6);; - __m256i d08 = _mm256_lddqu_si256((__m256i const *) ptr + 7);; - __m256i d09 = _mm256_lddqu_si256((__m256i const *) ptr + 8);; - __m256i d10 = _mm256_lddqu_si256((__m256i const *) ptr + 9);; - __m256i d11 = _mm256_lddqu_si256((__m256i const *) ptr + 10);; - __m256i d12 = _mm256_lddqu_si256((__m256i const *) ptr + 11);; - __m256i d13 = _mm256_lddqu_si256((__m256i const *) ptr + 12);; - __m256i d14 = _mm256_lddqu_si256((__m256i const *) ptr + 13);; - sort_14v_ascending(d01, d02, d03, d04, d05, d06, d07, d08, d09, d10, d11, d12, d13, d14); - _mm256_storeu_si256((__m256i *) ptr + 0, d01); - _mm256_storeu_si256((__m256i *) ptr + 1, d02); - _mm256_storeu_si256((__m256i *) ptr + 2, d03); - _mm256_storeu_si256((__m256i *) ptr + 3, d04); - _mm256_storeu_si256((__m256i *) ptr + 4, d05); - _mm256_storeu_si256((__m256i *) ptr + 5, d06); - _mm256_storeu_si256((__m256i *) ptr + 6, d07); - _mm256_storeu_si256((__m256i *) ptr + 7, d08); - _mm256_storeu_si256((__m256i *) ptr + 8, d09); - _mm256_storeu_si256((__m256i *) ptr + 9, d10); - _mm256_storeu_si256((__m256i *) ptr + 10, d11); - _mm256_storeu_si256((__m256i *) ptr + 11, d12); - _mm256_storeu_si256((__m256i *) ptr + 12, d13); - _mm256_storeu_si256((__m256i *) ptr + 13, d14); -} - -static NOINLINE void sort_15v(uint64_t *ptr) { - __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0);; - __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1);; - __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2);; - __m256i d04 = _mm256_lddqu_si256((__m256i const *) ptr + 3);; - __m256i d05 = _mm256_lddqu_si256((__m256i const *) ptr + 4);; - __m256i d06 = _mm256_lddqu_si256((__m256i const *) ptr + 5);; - __m256i d07 = _mm256_lddqu_si256((__m256i const *) ptr + 6);; - __m256i d08 = _mm256_lddqu_si256((__m256i const *) ptr + 7);; - __m256i d09 = _mm256_lddqu_si256((__m256i const *) ptr + 8);; - __m256i d10 = _mm256_lddqu_si256((__m256i const *) ptr + 9);; - __m256i d11 = _mm256_lddqu_si256((__m256i const *) ptr + 10);; - __m256i d12 = _mm256_lddqu_si256((__m256i const *) ptr + 11);; - __m256i 
d13 = _mm256_lddqu_si256((__m256i const *) ptr + 12);; - __m256i d14 = _mm256_lddqu_si256((__m256i const *) ptr + 13);; - __m256i d15 = _mm256_lddqu_si256((__m256i const *) ptr + 14);; - sort_15v_ascending(d01, d02, d03, d04, d05, d06, d07, d08, d09, d10, d11, d12, d13, d14, d15); - _mm256_storeu_si256((__m256i *) ptr + 0, d01); - _mm256_storeu_si256((__m256i *) ptr + 1, d02); - _mm256_storeu_si256((__m256i *) ptr + 2, d03); - _mm256_storeu_si256((__m256i *) ptr + 3, d04); - _mm256_storeu_si256((__m256i *) ptr + 4, d05); - _mm256_storeu_si256((__m256i *) ptr + 5, d06); - _mm256_storeu_si256((__m256i *) ptr + 6, d07); - _mm256_storeu_si256((__m256i *) ptr + 7, d08); - _mm256_storeu_si256((__m256i *) ptr + 8, d09); - _mm256_storeu_si256((__m256i *) ptr + 9, d10); - _mm256_storeu_si256((__m256i *) ptr + 10, d11); - _mm256_storeu_si256((__m256i *) ptr + 11, d12); - _mm256_storeu_si256((__m256i *) ptr + 12, d13); - _mm256_storeu_si256((__m256i *) ptr + 13, d14); - _mm256_storeu_si256((__m256i *) ptr + 14, d15); -} - -static NOINLINE void sort_16v(uint64_t *ptr) { - __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0);; - __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1);; - __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2);; - __m256i d04 = _mm256_lddqu_si256((__m256i const *) ptr + 3);; - __m256i d05 = _mm256_lddqu_si256((__m256i const *) ptr + 4);; - __m256i d06 = _mm256_lddqu_si256((__m256i const *) ptr + 5);; - __m256i d07 = _mm256_lddqu_si256((__m256i const *) ptr + 6);; - __m256i d08 = _mm256_lddqu_si256((__m256i const *) ptr + 7);; - __m256i d09 = _mm256_lddqu_si256((__m256i const *) ptr + 8);; - __m256i d10 = _mm256_lddqu_si256((__m256i const *) ptr + 9);; - __m256i d11 = _mm256_lddqu_si256((__m256i const *) ptr + 10);; - __m256i d12 = _mm256_lddqu_si256((__m256i const *) ptr + 11);; - __m256i d13 = _mm256_lddqu_si256((__m256i const *) ptr + 12);; - __m256i d14 = _mm256_lddqu_si256((__m256i const *) ptr + 13);; - __m256i d15 = _mm256_lddqu_si256((__m256i const *) ptr + 14);; - __m256i d16 = _mm256_lddqu_si256((__m256i const *) ptr + 15);; - sort_16v_ascending(d01, d02, d03, d04, d05, d06, d07, d08, d09, d10, d11, d12, d13, d14, d15, d16); - _mm256_storeu_si256((__m256i *) ptr + 0, d01); - _mm256_storeu_si256((__m256i *) ptr + 1, d02); - _mm256_storeu_si256((__m256i *) ptr + 2, d03); - _mm256_storeu_si256((__m256i *) ptr + 3, d04); - _mm256_storeu_si256((__m256i *) ptr + 4, d05); - _mm256_storeu_si256((__m256i *) ptr + 5, d06); - _mm256_storeu_si256((__m256i *) ptr + 6, d07); - _mm256_storeu_si256((__m256i *) ptr + 7, d08); - _mm256_storeu_si256((__m256i *) ptr + 8, d09); - _mm256_storeu_si256((__m256i *) ptr + 9, d10); - _mm256_storeu_si256((__m256i *) ptr + 10, d11); - _mm256_storeu_si256((__m256i *) ptr + 11, d12); - _mm256_storeu_si256((__m256i *) ptr + 12, d13); - _mm256_storeu_si256((__m256i *) ptr + 13, d14); - _mm256_storeu_si256((__m256i *) ptr + 14, d15); - _mm256_storeu_si256((__m256i *) ptr + 15, d16); -} - static void sort(uint64_t *ptr, size_t length) { - const int N = 4; - - switch(length / N) { - case 1: sort_01v(ptr); break; - case 2: sort_02v(ptr); break; - case 3: sort_03v(ptr); break; - case 4: sort_04v(ptr); break; - case 5: sort_05v(ptr); break; - case 6: sort_06v(ptr); break; - case 7: sort_07v(ptr); break; - case 8: sort_08v(ptr); break; - case 9: sort_09v(ptr); break; - case 10: sort_10v(ptr); break; - case 11: sort_11v(ptr); break; - case 12: sort_12v(ptr); break; - case 13: sort_13v(ptr); break; - case 14: sort_14v(ptr); break; - case 
15: sort_15v(ptr); break;
-    case 16: sort_16v(ptr); break;
-    }
-}
-};
-}
-}
-#endif
diff --git a/src/coreclr/src/gc/smallsort/bitonic_sort.AVX512.int64_t.generated.cpp b/src/coreclr/src/gc/smallsort/bitonic_sort.AVX512.int64_t.generated.cpp
new file mode 100644
index 00000000000000..b6cf913bd22df8
--- /dev/null
+++ b/src/coreclr/src/gc/smallsort/bitonic_sort.AVX512.int64_t.generated.cpp
@@ -0,0 +1,26 @@
+#include "bitonic_sort.AVX512.int64_t.generated.h"
+
+using namespace vxsort;
+
+void vxsort::smallsort::bitonic<int64_t, vector_machine::AVX512>::sort(int64_t *ptr, size_t length) {
+    const int N = 8;
+
+    switch(length / N) {
+    case 1: sort_01v(ptr); break;
+    case 2: sort_02v(ptr); break;
+    case 3: sort_03v(ptr); break;
+    case 4: sort_04v(ptr); break;
+    case 5: sort_05v(ptr); break;
+    case 6: sort_06v(ptr); break;
+    case 7: sort_07v(ptr); break;
+    case 8: sort_08v(ptr); break;
+    case 9: sort_09v(ptr); break;
+    case 10: sort_10v(ptr); break;
+    case 11: sort_11v(ptr); break;
+    case 12: sort_12v(ptr); break;
+    case 13: sort_13v(ptr); break;
+    case 14: sort_14v(ptr); break;
+    case 15: sort_15v(ptr); break;
+    case 16: sort_16v(ptr); break;
+    }
+}
diff --git a/src/coreclr/src/gc/smallsort/bitonic_sort.AVX512.int64_t.generated.h b/src/coreclr/src/gc/smallsort/bitonic_sort.AVX512.int64_t.generated.h
new file mode 100644
index 00000000000000..2457e46fc28b03
--- /dev/null
+++ b/src/coreclr/src/gc/smallsort/bitonic_sort.AVX512.int64_t.generated.h
@@ -0,0 +1,1344 @@
+/////////////////////////////////////////////////////////////////////////////
+////
+// This file was auto-generated by a tool at 2020-06-22 05:27:48
+//
+// It is recommended you DO NOT directly edit this file but instead edit
+// the code-generator that generated this source file.
+/////////////////////////////////////////////////////////////////////////////
+
+#ifndef BITONIC_SORT_AVX512_INT64_T_H
+#define BITONIC_SORT_AVX512_INT64_T_H
+
+
+#ifdef __GNUC__
+#ifdef __clang__
+#pragma clang attribute push (__attribute__((target("avx512f"))), apply_to = any(function))
+#else
+#pragma GCC push_options
+#pragma GCC target("avx512f")
+#endif
+#endif
+
+#include <immintrin.h>
+#include "bitonic_sort.h"
+
+#define i2d _mm512_castsi512_pd
+#define d2i _mm512_castpd_si512
+#define i2s _mm512_castsi512_ps
+#define s2i _mm512_castps_si512
+#define s2d _mm512_castps_pd
+#define d2s _mm512_castpd_ps
+
+namespace vxsort {
+namespace smallsort {
+template<> struct bitonic<int64_t, vector_machine::AVX512> {
+public:
+
+    static INLINE void sort_01v_ascending(__m512i& d01) {
+        __m512i min, s;
+
+        s = d2i(_mm512_permute_pd(i2d(d01), _MM_PERM_BBBB));
+        min = _mm512_min_epi64(s, d01);
+        d01 = _mm512_mask_max_epi64(min, 0x00AA, s, d01);
+
+        s = d2i(_mm512_permutex_pd(i2d(d01), _MM_PERM_ABCD));
+        min = _mm512_min_epi64(s, d01);
+        d01 = _mm512_mask_max_epi64(min, 0x00CC, s, d01);
+
+        s = d2i(_mm512_permute_pd(i2d(d01), _MM_PERM_BBBB));
+        min = _mm512_min_epi64(s, d01);
+        d01 = _mm512_mask_max_epi64(min, 0x00AA, s, d01);
+
+        s = d2i(_mm512_shuffle_f64x2(_mm512_permutex_pd(i2d(d01), _MM_PERM_ABCD), _mm512_permutex_pd(i2d(d01), _MM_PERM_ABCD), _MM_PERM_BADC));
+        min = _mm512_min_epi64(s, d01);
+        d01 = _mm512_mask_max_epi64(min, 0x00F0, s, d01);
+
+        s = d2i(_mm512_permutex_pd(i2d(d01), _MM_PERM_BADC));
+        min = _mm512_min_epi64(s, d01);
+        d01 = _mm512_mask_max_epi64(min, 0x00CC, s, d01);
+
+        s = d2i(_mm512_permute_pd(i2d(d01), _MM_PERM_BBBB));
+        min = _mm512_min_epi64(s, d01);
+        d01 = _mm512_mask_max_epi64(min, 0x00AA, s, d01);
+    }
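Unlike the AVX2 kernels in the file deleted above, these AVX512 kernels need neither the sign-bit trick nor compare-and-blend pairs: the element type here is int64_t and AVX512F provides native 64-bit min/max plus masked writes. Each exchange computes a full-width minimum and then uses _mm512_mask_max_epi64 to overwrite only the lanes selected by the mask. A scalar model of one such step (illustrative only; masked_exchange is a hypothetical helper, not part of the generated file):

#include <algorithm>
#include <cstdint>

// Models the pair
//     min = _mm512_min_epi64(s, d01);
//     d01 = _mm512_mask_max_epi64(min, mask, s, d01);
// Lanes whose mask bit is set receive max(s, d); all other lanes receive min(s, d).
static void masked_exchange(int64_t d[8], const int64_t s[8], uint8_t mask) {
    for (int lane = 0; lane < 8; lane++) {
        int64_t lo = std::min(s[lane], d[lane]);
        int64_t hi = std::max(s[lane], d[lane]);
        d[lane] = ((mask >> lane) & 1) ? hi : lo;
    }
}

With mask 0x00AA, for example, the odd lanes keep the larger element of each pair and the even lanes the smaller, which is exactly the first exchange of sort_01v_ascending.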
+    static INLINE void sort_01v_merge_ascending(__m512i& d01) {
+        __m512i min, s;
+
+        s = d2i(_mm512_shuffle_f64x2(i2d(d01), i2d(d01), _MM_PERM_BADC));
+        min = _mm512_min_epi64(s, d01);
+        d01 = _mm512_mask_max_epi64(min, 0x00F0, s, d01);
+
+        s = d2i(_mm512_permutex_pd(i2d(d01), _MM_PERM_BADC));
+        min = _mm512_min_epi64(s, d01);
+        d01 = _mm512_mask_max_epi64(min, 0x00CC, s, d01);
+
+        s = d2i(_mm512_permute_pd(i2d(d01), _MM_PERM_BBBB));
+        min = _mm512_min_epi64(s, d01);
+        d01 = _mm512_mask_max_epi64(min, 0x00AA, s, d01);
+    }
+    static INLINE void sort_01v_descending(__m512i& d01) {
+        __m512i min, s;
+
+        s = d2i(_mm512_permute_pd(i2d(d01), _MM_PERM_BBBB));
+        min = _mm512_min_epi64(s, d01);
+        d01 = _mm512_mask_max_epi64(min, 0x0055, s, d01);
+
+        s = d2i(_mm512_permutex_pd(i2d(d01), _MM_PERM_ABCD));
+        min = _mm512_min_epi64(s, d01);
+        d01 = _mm512_mask_max_epi64(min, 0x0033, s, d01);
+
+        s = d2i(_mm512_permute_pd(i2d(d01), _MM_PERM_BBBB));
+        min = _mm512_min_epi64(s, d01);
+        d01 = _mm512_mask_max_epi64(min, 0x0055, s, d01);
+
+        s = d2i(_mm512_shuffle_f64x2(_mm512_permutex_pd(i2d(d01), _MM_PERM_ABCD), _mm512_permutex_pd(i2d(d01), _MM_PERM_ABCD), _MM_PERM_BADC));
+        min = _mm512_min_epi64(s, d01);
+        d01 = _mm512_mask_max_epi64(min, 0x000F, s, d01);
+
+        s = d2i(_mm512_permutex_pd(i2d(d01), _MM_PERM_BADC));
+        min = _mm512_min_epi64(s, d01);
+        d01 = _mm512_mask_max_epi64(min, 0x0033, s, d01);
+
+        s = d2i(_mm512_permute_pd(i2d(d01), _MM_PERM_BBBB));
+        min = _mm512_min_epi64(s, d01);
+        d01 = _mm512_mask_max_epi64(min, 0x0055, s, d01);
+    }
+    static INLINE void sort_01v_merge_descending(__m512i& d01) {
+        __m512i min, s;
+
+        s = d2i(_mm512_shuffle_f64x2(i2d(d01), i2d(d01), _MM_PERM_BADC));
+        min = _mm512_min_epi64(s, d01);
+        d01 = _mm512_mask_max_epi64(min, 0x000F, s, d01);
+
+        s = d2i(_mm512_permutex_pd(i2d(d01), _MM_PERM_BADC));
+        min = _mm512_min_epi64(s, d01);
+        d01 = _mm512_mask_max_epi64(min, 0x0033, s, d01);
+
+        s = d2i(_mm512_permute_pd(i2d(d01), _MM_PERM_BBBB));
+        min = _mm512_min_epi64(s, d01);
+        d01 = _mm512_mask_max_epi64(min, 0x0055, s, d01);
+    }
+    static INLINE void sort_02v_ascending(__m512i& d01, __m512i& d02) {
+        __m512i tmp;
+
+        sort_01v_ascending(d01);
+        sort_01v_descending(d02);
+
+        tmp = d02;
+        d02 = _mm512_max_epi64(d01, d02);
+        d01 = _mm512_min_epi64(d01, tmp);
+
+        sort_01v_merge_ascending(d01);
+        sort_01v_merge_ascending(d02);
+    }
+    static INLINE void sort_02v_descending(__m512i& d01, __m512i& d02) {
+        __m512i tmp;
+
+        sort_01v_descending(d01);
+        sort_01v_ascending(d02);
+
+        tmp = d02;
+        d02 = _mm512_max_epi64(d01, d02);
+        d01 = _mm512_min_epi64(d01, tmp);
+
+        sort_01v_merge_descending(d01);
+        sort_01v_merge_descending(d02);
+    }
+    static INLINE void sort_02v_merge_ascending(__m512i& d01, __m512i& d02) {
+        __m512i tmp;
+
+        tmp = d01;
+        d01 = _mm512_min_epi64(d02, d01);
+        d02 = _mm512_max_epi64(d02, tmp);
+
+        sort_01v_merge_ascending(d01);
+        sort_01v_merge_ascending(d02);
+    }
+    static INLINE void sort_02v_merge_descending(__m512i& d01, __m512i& d02) {
+        __m512i tmp;
+
+        tmp = d01;
+        d01 = _mm512_min_epi64(d02, d01);
+        d02 = _mm512_max_epi64(d02, tmp);
+
+        sort_01v_merge_descending(d01);
+        sort_01v_merge_descending(d02);
+    }
+    static INLINE void sort_03v_ascending(__m512i& d01, __m512i& d02, __m512i& d03) {
+        __m512i tmp;
+
+        sort_02v_ascending(d01, d02);
+        sort_01v_descending(d03);
+
+        tmp = d03;
+        d03 = _mm512_max_epi64(d02, d03);
+        d02 = _mm512_min_epi64(d02, tmp);
+
+        sort_02v_merge_ascending(d01, d02);
+        sort_01v_merge_ascending(d03);
+    }
+    static INLINE void sort_03v_descending(__m512i& d01, __m512i& d02, __m512i& d03) {
+        __m512i tmp;
+
+        sort_02v_descending(d01,
d02); + sort_01v_ascending(d03); + + tmp = d03; + d03 = _mm512_max_epi64(d02, d03); + d02 = _mm512_min_epi64(d02, tmp); + + sort_02v_merge_descending(d01, d02); + sort_01v_merge_descending(d03); + } + static INLINE void sort_03v_merge_ascending(__m512i& d01, __m512i& d02, __m512i& d03) { + __m512i tmp; + + tmp = d01; + d01 = _mm512_min_epi64(d03, d01); + d03 = _mm512_max_epi64(d03, tmp); + + sort_02v_merge_ascending(d01, d02); + sort_01v_merge_ascending(d03); + } + static INLINE void sort_03v_merge_descending(__m512i& d01, __m512i& d02, __m512i& d03) { + __m512i tmp; + + tmp = d01; + d01 = _mm512_min_epi64(d03, d01); + d03 = _mm512_max_epi64(d03, tmp); + + sort_02v_merge_descending(d01, d02); + sort_01v_merge_descending(d03); + } + static INLINE void sort_04v_ascending(__m512i& d01, __m512i& d02, __m512i& d03, __m512i& d04) { + __m512i tmp; + + sort_02v_ascending(d01, d02); + sort_02v_descending(d03, d04); + + tmp = d03; + d03 = _mm512_max_epi64(d02, d03); + d02 = _mm512_min_epi64(d02, tmp); + + tmp = d04; + d04 = _mm512_max_epi64(d01, d04); + d01 = _mm512_min_epi64(d01, tmp); + + sort_02v_merge_ascending(d01, d02); + sort_02v_merge_ascending(d03, d04); + } + static INLINE void sort_04v_descending(__m512i& d01, __m512i& d02, __m512i& d03, __m512i& d04) { + __m512i tmp; + + sort_02v_descending(d01, d02); + sort_02v_ascending(d03, d04); + + tmp = d03; + d03 = _mm512_max_epi64(d02, d03); + d02 = _mm512_min_epi64(d02, tmp); + + tmp = d04; + d04 = _mm512_max_epi64(d01, d04); + d01 = _mm512_min_epi64(d01, tmp); + + sort_02v_merge_descending(d01, d02); + sort_02v_merge_descending(d03, d04); + } + static INLINE void sort_04v_merge_ascending(__m512i& d01, __m512i& d02, __m512i& d03, __m512i& d04) { + __m512i tmp; + + tmp = d01; + d01 = _mm512_min_epi64(d03, d01); + d03 = _mm512_max_epi64(d03, tmp); + + tmp = d02; + d02 = _mm512_min_epi64(d04, d02); + d04 = _mm512_max_epi64(d04, tmp); + + sort_02v_merge_ascending(d01, d02); + sort_02v_merge_ascending(d03, d04); + } + static INLINE void sort_04v_merge_descending(__m512i& d01, __m512i& d02, __m512i& d03, __m512i& d04) { + __m512i tmp; + + tmp = d01; + d01 = _mm512_min_epi64(d03, d01); + d03 = _mm512_max_epi64(d03, tmp); + + tmp = d02; + d02 = _mm512_min_epi64(d04, d02); + d04 = _mm512_max_epi64(d04, tmp); + + sort_02v_merge_descending(d01, d02); + sort_02v_merge_descending(d03, d04); + } + static INLINE void sort_05v_ascending(__m512i& d01, __m512i& d02, __m512i& d03, __m512i& d04, __m512i& d05) { + __m512i tmp; + + sort_04v_ascending(d01, d02, d03, d04); + sort_01v_descending(d05); + + tmp = d05; + d05 = _mm512_max_epi64(d04, d05); + d04 = _mm512_min_epi64(d04, tmp); + + sort_04v_merge_ascending(d01, d02, d03, d04); + sort_01v_merge_ascending(d05); + } + static INLINE void sort_05v_descending(__m512i& d01, __m512i& d02, __m512i& d03, __m512i& d04, __m512i& d05) { + __m512i tmp; + + sort_04v_descending(d01, d02, d03, d04); + sort_01v_ascending(d05); + + tmp = d05; + d05 = _mm512_max_epi64(d04, d05); + d04 = _mm512_min_epi64(d04, tmp); + + sort_04v_merge_descending(d01, d02, d03, d04); + sort_01v_merge_descending(d05); + } + static INLINE void sort_05v_merge_ascending(__m512i& d01, __m512i& d02, __m512i& d03, __m512i& d04, __m512i& d05) { + __m512i tmp; + + tmp = d01; + d01 = _mm512_min_epi64(d05, d01); + d05 = _mm512_max_epi64(d05, tmp); + + sort_04v_merge_ascending(d01, d02, d03, d04); + sort_01v_merge_ascending(d05); + } + static INLINE void sort_05v_merge_descending(__m512i& d01, __m512i& d02, __m512i& d03, __m512i& d04, __m512i& d05) { + __m512i 
tmp; + + tmp = d01; + d01 = _mm512_min_epi64(d05, d01); + d05 = _mm512_max_epi64(d05, tmp); + + sort_04v_merge_descending(d01, d02, d03, d04); + sort_01v_merge_descending(d05); + } + static INLINE void sort_06v_ascending(__m512i& d01, __m512i& d02, __m512i& d03, __m512i& d04, __m512i& d05, __m512i& d06) { + __m512i tmp; + + sort_04v_ascending(d01, d02, d03, d04); + sort_02v_descending(d05, d06); + + tmp = d05; + d05 = _mm512_max_epi64(d04, d05); + d04 = _mm512_min_epi64(d04, tmp); + + tmp = d06; + d06 = _mm512_max_epi64(d03, d06); + d03 = _mm512_min_epi64(d03, tmp); + + sort_04v_merge_ascending(d01, d02, d03, d04); + sort_02v_merge_ascending(d05, d06); + } + static INLINE void sort_06v_descending(__m512i& d01, __m512i& d02, __m512i& d03, __m512i& d04, __m512i& d05, __m512i& d06) { + __m512i tmp; + + sort_04v_descending(d01, d02, d03, d04); + sort_02v_ascending(d05, d06); + + tmp = d05; + d05 = _mm512_max_epi64(d04, d05); + d04 = _mm512_min_epi64(d04, tmp); + + tmp = d06; + d06 = _mm512_max_epi64(d03, d06); + d03 = _mm512_min_epi64(d03, tmp); + + sort_04v_merge_descending(d01, d02, d03, d04); + sort_02v_merge_descending(d05, d06); + } + static INLINE void sort_06v_merge_ascending(__m512i& d01, __m512i& d02, __m512i& d03, __m512i& d04, __m512i& d05, __m512i& d06) { + __m512i tmp; + + tmp = d01; + d01 = _mm512_min_epi64(d05, d01); + d05 = _mm512_max_epi64(d05, tmp); + + tmp = d02; + d02 = _mm512_min_epi64(d06, d02); + d06 = _mm512_max_epi64(d06, tmp); + + sort_04v_merge_ascending(d01, d02, d03, d04); + sort_02v_merge_ascending(d05, d06); + } + static INLINE void sort_06v_merge_descending(__m512i& d01, __m512i& d02, __m512i& d03, __m512i& d04, __m512i& d05, __m512i& d06) { + __m512i tmp; + + tmp = d01; + d01 = _mm512_min_epi64(d05, d01); + d05 = _mm512_max_epi64(d05, tmp); + + tmp = d02; + d02 = _mm512_min_epi64(d06, d02); + d06 = _mm512_max_epi64(d06, tmp); + + sort_04v_merge_descending(d01, d02, d03, d04); + sort_02v_merge_descending(d05, d06); + } + static INLINE void sort_07v_ascending(__m512i& d01, __m512i& d02, __m512i& d03, __m512i& d04, __m512i& d05, __m512i& d06, __m512i& d07) { + __m512i tmp; + + sort_04v_ascending(d01, d02, d03, d04); + sort_03v_descending(d05, d06, d07); + + tmp = d05; + d05 = _mm512_max_epi64(d04, d05); + d04 = _mm512_min_epi64(d04, tmp); + + tmp = d06; + d06 = _mm512_max_epi64(d03, d06); + d03 = _mm512_min_epi64(d03, tmp); + + tmp = d07; + d07 = _mm512_max_epi64(d02, d07); + d02 = _mm512_min_epi64(d02, tmp); + + sort_04v_merge_ascending(d01, d02, d03, d04); + sort_03v_merge_ascending(d05, d06, d07); + } + static INLINE void sort_07v_descending(__m512i& d01, __m512i& d02, __m512i& d03, __m512i& d04, __m512i& d05, __m512i& d06, __m512i& d07) { + __m512i tmp; + + sort_04v_descending(d01, d02, d03, d04); + sort_03v_ascending(d05, d06, d07); + + tmp = d05; + d05 = _mm512_max_epi64(d04, d05); + d04 = _mm512_min_epi64(d04, tmp); + + tmp = d06; + d06 = _mm512_max_epi64(d03, d06); + d03 = _mm512_min_epi64(d03, tmp); + + tmp = d07; + d07 = _mm512_max_epi64(d02, d07); + d02 = _mm512_min_epi64(d02, tmp); + + sort_04v_merge_descending(d01, d02, d03, d04); + sort_03v_merge_descending(d05, d06, d07); + } + static INLINE void sort_07v_merge_ascending(__m512i& d01, __m512i& d02, __m512i& d03, __m512i& d04, __m512i& d05, __m512i& d06, __m512i& d07) { + __m512i tmp; + + tmp = d01; + d01 = _mm512_min_epi64(d05, d01); + d05 = _mm512_max_epi64(d05, tmp); + + tmp = d02; + d02 = _mm512_min_epi64(d06, d02); + d06 = _mm512_max_epi64(d06, tmp); + + tmp = d03; + d03 = _mm512_min_epi64(d07, 
d03); + d07 = _mm512_max_epi64(d07, tmp); + + sort_04v_merge_ascending(d01, d02, d03, d04); + sort_03v_merge_ascending(d05, d06, d07); + } + static INLINE void sort_07v_merge_descending(__m512i& d01, __m512i& d02, __m512i& d03, __m512i& d04, __m512i& d05, __m512i& d06, __m512i& d07) { + __m512i tmp; + + tmp = d01; + d01 = _mm512_min_epi64(d05, d01); + d05 = _mm512_max_epi64(d05, tmp); + + tmp = d02; + d02 = _mm512_min_epi64(d06, d02); + d06 = _mm512_max_epi64(d06, tmp); + + tmp = d03; + d03 = _mm512_min_epi64(d07, d03); + d07 = _mm512_max_epi64(d07, tmp); + + sort_04v_merge_descending(d01, d02, d03, d04); + sort_03v_merge_descending(d05, d06, d07); + } + static INLINE void sort_08v_ascending(__m512i& d01, __m512i& d02, __m512i& d03, __m512i& d04, __m512i& d05, __m512i& d06, __m512i& d07, __m512i& d08) { + __m512i tmp; + + sort_04v_ascending(d01, d02, d03, d04); + sort_04v_descending(d05, d06, d07, d08); + + tmp = d05; + d05 = _mm512_max_epi64(d04, d05); + d04 = _mm512_min_epi64(d04, tmp); + + tmp = d06; + d06 = _mm512_max_epi64(d03, d06); + d03 = _mm512_min_epi64(d03, tmp); + + tmp = d07; + d07 = _mm512_max_epi64(d02, d07); + d02 = _mm512_min_epi64(d02, tmp); + + tmp = d08; + d08 = _mm512_max_epi64(d01, d08); + d01 = _mm512_min_epi64(d01, tmp); + + sort_04v_merge_ascending(d01, d02, d03, d04); + sort_04v_merge_ascending(d05, d06, d07, d08); + } + static INLINE void sort_08v_descending(__m512i& d01, __m512i& d02, __m512i& d03, __m512i& d04, __m512i& d05, __m512i& d06, __m512i& d07, __m512i& d08) { + __m512i tmp; + + sort_04v_descending(d01, d02, d03, d04); + sort_04v_ascending(d05, d06, d07, d08); + + tmp = d05; + d05 = _mm512_max_epi64(d04, d05); + d04 = _mm512_min_epi64(d04, tmp); + + tmp = d06; + d06 = _mm512_max_epi64(d03, d06); + d03 = _mm512_min_epi64(d03, tmp); + + tmp = d07; + d07 = _mm512_max_epi64(d02, d07); + d02 = _mm512_min_epi64(d02, tmp); + + tmp = d08; + d08 = _mm512_max_epi64(d01, d08); + d01 = _mm512_min_epi64(d01, tmp); + + sort_04v_merge_descending(d01, d02, d03, d04); + sort_04v_merge_descending(d05, d06, d07, d08); + } + static INLINE void sort_08v_merge_ascending(__m512i& d01, __m512i& d02, __m512i& d03, __m512i& d04, __m512i& d05, __m512i& d06, __m512i& d07, __m512i& d08) { + __m512i tmp; + + tmp = d01; + d01 = _mm512_min_epi64(d05, d01); + d05 = _mm512_max_epi64(d05, tmp); + + tmp = d02; + d02 = _mm512_min_epi64(d06, d02); + d06 = _mm512_max_epi64(d06, tmp); + + tmp = d03; + d03 = _mm512_min_epi64(d07, d03); + d07 = _mm512_max_epi64(d07, tmp); + + tmp = d04; + d04 = _mm512_min_epi64(d08, d04); + d08 = _mm512_max_epi64(d08, tmp); + + sort_04v_merge_ascending(d01, d02, d03, d04); + sort_04v_merge_ascending(d05, d06, d07, d08); + } + static INLINE void sort_08v_merge_descending(__m512i& d01, __m512i& d02, __m512i& d03, __m512i& d04, __m512i& d05, __m512i& d06, __m512i& d07, __m512i& d08) { + __m512i tmp; + + tmp = d01; + d01 = _mm512_min_epi64(d05, d01); + d05 = _mm512_max_epi64(d05, tmp); + + tmp = d02; + d02 = _mm512_min_epi64(d06, d02); + d06 = _mm512_max_epi64(d06, tmp); + + tmp = d03; + d03 = _mm512_min_epi64(d07, d03); + d07 = _mm512_max_epi64(d07, tmp); + + tmp = d04; + d04 = _mm512_min_epi64(d08, d04); + d08 = _mm512_max_epi64(d08, tmp); + + sort_04v_merge_descending(d01, d02, d03, d04); + sort_04v_merge_descending(d05, d06, d07, d08); + } + static INLINE void sort_09v_ascending(__m512i& d01, __m512i& d02, __m512i& d03, __m512i& d04, __m512i& d05, __m512i& d06, __m512i& d07, __m512i& d08, __m512i& d09) { + __m512i tmp; + + sort_08v_ascending(d01, d02, d03, d04, 
d05, d06, d07, d08); + sort_01v_descending(d09); + + tmp = d09; + d09 = _mm512_max_epi64(d08, d09); + d08 = _mm512_min_epi64(d08, tmp); + + sort_08v_merge_ascending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_01v_merge_ascending(d09); + } + static INLINE void sort_09v_descending(__m512i& d01, __m512i& d02, __m512i& d03, __m512i& d04, __m512i& d05, __m512i& d06, __m512i& d07, __m512i& d08, __m512i& d09) { + __m512i tmp; + + sort_08v_descending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_01v_ascending(d09); + + tmp = d09; + d09 = _mm512_max_epi64(d08, d09); + d08 = _mm512_min_epi64(d08, tmp); + + sort_08v_merge_descending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_01v_merge_descending(d09); + } + static INLINE void sort_10v_ascending(__m512i& d01, __m512i& d02, __m512i& d03, __m512i& d04, __m512i& d05, __m512i& d06, __m512i& d07, __m512i& d08, __m512i& d09, __m512i& d10) { + __m512i tmp; + + sort_08v_ascending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_02v_descending(d09, d10); + + tmp = d09; + d09 = _mm512_max_epi64(d08, d09); + d08 = _mm512_min_epi64(d08, tmp); + + tmp = d10; + d10 = _mm512_max_epi64(d07, d10); + d07 = _mm512_min_epi64(d07, tmp); + + sort_08v_merge_ascending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_02v_merge_ascending(d09, d10); + } + static INLINE void sort_10v_descending(__m512i& d01, __m512i& d02, __m512i& d03, __m512i& d04, __m512i& d05, __m512i& d06, __m512i& d07, __m512i& d08, __m512i& d09, __m512i& d10) { + __m512i tmp; + + sort_08v_descending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_02v_ascending(d09, d10); + + tmp = d09; + d09 = _mm512_max_epi64(d08, d09); + d08 = _mm512_min_epi64(d08, tmp); + + tmp = d10; + d10 = _mm512_max_epi64(d07, d10); + d07 = _mm512_min_epi64(d07, tmp); + + sort_08v_merge_descending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_02v_merge_descending(d09, d10); + } + static INLINE void sort_11v_ascending(__m512i& d01, __m512i& d02, __m512i& d03, __m512i& d04, __m512i& d05, __m512i& d06, __m512i& d07, __m512i& d08, __m512i& d09, __m512i& d10, __m512i& d11) { + __m512i tmp; + + sort_08v_ascending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_03v_descending(d09, d10, d11); + + tmp = d09; + d09 = _mm512_max_epi64(d08, d09); + d08 = _mm512_min_epi64(d08, tmp); + + tmp = d10; + d10 = _mm512_max_epi64(d07, d10); + d07 = _mm512_min_epi64(d07, tmp); + + tmp = d11; + d11 = _mm512_max_epi64(d06, d11); + d06 = _mm512_min_epi64(d06, tmp); + + sort_08v_merge_ascending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_03v_merge_ascending(d09, d10, d11); + } + static INLINE void sort_11v_descending(__m512i& d01, __m512i& d02, __m512i& d03, __m512i& d04, __m512i& d05, __m512i& d06, __m512i& d07, __m512i& d08, __m512i& d09, __m512i& d10, __m512i& d11) { + __m512i tmp; + + sort_08v_descending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_03v_ascending(d09, d10, d11); + + tmp = d09; + d09 = _mm512_max_epi64(d08, d09); + d08 = _mm512_min_epi64(d08, tmp); + + tmp = d10; + d10 = _mm512_max_epi64(d07, d10); + d07 = _mm512_min_epi64(d07, tmp); + + tmp = d11; + d11 = _mm512_max_epi64(d06, d11); + d06 = _mm512_min_epi64(d06, tmp); + + sort_08v_merge_descending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_03v_merge_descending(d09, d10, d11); + } + static INLINE void sort_12v_ascending(__m512i& d01, __m512i& d02, __m512i& d03, __m512i& d04, __m512i& d05, __m512i& d06, __m512i& d07, __m512i& d08, __m512i& d09, __m512i& d10, __m512i& d11, __m512i& d12) { + __m512i tmp; + + sort_08v_ascending(d01, d02, d03, d04, d05, d06, d07, d08); + 
sort_04v_descending(d09, d10, d11, d12); + + tmp = d09; + d09 = _mm512_max_epi64(d08, d09); + d08 = _mm512_min_epi64(d08, tmp); + + tmp = d10; + d10 = _mm512_max_epi64(d07, d10); + d07 = _mm512_min_epi64(d07, tmp); + + tmp = d11; + d11 = _mm512_max_epi64(d06, d11); + d06 = _mm512_min_epi64(d06, tmp); + + tmp = d12; + d12 = _mm512_max_epi64(d05, d12); + d05 = _mm512_min_epi64(d05, tmp); + + sort_08v_merge_ascending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_04v_merge_ascending(d09, d10, d11, d12); + } + static INLINE void sort_12v_descending(__m512i& d01, __m512i& d02, __m512i& d03, __m512i& d04, __m512i& d05, __m512i& d06, __m512i& d07, __m512i& d08, __m512i& d09, __m512i& d10, __m512i& d11, __m512i& d12) { + __m512i tmp; + + sort_08v_descending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_04v_ascending(d09, d10, d11, d12); + + tmp = d09; + d09 = _mm512_max_epi64(d08, d09); + d08 = _mm512_min_epi64(d08, tmp); + + tmp = d10; + d10 = _mm512_max_epi64(d07, d10); + d07 = _mm512_min_epi64(d07, tmp); + + tmp = d11; + d11 = _mm512_max_epi64(d06, d11); + d06 = _mm512_min_epi64(d06, tmp); + + tmp = d12; + d12 = _mm512_max_epi64(d05, d12); + d05 = _mm512_min_epi64(d05, tmp); + + sort_08v_merge_descending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_04v_merge_descending(d09, d10, d11, d12); + } + static INLINE void sort_13v_ascending(__m512i& d01, __m512i& d02, __m512i& d03, __m512i& d04, __m512i& d05, __m512i& d06, __m512i& d07, __m512i& d08, __m512i& d09, __m512i& d10, __m512i& d11, __m512i& d12, __m512i& d13) { + __m512i tmp; + + sort_08v_ascending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_05v_descending(d09, d10, d11, d12, d13); + + tmp = d09; + d09 = _mm512_max_epi64(d08, d09); + d08 = _mm512_min_epi64(d08, tmp); + + tmp = d10; + d10 = _mm512_max_epi64(d07, d10); + d07 = _mm512_min_epi64(d07, tmp); + + tmp = d11; + d11 = _mm512_max_epi64(d06, d11); + d06 = _mm512_min_epi64(d06, tmp); + + tmp = d12; + d12 = _mm512_max_epi64(d05, d12); + d05 = _mm512_min_epi64(d05, tmp); + + tmp = d13; + d13 = _mm512_max_epi64(d04, d13); + d04 = _mm512_min_epi64(d04, tmp); + + sort_08v_merge_ascending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_05v_merge_ascending(d09, d10, d11, d12, d13); + } + static INLINE void sort_13v_descending(__m512i& d01, __m512i& d02, __m512i& d03, __m512i& d04, __m512i& d05, __m512i& d06, __m512i& d07, __m512i& d08, __m512i& d09, __m512i& d10, __m512i& d11, __m512i& d12, __m512i& d13) { + __m512i tmp; + + sort_08v_descending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_05v_ascending(d09, d10, d11, d12, d13); + + tmp = d09; + d09 = _mm512_max_epi64(d08, d09); + d08 = _mm512_min_epi64(d08, tmp); + + tmp = d10; + d10 = _mm512_max_epi64(d07, d10); + d07 = _mm512_min_epi64(d07, tmp); + + tmp = d11; + d11 = _mm512_max_epi64(d06, d11); + d06 = _mm512_min_epi64(d06, tmp); + + tmp = d12; + d12 = _mm512_max_epi64(d05, d12); + d05 = _mm512_min_epi64(d05, tmp); + + tmp = d13; + d13 = _mm512_max_epi64(d04, d13); + d04 = _mm512_min_epi64(d04, tmp); + + sort_08v_merge_descending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_05v_merge_descending(d09, d10, d11, d12, d13); + } + static INLINE void sort_14v_ascending(__m512i& d01, __m512i& d02, __m512i& d03, __m512i& d04, __m512i& d05, __m512i& d06, __m512i& d07, __m512i& d08, __m512i& d09, __m512i& d10, __m512i& d11, __m512i& d12, __m512i& d13, __m512i& d14) { + __m512i tmp; + + sort_08v_ascending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_06v_descending(d09, d10, d11, d12, d13, d14); + + tmp = d09; + d09 = 
_mm512_max_epi64(d08, d09); + d08 = _mm512_min_epi64(d08, tmp); + + tmp = d10; + d10 = _mm512_max_epi64(d07, d10); + d07 = _mm512_min_epi64(d07, tmp); + + tmp = d11; + d11 = _mm512_max_epi64(d06, d11); + d06 = _mm512_min_epi64(d06, tmp); + + tmp = d12; + d12 = _mm512_max_epi64(d05, d12); + d05 = _mm512_min_epi64(d05, tmp); + + tmp = d13; + d13 = _mm512_max_epi64(d04, d13); + d04 = _mm512_min_epi64(d04, tmp); + + tmp = d14; + d14 = _mm512_max_epi64(d03, d14); + d03 = _mm512_min_epi64(d03, tmp); + + sort_08v_merge_ascending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_06v_merge_ascending(d09, d10, d11, d12, d13, d14); + } + static INLINE void sort_14v_descending(__m512i& d01, __m512i& d02, __m512i& d03, __m512i& d04, __m512i& d05, __m512i& d06, __m512i& d07, __m512i& d08, __m512i& d09, __m512i& d10, __m512i& d11, __m512i& d12, __m512i& d13, __m512i& d14) { + __m512i tmp; + + sort_08v_descending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_06v_ascending(d09, d10, d11, d12, d13, d14); + + tmp = d09; + d09 = _mm512_max_epi64(d08, d09); + d08 = _mm512_min_epi64(d08, tmp); + + tmp = d10; + d10 = _mm512_max_epi64(d07, d10); + d07 = _mm512_min_epi64(d07, tmp); + + tmp = d11; + d11 = _mm512_max_epi64(d06, d11); + d06 = _mm512_min_epi64(d06, tmp); + + tmp = d12; + d12 = _mm512_max_epi64(d05, d12); + d05 = _mm512_min_epi64(d05, tmp); + + tmp = d13; + d13 = _mm512_max_epi64(d04, d13); + d04 = _mm512_min_epi64(d04, tmp); + + tmp = d14; + d14 = _mm512_max_epi64(d03, d14); + d03 = _mm512_min_epi64(d03, tmp); + + sort_08v_merge_descending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_06v_merge_descending(d09, d10, d11, d12, d13, d14); + } + static INLINE void sort_15v_ascending(__m512i& d01, __m512i& d02, __m512i& d03, __m512i& d04, __m512i& d05, __m512i& d06, __m512i& d07, __m512i& d08, __m512i& d09, __m512i& d10, __m512i& d11, __m512i& d12, __m512i& d13, __m512i& d14, __m512i& d15) { + __m512i tmp; + + sort_08v_ascending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_07v_descending(d09, d10, d11, d12, d13, d14, d15); + + tmp = d09; + d09 = _mm512_max_epi64(d08, d09); + d08 = _mm512_min_epi64(d08, tmp); + + tmp = d10; + d10 = _mm512_max_epi64(d07, d10); + d07 = _mm512_min_epi64(d07, tmp); + + tmp = d11; + d11 = _mm512_max_epi64(d06, d11); + d06 = _mm512_min_epi64(d06, tmp); + + tmp = d12; + d12 = _mm512_max_epi64(d05, d12); + d05 = _mm512_min_epi64(d05, tmp); + + tmp = d13; + d13 = _mm512_max_epi64(d04, d13); + d04 = _mm512_min_epi64(d04, tmp); + + tmp = d14; + d14 = _mm512_max_epi64(d03, d14); + d03 = _mm512_min_epi64(d03, tmp); + + tmp = d15; + d15 = _mm512_max_epi64(d02, d15); + d02 = _mm512_min_epi64(d02, tmp); + + sort_08v_merge_ascending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_07v_merge_ascending(d09, d10, d11, d12, d13, d14, d15); + } + static INLINE void sort_15v_descending(__m512i& d01, __m512i& d02, __m512i& d03, __m512i& d04, __m512i& d05, __m512i& d06, __m512i& d07, __m512i& d08, __m512i& d09, __m512i& d10, __m512i& d11, __m512i& d12, __m512i& d13, __m512i& d14, __m512i& d15) { + __m512i tmp; + + sort_08v_descending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_07v_ascending(d09, d10, d11, d12, d13, d14, d15); + + tmp = d09; + d09 = _mm512_max_epi64(d08, d09); + d08 = _mm512_min_epi64(d08, tmp); + + tmp = d10; + d10 = _mm512_max_epi64(d07, d10); + d07 = _mm512_min_epi64(d07, tmp); + + tmp = d11; + d11 = _mm512_max_epi64(d06, d11); + d06 = _mm512_min_epi64(d06, tmp); + + tmp = d12; + d12 = _mm512_max_epi64(d05, d12); + d05 = _mm512_min_epi64(d05, tmp); + + tmp = d13; + d13 = 
_mm512_max_epi64(d04, d13); + d04 = _mm512_min_epi64(d04, tmp); + + tmp = d14; + d14 = _mm512_max_epi64(d03, d14); + d03 = _mm512_min_epi64(d03, tmp); + + tmp = d15; + d15 = _mm512_max_epi64(d02, d15); + d02 = _mm512_min_epi64(d02, tmp); + + sort_08v_merge_descending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_07v_merge_descending(d09, d10, d11, d12, d13, d14, d15); + } + static INLINE void sort_16v_ascending(__m512i& d01, __m512i& d02, __m512i& d03, __m512i& d04, __m512i& d05, __m512i& d06, __m512i& d07, __m512i& d08, __m512i& d09, __m512i& d10, __m512i& d11, __m512i& d12, __m512i& d13, __m512i& d14, __m512i& d15, __m512i& d16) { + __m512i tmp; + + sort_08v_ascending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_08v_descending(d09, d10, d11, d12, d13, d14, d15, d16); + + tmp = d09; + d09 = _mm512_max_epi64(d08, d09); + d08 = _mm512_min_epi64(d08, tmp); + + tmp = d10; + d10 = _mm512_max_epi64(d07, d10); + d07 = _mm512_min_epi64(d07, tmp); + + tmp = d11; + d11 = _mm512_max_epi64(d06, d11); + d06 = _mm512_min_epi64(d06, tmp); + + tmp = d12; + d12 = _mm512_max_epi64(d05, d12); + d05 = _mm512_min_epi64(d05, tmp); + + tmp = d13; + d13 = _mm512_max_epi64(d04, d13); + d04 = _mm512_min_epi64(d04, tmp); + + tmp = d14; + d14 = _mm512_max_epi64(d03, d14); + d03 = _mm512_min_epi64(d03, tmp); + + tmp = d15; + d15 = _mm512_max_epi64(d02, d15); + d02 = _mm512_min_epi64(d02, tmp); + + tmp = d16; + d16 = _mm512_max_epi64(d01, d16); + d01 = _mm512_min_epi64(d01, tmp); + + sort_08v_merge_ascending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_08v_merge_ascending(d09, d10, d11, d12, d13, d14, d15, d16); + } + static INLINE void sort_16v_descending(__m512i& d01, __m512i& d02, __m512i& d03, __m512i& d04, __m512i& d05, __m512i& d06, __m512i& d07, __m512i& d08, __m512i& d09, __m512i& d10, __m512i& d11, __m512i& d12, __m512i& d13, __m512i& d14, __m512i& d15, __m512i& d16) { + __m512i tmp; + + sort_08v_descending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_08v_ascending(d09, d10, d11, d12, d13, d14, d15, d16); + + tmp = d09; + d09 = _mm512_max_epi64(d08, d09); + d08 = _mm512_min_epi64(d08, tmp); + + tmp = d10; + d10 = _mm512_max_epi64(d07, d10); + d07 = _mm512_min_epi64(d07, tmp); + + tmp = d11; + d11 = _mm512_max_epi64(d06, d11); + d06 = _mm512_min_epi64(d06, tmp); + + tmp = d12; + d12 = _mm512_max_epi64(d05, d12); + d05 = _mm512_min_epi64(d05, tmp); + + tmp = d13; + d13 = _mm512_max_epi64(d04, d13); + d04 = _mm512_min_epi64(d04, tmp); + + tmp = d14; + d14 = _mm512_max_epi64(d03, d14); + d03 = _mm512_min_epi64(d03, tmp); + + tmp = d15; + d15 = _mm512_max_epi64(d02, d15); + d02 = _mm512_min_epi64(d02, tmp); + + tmp = d16; + d16 = _mm512_max_epi64(d01, d16); + d01 = _mm512_min_epi64(d01, tmp); + + sort_08v_merge_descending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_08v_merge_descending(d09, d10, d11, d12, d13, d14, d15, d16); + } + + static NOINLINE void sort_01v(int64_t *ptr) { + __m512i d01 = _mm512_loadu_si512((__m512i const *) ptr + 0);; + sort_01v_ascending(d01); + _mm512_storeu_si512((__m512i *) ptr + 0, d01); +} + + static NOINLINE void sort_02v(int64_t *ptr) { + __m512i d01 = _mm512_loadu_si512((__m512i const *) ptr + 0);; + __m512i d02 = _mm512_loadu_si512((__m512i const *) ptr + 1);; + sort_02v_ascending(d01, d02); + _mm512_storeu_si512((__m512i *) ptr + 0, d01); + _mm512_storeu_si512((__m512i *) ptr + 1, d02); +} + + static NOINLINE void sort_03v(int64_t *ptr) { + __m512i d01 = _mm512_loadu_si512((__m512i const *) ptr + 0);; + __m512i d02 = _mm512_loadu_si512((__m512i const *) ptr + 1);; 
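+ // Every sort_NNv entry point has the same shape: load NN whole vectors
+ // (eight int64_t values per __m512i) with unaligned loads, sort them entirely
+ // in registers via sort_NNv_ascending, and store the result back over the input.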
+ __m512i d03 = _mm512_loadu_si512((__m512i const *) ptr + 2);; + sort_03v_ascending(d01, d02, d03); + _mm512_storeu_si512((__m512i *) ptr + 0, d01); + _mm512_storeu_si512((__m512i *) ptr + 1, d02); + _mm512_storeu_si512((__m512i *) ptr + 2, d03); +} + + static NOINLINE void sort_04v(int64_t *ptr) { + __m512i d01 = _mm512_loadu_si512((__m512i const *) ptr + 0);; + __m512i d02 = _mm512_loadu_si512((__m512i const *) ptr + 1);; + __m512i d03 = _mm512_loadu_si512((__m512i const *) ptr + 2);; + __m512i d04 = _mm512_loadu_si512((__m512i const *) ptr + 3);; + sort_04v_ascending(d01, d02, d03, d04); + _mm512_storeu_si512((__m512i *) ptr + 0, d01); + _mm512_storeu_si512((__m512i *) ptr + 1, d02); + _mm512_storeu_si512((__m512i *) ptr + 2, d03); + _mm512_storeu_si512((__m512i *) ptr + 3, d04); +} + + static NOINLINE void sort_05v(int64_t *ptr) { + __m512i d01 = _mm512_loadu_si512((__m512i const *) ptr + 0);; + __m512i d02 = _mm512_loadu_si512((__m512i const *) ptr + 1);; + __m512i d03 = _mm512_loadu_si512((__m512i const *) ptr + 2);; + __m512i d04 = _mm512_loadu_si512((__m512i const *) ptr + 3);; + __m512i d05 = _mm512_loadu_si512((__m512i const *) ptr + 4);; + sort_05v_ascending(d01, d02, d03, d04, d05); + _mm512_storeu_si512((__m512i *) ptr + 0, d01); + _mm512_storeu_si512((__m512i *) ptr + 1, d02); + _mm512_storeu_si512((__m512i *) ptr + 2, d03); + _mm512_storeu_si512((__m512i *) ptr + 3, d04); + _mm512_storeu_si512((__m512i *) ptr + 4, d05); +} + + static NOINLINE void sort_06v(int64_t *ptr) { + __m512i d01 = _mm512_loadu_si512((__m512i const *) ptr + 0);; + __m512i d02 = _mm512_loadu_si512((__m512i const *) ptr + 1);; + __m512i d03 = _mm512_loadu_si512((__m512i const *) ptr + 2);; + __m512i d04 = _mm512_loadu_si512((__m512i const *) ptr + 3);; + __m512i d05 = _mm512_loadu_si512((__m512i const *) ptr + 4);; + __m512i d06 = _mm512_loadu_si512((__m512i const *) ptr + 5);; + sort_06v_ascending(d01, d02, d03, d04, d05, d06); + _mm512_storeu_si512((__m512i *) ptr + 0, d01); + _mm512_storeu_si512((__m512i *) ptr + 1, d02); + _mm512_storeu_si512((__m512i *) ptr + 2, d03); + _mm512_storeu_si512((__m512i *) ptr + 3, d04); + _mm512_storeu_si512((__m512i *) ptr + 4, d05); + _mm512_storeu_si512((__m512i *) ptr + 5, d06); +} + + static NOINLINE void sort_07v(int64_t *ptr) { + __m512i d01 = _mm512_loadu_si512((__m512i const *) ptr + 0);; + __m512i d02 = _mm512_loadu_si512((__m512i const *) ptr + 1);; + __m512i d03 = _mm512_loadu_si512((__m512i const *) ptr + 2);; + __m512i d04 = _mm512_loadu_si512((__m512i const *) ptr + 3);; + __m512i d05 = _mm512_loadu_si512((__m512i const *) ptr + 4);; + __m512i d06 = _mm512_loadu_si512((__m512i const *) ptr + 5);; + __m512i d07 = _mm512_loadu_si512((__m512i const *) ptr + 6);; + sort_07v_ascending(d01, d02, d03, d04, d05, d06, d07); + _mm512_storeu_si512((__m512i *) ptr + 0, d01); + _mm512_storeu_si512((__m512i *) ptr + 1, d02); + _mm512_storeu_si512((__m512i *) ptr + 2, d03); + _mm512_storeu_si512((__m512i *) ptr + 3, d04); + _mm512_storeu_si512((__m512i *) ptr + 4, d05); + _mm512_storeu_si512((__m512i *) ptr + 5, d06); + _mm512_storeu_si512((__m512i *) ptr + 6, d07); +} + + static NOINLINE void sort_08v(int64_t *ptr) { + __m512i d01 = _mm512_loadu_si512((__m512i const *) ptr + 0);; + __m512i d02 = _mm512_loadu_si512((__m512i const *) ptr + 1);; + __m512i d03 = _mm512_loadu_si512((__m512i const *) ptr + 2);; + __m512i d04 = _mm512_loadu_si512((__m512i const *) ptr + 3);; + __m512i d05 = _mm512_loadu_si512((__m512i const *) ptr + 4);; + __m512i d06 = 
_mm512_loadu_si512((__m512i const *) ptr + 5);; + __m512i d07 = _mm512_loadu_si512((__m512i const *) ptr + 6);; + __m512i d08 = _mm512_loadu_si512((__m512i const *) ptr + 7);; + sort_08v_ascending(d01, d02, d03, d04, d05, d06, d07, d08); + _mm512_storeu_si512((__m512i *) ptr + 0, d01); + _mm512_storeu_si512((__m512i *) ptr + 1, d02); + _mm512_storeu_si512((__m512i *) ptr + 2, d03); + _mm512_storeu_si512((__m512i *) ptr + 3, d04); + _mm512_storeu_si512((__m512i *) ptr + 4, d05); + _mm512_storeu_si512((__m512i *) ptr + 5, d06); + _mm512_storeu_si512((__m512i *) ptr + 6, d07); + _mm512_storeu_si512((__m512i *) ptr + 7, d08); +} + + static NOINLINE void sort_09v(int64_t *ptr) { + __m512i d01 = _mm512_loadu_si512((__m512i const *) ptr + 0);; + __m512i d02 = _mm512_loadu_si512((__m512i const *) ptr + 1);; + __m512i d03 = _mm512_loadu_si512((__m512i const *) ptr + 2);; + __m512i d04 = _mm512_loadu_si512((__m512i const *) ptr + 3);; + __m512i d05 = _mm512_loadu_si512((__m512i const *) ptr + 4);; + __m512i d06 = _mm512_loadu_si512((__m512i const *) ptr + 5);; + __m512i d07 = _mm512_loadu_si512((__m512i const *) ptr + 6);; + __m512i d08 = _mm512_loadu_si512((__m512i const *) ptr + 7);; + __m512i d09 = _mm512_loadu_si512((__m512i const *) ptr + 8);; + sort_09v_ascending(d01, d02, d03, d04, d05, d06, d07, d08, d09); + _mm512_storeu_si512((__m512i *) ptr + 0, d01); + _mm512_storeu_si512((__m512i *) ptr + 1, d02); + _mm512_storeu_si512((__m512i *) ptr + 2, d03); + _mm512_storeu_si512((__m512i *) ptr + 3, d04); + _mm512_storeu_si512((__m512i *) ptr + 4, d05); + _mm512_storeu_si512((__m512i *) ptr + 5, d06); + _mm512_storeu_si512((__m512i *) ptr + 6, d07); + _mm512_storeu_si512((__m512i *) ptr + 7, d08); + _mm512_storeu_si512((__m512i *) ptr + 8, d09); +} + + static NOINLINE void sort_10v(int64_t *ptr) { + __m512i d01 = _mm512_loadu_si512((__m512i const *) ptr + 0);; + __m512i d02 = _mm512_loadu_si512((__m512i const *) ptr + 1);; + __m512i d03 = _mm512_loadu_si512((__m512i const *) ptr + 2);; + __m512i d04 = _mm512_loadu_si512((__m512i const *) ptr + 3);; + __m512i d05 = _mm512_loadu_si512((__m512i const *) ptr + 4);; + __m512i d06 = _mm512_loadu_si512((__m512i const *) ptr + 5);; + __m512i d07 = _mm512_loadu_si512((__m512i const *) ptr + 6);; + __m512i d08 = _mm512_loadu_si512((__m512i const *) ptr + 7);; + __m512i d09 = _mm512_loadu_si512((__m512i const *) ptr + 8);; + __m512i d10 = _mm512_loadu_si512((__m512i const *) ptr + 9);; + sort_10v_ascending(d01, d02, d03, d04, d05, d06, d07, d08, d09, d10); + _mm512_storeu_si512((__m512i *) ptr + 0, d01); + _mm512_storeu_si512((__m512i *) ptr + 1, d02); + _mm512_storeu_si512((__m512i *) ptr + 2, d03); + _mm512_storeu_si512((__m512i *) ptr + 3, d04); + _mm512_storeu_si512((__m512i *) ptr + 4, d05); + _mm512_storeu_si512((__m512i *) ptr + 5, d06); + _mm512_storeu_si512((__m512i *) ptr + 6, d07); + _mm512_storeu_si512((__m512i *) ptr + 7, d08); + _mm512_storeu_si512((__m512i *) ptr + 8, d09); + _mm512_storeu_si512((__m512i *) ptr + 9, d10); +} + + static NOINLINE void sort_11v(int64_t *ptr) { + __m512i d01 = _mm512_loadu_si512((__m512i const *) ptr + 0);; + __m512i d02 = _mm512_loadu_si512((__m512i const *) ptr + 1);; + __m512i d03 = _mm512_loadu_si512((__m512i const *) ptr + 2);; + __m512i d04 = _mm512_loadu_si512((__m512i const *) ptr + 3);; + __m512i d05 = _mm512_loadu_si512((__m512i const *) ptr + 4);; + __m512i d06 = _mm512_loadu_si512((__m512i const *) ptr + 5);; + __m512i d07 = _mm512_loadu_si512((__m512i const *) ptr + 6);; + __m512i d08 = 
_mm512_loadu_si512((__m512i const *) ptr + 7);; + __m512i d09 = _mm512_loadu_si512((__m512i const *) ptr + 8);; + __m512i d10 = _mm512_loadu_si512((__m512i const *) ptr + 9);; + __m512i d11 = _mm512_loadu_si512((__m512i const *) ptr + 10);; + sort_11v_ascending(d01, d02, d03, d04, d05, d06, d07, d08, d09, d10, d11); + _mm512_storeu_si512((__m512i *) ptr + 0, d01); + _mm512_storeu_si512((__m512i *) ptr + 1, d02); + _mm512_storeu_si512((__m512i *) ptr + 2, d03); + _mm512_storeu_si512((__m512i *) ptr + 3, d04); + _mm512_storeu_si512((__m512i *) ptr + 4, d05); + _mm512_storeu_si512((__m512i *) ptr + 5, d06); + _mm512_storeu_si512((__m512i *) ptr + 6, d07); + _mm512_storeu_si512((__m512i *) ptr + 7, d08); + _mm512_storeu_si512((__m512i *) ptr + 8, d09); + _mm512_storeu_si512((__m512i *) ptr + 9, d10); + _mm512_storeu_si512((__m512i *) ptr + 10, d11); +} + + static NOINLINE void sort_12v(int64_t *ptr) { + __m512i d01 = _mm512_loadu_si512((__m512i const *) ptr + 0);; + __m512i d02 = _mm512_loadu_si512((__m512i const *) ptr + 1);; + __m512i d03 = _mm512_loadu_si512((__m512i const *) ptr + 2);; + __m512i d04 = _mm512_loadu_si512((__m512i const *) ptr + 3);; + __m512i d05 = _mm512_loadu_si512((__m512i const *) ptr + 4);; + __m512i d06 = _mm512_loadu_si512((__m512i const *) ptr + 5);; + __m512i d07 = _mm512_loadu_si512((__m512i const *) ptr + 6);; + __m512i d08 = _mm512_loadu_si512((__m512i const *) ptr + 7);; + __m512i d09 = _mm512_loadu_si512((__m512i const *) ptr + 8);; + __m512i d10 = _mm512_loadu_si512((__m512i const *) ptr + 9);; + __m512i d11 = _mm512_loadu_si512((__m512i const *) ptr + 10);; + __m512i d12 = _mm512_loadu_si512((__m512i const *) ptr + 11);; + sort_12v_ascending(d01, d02, d03, d04, d05, d06, d07, d08, d09, d10, d11, d12); + _mm512_storeu_si512((__m512i *) ptr + 0, d01); + _mm512_storeu_si512((__m512i *) ptr + 1, d02); + _mm512_storeu_si512((__m512i *) ptr + 2, d03); + _mm512_storeu_si512((__m512i *) ptr + 3, d04); + _mm512_storeu_si512((__m512i *) ptr + 4, d05); + _mm512_storeu_si512((__m512i *) ptr + 5, d06); + _mm512_storeu_si512((__m512i *) ptr + 6, d07); + _mm512_storeu_si512((__m512i *) ptr + 7, d08); + _mm512_storeu_si512((__m512i *) ptr + 8, d09); + _mm512_storeu_si512((__m512i *) ptr + 9, d10); + _mm512_storeu_si512((__m512i *) ptr + 10, d11); + _mm512_storeu_si512((__m512i *) ptr + 11, d12); +} + + static NOINLINE void sort_13v(int64_t *ptr) { + __m512i d01 = _mm512_loadu_si512((__m512i const *) ptr + 0);; + __m512i d02 = _mm512_loadu_si512((__m512i const *) ptr + 1);; + __m512i d03 = _mm512_loadu_si512((__m512i const *) ptr + 2);; + __m512i d04 = _mm512_loadu_si512((__m512i const *) ptr + 3);; + __m512i d05 = _mm512_loadu_si512((__m512i const *) ptr + 4);; + __m512i d06 = _mm512_loadu_si512((__m512i const *) ptr + 5);; + __m512i d07 = _mm512_loadu_si512((__m512i const *) ptr + 6);; + __m512i d08 = _mm512_loadu_si512((__m512i const *) ptr + 7);; + __m512i d09 = _mm512_loadu_si512((__m512i const *) ptr + 8);; + __m512i d10 = _mm512_loadu_si512((__m512i const *) ptr + 9);; + __m512i d11 = _mm512_loadu_si512((__m512i const *) ptr + 10);; + __m512i d12 = _mm512_loadu_si512((__m512i const *) ptr + 11);; + __m512i d13 = _mm512_loadu_si512((__m512i const *) ptr + 12);; + sort_13v_ascending(d01, d02, d03, d04, d05, d06, d07, d08, d09, d10, d11, d12, d13); + _mm512_storeu_si512((__m512i *) ptr + 0, d01); + _mm512_storeu_si512((__m512i *) ptr + 1, d02); + _mm512_storeu_si512((__m512i *) ptr + 2, d03); + _mm512_storeu_si512((__m512i *) ptr + 3, d04); + _mm512_storeu_si512((__m512i 
*) ptr + 4, d05); + _mm512_storeu_si512((__m512i *) ptr + 5, d06); + _mm512_storeu_si512((__m512i *) ptr + 6, d07); + _mm512_storeu_si512((__m512i *) ptr + 7, d08); + _mm512_storeu_si512((__m512i *) ptr + 8, d09); + _mm512_storeu_si512((__m512i *) ptr + 9, d10); + _mm512_storeu_si512((__m512i *) ptr + 10, d11); + _mm512_storeu_si512((__m512i *) ptr + 11, d12); + _mm512_storeu_si512((__m512i *) ptr + 12, d13); +} + + static NOINLINE void sort_14v(int64_t *ptr) { + __m512i d01 = _mm512_loadu_si512((__m512i const *) ptr + 0);; + __m512i d02 = _mm512_loadu_si512((__m512i const *) ptr + 1);; + __m512i d03 = _mm512_loadu_si512((__m512i const *) ptr + 2);; + __m512i d04 = _mm512_loadu_si512((__m512i const *) ptr + 3);; + __m512i d05 = _mm512_loadu_si512((__m512i const *) ptr + 4);; + __m512i d06 = _mm512_loadu_si512((__m512i const *) ptr + 5);; + __m512i d07 = _mm512_loadu_si512((__m512i const *) ptr + 6);; + __m512i d08 = _mm512_loadu_si512((__m512i const *) ptr + 7);; + __m512i d09 = _mm512_loadu_si512((__m512i const *) ptr + 8);; + __m512i d10 = _mm512_loadu_si512((__m512i const *) ptr + 9);; + __m512i d11 = _mm512_loadu_si512((__m512i const *) ptr + 10);; + __m512i d12 = _mm512_loadu_si512((__m512i const *) ptr + 11);; + __m512i d13 = _mm512_loadu_si512((__m512i const *) ptr + 12);; + __m512i d14 = _mm512_loadu_si512((__m512i const *) ptr + 13);; + sort_14v_ascending(d01, d02, d03, d04, d05, d06, d07, d08, d09, d10, d11, d12, d13, d14); + _mm512_storeu_si512((__m512i *) ptr + 0, d01); + _mm512_storeu_si512((__m512i *) ptr + 1, d02); + _mm512_storeu_si512((__m512i *) ptr + 2, d03); + _mm512_storeu_si512((__m512i *) ptr + 3, d04); + _mm512_storeu_si512((__m512i *) ptr + 4, d05); + _mm512_storeu_si512((__m512i *) ptr + 5, d06); + _mm512_storeu_si512((__m512i *) ptr + 6, d07); + _mm512_storeu_si512((__m512i *) ptr + 7, d08); + _mm512_storeu_si512((__m512i *) ptr + 8, d09); + _mm512_storeu_si512((__m512i *) ptr + 9, d10); + _mm512_storeu_si512((__m512i *) ptr + 10, d11); + _mm512_storeu_si512((__m512i *) ptr + 11, d12); + _mm512_storeu_si512((__m512i *) ptr + 12, d13); + _mm512_storeu_si512((__m512i *) ptr + 13, d14); +} + + static NOINLINE void sort_15v(int64_t *ptr) { + __m512i d01 = _mm512_loadu_si512((__m512i const *) ptr + 0);; + __m512i d02 = _mm512_loadu_si512((__m512i const *) ptr + 1);; + __m512i d03 = _mm512_loadu_si512((__m512i const *) ptr + 2);; + __m512i d04 = _mm512_loadu_si512((__m512i const *) ptr + 3);; + __m512i d05 = _mm512_loadu_si512((__m512i const *) ptr + 4);; + __m512i d06 = _mm512_loadu_si512((__m512i const *) ptr + 5);; + __m512i d07 = _mm512_loadu_si512((__m512i const *) ptr + 6);; + __m512i d08 = _mm512_loadu_si512((__m512i const *) ptr + 7);; + __m512i d09 = _mm512_loadu_si512((__m512i const *) ptr + 8);; + __m512i d10 = _mm512_loadu_si512((__m512i const *) ptr + 9);; + __m512i d11 = _mm512_loadu_si512((__m512i const *) ptr + 10);; + __m512i d12 = _mm512_loadu_si512((__m512i const *) ptr + 11);; + __m512i d13 = _mm512_loadu_si512((__m512i const *) ptr + 12);; + __m512i d14 = _mm512_loadu_si512((__m512i const *) ptr + 13);; + __m512i d15 = _mm512_loadu_si512((__m512i const *) ptr + 14);; + sort_15v_ascending(d01, d02, d03, d04, d05, d06, d07, d08, d09, d10, d11, d12, d13, d14, d15); + _mm512_storeu_si512((__m512i *) ptr + 0, d01); + _mm512_storeu_si512((__m512i *) ptr + 1, d02); + _mm512_storeu_si512((__m512i *) ptr + 2, d03); + _mm512_storeu_si512((__m512i *) ptr + 3, d04); + _mm512_storeu_si512((__m512i *) ptr + 4, d05); + _mm512_storeu_si512((__m512i *) ptr + 5, 
d06); + _mm512_storeu_si512((__m512i *) ptr + 6, d07); + _mm512_storeu_si512((__m512i *) ptr + 7, d08); + _mm512_storeu_si512((__m512i *) ptr + 8, d09); + _mm512_storeu_si512((__m512i *) ptr + 9, d10); + _mm512_storeu_si512((__m512i *) ptr + 10, d11); + _mm512_storeu_si512((__m512i *) ptr + 11, d12); + _mm512_storeu_si512((__m512i *) ptr + 12, d13); + _mm512_storeu_si512((__m512i *) ptr + 13, d14); + _mm512_storeu_si512((__m512i *) ptr + 14, d15); +} + + static NOINLINE void sort_16v(int64_t *ptr) { + __m512i d01 = _mm512_loadu_si512((__m512i const *) ptr + 0);; + __m512i d02 = _mm512_loadu_si512((__m512i const *) ptr + 1);; + __m512i d03 = _mm512_loadu_si512((__m512i const *) ptr + 2);; + __m512i d04 = _mm512_loadu_si512((__m512i const *) ptr + 3);; + __m512i d05 = _mm512_loadu_si512((__m512i const *) ptr + 4);; + __m512i d06 = _mm512_loadu_si512((__m512i const *) ptr + 5);; + __m512i d07 = _mm512_loadu_si512((__m512i const *) ptr + 6);; + __m512i d08 = _mm512_loadu_si512((__m512i const *) ptr + 7);; + __m512i d09 = _mm512_loadu_si512((__m512i const *) ptr + 8);; + __m512i d10 = _mm512_loadu_si512((__m512i const *) ptr + 9);; + __m512i d11 = _mm512_loadu_si512((__m512i const *) ptr + 10);; + __m512i d12 = _mm512_loadu_si512((__m512i const *) ptr + 11);; + __m512i d13 = _mm512_loadu_si512((__m512i const *) ptr + 12);; + __m512i d14 = _mm512_loadu_si512((__m512i const *) ptr + 13);; + __m512i d15 = _mm512_loadu_si512((__m512i const *) ptr + 14);; + __m512i d16 = _mm512_loadu_si512((__m512i const *) ptr + 15);; + sort_16v_ascending(d01, d02, d03, d04, d05, d06, d07, d08, d09, d10, d11, d12, d13, d14, d15, d16); + _mm512_storeu_si512((__m512i *) ptr + 0, d01); + _mm512_storeu_si512((__m512i *) ptr + 1, d02); + _mm512_storeu_si512((__m512i *) ptr + 2, d03); + _mm512_storeu_si512((__m512i *) ptr + 3, d04); + _mm512_storeu_si512((__m512i *) ptr + 4, d05); + _mm512_storeu_si512((__m512i *) ptr + 5, d06); + _mm512_storeu_si512((__m512i *) ptr + 6, d07); + _mm512_storeu_si512((__m512i *) ptr + 7, d08); + _mm512_storeu_si512((__m512i *) ptr + 8, d09); + _mm512_storeu_si512((__m512i *) ptr + 9, d10); + _mm512_storeu_si512((__m512i *) ptr + 10, d11); + _mm512_storeu_si512((__m512i *) ptr + 11, d12); + _mm512_storeu_si512((__m512i *) ptr + 12, d13); + _mm512_storeu_si512((__m512i *) ptr + 13, d14); + _mm512_storeu_si512((__m512i *) ptr + 14, d15); + _mm512_storeu_si512((__m512i *) ptr + 15, d16); +} + static void sort(int64_t *ptr, size_t length); + +}; +} +} + +#undef i2d +#undef d2i +#undef i2s +#undef s2i +#undef s2d +#undef d2s + +#ifdef __GNUC__ +#ifdef __clang__ +#pragma clang attribute pop +#else +#pragma GCC pop_options +#endif +#endif +#endif + diff --git a/src/coreclr/src/gc/smallsort/bitonic_sort.AVX512.uint32_t.generated.cpp b/src/coreclr/src/gc/smallsort/bitonic_sort.AVX512.uint32_t.generated.cpp new file mode 100644 index 00000000000000..840d970a43279b --- /dev/null +++ b/src/coreclr/src/gc/smallsort/bitonic_sort.AVX512.uint32_t.generated.cpp @@ -0,0 +1,26 @@ +#include "bitonic_sort.AVX512.uint32_t.generated.h" + +using namespace vxsort; + +void vxsort::smallsort::bitonic<uint32_t>::sort(uint32_t *ptr, size_t length) { + const int N = 16; + + switch(length / N) { + case 1: sort_01v(ptr); break; + case 2: sort_02v(ptr); break; + case 3: sort_03v(ptr); break; + case 4: sort_04v(ptr); break; + case 5: sort_05v(ptr); break; + case 6: sort_06v(ptr); break; + case 7: sort_07v(ptr); break; + case 8: sort_08v(ptr); break; + case 9: sort_09v(ptr); break; + case 10: sort_10v(ptr); break; + case 11: 
sort_11v(ptr); break; + case 12: sort_12v(ptr); break; + case 13: sort_13v(ptr); break; + case 14: sort_14v(ptr); break; + case 15: sort_15v(ptr); break; + case 16: sort_16v(ptr); break; + } +} diff --git a/src/coreclr/src/gc/smallsort/bitonic_sort.AVX512.uint32_t.generated.h b/src/coreclr/src/gc/smallsort/bitonic_sort.AVX512.uint32_t.generated.h new file mode 100644 index 00000000000000..c0fb49eb564cba --- /dev/null +++ b/src/coreclr/src/gc/smallsort/bitonic_sort.AVX512.uint32_t.generated.h @@ -0,0 +1,1384 @@ +///////////////////////////////////////////////////////////////////////////// +//// +// This file was auto-generated by a tool at 2020-06-22 05:27:48 +// +// It is recommended that you DO NOT directly edit this file, but instead edit +// the code-generator that produced it. +///////////////////////////////////////////////////////////////////////////// + +#ifndef BITONIC_SORT_AVX512_UINT32_T_H +#define BITONIC_SORT_AVX512_UINT32_T_H + + +#ifdef __GNUC__ +#ifdef __clang__ +#pragma clang attribute push (__attribute__((target("avx512f"))), apply_to = any(function)) +#else +#pragma GCC push_options +#pragma GCC target("avx512f") +#endif +#endif + +#include <immintrin.h> +#include "bitonic_sort.h" + +#define i2d _mm512_castsi512_pd +#define d2i _mm512_castpd_si512 +#define i2s _mm512_castsi512_ps +#define s2i _mm512_castps_si512 +#define s2d _mm512_castps_pd +#define d2s _mm512_castpd_ps + +namespace vxsort { +namespace smallsort { +template<> struct bitonic<uint32_t> { +public: + + static INLINE void sort_01v_ascending(__m512i& d01) { + __m512i min, s; + + s = _mm512_shuffle_epi32(d01, _MM_PERM_CDAB); + min = _mm512_min_epu32(s, d01); + d01 = _mm512_mask_max_epu32(min, 0xAAAA, s, d01); + + s = _mm512_shuffle_epi32(d01, _MM_PERM_ABCD); + min = _mm512_min_epu32(s, d01); + d01 = _mm512_mask_max_epu32(min, 0xCCCC, s, d01); + + s = _mm512_shuffle_epi32(d01, _MM_PERM_CDAB); + min = _mm512_min_epu32(s, d01); + d01 = _mm512_mask_max_epu32(min, 0xAAAA, s, d01); + + s = _mm512_permutex_epi64(_mm512_shuffle_epi32(d01, _MM_PERM_ABCD), _MM_PERM_BADC); + min = _mm512_min_epu32(s, d01); + d01 = _mm512_mask_max_epu32(min, 0xF0F0, s, d01); + + s = _mm512_shuffle_epi32(d01, _MM_PERM_BADC); + min = _mm512_min_epu32(s, d01); + d01 = _mm512_mask_max_epu32(min, 0xCCCC, s, d01); + + s = _mm512_shuffle_epi32(d01, _MM_PERM_CDAB); + min = _mm512_min_epu32(s, d01); + d01 = _mm512_mask_max_epu32(min, 0xAAAA, s, d01); + + s = _mm512_shuffle_i64x2(_mm512_shuffle_epi32(d01, _MM_PERM_ABCD), _mm512_shuffle_epi32(d01, _MM_PERM_ABCD), _MM_PERM_ABCD); + min = _mm512_min_epu32(s, d01); + d01 = _mm512_mask_max_epu32(min, 0xFF00, s, d01); + + s = _mm512_permutex_epi64(d01, _MM_PERM_BADC); + min = _mm512_min_epu32(s, d01); + d01 = _mm512_mask_max_epu32(min, 0xF0F0, s, d01); + + s = _mm512_shuffle_epi32(d01, _MM_PERM_BADC); + min = _mm512_min_epu32(s, d01); + d01 = _mm512_mask_max_epu32(min, 0xCCCC, s, d01); + + s = _mm512_shuffle_epi32(d01, _MM_PERM_CDAB); + min = _mm512_min_epu32(s, d01); + d01 = _mm512_mask_max_epu32(min, 0xAAAA, s, d01); + } + static INLINE void sort_01v_merge_ascending(__m512i& d01) { + __m512i min, s; + + s = _mm512_shuffle_i64x2(d01, d01, _MM_PERM_BADC); + min = _mm512_min_epu32(s, d01); + d01 = _mm512_mask_max_epu32(min, 0xFF00, s, d01); + + s = _mm512_permutex_epi64(d01, _MM_PERM_BADC); + min = _mm512_min_epu32(s, d01); + d01 = _mm512_mask_max_epu32(min, 0xF0F0, s, d01); + + s = _mm512_shuffle_epi32(d01, _MM_PERM_BADC); + min = _mm512_min_epu32(s, d01); + d01 = _mm512_mask_max_epu32(min, 0xCCCC, s, d01); + + 
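+ // Each step above and below is one comparator layer of the network: the
+ // shuffle/permute pairs up lanes at a fixed distance, _mm512_min_epu32 keeps
+ // the smaller element of each pair in every lane, and _mm512_mask_max_epu32
+ // merges the larger element back into the lanes selected by the writemask,
+ // giving a branch-free compare-exchange.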
s = _mm512_shuffle_epi32(d01, _MM_PERM_CDAB); + min = _mm512_min_epu32(s, d01); + d01 = _mm512_mask_max_epu32(min, 0xAAAA, s, d01); + } + static INLINE void sort_01v_descending(__m512i& d01) { + __m512i min, s; + + s = _mm512_shuffle_epi32(d01, _MM_PERM_CDAB); + min = _mm512_min_epu32(s, d01); + d01 = _mm512_mask_max_epu32(min, 0x5555, s, d01); + + s = _mm512_shuffle_epi32(d01, _MM_PERM_ABCD); + min = _mm512_min_epu32(s, d01); + d01 = _mm512_mask_max_epu32(min, 0x3333, s, d01); + + s = _mm512_shuffle_epi32(d01, _MM_PERM_CDAB); + min = _mm512_min_epu32(s, d01); + d01 = _mm512_mask_max_epu32(min, 0x5555, s, d01); + + s = _mm512_permutex_epi64(_mm512_shuffle_epi32(d01, _MM_PERM_ABCD), _MM_PERM_BADC); + min = _mm512_min_epu32(s, d01); + d01 = _mm512_mask_max_epu32(min, 0x0F0F, s, d01); + + s = _mm512_shuffle_epi32(d01, _MM_PERM_BADC); + min = _mm512_min_epu32(s, d01); + d01 = _mm512_mask_max_epu32(min, 0x3333, s, d01); + + s = _mm512_shuffle_epi32(d01, _MM_PERM_CDAB); + min = _mm512_min_epu32(s, d01); + d01 = _mm512_mask_max_epu32(min, 0x5555, s, d01); + + s = _mm512_shuffle_i64x2(_mm512_shuffle_epi32(d01, _MM_PERM_ABCD), _mm512_shuffle_epi32(d01, _MM_PERM_ABCD), _MM_PERM_ABCD); + min = _mm512_min_epu32(s, d01); + d01 = _mm512_mask_max_epu32(min, 0x00FF, s, d01); + + s = _mm512_permutex_epi64(d01, _MM_PERM_BADC); + min = _mm512_min_epu32(s, d01); + d01 = _mm512_mask_max_epu32(min, 0x0F0F, s, d01); + + s = _mm512_shuffle_epi32(d01, _MM_PERM_BADC); + min = _mm512_min_epu32(s, d01); + d01 = _mm512_mask_max_epu32(min, 0x3333, s, d01); + + s = _mm512_shuffle_epi32(d01, _MM_PERM_CDAB); + min = _mm512_min_epu32(s, d01); + d01 = _mm512_mask_max_epu32(min, 0x5555, s, d01); + } + static INLINE void sort_01v_merge_descending(__m512i& d01) { + __m512i min, s; + + s = _mm512_shuffle_i64x2(d01, d01, _MM_PERM_BADC); + min = _mm512_min_epu32(s, d01); + d01 = _mm512_mask_max_epu32(min, 0x00FF, s, d01); + + s = _mm512_permutex_epi64(d01, _MM_PERM_BADC); + min = _mm512_min_epu32(s, d01); + d01 = _mm512_mask_max_epu32(min, 0x0F0F, s, d01); + + s = _mm512_shuffle_epi32(d01, _MM_PERM_BADC); + min = _mm512_min_epu32(s, d01); + d01 = _mm512_mask_max_epu32(min, 0x3333, s, d01); + + s = _mm512_shuffle_epi32(d01, _MM_PERM_CDAB); + min = _mm512_min_epu32(s, d01); + d01 = _mm512_mask_max_epu32(min, 0x5555, s, d01); + } + static INLINE void sort_02v_ascending(__m512i& d01, __m512i& d02) { + __m512i tmp; + + sort_01v_ascending(d01); + sort_01v_descending(d02); + + tmp = d02; + d02 = _mm512_max_epu32(d01, d02); + d01 = _mm512_min_epu32(d01, tmp); + + sort_01v_merge_ascending(d01); + sort_01v_merge_ascending(d02); + } + static INLINE void sort_02v_descending(__m512i& d01, __m512i& d02) { + __m512i tmp; + + sort_01v_descending(d01); + sort_01v_ascending(d02); + + tmp = d02; + d02 = _mm512_max_epu32(d01, d02); + d01 = _mm512_min_epu32(d01, tmp); + + sort_01v_merge_descending(d01); + sort_01v_merge_descending(d02); + } + static INLINE void sort_02v_merge_ascending(__m512i& d01, __m512i& d02) { + __m512i tmp; + + tmp = d01; + d01 = _mm512_min_epu32(d02, d01); + d02 = _mm512_max_epu32(d02, tmp); + + sort_01v_merge_ascending(d01); + sort_01v_merge_ascending(d02); + } + static INLINE void sort_02v_merge_descending(__m512i& d01, __m512i& d02) { + __m512i tmp; + + tmp = d01; + d01 = _mm512_min_epu32(d02, d01); + d02 = _mm512_max_epu32(d02, tmp); + + sort_01v_merge_descending(d01); + sort_01v_merge_descending(d02); + } + static INLINE void sort_03v_ascending(__m512i& d01, __m512i& d02, __m512i& d03) { + __m512i tmp; + + 
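+ // Three vectors sort as two ascending plus one descending, a cross-boundary
+ // min/max exchange, and a merge of each part, mirroring the int64_t network
+ // earlier in this patch.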
sort_02v_ascending(d01, d02); + sort_01v_descending(d03); + + tmp = d03; + d03 = _mm512_max_epu32(d02, d03); + d02 = _mm512_min_epu32(d02, tmp); + + sort_02v_merge_ascending(d01, d02); + sort_01v_merge_ascending(d03); + } + static INLINE void sort_03v_descending(__m512i& d01, __m512i& d02, __m512i& d03) { + __m512i tmp; + + sort_02v_descending(d01, d02); + sort_01v_ascending(d03); + + tmp = d03; + d03 = _mm512_max_epu32(d02, d03); + d02 = _mm512_min_epu32(d02, tmp); + + sort_02v_merge_descending(d01, d02); + sort_01v_merge_descending(d03); + } + static INLINE void sort_03v_merge_ascending(__m512i& d01, __m512i& d02, __m512i& d03) { + __m512i tmp; + + tmp = d01; + d01 = _mm512_min_epu32(d03, d01); + d03 = _mm512_max_epu32(d03, tmp); + + sort_02v_merge_ascending(d01, d02); + sort_01v_merge_ascending(d03); + } + static INLINE void sort_03v_merge_descending(__m512i& d01, __m512i& d02, __m512i& d03) { + __m512i tmp; + + tmp = d01; + d01 = _mm512_min_epu32(d03, d01); + d03 = _mm512_max_epu32(d03, tmp); + + sort_02v_merge_descending(d01, d02); + sort_01v_merge_descending(d03); + } + static INLINE void sort_04v_ascending(__m512i& d01, __m512i& d02, __m512i& d03, __m512i& d04) { + __m512i tmp; + + sort_02v_ascending(d01, d02); + sort_02v_descending(d03, d04); + + tmp = d03; + d03 = _mm512_max_epu32(d02, d03); + d02 = _mm512_min_epu32(d02, tmp); + + tmp = d04; + d04 = _mm512_max_epu32(d01, d04); + d01 = _mm512_min_epu32(d01, tmp); + + sort_02v_merge_ascending(d01, d02); + sort_02v_merge_ascending(d03, d04); + } + static INLINE void sort_04v_descending(__m512i& d01, __m512i& d02, __m512i& d03, __m512i& d04) { + __m512i tmp; + + sort_02v_descending(d01, d02); + sort_02v_ascending(d03, d04); + + tmp = d03; + d03 = _mm512_max_epu32(d02, d03); + d02 = _mm512_min_epu32(d02, tmp); + + tmp = d04; + d04 = _mm512_max_epu32(d01, d04); + d01 = _mm512_min_epu32(d01, tmp); + + sort_02v_merge_descending(d01, d02); + sort_02v_merge_descending(d03, d04); + } + static INLINE void sort_04v_merge_ascending(__m512i& d01, __m512i& d02, __m512i& d03, __m512i& d04) { + __m512i tmp; + + tmp = d01; + d01 = _mm512_min_epu32(d03, d01); + d03 = _mm512_max_epu32(d03, tmp); + + tmp = d02; + d02 = _mm512_min_epu32(d04, d02); + d04 = _mm512_max_epu32(d04, tmp); + + sort_02v_merge_ascending(d01, d02); + sort_02v_merge_ascending(d03, d04); + } + static INLINE void sort_04v_merge_descending(__m512i& d01, __m512i& d02, __m512i& d03, __m512i& d04) { + __m512i tmp; + + tmp = d01; + d01 = _mm512_min_epu32(d03, d01); + d03 = _mm512_max_epu32(d03, tmp); + + tmp = d02; + d02 = _mm512_min_epu32(d04, d02); + d04 = _mm512_max_epu32(d04, tmp); + + sort_02v_merge_descending(d01, d02); + sort_02v_merge_descending(d03, d04); + } + static INLINE void sort_05v_ascending(__m512i& d01, __m512i& d02, __m512i& d03, __m512i& d04, __m512i& d05) { + __m512i tmp; + + sort_04v_ascending(d01, d02, d03, d04); + sort_01v_descending(d05); + + tmp = d05; + d05 = _mm512_max_epu32(d04, d05); + d04 = _mm512_min_epu32(d04, tmp); + + sort_04v_merge_ascending(d01, d02, d03, d04); + sort_01v_merge_ascending(d05); + } + static INLINE void sort_05v_descending(__m512i& d01, __m512i& d02, __m512i& d03, __m512i& d04, __m512i& d05) { + __m512i tmp; + + sort_04v_descending(d01, d02, d03, d04); + sort_01v_ascending(d05); + + tmp = d05; + d05 = _mm512_max_epu32(d04, d05); + d04 = _mm512_min_epu32(d04, tmp); + + sort_04v_merge_descending(d01, d02, d03, d04); + sort_01v_merge_descending(d05); + } + static INLINE void sort_05v_merge_ascending(__m512i& d01, __m512i& d02, __m512i& 
d03, __m512i& d04, __m512i& d05) { + __m512i tmp; + + tmp = d01; + d01 = _mm512_min_epu32(d05, d01); + d05 = _mm512_max_epu32(d05, tmp); + + sort_04v_merge_ascending(d01, d02, d03, d04); + sort_01v_merge_ascending(d05); + } + static INLINE void sort_05v_merge_descending(__m512i& d01, __m512i& d02, __m512i& d03, __m512i& d04, __m512i& d05) { + __m512i tmp; + + tmp = d01; + d01 = _mm512_min_epu32(d05, d01); + d05 = _mm512_max_epu32(d05, tmp); + + sort_04v_merge_descending(d01, d02, d03, d04); + sort_01v_merge_descending(d05); + } + static INLINE void sort_06v_ascending(__m512i& d01, __m512i& d02, __m512i& d03, __m512i& d04, __m512i& d05, __m512i& d06) { + __m512i tmp; + + sort_04v_ascending(d01, d02, d03, d04); + sort_02v_descending(d05, d06); + + tmp = d05; + d05 = _mm512_max_epu32(d04, d05); + d04 = _mm512_min_epu32(d04, tmp); + + tmp = d06; + d06 = _mm512_max_epu32(d03, d06); + d03 = _mm512_min_epu32(d03, tmp); + + sort_04v_merge_ascending(d01, d02, d03, d04); + sort_02v_merge_ascending(d05, d06); + } + static INLINE void sort_06v_descending(__m512i& d01, __m512i& d02, __m512i& d03, __m512i& d04, __m512i& d05, __m512i& d06) { + __m512i tmp; + + sort_04v_descending(d01, d02, d03, d04); + sort_02v_ascending(d05, d06); + + tmp = d05; + d05 = _mm512_max_epu32(d04, d05); + d04 = _mm512_min_epu32(d04, tmp); + + tmp = d06; + d06 = _mm512_max_epu32(d03, d06); + d03 = _mm512_min_epu32(d03, tmp); + + sort_04v_merge_descending(d01, d02, d03, d04); + sort_02v_merge_descending(d05, d06); + } + static INLINE void sort_06v_merge_ascending(__m512i& d01, __m512i& d02, __m512i& d03, __m512i& d04, __m512i& d05, __m512i& d06) { + __m512i tmp; + + tmp = d01; + d01 = _mm512_min_epu32(d05, d01); + d05 = _mm512_max_epu32(d05, tmp); + + tmp = d02; + d02 = _mm512_min_epu32(d06, d02); + d06 = _mm512_max_epu32(d06, tmp); + + sort_04v_merge_ascending(d01, d02, d03, d04); + sort_02v_merge_ascending(d05, d06); + } + static INLINE void sort_06v_merge_descending(__m512i& d01, __m512i& d02, __m512i& d03, __m512i& d04, __m512i& d05, __m512i& d06) { + __m512i tmp; + + tmp = d01; + d01 = _mm512_min_epu32(d05, d01); + d05 = _mm512_max_epu32(d05, tmp); + + tmp = d02; + d02 = _mm512_min_epu32(d06, d02); + d06 = _mm512_max_epu32(d06, tmp); + + sort_04v_merge_descending(d01, d02, d03, d04); + sort_02v_merge_descending(d05, d06); + } + static INLINE void sort_07v_ascending(__m512i& d01, __m512i& d02, __m512i& d03, __m512i& d04, __m512i& d05, __m512i& d06, __m512i& d07) { + __m512i tmp; + + sort_04v_ascending(d01, d02, d03, d04); + sort_03v_descending(d05, d06, d07); + + tmp = d05; + d05 = _mm512_max_epu32(d04, d05); + d04 = _mm512_min_epu32(d04, tmp); + + tmp = d06; + d06 = _mm512_max_epu32(d03, d06); + d03 = _mm512_min_epu32(d03, tmp); + + tmp = d07; + d07 = _mm512_max_epu32(d02, d07); + d02 = _mm512_min_epu32(d02, tmp); + + sort_04v_merge_ascending(d01, d02, d03, d04); + sort_03v_merge_ascending(d05, d06, d07); + } + static INLINE void sort_07v_descending(__m512i& d01, __m512i& d02, __m512i& d03, __m512i& d04, __m512i& d05, __m512i& d06, __m512i& d07) { + __m512i tmp; + + sort_04v_descending(d01, d02, d03, d04); + sort_03v_ascending(d05, d06, d07); + + tmp = d05; + d05 = _mm512_max_epu32(d04, d05); + d04 = _mm512_min_epu32(d04, tmp); + + tmp = d06; + d06 = _mm512_max_epu32(d03, d06); + d03 = _mm512_min_epu32(d03, tmp); + + tmp = d07; + d07 = _mm512_max_epu32(d02, d07); + d02 = _mm512_min_epu32(d02, tmp); + + sort_04v_merge_descending(d01, d02, d03, d04); + sort_03v_merge_descending(d05, d06, d07); + } + static INLINE void 
sort_07v_merge_ascending(__m512i& d01, __m512i& d02, __m512i& d03, __m512i& d04, __m512i& d05, __m512i& d06, __m512i& d07) { + __m512i tmp; + + tmp = d01; + d01 = _mm512_min_epu32(d05, d01); + d05 = _mm512_max_epu32(d05, tmp); + + tmp = d02; + d02 = _mm512_min_epu32(d06, d02); + d06 = _mm512_max_epu32(d06, tmp); + + tmp = d03; + d03 = _mm512_min_epu32(d07, d03); + d07 = _mm512_max_epu32(d07, tmp); + + sort_04v_merge_ascending(d01, d02, d03, d04); + sort_03v_merge_ascending(d05, d06, d07); + } + static INLINE void sort_07v_merge_descending(__m512i& d01, __m512i& d02, __m512i& d03, __m512i& d04, __m512i& d05, __m512i& d06, __m512i& d07) { + __m512i tmp; + + tmp = d01; + d01 = _mm512_min_epu32(d05, d01); + d05 = _mm512_max_epu32(d05, tmp); + + tmp = d02; + d02 = _mm512_min_epu32(d06, d02); + d06 = _mm512_max_epu32(d06, tmp); + + tmp = d03; + d03 = _mm512_min_epu32(d07, d03); + d07 = _mm512_max_epu32(d07, tmp); + + sort_04v_merge_descending(d01, d02, d03, d04); + sort_03v_merge_descending(d05, d06, d07); + } + static INLINE void sort_08v_ascending(__m512i& d01, __m512i& d02, __m512i& d03, __m512i& d04, __m512i& d05, __m512i& d06, __m512i& d07, __m512i& d08) { + __m512i tmp; + + sort_04v_ascending(d01, d02, d03, d04); + sort_04v_descending(d05, d06, d07, d08); + + tmp = d05; + d05 = _mm512_max_epu32(d04, d05); + d04 = _mm512_min_epu32(d04, tmp); + + tmp = d06; + d06 = _mm512_max_epu32(d03, d06); + d03 = _mm512_min_epu32(d03, tmp); + + tmp = d07; + d07 = _mm512_max_epu32(d02, d07); + d02 = _mm512_min_epu32(d02, tmp); + + tmp = d08; + d08 = _mm512_max_epu32(d01, d08); + d01 = _mm512_min_epu32(d01, tmp); + + sort_04v_merge_ascending(d01, d02, d03, d04); + sort_04v_merge_ascending(d05, d06, d07, d08); + } + static INLINE void sort_08v_descending(__m512i& d01, __m512i& d02, __m512i& d03, __m512i& d04, __m512i& d05, __m512i& d06, __m512i& d07, __m512i& d08) { + __m512i tmp; + + sort_04v_descending(d01, d02, d03, d04); + sort_04v_ascending(d05, d06, d07, d08); + + tmp = d05; + d05 = _mm512_max_epu32(d04, d05); + d04 = _mm512_min_epu32(d04, tmp); + + tmp = d06; + d06 = _mm512_max_epu32(d03, d06); + d03 = _mm512_min_epu32(d03, tmp); + + tmp = d07; + d07 = _mm512_max_epu32(d02, d07); + d02 = _mm512_min_epu32(d02, tmp); + + tmp = d08; + d08 = _mm512_max_epu32(d01, d08); + d01 = _mm512_min_epu32(d01, tmp); + + sort_04v_merge_descending(d01, d02, d03, d04); + sort_04v_merge_descending(d05, d06, d07, d08); + } + static INLINE void sort_08v_merge_ascending(__m512i& d01, __m512i& d02, __m512i& d03, __m512i& d04, __m512i& d05, __m512i& d06, __m512i& d07, __m512i& d08) { + __m512i tmp; + + tmp = d01; + d01 = _mm512_min_epu32(d05, d01); + d05 = _mm512_max_epu32(d05, tmp); + + tmp = d02; + d02 = _mm512_min_epu32(d06, d02); + d06 = _mm512_max_epu32(d06, tmp); + + tmp = d03; + d03 = _mm512_min_epu32(d07, d03); + d07 = _mm512_max_epu32(d07, tmp); + + tmp = d04; + d04 = _mm512_min_epu32(d08, d04); + d08 = _mm512_max_epu32(d08, tmp); + + sort_04v_merge_ascending(d01, d02, d03, d04); + sort_04v_merge_ascending(d05, d06, d07, d08); + } + static INLINE void sort_08v_merge_descending(__m512i& d01, __m512i& d02, __m512i& d03, __m512i& d04, __m512i& d05, __m512i& d06, __m512i& d07, __m512i& d08) { + __m512i tmp; + + tmp = d01; + d01 = _mm512_min_epu32(d05, d01); + d05 = _mm512_max_epu32(d05, tmp); + + tmp = d02; + d02 = _mm512_min_epu32(d06, d02); + d06 = _mm512_max_epu32(d06, tmp); + + tmp = d03; + d03 = _mm512_min_epu32(d07, d03); + d07 = _mm512_max_epu32(d07, tmp); + + tmp = d04; + d04 = _mm512_min_epu32(d08, d04); + d08 
= _mm512_max_epu32(d08, tmp); + + sort_04v_merge_descending(d01, d02, d03, d04); + sort_04v_merge_descending(d05, d06, d07, d08); + } + static INLINE void sort_09v_ascending(__m512i& d01, __m512i& d02, __m512i& d03, __m512i& d04, __m512i& d05, __m512i& d06, __m512i& d07, __m512i& d08, __m512i& d09) { + __m512i tmp; + + sort_08v_ascending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_01v_descending(d09); + + tmp = d09; + d09 = _mm512_max_epu32(d08, d09); + d08 = _mm512_min_epu32(d08, tmp); + + sort_08v_merge_ascending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_01v_merge_ascending(d09); + } + static INLINE void sort_09v_descending(__m512i& d01, __m512i& d02, __m512i& d03, __m512i& d04, __m512i& d05, __m512i& d06, __m512i& d07, __m512i& d08, __m512i& d09) { + __m512i tmp; + + sort_08v_descending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_01v_ascending(d09); + + tmp = d09; + d09 = _mm512_max_epu32(d08, d09); + d08 = _mm512_min_epu32(d08, tmp); + + sort_08v_merge_descending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_01v_merge_descending(d09); + } + static INLINE void sort_10v_ascending(__m512i& d01, __m512i& d02, __m512i& d03, __m512i& d04, __m512i& d05, __m512i& d06, __m512i& d07, __m512i& d08, __m512i& d09, __m512i& d10) { + __m512i tmp; + + sort_08v_ascending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_02v_descending(d09, d10); + + tmp = d09; + d09 = _mm512_max_epu32(d08, d09); + d08 = _mm512_min_epu32(d08, tmp); + + tmp = d10; + d10 = _mm512_max_epu32(d07, d10); + d07 = _mm512_min_epu32(d07, tmp); + + sort_08v_merge_ascending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_02v_merge_ascending(d09, d10); + } + static INLINE void sort_10v_descending(__m512i& d01, __m512i& d02, __m512i& d03, __m512i& d04, __m512i& d05, __m512i& d06, __m512i& d07, __m512i& d08, __m512i& d09, __m512i& d10) { + __m512i tmp; + + sort_08v_descending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_02v_ascending(d09, d10); + + tmp = d09; + d09 = _mm512_max_epu32(d08, d09); + d08 = _mm512_min_epu32(d08, tmp); + + tmp = d10; + d10 = _mm512_max_epu32(d07, d10); + d07 = _mm512_min_epu32(d07, tmp); + + sort_08v_merge_descending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_02v_merge_descending(d09, d10); + } + static INLINE void sort_11v_ascending(__m512i& d01, __m512i& d02, __m512i& d03, __m512i& d04, __m512i& d05, __m512i& d06, __m512i& d07, __m512i& d08, __m512i& d09, __m512i& d10, __m512i& d11) { + __m512i tmp; + + sort_08v_ascending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_03v_descending(d09, d10, d11); + + tmp = d09; + d09 = _mm512_max_epu32(d08, d09); + d08 = _mm512_min_epu32(d08, tmp); + + tmp = d10; + d10 = _mm512_max_epu32(d07, d10); + d07 = _mm512_min_epu32(d07, tmp); + + tmp = d11; + d11 = _mm512_max_epu32(d06, d11); + d06 = _mm512_min_epu32(d06, tmp); + + sort_08v_merge_ascending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_03v_merge_ascending(d09, d10, d11); + } + static INLINE void sort_11v_descending(__m512i& d01, __m512i& d02, __m512i& d03, __m512i& d04, __m512i& d05, __m512i& d06, __m512i& d07, __m512i& d08, __m512i& d09, __m512i& d10, __m512i& d11) { + __m512i tmp; + + sort_08v_descending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_03v_ascending(d09, d10, d11); + + tmp = d09; + d09 = _mm512_max_epu32(d08, d09); + d08 = _mm512_min_epu32(d08, tmp); + + tmp = d10; + d10 = _mm512_max_epu32(d07, d10); + d07 = _mm512_min_epu32(d07, tmp); + + tmp = d11; + d11 = _mm512_max_epu32(d06, d11); + d06 = _mm512_min_epu32(d06, tmp); + + sort_08v_merge_descending(d01, d02, d03, d04, 
d05, d06, d07, d08); + sort_03v_merge_descending(d09, d10, d11); + } + static INLINE void sort_12v_ascending(__m512i& d01, __m512i& d02, __m512i& d03, __m512i& d04, __m512i& d05, __m512i& d06, __m512i& d07, __m512i& d08, __m512i& d09, __m512i& d10, __m512i& d11, __m512i& d12) { + __m512i tmp; + + sort_08v_ascending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_04v_descending(d09, d10, d11, d12); + + tmp = d09; + d09 = _mm512_max_epu32(d08, d09); + d08 = _mm512_min_epu32(d08, tmp); + + tmp = d10; + d10 = _mm512_max_epu32(d07, d10); + d07 = _mm512_min_epu32(d07, tmp); + + tmp = d11; + d11 = _mm512_max_epu32(d06, d11); + d06 = _mm512_min_epu32(d06, tmp); + + tmp = d12; + d12 = _mm512_max_epu32(d05, d12); + d05 = _mm512_min_epu32(d05, tmp); + + sort_08v_merge_ascending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_04v_merge_ascending(d09, d10, d11, d12); + } + static INLINE void sort_12v_descending(__m512i& d01, __m512i& d02, __m512i& d03, __m512i& d04, __m512i& d05, __m512i& d06, __m512i& d07, __m512i& d08, __m512i& d09, __m512i& d10, __m512i& d11, __m512i& d12) { + __m512i tmp; + + sort_08v_descending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_04v_ascending(d09, d10, d11, d12); + + tmp = d09; + d09 = _mm512_max_epu32(d08, d09); + d08 = _mm512_min_epu32(d08, tmp); + + tmp = d10; + d10 = _mm512_max_epu32(d07, d10); + d07 = _mm512_min_epu32(d07, tmp); + + tmp = d11; + d11 = _mm512_max_epu32(d06, d11); + d06 = _mm512_min_epu32(d06, tmp); + + tmp = d12; + d12 = _mm512_max_epu32(d05, d12); + d05 = _mm512_min_epu32(d05, tmp); + + sort_08v_merge_descending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_04v_merge_descending(d09, d10, d11, d12); + } + static INLINE void sort_13v_ascending(__m512i& d01, __m512i& d02, __m512i& d03, __m512i& d04, __m512i& d05, __m512i& d06, __m512i& d07, __m512i& d08, __m512i& d09, __m512i& d10, __m512i& d11, __m512i& d12, __m512i& d13) { + __m512i tmp; + + sort_08v_ascending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_05v_descending(d09, d10, d11, d12, d13); + + tmp = d09; + d09 = _mm512_max_epu32(d08, d09); + d08 = _mm512_min_epu32(d08, tmp); + + tmp = d10; + d10 = _mm512_max_epu32(d07, d10); + d07 = _mm512_min_epu32(d07, tmp); + + tmp = d11; + d11 = _mm512_max_epu32(d06, d11); + d06 = _mm512_min_epu32(d06, tmp); + + tmp = d12; + d12 = _mm512_max_epu32(d05, d12); + d05 = _mm512_min_epu32(d05, tmp); + + tmp = d13; + d13 = _mm512_max_epu32(d04, d13); + d04 = _mm512_min_epu32(d04, tmp); + + sort_08v_merge_ascending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_05v_merge_ascending(d09, d10, d11, d12, d13); + } + static INLINE void sort_13v_descending(__m512i& d01, __m512i& d02, __m512i& d03, __m512i& d04, __m512i& d05, __m512i& d06, __m512i& d07, __m512i& d08, __m512i& d09, __m512i& d10, __m512i& d11, __m512i& d12, __m512i& d13) { + __m512i tmp; + + sort_08v_descending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_05v_ascending(d09, d10, d11, d12, d13); + + tmp = d09; + d09 = _mm512_max_epu32(d08, d09); + d08 = _mm512_min_epu32(d08, tmp); + + tmp = d10; + d10 = _mm512_max_epu32(d07, d10); + d07 = _mm512_min_epu32(d07, tmp); + + tmp = d11; + d11 = _mm512_max_epu32(d06, d11); + d06 = _mm512_min_epu32(d06, tmp); + + tmp = d12; + d12 = _mm512_max_epu32(d05, d12); + d05 = _mm512_min_epu32(d05, tmp); + + tmp = d13; + d13 = _mm512_max_epu32(d04, d13); + d04 = _mm512_min_epu32(d04, tmp); + + sort_08v_merge_descending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_05v_merge_descending(d09, d10, d11, d12, d13); + } + static INLINE void sort_14v_ascending(__m512i& 
d01, __m512i& d02, __m512i& d03, __m512i& d04, __m512i& d05, __m512i& d06, __m512i& d07, __m512i& d08, __m512i& d09, __m512i& d10, __m512i& d11, __m512i& d12, __m512i& d13, __m512i& d14) { + __m512i tmp; + + sort_08v_ascending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_06v_descending(d09, d10, d11, d12, d13, d14); + + tmp = d09; + d09 = _mm512_max_epu32(d08, d09); + d08 = _mm512_min_epu32(d08, tmp); + + tmp = d10; + d10 = _mm512_max_epu32(d07, d10); + d07 = _mm512_min_epu32(d07, tmp); + + tmp = d11; + d11 = _mm512_max_epu32(d06, d11); + d06 = _mm512_min_epu32(d06, tmp); + + tmp = d12; + d12 = _mm512_max_epu32(d05, d12); + d05 = _mm512_min_epu32(d05, tmp); + + tmp = d13; + d13 = _mm512_max_epu32(d04, d13); + d04 = _mm512_min_epu32(d04, tmp); + + tmp = d14; + d14 = _mm512_max_epu32(d03, d14); + d03 = _mm512_min_epu32(d03, tmp); + + sort_08v_merge_ascending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_06v_merge_ascending(d09, d10, d11, d12, d13, d14); + } + static INLINE void sort_14v_descending(__m512i& d01, __m512i& d02, __m512i& d03, __m512i& d04, __m512i& d05, __m512i& d06, __m512i& d07, __m512i& d08, __m512i& d09, __m512i& d10, __m512i& d11, __m512i& d12, __m512i& d13, __m512i& d14) { + __m512i tmp; + + sort_08v_descending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_06v_ascending(d09, d10, d11, d12, d13, d14); + + tmp = d09; + d09 = _mm512_max_epu32(d08, d09); + d08 = _mm512_min_epu32(d08, tmp); + + tmp = d10; + d10 = _mm512_max_epu32(d07, d10); + d07 = _mm512_min_epu32(d07, tmp); + + tmp = d11; + d11 = _mm512_max_epu32(d06, d11); + d06 = _mm512_min_epu32(d06, tmp); + + tmp = d12; + d12 = _mm512_max_epu32(d05, d12); + d05 = _mm512_min_epu32(d05, tmp); + + tmp = d13; + d13 = _mm512_max_epu32(d04, d13); + d04 = _mm512_min_epu32(d04, tmp); + + tmp = d14; + d14 = _mm512_max_epu32(d03, d14); + d03 = _mm512_min_epu32(d03, tmp); + + sort_08v_merge_descending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_06v_merge_descending(d09, d10, d11, d12, d13, d14); + } + static INLINE void sort_15v_ascending(__m512i& d01, __m512i& d02, __m512i& d03, __m512i& d04, __m512i& d05, __m512i& d06, __m512i& d07, __m512i& d08, __m512i& d09, __m512i& d10, __m512i& d11, __m512i& d12, __m512i& d13, __m512i& d14, __m512i& d15) { + __m512i tmp; + + sort_08v_ascending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_07v_descending(d09, d10, d11, d12, d13, d14, d15); + + tmp = d09; + d09 = _mm512_max_epu32(d08, d09); + d08 = _mm512_min_epu32(d08, tmp); + + tmp = d10; + d10 = _mm512_max_epu32(d07, d10); + d07 = _mm512_min_epu32(d07, tmp); + + tmp = d11; + d11 = _mm512_max_epu32(d06, d11); + d06 = _mm512_min_epu32(d06, tmp); + + tmp = d12; + d12 = _mm512_max_epu32(d05, d12); + d05 = _mm512_min_epu32(d05, tmp); + + tmp = d13; + d13 = _mm512_max_epu32(d04, d13); + d04 = _mm512_min_epu32(d04, tmp); + + tmp = d14; + d14 = _mm512_max_epu32(d03, d14); + d03 = _mm512_min_epu32(d03, tmp); + + tmp = d15; + d15 = _mm512_max_epu32(d02, d15); + d02 = _mm512_min_epu32(d02, tmp); + + sort_08v_merge_ascending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_07v_merge_ascending(d09, d10, d11, d12, d13, d14, d15); + } + static INLINE void sort_15v_descending(__m512i& d01, __m512i& d02, __m512i& d03, __m512i& d04, __m512i& d05, __m512i& d06, __m512i& d07, __m512i& d08, __m512i& d09, __m512i& d10, __m512i& d11, __m512i& d12, __m512i& d13, __m512i& d14, __m512i& d15) { + __m512i tmp; + + sort_08v_descending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_07v_ascending(d09, d10, d11, d12, d13, d14, d15); + + tmp = d09; + d09 = 
_mm512_max_epu32(d08, d09); + d08 = _mm512_min_epu32(d08, tmp); + + tmp = d10; + d10 = _mm512_max_epu32(d07, d10); + d07 = _mm512_min_epu32(d07, tmp); + + tmp = d11; + d11 = _mm512_max_epu32(d06, d11); + d06 = _mm512_min_epu32(d06, tmp); + + tmp = d12; + d12 = _mm512_max_epu32(d05, d12); + d05 = _mm512_min_epu32(d05, tmp); + + tmp = d13; + d13 = _mm512_max_epu32(d04, d13); + d04 = _mm512_min_epu32(d04, tmp); + + tmp = d14; + d14 = _mm512_max_epu32(d03, d14); + d03 = _mm512_min_epu32(d03, tmp); + + tmp = d15; + d15 = _mm512_max_epu32(d02, d15); + d02 = _mm512_min_epu32(d02, tmp); + + sort_08v_merge_descending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_07v_merge_descending(d09, d10, d11, d12, d13, d14, d15); + } + static INLINE void sort_16v_ascending(__m512i& d01, __m512i& d02, __m512i& d03, __m512i& d04, __m512i& d05, __m512i& d06, __m512i& d07, __m512i& d08, __m512i& d09, __m512i& d10, __m512i& d11, __m512i& d12, __m512i& d13, __m512i& d14, __m512i& d15, __m512i& d16) { + __m512i tmp; + + sort_08v_ascending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_08v_descending(d09, d10, d11, d12, d13, d14, d15, d16); + + tmp = d09; + d09 = _mm512_max_epu32(d08, d09); + d08 = _mm512_min_epu32(d08, tmp); + + tmp = d10; + d10 = _mm512_max_epu32(d07, d10); + d07 = _mm512_min_epu32(d07, tmp); + + tmp = d11; + d11 = _mm512_max_epu32(d06, d11); + d06 = _mm512_min_epu32(d06, tmp); + + tmp = d12; + d12 = _mm512_max_epu32(d05, d12); + d05 = _mm512_min_epu32(d05, tmp); + + tmp = d13; + d13 = _mm512_max_epu32(d04, d13); + d04 = _mm512_min_epu32(d04, tmp); + + tmp = d14; + d14 = _mm512_max_epu32(d03, d14); + d03 = _mm512_min_epu32(d03, tmp); + + tmp = d15; + d15 = _mm512_max_epu32(d02, d15); + d02 = _mm512_min_epu32(d02, tmp); + + tmp = d16; + d16 = _mm512_max_epu32(d01, d16); + d01 = _mm512_min_epu32(d01, tmp); + + sort_08v_merge_ascending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_08v_merge_ascending(d09, d10, d11, d12, d13, d14, d15, d16); + } + static INLINE void sort_16v_descending(__m512i& d01, __m512i& d02, __m512i& d03, __m512i& d04, __m512i& d05, __m512i& d06, __m512i& d07, __m512i& d08, __m512i& d09, __m512i& d10, __m512i& d11, __m512i& d12, __m512i& d13, __m512i& d14, __m512i& d15, __m512i& d16) { + __m512i tmp; + + sort_08v_descending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_08v_ascending(d09, d10, d11, d12, d13, d14, d15, d16); + + tmp = d09; + d09 = _mm512_max_epu32(d08, d09); + d08 = _mm512_min_epu32(d08, tmp); + + tmp = d10; + d10 = _mm512_max_epu32(d07, d10); + d07 = _mm512_min_epu32(d07, tmp); + + tmp = d11; + d11 = _mm512_max_epu32(d06, d11); + d06 = _mm512_min_epu32(d06, tmp); + + tmp = d12; + d12 = _mm512_max_epu32(d05, d12); + d05 = _mm512_min_epu32(d05, tmp); + + tmp = d13; + d13 = _mm512_max_epu32(d04, d13); + d04 = _mm512_min_epu32(d04, tmp); + + tmp = d14; + d14 = _mm512_max_epu32(d03, d14); + d03 = _mm512_min_epu32(d03, tmp); + + tmp = d15; + d15 = _mm512_max_epu32(d02, d15); + d02 = _mm512_min_epu32(d02, tmp); + + tmp = d16; + d16 = _mm512_max_epu32(d01, d16); + d01 = _mm512_min_epu32(d01, tmp); + + sort_08v_merge_descending(d01, d02, d03, d04, d05, d06, d07, d08); + sort_08v_merge_descending(d09, d10, d11, d12, d13, d14, d15, d16); + } + + static NOINLINE void sort_01v(uint32_t *ptr) { + __m512i d01 = _mm512_loadu_si512((__m512i const *) ptr + 0);; + sort_01v_ascending(d01); + _mm512_storeu_si512((__m512i *) ptr + 0, d01); +} + + static NOINLINE void sort_02v(uint32_t *ptr) { + __m512i d01 = _mm512_loadu_si512((__m512i const *) ptr + 0);; + __m512i d02 = 
_mm512_loadu_si512((__m512i const *) ptr + 1);; + sort_02v_ascending(d01, d02); + _mm512_storeu_si512((__m512i *) ptr + 0, d01); + _mm512_storeu_si512((__m512i *) ptr + 1, d02); +} + + static NOINLINE void sort_03v(uint32_t *ptr) { + __m512i d01 = _mm512_loadu_si512((__m512i const *) ptr + 0);; + __m512i d02 = _mm512_loadu_si512((__m512i const *) ptr + 1);; + __m512i d03 = _mm512_loadu_si512((__m512i const *) ptr + 2);; + sort_03v_ascending(d01, d02, d03); + _mm512_storeu_si512((__m512i *) ptr + 0, d01); + _mm512_storeu_si512((__m512i *) ptr + 1, d02); + _mm512_storeu_si512((__m512i *) ptr + 2, d03); +} + + static NOINLINE void sort_04v(uint32_t *ptr) { + __m512i d01 = _mm512_loadu_si512((__m512i const *) ptr + 0);; + __m512i d02 = _mm512_loadu_si512((__m512i const *) ptr + 1);; + __m512i d03 = _mm512_loadu_si512((__m512i const *) ptr + 2);; + __m512i d04 = _mm512_loadu_si512((__m512i const *) ptr + 3);; + sort_04v_ascending(d01, d02, d03, d04); + _mm512_storeu_si512((__m512i *) ptr + 0, d01); + _mm512_storeu_si512((__m512i *) ptr + 1, d02); + _mm512_storeu_si512((__m512i *) ptr + 2, d03); + _mm512_storeu_si512((__m512i *) ptr + 3, d04); +} + + static NOINLINE void sort_05v(uint32_t *ptr) { + __m512i d01 = _mm512_loadu_si512((__m512i const *) ptr + 0);; + __m512i d02 = _mm512_loadu_si512((__m512i const *) ptr + 1);; + __m512i d03 = _mm512_loadu_si512((__m512i const *) ptr + 2);; + __m512i d04 = _mm512_loadu_si512((__m512i const *) ptr + 3);; + __m512i d05 = _mm512_loadu_si512((__m512i const *) ptr + 4);; + sort_05v_ascending(d01, d02, d03, d04, d05); + _mm512_storeu_si512((__m512i *) ptr + 0, d01); + _mm512_storeu_si512((__m512i *) ptr + 1, d02); + _mm512_storeu_si512((__m512i *) ptr + 2, d03); + _mm512_storeu_si512((__m512i *) ptr + 3, d04); + _mm512_storeu_si512((__m512i *) ptr + 4, d05); +} + + static NOINLINE void sort_06v(uint32_t *ptr) { + __m512i d01 = _mm512_loadu_si512((__m512i const *) ptr + 0);; + __m512i d02 = _mm512_loadu_si512((__m512i const *) ptr + 1);; + __m512i d03 = _mm512_loadu_si512((__m512i const *) ptr + 2);; + __m512i d04 = _mm512_loadu_si512((__m512i const *) ptr + 3);; + __m512i d05 = _mm512_loadu_si512((__m512i const *) ptr + 4);; + __m512i d06 = _mm512_loadu_si512((__m512i const *) ptr + 5);; + sort_06v_ascending(d01, d02, d03, d04, d05, d06); + _mm512_storeu_si512((__m512i *) ptr + 0, d01); + _mm512_storeu_si512((__m512i *) ptr + 1, d02); + _mm512_storeu_si512((__m512i *) ptr + 2, d03); + _mm512_storeu_si512((__m512i *) ptr + 3, d04); + _mm512_storeu_si512((__m512i *) ptr + 4, d05); + _mm512_storeu_si512((__m512i *) ptr + 5, d06); +} + + static NOINLINE void sort_07v(uint32_t *ptr) { + __m512i d01 = _mm512_loadu_si512((__m512i const *) ptr + 0);; + __m512i d02 = _mm512_loadu_si512((__m512i const *) ptr + 1);; + __m512i d03 = _mm512_loadu_si512((__m512i const *) ptr + 2);; + __m512i d04 = _mm512_loadu_si512((__m512i const *) ptr + 3);; + __m512i d05 = _mm512_loadu_si512((__m512i const *) ptr + 4);; + __m512i d06 = _mm512_loadu_si512((__m512i const *) ptr + 5);; + __m512i d07 = _mm512_loadu_si512((__m512i const *) ptr + 6);; + sort_07v_ascending(d01, d02, d03, d04, d05, d06, d07); + _mm512_storeu_si512((__m512i *) ptr + 0, d01); + _mm512_storeu_si512((__m512i *) ptr + 1, d02); + _mm512_storeu_si512((__m512i *) ptr + 2, d03); + _mm512_storeu_si512((__m512i *) ptr + 3, d04); + _mm512_storeu_si512((__m512i *) ptr + 4, d05); + _mm512_storeu_si512((__m512i *) ptr + 5, d06); + _mm512_storeu_si512((__m512i *) ptr + 6, d07); +} + + static NOINLINE void sort_08v(uint32_t 
*ptr) { + __m512i d01 = _mm512_loadu_si512((__m512i const *) ptr + 0);; + __m512i d02 = _mm512_loadu_si512((__m512i const *) ptr + 1);; + __m512i d03 = _mm512_loadu_si512((__m512i const *) ptr + 2);; + __m512i d04 = _mm512_loadu_si512((__m512i const *) ptr + 3);; + __m512i d05 = _mm512_loadu_si512((__m512i const *) ptr + 4);; + __m512i d06 = _mm512_loadu_si512((__m512i const *) ptr + 5);; + __m512i d07 = _mm512_loadu_si512((__m512i const *) ptr + 6);; + __m512i d08 = _mm512_loadu_si512((__m512i const *) ptr + 7);; + sort_08v_ascending(d01, d02, d03, d04, d05, d06, d07, d08); + _mm512_storeu_si512((__m512i *) ptr + 0, d01); + _mm512_storeu_si512((__m512i *) ptr + 1, d02); + _mm512_storeu_si512((__m512i *) ptr + 2, d03); + _mm512_storeu_si512((__m512i *) ptr + 3, d04); + _mm512_storeu_si512((__m512i *) ptr + 4, d05); + _mm512_storeu_si512((__m512i *) ptr + 5, d06); + _mm512_storeu_si512((__m512i *) ptr + 6, d07); + _mm512_storeu_si512((__m512i *) ptr + 7, d08); +} + + static NOINLINE void sort_09v(uint32_t *ptr) { + __m512i d01 = _mm512_loadu_si512((__m512i const *) ptr + 0);; + __m512i d02 = _mm512_loadu_si512((__m512i const *) ptr + 1);; + __m512i d03 = _mm512_loadu_si512((__m512i const *) ptr + 2);; + __m512i d04 = _mm512_loadu_si512((__m512i const *) ptr + 3);; + __m512i d05 = _mm512_loadu_si512((__m512i const *) ptr + 4);; + __m512i d06 = _mm512_loadu_si512((__m512i const *) ptr + 5);; + __m512i d07 = _mm512_loadu_si512((__m512i const *) ptr + 6);; + __m512i d08 = _mm512_loadu_si512((__m512i const *) ptr + 7);; + __m512i d09 = _mm512_loadu_si512((__m512i const *) ptr + 8);; + sort_09v_ascending(d01, d02, d03, d04, d05, d06, d07, d08, d09); + _mm512_storeu_si512((__m512i *) ptr + 0, d01); + _mm512_storeu_si512((__m512i *) ptr + 1, d02); + _mm512_storeu_si512((__m512i *) ptr + 2, d03); + _mm512_storeu_si512((__m512i *) ptr + 3, d04); + _mm512_storeu_si512((__m512i *) ptr + 4, d05); + _mm512_storeu_si512((__m512i *) ptr + 5, d06); + _mm512_storeu_si512((__m512i *) ptr + 6, d07); + _mm512_storeu_si512((__m512i *) ptr + 7, d08); + _mm512_storeu_si512((__m512i *) ptr + 8, d09); +} + + static NOINLINE void sort_10v(uint32_t *ptr) { + __m512i d01 = _mm512_loadu_si512((__m512i const *) ptr + 0);; + __m512i d02 = _mm512_loadu_si512((__m512i const *) ptr + 1);; + __m512i d03 = _mm512_loadu_si512((__m512i const *) ptr + 2);; + __m512i d04 = _mm512_loadu_si512((__m512i const *) ptr + 3);; + __m512i d05 = _mm512_loadu_si512((__m512i const *) ptr + 4);; + __m512i d06 = _mm512_loadu_si512((__m512i const *) ptr + 5);; + __m512i d07 = _mm512_loadu_si512((__m512i const *) ptr + 6);; + __m512i d08 = _mm512_loadu_si512((__m512i const *) ptr + 7);; + __m512i d09 = _mm512_loadu_si512((__m512i const *) ptr + 8);; + __m512i d10 = _mm512_loadu_si512((__m512i const *) ptr + 9);; + sort_10v_ascending(d01, d02, d03, d04, d05, d06, d07, d08, d09, d10); + _mm512_storeu_si512((__m512i *) ptr + 0, d01); + _mm512_storeu_si512((__m512i *) ptr + 1, d02); + _mm512_storeu_si512((__m512i *) ptr + 2, d03); + _mm512_storeu_si512((__m512i *) ptr + 3, d04); + _mm512_storeu_si512((__m512i *) ptr + 4, d05); + _mm512_storeu_si512((__m512i *) ptr + 5, d06); + _mm512_storeu_si512((__m512i *) ptr + 6, d07); + _mm512_storeu_si512((__m512i *) ptr + 7, d08); + _mm512_storeu_si512((__m512i *) ptr + 8, d09); + _mm512_storeu_si512((__m512i *) ptr + 9, d10); +} + + static NOINLINE void sort_11v(uint32_t *ptr) { + __m512i d01 = _mm512_loadu_si512((__m512i const *) ptr + 0);; + __m512i d02 = _mm512_loadu_si512((__m512i const *) ptr + 1);; + 
__m512i d03 = _mm512_loadu_si512((__m512i const *) ptr + 2);; + __m512i d04 = _mm512_loadu_si512((__m512i const *) ptr + 3);; + __m512i d05 = _mm512_loadu_si512((__m512i const *) ptr + 4);; + __m512i d06 = _mm512_loadu_si512((__m512i const *) ptr + 5);; + __m512i d07 = _mm512_loadu_si512((__m512i const *) ptr + 6);; + __m512i d08 = _mm512_loadu_si512((__m512i const *) ptr + 7);; + __m512i d09 = _mm512_loadu_si512((__m512i const *) ptr + 8);; + __m512i d10 = _mm512_loadu_si512((__m512i const *) ptr + 9);; + __m512i d11 = _mm512_loadu_si512((__m512i const *) ptr + 10);; + sort_11v_ascending(d01, d02, d03, d04, d05, d06, d07, d08, d09, d10, d11); + _mm512_storeu_si512((__m512i *) ptr + 0, d01); + _mm512_storeu_si512((__m512i *) ptr + 1, d02); + _mm512_storeu_si512((__m512i *) ptr + 2, d03); + _mm512_storeu_si512((__m512i *) ptr + 3, d04); + _mm512_storeu_si512((__m512i *) ptr + 4, d05); + _mm512_storeu_si512((__m512i *) ptr + 5, d06); + _mm512_storeu_si512((__m512i *) ptr + 6, d07); + _mm512_storeu_si512((__m512i *) ptr + 7, d08); + _mm512_storeu_si512((__m512i *) ptr + 8, d09); + _mm512_storeu_si512((__m512i *) ptr + 9, d10); + _mm512_storeu_si512((__m512i *) ptr + 10, d11); +} + + static NOINLINE void sort_12v(uint32_t *ptr) { + __m512i d01 = _mm512_loadu_si512((__m512i const *) ptr + 0);; + __m512i d02 = _mm512_loadu_si512((__m512i const *) ptr + 1);; + __m512i d03 = _mm512_loadu_si512((__m512i const *) ptr + 2);; + __m512i d04 = _mm512_loadu_si512((__m512i const *) ptr + 3);; + __m512i d05 = _mm512_loadu_si512((__m512i const *) ptr + 4);; + __m512i d06 = _mm512_loadu_si512((__m512i const *) ptr + 5);; + __m512i d07 = _mm512_loadu_si512((__m512i const *) ptr + 6);; + __m512i d08 = _mm512_loadu_si512((__m512i const *) ptr + 7);; + __m512i d09 = _mm512_loadu_si512((__m512i const *) ptr + 8);; + __m512i d10 = _mm512_loadu_si512((__m512i const *) ptr + 9);; + __m512i d11 = _mm512_loadu_si512((__m512i const *) ptr + 10);; + __m512i d12 = _mm512_loadu_si512((__m512i const *) ptr + 11);; + sort_12v_ascending(d01, d02, d03, d04, d05, d06, d07, d08, d09, d10, d11, d12); + _mm512_storeu_si512((__m512i *) ptr + 0, d01); + _mm512_storeu_si512((__m512i *) ptr + 1, d02); + _mm512_storeu_si512((__m512i *) ptr + 2, d03); + _mm512_storeu_si512((__m512i *) ptr + 3, d04); + _mm512_storeu_si512((__m512i *) ptr + 4, d05); + _mm512_storeu_si512((__m512i *) ptr + 5, d06); + _mm512_storeu_si512((__m512i *) ptr + 6, d07); + _mm512_storeu_si512((__m512i *) ptr + 7, d08); + _mm512_storeu_si512((__m512i *) ptr + 8, d09); + _mm512_storeu_si512((__m512i *) ptr + 9, d10); + _mm512_storeu_si512((__m512i *) ptr + 10, d11); + _mm512_storeu_si512((__m512i *) ptr + 11, d12); +} + + static NOINLINE void sort_13v(uint32_t *ptr) { + __m512i d01 = _mm512_loadu_si512((__m512i const *) ptr + 0);; + __m512i d02 = _mm512_loadu_si512((__m512i const *) ptr + 1);; + __m512i d03 = _mm512_loadu_si512((__m512i const *) ptr + 2);; + __m512i d04 = _mm512_loadu_si512((__m512i const *) ptr + 3);; + __m512i d05 = _mm512_loadu_si512((__m512i const *) ptr + 4);; + __m512i d06 = _mm512_loadu_si512((__m512i const *) ptr + 5);; + __m512i d07 = _mm512_loadu_si512((__m512i const *) ptr + 6);; + __m512i d08 = _mm512_loadu_si512((__m512i const *) ptr + 7);; + __m512i d09 = _mm512_loadu_si512((__m512i const *) ptr + 8);; + __m512i d10 = _mm512_loadu_si512((__m512i const *) ptr + 9);; + __m512i d11 = _mm512_loadu_si512((__m512i const *) ptr + 10);; + __m512i d12 = _mm512_loadu_si512((__m512i const *) ptr + 11);; + __m512i d13 = 
_mm512_loadu_si512((__m512i const *) ptr + 12);; + sort_13v_ascending(d01, d02, d03, d04, d05, d06, d07, d08, d09, d10, d11, d12, d13); + _mm512_storeu_si512((__m512i *) ptr + 0, d01); + _mm512_storeu_si512((__m512i *) ptr + 1, d02); + _mm512_storeu_si512((__m512i *) ptr + 2, d03); + _mm512_storeu_si512((__m512i *) ptr + 3, d04); + _mm512_storeu_si512((__m512i *) ptr + 4, d05); + _mm512_storeu_si512((__m512i *) ptr + 5, d06); + _mm512_storeu_si512((__m512i *) ptr + 6, d07); + _mm512_storeu_si512((__m512i *) ptr + 7, d08); + _mm512_storeu_si512((__m512i *) ptr + 8, d09); + _mm512_storeu_si512((__m512i *) ptr + 9, d10); + _mm512_storeu_si512((__m512i *) ptr + 10, d11); + _mm512_storeu_si512((__m512i *) ptr + 11, d12); + _mm512_storeu_si512((__m512i *) ptr + 12, d13); +} + + static NOINLINE void sort_14v(uint32_t *ptr) { + __m512i d01 = _mm512_loadu_si512((__m512i const *) ptr + 0);; + __m512i d02 = _mm512_loadu_si512((__m512i const *) ptr + 1);; + __m512i d03 = _mm512_loadu_si512((__m512i const *) ptr + 2);; + __m512i d04 = _mm512_loadu_si512((__m512i const *) ptr + 3);; + __m512i d05 = _mm512_loadu_si512((__m512i const *) ptr + 4);; + __m512i d06 = _mm512_loadu_si512((__m512i const *) ptr + 5);; + __m512i d07 = _mm512_loadu_si512((__m512i const *) ptr + 6);; + __m512i d08 = _mm512_loadu_si512((__m512i const *) ptr + 7);; + __m512i d09 = _mm512_loadu_si512((__m512i const *) ptr + 8);; + __m512i d10 = _mm512_loadu_si512((__m512i const *) ptr + 9);; + __m512i d11 = _mm512_loadu_si512((__m512i const *) ptr + 10);; + __m512i d12 = _mm512_loadu_si512((__m512i const *) ptr + 11);; + __m512i d13 = _mm512_loadu_si512((__m512i const *) ptr + 12);; + __m512i d14 = _mm512_loadu_si512((__m512i const *) ptr + 13);; + sort_14v_ascending(d01, d02, d03, d04, d05, d06, d07, d08, d09, d10, d11, d12, d13, d14); + _mm512_storeu_si512((__m512i *) ptr + 0, d01); + _mm512_storeu_si512((__m512i *) ptr + 1, d02); + _mm512_storeu_si512((__m512i *) ptr + 2, d03); + _mm512_storeu_si512((__m512i *) ptr + 3, d04); + _mm512_storeu_si512((__m512i *) ptr + 4, d05); + _mm512_storeu_si512((__m512i *) ptr + 5, d06); + _mm512_storeu_si512((__m512i *) ptr + 6, d07); + _mm512_storeu_si512((__m512i *) ptr + 7, d08); + _mm512_storeu_si512((__m512i *) ptr + 8, d09); + _mm512_storeu_si512((__m512i *) ptr + 9, d10); + _mm512_storeu_si512((__m512i *) ptr + 10, d11); + _mm512_storeu_si512((__m512i *) ptr + 11, d12); + _mm512_storeu_si512((__m512i *) ptr + 12, d13); + _mm512_storeu_si512((__m512i *) ptr + 13, d14); +} + + static NOINLINE void sort_15v(uint32_t *ptr) { + __m512i d01 = _mm512_loadu_si512((__m512i const *) ptr + 0);; + __m512i d02 = _mm512_loadu_si512((__m512i const *) ptr + 1);; + __m512i d03 = _mm512_loadu_si512((__m512i const *) ptr + 2);; + __m512i d04 = _mm512_loadu_si512((__m512i const *) ptr + 3);; + __m512i d05 = _mm512_loadu_si512((__m512i const *) ptr + 4);; + __m512i d06 = _mm512_loadu_si512((__m512i const *) ptr + 5);; + __m512i d07 = _mm512_loadu_si512((__m512i const *) ptr + 6);; + __m512i d08 = _mm512_loadu_si512((__m512i const *) ptr + 7);; + __m512i d09 = _mm512_loadu_si512((__m512i const *) ptr + 8);; + __m512i d10 = _mm512_loadu_si512((__m512i const *) ptr + 9);; + __m512i d11 = _mm512_loadu_si512((__m512i const *) ptr + 10);; + __m512i d12 = _mm512_loadu_si512((__m512i const *) ptr + 11);; + __m512i d13 = _mm512_loadu_si512((__m512i const *) ptr + 12);; + __m512i d14 = _mm512_loadu_si512((__m512i const *) ptr + 13);; + __m512i d15 = _mm512_loadu_si512((__m512i const *) ptr + 14);; + 
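// All 15 vectors are in registers at this point; the fully unrolled bitonic network below sorts them in place before the stores write them back. +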
sort_15v_ascending(d01, d02, d03, d04, d05, d06, d07, d08, d09, d10, d11, d12, d13, d14, d15); + _mm512_storeu_si512((__m512i *) ptr + 0, d01); + _mm512_storeu_si512((__m512i *) ptr + 1, d02); + _mm512_storeu_si512((__m512i *) ptr + 2, d03); + _mm512_storeu_si512((__m512i *) ptr + 3, d04); + _mm512_storeu_si512((__m512i *) ptr + 4, d05); + _mm512_storeu_si512((__m512i *) ptr + 5, d06); + _mm512_storeu_si512((__m512i *) ptr + 6, d07); + _mm512_storeu_si512((__m512i *) ptr + 7, d08); + _mm512_storeu_si512((__m512i *) ptr + 8, d09); + _mm512_storeu_si512((__m512i *) ptr + 9, d10); + _mm512_storeu_si512((__m512i *) ptr + 10, d11); + _mm512_storeu_si512((__m512i *) ptr + 11, d12); + _mm512_storeu_si512((__m512i *) ptr + 12, d13); + _mm512_storeu_si512((__m512i *) ptr + 13, d14); + _mm512_storeu_si512((__m512i *) ptr + 14, d15); +} + + static NOINLINE void sort_16v(uint32_t *ptr) { + __m512i d01 = _mm512_loadu_si512((__m512i const *) ptr + 0);; + __m512i d02 = _mm512_loadu_si512((__m512i const *) ptr + 1);; + __m512i d03 = _mm512_loadu_si512((__m512i const *) ptr + 2);; + __m512i d04 = _mm512_loadu_si512((__m512i const *) ptr + 3);; + __m512i d05 = _mm512_loadu_si512((__m512i const *) ptr + 4);; + __m512i d06 = _mm512_loadu_si512((__m512i const *) ptr + 5);; + __m512i d07 = _mm512_loadu_si512((__m512i const *) ptr + 6);; + __m512i d08 = _mm512_loadu_si512((__m512i const *) ptr + 7);; + __m512i d09 = _mm512_loadu_si512((__m512i const *) ptr + 8);; + __m512i d10 = _mm512_loadu_si512((__m512i const *) ptr + 9);; + __m512i d11 = _mm512_loadu_si512((__m512i const *) ptr + 10);; + __m512i d12 = _mm512_loadu_si512((__m512i const *) ptr + 11);; + __m512i d13 = _mm512_loadu_si512((__m512i const *) ptr + 12);; + __m512i d14 = _mm512_loadu_si512((__m512i const *) ptr + 13);; + __m512i d15 = _mm512_loadu_si512((__m512i const *) ptr + 14);; + __m512i d16 = _mm512_loadu_si512((__m512i const *) ptr + 15);; + sort_16v_ascending(d01, d02, d03, d04, d05, d06, d07, d08, d09, d10, d11, d12, d13, d14, d15, d16); + _mm512_storeu_si512((__m512i *) ptr + 0, d01); + _mm512_storeu_si512((__m512i *) ptr + 1, d02); + _mm512_storeu_si512((__m512i *) ptr + 2, d03); + _mm512_storeu_si512((__m512i *) ptr + 3, d04); + _mm512_storeu_si512((__m512i *) ptr + 4, d05); + _mm512_storeu_si512((__m512i *) ptr + 5, d06); + _mm512_storeu_si512((__m512i *) ptr + 6, d07); + _mm512_storeu_si512((__m512i *) ptr + 7, d08); + _mm512_storeu_si512((__m512i *) ptr + 8, d09); + _mm512_storeu_si512((__m512i *) ptr + 9, d10); + _mm512_storeu_si512((__m512i *) ptr + 10, d11); + _mm512_storeu_si512((__m512i *) ptr + 11, d12); + _mm512_storeu_si512((__m512i *) ptr + 12, d13); + _mm512_storeu_si512((__m512i *) ptr + 13, d14); + _mm512_storeu_si512((__m512i *) ptr + 14, d15); + _mm512_storeu_si512((__m512i *) ptr + 15, d16); +} + static void sort(uint32_t *ptr, size_t length); + +}; +} +} + +#undef i2d +#undef d2i +#undef i2s +#undef s2i +#undef s2d +#undef d2s + +#ifdef __GNUC__ +#ifdef __clang__ +#pragma clang attribute pop +#else +#pragma GCC pop_options +#endif +#endif +#endif + diff --git a/src/coreclr/src/gc/smallsort/bitonic_sort.h b/src/coreclr/src/gc/smallsort/bitonic_sort.h index e7f07235cec672..9978454431c8c7 100644 --- a/src/coreclr/src/gc/smallsort/bitonic_sort.h +++ b/src/coreclr/src/gc/smallsort/bitonic_sort.h @@ -2,10 +2,13 @@ #define BITONIC_SORT_H #include -namespace gcsort { +#include "../defs.h" +#include "../machine_traits.h" + +namespace vxsort { namespace smallsort { -template -class bitonic { +template +struct bitonic { public: 
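+ // Dispatches on length to one of the generated sort_NNv kernels; length is expected to be a whole multiple of the vector element count N.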
static void sort(T* ptr, size_t length); }; diff --git a/src/coreclr/src/gc/smallsort/codegen/avx2.py b/src/coreclr/src/gc/smallsort/codegen/avx2.py new file mode 100644 index 00000000000000..1e49c7e47db217 --- /dev/null +++ b/src/coreclr/src/gc/smallsort/codegen/avx2.py @@ -0,0 +1,485 @@ +import os +from datetime import datetime + +from utils import native_size_map, next_power_of_2 +from bitonic_isa import BitonicISA + + +class AVX2BitonicISA(BitonicISA): + def __init__(self, type): + self.vector_size_in_bytes = 32 + + self.type = type + + self.bitonic_size_map = {} + + for t, s in native_size_map.items(): + self.bitonic_size_map[t] = int(self.vector_size_in_bytes / s) + + self.bitonic_type_map = { + "int32_t": "__m256i", + "uint32_t": "__m256i", + "float": "__m256", + "int64_t": "__m256i", + "uint64_t": "__m256i", + "double": "__m256d", + } + + def max_bitonic_sort_vectors(self): + return 16 + + def vector_size(self): + return self.bitonic_size_map[self.type] + + def vector_type(self): + return self.bitonic_type_map[self.type] + + @classmethod + def supported_types(cls): + return native_size_map.keys() + + def i2d(self, v): + t = self.type + if t == "double": + return v + elif t == "float": + return f"s2d({v})" + return f"i2d({v})" + + def i2s(self, v): + t = self.type + if t == "double": + raise Exception("WTF") + elif t == "float": + return f"i2s({v})" + return v + + def d2i(self, v): + t = self.type + if t == "double": + return v + elif t == "float": + return f"d2s({v})" + return f"d2i({v})" + + def s2i(self, v): + t = self.type + if t == "double": + raise Exception("WTF") + elif t == "float": + return f"s2i({v})" + return v + + def generate_param_list(self, start, numParams): + return str.join(", ", list(map(lambda p: f"d{p:02d}", range(start, start + numParams)))) + + def generate_param_def_list(self, numParams): + t = self.type + return str.join(", ", list(map(lambda p: f"{self.vector_type()}& d{p:02d}", range(1, numParams + 1)))) + + def generate_shuffle_X1(self, v): + size = self.vector_size() + if size == 8: + return self.i2s(f"_mm256_shuffle_epi32({self.s2i(v)}, 0xB1)") + elif size == 4: + return self.d2i(f"_mm256_shuffle_pd({self.i2d(v)}, {self.i2d(v)}, 0x5)") + + def generate_shuffle_X2(self, v): + size = self.vector_size() + if size == 8: + return self.i2s(f"_mm256_shuffle_epi32({self.s2i(v)}, 0x4E)") + elif size == 4: + return self.d2i(f"_mm256_permute4x64_pd({self.i2d(v)}, 0x4E)") + + def generate_shuffle_XR(self, v): + size = self.vector_size() + if size == 8: + return self.i2s(f"_mm256_shuffle_epi32({self.s2i(v)}, 0x1B)") + elif size == 4: + return self.d2i(f"_mm256_permute4x64_pd({self.i2d(v)}, 0x1B)") + + def generate_blend_B1(self, v1, v2, ascending): + size = self.vector_size() + if size == 8: + if ascending: + return self.i2s(f"_mm256_blend_epi32({self.s2i(v1)}, {self.s2i(v2)}, 0xAA)") + else: + return self.i2s(f"_mm256_blend_epi32({self.s2i(v2)}, {self.s2i(v1)}, 0xAA)") + elif size == 4: + if ascending: + return self.d2i(f"_mm256_blend_pd({self.i2d(v1)}, {self.i2d(v2)}, 0xA)") + else: + return self.d2i(f"_mm256_blend_pd({self.i2d(v2)}, {self.i2d(v1)}, 0xA)") + + def generate_blend_B2(self, v1, v2, ascending): + size = self.vector_size() + if size == 8: + if ascending: + return self.i2s(f"_mm256_blend_epi32({self.s2i(v1)}, {self.s2i(v2)}, 0xCC)") + else: + return self.i2s(f"_mm256_blend_epi32({self.s2i(v2)}, {self.s2i(v1)}, 0xCC)") + elif size == 4: + if ascending: + return self.d2i(f"_mm256_blend_pd({self.i2d(v1)}, {self.i2d(v2)}, 0xC)") + else: + return 
self.d2i(f"_mm256_blend_pd({self.i2d(v2)}, {self.i2d(v1)}, 0xC)") + + def generate_blend_B4(self, v1, v2, ascending): + size = self.vector_size() + if size == 8: + if ascending: + return self.i2s(f"_mm256_blend_epi32({self.s2i(v1)}, {self.s2i(v2)}, 0xF0)") + else: + return self.i2s(f"_mm256_blend_epi32({self.s2i(v2)}, {self.s2i(v1)}, 0xF0)") + elif size == 4: + raise Exception("WTF") + + def generate_cross(self, v): + size = self.vector_size() + if size == 8: + return self.d2i(f"_mm256_permute4x64_pd({self.i2d(v)}, 0x4E)") + elif size == 4: + raise Exception("WTF") + + def generate_reverse(self, v): + size = self.vector_size() + if size == 8: + v = f"_mm256_shuffle_epi32({self.s2i(v)}, 0x1B)" + return self.d2i(f"_mm256_permute4x64_pd(i2d({v}), 0x4E)") + elif size == 4: + return self.d2i(f"_mm256_permute4x64_pd({self.i2d(v)}, 0x1B)") + + def crappity_crap_crap(self, v1, v2): + t = self.type + if t == "int64_t": + return f"cmp = _mm256_cmpgt_epi64({v1}, {v2});" + elif t == "uint64_t": + return f"cmp = _mm256_cmpgt_epi64(_mm256_xor_si256(topBit, {v1}), _mm256_xor_si256(topBit, {v2}));" + + return "" + + def generate_min(self, v1, v2): + t = self.type + if t == "int32_t": + return f"_mm256_min_epi32({v1}, {v2})" + elif t == "uint32_t": + return f"_mm256_min_epu32({v1}, {v2})" + elif t == "float": + return f"_mm256_min_ps({v1}, {v2})" + elif t == "int64_t": + return self.d2i(f"_mm256_blendv_pd({self.i2d(v1)}, {self.i2d(v2)}, i2d(cmp))") + elif t == "uint64_t": + return self.d2i(f"_mm256_blendv_pd({self.i2d(v1)}, {self.i2d(v2)}, i2d(cmp))") + elif t == "double": + return f"_mm256_min_pd({v1}, {v2})" + + def generate_max(self, v1, v2): + t = self.type + if t == "int32_t": + return f"_mm256_max_epi32({v1}, {v2})" + elif t == "uint32_t": + return f"_mm256_max_epu32({v1}, {v2})" + elif t == "float": + return f"_mm256_max_ps({v1}, {v2})" + elif t == "int64_t": + return self.d2i(f"_mm256_blendv_pd({self.i2d(v2)}, {self.i2d(v1)}, i2d(cmp))") + elif t == "uint64_t": + return self.d2i(f"_mm256_blendv_pd({self.i2d(v2)}, {self.i2d(v1)}, i2d(cmp))") + elif t == "double": + return f"_mm256_max_pd({v1}, {v2})" + + def get_load_intrinsic(self, v, offset): + t = self.type + if t == "double": + return f"_mm256_loadu_pd(({t} const *) ((__m256d const *) {v} + {offset}))" + if t == "float": + return f"_mm256_loadu_ps(({t} const *) ((__m256 const *) {v} + {offset}))" + return f"_mm256_lddqu_si256((__m256i const *) {v} + {offset});" + + + def get_store_intrinsic(self, ptr, offset, value): + t = self.type + if t == "double": + return f"_mm256_storeu_pd(({t} *) ((__m256d *) {ptr} + {offset}), {value})" + if t == "float": + return f"_mm256_storeu_ps(({t} *) ((__m256 *) {ptr} + {offset}), {value})" + return f"_mm256_storeu_si256((__m256i *) {ptr} + {offset}, {value})" + + def autogenerated_blabber(self): + return f"""///////////////////////////////////////////////////////////////////////////// +//// +// This file was auto-generated by a tool at {datetime.now().strftime("%F %H:%M:%S")} +// +// It is recommended you DO NOT directly edit this file but instead edit +// the code-generator that generated this source file instead. 
+/////////////////////////////////////////////////////////////////////////////""" + + def generate_prologue(self, f): + t = self.type + s = f"""{self.autogenerated_blabber()} + +#ifndef BITONIC_SORT_AVX2_{t.upper()}_H +#define BITONIC_SORT_AVX2_{t.upper()}_H + +#ifdef __GNUC__ +#ifdef __clang__ +#pragma clang attribute push (__attribute__((target("avx2"))), apply_to = any(function)) +#else +#pragma GCC push_options +#pragma GCC target("avx2") +#endif +#endif + +#include +#include "bitonic_sort.h" + +#define i2d _mm256_castsi256_pd +#define d2i _mm256_castpd_si256 +#define i2s _mm256_castsi256_ps +#define s2i _mm256_castps_si256 +#define s2d _mm256_castps_pd +#define d2s _mm256_castpd_ps + +namespace vxsort {{ +namespace smallsort {{ +template<> struct bitonic<{t}, AVX2> {{ +public: +""" + print(s, file=f) + + def generate_epilogue(self, f): + s = f""" +}}; +}} +}} + +#undef i2d +#undef d2i +#undef i2s +#undef s2i +#undef s2d +#undef d2s + +#ifdef __GNUC__ +#ifdef __clang__ +#pragma clang attribute pop +#else +#pragma GCC pop_options +#endif +#endif +#endif + """ + print(s, file=f) + + def generate_1v_basic_sorters(self, f, ascending): + g = self + type = self.type + maybe_cmp = lambda: ", cmp" if (type == "int64_t" or type == "uint64_t") else "" + maybe_topbit = lambda: f"\n {g.vector_type()} topBit = _mm256_set1_epi64x(1LLU << 63);" if (type == "uint64_t") else "" + suffix = "ascending" if ascending else "descending" + + s = f""" static INLINE void sort_01v_{suffix}({g.generate_param_def_list(1)}) {{ + {g.vector_type()} min, max, s{maybe_cmp()};{maybe_topbit()} + + s = {g.generate_shuffle_X1("d01")}; + {g.crappity_crap_crap("s", "d01")} + min = {g.generate_min("s", "d01")}; + max = {g.generate_max("s", "d01")}; + d01 = {g.generate_blend_B1("min", "max", ascending)}; + + s = {g.generate_shuffle_XR("d01")}; + {g.crappity_crap_crap("s", "d01")} + min = {g.generate_min("s", "d01")}; + max = {g.generate_max("s", "d01")}; + d01 = {g.generate_blend_B2("min", "max", ascending)}; + + s = {g.generate_shuffle_X1("d01")}; + {g.crappity_crap_crap("s", "d01")} + min = {g.generate_min("s", "d01")}; + max = {g.generate_max("s", "d01")}; + d01 = {g.generate_blend_B1("min", "max", ascending)};""" + + print(s, file=f) + + if g.vector_size() == 8: + s = f""" + s = {g.generate_reverse("d01")}; + min = {g.generate_min("s", "d01")}; + max = {g.generate_max("s", "d01")}; + d01 = {g.generate_blend_B4("min", "max", ascending)}; + + s = {g.generate_shuffle_X2("d01")}; + min = {g.generate_min("s", "d01")}; + max = {g.generate_max("s", "d01")}; + d01 = {g.generate_blend_B2("min", "max", ascending)}; + + s = {g.generate_shuffle_X1("d01")}; + min = {g.generate_min("s", "d01")}; + max = {g.generate_max("s", "d01")}; + d01 = {g.generate_blend_B1("min", "max", ascending)};""" + print(s, file=f) + print("}", file=f) + + + + def generate_1v_merge_sorters(self, f, ascending: bool): + g = self + type = self.type + maybe_cmp = lambda: ", cmp" if (type == "int64_t" or type == "uint64_t") else "" + maybe_topbit = lambda: f"\n {g.vector_type()} topBit = _mm256_set1_epi64x(1LLU << 63);" if ( + type == "uint64_t") else "" + + suffix = "ascending" if ascending else "descending" + + s = f""" static INLINE void sort_01v_merge_{suffix}({g.generate_param_def_list(1)}) {{ + {g.vector_type()} min, max, s{maybe_cmp()};{maybe_topbit()}""" + print(s, file=f) + + if g.vector_size() == 8: + s = f""" + s = {g.generate_cross("d01")}; + min = {g.generate_min("s", "d01")}; + max = {g.generate_max("s", "d01")}; + d01 = {g.generate_blend_B4("min", 
"max", ascending)};""" + print(s, file=f) + + s = f""" + s = {g.generate_shuffle_X2("d01")}; + {g.crappity_crap_crap("s", "d01")} + min = {g.generate_min("s", "d01")}; + max = {g.generate_max("s", "d01")}; + d01 = {g.generate_blend_B2("min", "max", ascending)}; + + s = {g.generate_shuffle_X1("d01")}; + {g.crappity_crap_crap("s", "d01")} + min = {g.generate_min("s", "d01")}; + max = {g.generate_max("s", "d01")}; + d01 = {g.generate_blend_B1("min", "max", ascending)};""" + + print(s, file=f) + print(" }", file=f) + + def generate_compounded_sorter(self, f, width, ascending, inline): + type = self.type + g = self + maybe_cmp = lambda: ", cmp" if (type == "int64_t" or type == "uint64_t") else "" + maybe_topbit = lambda: f"\n {g.vector_type()} topBit = _mm256_set1_epi64x(1LLU << 63);" if ( + type == "uint64_t") else "" + + w1 = int(next_power_of_2(width) / 2) + w2 = int(width - w1) + + suffix = "ascending" if ascending else "descending" + rev_suffix = "descending" if ascending else "ascending" + + inl = "INLINE" if inline else "NOINLINE" + + s = f""" static {inl} void sort_{width:02d}v_{suffix}({g.generate_param_def_list(width)}) {{ + {g.vector_type()} tmp{maybe_cmp()};{maybe_topbit()} + + sort_{w1:02d}v_{suffix}({g.generate_param_list(1, w1)}); + sort_{w2:02d}v_{rev_suffix}({g.generate_param_list(w1 + 1, w2)});""" + + print(s, file=f) + + for r in range(w1 + 1, width + 1): + x = w1 + 1 - (r - w1) + s = f""" + tmp = d{r:02d}; + {g.crappity_crap_crap(f"d{x:02d}", f"d{r:02d}")} + d{r:02d} = {g.generate_max(f"d{x:02d}", f"d{r:02d}")}; + d{x:02d} = {g.generate_min(f"d{x:02d}", "tmp")};""" + + print(s, file=f) + + s = f""" + sort_{w1:02d}v_merge_{suffix}({g.generate_param_list(1, w1)}); + sort_{w2:02d}v_merge_{suffix}({g.generate_param_list(w1 + 1, w2)});""" + print(s, file=f) + print(" }", file=f) + + + def generate_compounded_merger(self, f, width, ascending, inline): + type = self.type + g = self + maybe_cmp = lambda: ", cmp" if (type == "int64_t" or type == "uint64_t") else "" + maybe_topbit = lambda: f"\n {g.vector_type()} topBit = _mm256_set1_epi64x(1LLU << 63);" if ( + type == "uint64_t") else "" + + w1 = int(next_power_of_2(width) / 2) + w2 = int(width - w1) + + suffix = "ascending" if ascending else "descending" + rev_suffix = "descending" if ascending else "ascending" + + inl = "INLINE" if inline else "NOINLINE" + + s = f""" static {inl} void sort_{width:02d}v_merge_{suffix}({g.generate_param_def_list(width)}) {{ + {g.vector_type()} tmp{maybe_cmp()};{maybe_topbit()}""" + print(s, file=f) + + for r in range(w1 + 1, width + 1): + x = r - w1 + s = f""" + tmp = d{x:02d}; + {g.crappity_crap_crap(f"d{r:02d}", f"d{x:02d}")} + d{x:02d} = {g.generate_min(f"d{r:02d}", f"d{x:02d}")}; + {g.crappity_crap_crap(f"d{r:02d}", "tmp")} + d{r:02d} = {g.generate_max(f"d{r:02d}", "tmp")};""" + print(s, file=f) + + s = f""" + sort_{w1:02d}v_merge_{suffix}({g.generate_param_list(1, w1)}); + sort_{w2:02d}v_merge_{suffix}({g.generate_param_list(w1 + 1, w2)});""" + print(s, file=f) + print(" }", file=f) + + + def generate_entry_points(self, f): + type = self.type + g = self + for m in range(1, g.max_bitonic_sort_vectors() + 1): + s = f""" + static NOINLINE void sort_{m:02d}v({type} *ptr) {{""" + print(s, file=f) + + for l in range(0, m): + s = f" {g.vector_type()} d{l + 1:02d} = {g.get_load_intrinsic('ptr', l)};" + print(s, file=f) + + s = f" sort_{m:02d}v_ascending({g.generate_param_list(1, m)});" + print(s, file=f) + + for l in range(0, m): + s = f" {g.get_store_intrinsic('ptr', l, f'd{l + 1:02d}')};" + print(s, 
file=f) + + print("}", file=f) + + + def generate_master_entry_point(self, f_header, f_src): + basename = os.path.basename(f_header.name) + s = f"""#include "{basename}" + +using namespace vxsort; +""" + print(s, file=f_src) + + t = self.type + g = self + + s = f""" static void sort({t} *ptr, size_t length);""" + print(s, file=f_header) + + s = f"""void vxsort::smallsort::bitonic<{t}, vector_machine::AVX2 >::sort({t} *ptr, size_t length) {{ + const int N = {g.vector_size()}; + + switch(length / N) {{""" + print(s, file=f_src) + + for m in range(1, self.max_bitonic_sort_vectors() + 1): + s = f" case {m}: sort_{m:02d}v(ptr); break;" + print(s, file=f_src) + print(" }", file=f_src) + print("}", file=f_src) + pass diff --git a/src/coreclr/src/gc/smallsort/codegen/avx512.py b/src/coreclr/src/gc/smallsort/codegen/avx512.py new file mode 100644 index 00000000000000..ffbf612b00b023 --- /dev/null +++ b/src/coreclr/src/gc/smallsort/codegen/avx512.py @@ -0,0 +1,485 @@ +from datetime import datetime + +from utils import native_size_map, next_power_of_2 +from bitonic_isa import BitonicISA +import os + + +class AVX512BitonicISA(BitonicISA): + def __init__(self, type): + self.vector_size_in_bytes = 64 + + self.type = type + + self.bitonic_size_map = {} + + for t, s in native_size_map.items(): + self.bitonic_size_map[t] = int(self.vector_size_in_bytes / s) + + self.bitonic_type_map = { + "int32_t": "__m512i", + "uint32_t": "__m512i", + "float": "__m512", + "int64_t": "__m512i", + "uint64_t": "__m512i", + "double": "__m512d", + } + + def max_bitonic_sort_vectors(self): + return 16 + + def vector_size(self): + return self.bitonic_size_map[self.type] + + def vector_type(self): + return self.bitonic_type_map[self.type] + + @classmethod + def supported_types(cls): + return native_size_map.keys() + + def i2d(self, v): + t = self.type + if t == "double": + return v + elif t == "float": + raise Exception("WTF") + return f"i2d({v})" + + def i2s(self, v): + t = self.type + if t == "double": + raise Exception("WTF") + elif t == "float": + return f"i2s({v})" + return v + + def d2i(self, v): + t = self.type + if t == "double": + return v + elif t == "float": + raise Exception("WTF") + return f"d2i({v})" + + def s2i(self, v): + t = self.type + if t == "double": + raise Exception("WTF") + elif t == "float": + return f"s2i({v})" + return v + + def generate_param_list(self, start, numParams): + return str.join(", ", list(map(lambda p: f"d{p:02d}", range(start, start + numParams)))) + + def generate_param_def_list(self, numParams): + t = self.type + return str.join(", ", list(map(lambda p: f"{self.vector_type()}& d{p:02d}", range(1, numParams + 1)))) + + def generate_shuffle_S1(self, v): + t = self.type + size = self.bitonic_size_map[t] + if size == 16: + return self.i2s(f"_mm512_shuffle_epi32({self.s2i(v)}, _MM_PERM_CDAB)") + elif size == 8: + return self.d2i(f"_mm512_permute_pd({self.i2d(v)}, _MM_PERM_BBBB)") + + def generate_shuffle_X4(self, v): + t = self.type + size = self.bitonic_size_map[t] + if size == 16: + return self.i2s(f"_mm512_shuffle_epi32({self.s2i(v)}, _MM_PERM_ABCD)") + elif size == 8: + return self.d2i(f"_mm512_permutex_pd({self.i2d(v)}, _MM_PERM_ABCD)") + + def generate_shuffle_X8(self, v): + t = self.type + size = self.bitonic_size_map[t] + if size == 16: + s1 = f"_mm512_shuffle_epi32({self.s2i(v)}, _MM_PERM_ABCD)" + return self.i2s(f"_mm512_permutex_epi64({s1}, _MM_PERM_BADC)") + elif size == 8: + s1 = f"_mm512_permutex_pd({self.i2d(v)}, _MM_PERM_ABCD)" + return self.d2i(f"_mm512_shuffle_f64x2({s1}, 
{s1}, _MM_PERM_BADC)") + + def generate_shuffle_S2(self, v): + t = self.type + size = self.bitonic_size_map[t] + if size == 16: + return self.i2s(f"_mm512_shuffle_epi32({self.s2i(v)}, _MM_PERM_BADC)") + elif size == 8: + return self.d2i(f"_mm512_permutex_pd({self.i2d(v)}, _MM_PERM_BADC)") + + def generate_shuffle_X16(self, v): + t = self.type + size = self.bitonic_size_map[t] + if size == 16: + s1 = f"_mm512_shuffle_epi32({self.s2i(v)}, _MM_PERM_ABCD)" + return self.i2s(f"_mm512_shuffle_i64x2({s1}, {s1}, _MM_PERM_ABCD)") + elif size == 8: + return self.d2i(f"_mm512_shuffle_pd({self.i2d(v)}, {self.i2d(v)}, 0xB1)") + + def generate_shuffle_S4(self, v): + t = self.type + size = self.bitonic_size_map[t] + if size == 16: + return self.i2s(f"_mm512_permutex_epi64({self.s2i(v)}, _MM_PERM_BADC)") + elif size == 8: + return self.d2i(f"_mm512_shuffle_f64x2({self.i2d(v)}, {self.i2d(v)}, _MM_PERM_BADC)") + + def generate_shuffle_S8(self, v): + t = self.type + size = self.bitonic_size_map[t] + if size == 16: + return self.i2s(f"_mm512_shuffle_i64x2({self.s2i(v)}, {self.s2i(v)}, _MM_PERM_BADC)") + elif size == 8: + return self.d2i(f"_mm512_shuffle_pd({self.i2d(v)}, {self.i2d(v)}, 0xB1)") + + def generate_min(self, v1, v2): + t = self.type + if t == "int32_t": + return f"_mm512_min_epi32({v1}, {v2})" + elif t == "uint32_t": + return f"_mm512_min_epu32({v1}, {v2})" + elif t == "float": + return f"_mm512_min_ps({v1}, {v2})" + elif t == "int64_t": + return f"_mm512_min_epi64({v1}, {v2})" + elif t == "uint64_t": + return f"_mm512_min_epu64({v1}, {v2})" + elif t == "double": + return f"_mm512_min_pd({v1}, {v2})" + + def generate_max(self, v1, v2): + t = self.type + if t == "int32_t": + return f"_mm512_max_epi32({v1}, {v2})" + elif t == "uint32_t": + return f"_mm512_max_epu32({v1}, {v2})" + elif t == "float": + return f"_mm512_max_ps({v1}, {v2})" + elif t == "int64_t": + return f"_mm512_max_epi64({v1}, {v2})" + elif t == "uint64_t": + return f"_mm512_max_epu64({v1}, {v2})" + elif t == "double": + return f"_mm512_max_pd({v1}, {v2})" + + def generate_mask(self, stride, ascending): + b = 1 << stride + b = b - 1 + if ascending: + b = b << stride + + mask = 0 + size = self.vector_size() + while size > 0: + mask = mask << (stride * 2) | b + size = size - (stride * 2) + return mask + + + def generate_max_with_blend(self, src, v1, v2, stride, ascending): + mask = self.generate_mask(stride, ascending) + t = self.type + if t == "int32_t": + return f"_mm512_mask_max_epi32({src}, 0x{mask:04X}, {v1}, {v2})" + elif t == "uint32_t": + return f"_mm512_mask_max_epu32({src}, 0x{mask:04X}, {v1}, {v2})" + elif t == "float": + return f"_mm512_mask_max_ps({src}, 0x{mask:04X}, {v1}, {v2})" + elif t == "int64_t": + return f"_mm512_mask_max_epi64({src}, 0x{mask:04X}, {v1}, {v2})" + elif t == "uint64_t": + return f"_mm512_mask_max_epu64({src}, 0x{mask:04X}, {v1}, {v2})" + elif t == "double": + return f"_mm512_mask_max_pd({src}, 0x{mask:04X}, {v1}, {v2})" + + + def get_load_intrinsic(self, v, offset): + t = self.type + if t == "double": + return f"_mm512_loadu_pd(({t} const *) ((__m512d const *) {v} + {offset}))" + if t == "float": + return f"_mm512_loadu_ps(({t} const *) ((__m512 const *) {v} + {offset}))" + return f"_mm512_loadu_si512((__m512i const *) {v} + {offset});" + + + def get_store_intrinsic(self, ptr, offset, value): + t = self.type + if t == "double": + return f"_mm512_storeu_pd(({t} *) ((__m512d *) {ptr} + {offset}), {value})" + if t == "float": + return f"_mm512_storeu_ps(({t} *) ((__m512 *) {ptr} + {offset}), {value})" + 
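# Integer keys fall through to the generic unaligned si512 store; float and double use the typed store intrinsics above. +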
return f"_mm512_storeu_si512((__m512i *) {ptr} + {offset}, {value})" + + def autogenerated_blabber(self): + return f"""///////////////////////////////////////////////////////////////////////////// +//// +// This file was auto-generated by a tool at {datetime.now().strftime("%F %H:%M:%S")} +// +// It is recommended you DO NOT directly edit this file but instead edit +// the code-generator that generated this source file instead. +/////////////////////////////////////////////////////////////////////////////""" + + def generate_prologue(self, f): + t = self.type + s = f"""{self.autogenerated_blabber()} + +#ifndef BITONIC_SORT_AVX512_{t.upper()}_H +#define BITONIC_SORT_AVX512_{t.upper()}_H + + +#ifdef __GNUC__ +#ifdef __clang__ +#pragma clang attribute push (__attribute__((target("avx512f"))), apply_to = any(function)) +#else +#pragma GCC push_options +#pragma GCC target("avx512f") +#endif +#endif + +#include +#include "bitonic_sort.h" + +#define i2d _mm512_castsi512_pd +#define d2i _mm512_castpd_si512 +#define i2s _mm512_castsi512_ps +#define s2i _mm512_castps_si512 +#define s2d _mm512_castps_pd +#define d2s _mm521_castpd_ps + +namespace vxsort {{ +namespace smallsort {{ +template<> struct bitonic<{t}, AVX512> {{ +public: +""" + print(s, file=f) + + def generate_epilogue(self, f): + s = f""" +}}; +}} +}} + +#undef i2d +#undef d2i +#undef i2s +#undef s2i +#undef s2d +#undef d2s + +#ifdef __GNUC__ +#ifdef __clang__ +#pragma clang attribute pop +#else +#pragma GCC pop_options +#endif +#endif +#endif +""" + print(s, file=f) + + def generate_1v_basic_sorters(self, f, ascending): + g = self + type = self.type + suffix = "ascending" if ascending else "descending" + + s = f""" static INLINE void sort_01v_{suffix}({g.generate_param_def_list(1)}) {{ + {g.vector_type()} min, s; + + s = {g.generate_shuffle_S1("d01")}; + min = {g.generate_min("s", "d01")}; + d01 = {g.generate_max_with_blend("min", "s", "d01", 1, ascending)}; + + s = {g.generate_shuffle_X4("d01")}; + min = {g.generate_min("s", "d01")}; + d01 = {g.generate_max_with_blend("min", "s", "d01", 2, ascending)}; + + s = {g.generate_shuffle_S1("d01")}; + min = {g.generate_min("s", "d01")}; + d01 = {g.generate_max_with_blend("min", "s", "d01", 1, ascending)}; + + s = {g.generate_shuffle_X8("d01")}; + min = {g.generate_min("s", "d01")}; + d01 = {g.generate_max_with_blend("min", "s", "d01", 4, ascending)}; + + s = {g.generate_shuffle_S2("d01")}; + min = {g.generate_min("s", "d01")}; + d01 = {g.generate_max_with_blend("min", "s", "d01", 2, ascending)}; + + s = {g.generate_shuffle_S1("d01")}; + min = {g.generate_min("s", "d01")}; + d01 = {g.generate_max_with_blend("min", "s", "d01", 1, ascending)};""" + + print(s, file=f) + + if g.vector_size() == 16: + s = f""" + s = {g.generate_shuffle_X16("d01")}; + min = {g.generate_min("s", "d01")}; + d01 = {g.generate_max_with_blend("min", "s", "d01", 8, ascending)}; + + s = {g.generate_shuffle_S4("d01")}; + min = {g.generate_min("s", "d01")}; + d01 = {g.generate_max_with_blend("min", "s", "d01", 4, ascending)}; + + s = {g.generate_shuffle_S2("d01")}; + min = {g.generate_min("s", "d01")}; + d01 = {g.generate_max_with_blend("min", "s", "d01", 2, ascending)}; + + s = {g.generate_shuffle_S1("d01")}; + min = {g.generate_min("s", "d01")}; + d01 = {g.generate_max_with_blend("min", "s", "d01", 1, ascending)};""" + print(s, file=f) + print(" }", file=f) + + def generate_1v_merge_sorters(self, f, ascending: bool): + g = self + type = self.type + suffix = "ascending" if ascending else "descending" + + s = f""" static INLINE 
void sort_01v_merge_{suffix}({g.generate_param_def_list(1)}) {{ + {g.vector_type()} min, s;""" + print(s, file=f) + + if g.vector_size() == 16: + s = f""" + s = {g.generate_shuffle_S8("d01")}; + min = {g.generate_min("s", "d01")}; + d01 = {g.generate_max_with_blend("min", "s", "d01", 8, ascending)};""" + print(s, file=f) + + s = f""" + s = {g.generate_shuffle_S4("d01")}; + min = {g.generate_min("s", "d01")}; + d01 = {g.generate_max_with_blend("min", "s", "d01", 4, ascending)}; + + s = {g.generate_shuffle_S2("d01")}; + min = {g.generate_min("s", "d01")}; + d01 = {g.generate_max_with_blend("min", "s", "d01", 2, ascending)}; + + s = {g.generate_shuffle_S1("d01")}; + min = {g.generate_min("s", "d01")}; + d01 = {g.generate_max_with_blend("min", "s", "d01", 1, ascending)};""" + + print(s, file=f) + print(" }", file=f) + + + def generate_compounded_sorter(self, f, width, ascending, inline): + type = self.type + g = self + + w1 = int(next_power_of_2(width) / 2) + w2 = int(width - w1) + + suffix = "ascending" if ascending else "descending" + rev_suffix = "descending" if ascending else "ascending" + + inl = "INLINE" if inline else "NOINLINE" + + s = f""" static {inl} void sort_{width:02d}v_{suffix}({g.generate_param_def_list(width)}) {{ + {g.vector_type()} tmp; + + sort_{w1:02d}v_{suffix}({g.generate_param_list(1, w1)}); + sort_{w2:02d}v_{rev_suffix}({g.generate_param_list(w1 + 1, w2)});""" + + print(s, file=f) + + for r in range(w1 + 1, width + 1): + x = w1 + 1 - (r - w1) + s = f""" + tmp = d{r:02d}; + d{r:02d} = {g.generate_max(f"d{x:02d}", f"d{r:02d}")}; + d{x:02d} = {g.generate_min(f"d{x:02d}", "tmp")};""" + + print(s, file=f) + + s = f""" + sort_{w1:02d}v_merge_{suffix}({g.generate_param_list(1, w1)}); + sort_{w2:02d}v_merge_{suffix}({g.generate_param_list(w1 + 1, w2)});""" + print(s, file=f) + print(" }", file=f) + + + def generate_compounded_merger(self, f, width, ascending, inline): + type = self.type + g = self + + w1 = int(next_power_of_2(width) / 2) + w2 = int(width - w1) + + suffix = "ascending" if ascending else "descending" + rev_suffix = "descending" if ascending else "ascending" + + inl = "INLINE" if inline else "NOINLINE" + + s = f""" static {inl} void sort_{width:02d}v_merge_{suffix}({g.generate_param_def_list(width)}) {{ + {g.vector_type()} tmp;""" + print(s, file=f) + + for r in range(w1 + 1, width + 1): + x = r - w1 + s = f""" + tmp = d{x:02d}; + d{x:02d} = {g.generate_min(f"d{r:02d}", f"d{x:02d}")}; + d{r:02d} = {g.generate_max(f"d{r:02d}", "tmp")};""" + print(s, file=f) + + s = f""" + sort_{w1:02d}v_merge_{suffix}({g.generate_param_list(1, w1)}); + sort_{w2:02d}v_merge_{suffix}({g.generate_param_list(w1 + 1, w2)});""" + print(s, file=f) + print(" }", file=f) + + + def generate_entry_points(self, f): + type = self.type + g = self + for m in range(1, g.max_bitonic_sort_vectors() + 1): + s = f""" + static NOINLINE void sort_{m:02d}v({type} *ptr) {{""" + print(s, file=f) + + for l in range(0, m): + s = f" {g.vector_type()} d{l + 1:02d} = {g.get_load_intrinsic('ptr', l)};" + print(s, file=f) + + s = f" sort_{m:02d}v_ascending({g.generate_param_list(1, m)});" + print(s, file=f) + + for l in range(0, m): + s = f" {g.get_store_intrinsic('ptr', l, f'd{l + 1:02d}')};" + print(s, file=f) + + print("}", file=f) + + + def generate_master_entry_point(self, f_header, f_src): + basename = os.path.basename(f_header.name) + s = f"""#include "{basename}" + +using namespace vxsort; +""" + print(s, file=f_src) + + t = self.type + g = self + + s = f""" static void sort({t} *ptr, size_t length);""" 
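+ # The declaration above lands in the generated header; the definition emitted into the .cpp below switches on length / N and dispatches to the matching sort_NNv kernel.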
+ print(s, file=f_header) + + s = f"""void vxsort::smallsort::bitonic<{t}, vector_machine::AVX512 >::sort({t} *ptr, size_t length) {{ + const int N = {g.vector_size()}; + + switch(length / N) {{""" + print(s, file=f_src) + + for m in range(1, self.max_bitonic_sort_vectors() + 1): + s = f" case {m}: sort_{m:02d}v(ptr); break;" + print(s, file=f_src) + print(" }", file=f_src) + print("}", file=f_src) + pass diff --git a/src/coreclr/src/gc/smallsort/codegen/bitonic_gen.py b/src/coreclr/src/gc/smallsort/codegen/bitonic_gen.py new file mode 100644 index 00000000000000..47e7f8f6756be8 --- /dev/null +++ b/src/coreclr/src/gc/smallsort/codegen/bitonic_gen.py @@ -0,0 +1,105 @@ +#!/usr/bin/env python3 +import argparse +import os +from enum import Enum + +from avx2 import AVX2BitonicISA +from avx512 import AVX512BitonicISA +from bitonic_isa import BitonicISA + +BitonicISA.register(AVX2BitonicISA) +BitonicISA.register(AVX512BitonicISA) + + +def get_generator_supported_types(vector_isa): + if isinstance(vector_isa, str): + vector_isa = VectorISA[vector_isa] + if vector_isa == VectorISA.AVX2: + return AVX2BitonicISA.supported_types() + elif vector_isa == VectorISA.AVX512: + return AVX512BitonicISA.supported_types() + else: + raise Exception(f"Non-supported vector machine-type: {vector_isa}") + + +def get_generator(vector_isa, type): + if isinstance(vector_isa, str): + vector_isa = VectorISA[vector_isa] + if vector_isa == VectorISA.AVX2: + return AVX2BitonicISA(type) + elif vector_isa == VectorISA.AVX512: + return AVX512BitonicISA(type) + else: + raise Exception(f"Non-supported vector machine-type: {vector_isa}") + + +def generate_per_type(f_header, f_src, type, vector_isa, break_inline): + g = get_generator(vector_isa, type) + g.generate_prologue(f_header) + g.generate_1v_sorters(f_header, ascending=True) + g.generate_1v_sorters(f_header, ascending=False) + for width in range(2, g.max_bitonic_sort_vectors() + 1): + + # Allow breaking the inline chain once in a while (configurable) + if break_inline == 0 or width & break_inline != 0: + inline = True + else: + inline = False + g.generate_compounded_sorter(f_header, width, ascending=True, inline=inline) + g.generate_compounded_sorter(f_header, width, ascending=False, inline=inline) + if width <= g.largest_merge_variant_needed(): + g.generate_compounded_merger(f_header, width, ascending=True, inline=inline) + g.generate_compounded_merger(f_header, width, ascending=False, inline=inline) + + g.generate_entry_points(f_header) + g.generate_master_entry_point(f_header, f_src) + g.generate_epilogue(f_header) + + +class Language(Enum): + csharp = 'csharp' + cpp = 'cpp' + rust = 'rust' + + def __str__(self): + return self.value + + +class VectorISA(Enum): + AVX2 = 'AVX2' + AVX512 = 'AVX512' + SVE = 'SVE' + + def __str__(self): + return self.value + +def generate_all_types(): + parser = argparse.ArgumentParser() + #parser.add_argument("--language", type=Language, choices=list(Language), + # help="select output language: csharp/cpp/rust") + parser.add_argument("--vector-isa", + nargs='+', + default='all', + help='list of vector ISAs to generate', + choices=[str(i) for i in VectorISA] + ["all"]) + parser.add_argument("--break-inline", type=int, default=0, help="break inlining every N levels") + + parser.add_argument("--output-dir", type=str, + help="output directory") + + opts = parser.parse_args() + + if 'all' in opts.vector_isa: + opts.vector_isa = list(VectorISA) + + for isa in opts.vector_isa: + for t in get_generator_supported_types(isa): + filename = 
f"bitonic_sort.{isa}.{t}.generated" + print(f"Generating {filename}.{{h,.cpp}}") + h_filename = os.path.join(opts.output_dir, filename + ".h") + h_src = os.path.join(opts.output_dir, filename + ".cpp") + with open(h_filename, "w") as f_header, open(h_src, "w") as f_source: + generate_per_type(f_header, f_source, t, isa, opts.break_inline) + +if __name__ == '__main__': + generate_all_types() diff --git a/src/coreclr/src/gc/smallsort/codegen/bitonic_isa.py b/src/coreclr/src/gc/smallsort/codegen/bitonic_isa.py new file mode 100644 index 00000000000000..1356a24637eb7b --- /dev/null +++ b/src/coreclr/src/gc/smallsort/codegen/bitonic_isa.py @@ -0,0 +1,67 @@ +from abc import ABC, ABCMeta, abstractmethod + +from utils import next_power_of_2 + + +class BitonicISA(ABC, metaclass=ABCMeta): + + @abstractmethod + def vector_size(self): + pass + + @abstractmethod + def max_bitonic_sort_vectors(self): + pass + + def largest_merge_variant_needed(self): + return next_power_of_2(self.max_bitonic_sort_vectors()) / 2; + + @abstractmethod + def vector_size(self): + pass + + @abstractmethod + def vector_type(self): + pass + + @classmethod + @abstractmethod + def supported_types(cls): + pass + + @abstractmethod + def generate_prologue(self, f): + pass + + @abstractmethod + def generate_epilogue(self, f): + pass + + + @abstractmethod + def generate_1v_basic_sorters(self, f, ascending): + pass + + @abstractmethod + def generate_1v_merge_sorters(self, f, ascending): + pass + + def generate_1v_sorters(self, f, ascending): + self.generate_1v_basic_sorters(f, ascending) + self.generate_1v_merge_sorters(f, ascending) + + @abstractmethod + def generate_compounded_sorter(self, f, width, ascending, inline): + pass + + @abstractmethod + def generate_compounded_merger(self, f, width, ascending, inline): + pass + + @abstractmethod + def generate_entry_points(self, f): + pass + + @abstractmethod + def generate_master_entry_point(self, f): + pass diff --git a/src/coreclr/src/gc/smallsort/codegen/utils.py b/src/coreclr/src/gc/smallsort/codegen/utils.py new file mode 100644 index 00000000000000..af53207dc563ab --- /dev/null +++ b/src/coreclr/src/gc/smallsort/codegen/utils.py @@ -0,0 +1,19 @@ +native_size_map = { + "int32_t": 4, + "uint32_t": 4, + "float": 4, + "int64_t": 8, + "uint64_t": 8, + "double": 8, +} + + +def next_power_of_2(v): + v = v - 1 + v |= v >> 1 + v |= v >> 2 + v |= v >> 4 + v |= v >> 8 + v |= v >> 16 + v = v + 1 + return int(v) diff --git a/src/coreclr/src/gc/vxsort.h b/src/coreclr/src/gc/vxsort.h index 7a0198eff7cd20..60551787d84e1f 100644 --- a/src/coreclr/src/gc/vxsort.h +++ b/src/coreclr/src/gc/vxsort.h @@ -1,340 +1,77 @@ -#ifndef GCSORT_VXSORT_H -#define GCSORT_VXSORT_H +#ifndef VXSORT_VXSORT_H +#define VXSORT_VXSORT_H -#include -#include -#include - -#include "smallsort/bitonic_sort.AVX2.int64_t.generated.h" -#include "smallsort/bitonic_sort.AVX2.uint64_t.generated.h" -#include "smallsort/bitonic_sort.AVX2.double.generated.h" -#include "smallsort/bitonic_sort.AVX2.float.generated.h" -#include "smallsort/bitonic_sort.AVX2.int32_t.generated.h" -#include "smallsort/bitonic_sort.AVX2.uint32_t.generated.h" - -#if _MSC_VER -#ifdef _M_X86 -#define ARCH_X86 -#endif -#ifdef _M_X64 -#define ARCH_X64 -#endif -#ifdef _M_ARM64 -#define ARCH_ARM -#endif +#ifdef __GNUC__ +#ifdef __clang__ +#pragma clang attribute push (__attribute__((target("popcnt"))), apply_to = any(function)) #else -#ifdef __i386__ -#define ARCH_X86 -#endif -#ifdef __amd64__ -#define ARCH_X64 +#pragma GCC push_options +#pragma GCC target("popcnt") 
#endif -#ifdef __arm__ -#define ARCH_ARM #endif -#endif - -#ifdef _MSC_VER -// MSVC -#include -#define mess_up_cmov() _ReadBarrier(); -#define INLINE __forceinline -#define NOINLINE __declspec(noinline) -#else -// GCC + Clang -#define mess_up_cmov() -#define INLINE __attribute__((always_inline)) -#define NOINLINE __attribute__((noinline)) -#endif - -#define i2d _mm256_castsi256_pd -#define d2i _mm256_castpd_si256 -#define i2s _mm256_castsi256_ps -#define s2i _mm256_castps_si256 -#define s2d _mm256_castps_pd -#define d2s _mm256_castpd_ps - -namespace gcsort { -using gcsort::smallsort::bitonic; -struct alignment_hint { - public: - static const size_t ALIGN = 32; - static const int8_t REALIGN = 0x66; - - alignment_hint() : left_align(REALIGN), right_align(REALIGN) {} - alignment_hint realign_left() { - alignment_hint copy = *this; - copy.left_align = REALIGN; - return copy; - } - - alignment_hint realign_right() { - alignment_hint copy = *this; - copy.right_align = REALIGN; - return copy; - } - - static bool is_aligned(void *p) { - return (size_t)p % ALIGN == 0; - } - - - int left_align : 8; - int right_align : 8; -}; - -enum vector_machine { - AVX2, - AVX512, - SVE, -}; - -template -struct vxsort_machine_traits { -public: - typedef __m256 Tv; - - static Tv load_vec(Tv* ptr); - static Tv store_vec(Tv* ptr, Tv v); - //static __m256i get_perm(int mask); - static Tv partition_vector(Tv v, int mask); - static Tv get_vec_pivot(T pivot); - static uint32_t get_cmpgt_mask(Tv a, Tv b); -}; - -#ifdef ARCH_X64 - -extern const int8_t perm_table_64[128]; -extern const int8_t perm_table_32[2048]; - -template <> -class vxsort_machine_traits { -private: -public: - typedef __m256i Tv; - - static INLINE Tv load_vec(Tv* p) { - return _mm256_lddqu_si256(p); - } - - static INLINE void store_vec(Tv* ptr, Tv v) { - _mm256_storeu_si256(ptr, v); - } +#include +#include - static INLINE Tv partition_vector(Tv v, int mask) { - assert(mask >= 0); - assert(mask <= 15); - return s2i(_mm256_permutevar8x32_ps(i2s(v), _mm256_cvtepu8_epi32(_mm_loadu_si128((__m128i*)(perm_table_64 + mask * 8))))); - } - static INLINE Tv get_vec_pivot(int64_t pivot) { - return _mm256_set1_epi64x(pivot); - } - static INLINE uint32_t get_cmpgt_mask(Tv a, Tv b) { - return _mm256_movemask_pd(i2d(_mm256_cmpgt_epi64(a, b))); - } -}; +#include "defs.h" +//#include "isa_detection.h" +#include "machine_traits.h" +#include "smallsort/bitonic_sort.h" -template <> -class vxsort_machine_traits { - private: - public: - typedef __m256i Tv; - - static INLINE Tv load_vec(Tv* p) { - return _mm256_lddqu_si256(p); - } - - static INLINE void store_vec(Tv* ptr, Tv v) { - _mm256_storeu_si256(ptr, v); - } - - static INLINE Tv partition_vector(Tv v, int mask) { - assert(mask >= 0); - assert(mask <= 15); - return s2i(_mm256_permutevar8x32_ps(i2s(v), _mm256_cvtepu8_epi32(_mm_loadu_si128((__m128i*)(perm_table_64 + mask * 8))))); - } - static INLINE Tv get_vec_pivot(int64_t pivot) { - return _mm256_set1_epi64x(pivot); - } - static INLINE uint32_t get_cmpgt_mask(Tv a, Tv b) { - __m256i top_bit = _mm256_set1_epi64x(1LLU << 63); - return _mm256_movemask_pd(i2d(_mm256_cmpgt_epi64(_mm256_xor_si256(top_bit, a), _mm256_xor_si256(top_bit, b)))); - } -}; +//#include +//#include +//#include -template <> -class vxsort_machine_traits { - private: - public: - typedef __m256d Tv; - - static INLINE Tv load_vec(Tv* p) { - return _mm256_loadu_pd((double *) p); - } - - static INLINE void store_vec(Tv* ptr, Tv v) { - _mm256_storeu_pd((double *) ptr, v); - } - - static INLINE Tv 
partition_vector(Tv v, int mask) { - assert(mask >= 0); - assert(mask <= 15); - return s2d(_mm256_permutevar8x32_ps(d2s(v), _mm256_cvtepu8_epi32(_mm_loadu_si128((__m128i*)(perm_table_64 + mask * 8))))); - } - - static INLINE Tv get_vec_pivot(double pivot) { - return _mm256_set1_pd(pivot); - } - static INLINE uint32_t get_cmpgt_mask(Tv a, Tv b) { - /// 0x0E: Greater-than (ordered, signaling) \n - /// 0x1E: Greater-than (ordered, non-signaling) - return _mm256_movemask_pd(_mm256_cmp_pd(a, b, 0x0E)); - } -}; +namespace vxsort { +using vxsort::smallsort::bitonic; -template <> -class vxsort_machine_traits { +template +struct alignment_hint { public: - typedef __m256i Tv; - static INLINE Tv load_vec(Tv* p) { - return _mm256_lddqu_si256(p); + static const size_t ALIGN = N; + static const int8_t REALIGN = 0x66; + + alignment_hint() : left_align(REALIGN), right_align(REALIGN) {} + alignment_hint realign_left() { + alignment_hint copy = *this; + copy.left_align = REALIGN; + return copy; } - static INLINE void store_vec(Tv* ptr, Tv v) { - _mm256_storeu_si256(ptr, v); + alignment_hint realign_right() { + alignment_hint copy = *this; + copy.right_align = REALIGN; + return copy; } - static INLINE Tv partition_vector(Tv v, int mask) { - assert(mask >= 0); - assert(mask <= 255); - return s2i(_mm256_permutevar8x32_ps(i2s(v), _mm256_cvtepu8_epi32(_mm_loadu_si128((__m128i*)(perm_table_32 + mask * 8))))); + static bool is_aligned(void *p) { + return (size_t)p % ALIGN == 0; } - static INLINE Tv get_vec_pivot(int32_t pivot) { - return _mm256_set1_epi32(pivot); - } - static INLINE uint32_t get_cmpgt_mask(Tv a, Tv b) { - return _mm256_movemask_ps(i2s(_mm256_cmpgt_epi32(a, b))); - } -}; - -template <> -class vxsort_machine_traits { - public: - typedef __m256i Tv; - static INLINE Tv load_vec(Tv* p) { - return _mm256_lddqu_si256(p); - } - - static INLINE void store_vec(Tv* ptr, Tv v) { - _mm256_storeu_si256(ptr, v); - } - - static INLINE Tv partition_vector(Tv v, int mask) { - assert(mask >= 0); - assert(mask <= 255); - return s2i(_mm256_permutevar8x32_ps(i2s(v), _mm256_cvtepu8_epi32(_mm_loadu_si128((__m128i*)(perm_table_32 + mask * 8))))); - } - - static INLINE Tv get_vec_pivot(uint32_t pivot) { - return _mm256_set1_epi32(pivot); - } - static INLINE uint32_t get_cmpgt_mask(Tv a, Tv b) { - __m256i top_bit = _mm256_set1_epi32(1U << 31); - return _mm256_movemask_ps(i2s(_mm256_cmpgt_epi32(_mm256_xor_si256(top_bit, a), _mm256_xor_si256(top_bit, b)))); - } -}; - -template <> -class vxsort_machine_traits { - public: - typedef __m256 Tv; - static INLINE Tv load_vec(Tv* p) { - return _mm256_loadu_ps((float *)p); - } - - static INLINE void store_vec(Tv* ptr, Tv v) { - _mm256_storeu_ps((float *) ptr, v); - } - - static INLINE Tv partition_vector(Tv v, int mask) { - assert(mask >= 0); - assert(mask <= 255); - return _mm256_permutevar8x32_ps(v, _mm256_cvtepu8_epi32(_mm_loadu_si128((__m128i*)(perm_table_32 + mask * 8)))); - } - - static INLINE Tv get_vec_pivot(float pivot) { - return _mm256_set1_ps(pivot); - } - - static INLINE uint32_t get_cmpgt_mask(Tv a, Tv b) { - /// 0x0E: Greater-than (ordered, signaling) \n - /// 0x1E: Greater-than (ordered, non-signaling) - return _mm256_movemask_ps(_mm256_cmp_ps(a, b, 0x0E)); - } + int left_align : 8; + int right_align : 8; }; -#endif -#ifdef ARCH_ARM64 -#error "─│─│──╫▓▓▓╫──│─────│─│─│──────╫▓▓╫│──│─│" -#error "──│─▓███████▓─╫╫╫╫╫╫╫╫╫╫╫╫╫│▓███████╫──" -#error "───██████████████████████████████████▓─" -#error "│─████████████│─│─│─│─────▓███████████╫" -#error 
"╫█████────│───│─│───│─────│─│─│───╫████▓" -#error "│████│──│───│───│─│───│───────│─│─│▓███╫" -#error "─▓███│───────│─▓██───│╫██╫─│─│─│───▓███│" -#error "──███─│──────╫████▓───█████────────▓███─" -#error "──╫██──│─│──╫██████│─│██████─│─────▓██─│" -#error "│─│▓█││─│─││███▓▓██─│─██▓▓███─│─│──▓█─│─" -#error "────█│─│───███╫▓▓█▓│──█▓▓▓▓██▓─────▓█───" -#error "│─││█││───▓███╫██▓╫─│─▓▓█▓▓███─────▓█───" -#error "─│─╫█│─│─│████▓╫▓▓─────█▓╫████▓──│─▓█───" -#error "│─││█╫│─││███████─│██╫│▓███████─│─│██─│─" -#error "─│─│█▓╫╫─▓██████╫│─▓█│──▓██████│╫╫│██│─│" -#error "│─│─██│╫│▓█████╫│───▓───│▓█████╫╫╫╫█▓──" -#error "─│─│▓█╫││╫████╫│││╫██▓││││▓████│╫─▓█╫│─│" -#error "│─│─│██│││╫▓▓││╫╫╫╫╫▓╫╫╫╫╫│╫▓▓╫││╫██──│─" -#error "─│───▓██╫─────││││││─││││││────│▓██│────" -#error "│─│─│─▓██▓╫╫╫╫╫╫╫╫▓▓▓▓▓╫╫╫╫╫╫╫▓███│────" -#error "───────╫██████████▓▓▓▓▓██████████│────│" -#error "│─│─│───▓█████████╫─│─▓█████████│─│─│─│" -#error "─────────██████████──│█████████╫─│───││" -#error "│─│─│───│▓█╫███████││▓███████╫█││─│─│─│" -#error "───────│─██─╫██████▓─███████││█╫───│──│" -#error "│───│───│██─││█████▓─█████▓─│╫█╫│──────" -#error "─│─│───│─▓█──│─╫▓██│─▓██▓│─│─▓█│───────" -#error "│───│─│─│─██────│─│───│─────│██───│─│─│" -#error "─│─│───│─│▓██╫─│─│─────│─│─▓██││─│───│─│" -#error "│───────│─│██████████████████▓│─│─│─│─│" -#error "─│───│─│───│███████▓▓████████│─│───│──│" -#error "│─│───│─│─│─│██████╫─▓█████▓────│─│─│──" -#error "─────│─────╫│╫▓████▓─█████▓│╫╫───────│" -#error "│─│───│───╫─╫╫╫╫███╫╫╫██▓╫│╫╫╫│─│─────" -#endif - - template -class vxsort { +class cvxsort { static_assert(Unroll >= 1, "Unroll can be in the range 1..12"); static_assert(Unroll <= 12, "Unroll can be in the range 1..12"); private: - //using Tv2 = Tp::Tv; using Tp = vxsort_machine_traits; - typedef typename Tp::Tv TV; + typedef typename Tp::TV TV; + typedef alignment_hint AH; static const int ELEMENT_ALIGN = sizeof(T) - 1; - static const int N = 32 / sizeof(T); + static const int N = sizeof(TV) / sizeof(T); static const int32_t MAX_BITONIC_SORT_VECTORS = 16; static const int32_t SMALL_SORT_THRESHOLD_ELEMENTS = MAX_BITONIC_SORT_VECTORS * N; - //static const int32_t MaxInnerUnroll = ((SMALL_SORT_THRESHOLD_ELEMENTS - (N - 2*N)) / (2 * N)); static const int32_t MaxInnerUnroll = (MAX_BITONIC_SORT_VECTORS - 3) / 2; static const int32_t SafeInnerUnroll = MaxInnerUnroll > Unroll ? Unroll : MaxInnerUnroll; static const int32_t SLACK_PER_SIDE_IN_VECTORS = Unroll; - static const size_t ALIGN = alignment_hint::ALIGN; + static const size_t ALIGN = AH::ALIGN; static const size_t ALIGN_MASK = ALIGN - 1; static const int SLACK_PER_SIDE_IN_ELEMENTS = SLACK_PER_SIDE_IN_VECTORS * N; @@ -415,8 +152,8 @@ class vxsort { T* _startPtr = nullptr; T* _endPtr = nullptr; - T _temp[PARTITION_TMP_SIZE_IN_ELEMENTS]; + int _depth = 0; NOINLINE @@ -457,8 +194,7 @@ class vxsort { return readRight; } - void realsort(T* left, T* right, - alignment_hint realignHint, + void sort(T* left, T* right, AH realignHint, int depthLimit) { auto length = (size_t)(right - left + 1); @@ -486,7 +222,7 @@ class vxsort { auto extraSpaceNeeded = nextLength - length; auto fakeLeft = left - extraSpaceNeeded; if (fakeLeft >= _startPtr) { - bitonic::sort(fakeLeft, nextLength); + bitonic::sort(fakeLeft, nextLength); } else { insertion_sort(left, right); } @@ -521,7 +257,7 @@ class vxsort { // do this. 
In reality we need more like 2x 4bits for each side, but I // don't think there is a real difference' - if (realignHint.left_align == alignment_hint::REALIGN) { + if (realignHint.left_align == AH::REALIGN) { // Alignment flow: // * Calculate pre-alignment on the left // * See it would cause us an out-of bounds read @@ -530,10 +266,11 @@ class vxsort { auto preAlignedLeft = reinterpret_cast(reinterpret_cast(left) & ~ALIGN_MASK); auto cannotPreAlignLeft = (preAlignedLeft - _startPtr) >> 63; realignHint.left_align = (preAlignedLeft - left) + (N & cannotPreAlignLeft); - assert(alignment_hint::is_aligned(left + realignHint.left_align)); + assert(realignHint.left_align >= -N && realignHint.left_align <= N); + assert(AH::is_aligned(left + realignHint.left_align)); } - if (realignHint.right_align == alignment_hint::REALIGN) { + if (realignHint.right_align == AH::REALIGN) { // Same as above, but in addition: // right is pointing just PAST the last element we intend to partition // (it's pointing to where we will store the pivot!) So we calculate alignment based on @@ -541,7 +278,8 @@ class vxsort { auto preAlignedRight = reinterpret_cast(((reinterpret_cast(right) - 1) & ~ALIGN_MASK) + ALIGN); auto cannotPreAlignRight = (_endPtr - preAlignedRight) >> 63; realignHint.right_align = (preAlignedRight - right - (N & cannotPreAlignRight)); - assert(alignment_hint::is_aligned(right + realignHint.right_align)); + assert(realignHint.right_align >= -N && realignHint.right_align <= N); + assert(AH::is_aligned(right + realignHint.right_align)); } // Compute median-of-three, of: @@ -561,15 +299,27 @@ class vxsort { _depth++; - realsort(left, sep - 2, realignHint.realign_right(), depthLimit); - realsort(sep, right, realignHint.realign_left(), depthLimit); + sort(left, sep - 2, realignHint.realign_right(), depthLimit); + sort(sep, right, realignHint.realign_left(), depthLimit); _depth--; } + static INLINE void partition_block(TV& dataVec, const TV& P, T*& left, T*& right) { + if (Tp::supports_compress_writes()) { + partition_block_with_compress(dataVec, P, left, right); + } else { + partition_block_without_compress(dataVec, P, left, right); + } + } + + static INLINE void partition_block_without_compress(TV& dataVec, + const TV& P, + T*& left, + T*& right) { auto mask = Tp::get_cmpgt_mask(dataVec, P); dataVec = Tp::partition_vector(dataVec, mask); Tp::store_vec(reinterpret_cast(left), dataVec); @@ -579,8 +329,20 @@ class vxsort { left += popCount + N; } + static INLINE void partition_block_with_compress(TV& dataVec, + const TV& P, + T*& left, + T*& right) { + auto mask = Tp::get_cmpgt_mask(dataVec, P); + auto popCount = -_mm_popcnt_u64(mask); + Tp::store_compress_vec(reinterpret_cast(left), dataVec, ~mask); + Tp::store_compress_vec(reinterpret_cast(right + N + popCount), dataVec, mask); + right += popCount; + left += popCount + N; + } + template - T* vectorized_partition(T* const left, T* const right, const alignment_hint hint) { + T* vectorized_partition(T* const left, T* const right, const AH hint) { assert(right - left >= SMALL_SORT_THRESHOLD_ELEMENTS); assert((reinterpret_cast(left) & ELEMENT_ALIGN) == 0); assert((reinterpret_cast(right) & ELEMENT_ALIGN) == 0); @@ -644,43 +406,11 @@ class vxsort { // Once that happens, we can read with Avx.LoadAlignedVector256 // And also know for sure that our reads will never cross cache-lines // Otherwise, 50% of our AVX2 Loads will need to read from two cache-lines + align_vectorized(left, right, hint, P, readLeft, readRight, + tmpStartLeft, tmpLeft, tmpStartRight, 
tmpRight); const auto leftAlign = hint.left_align; const auto rightAlign = hint.right_align; - - auto preAlignedLeft = (TV*) (left + leftAlign); - auto preAlignedRight = (TV*) (right + rightAlign - N); - - // Read overlapped data from right (includes re-reading the pivot) - auto RT0 = Tp::load_vec(preAlignedRight); - auto LT0 = Tp::load_vec(preAlignedLeft); - auto rtMask = Tp::get_cmpgt_mask(RT0, P); - auto ltMask = Tp::get_cmpgt_mask(LT0, P); - auto rtPopCount = max(_mm_popcnt_u32(rtMask), rightAlign); - auto ltPopCount = _mm_popcnt_u32(ltMask); - RT0 = Tp::partition_vector(RT0, rtMask); - LT0 = Tp::partition_vector(LT0, ltMask); - Tp::store_vec((TV*) tmpRight, RT0); - Tp::store_vec((TV*) tmpLeft, LT0); - - auto rai = ~((rightAlign - 1) >> 31); - auto lai = leftAlign >> 31; - - tmpRight -= rtPopCount & rai; - rtPopCount = N - rtPopCount; - readRight += (rightAlign - N) & rai; - - Tp::store_vec((TV*) tmpRight, LT0); - tmpRight -= ltPopCount & lai; - ltPopCount = N - ltPopCount; - tmpLeft += ltPopCount & lai; - tmpStartLeft += -leftAlign & lai; - readLeft += (leftAlign + N) & lai; - - Tp::store_vec((TV*) tmpLeft, RT0); - tmpLeft += rtPopCount & rai; - tmpStartRight -= rightAlign & rai; - if (leftAlign > 0) { tmpRight += N; readLeft = align_left_scalar_uncommon(readLeft, pivot, tmpLeft, tmpRight); @@ -765,8 +495,8 @@ class vxsort { case 3: partition_block(d03, P, writeLeft, writeRight); case 2: partition_block(d02, P, writeLeft, writeRight); case 1: partition_block(d01, P, writeLeft, writeRight); - } - } + } + } readRightV += (InnerUnroll - 1); @@ -782,6 +512,7 @@ class vxsort { auto d = Tp::load_vec(nextPtr); partition_block(d, P, writeLeft, writeRight); + //partition_block_without_compress(d, P, writeLeft, writeRight); } // 3. Copy-back the 4 registers + remainder we partitioned in the beginning @@ -800,13 +531,95 @@ class vxsort { return writeLeft; } -public: + void align_vectorized(const T* left, + const T* right, + const AH& hint, + const TV P, + T*& readLeft, + T*& readRight, + T*& tmpStartLeft, + T*& tmpLeft, + T*& tmpStartRight, + T*& tmpRight) const { + const auto leftAlign = hint.left_align; + const auto rightAlign = hint.right_align; + const auto rai = ~((rightAlign - 1) >> 31); + const auto lai = leftAlign >> 31; + const auto preAlignedLeft = (TV*) (left + leftAlign); + const auto preAlignedRight = (TV*) (right + rightAlign - N); + + // Alignment with vectorization is tricky, so read carefully before changing code: + // 1. We load data, which we might need to align, if the alignment hints + // mean pre-alignment (or overlapping alignment) + // 2. We partition and store in the following order: + // a) right-portion of right vector to the right-side + // b) left-portion of left vector to the left side + // c) at this point one-half of each partitioned vector has been committed + // back to memory. 
+ // d) we advance the right write (tmpRight) pointer by how many elements + // were actually needed to be written to the right hand side + // e) We write the right portion of the left vector to the right side + // now that its write position has been updated + auto RT0 = Tp::load_vec(preAlignedRight); + auto LT0 = Tp::load_vec(preAlignedLeft); + auto rtMask = Tp::get_cmpgt_mask(RT0, P); + auto ltMask = Tp::get_cmpgt_mask(LT0, P); + const auto rtPopCountRightPart = max(_mm_popcnt_u32(rtMask), rightAlign); + const auto ltPopCountRightPart = _mm_popcnt_u32(ltMask); + const auto rtPopCountLeftPart = N - rtPopCountRightPart; + const auto ltPopCountLeftPart = N - ltPopCountRightPart; + + if (Tp::supports_compress_writes()) { + Tp::store_compress_vec((TV *) (tmpRight + N - rtPopCountRightPart), RT0, rtMask); + Tp::store_compress_vec((TV *) tmpLeft, LT0, ~ltMask); + + tmpRight -= rtPopCountRightPart & rai; + readRight += (rightAlign - N) & rai; + + Tp::store_compress_vec((TV *) (tmpRight + N - ltPopCountRightPart), LT0, ltMask); + tmpRight -= ltPopCountRightPart & lai; + tmpLeft += ltPopCountLeftPart & lai; + tmpStartLeft += -leftAlign & lai; + readLeft += (leftAlign + N) & lai; + + Tp::store_compress_vec((TV*) tmpLeft, RT0, ~rtMask); + tmpLeft += rtPopCountLeftPart & rai; + tmpStartRight -= rightAlign & rai; + } + else { + RT0 = Tp::partition_vector(RT0, rtMask); + LT0 = Tp::partition_vector(LT0, ltMask); + Tp::store_vec((TV*) tmpRight, RT0); + Tp::store_vec((TV*) tmpLeft, LT0); + + + tmpRight -= rtPopCountRightPart & rai; + readRight += (rightAlign - N) & rai; + + Tp::store_vec((TV*) tmpRight, LT0); + tmpRight -= ltPopCountRightPart & lai; + + tmpLeft += ltPopCountLeftPart & lai; + tmpStartLeft += -leftAlign & lai; + readLeft += (leftAlign + N) & lai; + + Tp::store_vec((TV*) tmpLeft, RT0); + tmpLeft += rtPopCountLeftPart & rai; + tmpStartRight -= rightAlign & rai; + } + } + + public: NOINLINE void sort(T* left, T* right) { +// init_isa_detection(); reset(left, right); auto depthLimit = 2 * floor_log2_plus_one(right + 1 - left); - realsort(left, right, alignment_hint(), depthLimit); + sort(left, right, AH(), depthLimit); } }; } // namespace gcsort + +#include "vxsort_targets_disable.h" + #endif diff --git a/src/coreclr/src/gc/vxsort_targets_disable.h b/src/coreclr/src/gc/vxsort_targets_disable.h new file mode 100644 index 00000000000000..56fd0ff02325b9 --- /dev/null +++ b/src/coreclr/src/gc/vxsort_targets_disable.h @@ -0,0 +1,8 @@ + +#ifdef __GNUC__ +#ifdef __clang__ +#pragma clang attribute pop +#else +#pragma GCC pop_options +#endif +#endif \ No newline at end of file diff --git a/src/coreclr/src/gc/vxsort_targets_enable_avx2.h b/src/coreclr/src/gc/vxsort_targets_enable_avx2.h new file mode 100644 index 00000000000000..b29cd1ae0aa774 --- /dev/null +++ b/src/coreclr/src/gc/vxsort_targets_enable_avx2.h @@ -0,0 +1,8 @@ +#ifdef __GNUC__ +#ifdef __clang__ +#pragma clang attribute push (__attribute__((target("avx2"))), apply_to = any(function)) +#else +#pragma GCC push_options +#pragma GCC target("avx2") +#endif +#endif diff --git a/src/coreclr/src/gc/vxsort_targets_enable_avx512.h b/src/coreclr/src/gc/vxsort_targets_enable_avx512.h new file mode 100644 index 00000000000000..fb6930a3014565 --- /dev/null +++ b/src/coreclr/src/gc/vxsort_targets_enable_avx512.h @@ -0,0 +1,8 @@ +#ifdef __GNUC__ +#ifdef __clang__ +#pragma clang attribute push (__attribute__((target("avx512f"))), apply_to = any(function)) +#else +#pragma GCC push_options +#pragma GCC target("avx512f") +#endif +#endif From 
0f543def8b2d29a227a0dbb376c835aa3fdba754 Mon Sep 17 00:00:00 2001 From: Peter Sollich Date: Tue, 23 Jun 2020 14:17:19 +0200 Subject: [PATCH 13/31] Implement runtime test for AVX512 support. --- src/coreclr/src/gc/gc.cpp | 28 +++++++++++---- src/coreclr/src/inc/corinfoinstructionset.h | 2 ++ src/coreclr/src/vm/amd64/AsmHelpers.asm | 15 ++++++++ src/coreclr/src/vm/amd64/unixstubs.cpp | 12 +++++++ src/coreclr/src/vm/cgensys.h | 1 + src/coreclr/src/vm/codeman.cpp | 39 ++++++++++++++++++--- src/coreclr/src/vm/gcenv.ee.cpp | 2 +- 7 files changed, 88 insertions(+), 11 deletions(-) diff --git a/src/coreclr/src/gc/gc.cpp b/src/coreclr/src/gc/gc.cpp index eb32d8e9e2e650..03f236c71fd62e 100644 --- a/src/coreclr/src/gc/gc.cpp +++ b/src/coreclr/src/gc/gc.cpp @@ -2123,9 +2123,17 @@ namespace std #ifdef USE_VXSORT void do_vxsort(uint8_t** low, uint8_t** high, unsigned int depth) { -// auto sorter = vxsort::cvxsort(); - auto sorter = vxsort::cvxsort(); - sorter.sort((int64_t*)low, (int64_t*)high); + assert(GCToEEInterface::HasInstructionSet(kInstructionSetAVX2)); + if (GCToEEInterface::HasInstructionSet(kInstructionSetAVX512)) + { + auto sorter = vxsort::cvxsort(); + sorter.sort((int64_t*)low, (int64_t*)high); + } + else + { + auto sorter = vxsort::cvxsort(); + sorter.sort((int64_t*)low, (int64_t*)high); + } #ifdef _DEBUG for (uint8_t** p = low; p < high; p++) { @@ -2135,9 +2143,17 @@ void do_vxsort(uint8_t** low, uint8_t** high, unsigned int depth) } void do_vxsort(uint32_t* low, uint32_t* high, unsigned int depth) { -// auto sorter = vxsort::cvxsort(); - auto sorter = vxsort::cvxsort(); - sorter.sort(low, high); + assert(GCToEEInterface::HasInstructionSet(kInstructionSetAVX2)); + if (GCToEEInterface::HasInstructionSet(kInstructionSetAVX512)) + { + auto sorter = vxsort::cvxsort(); + sorter.sort(low, high); + } + else + { + auto sorter = vxsort::cvxsort(); + sorter.sort(low, high); + } #ifdef _DEBUG for (uint32_t* p = low; p < high; p++) { diff --git a/src/coreclr/src/inc/corinfoinstructionset.h b/src/coreclr/src/inc/corinfoinstructionset.h index 8e667bc29b07b1..1e326328639a07 100644 --- a/src/coreclr/src/inc/corinfoinstructionset.h +++ b/src/coreclr/src/inc/corinfoinstructionset.h @@ -59,6 +59,7 @@ enum CORINFO_InstructionSet InstructionSet_SSE2_X64=25, InstructionSet_SSE41_X64=26, InstructionSet_SSE42_X64=27, + InstructionSet_AVX512=28, #endif // TARGET_AMD64 #ifdef TARGET_X86 InstructionSet_X86Base=1, @@ -88,6 +89,7 @@ enum CORINFO_InstructionSet InstructionSet_SSE2_X64=25, InstructionSet_SSE41_X64=26, InstructionSet_SSE42_X64=27, + InstructionSet_AVX512=28, #endif // TARGET_X86 }; diff --git a/src/coreclr/src/vm/amd64/AsmHelpers.asm b/src/coreclr/src/vm/amd64/AsmHelpers.asm index f65c6f0be8b5cb..b02ac54f2fd028 100644 --- a/src/coreclr/src/vm/amd64/AsmHelpers.asm +++ b/src/coreclr/src/vm/amd64/AsmHelpers.asm @@ -704,6 +704,21 @@ LEAF_ENTRY xmmYmmStateSupport, _TEXT ret LEAF_END xmmYmmStateSupport, _TEXT +;; extern "C" DWORD __stdcall zmmStateSupport(); +LEAF_ENTRY zmmStateSupport, _TEXT + mov ecx, 0 ; Specify xcr0 + xgetbv ; result in EDX:EAX + and eax, 0E6H + cmp eax, 0E6H ; check OS has enabled ZMM, XMM and YMM state support + jne not_supported + mov eax, 1 + jmp done + not_supported: + mov eax, 0 + done: + ret +LEAF_END zmmStateSupport, _TEXT + ;The following function uses Deterministic Cache Parameter leafs to determine the cache hierarchy information on Prescott & Above platforms. ; This function takes 3 arguments: ; Arg1 is an input to ECX. 
Used as index to specify which cache level to return information on by CPUID. diff --git a/src/coreclr/src/vm/amd64/unixstubs.cpp b/src/coreclr/src/vm/amd64/unixstubs.cpp index 4d680ae1317f2e..3fcef24f7adab4 100644 --- a/src/coreclr/src/vm/amd64/unixstubs.cpp +++ b/src/coreclr/src/vm/amd64/unixstubs.cpp @@ -54,6 +54,18 @@ extern "C" return ((eax & 0x06) == 0x06) ? 1 : 0; } + DWORD zmmStateSupport() + { + DWORD eax; + __asm(" xgetbv\n" \ + : "=a"(eax) /*output in eax*/\ + : "c"(0) /*inputs - 0 in ecx*/\ + : "edx" /* registers that are clobbered*/ + ); + // check OS has enabled XMM, YMM and ZMM state support + return ((eax & 0xe6) == 0xe6) ? 1 : 0; + } + void STDMETHODCALLTYPE JIT_ProfilerEnterLeaveTailcallStub(UINT_PTR ProfilerHandle) { } diff --git a/src/coreclr/src/vm/cgensys.h b/src/coreclr/src/vm/cgensys.h index 17213af0e7594c..bb0cdfd06a0be4 100644 --- a/src/coreclr/src/vm/cgensys.h +++ b/src/coreclr/src/vm/cgensys.h @@ -99,6 +99,7 @@ inline void GetSpecificCpuInfo(CORINFO_CPU * cpuInfo) extern "C" DWORD __stdcall getcpuid(DWORD arg, unsigned char result[16]); extern "C" DWORD __stdcall getextcpuid(DWORD arg1, DWORD arg2, unsigned char result[16]); extern "C" DWORD __stdcall xmmYmmStateSupport(); +extern "C" DWORD __stdcall zmmStateSupport(); #endif inline bool TargetHasAVXSupport() diff --git a/src/coreclr/src/vm/codeman.cpp b/src/coreclr/src/vm/codeman.cpp index a41e19c02b5e8c..824f254fedbaea 100644 --- a/src/coreclr/src/vm/codeman.cpp +++ b/src/coreclr/src/vm/codeman.cpp @@ -1232,27 +1232,38 @@ EEJitManager::EEJitManager() #if defined(TARGET_X86) || defined(TARGET_AMD64) -bool DoesOSSupportAVX() +#ifndef TARGET_UNIX +static DWORD64 GetEnabledXStateFeaturesHelper() { LIMITED_METHOD_CONTRACT; -#ifndef TARGET_UNIX // On Windows we have an api(GetEnabledXStateFeatures) to check if AVX is supported typedef DWORD64 (WINAPI *PGETENABLEDXSTATEFEATURES)(); PGETENABLEDXSTATEFEATURES pfnGetEnabledXStateFeatures = NULL; HMODULE hMod = WszLoadLibraryEx(WINDOWS_KERNEL32_DLLNAME_W, NULL, LOAD_LIBRARY_SEARCH_SYSTEM32); if(hMod == NULL) - return FALSE; + return 0; pfnGetEnabledXStateFeatures = (PGETENABLEDXSTATEFEATURES)GetProcAddress(hMod, "GetEnabledXStateFeatures"); if (pfnGetEnabledXStateFeatures == NULL) { - return FALSE; + return 0; } DWORD64 FeatureMask = pfnGetEnabledXStateFeatures(); + + return FeatureMask; +} +#endif // !TARGET_UNIX + +bool DoesOSSupportAVX() +{ + LIMITED_METHOD_CONTRACT; + +#ifndef TARGET_UNIX + DWORD64 FeatureMask = GetEnabledXStateFeaturesHelper(); if ((FeatureMask & XSTATE_MASK_AVX) == 0) { return FALSE; @@ -1262,6 +1273,21 @@ bool DoesOSSupportAVX() return TRUE; } +bool DoesOSSupportAVX512() +{ + LIMITED_METHOD_CONTRACT; + +#ifndef TARGET_UNIX + DWORD64 FeatureMask = GetEnabledXStateFeaturesHelper(); + if ((FeatureMask & XSTATE_MASK_AVX512) == XSTATE_MASK_AVX512) + { + return TRUE; + } +#endif // !TARGET_UNIX + + return FALSE; +} + #endif // defined(TARGET_X86) || defined(TARGET_AMD64) void EEJitManager::SetCpuInfo() @@ -1421,6 +1447,11 @@ void EEJitManager::SetCpuInfo() { CPUCompileFlags.Set(InstructionSet_AVX2); } + + if (DoesOSSupportAVX512() && zmmStateSupport() == 1 && (buffer[6] & 0x01) != 0) + { + CPUCompileFlags.Set(InstructionSet_AVX512); + } } } } diff --git a/src/coreclr/src/vm/gcenv.ee.cpp b/src/coreclr/src/vm/gcenv.ee.cpp index baf8c11a57d101..b530e105331241 100644 --- a/src/coreclr/src/vm/gcenv.ee.cpp +++ b/src/coreclr/src/vm/gcenv.ee.cpp @@ -1662,7 +1662,7 @@ bool GCToEEInterface::HasInstructionSet(InstructionSet requestedInstructionSet) switch 
(requestedInstructionSet) { case kInstructionSetAVX2: return cpuCompileFlags.IsSet(InstructionSet_AVX2); - case kInstructionSetAVX512: return false; + case kInstructionSetAVX512: return cpuCompileFlags.IsSet(InstructionSet_AVX512); default: return false; } #else From f49b16f3e2c5b7fb83ee91a17416a77bbc34c70e Mon Sep 17 00:00:00 2001 From: Peter Sollich Date: Wed, 24 Jun 2020 12:38:21 +0200 Subject: [PATCH 14/31] Move the files for the vectorized sort to their own directory, add stubs to call AVX2 or AVX512 flavor of the sort. --- src/coreclr/src/gc/CMakeLists.txt | 9 ++- src/coreclr/src/gc/gc.cpp | 48 ++------------- src/coreclr/src/gc/gcsvr.cpp | 11 ++-- src/coreclr/src/gc/gcwks.cpp | 11 ++-- src/coreclr/src/gc/sample/CMakeLists.txt | 7 +++ src/coreclr/src/gc/{ => vxsort}/defs.h | 0 src/coreclr/src/gc/vxsort/do_vxsort.h | 4 ++ src/coreclr/src/gc/vxsort/do_vxsort_avx2.cpp | 59 ++++++++++++++++++ .../src/gc/vxsort/do_vxsort_avx512.cpp | 60 +++++++++++++++++++ .../gc/{ => vxsort}/machine_traits.avx2.cpp | 1 + .../src/gc/{ => vxsort}/machine_traits.avx2.h | 15 +++-- .../gc/{ => vxsort}/machine_traits.avx512.h | 0 .../src/gc/{ => vxsort}/machine_traits.h | 0 .../bitonic_sort.AVX2.int64_t.generated.cpp | 1 + .../bitonic_sort.AVX2.int64_t.generated.h | 0 .../bitonic_sort.AVX2.uint32_t.generated.cpp | 1 + .../bitonic_sort.AVX2.uint32_t.generated.h | 0 .../bitonic_sort.AVX512.int64_t.generated.cpp | 1 + .../bitonic_sort.AVX512.int64_t.generated.h | 0 ...bitonic_sort.AVX512.uint32_t.generated.cpp | 1 + .../bitonic_sort.AVX512.uint32_t.generated.h | 0 .../gc/{ => vxsort}/smallsort/bitonic_sort.h | 0 .../gc/{ => vxsort}/smallsort/codegen/avx2.py | 0 .../{ => vxsort}/smallsort/codegen/avx512.py | 0 .../smallsort/codegen/bitonic_gen.py | 0 .../smallsort/codegen/bitonic_isa.py | 0 .../{ => vxsort}/smallsort/codegen/utils.py | 0 src/coreclr/src/gc/{ => vxsort}/vxsort.h | 5 +- .../gc/{ => vxsort}/vxsort_targets_disable.h | 0 .../{ => vxsort}/vxsort_targets_enable_avx2.h | 0 .../vxsort_targets_enable_avx512.h | 0 src/coreclr/src/vm/CMakeLists.txt | 10 +++- 32 files changed, 174 insertions(+), 70 deletions(-) rename src/coreclr/src/gc/{ => vxsort}/defs.h (100%) create mode 100644 src/coreclr/src/gc/vxsort/do_vxsort.h create mode 100644 src/coreclr/src/gc/vxsort/do_vxsort_avx2.cpp create mode 100644 src/coreclr/src/gc/vxsort/do_vxsort_avx512.cpp rename src/coreclr/src/gc/{ => vxsort}/machine_traits.avx2.cpp (99%) rename src/coreclr/src/gc/{ => vxsort}/machine_traits.avx2.h (90%) rename src/coreclr/src/gc/{ => vxsort}/machine_traits.avx512.h (100%) rename src/coreclr/src/gc/{ => vxsort}/machine_traits.h (100%) rename src/coreclr/src/gc/{ => vxsort}/smallsort/bitonic_sort.AVX2.int64_t.generated.cpp (97%) rename src/coreclr/src/gc/{ => vxsort}/smallsort/bitonic_sort.AVX2.int64_t.generated.h (100%) rename src/coreclr/src/gc/{ => vxsort}/smallsort/bitonic_sort.AVX2.uint32_t.generated.cpp (97%) rename src/coreclr/src/gc/{ => vxsort}/smallsort/bitonic_sort.AVX2.uint32_t.generated.h (100%) rename src/coreclr/src/gc/{ => vxsort}/smallsort/bitonic_sort.AVX512.int64_t.generated.cpp (97%) rename src/coreclr/src/gc/{ => vxsort}/smallsort/bitonic_sort.AVX512.int64_t.generated.h (100%) rename src/coreclr/src/gc/{ => vxsort}/smallsort/bitonic_sort.AVX512.uint32_t.generated.cpp (97%) rename src/coreclr/src/gc/{ => vxsort}/smallsort/bitonic_sort.AVX512.uint32_t.generated.h (100%) rename src/coreclr/src/gc/{ => vxsort}/smallsort/bitonic_sort.h (100%) rename src/coreclr/src/gc/{ => vxsort}/smallsort/codegen/avx2.py (100%) rename 
src/coreclr/src/gc/{ => vxsort}/smallsort/codegen/avx512.py (100%) rename src/coreclr/src/gc/{ => vxsort}/smallsort/codegen/bitonic_gen.py (100%) rename src/coreclr/src/gc/{ => vxsort}/smallsort/codegen/bitonic_isa.py (100%) rename src/coreclr/src/gc/{ => vxsort}/smallsort/codegen/utils.py (100%) rename src/coreclr/src/gc/{ => vxsort}/vxsort.h (99%) rename src/coreclr/src/gc/{ => vxsort}/vxsort_targets_disable.h (100%) rename src/coreclr/src/gc/{ => vxsort}/vxsort_targets_enable_avx2.h (100%) rename src/coreclr/src/gc/{ => vxsort}/vxsort_targets_enable_avx512.h (100%) diff --git a/src/coreclr/src/gc/CMakeLists.txt b/src/coreclr/src/gc/CMakeLists.txt index ba4758f8ca82de..1c62db5b2aad5e 100644 --- a/src/coreclr/src/gc/CMakeLists.txt +++ b/src/coreclr/src/gc/CMakeLists.txt @@ -37,6 +37,13 @@ else() set ( GC_SOURCES ${GC_SOURCES} windows/gcenv.windows.cpp + vxsort/do_vxsort_avx2.cpp + vxsort/do_vxsort_avx512.cpp + vxsort/machine_traits.avx2.cpp + vxsort/smallsort/bitonic_sort.AVX2.int64_t.generated.cpp + vxsort/smallsort/bitonic_sort.AVX2.uint32_t.generated.cpp + vxsort/smallsort/bitonic_sort.AVX512.int64_t.generated.cpp + vxsort/smallsort/bitonic_sort.AVX512.uint32_t.generated.cpp ) endif(CLR_CMAKE_HOST_UNIX) @@ -76,7 +83,7 @@ if (CLR_CMAKE_TARGET_WIN32) handletablepriv.h objecthandle.h softwarewritewatch.h - vxsort.h) + vxsort/do_vxsort.h) endif(CLR_CMAKE_TARGET_WIN32) if(CLR_CMAKE_HOST_WIN32) diff --git a/src/coreclr/src/gc/gc.cpp b/src/coreclr/src/gc/gc.cpp index 03f236c71fd62e..370098bbeb326a 100644 --- a/src/coreclr/src/gc/gc.cpp +++ b/src/coreclr/src/gc/gc.cpp @@ -2085,54 +2085,17 @@ uint8_t* tree_search (uint8_t* tree, uint8_t* old_address); #define _sort introsort::sort #elif defined(USE_VXSORT) #define _sort do_vxsort -namespace std -{ - template - class numeric_limits - { - public: - static _Ty Max() - { - return _Ty(); - } - }; - template <> - class numeric_limits - { - public: - static uint32_t Max() - { - return 0xffffffff; - } - }; - template <> - class numeric_limits - { - public: - static int64_t Max() - { - return 0x7fffffffffffffff; - } - }; -} - -#include "machine_traits.avx2.h" -#include "machine_traits.avx512.h" -#include "vxsort.h" - #ifdef USE_VXSORT void do_vxsort(uint8_t** low, uint8_t** high, unsigned int depth) { assert(GCToEEInterface::HasInstructionSet(kInstructionSetAVX2)); if (GCToEEInterface::HasInstructionSet(kInstructionSetAVX512)) { - auto sorter = vxsort::cvxsort(); - sorter.sort((int64_t*)low, (int64_t*)high); + do_vxsort_avx512(low, high); } else { - auto sorter = vxsort::cvxsort(); - sorter.sort((int64_t*)low, (int64_t*)high); + do_vxsort_avx2(low, high); } #ifdef _DEBUG for (uint8_t** p = low; p < high; p++) @@ -2141,18 +2104,17 @@ void do_vxsort(uint8_t** low, uint8_t** high, unsigned int depth) } #endif } + void do_vxsort(uint32_t* low, uint32_t* high, unsigned int depth) { assert(GCToEEInterface::HasInstructionSet(kInstructionSetAVX2)); if (GCToEEInterface::HasInstructionSet(kInstructionSetAVX512)) { - auto sorter = vxsort::cvxsort(); - sorter.sort(low, high); + do_vxsort_avx512(low, high); } else { - auto sorter = vxsort::cvxsort(); - sorter.sort(low, high); + do_vxsort_avx2(low, high); } #ifdef _DEBUG for (uint32_t* p = low; p < high; p++) diff --git a/src/coreclr/src/gc/gcsvr.cpp b/src/coreclr/src/gc/gcsvr.cpp index 30f0f8c7fe3976..8cdef316ea953a 100644 --- a/src/coreclr/src/gc/gcsvr.cpp +++ b/src/coreclr/src/gc/gcsvr.cpp @@ -21,16 +21,13 @@ #define SERVER_GC 1 +#if defined(TARGET_AMD64) && defined(TARGET_WINDOWS) +#include "vxsort/do_vxsort.h" 
+#endif + namespace SVR { #include "gcimpl.h" #include "gc.cpp" -#ifdef USE_VXSORT -#include "machine_traits.avx2.cpp" -#include "smallsort/bitonic_sort.AVX2.uint32_t.generated.cpp" -#include "smallsort/bitonic_sort.AVX2.int64_t.generated.cpp" -#include "smallsort/bitonic_sort.AVX512.uint32_t.generated.cpp" -#include "smallsort/bitonic_sort.AVX512.int64_t.generated.cpp" -#endif //USE_VXSORT } #endif // defined(FEATURE_SVR_GC) diff --git a/src/coreclr/src/gc/gcwks.cpp b/src/coreclr/src/gc/gcwks.cpp index 5b32db8503e900..531e8a0afdf283 100644 --- a/src/coreclr/src/gc/gcwks.cpp +++ b/src/coreclr/src/gc/gcwks.cpp @@ -21,15 +21,12 @@ #undef SERVER_GC #endif +#if defined(TARGET_AMD64) && defined(TARGET_WINDOWS) +#include "vxsort/do_vxsort.h" +#endif + namespace WKS { #include "gcimpl.h" #include "gc.cpp" -#ifdef USE_VXSORT -#include "machine_traits.avx2.cpp" -#include "smallsort/bitonic_sort.AVX2.uint32_t.generated.cpp" -#include "smallsort/bitonic_sort.AVX2.int64_t.generated.cpp" -#include "smallsort/bitonic_sort.AVX512.uint32_t.generated.cpp" -#include "smallsort/bitonic_sort.AVX512.int64_t.generated.cpp" -#endif //USE_VXSORT } diff --git a/src/coreclr/src/gc/sample/CMakeLists.txt b/src/coreclr/src/gc/sample/CMakeLists.txt index 059c5e768b2bbb..28baa56671490e 100644 --- a/src/coreclr/src/gc/sample/CMakeLists.txt +++ b/src/coreclr/src/gc/sample/CMakeLists.txt @@ -22,6 +22,13 @@ set(SOURCES ../handletablescan.cpp ../objecthandle.cpp ../softwarewritewatch.cpp + ../vxsort/do_vxsort_avx2.cpp + ../vxsort/do_vxsort_avx512.cpp + ../vxsort/machine_traits.avx2.cpp + ../vxsort/smallsort/bitonic_sort.AVX2.int64_t.generated.cpp + ../vxsort/smallsort/bitonic_sort.AVX2.uint32_t.generated.cpp + ../vxsort/smallsort/bitonic_sort.AVX512.int64_t.generated.cpp + ../vxsort/smallsort/bitonic_sort.AVX512.uint32_t.generated.cpp ) if(CLR_CMAKE_TARGET_WIN32) diff --git a/src/coreclr/src/gc/defs.h b/src/coreclr/src/gc/vxsort/defs.h similarity index 100% rename from src/coreclr/src/gc/defs.h rename to src/coreclr/src/gc/vxsort/defs.h diff --git a/src/coreclr/src/gc/vxsort/do_vxsort.h b/src/coreclr/src/gc/vxsort/do_vxsort.h new file mode 100644 index 00000000000000..a48c56f5e34e0b --- /dev/null +++ b/src/coreclr/src/gc/vxsort/do_vxsort.h @@ -0,0 +1,4 @@ +void do_vxsort_avx2(uint8_t** low, uint8_t** high); +void do_vxsort_avx2(uint32_t* low, uint32_t* high); +void do_vxsort_avx512(uint8_t** low, uint8_t** high); +void do_vxsort_avx512(uint32_t* low, uint32_t* high); diff --git a/src/coreclr/src/gc/vxsort/do_vxsort_avx2.cpp b/src/coreclr/src/gc/vxsort/do_vxsort_avx2.cpp new file mode 100644 index 00000000000000..addac15a3ced54 --- /dev/null +++ b/src/coreclr/src/gc/vxsort/do_vxsort_avx2.cpp @@ -0,0 +1,59 @@ +#include "common.h" + +#include "vxsort_targets_enable_avx2.h" + +namespace std +{ + template + class numeric_limits + { + public: + static _Ty Max() + { + return _Ty(); + } + }; + template <> + class numeric_limits + { + public: + static uint32_t Max() + { + return 0xffffffff; + } + }; + template <> + class numeric_limits + { + public: + static int64_t Max() + { + return 0x7fffffffffffffff; + } + }; +} + +#ifndef max +template +T max(T a, T b) +{ + if (a > b) return a; else return b; +} +#endif +#include "vxsort.h" +#include "machine_traits.avx2.h" +#include "smallsort/bitonic_sort.AVX2.int32_t.generated.h" + +void do_vxsort_avx2(uint8_t** low, uint8_t** high) +{ + auto sorter = vxsort::vxsort(); + sorter.sort((int64_t*)low, (int64_t*)high); +} + +void do_vxsort_avx2(uint32_t* low, uint32_t* high) +{ + auto sorter = 
vxsort::vxsort(); + sorter.sort(low, high); +} + +#include "vxsort_targets_disable.h" diff --git a/src/coreclr/src/gc/vxsort/do_vxsort_avx512.cpp b/src/coreclr/src/gc/vxsort/do_vxsort_avx512.cpp new file mode 100644 index 00000000000000..73c48f701d4c69 --- /dev/null +++ b/src/coreclr/src/gc/vxsort/do_vxsort_avx512.cpp @@ -0,0 +1,60 @@ +#include "common.h" + +#include "vxsort_targets_enable_avx512.h" + +namespace std +{ + template + class numeric_limits + { + public: + static _Ty Max() + { + return _Ty(); + } + }; + template <> + class numeric_limits + { + public: + static uint32_t Max() + { + return 0xffffffff; + } + }; + template <> + class numeric_limits + { + public: + static int64_t Max() + { + return 0x7fffffffffffffff; + } + }; +} + +#ifndef max +template +T max(T a, T b) +{ + if (a > b) return a; else return b; +} +#endif + +#include "vxsort.h" +#include "machine_traits.avx512.h" +#include "smallsort/bitonic_sort.AVX2.int32_t.generated.h" + +void do_vxsort_avx512(uint8_t** low, uint8_t** high) +{ + auto sorter = vxsort::vxsort(); + sorter.sort((int64_t*)low, (int64_t*)high); +} + +void do_vxsort_avx512(uint32_t* low, uint32_t* high) +{ + auto sorter = vxsort::vxsort(); + sorter.sort(low, high); +} + +#include "vxsort_targets_disable.h" diff --git a/src/coreclr/src/gc/machine_traits.avx2.cpp b/src/coreclr/src/gc/vxsort/machine_traits.avx2.cpp similarity index 99% rename from src/coreclr/src/gc/machine_traits.avx2.cpp rename to src/coreclr/src/gc/vxsort/machine_traits.avx2.cpp index 4b5a9219c1fb7a..b2d4feb31736ec 100644 --- a/src/coreclr/src/gc/machine_traits.avx2.cpp +++ b/src/coreclr/src/gc/vxsort/machine_traits.avx2.cpp @@ -1,3 +1,4 @@ +#include "common.h" //#include #include "machine_traits.avx2.h" diff --git a/src/coreclr/src/gc/machine_traits.avx2.h b/src/coreclr/src/gc/vxsort/machine_traits.avx2.h similarity index 90% rename from src/coreclr/src/gc/machine_traits.avx2.h rename to src/coreclr/src/gc/vxsort/machine_traits.avx2.h index 2f0368d9e39999..8eb068a704cb02 100644 --- a/src/coreclr/src/gc/machine_traits.avx2.h +++ b/src/coreclr/src/gc/vxsort/machine_traits.avx2.h @@ -25,10 +25,9 @@ namespace vxsort { extern const int8_t perm_table_64[128]; extern const int8_t perm_table_32[2048]; -void unsupported_operation() +static void not_supported() { assert(!"operation is unsupported"); - GCToOSInterface::DebugBreak(); } template <> @@ -47,7 +46,7 @@ class vxsort_machine_traits { _mm256_storeu_si256(ptr, v); } - static void store_compress_vec(TV* ptr, TV v, TMASK mask) { unsupported_operation(); } + static void store_compress_vec(TV* ptr, TV v, TMASK mask) { not_supported(); } static INLINE TV partition_vector(TV v, int mask) { assert(mask >= 0); @@ -79,7 +78,7 @@ class vxsort_machine_traits { _mm256_storeu_si256(ptr, v); } - static void store_compress_vec(TV* ptr, TV v, TMASK mask) { unsupported_operation(); } + static void store_compress_vec(TV* ptr, TV v, TMASK mask) { not_supported(); } static INLINE TV partition_vector(TV v, int mask) { assert(mask >= 0); @@ -112,7 +111,7 @@ class vxsort_machine_traits { _mm256_storeu_ps((float *) ptr, v); } - static void store_compress_vec(TV* ptr, TV v, TMASK mask) { unsupported_operation(); } + static void store_compress_vec(TV* ptr, TV v, TMASK mask) { not_supported(); } static INLINE TV partition_vector(TV v, int mask) { assert(mask >= 0); @@ -147,7 +146,7 @@ class vxsort_machine_traits { _mm256_storeu_si256(ptr, v); } - static void store_compress_vec(TV* ptr, TV v, TMASK mask) { unsupported_operation(); } + static void 
store_compress_vec(TV* ptr, TV v, TMASK mask) { not_supported(); } static INLINE TV partition_vector(TV v, int mask) { assert(mask >= 0); @@ -179,7 +178,7 @@ class vxsort_machine_traits { _mm256_storeu_si256(ptr, v); } - static void store_compress_vec(TV* ptr, TV v, TMASK mask) { unsupported_operation(); } + static void store_compress_vec(TV* ptr, TV v, TMASK mask) { not_supported(); } static INLINE TV partition_vector(TV v, int mask) { assert(mask >= 0); @@ -211,7 +210,7 @@ class vxsort_machine_traits { _mm256_storeu_pd((double *) ptr, v); } - static void store_compress_vec(TV* ptr, TV v, TMASK mask) { unsupported_operation(); } + static void store_compress_vec(TV* ptr, TV v, TMASK mask) { not_supported(); } static INLINE TV partition_vector(TV v, int mask) { assert(mask >= 0); diff --git a/src/coreclr/src/gc/machine_traits.avx512.h b/src/coreclr/src/gc/vxsort/machine_traits.avx512.h similarity index 100% rename from src/coreclr/src/gc/machine_traits.avx512.h rename to src/coreclr/src/gc/vxsort/machine_traits.avx512.h diff --git a/src/coreclr/src/gc/machine_traits.h b/src/coreclr/src/gc/vxsort/machine_traits.h similarity index 100% rename from src/coreclr/src/gc/machine_traits.h rename to src/coreclr/src/gc/vxsort/machine_traits.h diff --git a/src/coreclr/src/gc/smallsort/bitonic_sort.AVX2.int64_t.generated.cpp b/src/coreclr/src/gc/vxsort/smallsort/bitonic_sort.AVX2.int64_t.generated.cpp similarity index 97% rename from src/coreclr/src/gc/smallsort/bitonic_sort.AVX2.int64_t.generated.cpp rename to src/coreclr/src/gc/vxsort/smallsort/bitonic_sort.AVX2.int64_t.generated.cpp index 615d4162e62b6f..81c308736a22a6 100644 --- a/src/coreclr/src/gc/smallsort/bitonic_sort.AVX2.int64_t.generated.cpp +++ b/src/coreclr/src/gc/vxsort/smallsort/bitonic_sort.AVX2.int64_t.generated.cpp @@ -1,3 +1,4 @@ +#include "common.h" #include "bitonic_sort.AVX2.int64_t.generated.h" using namespace vxsort; diff --git a/src/coreclr/src/gc/smallsort/bitonic_sort.AVX2.int64_t.generated.h b/src/coreclr/src/gc/vxsort/smallsort/bitonic_sort.AVX2.int64_t.generated.h similarity index 100% rename from src/coreclr/src/gc/smallsort/bitonic_sort.AVX2.int64_t.generated.h rename to src/coreclr/src/gc/vxsort/smallsort/bitonic_sort.AVX2.int64_t.generated.h diff --git a/src/coreclr/src/gc/smallsort/bitonic_sort.AVX2.uint32_t.generated.cpp b/src/coreclr/src/gc/vxsort/smallsort/bitonic_sort.AVX2.uint32_t.generated.cpp similarity index 97% rename from src/coreclr/src/gc/smallsort/bitonic_sort.AVX2.uint32_t.generated.cpp rename to src/coreclr/src/gc/vxsort/smallsort/bitonic_sort.AVX2.uint32_t.generated.cpp index 93d974cc2edf78..34320362588c33 100644 --- a/src/coreclr/src/gc/smallsort/bitonic_sort.AVX2.uint32_t.generated.cpp +++ b/src/coreclr/src/gc/vxsort/smallsort/bitonic_sort.AVX2.uint32_t.generated.cpp @@ -1,3 +1,4 @@ +#include "common.h" #include "bitonic_sort.AVX2.uint32_t.generated.h" using namespace vxsort; diff --git a/src/coreclr/src/gc/smallsort/bitonic_sort.AVX2.uint32_t.generated.h b/src/coreclr/src/gc/vxsort/smallsort/bitonic_sort.AVX2.uint32_t.generated.h similarity index 100% rename from src/coreclr/src/gc/smallsort/bitonic_sort.AVX2.uint32_t.generated.h rename to src/coreclr/src/gc/vxsort/smallsort/bitonic_sort.AVX2.uint32_t.generated.h diff --git a/src/coreclr/src/gc/smallsort/bitonic_sort.AVX512.int64_t.generated.cpp b/src/coreclr/src/gc/vxsort/smallsort/bitonic_sort.AVX512.int64_t.generated.cpp similarity index 97% rename from src/coreclr/src/gc/smallsort/bitonic_sort.AVX512.int64_t.generated.cpp rename to 
src/coreclr/src/gc/vxsort/smallsort/bitonic_sort.AVX512.int64_t.generated.cpp index b6cf913bd22df8..42e26f9c383511 100644 --- a/src/coreclr/src/gc/smallsort/bitonic_sort.AVX512.int64_t.generated.cpp +++ b/src/coreclr/src/gc/vxsort/smallsort/bitonic_sort.AVX512.int64_t.generated.cpp @@ -1,3 +1,4 @@ +#include "common.h" #include "bitonic_sort.AVX512.int64_t.generated.h" using namespace vxsort; diff --git a/src/coreclr/src/gc/smallsort/bitonic_sort.AVX512.int64_t.generated.h b/src/coreclr/src/gc/vxsort/smallsort/bitonic_sort.AVX512.int64_t.generated.h similarity index 100% rename from src/coreclr/src/gc/smallsort/bitonic_sort.AVX512.int64_t.generated.h rename to src/coreclr/src/gc/vxsort/smallsort/bitonic_sort.AVX512.int64_t.generated.h diff --git a/src/coreclr/src/gc/smallsort/bitonic_sort.AVX512.uint32_t.generated.cpp b/src/coreclr/src/gc/vxsort/smallsort/bitonic_sort.AVX512.uint32_t.generated.cpp similarity index 97% rename from src/coreclr/src/gc/smallsort/bitonic_sort.AVX512.uint32_t.generated.cpp rename to src/coreclr/src/gc/vxsort/smallsort/bitonic_sort.AVX512.uint32_t.generated.cpp index 840d970a43279b..59271c70e9760a 100644 --- a/src/coreclr/src/gc/smallsort/bitonic_sort.AVX512.uint32_t.generated.cpp +++ b/src/coreclr/src/gc/vxsort/smallsort/bitonic_sort.AVX512.uint32_t.generated.cpp @@ -1,3 +1,4 @@ +#include "common.h" #include "bitonic_sort.AVX512.uint32_t.generated.h" using namespace vxsort; diff --git a/src/coreclr/src/gc/smallsort/bitonic_sort.AVX512.uint32_t.generated.h b/src/coreclr/src/gc/vxsort/smallsort/bitonic_sort.AVX512.uint32_t.generated.h similarity index 100% rename from src/coreclr/src/gc/smallsort/bitonic_sort.AVX512.uint32_t.generated.h rename to src/coreclr/src/gc/vxsort/smallsort/bitonic_sort.AVX512.uint32_t.generated.h diff --git a/src/coreclr/src/gc/smallsort/bitonic_sort.h b/src/coreclr/src/gc/vxsort/smallsort/bitonic_sort.h similarity index 100% rename from src/coreclr/src/gc/smallsort/bitonic_sort.h rename to src/coreclr/src/gc/vxsort/smallsort/bitonic_sort.h diff --git a/src/coreclr/src/gc/smallsort/codegen/avx2.py b/src/coreclr/src/gc/vxsort/smallsort/codegen/avx2.py similarity index 100% rename from src/coreclr/src/gc/smallsort/codegen/avx2.py rename to src/coreclr/src/gc/vxsort/smallsort/codegen/avx2.py diff --git a/src/coreclr/src/gc/smallsort/codegen/avx512.py b/src/coreclr/src/gc/vxsort/smallsort/codegen/avx512.py similarity index 100% rename from src/coreclr/src/gc/smallsort/codegen/avx512.py rename to src/coreclr/src/gc/vxsort/smallsort/codegen/avx512.py diff --git a/src/coreclr/src/gc/smallsort/codegen/bitonic_gen.py b/src/coreclr/src/gc/vxsort/smallsort/codegen/bitonic_gen.py similarity index 100% rename from src/coreclr/src/gc/smallsort/codegen/bitonic_gen.py rename to src/coreclr/src/gc/vxsort/smallsort/codegen/bitonic_gen.py diff --git a/src/coreclr/src/gc/smallsort/codegen/bitonic_isa.py b/src/coreclr/src/gc/vxsort/smallsort/codegen/bitonic_isa.py similarity index 100% rename from src/coreclr/src/gc/smallsort/codegen/bitonic_isa.py rename to src/coreclr/src/gc/vxsort/smallsort/codegen/bitonic_isa.py diff --git a/src/coreclr/src/gc/smallsort/codegen/utils.py b/src/coreclr/src/gc/vxsort/smallsort/codegen/utils.py similarity index 100% rename from src/coreclr/src/gc/smallsort/codegen/utils.py rename to src/coreclr/src/gc/vxsort/smallsort/codegen/utils.py diff --git a/src/coreclr/src/gc/vxsort.h b/src/coreclr/src/gc/vxsort/vxsort.h similarity index 99% rename from src/coreclr/src/gc/vxsort.h rename to src/coreclr/src/gc/vxsort/vxsort.h index 
60551787d84e1f..65255f477f4fcc 100644 --- a/src/coreclr/src/gc/vxsort.h +++ b/src/coreclr/src/gc/vxsort/vxsort.h @@ -55,7 +55,7 @@ struct alignment_hint { }; template -class cvxsort { +class vxsort { static_assert(Unroll >= 1, "Unroll can be in the range 1..12"); static_assert(Unroll <= 12, "Unroll can be in the range 1..12"); @@ -152,8 +152,8 @@ class cvxsort { T* _startPtr = nullptr; T* _endPtr = nullptr; - T _temp[PARTITION_TMP_SIZE_IN_ELEMENTS]; + T _temp[PARTITION_TMP_SIZE_IN_ELEMENTS]; int _depth = 0; NOINLINE @@ -611,7 +611,6 @@ class cvxsort { public: NOINLINE void sort(T* left, T* right) { -// init_isa_detection(); reset(left, right); auto depthLimit = 2 * floor_log2_plus_one(right + 1 - left); sort(left, right, AH(), depthLimit); diff --git a/src/coreclr/src/gc/vxsort_targets_disable.h b/src/coreclr/src/gc/vxsort/vxsort_targets_disable.h similarity index 100% rename from src/coreclr/src/gc/vxsort_targets_disable.h rename to src/coreclr/src/gc/vxsort/vxsort_targets_disable.h diff --git a/src/coreclr/src/gc/vxsort_targets_enable_avx2.h b/src/coreclr/src/gc/vxsort/vxsort_targets_enable_avx2.h similarity index 100% rename from src/coreclr/src/gc/vxsort_targets_enable_avx2.h rename to src/coreclr/src/gc/vxsort/vxsort_targets_enable_avx2.h diff --git a/src/coreclr/src/gc/vxsort_targets_enable_avx512.h b/src/coreclr/src/gc/vxsort/vxsort_targets_enable_avx512.h similarity index 100% rename from src/coreclr/src/gc/vxsort_targets_enable_avx512.h rename to src/coreclr/src/gc/vxsort/vxsort_targets_enable_avx512.h diff --git a/src/coreclr/src/vm/CMakeLists.txt b/src/coreclr/src/vm/CMakeLists.txt index 00aa175dba7d0d..1489ab89e2e0a0 100644 --- a/src/coreclr/src/vm/CMakeLists.txt +++ b/src/coreclr/src/vm/CMakeLists.txt @@ -540,7 +540,15 @@ set(GC_SOURCES_WKS ../gc/gceewks.cpp ../gc/gcload.cpp ../gc/softwarewritewatch.cpp - ../gc/handletablecache.cpp) + ../gc/handletablecache.cpp + ../gc/vxsort/do_vxsort_avx2.cpp + ../gc/vxsort/do_vxsort_avx512.cpp + ../gc/vxsort/machine_traits.avx2.cpp + ../gc/vxsort/smallsort/bitonic_sort.AVX2.int64_t.generated.cpp + ../gc/vxsort/smallsort/bitonic_sort.AVX2.uint32_t.generated.cpp + ../gc/vxsort/smallsort/bitonic_sort.AVX512.int64_t.generated.cpp + ../gc/vxsort/smallsort/bitonic_sort.AVX512.uint32_t.generated.cpp +) set(GC_HEADERS_WKS ${GC_HEADERS_DAC_AND_WKS_COMMON} From d38cfee22ae12a39b4312150e1482d5eb7ae7f86 Mon Sep 17 00:00:00 2001 From: Peter Sollich Date: Wed, 24 Jun 2020 13:01:18 +0200 Subject: [PATCH 15/31] Get rid of unneeded #include statement in two files. 
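For orientation, the stubs introduced in patch 14 reduce the GC-side call to a small runtime dispatch: each flavor lives in its own translation unit so it can be compiled under its own target pragma, and the caller picks one per sort. The following is a minimal sketch of that shape, not the actual sources; has_avx512f() is a hypothetical stand-in for the GCToEEInterface::HasInstructionSet(kInstructionSetAVX512F) query made by gc.cpp.

#include <cstdint>

// Flavor entry points as declared in do_vxsort.h; each is compiled in a
// separate translation unit bracketed by vxsort_targets_enable_avx2.h /
// vxsort_targets_enable_avx512.h and vxsort_targets_disable.h.
void do_vxsort_avx2(uint8_t** low, uint8_t** high);
void do_vxsort_avx512(uint8_t** low, uint8_t** high);

// Hypothetical stand-in for the instruction-set query.
bool has_avx512f();

void do_vxsort(uint8_t** low, uint8_t** high)
{
    // Prefer the AVX-512 flavor when the OS and CPU support it; otherwise
    // fall back to AVX2, which the caller has already asserted is present.
    if (has_avx512f())
        do_vxsort_avx512(low, high);
    else
        do_vxsort_avx2(low, high);
}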
--- src/coreclr/src/gc/vxsort/do_vxsort_avx2.cpp | 1 - src/coreclr/src/gc/vxsort/do_vxsort_avx512.cpp | 1 - 2 files changed, 2 deletions(-) diff --git a/src/coreclr/src/gc/vxsort/do_vxsort_avx2.cpp b/src/coreclr/src/gc/vxsort/do_vxsort_avx2.cpp index addac15a3ced54..02283aa0626c92 100644 --- a/src/coreclr/src/gc/vxsort/do_vxsort_avx2.cpp +++ b/src/coreclr/src/gc/vxsort/do_vxsort_avx2.cpp @@ -42,7 +42,6 @@ T max(T a, T b) #endif #include "vxsort.h" #include "machine_traits.avx2.h" -#include "smallsort/bitonic_sort.AVX2.int32_t.generated.h" void do_vxsort_avx2(uint8_t** low, uint8_t** high) { diff --git a/src/coreclr/src/gc/vxsort/do_vxsort_avx512.cpp b/src/coreclr/src/gc/vxsort/do_vxsort_avx512.cpp index 73c48f701d4c69..8ed453ff609e86 100644 --- a/src/coreclr/src/gc/vxsort/do_vxsort_avx512.cpp +++ b/src/coreclr/src/gc/vxsort/do_vxsort_avx512.cpp @@ -43,7 +43,6 @@ T max(T a, T b) #include "vxsort.h" #include "machine_traits.avx512.h" -#include "smallsort/bitonic_sort.AVX2.int32_t.generated.h" void do_vxsort_avx512(uint8_t** low, uint8_t** high) { From 8a64ab26ceb5034680e284fb5c0502f891138880 Mon Sep 17 00:00:00 2001 From: Peter Sollich Date: Wed, 24 Jun 2020 17:00:28 +0200 Subject: [PATCH 16/31] Address codereview feedback to specifically say AVX512F instead of just AVX512 as there are multiple subsets. --- src/coreclr/src/gc/gc.cpp | 4 ++-- src/coreclr/src/gc/gcinterface.ee.h | 2 +- src/coreclr/src/inc/corinfoinstructionset.h | 4 ++-- src/coreclr/src/vm/codeman.cpp | 2 +- src/coreclr/src/vm/gcenv.ee.cpp | 2 +- 5 files changed, 7 insertions(+), 7 deletions(-) diff --git a/src/coreclr/src/gc/gc.cpp b/src/coreclr/src/gc/gc.cpp index 370098bbeb326a..0fbdf418a3829e 100644 --- a/src/coreclr/src/gc/gc.cpp +++ b/src/coreclr/src/gc/gc.cpp @@ -2089,7 +2089,7 @@ uint8_t* tree_search (uint8_t* tree, uint8_t* old_address); void do_vxsort(uint8_t** low, uint8_t** high, unsigned int depth) { assert(GCToEEInterface::HasInstructionSet(kInstructionSetAVX2)); - if (GCToEEInterface::HasInstructionSet(kInstructionSetAVX512)) + if (GCToEEInterface::HasInstructionSet(kInstructionSetAVX512F)) { do_vxsort_avx512(low, high); } @@ -2108,7 +2108,7 @@ void do_vxsort(uint8_t** low, uint8_t** high, unsigned int depth) void do_vxsort(uint32_t* low, uint32_t* high, unsigned int depth) { assert(GCToEEInterface::HasInstructionSet(kInstructionSetAVX2)); - if (GCToEEInterface::HasInstructionSet(kInstructionSetAVX512)) + if (GCToEEInterface::HasInstructionSet(kInstructionSetAVX512F)) { do_vxsort_avx512(low, high); } diff --git a/src/coreclr/src/gc/gcinterface.ee.h b/src/coreclr/src/gc/gcinterface.ee.h index 35aee4a47f5996..7aafd7944ebd43 100644 --- a/src/coreclr/src/gc/gcinterface.ee.h +++ b/src/coreclr/src/gc/gcinterface.ee.h @@ -25,7 +25,7 @@ enum InstructionSet { #if defined(TARGET_X86) || defined(TARGET_AMD64) kInstructionSetAVX2 = 0, - kInstructionSetAVX512 = 1, + kInstructionSetAVX512F = 1, #endif //defined(TARGET_X86) || defined(TARGET_AMD64) }; diff --git a/src/coreclr/src/inc/corinfoinstructionset.h b/src/coreclr/src/inc/corinfoinstructionset.h index 1e326328639a07..1c828f853b03b5 100644 --- a/src/coreclr/src/inc/corinfoinstructionset.h +++ b/src/coreclr/src/inc/corinfoinstructionset.h @@ -59,7 +59,7 @@ enum CORINFO_InstructionSet InstructionSet_SSE2_X64=25, InstructionSet_SSE41_X64=26, InstructionSet_SSE42_X64=27, - InstructionSet_AVX512=28, + InstructionSet_AVX512F=28, #endif // TARGET_AMD64 #ifdef TARGET_X86 InstructionSet_X86Base=1, @@ -89,7 +89,7 @@ enum CORINFO_InstructionSet InstructionSet_SSE2_X64=25, 
InstructionSet_SSE41_X64=26, InstructionSet_SSE42_X64=27, - InstructionSet_AVX512=28, + InstructionSet_AVX512F=28, #endif // TARGET_X86 }; diff --git a/src/coreclr/src/vm/codeman.cpp b/src/coreclr/src/vm/codeman.cpp index 824f254fedbaea..0e2809268d984f 100644 --- a/src/coreclr/src/vm/codeman.cpp +++ b/src/coreclr/src/vm/codeman.cpp @@ -1450,7 +1450,7 @@ void EEJitManager::SetCpuInfo() if (DoesOSSupportAVX512() && zmmStateSupport() == 1 && (buffer[6] & 0x01) != 0) { - CPUCompileFlags.Set(InstructionSet_AVX512); + CPUCompileFlags.Set(InstructionSet_AVX512F); } } } diff --git a/src/coreclr/src/vm/gcenv.ee.cpp b/src/coreclr/src/vm/gcenv.ee.cpp index b530e105331241..b549921d3d319f 100644 --- a/src/coreclr/src/vm/gcenv.ee.cpp +++ b/src/coreclr/src/vm/gcenv.ee.cpp @@ -1662,7 +1662,7 @@ bool GCToEEInterface::HasInstructionSet(InstructionSet requestedInstructionSet) switch (requestedInstructionSet) { case kInstructionSetAVX2: return cpuCompileFlags.IsSet(InstructionSet_AVX2); - case kInstructionSetAVX512: return cpuCompileFlags.IsSet(InstructionSet_AVX512); + case kInstructionSetAVX512F: return cpuCompileFlags.IsSet(InstructionSet_AVX512F); default: return false; } #else From 10506974daf8509dc5ffd37d04e366bcffed47fb Mon Sep 17 00:00:00 2001 From: Peter Sollich Date: Thu, 25 Jun 2020 17:13:36 +0200 Subject: [PATCH 17/31] Fix CMakeLists.txt files for non-x64 non-Windows targets, introduce separate max mark list sizes for WKS, remove dead code from grow_mark_list, add #ifdef to AVX512 detection to make the other architectures build. --- src/coreclr/src/gc/CMakeLists.txt | 8 +++++++- src/coreclr/src/gc/gc.cpp | 11 +++++++---- src/coreclr/src/gc/sample/CMakeLists.txt | 6 ++++++ src/coreclr/src/vm/CMakeLists.txt | 6 ++++++ src/coreclr/src/vm/codeman.cpp | 6 ++++-- 5 files changed, 30 insertions(+), 7 deletions(-) diff --git a/src/coreclr/src/gc/CMakeLists.txt b/src/coreclr/src/gc/CMakeLists.txt index 1c62db5b2aad5e..e584148f7038c0 100644 --- a/src/coreclr/src/gc/CMakeLists.txt +++ b/src/coreclr/src/gc/CMakeLists.txt @@ -37,6 +37,12 @@ else() set ( GC_SOURCES ${GC_SOURCES} windows/gcenv.windows.cpp +) +endif(CLR_CMAKE_HOST_UNIX) + +if (CLR_CMAKE_TARGET_WIN32 AND CLR_CMAKE_TARGET_ARCH_AMD64) + set ( GC_SOURCES + ${GC_SOURCES} vxsort/do_vxsort_avx2.cpp vxsort/do_vxsort_avx512.cpp vxsort/machine_traits.avx2.cpp @@ -45,7 +51,7 @@ else() vxsort/smallsort/bitonic_sort.AVX512.int64_t.generated.cpp vxsort/smallsort/bitonic_sort.AVX512.uint32_t.generated.cpp ) -endif(CLR_CMAKE_HOST_UNIX) +endif (CLR_CMAKE_TARGET_WIN32 AND CLR_CMAKE_TARGET_ARCH_AMD64) if (CLR_CMAKE_TARGET_WIN32) set(GC_HEADERS diff --git a/src/coreclr/src/gc/gc.cpp b/src/coreclr/src/gc/gc.cpp index 0fbdf418a3829e..5737660ab36ee7 100644 --- a/src/coreclr/src/gc/gc.cpp +++ b/src/coreclr/src/gc/gc.cpp @@ -8791,7 +8791,13 @@ void gc_heap::combine_mark_lists() void gc_heap::grow_mark_list () { - size_t new_mark_list_size = min (mark_list_size * 2, 1000 * 1024); +#ifdef MULTIPLE_HEAPS + const size_t MAX_MARK_LIST_SIZE = 1000 * 1024; +#else //MULTIPLE_HEAPS + const size_t MAX_MARK_LIST_SIZE = 32 * 1024; +#endif //MULTIPLE_HEAPS + + size_t new_mark_list_size = min (mark_list_size * 2, MAX_MARK_LIST_SIZE); if (new_mark_list_size == mark_list_size) return; @@ -8800,9 +8806,6 @@ void gc_heap::grow_mark_list () #ifdef PARALLEL_MARK_LIST_SORT uint8_t** new_mark_list_copy = make_mark_list (new_mark_list_size * n_heaps); -#ifdef BIT_MAP_SORT - memset (new_mark_list_copy, 0, new_mark_list_size * n_heaps * sizeof(new_mark_list_copy[0])); -#endif //BIT_MAP_SORT 
#endif //PARALLEL_MARK_LIST_SORT if (new_mark_list != nullptr diff --git a/src/coreclr/src/gc/sample/CMakeLists.txt b/src/coreclr/src/gc/sample/CMakeLists.txt index 28baa56671490e..7cd5820b9ed9b8 100644 --- a/src/coreclr/src/gc/sample/CMakeLists.txt +++ b/src/coreclr/src/gc/sample/CMakeLists.txt @@ -22,6 +22,11 @@ set(SOURCES ../handletablescan.cpp ../objecthandle.cpp ../softwarewritewatch.cpp +) + +if (CLR_CMAKE_TARGET_WIN32 AND CLR_CMAKE_TARGET_ARCH_AMD64) + set ( SOURCES + ${SOURCES} ../vxsort/do_vxsort_avx2.cpp ../vxsort/do_vxsort_avx512.cpp ../vxsort/machine_traits.avx2.cpp @@ -30,6 +35,7 @@ set(SOURCES ../vxsort/smallsort/bitonic_sort.AVX512.int64_t.generated.cpp ../vxsort/smallsort/bitonic_sort.AVX512.uint32_t.generated.cpp ) +endif (CLR_CMAKE_TARGET_WIN32 AND CLR_CMAKE_TARGET_ARCH_AMD64) if(CLR_CMAKE_TARGET_WIN32) set (GC_LINK_LIBRARIES diff --git a/src/coreclr/src/vm/CMakeLists.txt b/src/coreclr/src/vm/CMakeLists.txt index 1489ab89e2e0a0..47ba6ae6baca1d 100644 --- a/src/coreclr/src/vm/CMakeLists.txt +++ b/src/coreclr/src/vm/CMakeLists.txt @@ -541,6 +541,11 @@ set(GC_SOURCES_WKS ../gc/gcload.cpp ../gc/softwarewritewatch.cpp ../gc/handletablecache.cpp +) + +if (CLR_CMAKE_TARGET_WIN32 AND CLR_CMAKE_TARGET_ARCH_AMD64) + set ( GC_SOURCES_WKS + ${GC_SOURCES_WKS} ../gc/vxsort/do_vxsort_avx2.cpp ../gc/vxsort/do_vxsort_avx512.cpp ../gc/vxsort/machine_traits.avx2.cpp @@ -549,6 +554,7 @@ set(GC_SOURCES_WKS ../gc/vxsort/smallsort/bitonic_sort.AVX512.int64_t.generated.cpp ../gc/vxsort/smallsort/bitonic_sort.AVX512.uint32_t.generated.cpp ) +endif (CLR_CMAKE_TARGET_WIN32 AND CLR_CMAKE_TARGET_ARCH_AMD64) set(GC_HEADERS_WKS ${GC_HEADERS_DAC_AND_WKS_COMMON} diff --git a/src/coreclr/src/vm/codeman.cpp b/src/coreclr/src/vm/codeman.cpp index 0e2809268d984f..2ae4c80948ee55 100644 --- a/src/coreclr/src/vm/codeman.cpp +++ b/src/coreclr/src/vm/codeman.cpp @@ -1273,20 +1273,20 @@ bool DoesOSSupportAVX() return TRUE; } +#if defined(TARGET_AMD64) && defined(TARGET_WINDOWS) bool DoesOSSupportAVX512() { LIMITED_METHOD_CONTRACT; -#ifndef TARGET_UNIX DWORD64 FeatureMask = GetEnabledXStateFeaturesHelper(); if ((FeatureMask & XSTATE_MASK_AVX512) == XSTATE_MASK_AVX512) { return TRUE; } -#endif // !TARGET_UNIX return FALSE; } +#endif //defined(TARGET_AMD64) && defined(TARGET_WINDOWS) #endif // defined(TARGET_X86) || defined(TARGET_AMD64) @@ -1448,10 +1448,12 @@ void EEJitManager::SetCpuInfo() CPUCompileFlags.Set(InstructionSet_AVX2); } +#if defined(TARGET_AMD64) && defined(TARGET_WINDOWS) if (DoesOSSupportAVX512() && zmmStateSupport() == 1 && (buffer[6] & 0x01) != 0) { CPUCompileFlags.Set(InstructionSet_AVX512F); } +#endif //defined(TARGET_AMD64) && defined(TARGET_WINDOWS) } } } From 01f6797ce87598a0bdfe4145dd20a9dece4019e3 Mon Sep 17 00:00:00 2001 From: Peter Sollich Date: Fri, 26 Jun 2020 17:57:11 +0200 Subject: [PATCH 18/31] Instead of modifying the tool-generated header file corinfoinstructionset.h, modify the InstructionSetDesc.txt file that it is generated from, and run the tool that generates all the files from it. 
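As background for the 0xE6 mask used by zmmStateSupport (patch 13) and gated again by DoesOSSupportAVX512 (patch 17): XCR0 reports which register state the OS saves and restores across context switches. Bit 1 is SSE (XMM), bit 2 is AVX (YMM), and bits 5-7 are the AVX-512 opmask, ZMM_Hi256 and Hi16_ZMM state, so 0xE6 = 0b11100110 covers exactly those five bits. A user-mode sketch of the same test, assuming a compiler that exposes _xgetbv (MSVC, or GCC/Clang built with -mxsave) and that CPUID has already reported OSXSAVE support:

#include <immintrin.h>
#include <cstdint>

// True when the OS has enabled saving of the full AVX-512 register state.
// 0xE6 = SSE (bit 1) | AVX (bit 2) | opmask (bit 5) | ZMM_Hi256 (bit 6) | Hi16_ZMM (bit 7).
static bool os_enabled_zmm_state()
{
    uint64_t xcr0 = _xgetbv(0); // read XCR0, the XFEATURE_ENABLED_MASK register
    return (xcr0 & 0xE6) == 0xE6;
}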
--- src/coreclr/src/inc/corinfoinstructionset.h | 48 +++++++++++-------- .../Runtime/ReadyToRunInstructionSetHelper.cs | 2 + .../JitInterface/CorInfoInstructionSet.cs | 48 ++++++++++++------- .../ThunkGenerator/InstructionSetDesc.txt | 2 + 4 files changed, 62 insertions(+), 38 deletions(-) diff --git a/src/coreclr/src/inc/corinfoinstructionset.h b/src/coreclr/src/inc/corinfoinstructionset.h index 1c828f853b03b5..98e3797f42ba11 100644 --- a/src/coreclr/src/inc/corinfoinstructionset.h +++ b/src/coreclr/src/inc/corinfoinstructionset.h @@ -50,16 +50,16 @@ enum CORINFO_InstructionSet InstructionSet_POPCNT=16, InstructionSet_Vector128=17, InstructionSet_Vector256=18, - InstructionSet_X86Base_X64=19, - InstructionSet_BMI1_X64=20, - InstructionSet_BMI2_X64=21, - InstructionSet_LZCNT_X64=22, - InstructionSet_POPCNT_X64=23, - InstructionSet_SSE_X64=24, - InstructionSet_SSE2_X64=25, - InstructionSet_SSE41_X64=26, - InstructionSet_SSE42_X64=27, - InstructionSet_AVX512F=28, + InstructionSet_AVX512F=19, + InstructionSet_X86Base_X64=20, + InstructionSet_BMI1_X64=21, + InstructionSet_BMI2_X64=22, + InstructionSet_LZCNT_X64=23, + InstructionSet_POPCNT_X64=24, + InstructionSet_SSE_X64=25, + InstructionSet_SSE2_X64=26, + InstructionSet_SSE41_X64=27, + InstructionSet_SSE42_X64=28, #endif // TARGET_AMD64 #ifdef TARGET_X86 InstructionSet_X86Base=1, @@ -80,16 +80,16 @@ enum CORINFO_InstructionSet InstructionSet_POPCNT=16, InstructionSet_Vector128=17, InstructionSet_Vector256=18, - InstructionSet_X86Base_X64=19, - InstructionSet_BMI1_X64=20, - InstructionSet_BMI2_X64=21, - InstructionSet_LZCNT_X64=22, - InstructionSet_POPCNT_X64=23, - InstructionSet_SSE_X64=24, - InstructionSet_SSE2_X64=25, - InstructionSet_SSE41_X64=26, - InstructionSet_SSE42_X64=27, - InstructionSet_AVX512F=28, + InstructionSet_AVX512F=19, + InstructionSet_X86Base_X64=20, + InstructionSet_BMI1_X64=21, + InstructionSet_BMI2_X64=22, + InstructionSet_LZCNT_X64=23, + InstructionSet_POPCNT_X64=24, + InstructionSet_SSE_X64=25, + InstructionSet_SSE2_X64=26, + InstructionSet_SSE41_X64=27, + InstructionSet_SSE42_X64=28, #endif // TARGET_X86 }; @@ -278,6 +278,8 @@ inline CORINFO_InstructionSetFlags EnsureInstructionSetFlagsAreValid(CORINFO_Ins resultflags.RemoveInstructionSet(InstructionSet_POPCNT); if (resultflags.HasInstructionSet(InstructionSet_Vector256) && !resultflags.HasInstructionSet(InstructionSet_AVX)) resultflags.RemoveInstructionSet(InstructionSet_Vector256); + if (resultflags.HasInstructionSet(InstructionSet_AVX512F) && !resultflags.HasInstructionSet(InstructionSet_AVX2)) + resultflags.RemoveInstructionSet(InstructionSet_AVX512F); #endif // TARGET_AMD64 #ifdef TARGET_X86 if (resultflags.HasInstructionSet(InstructionSet_SSE) && !resultflags.HasInstructionSet(InstructionSet_X86Base)) @@ -310,6 +312,8 @@ inline CORINFO_InstructionSetFlags EnsureInstructionSetFlagsAreValid(CORINFO_Ins resultflags.RemoveInstructionSet(InstructionSet_POPCNT); if (resultflags.HasInstructionSet(InstructionSet_Vector256) && !resultflags.HasInstructionSet(InstructionSet_AVX)) resultflags.RemoveInstructionSet(InstructionSet_Vector256); + if (resultflags.HasInstructionSet(InstructionSet_AVX512F) && !resultflags.HasInstructionSet(InstructionSet_AVX2)) + resultflags.RemoveInstructionSet(InstructionSet_AVX512F); #endif // TARGET_X86 } while (!oldflags.Equals(resultflags)); @@ -406,6 +410,8 @@ inline const char *InstructionSetToString(CORINFO_InstructionSet instructionSet) return "Vector128"; case InstructionSet_Vector256 : return "Vector256"; + case InstructionSet_AVX512F 
: + return "AVX512F"; #endif // TARGET_AMD64 #ifdef TARGET_X86 case InstructionSet_X86Base : @@ -444,6 +450,8 @@ inline const char *InstructionSetToString(CORINFO_InstructionSet instructionSet) return "Vector128"; case InstructionSet_Vector256 : return "Vector256"; + case InstructionSet_AVX512F : + return "AVX512F"; #endif // TARGET_X86 default: diff --git a/src/coreclr/src/tools/Common/Internal/Runtime/ReadyToRunInstructionSetHelper.cs b/src/coreclr/src/tools/Common/Internal/Runtime/ReadyToRunInstructionSetHelper.cs index 16cf47f43d686f..92a5eacf9071a8 100644 --- a/src/coreclr/src/tools/Common/Internal/Runtime/ReadyToRunInstructionSetHelper.cs +++ b/src/coreclr/src/tools/Common/Internal/Runtime/ReadyToRunInstructionSetHelper.cs @@ -73,6 +73,7 @@ public static class ReadyToRunInstructionSetHelper case InstructionSet.X64_POPCNT_X64: return ReadyToRunInstructionSet.Popcnt; case InstructionSet.X64_Vector128: return null; case InstructionSet.X64_Vector256: return null; + case InstructionSet.X64_AVX512F: return null; default: throw new Exception("Unknown instruction set"); } @@ -100,6 +101,7 @@ public static class ReadyToRunInstructionSetHelper case InstructionSet.X86_POPCNT: return ReadyToRunInstructionSet.Popcnt; case InstructionSet.X86_Vector128: return null; case InstructionSet.X86_Vector256: return null; + case InstructionSet.X86_AVX512F: return null; default: throw new Exception("Unknown instruction set"); } diff --git a/src/coreclr/src/tools/Common/JitInterface/CorInfoInstructionSet.cs b/src/coreclr/src/tools/Common/JitInterface/CorInfoInstructionSet.cs index dae2003712ff19..d86d0b83689b08 100644 --- a/src/coreclr/src/tools/Common/JitInterface/CorInfoInstructionSet.cs +++ b/src/coreclr/src/tools/Common/JitInterface/CorInfoInstructionSet.cs @@ -49,15 +49,16 @@ public enum InstructionSet X64_POPCNT=16, X64_Vector128=17, X64_Vector256=18, - X64_X86Base_X64=19, - X64_BMI1_X64=20, - X64_BMI2_X64=21, - X64_LZCNT_X64=22, - X64_POPCNT_X64=23, - X64_SSE_X64=24, - X64_SSE2_X64=25, - X64_SSE41_X64=26, - X64_SSE42_X64=27, + X64_AVX512F=19, + X64_X86Base_X64=20, + X64_BMI1_X64=21, + X64_BMI2_X64=22, + X64_LZCNT_X64=23, + X64_POPCNT_X64=24, + X64_SSE_X64=25, + X64_SSE2_X64=26, + X64_SSE41_X64=27, + X64_SSE42_X64=28, X86_X86Base=1, X86_SSE=2, X86_SSE2=3, @@ -76,15 +77,16 @@ public enum InstructionSet X86_POPCNT=16, X86_Vector128=17, X86_Vector256=18, - X86_X86Base_X64=19, - X86_BMI1_X64=20, - X86_BMI2_X64=21, - X86_LZCNT_X64=22, - X86_POPCNT_X64=23, - X86_SSE_X64=24, - X86_SSE2_X64=25, - X86_SSE41_X64=26, - X86_SSE42_X64=27, + X86_AVX512F=19, + X86_X86Base_X64=20, + X86_BMI1_X64=21, + X86_BMI2_X64=22, + X86_LZCNT_X64=23, + X86_POPCNT_X64=24, + X86_SSE_X64=25, + X86_SSE2_X64=26, + X86_SSE41_X64=27, + X86_SSE42_X64=28, } @@ -256,6 +258,8 @@ public static InstructionSetFlags ExpandInstructionSetByImplicationHelper(Target resultflags.AddInstructionSet(InstructionSet.X64_SSE42); if (resultflags.HasInstructionSet(InstructionSet.X64_Vector256)) resultflags.AddInstructionSet(InstructionSet.X64_AVX); + if (resultflags.HasInstructionSet(InstructionSet.X64_AVX512F)) + resultflags.AddInstructionSet(InstructionSet.X64_AVX2); break; case TargetArchitecture.X86: @@ -289,6 +293,8 @@ public static InstructionSetFlags ExpandInstructionSetByImplicationHelper(Target resultflags.AddInstructionSet(InstructionSet.X86_SSE42); if (resultflags.HasInstructionSet(InstructionSet.X86_Vector256)) resultflags.AddInstructionSet(InstructionSet.X86_AVX); + if (resultflags.HasInstructionSet(InstructionSet.X86_AVX512F)) + 
resultflags.AddInstructionSet(InstructionSet.X86_AVX2); break; } @@ -379,6 +385,8 @@ private static InstructionSetFlags ExpandInstructionSetByReverseImplicationHelpe resultflags.AddInstructionSet(InstructionSet.X64_POPCNT); if (resultflags.HasInstructionSet(InstructionSet.X64_AVX)) resultflags.AddInstructionSet(InstructionSet.X64_Vector256); + if (resultflags.HasInstructionSet(InstructionSet.X64_AVX2)) + resultflags.AddInstructionSet(InstructionSet.X64_AVX512F); break; case TargetArchitecture.X86: @@ -412,6 +420,8 @@ private static InstructionSetFlags ExpandInstructionSetByReverseImplicationHelpe resultflags.AddInstructionSet(InstructionSet.X86_POPCNT); if (resultflags.HasInstructionSet(InstructionSet.X86_AVX)) resultflags.AddInstructionSet(InstructionSet.X86_Vector256); + if (resultflags.HasInstructionSet(InstructionSet.X86_AVX2)) + resultflags.AddInstructionSet(InstructionSet.X86_AVX512F); break; } @@ -471,6 +481,7 @@ public static IEnumerable ArchitectureToValidInstructionSets yield return new InstructionSetInfo("popcnt", "Popcnt", InstructionSet.X64_POPCNT, true); yield return new InstructionSetInfo("Vector128", "", InstructionSet.X64_Vector128, false); yield return new InstructionSetInfo("Vector256", "", InstructionSet.X64_Vector256, false); + yield return new InstructionSetInfo("avx512f", "", InstructionSet.X64_AVX512F, true); break; case TargetArchitecture.X86: @@ -492,6 +503,7 @@ public static IEnumerable ArchitectureToValidInstructionSets yield return new InstructionSetInfo("popcnt", "Popcnt", InstructionSet.X86_POPCNT, true); yield return new InstructionSetInfo("Vector128", "", InstructionSet.X86_Vector128, false); yield return new InstructionSetInfo("Vector256", "", InstructionSet.X86_Vector256, false); + yield return new InstructionSetInfo("avx512f", "", InstructionSet.X86_AVX512F, true); break; } diff --git a/src/coreclr/src/tools/Common/JitInterface/ThunkGenerator/InstructionSetDesc.txt b/src/coreclr/src/tools/Common/JitInterface/ThunkGenerator/InstructionSetDesc.txt index 44a9139982108b..b1985acc42f956 100644 --- a/src/coreclr/src/tools/Common/JitInterface/ThunkGenerator/InstructionSetDesc.txt +++ b/src/coreclr/src/tools/Common/JitInterface/ThunkGenerator/InstructionSetDesc.txt @@ -51,6 +51,8 @@ implication ,X86 ,POPCNT ,SSE42 instructionset ,X86 , , , ,Vector128, instructionset ,X86 , , , ,Vector256, implication ,X86 ,Vector256 ,AVX +instructionset ,X86 , , , ,AVX512F ,avx512f +implication ,X86 ,AVX512F ,AVX2 ; Definition of X64 instruction sets (Define ) definearch ,X64 ,64Bit ,X64 From 3dc4f0a0e20a69ccba3097c6436b415bc9c0c64e Mon Sep 17 00:00:00 2001 From: Peter Sollich Date: Tue, 30 Jun 2020 15:03:53 +0200 Subject: [PATCH 19/31] Move AVX2/AVX512 instruction set detection to GC side. 
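With detection owned by the GC, callers query the vxsort layer directly instead of going through GCToEEInterface. As a minimal usage sketch (not part of the patch itself: the entry points come from do_vxsort.h in this patch, while the function name and the scalar-fallback comment are illustrative only):

    #include "do_vxsort.h"

    // Hypothetical caller: dispatch to the widest vector ISA that both the
    // CPU and the OS support. SupportsInstructionSet caches the CPUID/XCR0
    // probe on first use, so repeated queries are cheap.
    static void sort_ptrs(uint8_t** low, uint8_t** high)
    {
        if (SupportsInstructionSet(InstructionSet::AVX512F))
            do_vxsort_avx512(low, high);
        else if (SupportsInstructionSet(InstructionSet::AVX2))
            do_vxsort_avx2(low, high);
        // else: fall back to the scalar introsort path, as gc.cpp does
    }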
--- src/coreclr/src/gc/CMakeLists.txt | 4 +- src/coreclr/src/gc/env/gcenv.ee.h | 1 - src/coreclr/src/gc/gc.cpp | 12 +- src/coreclr/src/gc/gcenv.ee.standalone.inl | 6 - src/coreclr/src/gc/gcinterface.ee.h | 11 -- src/coreclr/src/gc/sample/CMakeLists.txt | 1 + src/coreclr/src/gc/sample/gcenv.ee.cpp | 5 - src/coreclr/src/gc/vxsort/do_vxsort.h | 9 ++ src/coreclr/src/gc/vxsort/isa_detection.cpp | 134 ++++++++++++++++++ .../src/gc/vxsort/isa_detection_dummy.cpp | 15 ++ src/coreclr/src/inc/corinfoinstructionset.h | 46 +++--- .../Runtime/ReadyToRunInstructionSetHelper.cs | 2 - .../JitInterface/CorInfoInstructionSet.cs | 48 +++---- .../ThunkGenerator/InstructionSetDesc.txt | 2 - src/coreclr/src/vm/CMakeLists.txt | 4 +- src/coreclr/src/vm/amd64/AsmHelpers.asm | 15 -- src/coreclr/src/vm/amd64/unixstubs.cpp | 12 -- src/coreclr/src/vm/cgensys.h | 1 - src/coreclr/src/vm/codeman.cpp | 41 +----- src/coreclr/src/vm/gcenv.ee.cpp | 15 -- src/coreclr/src/vm/gcenv.ee.h | 2 - 21 files changed, 209 insertions(+), 177 deletions(-) create mode 100644 src/coreclr/src/gc/vxsort/isa_detection.cpp create mode 100644 src/coreclr/src/gc/vxsort/isa_detection_dummy.cpp diff --git a/src/coreclr/src/gc/CMakeLists.txt b/src/coreclr/src/gc/CMakeLists.txt index e584148f7038c0..61f75f4b71917a 100644 --- a/src/coreclr/src/gc/CMakeLists.txt +++ b/src/coreclr/src/gc/CMakeLists.txt @@ -36,13 +36,13 @@ if(CLR_CMAKE_HOST_UNIX) else() set ( GC_SOURCES ${GC_SOURCES} - windows/gcenv.windows.cpp -) + windows/gcenv.windows.cpp) endif(CLR_CMAKE_HOST_UNIX) if (CLR_CMAKE_TARGET_WIN32 AND CLR_CMAKE_TARGET_ARCH_AMD64) set ( GC_SOURCES ${GC_SOURCES} + vxsort/isa_detection_dummy.cpp vxsort/do_vxsort_avx2.cpp vxsort/do_vxsort_avx512.cpp vxsort/machine_traits.avx2.cpp diff --git a/src/coreclr/src/gc/env/gcenv.ee.h b/src/coreclr/src/gc/env/gcenv.ee.h index 0212bf60164604..fa4f2dcd765889 100644 --- a/src/coreclr/src/gc/env/gcenv.ee.h +++ b/src/coreclr/src/gc/env/gcenv.ee.h @@ -91,7 +91,6 @@ class GCToEEInterface static void VerifySyncTableEntry(); static void UpdateGCEventStatus(int publicLevel, int publicKeywords, int privateLevel, int privateKeywords); - static bool HasInstructionSet(InstructionSet requestedInstructionSet); }; #endif // __GCENV_EE_H__ diff --git a/src/coreclr/src/gc/gc.cpp b/src/coreclr/src/gc/gc.cpp index 5737660ab36ee7..bc5b368fd95254 100644 --- a/src/coreclr/src/gc/gc.cpp +++ b/src/coreclr/src/gc/gc.cpp @@ -2088,8 +2088,8 @@ uint8_t* tree_search (uint8_t* tree, uint8_t* old_address); #ifdef USE_VXSORT void do_vxsort(uint8_t** low, uint8_t** high, unsigned int depth) { - assert(GCToEEInterface::HasInstructionSet(kInstructionSetAVX2)); - if (GCToEEInterface::HasInstructionSet(kInstructionSetAVX512F)) + assert(SupportsInstructionSet(InstructionSet::AVX2)); + if (SupportsInstructionSet(InstructionSet::AVX512F)) { do_vxsort_avx512(low, high); } @@ -2107,8 +2107,8 @@ void do_vxsort(uint8_t** low, uint8_t** high, unsigned int depth) void do_vxsort(uint32_t* low, uint32_t* high, unsigned int depth) { - assert(GCToEEInterface::HasInstructionSet(kInstructionSetAVX2)); - if (GCToEEInterface::HasInstructionSet(kInstructionSetAVX512F)) + assert(SupportsInstructionSet(InstructionSet::AVX2)); + if (SupportsInstructionSet(InstructionSet::AVX512F)) { do_vxsort_avx512(low, high); } @@ -8352,7 +8352,7 @@ void gc_heap::sort_mark_list() #ifdef USE_VXSORT // runtime test if AVX2 is indeed available - if (GCToEEInterface::HasInstructionSet(kInstructionSetAVX2)) + if (SupportsInstructionSet(InstructionSet::AVX2)) { // is the range small enough for a 
32-bit sort? ptrdiff_t range = high - low; @@ -22267,7 +22267,7 @@ void gc_heap::plan_phase (int condemned_gen_number) { #ifndef MULTIPLE_HEAPS #ifdef USE_VXSORT - if (GCToEEInterface::HasInstructionSet(kInstructionSetAVX2)) + if (SupportsInstructionSet(InstructionSet::AVX2)) { ptrdiff_t entry_count = mark_list_index - mark_list; uint32_t* mark_list_32 = (uint32_t*)mark_list; diff --git a/src/coreclr/src/gc/gcenv.ee.standalone.inl b/src/coreclr/src/gc/gcenv.ee.standalone.inl index 0cd24cf2ae0673..b91d0c4d5b8915 100644 --- a/src/coreclr/src/gc/gcenv.ee.standalone.inl +++ b/src/coreclr/src/gc/gcenv.ee.standalone.inl @@ -295,10 +295,4 @@ inline void GCToEEInterface::UpdateGCEventStatus(int publicLevel, int publicKeyw #endif // __linux__ } -inline bool GCToEEInterface::HasInstructionSet(InstructionSet instructionSet) -{ - assert(g_theGCToCLR != nullptr); - return g_theGCToCLR->HasInstructionSet(instructionSet); -} - #endif // __GCTOENV_EE_STANDALONE_INL__ diff --git a/src/coreclr/src/gc/gcinterface.ee.h b/src/coreclr/src/gc/gcinterface.ee.h index 7aafd7944ebd43..bc9a0ab162c34d 100644 --- a/src/coreclr/src/gc/gcinterface.ee.h +++ b/src/coreclr/src/gc/gcinterface.ee.h @@ -21,14 +21,6 @@ enum EtwGCRootKind kEtwGCRootKindOther = 3, }; -enum InstructionSet -{ -#if defined(TARGET_X86) || defined(TARGET_AMD64) - kInstructionSetAVX2 = 0, - kInstructionSetAVX512F = 1, -#endif //defined(TARGET_X86) || defined(TARGET_AMD64) -}; - // This interface provides functions that the GC can use to fire events. // Events fired on this interface are split into two categories: "known" // events and "dynamic" events. Known events are events that are baked-in @@ -430,9 +422,6 @@ class IGCToCLR { virtual void UpdateGCEventStatus(int publicLevel, int publicKeywords, int privateLEvel, int privateKeywords) = 0; - - virtual - bool HasInstructionSet(InstructionSet requestedInstructionSet) = 0; }; #endif // _GCINTERFACE_EE_H_ diff --git a/src/coreclr/src/gc/sample/CMakeLists.txt b/src/coreclr/src/gc/sample/CMakeLists.txt index 7cd5820b9ed9b8..224e4cadd69726 100644 --- a/src/coreclr/src/gc/sample/CMakeLists.txt +++ b/src/coreclr/src/gc/sample/CMakeLists.txt @@ -27,6 +27,7 @@ set(SOURCES if (CLR_CMAKE_TARGET_WIN32 AND CLR_CMAKE_TARGET_ARCH_AMD64) set ( SOURCES ${SOURCES} + ../vxsort/isa_detection_dummy.cpp ../vxsort/do_vxsort_avx2.cpp ../vxsort/do_vxsort_avx512.cpp ../vxsort/machine_traits.avx2.cpp diff --git a/src/coreclr/src/gc/sample/gcenv.ee.cpp b/src/coreclr/src/gc/sample/gcenv.ee.cpp index 4b99a9e2714aa2..6f5151ee1534cd 100644 --- a/src/coreclr/src/gc/sample/gcenv.ee.cpp +++ b/src/coreclr/src/gc/sample/gcenv.ee.cpp @@ -344,8 +344,3 @@ inline void GCToEEInterface::AnalyzeSurvivorsFinished(int condemnedGeneration) { } - -bool GCToEEInterface::HasInstructionSet(InstructionSet requestedInstructionSet) -{ - return false; -} diff --git a/src/coreclr/src/gc/vxsort/do_vxsort.h b/src/coreclr/src/gc/vxsort/do_vxsort.h index a48c56f5e34e0b..666af8c0c0e2a1 100644 --- a/src/coreclr/src/gc/vxsort/do_vxsort.h +++ b/src/coreclr/src/gc/vxsort/do_vxsort.h @@ -1,3 +1,12 @@ +// Enum for the GCToOSInterface::SupportsInstructionSet method +enum class InstructionSet +{ + AVX2 = 0, + AVX512F = 1, +}; + +bool SupportsInstructionSet(InstructionSet instructionSet); + void do_vxsort_avx2(uint8_t** low, uint8_t** high); void do_vxsort_avx2(uint32_t* low, uint32_t* high); void do_vxsort_avx512(uint8_t** low, uint8_t** high); diff --git a/src/coreclr/src/gc/vxsort/isa_detection.cpp b/src/coreclr/src/gc/vxsort/isa_detection.cpp new file mode 100644 
index 00000000000000..806b7923ef108c
--- /dev/null
+++ b/src/coreclr/src/gc/vxsort/isa_detection.cpp
@@ -0,0 +1,134 @@
+// isa_detection.cpp : Runtime detection of the vector instruction sets (AVX2, AVX512F) usable by vxsort.
+//
+#include "common.h"
+#include <assert.h>
+
+#include "do_vxsort.h"
+
+enum class SupportedISA
+{
+    None = 0,
+    AVX2 = 1 << (int)InstructionSet::AVX2,
+    AVX512F = 1 << (int)InstructionSet::AVX512F
+};
+
+#if defined(TARGET_AMD64) && defined(TARGET_WINDOWS)
+
+static DWORD64 GetEnabledXStateFeaturesHelper()
+{
+    LIMITED_METHOD_CONTRACT;
+
+    // On Windows we have an API (GetEnabledXStateFeatures) to check if AVX is supported
+    typedef DWORD64(WINAPI* PGETENABLEDXSTATEFEATURES)();
+    PGETENABLEDXSTATEFEATURES pfnGetEnabledXStateFeatures = NULL;
+
+    HMODULE hMod = WszLoadLibraryEx(WINDOWS_KERNEL32_DLLNAME_W, NULL, LOAD_LIBRARY_SEARCH_SYSTEM32);
+    if (hMod == NULL)
+        return 0;
+
+    pfnGetEnabledXStateFeatures = (PGETENABLEDXSTATEFEATURES)GetProcAddress(hMod, "GetEnabledXStateFeatures");
+
+    if (pfnGetEnabledXStateFeatures == NULL)
+    {
+        return 0;
+    }
+
+    DWORD64 FeatureMask = pfnGetEnabledXStateFeatures();
+
+    return FeatureMask;
+}
+
+SupportedISA DetermineSupportedISA()
+{
+    // register definitions to make the following code more readable
+    enum reg
+    {
+        EAX = 0,
+        EBX = 1,
+        ECX = 2,
+        EDX = 3,
+        COUNT = 4
+    };
+
+    // bit definitions to make code more readable
+    enum bits
+    {
+        OSXSAVE = 1<<27,
+        AVX = 1<<28,
+        AVX2 = 1<<5,
+        AVX512F=1<<16,
+    };
+    int reg[COUNT];
+
+    __cpuid(reg, 0);
+    if (reg[EAX] < 7)
+        return SupportedISA::None;
+
+    __cpuid(reg, 1);
+
+    // both AVX and OSXSAVE feature flags must be enabled
+    if ((reg[ECX] & (OSXSAVE|AVX)) != (OSXSAVE | AVX))
+        return SupportedISA::None;
+
+    // get xcr0 register
+    DWORD64 xcr0 = _xgetbv(0);
+
+    // get OS XState info
+    DWORD64 FeatureMask = GetEnabledXStateFeaturesHelper();
+
+    // get processor extended feature flag info
+    __cpuid(reg, 7);
+
+    // check if both AVX2 and AVX512F are supported by both processor and OS
+    if ((reg[EBX] & (AVX2 | AVX512F)) == (AVX2 | AVX512F) &&
+        (xcr0 & 0xe6) == 0xe6 &&
+        (FeatureMask & (XSTATE_MASK_AVX | XSTATE_MASK_AVX512)) == (XSTATE_MASK_AVX | XSTATE_MASK_AVX512))
+    {
+        return (SupportedISA)((int)SupportedISA::AVX2 | (int)SupportedISA::AVX512F);
+    }
+
+    // check if AVX2 is supported by both processor and OS
+    if ((reg[EBX] & AVX2) &&
+        (xcr0 & 0x06) == 0x06 &&
+        (FeatureMask & XSTATE_MASK_AVX) == XSTATE_MASK_AVX)
+    {
+        return SupportedISA::AVX2;
+    }
+
+    return SupportedISA::None;
+}
+
+#elif defined(TARGET_UNIX)
+
+SupportedISA DetermineSupportedISA()
+{
+    __builtin_cpu_init();
+    if (__builtin_cpu_supports("avx2"))
+    {
+        if (__builtin_cpu_supports("avx512f"))
+            return (SupportedISA)((int)SupportedISA::AVX2 | (int)SupportedISA::AVX512F);
+        else
+            return SupportedISA::AVX2;
+    }
+    else
+    {
+        return SupportedISA::None;
+    }
+}
+
+#endif // defined(TARGET_UNIX)
+
+static bool s_initialized;
+static SupportedISA s_supportedISA;
+
+bool SupportsInstructionSet(InstructionSet instructionSet)
+{
+    assert(instructionSet == InstructionSet::AVX2 || instructionSet == InstructionSet::AVX512F);
+    if (!s_initialized)
+    {
+        s_supportedISA = DetermineSupportedISA();
+        s_initialized = true;
+    }
+    return ((int)s_supportedISA & (1 << (int)instructionSet)) != 0;
+}
+
diff --git a/src/coreclr/src/gc/vxsort/isa_detection_dummy.cpp b/src/coreclr/src/gc/vxsort/isa_detection_dummy.cpp
new file mode 100644
index 00000000000000..9049ec60c69c75
--- /dev/null
+++ b/src/coreclr/src/gc/vxsort/isa_detection_dummy.cpp
@@ -0,0 +1,15 @@
+// isa_detection_dummy.cpp : Stub used where runtime ISA detection is not wired up; reports that no vector instruction set is supported.
+//
+#include "common.h"
+#include <assert.h>
+
+#include "do_vxsort.h"
+
+#if defined(TARGET_AMD64) && defined(TARGET_WINDOWS)
+
+bool SupportsInstructionSet(InstructionSet instructionSet)
+{
+    return false;
+}
+#endif // defined(TARGET_AMD64) && defined(TARGET_WINDOWS)
+
diff --git a/src/coreclr/src/inc/corinfoinstructionset.h b/src/coreclr/src/inc/corinfoinstructionset.h
index 98e3797f42ba11..8e667bc29b07b1 100644
--- a/src/coreclr/src/inc/corinfoinstructionset.h
+++ b/src/coreclr/src/inc/corinfoinstructionset.h
@@ -50,16 +50,15 @@ enum CORINFO_InstructionSet InstructionSet_POPCNT=16, InstructionSet_Vector128=17, InstructionSet_Vector256=18,
-    InstructionSet_AVX512F=19,
-    InstructionSet_X86Base_X64=20,
-    InstructionSet_BMI1_X64=21,
-    InstructionSet_BMI2_X64=22,
-    InstructionSet_LZCNT_X64=23,
-    InstructionSet_POPCNT_X64=24,
-    InstructionSet_SSE_X64=25,
-    InstructionSet_SSE2_X64=26,
-    InstructionSet_SSE41_X64=27,
-    InstructionSet_SSE42_X64=28,
+    InstructionSet_X86Base_X64=19,
+    InstructionSet_BMI1_X64=20,
+    InstructionSet_BMI2_X64=21,
+    InstructionSet_LZCNT_X64=22,
+    InstructionSet_POPCNT_X64=23,
+    InstructionSet_SSE_X64=24,
+    InstructionSet_SSE2_X64=25,
+    InstructionSet_SSE41_X64=26,
+    InstructionSet_SSE42_X64=27,
 #endif // TARGET_AMD64 #ifdef TARGET_X86 InstructionSet_X86Base=1,
@@ -80,16 +79,15 @@ enum CORINFO_InstructionSet InstructionSet_POPCNT=16, InstructionSet_Vector128=17, InstructionSet_Vector256=18,
-    InstructionSet_AVX512F=19,
-    InstructionSet_X86Base_X64=20,
-    InstructionSet_BMI1_X64=21,
-    InstructionSet_BMI2_X64=22,
-    InstructionSet_LZCNT_X64=23,
-    InstructionSet_POPCNT_X64=24,
-    InstructionSet_SSE_X64=25,
-    InstructionSet_SSE2_X64=26,
-    InstructionSet_SSE41_X64=27,
-    InstructionSet_SSE42_X64=28,
+    InstructionSet_X86Base_X64=19,
+    InstructionSet_BMI1_X64=20,
+    InstructionSet_BMI2_X64=21,
+    InstructionSet_LZCNT_X64=22,
+    InstructionSet_POPCNT_X64=23,
+    InstructionSet_SSE_X64=24,
+    InstructionSet_SSE2_X64=25,
+    InstructionSet_SSE41_X64=26,
+    InstructionSet_SSE42_X64=27,
 #endif // TARGET_X86 };
@@ -278,8 +276,6 @@ inline CORINFO_InstructionSetFlags EnsureInstructionSetFlagsAreValid(CORINFO_Ins resultflags.RemoveInstructionSet(InstructionSet_POPCNT); if (resultflags.HasInstructionSet(InstructionSet_Vector256) && !resultflags.HasInstructionSet(InstructionSet_AVX)) resultflags.RemoveInstructionSet(InstructionSet_Vector256);
-    if (resultflags.HasInstructionSet(InstructionSet_AVX512F) && !resultflags.HasInstructionSet(InstructionSet_AVX2))
-        resultflags.RemoveInstructionSet(InstructionSet_AVX512F);
 #endif // TARGET_AMD64 #ifdef TARGET_X86 if (resultflags.HasInstructionSet(InstructionSet_SSE) && !resultflags.HasInstructionSet(InstructionSet_X86Base))
@@ -312,8 +308,6 @@ inline CORINFO_InstructionSetFlags EnsureInstructionSetFlagsAreValid(CORINFO_Ins resultflags.RemoveInstructionSet(InstructionSet_POPCNT); if (resultflags.HasInstructionSet(InstructionSet_Vector256) && !resultflags.HasInstructionSet(InstructionSet_AVX)) resultflags.RemoveInstructionSet(InstructionSet_Vector256);
-    if (resultflags.HasInstructionSet(InstructionSet_AVX512F) && !resultflags.HasInstructionSet(InstructionSet_AVX2))
-        resultflags.RemoveInstructionSet(InstructionSet_AVX512F);
 #endif // TARGET_X86 } while (!oldflags.Equals(resultflags));
@@ -410,8 +404,6 @@ inline const char *InstructionSetToString(CORINFO_InstructionSet instructionSet) return "Vector128"; case InstructionSet_Vector256 :
return "Vector256"; - case InstructionSet_AVX512F : - return "AVX512F"; #endif // TARGET_AMD64 #ifdef TARGET_X86 case InstructionSet_X86Base : @@ -450,8 +442,6 @@ inline const char *InstructionSetToString(CORINFO_InstructionSet instructionSet) return "Vector128"; case InstructionSet_Vector256 : return "Vector256"; - case InstructionSet_AVX512F : - return "AVX512F"; #endif // TARGET_X86 default: diff --git a/src/coreclr/src/tools/Common/Internal/Runtime/ReadyToRunInstructionSetHelper.cs b/src/coreclr/src/tools/Common/Internal/Runtime/ReadyToRunInstructionSetHelper.cs index 92a5eacf9071a8..16cf47f43d686f 100644 --- a/src/coreclr/src/tools/Common/Internal/Runtime/ReadyToRunInstructionSetHelper.cs +++ b/src/coreclr/src/tools/Common/Internal/Runtime/ReadyToRunInstructionSetHelper.cs @@ -73,7 +73,6 @@ public static class ReadyToRunInstructionSetHelper case InstructionSet.X64_POPCNT_X64: return ReadyToRunInstructionSet.Popcnt; case InstructionSet.X64_Vector128: return null; case InstructionSet.X64_Vector256: return null; - case InstructionSet.X64_AVX512F: return null; default: throw new Exception("Unknown instruction set"); } @@ -101,7 +100,6 @@ public static class ReadyToRunInstructionSetHelper case InstructionSet.X86_POPCNT: return ReadyToRunInstructionSet.Popcnt; case InstructionSet.X86_Vector128: return null; case InstructionSet.X86_Vector256: return null; - case InstructionSet.X86_AVX512F: return null; default: throw new Exception("Unknown instruction set"); } diff --git a/src/coreclr/src/tools/Common/JitInterface/CorInfoInstructionSet.cs b/src/coreclr/src/tools/Common/JitInterface/CorInfoInstructionSet.cs index d86d0b83689b08..dae2003712ff19 100644 --- a/src/coreclr/src/tools/Common/JitInterface/CorInfoInstructionSet.cs +++ b/src/coreclr/src/tools/Common/JitInterface/CorInfoInstructionSet.cs @@ -49,16 +49,15 @@ public enum InstructionSet X64_POPCNT=16, X64_Vector128=17, X64_Vector256=18, - X64_AVX512F=19, - X64_X86Base_X64=20, - X64_BMI1_X64=21, - X64_BMI2_X64=22, - X64_LZCNT_X64=23, - X64_POPCNT_X64=24, - X64_SSE_X64=25, - X64_SSE2_X64=26, - X64_SSE41_X64=27, - X64_SSE42_X64=28, + X64_X86Base_X64=19, + X64_BMI1_X64=20, + X64_BMI2_X64=21, + X64_LZCNT_X64=22, + X64_POPCNT_X64=23, + X64_SSE_X64=24, + X64_SSE2_X64=25, + X64_SSE41_X64=26, + X64_SSE42_X64=27, X86_X86Base=1, X86_SSE=2, X86_SSE2=3, @@ -77,16 +76,15 @@ public enum InstructionSet X86_POPCNT=16, X86_Vector128=17, X86_Vector256=18, - X86_AVX512F=19, - X86_X86Base_X64=20, - X86_BMI1_X64=21, - X86_BMI2_X64=22, - X86_LZCNT_X64=23, - X86_POPCNT_X64=24, - X86_SSE_X64=25, - X86_SSE2_X64=26, - X86_SSE41_X64=27, - X86_SSE42_X64=28, + X86_X86Base_X64=19, + X86_BMI1_X64=20, + X86_BMI2_X64=21, + X86_LZCNT_X64=22, + X86_POPCNT_X64=23, + X86_SSE_X64=24, + X86_SSE2_X64=25, + X86_SSE41_X64=26, + X86_SSE42_X64=27, } @@ -258,8 +256,6 @@ public static InstructionSetFlags ExpandInstructionSetByImplicationHelper(Target resultflags.AddInstructionSet(InstructionSet.X64_SSE42); if (resultflags.HasInstructionSet(InstructionSet.X64_Vector256)) resultflags.AddInstructionSet(InstructionSet.X64_AVX); - if (resultflags.HasInstructionSet(InstructionSet.X64_AVX512F)) - resultflags.AddInstructionSet(InstructionSet.X64_AVX2); break; case TargetArchitecture.X86: @@ -293,8 +289,6 @@ public static InstructionSetFlags ExpandInstructionSetByImplicationHelper(Target resultflags.AddInstructionSet(InstructionSet.X86_SSE42); if (resultflags.HasInstructionSet(InstructionSet.X86_Vector256)) resultflags.AddInstructionSet(InstructionSet.X86_AVX); - if 
(resultflags.HasInstructionSet(InstructionSet.X86_AVX512F)) - resultflags.AddInstructionSet(InstructionSet.X86_AVX2); break; } @@ -385,8 +379,6 @@ private static InstructionSetFlags ExpandInstructionSetByReverseImplicationHelpe resultflags.AddInstructionSet(InstructionSet.X64_POPCNT); if (resultflags.HasInstructionSet(InstructionSet.X64_AVX)) resultflags.AddInstructionSet(InstructionSet.X64_Vector256); - if (resultflags.HasInstructionSet(InstructionSet.X64_AVX2)) - resultflags.AddInstructionSet(InstructionSet.X64_AVX512F); break; case TargetArchitecture.X86: @@ -420,8 +412,6 @@ private static InstructionSetFlags ExpandInstructionSetByReverseImplicationHelpe resultflags.AddInstructionSet(InstructionSet.X86_POPCNT); if (resultflags.HasInstructionSet(InstructionSet.X86_AVX)) resultflags.AddInstructionSet(InstructionSet.X86_Vector256); - if (resultflags.HasInstructionSet(InstructionSet.X86_AVX2)) - resultflags.AddInstructionSet(InstructionSet.X86_AVX512F); break; } @@ -481,7 +471,6 @@ public static IEnumerable ArchitectureToValidInstructionSets yield return new InstructionSetInfo("popcnt", "Popcnt", InstructionSet.X64_POPCNT, true); yield return new InstructionSetInfo("Vector128", "", InstructionSet.X64_Vector128, false); yield return new InstructionSetInfo("Vector256", "", InstructionSet.X64_Vector256, false); - yield return new InstructionSetInfo("avx512f", "", InstructionSet.X64_AVX512F, true); break; case TargetArchitecture.X86: @@ -503,7 +492,6 @@ public static IEnumerable ArchitectureToValidInstructionSets yield return new InstructionSetInfo("popcnt", "Popcnt", InstructionSet.X86_POPCNT, true); yield return new InstructionSetInfo("Vector128", "", InstructionSet.X86_Vector128, false); yield return new InstructionSetInfo("Vector256", "", InstructionSet.X86_Vector256, false); - yield return new InstructionSetInfo("avx512f", "", InstructionSet.X86_AVX512F, true); break; } diff --git a/src/coreclr/src/tools/Common/JitInterface/ThunkGenerator/InstructionSetDesc.txt b/src/coreclr/src/tools/Common/JitInterface/ThunkGenerator/InstructionSetDesc.txt index b1985acc42f956..44a9139982108b 100644 --- a/src/coreclr/src/tools/Common/JitInterface/ThunkGenerator/InstructionSetDesc.txt +++ b/src/coreclr/src/tools/Common/JitInterface/ThunkGenerator/InstructionSetDesc.txt @@ -51,8 +51,6 @@ implication ,X86 ,POPCNT ,SSE42 instructionset ,X86 , , , ,Vector128, instructionset ,X86 , , , ,Vector256, implication ,X86 ,Vector256 ,AVX -instructionset ,X86 , , , ,AVX512F ,avx512f -implication ,X86 ,AVX512F ,AVX2 ; Definition of X64 instruction sets (Define ) definearch ,X64 ,64Bit ,X64 diff --git a/src/coreclr/src/vm/CMakeLists.txt b/src/coreclr/src/vm/CMakeLists.txt index 47ba6ae6baca1d..4c28c1708ab3dd 100644 --- a/src/coreclr/src/vm/CMakeLists.txt +++ b/src/coreclr/src/vm/CMakeLists.txt @@ -540,12 +540,12 @@ set(GC_SOURCES_WKS ../gc/gceewks.cpp ../gc/gcload.cpp ../gc/softwarewritewatch.cpp - ../gc/handletablecache.cpp -) + ../gc/handletablecache.cpp) if (CLR_CMAKE_TARGET_WIN32 AND CLR_CMAKE_TARGET_ARCH_AMD64) set ( GC_SOURCES_WKS ${GC_SOURCES_WKS} + ../gc/vxsort/isa_detection.cpp ../gc/vxsort/do_vxsort_avx2.cpp ../gc/vxsort/do_vxsort_avx512.cpp ../gc/vxsort/machine_traits.avx2.cpp diff --git a/src/coreclr/src/vm/amd64/AsmHelpers.asm b/src/coreclr/src/vm/amd64/AsmHelpers.asm index b02ac54f2fd028..f65c6f0be8b5cb 100644 --- a/src/coreclr/src/vm/amd64/AsmHelpers.asm +++ b/src/coreclr/src/vm/amd64/AsmHelpers.asm @@ -704,21 +704,6 @@ LEAF_ENTRY xmmYmmStateSupport, _TEXT ret LEAF_END xmmYmmStateSupport, _TEXT -;; extern 
"C" DWORD __stdcall zmmStateSupport(); -LEAF_ENTRY zmmStateSupport, _TEXT - mov ecx, 0 ; Specify xcr0 - xgetbv ; result in EDX:EAX - and eax, 0E6H - cmp eax, 0E6H ; check OS has enabled ZMM, XMM and YMM state support - jne not_supported - mov eax, 1 - jmp done - not_supported: - mov eax, 0 - done: - ret -LEAF_END zmmStateSupport, _TEXT - ;The following function uses Deterministic Cache Parameter leafs to determine the cache hierarchy information on Prescott & Above platforms. ; This function takes 3 arguments: ; Arg1 is an input to ECX. Used as index to specify which cache level to return information on by CPUID. diff --git a/src/coreclr/src/vm/amd64/unixstubs.cpp b/src/coreclr/src/vm/amd64/unixstubs.cpp index 3fcef24f7adab4..4d680ae1317f2e 100644 --- a/src/coreclr/src/vm/amd64/unixstubs.cpp +++ b/src/coreclr/src/vm/amd64/unixstubs.cpp @@ -54,18 +54,6 @@ extern "C" return ((eax & 0x06) == 0x06) ? 1 : 0; } - DWORD zmmStateSupport() - { - DWORD eax; - __asm(" xgetbv\n" \ - : "=a"(eax) /*output in eax*/\ - : "c"(0) /*inputs - 0 in ecx*/\ - : "edx" /* registers that are clobbered*/ - ); - // check OS has enabled both XMM and YMM state support - return ((eax & 0xe6) == 0xe6) ? 1 : 0; - } - void STDMETHODCALLTYPE JIT_ProfilerEnterLeaveTailcallStub(UINT_PTR ProfilerHandle) { } diff --git a/src/coreclr/src/vm/cgensys.h b/src/coreclr/src/vm/cgensys.h index bb0cdfd06a0be4..17213af0e7594c 100644 --- a/src/coreclr/src/vm/cgensys.h +++ b/src/coreclr/src/vm/cgensys.h @@ -99,7 +99,6 @@ inline void GetSpecificCpuInfo(CORINFO_CPU * cpuInfo) extern "C" DWORD __stdcall getcpuid(DWORD arg, unsigned char result[16]); extern "C" DWORD __stdcall getextcpuid(DWORD arg1, DWORD arg2, unsigned char result[16]); extern "C" DWORD __stdcall xmmYmmStateSupport(); -extern "C" DWORD __stdcall zmmStateSupport(); #endif inline bool TargetHasAVXSupport() diff --git a/src/coreclr/src/vm/codeman.cpp b/src/coreclr/src/vm/codeman.cpp index 2ae4c80948ee55..a41e19c02b5e8c 100644 --- a/src/coreclr/src/vm/codeman.cpp +++ b/src/coreclr/src/vm/codeman.cpp @@ -1232,38 +1232,27 @@ EEJitManager::EEJitManager() #if defined(TARGET_X86) || defined(TARGET_AMD64) -#ifndef TARGET_UNIX -static DWORD64 GetEnabledXStateFeaturesHelper() +bool DoesOSSupportAVX() { LIMITED_METHOD_CONTRACT; +#ifndef TARGET_UNIX // On Windows we have an api(GetEnabledXStateFeatures) to check if AVX is supported typedef DWORD64 (WINAPI *PGETENABLEDXSTATEFEATURES)(); PGETENABLEDXSTATEFEATURES pfnGetEnabledXStateFeatures = NULL; HMODULE hMod = WszLoadLibraryEx(WINDOWS_KERNEL32_DLLNAME_W, NULL, LOAD_LIBRARY_SEARCH_SYSTEM32); if(hMod == NULL) - return 0; + return FALSE; pfnGetEnabledXStateFeatures = (PGETENABLEDXSTATEFEATURES)GetProcAddress(hMod, "GetEnabledXStateFeatures"); if (pfnGetEnabledXStateFeatures == NULL) { - return 0; + return FALSE; } DWORD64 FeatureMask = pfnGetEnabledXStateFeatures(); - - return FeatureMask; -} -#endif // !TARGET_UNIX - -bool DoesOSSupportAVX() -{ - LIMITED_METHOD_CONTRACT; - -#ifndef TARGET_UNIX - DWORD64 FeatureMask = GetEnabledXStateFeaturesHelper(); if ((FeatureMask & XSTATE_MASK_AVX) == 0) { return FALSE; @@ -1273,21 +1262,6 @@ bool DoesOSSupportAVX() return TRUE; } -#if defined(TARGET_AMD64) && defined(TARGET_WINDOWS) -bool DoesOSSupportAVX512() -{ - LIMITED_METHOD_CONTRACT; - - DWORD64 FeatureMask = GetEnabledXStateFeaturesHelper(); - if ((FeatureMask & XSTATE_MASK_AVX512) == XSTATE_MASK_AVX512) - { - return TRUE; - } - - return FALSE; -} -#endif //defined(TARGET_AMD64) && defined(TARGET_WINDOWS) - #endif // defined(TARGET_X86) || 
defined(TARGET_AMD64) void EEJitManager::SetCpuInfo() @@ -1447,13 +1421,6 @@ void EEJitManager::SetCpuInfo() { CPUCompileFlags.Set(InstructionSet_AVX2); } - -#if defined(TARGET_AMD64) && defined(TARGET_WINDOWS) - if (DoesOSSupportAVX512() && zmmStateSupport() == 1 && (buffer[6] & 0x01) != 0) - { - CPUCompileFlags.Set(InstructionSet_AVX512F); - } -#endif //defined(TARGET_AMD64) && defined(TARGET_WINDOWS) } } } diff --git a/src/coreclr/src/vm/gcenv.ee.cpp b/src/coreclr/src/vm/gcenv.ee.cpp index b549921d3d319f..7bb9fdde70a612 100644 --- a/src/coreclr/src/vm/gcenv.ee.cpp +++ b/src/coreclr/src/vm/gcenv.ee.cpp @@ -1654,18 +1654,3 @@ void GCToEEInterface::UpdateGCEventStatus(int currentPublicLevel, int currentPub } #endif // __linux__ && FEATURE_EVENT_TRACE } - -bool GCToEEInterface::HasInstructionSet(InstructionSet requestedInstructionSet) -{ -#if defined(TARGET_X86) || defined(TARGET_AMD64) - CORJIT_FLAGS cpuCompileFlags = ExecutionManager::GetEEJitManager()->GetCPUCompileFlags(); - switch (requestedInstructionSet) - { - case kInstructionSetAVX2: return cpuCompileFlags.IsSet(InstructionSet_AVX2); - case kInstructionSetAVX512F: return cpuCompileFlags.IsSet(InstructionSet_AVX512F); - default: return false; - } -#else - return false; -#endif //defined(TARGET_X86) || defined(TARGET_AMD64) -} diff --git a/src/coreclr/src/vm/gcenv.ee.h b/src/coreclr/src/vm/gcenv.ee.h index 3132b455b22781..37f9dc9f68c4df 100644 --- a/src/coreclr/src/vm/gcenv.ee.h +++ b/src/coreclr/src/vm/gcenv.ee.h @@ -83,8 +83,6 @@ class GCToEEInterface : public IGCToCLR { void VerifySyncTableEntry(); void UpdateGCEventStatus(int publicLevel, int publicKeywords, int privateLevel, int privateKeywords); - - bool HasInstructionSet(InstructionSet requestedInstructionSet); }; } // namespace standalone From 16f178292485570dc3a2571cab831f5b35e3fb45 Mon Sep 17 00:00:00 2001 From: Peter Sollich Date: Thu, 2 Jul 2020 13:04:10 +0200 Subject: [PATCH 20/31] Use vectorized packer, switch packed range from uint32_t to int32_t, because that makes the sorting a bit more efficient. --- src/coreclr/src/gc/CMakeLists.txt | 4 +- src/coreclr/src/gc/gc.cpp | 97 +-- src/coreclr/src/gc/sample/CMakeLists.txt | 4 +- src/coreclr/src/gc/vxsort/alignment.h | 36 + src/coreclr/src/gc/vxsort/do_vxsort.h | 9 +- src/coreclr/src/gc/vxsort/do_vxsort_avx2.cpp | 37 +- .../src/gc/vxsort/do_vxsort_avx512.cpp | 25 +- .../src/gc/vxsort/machine_traits.avx2.h | 375 +++++----- .../src/gc/vxsort/machine_traits.avx512.h | 12 +- src/coreclr/src/gc/vxsort/machine_traits.h | 46 +- src/coreclr/src/gc/vxsort/packer.h | 196 ++++++ ...> bitonic_sort.AVX2.int32_t.generated.cpp} | 4 +- ... 
=> bitonic_sort.AVX2.int32_t.generated.h} | 624 ++++++++-------- ...bitonic_sort.AVX512.int32_t.generated.cpp} | 4 +- ...> bitonic_sort.AVX512.int32_t.generated.h} | 664 +++++++++--------- src/coreclr/src/gc/vxsort/vxsort.h | 109 ++- src/coreclr/src/vm/CMakeLists.txt | 4 +- 17 files changed, 1278 insertions(+), 972 deletions(-) create mode 100644 src/coreclr/src/gc/vxsort/alignment.h create mode 100644 src/coreclr/src/gc/vxsort/packer.h rename src/coreclr/src/gc/vxsort/smallsort/{bitonic_sort.AVX2.uint32_t.generated.cpp => bitonic_sort.AVX2.int32_t.generated.cpp} (82%) rename src/coreclr/src/gc/vxsort/smallsort/{bitonic_sort.AVX2.uint32_t.generated.h => bitonic_sort.AVX2.int32_t.generated.h} (75%) rename src/coreclr/src/gc/vxsort/smallsort/{bitonic_sort.AVX512.uint32_t.generated.cpp => bitonic_sort.AVX512.int32_t.generated.cpp} (82%) rename src/coreclr/src/gc/vxsort/smallsort/{bitonic_sort.AVX512.uint32_t.generated.h => bitonic_sort.AVX512.int32_t.generated.h} (73%) diff --git a/src/coreclr/src/gc/CMakeLists.txt b/src/coreclr/src/gc/CMakeLists.txt index 61f75f4b71917a..cb880d30e56617 100644 --- a/src/coreclr/src/gc/CMakeLists.txt +++ b/src/coreclr/src/gc/CMakeLists.txt @@ -47,9 +47,9 @@ if (CLR_CMAKE_TARGET_WIN32 AND CLR_CMAKE_TARGET_ARCH_AMD64) vxsort/do_vxsort_avx512.cpp vxsort/machine_traits.avx2.cpp vxsort/smallsort/bitonic_sort.AVX2.int64_t.generated.cpp - vxsort/smallsort/bitonic_sort.AVX2.uint32_t.generated.cpp + vxsort/smallsort/bitonic_sort.AVX2.int32_t.generated.cpp vxsort/smallsort/bitonic_sort.AVX512.int64_t.generated.cpp - vxsort/smallsort/bitonic_sort.AVX512.uint32_t.generated.cpp + vxsort/smallsort/bitonic_sort.AVX512.int32_t.generated.cpp ) endif (CLR_CMAKE_TARGET_WIN32 AND CLR_CMAKE_TARGET_ARCH_AMD64) diff --git a/src/coreclr/src/gc/gc.cpp b/src/coreclr/src/gc/gc.cpp index bc5b368fd95254..20d7f3eeaaaa18 100644 --- a/src/coreclr/src/gc/gc.cpp +++ b/src/coreclr/src/gc/gc.cpp @@ -2105,7 +2105,7 @@ void do_vxsort(uint8_t** low, uint8_t** high, unsigned int depth) #endif } -void do_vxsort(uint32_t* low, uint32_t* high, unsigned int depth) +void do_vxsort(int32_t* low, int32_t* high, unsigned int depth) { assert(SupportsInstructionSet(InstructionSet::AVX2)); if (SupportsInstructionSet(InstructionSet::AVX512F)) @@ -2117,7 +2117,7 @@ void do_vxsort(uint32_t* low, uint32_t* high, unsigned int depth) do_vxsort_avx2(low, high); } #ifdef _DEBUG - for (uint32_t* p = low; p < high; p++) + for (int32_t* p = low; p < high; p++) { assert(p[0] <= p[1]); } @@ -8354,6 +8354,19 @@ void gc_heap::sort_mark_list() // runtime test if AVX2 is indeed available if (SupportsInstructionSet(InstructionSet::AVX2)) { + ptrdiff_t item_count = mark_list_index - mark_list; +#if defined(_DEBUG) || defined(WRITE_SORT_DATA) + // in debug, make a copy of the mark list + // for checking and debugging purposes + uint8_t** mark_list_copy = &g_mark_list_copy[heap_number * mark_list_size]; + uint8_t** mark_list_copy_index = &mark_list_copy[item_count]; + for (ptrdiff_t i = 0; i < item_count; i++) + { + uint8_t* item = mark_list[i]; + mark_list_copy[i] = item; + } +#endif // defined(_DEBUG) || defined(WRITE_SORT_DATA) + // is the range small enough for a 32-bit sort? 
ptrdiff_t range = high - low; assert(sizeof(uint8_t*) == (1<<3)); @@ -8362,29 +8375,12 @@ void gc_heap::sort_mark_list() { dprintf(3, ("Sorting mark lists as 32-bit offsets")); - ptrdiff_t item_count = mark_list_index - mark_list; - //#define WRITE_SORT_DATA -#if defined(_DEBUG) || defined(WRITE_SORT_DATA) - uint8_t** mark_list_copy = &g_mark_list_copy[heap_number * mark_list_size]; - uint8_t** mark_list_copy_index = &mark_list_copy[item_count]; -#endif // first step: scale the pointers down to 32-bit offsets uint8_t** mark_list = this->mark_list; - uint32_t* mark_list_32 = (uint32_t*)mark_list; - for (ptrdiff_t i = 0; i < item_count; i++) - { - uint8_t* item = mark_list[i]; - ptrdiff_t scaled_item_offset = (item - low) >> 3; - assert((uint32_t)scaled_item_offset == scaled_item_offset); - assert((low + (scaled_item_offset << 3)) == item); - mark_list_32[i] = (uint32_t)scaled_item_offset; -#if defined(_DEBUG) || defined(WRITE_SORT_DATA) - mark_list_copy[i] = item; -#endif - } - + int32_t* mark_list_32 = (int32_t*)mark_list; + do_pack_avx2(mark_list, item_count, low); // sort the 32-bit offsets if (item_count > 0) { @@ -8426,23 +8422,7 @@ void gc_heap::sort_mark_list() #endif } -#ifdef _DEBUG - // in debug, sort the copy as well, so we can check we got the right result - if (mark_list_copy_index > mark_list_copy) - { - _sort(mark_list_copy, mark_list_copy_index - 1, 0); - } -#endif - - // scale the 32-bit offsets back to 64-bit pointers - // work backwards to avoid overwriting information that is still needed - for (ptrdiff_t i = item_count - 1; i >= 0; i--) - { - ptrdiff_t scaled_item_offset = mark_list_32[i]; - uint8_t* item = low + (scaled_item_offset << 3); - assert(mark_list_copy[i] == item); - mark_list[i] = item; - } + do_unpack_avx2(mark_list_32, item_count, low); } else { @@ -8450,13 +8430,36 @@ void gc_heap::sort_mark_list() if (mark_list_index > mark_list) _sort(mark_list, mark_list_index - 1, 0); } +#ifdef _DEBUG + // in debug, sort the copy as well using the proven sort, so we can check we got the right result + if (mark_list_copy_index > mark_list_copy) + { + introsort::sort(mark_list_copy, mark_list_copy_index - 1, 0); + } + for (ptrdiff_t i = 0; i < item_count; i++) + { + uint8_t* item = mark_list[i]; + assert(mark_list_copy[i] == item); + } +#endif //_DEBUG } else #endif //USE_VXSORT { dprintf(3, ("Sorting mark lists")); if (mark_list_index > mark_list) + { + ptrdiff_t start = get_cycle_count(); + introsort::sort(mark_list, mark_list_index - 1, 0); + + ptrdiff_t elapsed_cycles = get_cycle_count() - start; + size_t item_count = mark_list_index - mark_list; + int log2_item_count = index_of_highest_set_bit(item_count); + double elapsed_cyles_by_n_log_n = (double)elapsed_cycles / item_count / log2_item_count; + +// printf("GC#%d: first phase of sort_mark_list for heap %d took %u cycles to sort %u entries (cost/(n*log2(n) = %5.2f)\n", settings.gc_index, this->heap_number, elapsed_cycles, item_count, elapsed_cyles_by_n_log_n); + } } // printf("first phase of sort_mark_list for heap %d took %u cycles to sort %u entries\n", this->heap_number, GetCycleCount32() - start, mark_list_index - mark_list); @@ -22270,24 +22273,22 @@ void gc_heap::plan_phase (int condemned_gen_number) if (SupportsInstructionSet(InstructionSet::AVX2)) { ptrdiff_t entry_count = mark_list_index - mark_list; - uint32_t* mark_list_32 = (uint32_t*)mark_list; + int32_t* mark_list_32 = (int32_t*)mark_list; uint8_t* low = gc_low; ptrdiff_t range = heap_segment_allocated(ephemeral_heap_segment) - low; if 
((uint32_t)range == range) {
+            do_pack_avx2(mark_list, entry_count, low);
+            _sort(&mark_list_32[0], &mark_list_32[entry_count - 1], 0);
+            do_unpack_avx2(mark_list_32, entry_count, low);
+#ifdef _DEBUG
+            uint8_t* high = heap_segment_allocated (ephemeral_heap_segment);
             for (ptrdiff_t i = 0; i < entry_count; i++) { uint8_t* item = mark_list[i];
-                size_t offset = item - low;
-                assert((uint32_t)offset == offset);
-                mark_list_32[i] = (uint32_t)offset;
-            }
-            _sort(&mark_list_32[0], &mark_list_32[entry_count - 1], 0);
-            for (ptrdiff_t i = entry_count - 1; i >= 0; i--)
-            {
-                uint32_t offset = mark_list_32[i];
-                mark_list[i] = low + offset;
+                assert(low <= item && item < high);
             }
+#endif //_DEBUG
         } else {
diff --git a/src/coreclr/src/gc/sample/CMakeLists.txt b/src/coreclr/src/gc/sample/CMakeLists.txt
index 224e4cadd69726..d590118e9aa19e 100644
--- a/src/coreclr/src/gc/sample/CMakeLists.txt
+++ b/src/coreclr/src/gc/sample/CMakeLists.txt
@@ -32,9 +32,9 @@ if (CLR_CMAKE_TARGET_WIN32 AND CLR_CMAKE_TARGET_ARCH_AMD64) ../vxsort/do_vxsort_avx512.cpp ../vxsort/machine_traits.avx2.cpp ../vxsort/smallsort/bitonic_sort.AVX2.int64_t.generated.cpp
-    ../vxsort/smallsort/bitonic_sort.AVX2.uint32_t.generated.cpp
+    ../vxsort/smallsort/bitonic_sort.AVX2.int32_t.generated.cpp
     ../vxsort/smallsort/bitonic_sort.AVX512.int64_t.generated.cpp
-    ../vxsort/smallsort/bitonic_sort.AVX512.uint32_t.generated.cpp
+    ../vxsort/smallsort/bitonic_sort.AVX512.int32_t.generated.cpp
 ) endif (CLR_CMAKE_TARGET_WIN32 AND CLR_CMAKE_TARGET_ARCH_AMD64)
diff --git a/src/coreclr/src/gc/vxsort/alignment.h b/src/coreclr/src/gc/vxsort/alignment.h
new file mode 100644
index 00000000000000..c92ba9d6a31186
--- /dev/null
+++ b/src/coreclr/src/gc/vxsort/alignment.h
@@ -0,0 +1,36 @@
+#ifndef VXSORT_ALIGNMENT_H
+#define VXSORT_ALIGNMENT_H
+
+//#include <cstdint>
+
+namespace vxsort {
+
+using namespace std;
+
+template <size_t N>
+struct alignment_hint {
+ public:
+  static const size_t ALIGN = N;
+  static const int8_t REALIGN = 0x66;
+
+  alignment_hint() : left_align(REALIGN), right_align(REALIGN) {}
+  alignment_hint realign_left() {
+    alignment_hint copy = *this;
+    copy.left_align = REALIGN;
+    return copy;
+  }
+
+  alignment_hint realign_right() {
+    alignment_hint copy = *this;
+    copy.right_align = REALIGN;
+    return copy;
+  }
+
+  static bool is_aligned(void* p) { return (size_t)p % ALIGN == 0; }
+
+  int left_align : 8;
+  int right_align : 8;
+};
+
+}
+#endif // VXSORT_ALIGNMENT_H
diff --git a/src/coreclr/src/gc/vxsort/do_vxsort.h b/src/coreclr/src/gc/vxsort/do_vxsort.h
index 666af8c0c0e2a1..c33a949d325125 100644
--- a/src/coreclr/src/gc/vxsort/do_vxsort.h
+++ b/src/coreclr/src/gc/vxsort/do_vxsort.h
@@ -8,6 +8,11 @@ enum class InstructionSet bool SupportsInstructionSet(InstructionSet instructionSet); void do_vxsort_avx2(uint8_t** low, uint8_t** high);
-void do_vxsort_avx2(uint32_t* low, uint32_t* high);
+void do_vxsort_avx2(int32_t* low, int32_t* high);
+void do_pack_avx2(uint8_t** mem, size_t len, uint8_t* base);
+void do_unpack_avx2(int32_t* mem, size_t len, uint8_t* base);
+
 void do_vxsort_avx512(uint8_t** low, uint8_t** high);
-void do_vxsort_avx512(uint32_t* low, uint32_t* high);
+void do_vxsort_avx512(int32_t* low, int32_t* high);
+void do_pack_avx512(uint8_t** mem, size_t len, uint8_t* base);
+void do_unpack_avx512(int32_t* mem, size_t len, uint8_t* base);
diff --git a/src/coreclr/src/gc/vxsort/do_vxsort_avx2.cpp b/src/coreclr/src/gc/vxsort/do_vxsort_avx2.cpp
index 02283aa0626c92..03ce5b5a9067e4 100644
--- a/src/coreclr/src/gc/vxsort/do_vxsort_avx2.cpp
+++ b/src/coreclr/src/gc/vxsort/do_vxsort_avx2.cpp
@@ -12,14 +12,22 @@ namespace std { return _Ty(); }
+        static _Ty Min()
+        {
+            return _Ty();
+        }
     };
     template <>
-    class numeric_limits<uint32_t>
+    class numeric_limits<int32_t>
     { public:
-        static uint32_t Max()
+        static int32_t Max()
         {
-            return 0xffffffff;
+            return 0x7fffffff;
+        }
+        static int32_t Min()
+        {
+            return -0x7fffffff-1;
         } };
     template <>
@@ -28,7 +36,12 @@ namespace std public: static int64_t Max() {
-            return 0x7fffffffffffffff;
+            return 0x7fffffffffffffffi64;
+        }
+
+        static int64_t Min()
+        {
+            return -0x7fffffffffffffffi64-1;
         } }; }
@@ -42,6 +55,7 @@ T max(T a, T b) #endif #include "vxsort.h" #include "machine_traits.avx2.h"
+#include "packer.h"
 void do_vxsort_avx2(uint8_t** low, uint8_t** high) {
@@ -49,10 +63,21 @@ sorter.sort((int64_t*)low, (int64_t*)high); }
-void do_vxsort_avx2(uint32_t* low, uint32_t* high)
+void do_vxsort_avx2(int32_t* low, int32_t* high)
 {
-    auto sorter = vxsort::vxsort<uint32_t, vxsort::AVX2>();
+    auto sorter = vxsort::vxsort<int32_t, vxsort::AVX2>();
     sorter.sort(low, high); }
+void do_pack_avx2(uint8_t** mem, size_t len, uint8_t* base)
+{
+    auto packer = vxsort::packer<int64_t, int32_t, vxsort::AVX2, 3>();
+    packer.pack((int64_t*)mem, len, (int64_t)base);
+}
+
+void do_unpack_avx2(int32_t* mem, size_t len, uint8_t* base)
+{
+    auto packer = vxsort::packer<int64_t, int32_t, vxsort::AVX2, 3>();
+    packer.unpack(mem, len, (int64_t)base);
+}
 #include "vxsort_targets_disable.h"
diff --git a/src/coreclr/src/gc/vxsort/do_vxsort_avx512.cpp b/src/coreclr/src/gc/vxsort/do_vxsort_avx512.cpp
index 8ed453ff609e86..ea6c7748c71f57 100644
--- a/src/coreclr/src/gc/vxsort/do_vxsort_avx512.cpp
+++ b/src/coreclr/src/gc/vxsort/do_vxsort_avx512.cpp
@@ -12,14 +12,22 @@ namespace std { return _Ty(); }
+        static _Ty Min()
+        {
+            return _Ty();
+        }
     };
     template <>
-    class numeric_limits<uint32_t>
+    class numeric_limits<int32_t>
     { public:
-        static uint32_t Max()
+        static int32_t Max()
+        {
+            return 0x7fffffff;
+        }
+        static int32_t Min()
         {
-            return 0xffffffff;
+            return -0x7fffffff - 1;
         } };
     template <>
@@ -28,7 +36,12 @@ namespace std public: static int64_t Max() {
-            return 0x7fffffffffffffff;
+            return 0x7fffffffffffffffi64;
+        }
+
+        static int64_t Min()
+        {
+            return -0x7fffffffffffffffi64 - 1;
         } }; }
@@ -50,9 +63,9 @@ void do_vxsort_avx512(uint8_t** low, uint8_t** high) sorter.sort((int64_t*)low, (int64_t*)high); }
-void do_vxsort_avx512(uint32_t* low, uint32_t* high)
+void do_vxsort_avx512(int32_t* low, int32_t* high)
 {
-    auto sorter = vxsort::vxsort<uint32_t, vxsort::AVX512>();
+    auto sorter = vxsort::vxsort<int32_t, vxsort::AVX512>();
     sorter.sort(low, high); }
diff --git a/src/coreclr/src/gc/vxsort/machine_traits.avx2.h b/src/coreclr/src/gc/vxsort/machine_traits.avx2.h
index 8eb068a704cb02..9b65c64d0fd095 100644
--- a/src/coreclr/src/gc/vxsort/machine_traits.avx2.h
+++ b/src/coreclr/src/gc/vxsort/machine_traits.avx2.h
@@ -32,200 +32,241 @@ static void not_supported()
 template <>
 class vxsort_machine_traits<int32_t, AVX2> {
- public:
-  typedef __m256i TV;
-  typedef uint32_t TMASK;
-
-  static constexpr bool supports_compress_writes() { return false; }
-
-  static INLINE TV load_vec(TV* p) {
-    return _mm256_lddqu_si256(p);
-  }
-
-  static INLINE void store_vec(TV* ptr, TV v) {
-    _mm256_storeu_si256(ptr, v);
-  }
-
-  static void store_compress_vec(TV* ptr, TV v, TMASK mask) { not_supported(); }
-
-  static INLINE TV partition_vector(TV v, int mask) {
-    assert(mask >= 0);
-    assert(mask <= 255);
-    return s2i(_mm256_permutevar8x32_ps(i2s(v), _mm256_cvtepu8_epi32(_mm_loadu_si128((__m128i*)(perm_table_32 + mask * 8)))));
-  }
-
-  static INLINE TV get_vec_pivot(int32_t pivot) {
-    return _mm256_set1_epi32(pivot);
-  }
-  static INLINE TMASK get_cmpgt_mask(TV a, TV b) {
-    return _mm256_movemask_ps(i2s(_mm256_cmpgt_epi32(a, b)));
-  }
+ public:
+  typedef __m256i TV;
+  typedef uint32_t TMASK;
+
+  static constexpr bool supports_compress_writes() { return false; }
+
+  static INLINE TV load_vec(TV* p) { return _mm256_lddqu_si256(p); }
+
+  static INLINE void store_vec(TV* ptr, TV v) { _mm256_storeu_si256(ptr, v); }
+
+  static void store_compress_vec(TV* ptr, TV v, TMASK mask) { not_supported(); }
+
+  static INLINE TV partition_vector(TV v, int mask) {
+    assert(mask >= 0);
+    assert(mask <= 255);
+    return s2i(_mm256_permutevar8x32_ps(i2s(v), _mm256_cvtepu8_epi32(_mm_loadu_si128((__m128i*)(perm_table_32 + mask * 8)))));
+  }
+
+  static INLINE TV broadcast(int32_t pivot) { return _mm256_set1_epi32(pivot); }
+  static INLINE TMASK get_cmpgt_mask(TV a, TV b) { return _mm256_movemask_ps(i2s(_mm256_cmpgt_epi32(a, b))); }
+
+  static TV shift_right(TV v, int i) { return _mm256_srli_epi32(v, i); }
+  static TV shift_left(TV v, int i) { return _mm256_slli_epi32(v, i); }
+
+  static INLINE TV add(TV a, TV b) { return _mm256_add_epi32(a, b); }
+  static INLINE TV sub(TV a, TV b) { return _mm256_sub_epi32(a, b); };
 };
 template <>
 class vxsort_machine_traits<uint32_t, AVX2> {
- public:
-  typedef __m256i TV;
-  typedef uint32_t TMASK;
-
-  static constexpr bool supports_compress_writes() { return false; }
-
-  static INLINE TV load_vec(TV* p) {
-    return _mm256_lddqu_si256(p);
-  }
-
-  static INLINE void store_vec(TV* ptr, TV v) {
-    _mm256_storeu_si256(ptr, v);
-  }
-
-  static void store_compress_vec(TV* ptr, TV v, TMASK mask) { not_supported(); }
-
-  static INLINE TV partition_vector(TV v, int mask) {
-    assert(mask >= 0);
-    assert(mask <= 255);
-    return s2i(_mm256_permutevar8x32_ps(i2s(v), _mm256_cvtepu8_epi32(_mm_loadu_si128((__m128i*)(perm_table_32 + mask * 8)))));
-  }
-
-  static INLINE TV get_vec_pivot(uint32_t pivot) {
-    return _mm256_set1_epi32(pivot);
-  }
-  static INLINE TMASK get_cmpgt_mask(TV a, TV b) {
-    __m256i top_bit = _mm256_set1_epi32(1U << 31);
-    return _mm256_movemask_ps(i2s(_mm256_cmpgt_epi32(_mm256_xor_si256(top_bit, a), _mm256_xor_si256(top_bit, b))));
-  }
+ public:
+  typedef __m256i TV;
+  typedef uint32_t TMASK;
+
+  static constexpr bool supports_compress_writes() { return false; }
+
+  static INLINE TV load_vec(TV* p) { return _mm256_lddqu_si256(p); }
+
+  static INLINE void store_vec(TV* ptr, TV v) { _mm256_storeu_si256(ptr, v); }
+
+  static void store_compress_vec(TV* ptr, TV v, TMASK mask) { not_supported(); }
+
+  static INLINE TV partition_vector(TV v, int mask) {
+    assert(mask >= 0);
+    assert(mask <= 255);
+    return s2i(_mm256_permutevar8x32_ps(i2s(v), _mm256_cvtepu8_epi32(_mm_loadu_si128((__m128i*)(perm_table_32 + mask * 8)))));
+  }
+
+  static INLINE TV broadcast(uint32_t pivot) { return _mm256_set1_epi32(pivot); }
+  static INLINE TMASK get_cmpgt_mask(TV a, TV b) {
+    __m256i top_bit = _mm256_set1_epi32(1U << 31);
+    return _mm256_movemask_ps(i2s(_mm256_cmpgt_epi32(_mm256_xor_si256(top_bit, a), _mm256_xor_si256(top_bit, b))));
+  }
+
+  static TV shift_right(TV v, int i) { return _mm256_srli_epi32(v, i); }
+  static TV shift_left(TV v, int i) { return _mm256_slli_epi32(v, i); }
+
+  static INLINE TV add(TV a, TV b) { return _mm256_add_epi32(a, b); }
+  static INLINE TV sub(TV a, TV b) { return _mm256_sub_epi32(a, b); };
 };
 template <>
 class vxsort_machine_traits<float, AVX2> {
- public:
-  typedef __m256 TV;
-  typedef uint32_t TMASK;
+ public:
+  typedef __m256 TV;
+  typedef uint32_t TMASK;
+
+  static constexpr bool supports_compress_writes() { return false; }
+
+  static INLINE TV load_vec(TV* p) { return _mm256_loadu_ps((float*)p); }
-  static constexpr bool supports_compress_writes() { return false; }
+  static INLINE void store_vec(TV* ptr, TV v) { _mm256_storeu_ps((float*)ptr, v); }
-  static INLINE TV load_vec(TV* p) {
-    return _mm256_loadu_ps((float *)p);
-  }
+  static void store_compress_vec(TV* ptr, TV v, TMASK mask) { not_supported(); }
-  static INLINE void store_vec(TV* ptr, TV v) {
-    _mm256_storeu_ps((float *) ptr, v);
-  }
+  static INLINE TV partition_vector(TV v, int mask) {
+    assert(mask >= 0);
+    assert(mask <= 255);
+    return _mm256_permutevar8x32_ps(v, _mm256_cvtepu8_epi32(_mm_loadu_si128((__m128i*)(perm_table_32 + mask * 8))));
+  }
-  static void store_compress_vec(TV* ptr, TV v, TMASK mask) { not_supported(); }
+  static INLINE TV broadcast(float pivot) { return _mm256_set1_ps(pivot); }
-  static INLINE TV partition_vector(TV v, int mask) {
-    assert(mask >= 0);
-    assert(mask <= 255);
-    return _mm256_permutevar8x32_ps(v, _mm256_cvtepu8_epi32(_mm_loadu_si128((__m128i*)(perm_table_32 + mask * 8))));
-  }
+  static INLINE TMASK get_cmpgt_mask(TV a, TV b) {
+    /// 0x0E: Greater-than (ordered, signaling) \n
+    /// 0x1E: Greater-than (ordered, non-signaling)
+    return _mm256_movemask_ps(_mm256_cmp_ps(a, b, _CMP_GT_OS));
+  }
-  static INLINE TV get_vec_pivot(float pivot) {
-    return _mm256_set1_ps(pivot);
-  }
+  static INLINE TV add(TV a, TV b) { return _mm256_add_ps(a, b); }
+  static INLINE TV sub(TV a, TV b) { return _mm256_sub_ps(a, b); };
-  static INLINE TMASK get_cmpgt_mask(TV a, TV b) {
-    /// 0x0E: Greater-than (ordered, signaling) \n
-    /// 0x1E: Greater-than (ordered, non-signaling)
-    return _mm256_movemask_ps(_mm256_cmp_ps(a, b, _CMP_GT_OS));
-  }
 };
 template <>
 class vxsort_machine_traits<int64_t, AVX2> {
- public:
-  typedef __m256i TV;
-  typedef uint32_t TMASK;
-
-  static constexpr bool supports_compress_writes() { return false; }
-
-  static INLINE TV load_vec(TV* p) {
-    return _mm256_lddqu_si256(p);
-  }
-
-  static INLINE void store_vec(TV* ptr, TV v) {
-    _mm256_storeu_si256(ptr, v);
-  }
-
-  static void store_compress_vec(TV* ptr, TV v, TMASK mask) { not_supported(); }
-
-  static INLINE TV partition_vector(TV v, int mask) {
-    assert(mask >= 0);
-    assert(mask <= 15);
-    return s2i(_mm256_permutevar8x32_ps(i2s(v), _mm256_cvtepu8_epi32(_mm_loadu_si128((__m128i*)(perm_table_64 + mask * 8)))));
-  }
-
-  static INLINE TV get_vec_pivot(int64_t pivot) {
-    return _mm256_set1_epi64x(pivot);
-  }
-  static INLINE TMASK get_cmpgt_mask(TV a, TV b) {
-    return _mm256_movemask_pd(i2d(_mm256_cmpgt_epi64(a, b)));
-  }
+ public:
+  typedef __m256i TV;
+  typedef uint32_t TMASK;
+
+  static constexpr bool supports_compress_writes() { return false; }
+
+  static INLINE TV load_vec(TV* p) { return _mm256_lddqu_si256(p); }
+
+  static INLINE void store_vec(TV* ptr, TV v) { _mm256_storeu_si256(ptr, v); }
+
+  static void store_compress_vec(TV* ptr, TV v, TMASK mask) { not_supported(); }
+
+  static INLINE TV partition_vector(TV v, int mask) {
+    assert(mask >= 0);
+    assert(mask <= 15);
+    return s2i(_mm256_permutevar8x32_ps(i2s(v), _mm256_cvtepu8_epi32(_mm_loadu_si128((__m128i*)(perm_table_64 + mask * 8)))));
+  }
+
+  static INLINE TV broadcast(int64_t pivot) { return _mm256_set1_epi64x(pivot); }
+  static INLINE TMASK get_cmpgt_mask(TV a, TV b) { return _mm256_movemask_pd(i2d(_mm256_cmpgt_epi64(a, b))); }
+
+  static TV shift_right(TV v, int i) { return _mm256_srli_epi64(v, i); }
+  static TV shift_left(TV v, int i) { return _mm256_slli_epi64(v, i); }
+
+  static INLINE TV add(TV a, TV b) { return _mm256_add_epi64(a, b); }
+  static INLINE TV sub(TV a, TV b) { return _mm256_sub_epi64(a, b); };
+
+
+
+  static INLINE TV pack_ordered(TV a, TV b) {
+    a = _mm256_permute4x64_epi64(_mm256_shuffle_epi32(a, _MM_PERM_DBCA), _MM_PERM_DBCA);
+    b = _mm256_permute4x64_epi64(_mm256_shuffle_epi32(b, _MM_PERM_DBCA), _MM_PERM_CADB);
+    return _mm256_blend_epi32(a, b, 0b11110000);
+  }
+
+  static INLINE TV pack_unordered(TV a, TV b) {
+    b = _mm256_shuffle_epi32(b, _MM_PERM_CDAB);
+    return _mm256_blend_epi32(a, b, 0b10101010);
+  }
+
+  static INLINE void unpack_ordered_signed(TV p, TV& u1, TV& u2) {
+    auto p01 = _mm256_extracti128_si256(p, 0);
+    auto p02 = _mm256_extracti128_si256(p, 1);
+
+    u1 = _mm256_cvtepi32_epi64(p01);
+    u2 = _mm256_cvtepi32_epi64(p02);
+
+  }
+
+  static INLINE void unpack_ordered_unsigned(TV p, TV& u1, TV& u2) {
+    auto p01 = _mm256_extracti128_si256(p, 0);
+    auto p02 = _mm256_extracti128_si256(p, 1);
+
+    u1 = _mm256_cvtepu32_epi64(p01);
+    u2 = _mm256_cvtepu32_epi64(p02);
+
+  }
+
+/*
+  template<>
+  static INLINE TV pack_ordered(TV a, TV b) {
+    a = _mm256_permute4x64_epi64(_mm256_shuffle_epi32(a, _MM_PERM_DBCA), _MM_PERM_DBCA);
+    b = _mm256_permute4x64_epi64(_mm256_shuffle_epi32(b, _MM_PERM_DBCA), _MM_PERM_CADB);
+    return _mm256_blend_epi32(a, b, 0b11110000);
+  }
+
+  template<>
+  static INLINE typename vxsort_machine_traits<int64_t, AVX2>::TV pack_unordered(TV a, TV b) {
+    b = _mm256_shuffle_epi32(b, _MM_PERM_CDAB);
+    return _mm256_blend_epi32(a, b, 0b10101010);
+  }
+
+ */
+
+
 };
 template <>
 class vxsort_machine_traits<uint64_t, AVX2> {
- public:
-  typedef __m256i TV;
-  typedef uint32_t TMASK;
-
-  static constexpr bool supports_compress_writes() { return false; }
-
-  static INLINE TV load_vec(TV* p) {
-    return _mm256_lddqu_si256(p);
-  }
-
-  static INLINE void store_vec(TV* ptr, TV v) {
-    _mm256_storeu_si256(ptr, v);
-  }
-
-  static void store_compress_vec(TV* ptr, TV v, TMASK mask) { not_supported(); }
-
-  static INLINE TV partition_vector(TV v, int mask) {
-    assert(mask >= 0);
-    assert(mask <= 15);
-    return s2i(_mm256_permutevar8x32_ps(i2s(v), _mm256_cvtepu8_epi32(_mm_loadu_si128((__m128i*)(perm_table_64 + mask * 8)))));
-  }
-  static INLINE TV get_vec_pivot(int64_t pivot) {
-    return _mm256_set1_epi64x(pivot);
-  }
-  static INLINE TMASK get_cmpgt_mask(TV a, TV b) {
-    __m256i top_bit = _mm256_set1_epi64x(1LLU << 63);
-    return _mm256_movemask_pd(i2d(_mm256_cmpgt_epi64(_mm256_xor_si256(top_bit, a), _mm256_xor_si256(top_bit, b))));
-  }
+ public:
+  typedef __m256i TV;
+  typedef uint32_t TMASK;
+
+  static constexpr bool supports_compress_writes() { return false; }
+
+  static INLINE TV load_vec(TV* p) { return _mm256_lddqu_si256(p); }
+
+  static INLINE void store_vec(TV* ptr, TV v) { _mm256_storeu_si256(ptr, v); }
+
+  static void store_compress_vec(TV* ptr, TV v, TMASK mask) { not_supported(); }
+
+  static INLINE TV partition_vector(TV v, int mask) {
+    assert(mask >= 0);
+    assert(mask <= 15);
+    return s2i(_mm256_permutevar8x32_ps(i2s(v), _mm256_cvtepu8_epi32(_mm_loadu_si128((__m128i*)(perm_table_64 + mask * 8)))));
+  }
+  static INLINE TV broadcast(int64_t pivot) { return _mm256_set1_epi64x(pivot); }
+  static INLINE TMASK get_cmpgt_mask(TV a, TV b) {
+    __m256i top_bit = _mm256_set1_epi64x(1LLU << 63);
+    return _mm256_movemask_pd(i2d(_mm256_cmpgt_epi64(_mm256_xor_si256(top_bit, a), _mm256_xor_si256(top_bit, b))));
+  }
+
+  static INLINE TV shift_right(TV v, int i) { return _mm256_srli_epi64(v, i); }
+  static INLINE TV shift_left(TV v, int i) { return _mm256_slli_epi64(v, i); }
+
 };

 template <>
 class vxsort_machine_traits<uint64_t, AVX2> {
-    public:
-    typedef __m256i TV;
-    typedef uint32_t TMASK;
-
-    static constexpr bool supports_compress_writes() { return false; }
-
-    static INLINE TV load_vec(TV* p) {
-        return _mm256_lddqu_si256(p);
-    }
-
-    static INLINE void store_vec(TV* ptr, TV v) {
-        _mm256_storeu_si256(ptr, v);
-    }
-
-    static void store_compress_vec(TV* ptr, TV v, TMASK mask) { not_supported(); }
-
-    static INLINE TV partition_vector(TV v, int mask) {
-        assert(mask >= 0);
-        assert(mask <= 15);
-        return s2i(_mm256_permutevar8x32_ps(i2s(v), _mm256_cvtepu8_epi32(_mm_loadu_si128((__m128i*)(perm_table_64 + mask * 8)))));
-    }
-    static INLINE TV get_vec_pivot(int64_t pivot) {
-        return _mm256_set1_epi64x(pivot);
-    }
-    static INLINE TMASK get_cmpgt_mask(TV a, TV b) {
-        __m256i top_bit = _mm256_set1_epi64x(1LLU << 63);
-        return _mm256_movemask_pd(i2d(_mm256_cmpgt_epi64(_mm256_xor_si256(top_bit, a), _mm256_xor_si256(top_bit, b))));
-    }
+    public:
+    typedef __m256i TV;
+    typedef uint32_t TMASK;
+
+    static constexpr bool supports_compress_writes() { return false; }
+
+    static INLINE TV load_vec(TV* p) { return _mm256_lddqu_si256(p); }
+
+    static INLINE void store_vec(TV* ptr, TV v) { _mm256_storeu_si256(ptr, v); }
+
+    static void store_compress_vec(TV* ptr, TV v, TMASK mask) { not_supported(); }
+
+    static INLINE TV partition_vector(TV v, int mask) {
+        assert(mask >= 0);
+        assert(mask <= 15);
+        return s2i(_mm256_permutevar8x32_ps(i2s(v), _mm256_cvtepu8_epi32(_mm_loadu_si128((__m128i*)(perm_table_64 + mask * 8)))));
+    }
+    static INLINE TV broadcast(int64_t pivot) { return _mm256_set1_epi64x(pivot); }
+    static INLINE TMASK get_cmpgt_mask(TV a, TV b) {
+        __m256i top_bit = _mm256_set1_epi64x(1LLU << 63);
+        return _mm256_movemask_pd(i2d(_mm256_cmpgt_epi64(_mm256_xor_si256(top_bit, a), _mm256_xor_si256(top_bit, b))));
+    }
+
+    static INLINE TV shift_right(TV v, int i) { return _mm256_srli_epi64(v, i); }
+    static INLINE TV shift_left(TV v, int i) { return _mm256_slli_epi64(v, i); }
+
+    static INLINE TV add(TV a, TV b) { return _mm256_add_epi64(a, b); }
+    static INLINE TV sub(TV a, TV b) { return _mm256_sub_epi64(a, b); };
 };

 template <>
 class vxsort_machine_traits<double, AVX2> {
-    public:
-    typedef __m256d TV;
-    typedef uint32_t TMASK;
-
-    static constexpr bool supports_compress_writes() { return false; }
-
-    static INLINE TV load_vec(TV* p) {
-        return _mm256_loadu_pd((double *) p);
-    }
-
-    static INLINE void store_vec(TV* ptr, TV v) {
-        _mm256_storeu_pd((double *) ptr, v);
-    }
-
-    static void store_compress_vec(TV* ptr, TV v, TMASK mask) { not_supported(); }
-
-    static INLINE TV partition_vector(TV v, int mask) {
-        assert(mask >= 0);
-        assert(mask <= 15);
-        return s2d(_mm256_permutevar8x32_ps(d2s(v), _mm256_cvtepu8_epi32(_mm_loadu_si128((__m128i*)(perm_table_64 + mask * 8)))));
-    }
-
-    static INLINE TV get_vec_pivot(double pivot) {
-        return _mm256_set1_pd(pivot);
-    }
-    static INLINE TMASK get_cmpgt_mask(TV a, TV b) {
-        /// 0x0E: Greater-than (ordered, signaling) \n
-        /// 0x1E: Greater-than (ordered, non-signaling)
-        return _mm256_movemask_pd(_mm256_cmp_pd(a, b, _CMP_GT_OS));
-    }
+    public:
+    typedef __m256d TV;
+    typedef uint32_t TMASK;
+
+    static constexpr bool supports_compress_writes() { return false; }
+
+    static INLINE TV load_vec(TV* p) { return _mm256_loadu_pd((double*)p); }
+
+    static INLINE void store_vec(TV* ptr, TV v) { _mm256_storeu_pd((double*)ptr, v); }
+
+    static void store_compress_vec(TV* ptr, TV v, TMASK mask) { not_supported(); }
+
+    static INLINE TV partition_vector(TV v, int mask) {
+        assert(mask >= 0);
+        assert(mask <= 15);
+        return s2d(_mm256_permutevar8x32_ps(d2s(v), _mm256_cvtepu8_epi32(_mm_loadu_si128((__m128i*)(perm_table_64 + mask * 8)))));
+    }
+
+    static INLINE TV broadcast(double pivot) { return _mm256_set1_pd(pivot); }
+    static INLINE TMASK get_cmpgt_mask(TV a, TV b) {
+        /// 0x0E: Greater-than (ordered, signaling) \n
+        /// 0x1E: Greater-than (ordered, non-signaling)
+        return _mm256_movemask_pd(_mm256_cmp_pd(a, b, _CMP_GT_OS));
+    }
+
+    static INLINE TV add(TV a, TV b) { return _mm256_add_pd(a, b); }
+    static INLINE TV sub(TV a, TV b) { return _mm256_sub_pd(a, b); };
 };
 }
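
The uint64_t specialization above XORs the sign bit into both operands before comparing because AVX2 has no unsigned 64-bit compare. Flipping bit 63 maps unsigned order onto signed order, which a short worked case makes concrete (an illustration, not part of the patch):

    // uint64_t a = 0x8000000000000001ull;     // "large" unsigned value (2^63 + 1)
    // uint64_t b = 1ull;
    // (int64_t)a > (int64_t)b                 // false -- raw signed compare is wrong
    // (int64_t)(a ^ (1ull << 63)) >
    // (int64_t)(b ^ (1ull << 63))             // true  -- unsigned order restored
    //
    // so _mm256_cmpgt_epi64 on the XORed operands yields the unsigned result.
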
diff --git a/src/coreclr/src/gc/vxsort/machine_traits.avx512.h b/src/coreclr/src/gc/vxsort/machine_traits.avx512.h
index fcf15c6974ef89..5811fff4f35231 100644
--- a/src/coreclr/src/gc/vxsort/machine_traits.avx512.h
+++ b/src/coreclr/src/gc/vxsort/machine_traits.avx512.h
@@ -36,7 +36,7 @@ class vxsort_machine_traits<int32_t, AVX512> {
     _mm512_mask_compressstoreu_epi32(ptr, mask, v);
   }
 
-  static INLINE TV get_vec_pivot(int32_t pivot) {
+  static INLINE TV broadcast(int32_t pivot) {
     return _mm512_set1_epi32(pivot);
   }
 
@@ -69,7 +69,7 @@ class vxsort_machine_traits<uint32_t, AVX512> {
     _mm512_mask_compressstoreu_epi32(ptr, mask, v);
   }
 
-  static INLINE TV get_vec_pivot(uint32_t pivot) {
+  static INLINE TV broadcast(uint32_t pivot) {
     return _mm512_set1_epi32(pivot);
   }
 
@@ -102,7 +102,7 @@ class vxsort_machine_traits<float, AVX512> {
     _mm512_mask_compressstoreu_ps(ptr, mask, v);
   }
 
-  static INLINE TV get_vec_pivot(float pivot) {
+  static INLINE TV broadcast(float pivot) {
     return _mm512_set1_ps(pivot);
   }
 
@@ -135,7 +135,7 @@ class vxsort_machine_traits<int64_t, AVX512> {
     _mm512_mask_compressstoreu_epi64(ptr, mask, v);
   }
 
-  static INLINE TV get_vec_pivot(int64_t pivot) {
+  static INLINE TV broadcast(int64_t pivot) {
     return _mm512_set1_epi64(pivot);
  }
 
@@ -168,7 +168,7 @@ class vxsort_machine_traits<uint64_t, AVX512> {
     _mm512_mask_compressstoreu_epi64(ptr, mask, v);
   }
 
-  static INLINE TV get_vec_pivot(uint64_t pivot) {
+  static INLINE TV broadcast(uint64_t pivot) {
     return _mm512_set1_epi64(pivot);
   }
 
@@ -201,7 +201,7 @@ class vxsort_machine_traits<double, AVX512> {
     _mm512_mask_compressstoreu_pd(ptr, mask, v);
   }
 
-  static INLINE TV get_vec_pivot(double pivot) {
+  static INLINE TV broadcast(double pivot) {
     return _mm512_set1_pd(pivot);
   }
 
diff --git a/src/coreclr/src/gc/vxsort/machine_traits.h b/src/coreclr/src/gc/vxsort/machine_traits.h
index 4a9288b68ed477..c947fa646a76cb 100644
--- a/src/coreclr/src/gc/vxsort/machine_traits.h
+++ b/src/coreclr/src/gc/vxsort/machine_traits.h
@@ -10,26 +10,40 @@
 namespace vxsort {
 
 enum vector_machine {
-  NONE,
-  AVX2,
-  AVX512,
-  SVE,
+    NONE,
+    AVX2,
+    AVX512,
+    SVE,
 };
 
 template <typename T, vector_machine M>
 struct vxsort_machine_traits {
-    public:
-    typedef int TV;
-    typedef int TMASK;
-
-    static constexpr bool supports_compress_writes();
-
-    static TV load_vec(TV* ptr);
-    static void store_vec(TV* ptr, TV v);
-    static void store_compress_vec(TV* ptr, TV v, TMASK mask);
-    static TV partition_vector(TV v, int mask);
-    static TV get_vec_pivot(T pivot);
-    static TMASK get_cmpgt_mask(TV a, TV b);
+    public:
+    typedef int TV;
+    typedef int TMASK;
+
+    static constexpr bool supports_compress_writes();
+
+    static TV load_vec(TV* ptr);
+    static void store_vec(TV* ptr, TV v);
+    static void store_compress_vec(TV* ptr, TV v, TMASK mask);
+    static TV partition_vector(TV v, int mask);
+    static TV broadcast(T pivot);
+    static TMASK get_cmpgt_mask(TV a, TV b);
+
+    static TV shift_right(TV v, int i);
+    static TV shift_left(TV v, int i);
+
+    static TV add(TV a, TV b);
+    static TV sub(TV a, TV b);
+
+    static TV pack_ordered(TV a, TV b);
+    static TV pack_unordered(TV a, TV b);
+
+    static void unpack_ordered_signed(TV p, TV& u1, TV& u2);
+    static void unpack_ordered_unsigned(TV p, TV& u1, TV& u2);
+
 };
 }
diff --git a/src/coreclr/src/gc/vxsort/packer.h b/src/coreclr/src/gc/vxsort/packer.h
new file mode 100644
index 00000000000000..53d7a53ad4fcbc
--- /dev/null
+++ b/src/coreclr/src/gc/vxsort/packer.h
@@ -0,0 +1,196 @@
+#ifndef VXSORT_PACKER_H
+#define VXSORT_PACKER_H
+
+#include "vxsort_targets_enable_avx2.h"
+
+//#include
+//#include
+//#include
+//#include
+#include "alignment.h"
+#include "machine_traits.h"
+#include "machine_traits.avx2.h"
+#include "machine_traits.avx512.h"
+
+#include <limits>
+#include <type_traits>
+//#include
+
+namespace vxsort {
+
+template <typename TFrom, typename TTo, vector_machine M, int Shift, bool RespectPackingOrder, int MinLength>
+class packer {
+    static_assert(Shift <= 31, "Shift must be in the range 0..31");
+    using MT = vxsort_machine_traits<TFrom, M>;
+    typedef typename MT::TV TV;
+    typedef typename std::make_unsigned<TFrom>::type TU;
+    static const int N = sizeof(TV) / sizeof(TFrom);
+    typedef alignment_hint<sizeof(TV)> AH;
+
+    static const size_t ALIGN = AH::ALIGN;
+    static const size_t ALIGN_MASK = ALIGN - 1;
+
+    static INLINE void pack_scalar(const TFrom offset, TFrom*& mem_read, TTo*& mem_write) {
+        auto d = *(mem_read++);
+        if (Shift > 0)
+            d >>= Shift;
+        d -= offset;
+        *(mem_write++) = (TTo) d;
+    }
+
+    static INLINE void unpack_scalar(const TFrom offset, TTo*& mem_read, TFrom*& mem_write) {
+        TFrom d = *(--mem_read);
+
+        d += offset;
+
+        if (Shift > 0)
+            d = (TFrom) (((TU) d) << Shift);
+
+        *(--mem_write) = d;
+    }
+
+   public:
+
+    static void pack(TFrom *mem, size_t len, TFrom base) {
+        TFrom offset = (base >> Shift) - std::numeric_limits<TTo>::min();
+        auto baseVec = MT::broadcast(offset);
+
+        auto pre_aligned_mem = reinterpret_cast<TFrom *>(reinterpret_cast<size_t>(mem) & ~ALIGN_MASK);
+
+        auto mem_read = mem;
+        auto mem_write = (TTo *) mem;
+
+        // Include a "special" pass to handle very short scalar
+        // passes
+        if (MinLength < N && len < N) {
+            while (len--) {
+                pack_scalar(offset, mem_read, mem_write);
+            }
+            return;
+        }
+
+        // We have at least
+        // one vector worth of data to handle
+        // Let's try to align to vector size first
+
+        if (pre_aligned_mem < mem) {
+            const auto alignment_point = pre_aligned_mem + N;
+            len -= (alignment_point - mem_read);
+            while (mem_read < alignment_point) {
+                pack_scalar(offset, mem_read, mem_write);
+            }
+        }
+
+        assert(AH::is_aligned(mem_read));
+
+        auto memv_read = (TV *) mem_read;
+        auto memv_write = (TV *) mem_write;
+
+        auto lenv = len / N;
+        len -= (lenv * N);
+
+        while (lenv >= 2) {
+            assert(memv_read >= memv_write);
+
+            auto d01 = MT::load_vec(memv_read);
+            auto d02 = MT::load_vec(memv_read + 1);
+            if (Shift > 0) { // This is statically compiled in/out
+                d01 = MT::shift_right(d01, Shift);
+                d02 = MT::shift_right(d02, Shift);
+            }
+            d01 = MT::sub(d01, baseVec);
+            d02 = MT::sub(d02, baseVec);
+
+            auto packed_data = RespectPackingOrder ?
+                MT::pack_ordered(d01, d02) :
+                MT::pack_unordered(d01, d02);
+
+            MT::store_vec(memv_write, packed_data);
+
+            memv_read += 2;
+            memv_write++;
+            lenv -= 2;
+        }
+
+        len += lenv * N;
+
+        mem_read = (TFrom *) memv_read;
+        mem_write = (TTo *) memv_write;
+
+        while (len-- > 0) {
+            pack_scalar(offset, mem_read, mem_write);
+        }
+    }
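
Before unpack(), the arithmetic of pack() is easier to follow with concrete numbers. An illustrative trace (added here for clarity, not part of the patch; the base address is hypothetical) for TFrom = int64_t, TTo = int32_t, Shift = 3, i.e. packing 8-byte-aligned 64-bit pointers down to 32 bits:

    // pack() stores (v >> Shift) - offset, where offset = (base >> Shift) - INT32_MIN:
    //   base   = 0x00007f0000000000          // smallest value in the run, 8-byte aligned
    //   v      = base + 40                   // some larger value in the run
    //   v >> 3 = (base >> 3) + 5
    //   packed = (v >> 3) - offset
    //          = (base >> 3) + 5 - (base >> 3) + INT32_MIN
    //          = INT32_MIN + 5                // lands at the bottom of int32_t's range
    // unpack_scalar() inverts this exactly: add offset back, then shift left by 3.
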
+
+    static void unpack(TTo *mem, size_t len, TFrom base) {
+        TFrom offset = (base >> Shift) - std::numeric_limits<TTo>::min();
+        auto baseVec = MT::broadcast(offset);
+
+        auto mem_read = mem + len;
+        auto mem_write = ((TFrom *) mem) + len;
+
+        // Include a "special" pass to handle very short scalar
+        // passes
+        if (MinLength < 2*N && len < 2*N) {
+            while (len--) {
+                unpack_scalar(offset, mem_read, mem_write);
+            }
+            return;
+        }
+
+        auto pre_aligned_mem = reinterpret_cast<TTo *>(reinterpret_cast<size_t>(mem_read) & ~ALIGN_MASK);
+
+        if (pre_aligned_mem < mem_read) {
+            len -= (mem_read - pre_aligned_mem);
+            while (mem_read > pre_aligned_mem) {
+                unpack_scalar(offset, mem_read, mem_write);
+            }
+        }
+
+        assert(AH::is_aligned(mem_read));
+
+        auto lenv = len / (N*2);
+        auto memv_read = ((TV *) mem_read) - 1;
+        auto memv_write = ((TV *) mem_write) - 2;
+        len -= lenv * N * 2;
+
+        while (lenv > 0) {
+            assert(memv_read <= memv_write);
+            TV d01, d02;
+
+            if (std::numeric_limits<TTo>::min() < 0)
+                MT::unpack_ordered_signed(MT::load_vec(memv_read), d01, d02);
+            else
+                MT::unpack_ordered_unsigned(MT::load_vec(memv_read), d01, d02);
+
+            d01 = MT::add(d01, baseVec);
+            d02 = MT::add(d02, baseVec);
+
+            if (Shift > 0) { // This is statically compiled in/out
+                d01 = MT::shift_left(d01, Shift);
+                d02 = MT::shift_left(d02, Shift);
+            }
+
+            MT::store_vec(memv_write, d01);
+            MT::store_vec(memv_write + 1, d02);
+
+            memv_read -= 1;
+            memv_write -= 2;
+            lenv--;
+        }
+
+        mem_read = (TTo *) (memv_read + 1);
+        mem_write = (TFrom *) (memv_write + 2);
+
+        while (len-- > 0) {
+            unpack_scalar(offset, mem_read, mem_write);
+        }
+    }
+
+};
+
+}
+
+#include "vxsort_targets_disable.h"
+
+#endif // VXSORT_PACKER_H
diff --git a/src/coreclr/src/gc/vxsort/smallsort/bitonic_sort.AVX2.uint32_t.generated.cpp b/src/coreclr/src/gc/vxsort/smallsort/bitonic_sort.AVX2.int32_t.generated.cpp
similarity index 82%
rename from src/coreclr/src/gc/vxsort/smallsort/bitonic_sort.AVX2.uint32_t.generated.cpp
rename to src/coreclr/src/gc/vxsort/smallsort/bitonic_sort.AVX2.int32_t.generated.cpp
index 34320362588c33..33ec43e005b39e 100644
--- a/src/coreclr/src/gc/vxsort/smallsort/bitonic_sort.AVX2.uint32_t.generated.cpp
+++ b/src/coreclr/src/gc/vxsort/smallsort/bitonic_sort.AVX2.int32_t.generated.cpp
@@ -1,9 +1,9 @@
 #include "common.h"
-#include "bitonic_sort.AVX2.uint32_t.generated.h"
+#include "bitonic_sort.AVX2.int32_t.generated.h"
 
 using namespace vxsort;
-void vxsort::smallsort::bitonic::sort(uint32_t *ptr, size_t length) { +void vxsort::smallsort::bitonic::sort(int32_t *ptr, size_t length) { const int N = 8; switch(length / N) { diff --git a/src/coreclr/src/gc/vxsort/smallsort/bitonic_sort.AVX2.uint32_t.generated.h b/src/coreclr/src/gc/vxsort/smallsort/bitonic_sort.AVX2.int32_t.generated.h similarity index 75% rename from src/coreclr/src/gc/vxsort/smallsort/bitonic_sort.AVX2.uint32_t.generated.h rename to src/coreclr/src/gc/vxsort/smallsort/bitonic_sort.AVX2.int32_t.generated.h index 33a2d668474d03..2b5bb0e8654ce0 100644 --- a/src/coreclr/src/gc/vxsort/smallsort/bitonic_sort.AVX2.uint32_t.generated.h +++ b/src/coreclr/src/gc/vxsort/smallsort/bitonic_sort.AVX2.int32_t.generated.h @@ -6,8 +6,8 @@ // the code-generator that generated this source file instead. ///////////////////////////////////////////////////////////////////////////// -#ifndef BITONIC_SORT_AVX2_UINT32_T_H -#define BITONIC_SORT_AVX2_UINT32_T_H +#ifndef BITONIC_SORT_AVX2_INT32_T_H +#define BITONIC_SORT_AVX2_INT32_T_H #ifdef __GNUC__ #ifdef __clang__ @@ -30,7 +30,7 @@ namespace vxsort { namespace smallsort { -template<> struct bitonic { +template<> struct bitonic { public: static INLINE void sort_01v_ascending(__m256i& d01) { @@ -38,55 +38,55 @@ template<> struct bitonic { s = _mm256_shuffle_epi32(d01, 0xB1); - min = _mm256_min_epu32(s, d01); - max = _mm256_max_epu32(s, d01); + min = _mm256_min_epi32(s, d01); + max = _mm256_max_epi32(s, d01); d01 = _mm256_blend_epi32(min, max, 0xAA); s = _mm256_shuffle_epi32(d01, 0x1B); - min = _mm256_min_epu32(s, d01); - max = _mm256_max_epu32(s, d01); + min = _mm256_min_epi32(s, d01); + max = _mm256_max_epi32(s, d01); d01 = _mm256_blend_epi32(min, max, 0xCC); s = _mm256_shuffle_epi32(d01, 0xB1); - min = _mm256_min_epu32(s, d01); - max = _mm256_max_epu32(s, d01); + min = _mm256_min_epi32(s, d01); + max = _mm256_max_epi32(s, d01); d01 = _mm256_blend_epi32(min, max, 0xAA); s = d2i(_mm256_permute4x64_pd(i2d(_mm256_shuffle_epi32(d01, 0x1B)), 0x4E)); - min = _mm256_min_epu32(s, d01); - max = _mm256_max_epu32(s, d01); + min = _mm256_min_epi32(s, d01); + max = _mm256_max_epi32(s, d01); d01 = _mm256_blend_epi32(min, max, 0xF0); s = _mm256_shuffle_epi32(d01, 0x4E); - min = _mm256_min_epu32(s, d01); - max = _mm256_max_epu32(s, d01); + min = _mm256_min_epi32(s, d01); + max = _mm256_max_epi32(s, d01); d01 = _mm256_blend_epi32(min, max, 0xCC); s = _mm256_shuffle_epi32(d01, 0xB1); - min = _mm256_min_epu32(s, d01); - max = _mm256_max_epu32(s, d01); + min = _mm256_min_epi32(s, d01); + max = _mm256_max_epi32(s, d01); d01 = _mm256_blend_epi32(min, max, 0xAA); } static INLINE void sort_01v_merge_ascending(__m256i& d01) { __m256i min, max, s; s = d2i(_mm256_permute4x64_pd(i2d(d01), 0x4E)); - min = _mm256_min_epu32(s, d01); - max = _mm256_max_epu32(s, d01); + min = _mm256_min_epi32(s, d01); + max = _mm256_max_epi32(s, d01); d01 = _mm256_blend_epi32(min, max, 0xF0); s = _mm256_shuffle_epi32(d01, 0x4E); - min = _mm256_min_epu32(s, d01); - max = _mm256_max_epu32(s, d01); + min = _mm256_min_epi32(s, d01); + max = _mm256_max_epi32(s, d01); d01 = _mm256_blend_epi32(min, max, 0xCC); s = _mm256_shuffle_epi32(d01, 0xB1); - min = _mm256_min_epu32(s, d01); - max = _mm256_max_epu32(s, d01); + min = _mm256_min_epi32(s, d01); + max = _mm256_max_epi32(s, d01); d01 = _mm256_blend_epi32(min, max, 0xAA); } static INLINE void sort_01v_descending(__m256i& d01) { @@ -94,55 +94,55 @@ template<> struct bitonic { s = _mm256_shuffle_epi32(d01, 0xB1); - min = _mm256_min_epu32(s, d01); - 
max = _mm256_max_epu32(s, d01); + min = _mm256_min_epi32(s, d01); + max = _mm256_max_epi32(s, d01); d01 = _mm256_blend_epi32(max, min, 0xAA); s = _mm256_shuffle_epi32(d01, 0x1B); - min = _mm256_min_epu32(s, d01); - max = _mm256_max_epu32(s, d01); + min = _mm256_min_epi32(s, d01); + max = _mm256_max_epi32(s, d01); d01 = _mm256_blend_epi32(max, min, 0xCC); s = _mm256_shuffle_epi32(d01, 0xB1); - min = _mm256_min_epu32(s, d01); - max = _mm256_max_epu32(s, d01); + min = _mm256_min_epi32(s, d01); + max = _mm256_max_epi32(s, d01); d01 = _mm256_blend_epi32(max, min, 0xAA); s = d2i(_mm256_permute4x64_pd(i2d(_mm256_shuffle_epi32(d01, 0x1B)), 0x4E)); - min = _mm256_min_epu32(s, d01); - max = _mm256_max_epu32(s, d01); + min = _mm256_min_epi32(s, d01); + max = _mm256_max_epi32(s, d01); d01 = _mm256_blend_epi32(max, min, 0xF0); s = _mm256_shuffle_epi32(d01, 0x4E); - min = _mm256_min_epu32(s, d01); - max = _mm256_max_epu32(s, d01); + min = _mm256_min_epi32(s, d01); + max = _mm256_max_epi32(s, d01); d01 = _mm256_blend_epi32(max, min, 0xCC); s = _mm256_shuffle_epi32(d01, 0xB1); - min = _mm256_min_epu32(s, d01); - max = _mm256_max_epu32(s, d01); + min = _mm256_min_epi32(s, d01); + max = _mm256_max_epi32(s, d01); d01 = _mm256_blend_epi32(max, min, 0xAA); } static INLINE void sort_01v_merge_descending(__m256i& d01) { __m256i min, max, s; s = d2i(_mm256_permute4x64_pd(i2d(d01), 0x4E)); - min = _mm256_min_epu32(s, d01); - max = _mm256_max_epu32(s, d01); + min = _mm256_min_epi32(s, d01); + max = _mm256_max_epi32(s, d01); d01 = _mm256_blend_epi32(max, min, 0xF0); s = _mm256_shuffle_epi32(d01, 0x4E); - min = _mm256_min_epu32(s, d01); - max = _mm256_max_epu32(s, d01); + min = _mm256_min_epi32(s, d01); + max = _mm256_max_epi32(s, d01); d01 = _mm256_blend_epi32(max, min, 0xCC); s = _mm256_shuffle_epi32(d01, 0xB1); - min = _mm256_min_epu32(s, d01); - max = _mm256_max_epu32(s, d01); + min = _mm256_min_epi32(s, d01); + max = _mm256_max_epi32(s, d01); d01 = _mm256_blend_epi32(max, min, 0xAA); } static INLINE void sort_02v_ascending(__m256i& d01, __m256i& d02) { @@ -153,8 +153,8 @@ template<> struct bitonic { tmp = d02; - d02 = _mm256_max_epu32(d01, d02); - d01 = _mm256_min_epu32(d01, tmp); + d02 = _mm256_max_epi32(d01, d02); + d01 = _mm256_min_epi32(d01, tmp); sort_01v_merge_ascending(d01); sort_01v_merge_ascending(d02); @@ -167,8 +167,8 @@ template<> struct bitonic { tmp = d02; - d02 = _mm256_max_epu32(d01, d02); - d01 = _mm256_min_epu32(d01, tmp); + d02 = _mm256_max_epi32(d01, d02); + d01 = _mm256_min_epi32(d01, tmp); sort_01v_merge_descending(d01); sort_01v_merge_descending(d02); @@ -178,9 +178,9 @@ template<> struct bitonic { tmp = d01; - d01 = _mm256_min_epu32(d02, d01); + d01 = _mm256_min_epi32(d02, d01); - d02 = _mm256_max_epu32(d02, tmp); + d02 = _mm256_max_epi32(d02, tmp); sort_01v_merge_ascending(d01); sort_01v_merge_ascending(d02); @@ -190,9 +190,9 @@ template<> struct bitonic { tmp = d01; - d01 = _mm256_min_epu32(d02, d01); + d01 = _mm256_min_epi32(d02, d01); - d02 = _mm256_max_epu32(d02, tmp); + d02 = _mm256_max_epi32(d02, tmp); sort_01v_merge_descending(d01); sort_01v_merge_descending(d02); @@ -205,8 +205,8 @@ template<> struct bitonic { tmp = d03; - d03 = _mm256_max_epu32(d02, d03); - d02 = _mm256_min_epu32(d02, tmp); + d03 = _mm256_max_epi32(d02, d03); + d02 = _mm256_min_epi32(d02, tmp); sort_02v_merge_ascending(d01, d02); sort_01v_merge_ascending(d03); @@ -219,8 +219,8 @@ template<> struct bitonic { tmp = d03; - d03 = _mm256_max_epu32(d02, d03); - d02 = _mm256_min_epu32(d02, tmp); + d03 = 
_mm256_max_epi32(d02, d03); + d02 = _mm256_min_epi32(d02, tmp); sort_02v_merge_descending(d01, d02); sort_01v_merge_descending(d03); @@ -230,9 +230,9 @@ template<> struct bitonic { tmp = d01; - d01 = _mm256_min_epu32(d03, d01); + d01 = _mm256_min_epi32(d03, d01); - d03 = _mm256_max_epu32(d03, tmp); + d03 = _mm256_max_epi32(d03, tmp); sort_02v_merge_ascending(d01, d02); sort_01v_merge_ascending(d03); @@ -242,9 +242,9 @@ template<> struct bitonic { tmp = d01; - d01 = _mm256_min_epu32(d03, d01); + d01 = _mm256_min_epi32(d03, d01); - d03 = _mm256_max_epu32(d03, tmp); + d03 = _mm256_max_epi32(d03, tmp); sort_02v_merge_descending(d01, d02); sort_01v_merge_descending(d03); @@ -257,13 +257,13 @@ template<> struct bitonic { tmp = d03; - d03 = _mm256_max_epu32(d02, d03); - d02 = _mm256_min_epu32(d02, tmp); + d03 = _mm256_max_epi32(d02, d03); + d02 = _mm256_min_epi32(d02, tmp); tmp = d04; - d04 = _mm256_max_epu32(d01, d04); - d01 = _mm256_min_epu32(d01, tmp); + d04 = _mm256_max_epi32(d01, d04); + d01 = _mm256_min_epi32(d01, tmp); sort_02v_merge_ascending(d01, d02); sort_02v_merge_ascending(d03, d04); @@ -276,13 +276,13 @@ template<> struct bitonic { tmp = d03; - d03 = _mm256_max_epu32(d02, d03); - d02 = _mm256_min_epu32(d02, tmp); + d03 = _mm256_max_epi32(d02, d03); + d02 = _mm256_min_epi32(d02, tmp); tmp = d04; - d04 = _mm256_max_epu32(d01, d04); - d01 = _mm256_min_epu32(d01, tmp); + d04 = _mm256_max_epi32(d01, d04); + d01 = _mm256_min_epi32(d01, tmp); sort_02v_merge_descending(d01, d02); sort_02v_merge_descending(d03, d04); @@ -292,15 +292,15 @@ template<> struct bitonic { tmp = d01; - d01 = _mm256_min_epu32(d03, d01); + d01 = _mm256_min_epi32(d03, d01); - d03 = _mm256_max_epu32(d03, tmp); + d03 = _mm256_max_epi32(d03, tmp); tmp = d02; - d02 = _mm256_min_epu32(d04, d02); + d02 = _mm256_min_epi32(d04, d02); - d04 = _mm256_max_epu32(d04, tmp); + d04 = _mm256_max_epi32(d04, tmp); sort_02v_merge_ascending(d01, d02); sort_02v_merge_ascending(d03, d04); @@ -310,15 +310,15 @@ template<> struct bitonic { tmp = d01; - d01 = _mm256_min_epu32(d03, d01); + d01 = _mm256_min_epi32(d03, d01); - d03 = _mm256_max_epu32(d03, tmp); + d03 = _mm256_max_epi32(d03, tmp); tmp = d02; - d02 = _mm256_min_epu32(d04, d02); + d02 = _mm256_min_epi32(d04, d02); - d04 = _mm256_max_epu32(d04, tmp); + d04 = _mm256_max_epi32(d04, tmp); sort_02v_merge_descending(d01, d02); sort_02v_merge_descending(d03, d04); @@ -331,8 +331,8 @@ template<> struct bitonic { tmp = d05; - d05 = _mm256_max_epu32(d04, d05); - d04 = _mm256_min_epu32(d04, tmp); + d05 = _mm256_max_epi32(d04, d05); + d04 = _mm256_min_epi32(d04, tmp); sort_04v_merge_ascending(d01, d02, d03, d04); sort_01v_merge_ascending(d05); @@ -345,8 +345,8 @@ template<> struct bitonic { tmp = d05; - d05 = _mm256_max_epu32(d04, d05); - d04 = _mm256_min_epu32(d04, tmp); + d05 = _mm256_max_epi32(d04, d05); + d04 = _mm256_min_epi32(d04, tmp); sort_04v_merge_descending(d01, d02, d03, d04); sort_01v_merge_descending(d05); @@ -356,9 +356,9 @@ template<> struct bitonic { tmp = d01; - d01 = _mm256_min_epu32(d05, d01); + d01 = _mm256_min_epi32(d05, d01); - d05 = _mm256_max_epu32(d05, tmp); + d05 = _mm256_max_epi32(d05, tmp); sort_04v_merge_ascending(d01, d02, d03, d04); sort_01v_merge_ascending(d05); @@ -368,9 +368,9 @@ template<> struct bitonic { tmp = d01; - d01 = _mm256_min_epu32(d05, d01); + d01 = _mm256_min_epi32(d05, d01); - d05 = _mm256_max_epu32(d05, tmp); + d05 = _mm256_max_epi32(d05, tmp); sort_04v_merge_descending(d01, d02, d03, d04); sort_01v_merge_descending(d05); @@ -383,13 +383,13 @@ 
template<> struct bitonic { tmp = d05; - d05 = _mm256_max_epu32(d04, d05); - d04 = _mm256_min_epu32(d04, tmp); + d05 = _mm256_max_epi32(d04, d05); + d04 = _mm256_min_epi32(d04, tmp); tmp = d06; - d06 = _mm256_max_epu32(d03, d06); - d03 = _mm256_min_epu32(d03, tmp); + d06 = _mm256_max_epi32(d03, d06); + d03 = _mm256_min_epi32(d03, tmp); sort_04v_merge_ascending(d01, d02, d03, d04); sort_02v_merge_ascending(d05, d06); @@ -402,13 +402,13 @@ template<> struct bitonic { tmp = d05; - d05 = _mm256_max_epu32(d04, d05); - d04 = _mm256_min_epu32(d04, tmp); + d05 = _mm256_max_epi32(d04, d05); + d04 = _mm256_min_epi32(d04, tmp); tmp = d06; - d06 = _mm256_max_epu32(d03, d06); - d03 = _mm256_min_epu32(d03, tmp); + d06 = _mm256_max_epi32(d03, d06); + d03 = _mm256_min_epi32(d03, tmp); sort_04v_merge_descending(d01, d02, d03, d04); sort_02v_merge_descending(d05, d06); @@ -418,15 +418,15 @@ template<> struct bitonic { tmp = d01; - d01 = _mm256_min_epu32(d05, d01); + d01 = _mm256_min_epi32(d05, d01); - d05 = _mm256_max_epu32(d05, tmp); + d05 = _mm256_max_epi32(d05, tmp); tmp = d02; - d02 = _mm256_min_epu32(d06, d02); + d02 = _mm256_min_epi32(d06, d02); - d06 = _mm256_max_epu32(d06, tmp); + d06 = _mm256_max_epi32(d06, tmp); sort_04v_merge_ascending(d01, d02, d03, d04); sort_02v_merge_ascending(d05, d06); @@ -436,15 +436,15 @@ template<> struct bitonic { tmp = d01; - d01 = _mm256_min_epu32(d05, d01); + d01 = _mm256_min_epi32(d05, d01); - d05 = _mm256_max_epu32(d05, tmp); + d05 = _mm256_max_epi32(d05, tmp); tmp = d02; - d02 = _mm256_min_epu32(d06, d02); + d02 = _mm256_min_epi32(d06, d02); - d06 = _mm256_max_epu32(d06, tmp); + d06 = _mm256_max_epi32(d06, tmp); sort_04v_merge_descending(d01, d02, d03, d04); sort_02v_merge_descending(d05, d06); @@ -457,18 +457,18 @@ template<> struct bitonic { tmp = d05; - d05 = _mm256_max_epu32(d04, d05); - d04 = _mm256_min_epu32(d04, tmp); + d05 = _mm256_max_epi32(d04, d05); + d04 = _mm256_min_epi32(d04, tmp); tmp = d06; - d06 = _mm256_max_epu32(d03, d06); - d03 = _mm256_min_epu32(d03, tmp); + d06 = _mm256_max_epi32(d03, d06); + d03 = _mm256_min_epi32(d03, tmp); tmp = d07; - d07 = _mm256_max_epu32(d02, d07); - d02 = _mm256_min_epu32(d02, tmp); + d07 = _mm256_max_epi32(d02, d07); + d02 = _mm256_min_epi32(d02, tmp); sort_04v_merge_ascending(d01, d02, d03, d04); sort_03v_merge_ascending(d05, d06, d07); @@ -481,18 +481,18 @@ template<> struct bitonic { tmp = d05; - d05 = _mm256_max_epu32(d04, d05); - d04 = _mm256_min_epu32(d04, tmp); + d05 = _mm256_max_epi32(d04, d05); + d04 = _mm256_min_epi32(d04, tmp); tmp = d06; - d06 = _mm256_max_epu32(d03, d06); - d03 = _mm256_min_epu32(d03, tmp); + d06 = _mm256_max_epi32(d03, d06); + d03 = _mm256_min_epi32(d03, tmp); tmp = d07; - d07 = _mm256_max_epu32(d02, d07); - d02 = _mm256_min_epu32(d02, tmp); + d07 = _mm256_max_epi32(d02, d07); + d02 = _mm256_min_epi32(d02, tmp); sort_04v_merge_descending(d01, d02, d03, d04); sort_03v_merge_descending(d05, d06, d07); @@ -502,21 +502,21 @@ template<> struct bitonic { tmp = d01; - d01 = _mm256_min_epu32(d05, d01); + d01 = _mm256_min_epi32(d05, d01); - d05 = _mm256_max_epu32(d05, tmp); + d05 = _mm256_max_epi32(d05, tmp); tmp = d02; - d02 = _mm256_min_epu32(d06, d02); + d02 = _mm256_min_epi32(d06, d02); - d06 = _mm256_max_epu32(d06, tmp); + d06 = _mm256_max_epi32(d06, tmp); tmp = d03; - d03 = _mm256_min_epu32(d07, d03); + d03 = _mm256_min_epi32(d07, d03); - d07 = _mm256_max_epu32(d07, tmp); + d07 = _mm256_max_epi32(d07, tmp); sort_04v_merge_ascending(d01, d02, d03, d04); sort_03v_merge_ascending(d05, d06, 
d07); @@ -526,21 +526,21 @@ template<> struct bitonic { tmp = d01; - d01 = _mm256_min_epu32(d05, d01); + d01 = _mm256_min_epi32(d05, d01); - d05 = _mm256_max_epu32(d05, tmp); + d05 = _mm256_max_epi32(d05, tmp); tmp = d02; - d02 = _mm256_min_epu32(d06, d02); + d02 = _mm256_min_epi32(d06, d02); - d06 = _mm256_max_epu32(d06, tmp); + d06 = _mm256_max_epi32(d06, tmp); tmp = d03; - d03 = _mm256_min_epu32(d07, d03); + d03 = _mm256_min_epi32(d07, d03); - d07 = _mm256_max_epu32(d07, tmp); + d07 = _mm256_max_epi32(d07, tmp); sort_04v_merge_descending(d01, d02, d03, d04); sort_03v_merge_descending(d05, d06, d07); @@ -553,23 +553,23 @@ template<> struct bitonic { tmp = d05; - d05 = _mm256_max_epu32(d04, d05); - d04 = _mm256_min_epu32(d04, tmp); + d05 = _mm256_max_epi32(d04, d05); + d04 = _mm256_min_epi32(d04, tmp); tmp = d06; - d06 = _mm256_max_epu32(d03, d06); - d03 = _mm256_min_epu32(d03, tmp); + d06 = _mm256_max_epi32(d03, d06); + d03 = _mm256_min_epi32(d03, tmp); tmp = d07; - d07 = _mm256_max_epu32(d02, d07); - d02 = _mm256_min_epu32(d02, tmp); + d07 = _mm256_max_epi32(d02, d07); + d02 = _mm256_min_epi32(d02, tmp); tmp = d08; - d08 = _mm256_max_epu32(d01, d08); - d01 = _mm256_min_epu32(d01, tmp); + d08 = _mm256_max_epi32(d01, d08); + d01 = _mm256_min_epi32(d01, tmp); sort_04v_merge_ascending(d01, d02, d03, d04); sort_04v_merge_ascending(d05, d06, d07, d08); @@ -582,23 +582,23 @@ template<> struct bitonic { tmp = d05; - d05 = _mm256_max_epu32(d04, d05); - d04 = _mm256_min_epu32(d04, tmp); + d05 = _mm256_max_epi32(d04, d05); + d04 = _mm256_min_epi32(d04, tmp); tmp = d06; - d06 = _mm256_max_epu32(d03, d06); - d03 = _mm256_min_epu32(d03, tmp); + d06 = _mm256_max_epi32(d03, d06); + d03 = _mm256_min_epi32(d03, tmp); tmp = d07; - d07 = _mm256_max_epu32(d02, d07); - d02 = _mm256_min_epu32(d02, tmp); + d07 = _mm256_max_epi32(d02, d07); + d02 = _mm256_min_epi32(d02, tmp); tmp = d08; - d08 = _mm256_max_epu32(d01, d08); - d01 = _mm256_min_epu32(d01, tmp); + d08 = _mm256_max_epi32(d01, d08); + d01 = _mm256_min_epi32(d01, tmp); sort_04v_merge_descending(d01, d02, d03, d04); sort_04v_merge_descending(d05, d06, d07, d08); @@ -608,27 +608,27 @@ template<> struct bitonic { tmp = d01; - d01 = _mm256_min_epu32(d05, d01); + d01 = _mm256_min_epi32(d05, d01); - d05 = _mm256_max_epu32(d05, tmp); + d05 = _mm256_max_epi32(d05, tmp); tmp = d02; - d02 = _mm256_min_epu32(d06, d02); + d02 = _mm256_min_epi32(d06, d02); - d06 = _mm256_max_epu32(d06, tmp); + d06 = _mm256_max_epi32(d06, tmp); tmp = d03; - d03 = _mm256_min_epu32(d07, d03); + d03 = _mm256_min_epi32(d07, d03); - d07 = _mm256_max_epu32(d07, tmp); + d07 = _mm256_max_epi32(d07, tmp); tmp = d04; - d04 = _mm256_min_epu32(d08, d04); + d04 = _mm256_min_epi32(d08, d04); - d08 = _mm256_max_epu32(d08, tmp); + d08 = _mm256_max_epi32(d08, tmp); sort_04v_merge_ascending(d01, d02, d03, d04); sort_04v_merge_ascending(d05, d06, d07, d08); @@ -638,27 +638,27 @@ template<> struct bitonic { tmp = d01; - d01 = _mm256_min_epu32(d05, d01); + d01 = _mm256_min_epi32(d05, d01); - d05 = _mm256_max_epu32(d05, tmp); + d05 = _mm256_max_epi32(d05, tmp); tmp = d02; - d02 = _mm256_min_epu32(d06, d02); + d02 = _mm256_min_epi32(d06, d02); - d06 = _mm256_max_epu32(d06, tmp); + d06 = _mm256_max_epi32(d06, tmp); tmp = d03; - d03 = _mm256_min_epu32(d07, d03); + d03 = _mm256_min_epi32(d07, d03); - d07 = _mm256_max_epu32(d07, tmp); + d07 = _mm256_max_epi32(d07, tmp); tmp = d04; - d04 = _mm256_min_epu32(d08, d04); + d04 = _mm256_min_epi32(d08, d04); - d08 = _mm256_max_epu32(d08, tmp); + d08 = 
_mm256_max_epi32(d08, tmp); sort_04v_merge_descending(d01, d02, d03, d04); sort_04v_merge_descending(d05, d06, d07, d08); @@ -671,8 +671,8 @@ template<> struct bitonic { tmp = d09; - d09 = _mm256_max_epu32(d08, d09); - d08 = _mm256_min_epu32(d08, tmp); + d09 = _mm256_max_epi32(d08, d09); + d08 = _mm256_min_epi32(d08, tmp); sort_08v_merge_ascending(d01, d02, d03, d04, d05, d06, d07, d08); sort_01v_merge_ascending(d09); @@ -685,8 +685,8 @@ template<> struct bitonic { tmp = d09; - d09 = _mm256_max_epu32(d08, d09); - d08 = _mm256_min_epu32(d08, tmp); + d09 = _mm256_max_epi32(d08, d09); + d08 = _mm256_min_epi32(d08, tmp); sort_08v_merge_descending(d01, d02, d03, d04, d05, d06, d07, d08); sort_01v_merge_descending(d09); @@ -699,13 +699,13 @@ template<> struct bitonic { tmp = d09; - d09 = _mm256_max_epu32(d08, d09); - d08 = _mm256_min_epu32(d08, tmp); + d09 = _mm256_max_epi32(d08, d09); + d08 = _mm256_min_epi32(d08, tmp); tmp = d10; - d10 = _mm256_max_epu32(d07, d10); - d07 = _mm256_min_epu32(d07, tmp); + d10 = _mm256_max_epi32(d07, d10); + d07 = _mm256_min_epi32(d07, tmp); sort_08v_merge_ascending(d01, d02, d03, d04, d05, d06, d07, d08); sort_02v_merge_ascending(d09, d10); @@ -718,13 +718,13 @@ template<> struct bitonic { tmp = d09; - d09 = _mm256_max_epu32(d08, d09); - d08 = _mm256_min_epu32(d08, tmp); + d09 = _mm256_max_epi32(d08, d09); + d08 = _mm256_min_epi32(d08, tmp); tmp = d10; - d10 = _mm256_max_epu32(d07, d10); - d07 = _mm256_min_epu32(d07, tmp); + d10 = _mm256_max_epi32(d07, d10); + d07 = _mm256_min_epi32(d07, tmp); sort_08v_merge_descending(d01, d02, d03, d04, d05, d06, d07, d08); sort_02v_merge_descending(d09, d10); @@ -737,18 +737,18 @@ template<> struct bitonic { tmp = d09; - d09 = _mm256_max_epu32(d08, d09); - d08 = _mm256_min_epu32(d08, tmp); + d09 = _mm256_max_epi32(d08, d09); + d08 = _mm256_min_epi32(d08, tmp); tmp = d10; - d10 = _mm256_max_epu32(d07, d10); - d07 = _mm256_min_epu32(d07, tmp); + d10 = _mm256_max_epi32(d07, d10); + d07 = _mm256_min_epi32(d07, tmp); tmp = d11; - d11 = _mm256_max_epu32(d06, d11); - d06 = _mm256_min_epu32(d06, tmp); + d11 = _mm256_max_epi32(d06, d11); + d06 = _mm256_min_epi32(d06, tmp); sort_08v_merge_ascending(d01, d02, d03, d04, d05, d06, d07, d08); sort_03v_merge_ascending(d09, d10, d11); @@ -761,18 +761,18 @@ template<> struct bitonic { tmp = d09; - d09 = _mm256_max_epu32(d08, d09); - d08 = _mm256_min_epu32(d08, tmp); + d09 = _mm256_max_epi32(d08, d09); + d08 = _mm256_min_epi32(d08, tmp); tmp = d10; - d10 = _mm256_max_epu32(d07, d10); - d07 = _mm256_min_epu32(d07, tmp); + d10 = _mm256_max_epi32(d07, d10); + d07 = _mm256_min_epi32(d07, tmp); tmp = d11; - d11 = _mm256_max_epu32(d06, d11); - d06 = _mm256_min_epu32(d06, tmp); + d11 = _mm256_max_epi32(d06, d11); + d06 = _mm256_min_epi32(d06, tmp); sort_08v_merge_descending(d01, d02, d03, d04, d05, d06, d07, d08); sort_03v_merge_descending(d09, d10, d11); @@ -785,23 +785,23 @@ template<> struct bitonic { tmp = d09; - d09 = _mm256_max_epu32(d08, d09); - d08 = _mm256_min_epu32(d08, tmp); + d09 = _mm256_max_epi32(d08, d09); + d08 = _mm256_min_epi32(d08, tmp); tmp = d10; - d10 = _mm256_max_epu32(d07, d10); - d07 = _mm256_min_epu32(d07, tmp); + d10 = _mm256_max_epi32(d07, d10); + d07 = _mm256_min_epi32(d07, tmp); tmp = d11; - d11 = _mm256_max_epu32(d06, d11); - d06 = _mm256_min_epu32(d06, tmp); + d11 = _mm256_max_epi32(d06, d11); + d06 = _mm256_min_epi32(d06, tmp); tmp = d12; - d12 = _mm256_max_epu32(d05, d12); - d05 = _mm256_min_epu32(d05, tmp); + d12 = _mm256_max_epi32(d05, d12); + d05 = 
_mm256_min_epi32(d05, tmp); sort_08v_merge_ascending(d01, d02, d03, d04, d05, d06, d07, d08); sort_04v_merge_ascending(d09, d10, d11, d12); @@ -814,23 +814,23 @@ template<> struct bitonic { tmp = d09; - d09 = _mm256_max_epu32(d08, d09); - d08 = _mm256_min_epu32(d08, tmp); + d09 = _mm256_max_epi32(d08, d09); + d08 = _mm256_min_epi32(d08, tmp); tmp = d10; - d10 = _mm256_max_epu32(d07, d10); - d07 = _mm256_min_epu32(d07, tmp); + d10 = _mm256_max_epi32(d07, d10); + d07 = _mm256_min_epi32(d07, tmp); tmp = d11; - d11 = _mm256_max_epu32(d06, d11); - d06 = _mm256_min_epu32(d06, tmp); + d11 = _mm256_max_epi32(d06, d11); + d06 = _mm256_min_epi32(d06, tmp); tmp = d12; - d12 = _mm256_max_epu32(d05, d12); - d05 = _mm256_min_epu32(d05, tmp); + d12 = _mm256_max_epi32(d05, d12); + d05 = _mm256_min_epi32(d05, tmp); sort_08v_merge_descending(d01, d02, d03, d04, d05, d06, d07, d08); sort_04v_merge_descending(d09, d10, d11, d12); @@ -843,28 +843,28 @@ template<> struct bitonic { tmp = d09; - d09 = _mm256_max_epu32(d08, d09); - d08 = _mm256_min_epu32(d08, tmp); + d09 = _mm256_max_epi32(d08, d09); + d08 = _mm256_min_epi32(d08, tmp); tmp = d10; - d10 = _mm256_max_epu32(d07, d10); - d07 = _mm256_min_epu32(d07, tmp); + d10 = _mm256_max_epi32(d07, d10); + d07 = _mm256_min_epi32(d07, tmp); tmp = d11; - d11 = _mm256_max_epu32(d06, d11); - d06 = _mm256_min_epu32(d06, tmp); + d11 = _mm256_max_epi32(d06, d11); + d06 = _mm256_min_epi32(d06, tmp); tmp = d12; - d12 = _mm256_max_epu32(d05, d12); - d05 = _mm256_min_epu32(d05, tmp); + d12 = _mm256_max_epi32(d05, d12); + d05 = _mm256_min_epi32(d05, tmp); tmp = d13; - d13 = _mm256_max_epu32(d04, d13); - d04 = _mm256_min_epu32(d04, tmp); + d13 = _mm256_max_epi32(d04, d13); + d04 = _mm256_min_epi32(d04, tmp); sort_08v_merge_ascending(d01, d02, d03, d04, d05, d06, d07, d08); sort_05v_merge_ascending(d09, d10, d11, d12, d13); @@ -877,28 +877,28 @@ template<> struct bitonic { tmp = d09; - d09 = _mm256_max_epu32(d08, d09); - d08 = _mm256_min_epu32(d08, tmp); + d09 = _mm256_max_epi32(d08, d09); + d08 = _mm256_min_epi32(d08, tmp); tmp = d10; - d10 = _mm256_max_epu32(d07, d10); - d07 = _mm256_min_epu32(d07, tmp); + d10 = _mm256_max_epi32(d07, d10); + d07 = _mm256_min_epi32(d07, tmp); tmp = d11; - d11 = _mm256_max_epu32(d06, d11); - d06 = _mm256_min_epu32(d06, tmp); + d11 = _mm256_max_epi32(d06, d11); + d06 = _mm256_min_epi32(d06, tmp); tmp = d12; - d12 = _mm256_max_epu32(d05, d12); - d05 = _mm256_min_epu32(d05, tmp); + d12 = _mm256_max_epi32(d05, d12); + d05 = _mm256_min_epi32(d05, tmp); tmp = d13; - d13 = _mm256_max_epu32(d04, d13); - d04 = _mm256_min_epu32(d04, tmp); + d13 = _mm256_max_epi32(d04, d13); + d04 = _mm256_min_epi32(d04, tmp); sort_08v_merge_descending(d01, d02, d03, d04, d05, d06, d07, d08); sort_05v_merge_descending(d09, d10, d11, d12, d13); @@ -911,33 +911,33 @@ template<> struct bitonic { tmp = d09; - d09 = _mm256_max_epu32(d08, d09); - d08 = _mm256_min_epu32(d08, tmp); + d09 = _mm256_max_epi32(d08, d09); + d08 = _mm256_min_epi32(d08, tmp); tmp = d10; - d10 = _mm256_max_epu32(d07, d10); - d07 = _mm256_min_epu32(d07, tmp); + d10 = _mm256_max_epi32(d07, d10); + d07 = _mm256_min_epi32(d07, tmp); tmp = d11; - d11 = _mm256_max_epu32(d06, d11); - d06 = _mm256_min_epu32(d06, tmp); + d11 = _mm256_max_epi32(d06, d11); + d06 = _mm256_min_epi32(d06, tmp); tmp = d12; - d12 = _mm256_max_epu32(d05, d12); - d05 = _mm256_min_epu32(d05, tmp); + d12 = _mm256_max_epi32(d05, d12); + d05 = _mm256_min_epi32(d05, tmp); tmp = d13; - d13 = _mm256_max_epu32(d04, d13); - d04 = 
_mm256_min_epu32(d04, tmp); + d13 = _mm256_max_epi32(d04, d13); + d04 = _mm256_min_epi32(d04, tmp); tmp = d14; - d14 = _mm256_max_epu32(d03, d14); - d03 = _mm256_min_epu32(d03, tmp); + d14 = _mm256_max_epi32(d03, d14); + d03 = _mm256_min_epi32(d03, tmp); sort_08v_merge_ascending(d01, d02, d03, d04, d05, d06, d07, d08); sort_06v_merge_ascending(d09, d10, d11, d12, d13, d14); @@ -950,33 +950,33 @@ template<> struct bitonic { tmp = d09; - d09 = _mm256_max_epu32(d08, d09); - d08 = _mm256_min_epu32(d08, tmp); + d09 = _mm256_max_epi32(d08, d09); + d08 = _mm256_min_epi32(d08, tmp); tmp = d10; - d10 = _mm256_max_epu32(d07, d10); - d07 = _mm256_min_epu32(d07, tmp); + d10 = _mm256_max_epi32(d07, d10); + d07 = _mm256_min_epi32(d07, tmp); tmp = d11; - d11 = _mm256_max_epu32(d06, d11); - d06 = _mm256_min_epu32(d06, tmp); + d11 = _mm256_max_epi32(d06, d11); + d06 = _mm256_min_epi32(d06, tmp); tmp = d12; - d12 = _mm256_max_epu32(d05, d12); - d05 = _mm256_min_epu32(d05, tmp); + d12 = _mm256_max_epi32(d05, d12); + d05 = _mm256_min_epi32(d05, tmp); tmp = d13; - d13 = _mm256_max_epu32(d04, d13); - d04 = _mm256_min_epu32(d04, tmp); + d13 = _mm256_max_epi32(d04, d13); + d04 = _mm256_min_epi32(d04, tmp); tmp = d14; - d14 = _mm256_max_epu32(d03, d14); - d03 = _mm256_min_epu32(d03, tmp); + d14 = _mm256_max_epi32(d03, d14); + d03 = _mm256_min_epi32(d03, tmp); sort_08v_merge_descending(d01, d02, d03, d04, d05, d06, d07, d08); sort_06v_merge_descending(d09, d10, d11, d12, d13, d14); @@ -989,38 +989,38 @@ template<> struct bitonic { tmp = d09; - d09 = _mm256_max_epu32(d08, d09); - d08 = _mm256_min_epu32(d08, tmp); + d09 = _mm256_max_epi32(d08, d09); + d08 = _mm256_min_epi32(d08, tmp); tmp = d10; - d10 = _mm256_max_epu32(d07, d10); - d07 = _mm256_min_epu32(d07, tmp); + d10 = _mm256_max_epi32(d07, d10); + d07 = _mm256_min_epi32(d07, tmp); tmp = d11; - d11 = _mm256_max_epu32(d06, d11); - d06 = _mm256_min_epu32(d06, tmp); + d11 = _mm256_max_epi32(d06, d11); + d06 = _mm256_min_epi32(d06, tmp); tmp = d12; - d12 = _mm256_max_epu32(d05, d12); - d05 = _mm256_min_epu32(d05, tmp); + d12 = _mm256_max_epi32(d05, d12); + d05 = _mm256_min_epi32(d05, tmp); tmp = d13; - d13 = _mm256_max_epu32(d04, d13); - d04 = _mm256_min_epu32(d04, tmp); + d13 = _mm256_max_epi32(d04, d13); + d04 = _mm256_min_epi32(d04, tmp); tmp = d14; - d14 = _mm256_max_epu32(d03, d14); - d03 = _mm256_min_epu32(d03, tmp); + d14 = _mm256_max_epi32(d03, d14); + d03 = _mm256_min_epi32(d03, tmp); tmp = d15; - d15 = _mm256_max_epu32(d02, d15); - d02 = _mm256_min_epu32(d02, tmp); + d15 = _mm256_max_epi32(d02, d15); + d02 = _mm256_min_epi32(d02, tmp); sort_08v_merge_ascending(d01, d02, d03, d04, d05, d06, d07, d08); sort_07v_merge_ascending(d09, d10, d11, d12, d13, d14, d15); @@ -1033,38 +1033,38 @@ template<> struct bitonic { tmp = d09; - d09 = _mm256_max_epu32(d08, d09); - d08 = _mm256_min_epu32(d08, tmp); + d09 = _mm256_max_epi32(d08, d09); + d08 = _mm256_min_epi32(d08, tmp); tmp = d10; - d10 = _mm256_max_epu32(d07, d10); - d07 = _mm256_min_epu32(d07, tmp); + d10 = _mm256_max_epi32(d07, d10); + d07 = _mm256_min_epi32(d07, tmp); tmp = d11; - d11 = _mm256_max_epu32(d06, d11); - d06 = _mm256_min_epu32(d06, tmp); + d11 = _mm256_max_epi32(d06, d11); + d06 = _mm256_min_epi32(d06, tmp); tmp = d12; - d12 = _mm256_max_epu32(d05, d12); - d05 = _mm256_min_epu32(d05, tmp); + d12 = _mm256_max_epi32(d05, d12); + d05 = _mm256_min_epi32(d05, tmp); tmp = d13; - d13 = _mm256_max_epu32(d04, d13); - d04 = _mm256_min_epu32(d04, tmp); + d13 = _mm256_max_epi32(d04, d13); + d04 = 
_mm256_min_epi32(d04, tmp); tmp = d14; - d14 = _mm256_max_epu32(d03, d14); - d03 = _mm256_min_epu32(d03, tmp); + d14 = _mm256_max_epi32(d03, d14); + d03 = _mm256_min_epi32(d03, tmp); tmp = d15; - d15 = _mm256_max_epu32(d02, d15); - d02 = _mm256_min_epu32(d02, tmp); + d15 = _mm256_max_epi32(d02, d15); + d02 = _mm256_min_epi32(d02, tmp); sort_08v_merge_descending(d01, d02, d03, d04, d05, d06, d07, d08); sort_07v_merge_descending(d09, d10, d11, d12, d13, d14, d15); @@ -1077,43 +1077,43 @@ template<> struct bitonic { tmp = d09; - d09 = _mm256_max_epu32(d08, d09); - d08 = _mm256_min_epu32(d08, tmp); + d09 = _mm256_max_epi32(d08, d09); + d08 = _mm256_min_epi32(d08, tmp); tmp = d10; - d10 = _mm256_max_epu32(d07, d10); - d07 = _mm256_min_epu32(d07, tmp); + d10 = _mm256_max_epi32(d07, d10); + d07 = _mm256_min_epi32(d07, tmp); tmp = d11; - d11 = _mm256_max_epu32(d06, d11); - d06 = _mm256_min_epu32(d06, tmp); + d11 = _mm256_max_epi32(d06, d11); + d06 = _mm256_min_epi32(d06, tmp); tmp = d12; - d12 = _mm256_max_epu32(d05, d12); - d05 = _mm256_min_epu32(d05, tmp); + d12 = _mm256_max_epi32(d05, d12); + d05 = _mm256_min_epi32(d05, tmp); tmp = d13; - d13 = _mm256_max_epu32(d04, d13); - d04 = _mm256_min_epu32(d04, tmp); + d13 = _mm256_max_epi32(d04, d13); + d04 = _mm256_min_epi32(d04, tmp); tmp = d14; - d14 = _mm256_max_epu32(d03, d14); - d03 = _mm256_min_epu32(d03, tmp); + d14 = _mm256_max_epi32(d03, d14); + d03 = _mm256_min_epi32(d03, tmp); tmp = d15; - d15 = _mm256_max_epu32(d02, d15); - d02 = _mm256_min_epu32(d02, tmp); + d15 = _mm256_max_epi32(d02, d15); + d02 = _mm256_min_epi32(d02, tmp); tmp = d16; - d16 = _mm256_max_epu32(d01, d16); - d01 = _mm256_min_epu32(d01, tmp); + d16 = _mm256_max_epi32(d01, d16); + d01 = _mm256_min_epi32(d01, tmp); sort_08v_merge_ascending(d01, d02, d03, d04, d05, d06, d07, d08); sort_08v_merge_ascending(d09, d10, d11, d12, d13, d14, d15, d16); @@ -1126,55 +1126,55 @@ template<> struct bitonic { tmp = d09; - d09 = _mm256_max_epu32(d08, d09); - d08 = _mm256_min_epu32(d08, tmp); + d09 = _mm256_max_epi32(d08, d09); + d08 = _mm256_min_epi32(d08, tmp); tmp = d10; - d10 = _mm256_max_epu32(d07, d10); - d07 = _mm256_min_epu32(d07, tmp); + d10 = _mm256_max_epi32(d07, d10); + d07 = _mm256_min_epi32(d07, tmp); tmp = d11; - d11 = _mm256_max_epu32(d06, d11); - d06 = _mm256_min_epu32(d06, tmp); + d11 = _mm256_max_epi32(d06, d11); + d06 = _mm256_min_epi32(d06, tmp); tmp = d12; - d12 = _mm256_max_epu32(d05, d12); - d05 = _mm256_min_epu32(d05, tmp); + d12 = _mm256_max_epi32(d05, d12); + d05 = _mm256_min_epi32(d05, tmp); tmp = d13; - d13 = _mm256_max_epu32(d04, d13); - d04 = _mm256_min_epu32(d04, tmp); + d13 = _mm256_max_epi32(d04, d13); + d04 = _mm256_min_epi32(d04, tmp); tmp = d14; - d14 = _mm256_max_epu32(d03, d14); - d03 = _mm256_min_epu32(d03, tmp); + d14 = _mm256_max_epi32(d03, d14); + d03 = _mm256_min_epi32(d03, tmp); tmp = d15; - d15 = _mm256_max_epu32(d02, d15); - d02 = _mm256_min_epu32(d02, tmp); + d15 = _mm256_max_epi32(d02, d15); + d02 = _mm256_min_epi32(d02, tmp); tmp = d16; - d16 = _mm256_max_epu32(d01, d16); - d01 = _mm256_min_epu32(d01, tmp); + d16 = _mm256_max_epi32(d01, d16); + d01 = _mm256_min_epi32(d01, tmp); sort_08v_merge_descending(d01, d02, d03, d04, d05, d06, d07, d08); sort_08v_merge_descending(d09, d10, d11, d12, d13, d14, d15, d16); } - static NOINLINE void sort_01v(uint32_t *ptr) { + static NOINLINE void sort_01v(int32_t *ptr) { __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0);; sort_01v_ascending(d01); _mm256_storeu_si256((__m256i *) ptr + 0, d01); } - 
static NOINLINE void sort_02v(uint32_t *ptr) { + static NOINLINE void sort_02v(int32_t *ptr) { __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0);; __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1);; sort_02v_ascending(d01, d02); @@ -1182,7 +1182,7 @@ template<> struct bitonic { _mm256_storeu_si256((__m256i *) ptr + 1, d02); } - static NOINLINE void sort_03v(uint32_t *ptr) { + static NOINLINE void sort_03v(int32_t *ptr) { __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0);; __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1);; __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2);; @@ -1192,7 +1192,7 @@ template<> struct bitonic { _mm256_storeu_si256((__m256i *) ptr + 2, d03); } - static NOINLINE void sort_04v(uint32_t *ptr) { + static NOINLINE void sort_04v(int32_t *ptr) { __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0);; __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1);; __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2);; @@ -1204,7 +1204,7 @@ template<> struct bitonic { _mm256_storeu_si256((__m256i *) ptr + 3, d04); } - static NOINLINE void sort_05v(uint32_t *ptr) { + static NOINLINE void sort_05v(int32_t *ptr) { __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0);; __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1);; __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2);; @@ -1218,7 +1218,7 @@ template<> struct bitonic { _mm256_storeu_si256((__m256i *) ptr + 4, d05); } - static NOINLINE void sort_06v(uint32_t *ptr) { + static NOINLINE void sort_06v(int32_t *ptr) { __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0);; __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1);; __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2);; @@ -1234,7 +1234,7 @@ template<> struct bitonic { _mm256_storeu_si256((__m256i *) ptr + 5, d06); } - static NOINLINE void sort_07v(uint32_t *ptr) { + static NOINLINE void sort_07v(int32_t *ptr) { __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0);; __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1);; __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2);; @@ -1252,7 +1252,7 @@ template<> struct bitonic { _mm256_storeu_si256((__m256i *) ptr + 6, d07); } - static NOINLINE void sort_08v(uint32_t *ptr) { + static NOINLINE void sort_08v(int32_t *ptr) { __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0);; __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1);; __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2);; @@ -1272,7 +1272,7 @@ template<> struct bitonic { _mm256_storeu_si256((__m256i *) ptr + 7, d08); } - static NOINLINE void sort_09v(uint32_t *ptr) { + static NOINLINE void sort_09v(int32_t *ptr) { __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0);; __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1);; __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2);; @@ -1294,7 +1294,7 @@ template<> struct bitonic { _mm256_storeu_si256((__m256i *) ptr + 8, d09); } - static NOINLINE void sort_10v(uint32_t *ptr) { + static NOINLINE void sort_10v(int32_t *ptr) { __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0);; __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1);; __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2);; @@ -1318,7 +1318,7 @@ template<> struct bitonic { _mm256_storeu_si256((__m256i *) ptr + 9, d10); } - static NOINLINE void sort_11v(uint32_t *ptr) { + static NOINLINE void sort_11v(int32_t *ptr) { __m256i d01 = _mm256_lddqu_si256((__m256i const *) 
ptr + 0);; __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1);; __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2);; @@ -1344,7 +1344,7 @@ template<> struct bitonic { _mm256_storeu_si256((__m256i *) ptr + 10, d11); } - static NOINLINE void sort_12v(uint32_t *ptr) { + static NOINLINE void sort_12v(int32_t *ptr) { __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0);; __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1);; __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2);; @@ -1372,7 +1372,7 @@ template<> struct bitonic { _mm256_storeu_si256((__m256i *) ptr + 11, d12); } - static NOINLINE void sort_13v(uint32_t *ptr) { + static NOINLINE void sort_13v(int32_t *ptr) { __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0);; __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1);; __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2);; @@ -1402,7 +1402,7 @@ template<> struct bitonic { _mm256_storeu_si256((__m256i *) ptr + 12, d13); } - static NOINLINE void sort_14v(uint32_t *ptr) { + static NOINLINE void sort_14v(int32_t *ptr) { __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0);; __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1);; __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2);; @@ -1434,7 +1434,7 @@ template<> struct bitonic { _mm256_storeu_si256((__m256i *) ptr + 13, d14); } - static NOINLINE void sort_15v(uint32_t *ptr) { + static NOINLINE void sort_15v(int32_t *ptr) { __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0);; __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1);; __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2);; @@ -1468,7 +1468,7 @@ template<> struct bitonic { _mm256_storeu_si256((__m256i *) ptr + 14, d15); } - static NOINLINE void sort_16v(uint32_t *ptr) { + static NOINLINE void sort_16v(int32_t *ptr) { __m256i d01 = _mm256_lddqu_si256((__m256i const *) ptr + 0);; __m256i d02 = _mm256_lddqu_si256((__m256i const *) ptr + 1);; __m256i d03 = _mm256_lddqu_si256((__m256i const *) ptr + 2);; @@ -1503,7 +1503,7 @@ template<> struct bitonic { _mm256_storeu_si256((__m256i *) ptr + 14, d15); _mm256_storeu_si256((__m256i *) ptr + 15, d16); } - static void sort(uint32_t *ptr, size_t length); + static void sort(int32_t *ptr, size_t length); }; } diff --git a/src/coreclr/src/gc/vxsort/smallsort/bitonic_sort.AVX512.uint32_t.generated.cpp b/src/coreclr/src/gc/vxsort/smallsort/bitonic_sort.AVX512.int32_t.generated.cpp similarity index 82% rename from src/coreclr/src/gc/vxsort/smallsort/bitonic_sort.AVX512.uint32_t.generated.cpp rename to src/coreclr/src/gc/vxsort/smallsort/bitonic_sort.AVX512.int32_t.generated.cpp index 59271c70e9760a..99dbb9efa4068f 100644 --- a/src/coreclr/src/gc/vxsort/smallsort/bitonic_sort.AVX512.uint32_t.generated.cpp +++ b/src/coreclr/src/gc/vxsort/smallsort/bitonic_sort.AVX512.int32_t.generated.cpp @@ -1,9 +1,9 @@ #include "common.h" -#include "bitonic_sort.AVX512.uint32_t.generated.h" +#include "bitonic_sort.AVX512.int32_t.generated.h" using namespace vxsort; -void vxsort::smallsort::bitonic::sort(uint32_t *ptr, size_t length) { +void vxsort::smallsort::bitonic::sort(int32_t *ptr, size_t length) { const int N = 16; switch(length / N) { diff --git a/src/coreclr/src/gc/vxsort/smallsort/bitonic_sort.AVX512.uint32_t.generated.h b/src/coreclr/src/gc/vxsort/smallsort/bitonic_sort.AVX512.int32_t.generated.h similarity index 73% rename from src/coreclr/src/gc/vxsort/smallsort/bitonic_sort.AVX512.uint32_t.generated.h rename to 
src/coreclr/src/gc/vxsort/smallsort/bitonic_sort.AVX512.int32_t.generated.h index c0fb49eb564cba..310dcbf0b88783 100644 --- a/src/coreclr/src/gc/vxsort/smallsort/bitonic_sort.AVX512.uint32_t.generated.h +++ b/src/coreclr/src/gc/vxsort/smallsort/bitonic_sort.AVX512.int32_t.generated.h @@ -6,8 +6,8 @@ // the code-generator that generated this source file instead. ///////////////////////////////////////////////////////////////////////////// -#ifndef BITONIC_SORT_AVX512_UINT32_T_H -#define BITONIC_SORT_AVX512_UINT32_T_H +#ifndef BITONIC_SORT_AVX512_INT32_T_H +#define BITONIC_SORT_AVX512_INT32_T_H #ifdef __GNUC__ @@ -31,132 +31,132 @@ namespace vxsort { namespace smallsort { -template<> struct bitonic { +template<> struct bitonic { public: static INLINE void sort_01v_ascending(__m512i& d01) { __m512i min, s; s = _mm512_shuffle_epi32(d01, _MM_PERM_CDAB); - min = _mm512_min_epu32(s, d01); - d01 = _mm512_mask_max_epu32(min, 0xAAAA, s, d01); + min = _mm512_min_epi32(s, d01); + d01 = _mm512_mask_max_epi32(min, 0xAAAA, s, d01); s = _mm512_shuffle_epi32(d01, _MM_PERM_ABCD); - min = _mm512_min_epu32(s, d01); - d01 = _mm512_mask_max_epu32(min, 0xCCCC, s, d01); + min = _mm512_min_epi32(s, d01); + d01 = _mm512_mask_max_epi32(min, 0xCCCC, s, d01); s = _mm512_shuffle_epi32(d01, _MM_PERM_CDAB); - min = _mm512_min_epu32(s, d01); - d01 = _mm512_mask_max_epu32(min, 0xAAAA, s, d01); + min = _mm512_min_epi32(s, d01); + d01 = _mm512_mask_max_epi32(min, 0xAAAA, s, d01); s = _mm512_permutex_epi64(_mm512_shuffle_epi32(d01, _MM_PERM_ABCD), _MM_PERM_BADC); - min = _mm512_min_epu32(s, d01); - d01 = _mm512_mask_max_epu32(min, 0xF0F0, s, d01); + min = _mm512_min_epi32(s, d01); + d01 = _mm512_mask_max_epi32(min, 0xF0F0, s, d01); s = _mm512_shuffle_epi32(d01, _MM_PERM_BADC); - min = _mm512_min_epu32(s, d01); - d01 = _mm512_mask_max_epu32(min, 0xCCCC, s, d01); + min = _mm512_min_epi32(s, d01); + d01 = _mm512_mask_max_epi32(min, 0xCCCC, s, d01); s = _mm512_shuffle_epi32(d01, _MM_PERM_CDAB); - min = _mm512_min_epu32(s, d01); - d01 = _mm512_mask_max_epu32(min, 0xAAAA, s, d01); + min = _mm512_min_epi32(s, d01); + d01 = _mm512_mask_max_epi32(min, 0xAAAA, s, d01); s = _mm512_shuffle_i64x2(_mm512_shuffle_epi32(d01, _MM_PERM_ABCD), _mm512_shuffle_epi32(d01, _MM_PERM_ABCD), _MM_PERM_ABCD); - min = _mm512_min_epu32(s, d01); - d01 = _mm512_mask_max_epu32(min, 0xFF00, s, d01); + min = _mm512_min_epi32(s, d01); + d01 = _mm512_mask_max_epi32(min, 0xFF00, s, d01); s = _mm512_permutex_epi64(d01, _MM_PERM_BADC); - min = _mm512_min_epu32(s, d01); - d01 = _mm512_mask_max_epu32(min, 0xF0F0, s, d01); + min = _mm512_min_epi32(s, d01); + d01 = _mm512_mask_max_epi32(min, 0xF0F0, s, d01); s = _mm512_shuffle_epi32(d01, _MM_PERM_BADC); - min = _mm512_min_epu32(s, d01); - d01 = _mm512_mask_max_epu32(min, 0xCCCC, s, d01); + min = _mm512_min_epi32(s, d01); + d01 = _mm512_mask_max_epi32(min, 0xCCCC, s, d01); s = _mm512_shuffle_epi32(d01, _MM_PERM_CDAB); - min = _mm512_min_epu32(s, d01); - d01 = _mm512_mask_max_epu32(min, 0xAAAA, s, d01); + min = _mm512_min_epi32(s, d01); + d01 = _mm512_mask_max_epi32(min, 0xAAAA, s, d01); } static INLINE void sort_01v_merge_ascending(__m512i& d01) { __m512i min, s; s = _mm512_shuffle_i64x2(d01, d01, _MM_PERM_BADC); - min = _mm512_min_epu32(s, d01); - d01 = _mm512_mask_max_epu32(min, 0xFF00, s, d01); + min = _mm512_min_epi32(s, d01); + d01 = _mm512_mask_max_epi32(min, 0xFF00, s, d01); s = _mm512_permutex_epi64(d01, _MM_PERM_BADC); - min = _mm512_min_epu32(s, d01); - d01 = _mm512_mask_max_epu32(min, 0xF0F0, s, d01); + min = 
_mm512_min_epi32(s, d01); + d01 = _mm512_mask_max_epi32(min, 0xF0F0, s, d01); s = _mm512_shuffle_epi32(d01, _MM_PERM_BADC); - min = _mm512_min_epu32(s, d01); - d01 = _mm512_mask_max_epu32(min, 0xCCCC, s, d01); + min = _mm512_min_epi32(s, d01); + d01 = _mm512_mask_max_epi32(min, 0xCCCC, s, d01); s = _mm512_shuffle_epi32(d01, _MM_PERM_CDAB); - min = _mm512_min_epu32(s, d01); - d01 = _mm512_mask_max_epu32(min, 0xAAAA, s, d01); + min = _mm512_min_epi32(s, d01); + d01 = _mm512_mask_max_epi32(min, 0xAAAA, s, d01); } static INLINE void sort_01v_descending(__m512i& d01) { __m512i min, s; s = _mm512_shuffle_epi32(d01, _MM_PERM_CDAB); - min = _mm512_min_epu32(s, d01); - d01 = _mm512_mask_max_epu32(min, 0x5555, s, d01); + min = _mm512_min_epi32(s, d01); + d01 = _mm512_mask_max_epi32(min, 0x5555, s, d01); s = _mm512_shuffle_epi32(d01, _MM_PERM_ABCD); - min = _mm512_min_epu32(s, d01); - d01 = _mm512_mask_max_epu32(min, 0x3333, s, d01); + min = _mm512_min_epi32(s, d01); + d01 = _mm512_mask_max_epi32(min, 0x3333, s, d01); s = _mm512_shuffle_epi32(d01, _MM_PERM_CDAB); - min = _mm512_min_epu32(s, d01); - d01 = _mm512_mask_max_epu32(min, 0x5555, s, d01); + min = _mm512_min_epi32(s, d01); + d01 = _mm512_mask_max_epi32(min, 0x5555, s, d01); s = _mm512_permutex_epi64(_mm512_shuffle_epi32(d01, _MM_PERM_ABCD), _MM_PERM_BADC); - min = _mm512_min_epu32(s, d01); - d01 = _mm512_mask_max_epu32(min, 0x0F0F, s, d01); + min = _mm512_min_epi32(s, d01); + d01 = _mm512_mask_max_epi32(min, 0x0F0F, s, d01); s = _mm512_shuffle_epi32(d01, _MM_PERM_BADC); - min = _mm512_min_epu32(s, d01); - d01 = _mm512_mask_max_epu32(min, 0x3333, s, d01); + min = _mm512_min_epi32(s, d01); + d01 = _mm512_mask_max_epi32(min, 0x3333, s, d01); s = _mm512_shuffle_epi32(d01, _MM_PERM_CDAB); - min = _mm512_min_epu32(s, d01); - d01 = _mm512_mask_max_epu32(min, 0x5555, s, d01); + min = _mm512_min_epi32(s, d01); + d01 = _mm512_mask_max_epi32(min, 0x5555, s, d01); s = _mm512_shuffle_i64x2(_mm512_shuffle_epi32(d01, _MM_PERM_ABCD), _mm512_shuffle_epi32(d01, _MM_PERM_ABCD), _MM_PERM_ABCD); - min = _mm512_min_epu32(s, d01); - d01 = _mm512_mask_max_epu32(min, 0x00FF, s, d01); + min = _mm512_min_epi32(s, d01); + d01 = _mm512_mask_max_epi32(min, 0x00FF, s, d01); s = _mm512_permutex_epi64(d01, _MM_PERM_BADC); - min = _mm512_min_epu32(s, d01); - d01 = _mm512_mask_max_epu32(min, 0x0F0F, s, d01); + min = _mm512_min_epi32(s, d01); + d01 = _mm512_mask_max_epi32(min, 0x0F0F, s, d01); s = _mm512_shuffle_epi32(d01, _MM_PERM_BADC); - min = _mm512_min_epu32(s, d01); - d01 = _mm512_mask_max_epu32(min, 0x3333, s, d01); + min = _mm512_min_epi32(s, d01); + d01 = _mm512_mask_max_epi32(min, 0x3333, s, d01); s = _mm512_shuffle_epi32(d01, _MM_PERM_CDAB); - min = _mm512_min_epu32(s, d01); - d01 = _mm512_mask_max_epu32(min, 0x5555, s, d01); + min = _mm512_min_epi32(s, d01); + d01 = _mm512_mask_max_epi32(min, 0x5555, s, d01); } static INLINE void sort_01v_merge_descending(__m512i& d01) { __m512i min, s; s = _mm512_shuffle_i64x2(d01, d01, _MM_PERM_BADC); - min = _mm512_min_epu32(s, d01); - d01 = _mm512_mask_max_epu32(min, 0x00FF, s, d01); + min = _mm512_min_epi32(s, d01); + d01 = _mm512_mask_max_epi32(min, 0x00FF, s, d01); s = _mm512_permutex_epi64(d01, _MM_PERM_BADC); - min = _mm512_min_epu32(s, d01); - d01 = _mm512_mask_max_epu32(min, 0x0F0F, s, d01); + min = _mm512_min_epi32(s, d01); + d01 = _mm512_mask_max_epi32(min, 0x0F0F, s, d01); s = _mm512_shuffle_epi32(d01, _MM_PERM_BADC); - min = _mm512_min_epu32(s, d01); - d01 = _mm512_mask_max_epu32(min, 0x3333, s, d01); + min = 
_mm512_min_epi32(s, d01); + d01 = _mm512_mask_max_epi32(min, 0x3333, s, d01); s = _mm512_shuffle_epi32(d01, _MM_PERM_CDAB); - min = _mm512_min_epu32(s, d01); - d01 = _mm512_mask_max_epu32(min, 0x5555, s, d01); + min = _mm512_min_epi32(s, d01); + d01 = _mm512_mask_max_epi32(min, 0x5555, s, d01); } static INLINE void sort_02v_ascending(__m512i& d01, __m512i& d02) { __m512i tmp; @@ -165,8 +165,8 @@ template<> struct bitonic { sort_01v_descending(d02); tmp = d02; - d02 = _mm512_max_epu32(d01, d02); - d01 = _mm512_min_epu32(d01, tmp); + d02 = _mm512_max_epi32(d01, d02); + d01 = _mm512_min_epi32(d01, tmp); sort_01v_merge_ascending(d01); sort_01v_merge_ascending(d02); @@ -178,8 +178,8 @@ template<> struct bitonic { sort_01v_ascending(d02); tmp = d02; - d02 = _mm512_max_epu32(d01, d02); - d01 = _mm512_min_epu32(d01, tmp); + d02 = _mm512_max_epi32(d01, d02); + d01 = _mm512_min_epi32(d01, tmp); sort_01v_merge_descending(d01); sort_01v_merge_descending(d02); @@ -188,8 +188,8 @@ template<> struct bitonic { __m512i tmp; tmp = d01; - d01 = _mm512_min_epu32(d02, d01); - d02 = _mm512_max_epu32(d02, tmp); + d01 = _mm512_min_epi32(d02, d01); + d02 = _mm512_max_epi32(d02, tmp); sort_01v_merge_ascending(d01); sort_01v_merge_ascending(d02); @@ -198,8 +198,8 @@ template<> struct bitonic { __m512i tmp; tmp = d01; - d01 = _mm512_min_epu32(d02, d01); - d02 = _mm512_max_epu32(d02, tmp); + d01 = _mm512_min_epi32(d02, d01); + d02 = _mm512_max_epi32(d02, tmp); sort_01v_merge_descending(d01); sort_01v_merge_descending(d02); @@ -211,8 +211,8 @@ template<> struct bitonic { sort_01v_descending(d03); tmp = d03; - d03 = _mm512_max_epu32(d02, d03); - d02 = _mm512_min_epu32(d02, tmp); + d03 = _mm512_max_epi32(d02, d03); + d02 = _mm512_min_epi32(d02, tmp); sort_02v_merge_ascending(d01, d02); sort_01v_merge_ascending(d03); @@ -224,8 +224,8 @@ template<> struct bitonic { sort_01v_ascending(d03); tmp = d03; - d03 = _mm512_max_epu32(d02, d03); - d02 = _mm512_min_epu32(d02, tmp); + d03 = _mm512_max_epi32(d02, d03); + d02 = _mm512_min_epi32(d02, tmp); sort_02v_merge_descending(d01, d02); sort_01v_merge_descending(d03); @@ -234,8 +234,8 @@ template<> struct bitonic { __m512i tmp; tmp = d01; - d01 = _mm512_min_epu32(d03, d01); - d03 = _mm512_max_epu32(d03, tmp); + d01 = _mm512_min_epi32(d03, d01); + d03 = _mm512_max_epi32(d03, tmp); sort_02v_merge_ascending(d01, d02); sort_01v_merge_ascending(d03); @@ -244,8 +244,8 @@ template<> struct bitonic { __m512i tmp; tmp = d01; - d01 = _mm512_min_epu32(d03, d01); - d03 = _mm512_max_epu32(d03, tmp); + d01 = _mm512_min_epi32(d03, d01); + d03 = _mm512_max_epi32(d03, tmp); sort_02v_merge_descending(d01, d02); sort_01v_merge_descending(d03); @@ -257,12 +257,12 @@ template<> struct bitonic { sort_02v_descending(d03, d04); tmp = d03; - d03 = _mm512_max_epu32(d02, d03); - d02 = _mm512_min_epu32(d02, tmp); + d03 = _mm512_max_epi32(d02, d03); + d02 = _mm512_min_epi32(d02, tmp); tmp = d04; - d04 = _mm512_max_epu32(d01, d04); - d01 = _mm512_min_epu32(d01, tmp); + d04 = _mm512_max_epi32(d01, d04); + d01 = _mm512_min_epi32(d01, tmp); sort_02v_merge_ascending(d01, d02); sort_02v_merge_ascending(d03, d04); @@ -274,12 +274,12 @@ template<> struct bitonic { sort_02v_ascending(d03, d04); tmp = d03; - d03 = _mm512_max_epu32(d02, d03); - d02 = _mm512_min_epu32(d02, tmp); + d03 = _mm512_max_epi32(d02, d03); + d02 = _mm512_min_epi32(d02, tmp); tmp = d04; - d04 = _mm512_max_epu32(d01, d04); - d01 = _mm512_min_epu32(d01, tmp); + d04 = _mm512_max_epi32(d01, d04); + d01 = _mm512_min_epi32(d01, tmp); 
sort_02v_merge_descending(d01, d02); sort_02v_merge_descending(d03, d04); @@ -288,12 +288,12 @@ template<> struct bitonic { __m512i tmp; tmp = d01; - d01 = _mm512_min_epu32(d03, d01); - d03 = _mm512_max_epu32(d03, tmp); + d01 = _mm512_min_epi32(d03, d01); + d03 = _mm512_max_epi32(d03, tmp); tmp = d02; - d02 = _mm512_min_epu32(d04, d02); - d04 = _mm512_max_epu32(d04, tmp); + d02 = _mm512_min_epi32(d04, d02); + d04 = _mm512_max_epi32(d04, tmp); sort_02v_merge_ascending(d01, d02); sort_02v_merge_ascending(d03, d04); @@ -302,12 +302,12 @@ template<> struct bitonic { __m512i tmp; tmp = d01; - d01 = _mm512_min_epu32(d03, d01); - d03 = _mm512_max_epu32(d03, tmp); + d01 = _mm512_min_epi32(d03, d01); + d03 = _mm512_max_epi32(d03, tmp); tmp = d02; - d02 = _mm512_min_epu32(d04, d02); - d04 = _mm512_max_epu32(d04, tmp); + d02 = _mm512_min_epi32(d04, d02); + d04 = _mm512_max_epi32(d04, tmp); sort_02v_merge_descending(d01, d02); sort_02v_merge_descending(d03, d04); @@ -319,8 +319,8 @@ template<> struct bitonic { sort_01v_descending(d05); tmp = d05; - d05 = _mm512_max_epu32(d04, d05); - d04 = _mm512_min_epu32(d04, tmp); + d05 = _mm512_max_epi32(d04, d05); + d04 = _mm512_min_epi32(d04, tmp); sort_04v_merge_ascending(d01, d02, d03, d04); sort_01v_merge_ascending(d05); @@ -332,8 +332,8 @@ template<> struct bitonic { sort_01v_ascending(d05); tmp = d05; - d05 = _mm512_max_epu32(d04, d05); - d04 = _mm512_min_epu32(d04, tmp); + d05 = _mm512_max_epi32(d04, d05); + d04 = _mm512_min_epi32(d04, tmp); sort_04v_merge_descending(d01, d02, d03, d04); sort_01v_merge_descending(d05); @@ -342,8 +342,8 @@ template<> struct bitonic { __m512i tmp; tmp = d01; - d01 = _mm512_min_epu32(d05, d01); - d05 = _mm512_max_epu32(d05, tmp); + d01 = _mm512_min_epi32(d05, d01); + d05 = _mm512_max_epi32(d05, tmp); sort_04v_merge_ascending(d01, d02, d03, d04); sort_01v_merge_ascending(d05); @@ -352,8 +352,8 @@ template<> struct bitonic { __m512i tmp; tmp = d01; - d01 = _mm512_min_epu32(d05, d01); - d05 = _mm512_max_epu32(d05, tmp); + d01 = _mm512_min_epi32(d05, d01); + d05 = _mm512_max_epi32(d05, tmp); sort_04v_merge_descending(d01, d02, d03, d04); sort_01v_merge_descending(d05); @@ -365,12 +365,12 @@ template<> struct bitonic { sort_02v_descending(d05, d06); tmp = d05; - d05 = _mm512_max_epu32(d04, d05); - d04 = _mm512_min_epu32(d04, tmp); + d05 = _mm512_max_epi32(d04, d05); + d04 = _mm512_min_epi32(d04, tmp); tmp = d06; - d06 = _mm512_max_epu32(d03, d06); - d03 = _mm512_min_epu32(d03, tmp); + d06 = _mm512_max_epi32(d03, d06); + d03 = _mm512_min_epi32(d03, tmp); sort_04v_merge_ascending(d01, d02, d03, d04); sort_02v_merge_ascending(d05, d06); @@ -382,12 +382,12 @@ template<> struct bitonic { sort_02v_ascending(d05, d06); tmp = d05; - d05 = _mm512_max_epu32(d04, d05); - d04 = _mm512_min_epu32(d04, tmp); + d05 = _mm512_max_epi32(d04, d05); + d04 = _mm512_min_epi32(d04, tmp); tmp = d06; - d06 = _mm512_max_epu32(d03, d06); - d03 = _mm512_min_epu32(d03, tmp); + d06 = _mm512_max_epi32(d03, d06); + d03 = _mm512_min_epi32(d03, tmp); sort_04v_merge_descending(d01, d02, d03, d04); sort_02v_merge_descending(d05, d06); @@ -396,12 +396,12 @@ template<> struct bitonic { __m512i tmp; tmp = d01; - d01 = _mm512_min_epu32(d05, d01); - d05 = _mm512_max_epu32(d05, tmp); + d01 = _mm512_min_epi32(d05, d01); + d05 = _mm512_max_epi32(d05, tmp); tmp = d02; - d02 = _mm512_min_epu32(d06, d02); - d06 = _mm512_max_epu32(d06, tmp); + d02 = _mm512_min_epi32(d06, d02); + d06 = _mm512_max_epi32(d06, tmp); sort_04v_merge_ascending(d01, d02, d03, d04); 
sort_02v_merge_ascending(d05, d06); @@ -410,12 +410,12 @@ template<> struct bitonic { __m512i tmp; tmp = d01; - d01 = _mm512_min_epu32(d05, d01); - d05 = _mm512_max_epu32(d05, tmp); + d01 = _mm512_min_epi32(d05, d01); + d05 = _mm512_max_epi32(d05, tmp); tmp = d02; - d02 = _mm512_min_epu32(d06, d02); - d06 = _mm512_max_epu32(d06, tmp); + d02 = _mm512_min_epi32(d06, d02); + d06 = _mm512_max_epi32(d06, tmp); sort_04v_merge_descending(d01, d02, d03, d04); sort_02v_merge_descending(d05, d06); @@ -427,16 +427,16 @@ template<> struct bitonic { sort_03v_descending(d05, d06, d07); tmp = d05; - d05 = _mm512_max_epu32(d04, d05); - d04 = _mm512_min_epu32(d04, tmp); + d05 = _mm512_max_epi32(d04, d05); + d04 = _mm512_min_epi32(d04, tmp); tmp = d06; - d06 = _mm512_max_epu32(d03, d06); - d03 = _mm512_min_epu32(d03, tmp); + d06 = _mm512_max_epi32(d03, d06); + d03 = _mm512_min_epi32(d03, tmp); tmp = d07; - d07 = _mm512_max_epu32(d02, d07); - d02 = _mm512_min_epu32(d02, tmp); + d07 = _mm512_max_epi32(d02, d07); + d02 = _mm512_min_epi32(d02, tmp); sort_04v_merge_ascending(d01, d02, d03, d04); sort_03v_merge_ascending(d05, d06, d07); @@ -448,16 +448,16 @@ template<> struct bitonic { sort_03v_ascending(d05, d06, d07); tmp = d05; - d05 = _mm512_max_epu32(d04, d05); - d04 = _mm512_min_epu32(d04, tmp); + d05 = _mm512_max_epi32(d04, d05); + d04 = _mm512_min_epi32(d04, tmp); tmp = d06; - d06 = _mm512_max_epu32(d03, d06); - d03 = _mm512_min_epu32(d03, tmp); + d06 = _mm512_max_epi32(d03, d06); + d03 = _mm512_min_epi32(d03, tmp); tmp = d07; - d07 = _mm512_max_epu32(d02, d07); - d02 = _mm512_min_epu32(d02, tmp); + d07 = _mm512_max_epi32(d02, d07); + d02 = _mm512_min_epi32(d02, tmp); sort_04v_merge_descending(d01, d02, d03, d04); sort_03v_merge_descending(d05, d06, d07); @@ -466,16 +466,16 @@ template<> struct bitonic { __m512i tmp; tmp = d01; - d01 = _mm512_min_epu32(d05, d01); - d05 = _mm512_max_epu32(d05, tmp); + d01 = _mm512_min_epi32(d05, d01); + d05 = _mm512_max_epi32(d05, tmp); tmp = d02; - d02 = _mm512_min_epu32(d06, d02); - d06 = _mm512_max_epu32(d06, tmp); + d02 = _mm512_min_epi32(d06, d02); + d06 = _mm512_max_epi32(d06, tmp); tmp = d03; - d03 = _mm512_min_epu32(d07, d03); - d07 = _mm512_max_epu32(d07, tmp); + d03 = _mm512_min_epi32(d07, d03); + d07 = _mm512_max_epi32(d07, tmp); sort_04v_merge_ascending(d01, d02, d03, d04); sort_03v_merge_ascending(d05, d06, d07); @@ -484,16 +484,16 @@ template<> struct bitonic { __m512i tmp; tmp = d01; - d01 = _mm512_min_epu32(d05, d01); - d05 = _mm512_max_epu32(d05, tmp); + d01 = _mm512_min_epi32(d05, d01); + d05 = _mm512_max_epi32(d05, tmp); tmp = d02; - d02 = _mm512_min_epu32(d06, d02); - d06 = _mm512_max_epu32(d06, tmp); + d02 = _mm512_min_epi32(d06, d02); + d06 = _mm512_max_epi32(d06, tmp); tmp = d03; - d03 = _mm512_min_epu32(d07, d03); - d07 = _mm512_max_epu32(d07, tmp); + d03 = _mm512_min_epi32(d07, d03); + d07 = _mm512_max_epi32(d07, tmp); sort_04v_merge_descending(d01, d02, d03, d04); sort_03v_merge_descending(d05, d06, d07); @@ -505,20 +505,20 @@ template<> struct bitonic { sort_04v_descending(d05, d06, d07, d08); tmp = d05; - d05 = _mm512_max_epu32(d04, d05); - d04 = _mm512_min_epu32(d04, tmp); + d05 = _mm512_max_epi32(d04, d05); + d04 = _mm512_min_epi32(d04, tmp); tmp = d06; - d06 = _mm512_max_epu32(d03, d06); - d03 = _mm512_min_epu32(d03, tmp); + d06 = _mm512_max_epi32(d03, d06); + d03 = _mm512_min_epi32(d03, tmp); tmp = d07; - d07 = _mm512_max_epu32(d02, d07); - d02 = _mm512_min_epu32(d02, tmp); + d07 = _mm512_max_epi32(d02, d07); + d02 = _mm512_min_epi32(d02, 
tmp); tmp = d08; - d08 = _mm512_max_epu32(d01, d08); - d01 = _mm512_min_epu32(d01, tmp); + d08 = _mm512_max_epi32(d01, d08); + d01 = _mm512_min_epi32(d01, tmp); sort_04v_merge_ascending(d01, d02, d03, d04); sort_04v_merge_ascending(d05, d06, d07, d08); @@ -530,20 +530,20 @@ template<> struct bitonic { sort_04v_ascending(d05, d06, d07, d08); tmp = d05; - d05 = _mm512_max_epu32(d04, d05); - d04 = _mm512_min_epu32(d04, tmp); + d05 = _mm512_max_epi32(d04, d05); + d04 = _mm512_min_epi32(d04, tmp); tmp = d06; - d06 = _mm512_max_epu32(d03, d06); - d03 = _mm512_min_epu32(d03, tmp); + d06 = _mm512_max_epi32(d03, d06); + d03 = _mm512_min_epi32(d03, tmp); tmp = d07; - d07 = _mm512_max_epu32(d02, d07); - d02 = _mm512_min_epu32(d02, tmp); + d07 = _mm512_max_epi32(d02, d07); + d02 = _mm512_min_epi32(d02, tmp); tmp = d08; - d08 = _mm512_max_epu32(d01, d08); - d01 = _mm512_min_epu32(d01, tmp); + d08 = _mm512_max_epi32(d01, d08); + d01 = _mm512_min_epi32(d01, tmp); sort_04v_merge_descending(d01, d02, d03, d04); sort_04v_merge_descending(d05, d06, d07, d08); @@ -552,20 +552,20 @@ template<> struct bitonic { __m512i tmp; tmp = d01; - d01 = _mm512_min_epu32(d05, d01); - d05 = _mm512_max_epu32(d05, tmp); + d01 = _mm512_min_epi32(d05, d01); + d05 = _mm512_max_epi32(d05, tmp); tmp = d02; - d02 = _mm512_min_epu32(d06, d02); - d06 = _mm512_max_epu32(d06, tmp); + d02 = _mm512_min_epi32(d06, d02); + d06 = _mm512_max_epi32(d06, tmp); tmp = d03; - d03 = _mm512_min_epu32(d07, d03); - d07 = _mm512_max_epu32(d07, tmp); + d03 = _mm512_min_epi32(d07, d03); + d07 = _mm512_max_epi32(d07, tmp); tmp = d04; - d04 = _mm512_min_epu32(d08, d04); - d08 = _mm512_max_epu32(d08, tmp); + d04 = _mm512_min_epi32(d08, d04); + d08 = _mm512_max_epi32(d08, tmp); sort_04v_merge_ascending(d01, d02, d03, d04); sort_04v_merge_ascending(d05, d06, d07, d08); @@ -574,20 +574,20 @@ template<> struct bitonic { __m512i tmp; tmp = d01; - d01 = _mm512_min_epu32(d05, d01); - d05 = _mm512_max_epu32(d05, tmp); + d01 = _mm512_min_epi32(d05, d01); + d05 = _mm512_max_epi32(d05, tmp); tmp = d02; - d02 = _mm512_min_epu32(d06, d02); - d06 = _mm512_max_epu32(d06, tmp); + d02 = _mm512_min_epi32(d06, d02); + d06 = _mm512_max_epi32(d06, tmp); tmp = d03; - d03 = _mm512_min_epu32(d07, d03); - d07 = _mm512_max_epu32(d07, tmp); + d03 = _mm512_min_epi32(d07, d03); + d07 = _mm512_max_epi32(d07, tmp); tmp = d04; - d04 = _mm512_min_epu32(d08, d04); - d08 = _mm512_max_epu32(d08, tmp); + d04 = _mm512_min_epi32(d08, d04); + d08 = _mm512_max_epi32(d08, tmp); sort_04v_merge_descending(d01, d02, d03, d04); sort_04v_merge_descending(d05, d06, d07, d08); @@ -599,8 +599,8 @@ template<> struct bitonic { sort_01v_descending(d09); tmp = d09; - d09 = _mm512_max_epu32(d08, d09); - d08 = _mm512_min_epu32(d08, tmp); + d09 = _mm512_max_epi32(d08, d09); + d08 = _mm512_min_epi32(d08, tmp); sort_08v_merge_ascending(d01, d02, d03, d04, d05, d06, d07, d08); sort_01v_merge_ascending(d09); @@ -612,8 +612,8 @@ template<> struct bitonic { sort_01v_ascending(d09); tmp = d09; - d09 = _mm512_max_epu32(d08, d09); - d08 = _mm512_min_epu32(d08, tmp); + d09 = _mm512_max_epi32(d08, d09); + d08 = _mm512_min_epi32(d08, tmp); sort_08v_merge_descending(d01, d02, d03, d04, d05, d06, d07, d08); sort_01v_merge_descending(d09); @@ -625,12 +625,12 @@ template<> struct bitonic { sort_02v_descending(d09, d10); tmp = d09; - d09 = _mm512_max_epu32(d08, d09); - d08 = _mm512_min_epu32(d08, tmp); + d09 = _mm512_max_epi32(d08, d09); + d08 = _mm512_min_epi32(d08, tmp); tmp = d10; - d10 = _mm512_max_epu32(d07, d10); - d07 = 
_mm512_min_epu32(d07, tmp); + d10 = _mm512_max_epi32(d07, d10); + d07 = _mm512_min_epi32(d07, tmp); sort_08v_merge_ascending(d01, d02, d03, d04, d05, d06, d07, d08); sort_02v_merge_ascending(d09, d10); @@ -642,12 +642,12 @@ template<> struct bitonic { sort_02v_ascending(d09, d10); tmp = d09; - d09 = _mm512_max_epu32(d08, d09); - d08 = _mm512_min_epu32(d08, tmp); + d09 = _mm512_max_epi32(d08, d09); + d08 = _mm512_min_epi32(d08, tmp); tmp = d10; - d10 = _mm512_max_epu32(d07, d10); - d07 = _mm512_min_epu32(d07, tmp); + d10 = _mm512_max_epi32(d07, d10); + d07 = _mm512_min_epi32(d07, tmp); sort_08v_merge_descending(d01, d02, d03, d04, d05, d06, d07, d08); sort_02v_merge_descending(d09, d10); @@ -659,16 +659,16 @@ template<> struct bitonic { sort_03v_descending(d09, d10, d11); tmp = d09; - d09 = _mm512_max_epu32(d08, d09); - d08 = _mm512_min_epu32(d08, tmp); + d09 = _mm512_max_epi32(d08, d09); + d08 = _mm512_min_epi32(d08, tmp); tmp = d10; - d10 = _mm512_max_epu32(d07, d10); - d07 = _mm512_min_epu32(d07, tmp); + d10 = _mm512_max_epi32(d07, d10); + d07 = _mm512_min_epi32(d07, tmp); tmp = d11; - d11 = _mm512_max_epu32(d06, d11); - d06 = _mm512_min_epu32(d06, tmp); + d11 = _mm512_max_epi32(d06, d11); + d06 = _mm512_min_epi32(d06, tmp); sort_08v_merge_ascending(d01, d02, d03, d04, d05, d06, d07, d08); sort_03v_merge_ascending(d09, d10, d11); @@ -680,16 +680,16 @@ template<> struct bitonic { sort_03v_ascending(d09, d10, d11); tmp = d09; - d09 = _mm512_max_epu32(d08, d09); - d08 = _mm512_min_epu32(d08, tmp); + d09 = _mm512_max_epi32(d08, d09); + d08 = _mm512_min_epi32(d08, tmp); tmp = d10; - d10 = _mm512_max_epu32(d07, d10); - d07 = _mm512_min_epu32(d07, tmp); + d10 = _mm512_max_epi32(d07, d10); + d07 = _mm512_min_epi32(d07, tmp); tmp = d11; - d11 = _mm512_max_epu32(d06, d11); - d06 = _mm512_min_epu32(d06, tmp); + d11 = _mm512_max_epi32(d06, d11); + d06 = _mm512_min_epi32(d06, tmp); sort_08v_merge_descending(d01, d02, d03, d04, d05, d06, d07, d08); sort_03v_merge_descending(d09, d10, d11); @@ -701,20 +701,20 @@ template<> struct bitonic { sort_04v_descending(d09, d10, d11, d12); tmp = d09; - d09 = _mm512_max_epu32(d08, d09); - d08 = _mm512_min_epu32(d08, tmp); + d09 = _mm512_max_epi32(d08, d09); + d08 = _mm512_min_epi32(d08, tmp); tmp = d10; - d10 = _mm512_max_epu32(d07, d10); - d07 = _mm512_min_epu32(d07, tmp); + d10 = _mm512_max_epi32(d07, d10); + d07 = _mm512_min_epi32(d07, tmp); tmp = d11; - d11 = _mm512_max_epu32(d06, d11); - d06 = _mm512_min_epu32(d06, tmp); + d11 = _mm512_max_epi32(d06, d11); + d06 = _mm512_min_epi32(d06, tmp); tmp = d12; - d12 = _mm512_max_epu32(d05, d12); - d05 = _mm512_min_epu32(d05, tmp); + d12 = _mm512_max_epi32(d05, d12); + d05 = _mm512_min_epi32(d05, tmp); sort_08v_merge_ascending(d01, d02, d03, d04, d05, d06, d07, d08); sort_04v_merge_ascending(d09, d10, d11, d12); @@ -726,20 +726,20 @@ template<> struct bitonic { sort_04v_ascending(d09, d10, d11, d12); tmp = d09; - d09 = _mm512_max_epu32(d08, d09); - d08 = _mm512_min_epu32(d08, tmp); + d09 = _mm512_max_epi32(d08, d09); + d08 = _mm512_min_epi32(d08, tmp); tmp = d10; - d10 = _mm512_max_epu32(d07, d10); - d07 = _mm512_min_epu32(d07, tmp); + d10 = _mm512_max_epi32(d07, d10); + d07 = _mm512_min_epi32(d07, tmp); tmp = d11; - d11 = _mm512_max_epu32(d06, d11); - d06 = _mm512_min_epu32(d06, tmp); + d11 = _mm512_max_epi32(d06, d11); + d06 = _mm512_min_epi32(d06, tmp); tmp = d12; - d12 = _mm512_max_epu32(d05, d12); - d05 = _mm512_min_epu32(d05, tmp); + d12 = _mm512_max_epi32(d05, d12); + d05 = _mm512_min_epi32(d05, tmp); 
sort_08v_merge_descending(d01, d02, d03, d04, d05, d06, d07, d08); sort_04v_merge_descending(d09, d10, d11, d12); @@ -751,24 +751,24 @@ template<> struct bitonic { sort_05v_descending(d09, d10, d11, d12, d13); tmp = d09; - d09 = _mm512_max_epu32(d08, d09); - d08 = _mm512_min_epu32(d08, tmp); + d09 = _mm512_max_epi32(d08, d09); + d08 = _mm512_min_epi32(d08, tmp); tmp = d10; - d10 = _mm512_max_epu32(d07, d10); - d07 = _mm512_min_epu32(d07, tmp); + d10 = _mm512_max_epi32(d07, d10); + d07 = _mm512_min_epi32(d07, tmp); tmp = d11; - d11 = _mm512_max_epu32(d06, d11); - d06 = _mm512_min_epu32(d06, tmp); + d11 = _mm512_max_epi32(d06, d11); + d06 = _mm512_min_epi32(d06, tmp); tmp = d12; - d12 = _mm512_max_epu32(d05, d12); - d05 = _mm512_min_epu32(d05, tmp); + d12 = _mm512_max_epi32(d05, d12); + d05 = _mm512_min_epi32(d05, tmp); tmp = d13; - d13 = _mm512_max_epu32(d04, d13); - d04 = _mm512_min_epu32(d04, tmp); + d13 = _mm512_max_epi32(d04, d13); + d04 = _mm512_min_epi32(d04, tmp); sort_08v_merge_ascending(d01, d02, d03, d04, d05, d06, d07, d08); sort_05v_merge_ascending(d09, d10, d11, d12, d13); @@ -780,24 +780,24 @@ template<> struct bitonic { sort_05v_ascending(d09, d10, d11, d12, d13); tmp = d09; - d09 = _mm512_max_epu32(d08, d09); - d08 = _mm512_min_epu32(d08, tmp); + d09 = _mm512_max_epi32(d08, d09); + d08 = _mm512_min_epi32(d08, tmp); tmp = d10; - d10 = _mm512_max_epu32(d07, d10); - d07 = _mm512_min_epu32(d07, tmp); + d10 = _mm512_max_epi32(d07, d10); + d07 = _mm512_min_epi32(d07, tmp); tmp = d11; - d11 = _mm512_max_epu32(d06, d11); - d06 = _mm512_min_epu32(d06, tmp); + d11 = _mm512_max_epi32(d06, d11); + d06 = _mm512_min_epi32(d06, tmp); tmp = d12; - d12 = _mm512_max_epu32(d05, d12); - d05 = _mm512_min_epu32(d05, tmp); + d12 = _mm512_max_epi32(d05, d12); + d05 = _mm512_min_epi32(d05, tmp); tmp = d13; - d13 = _mm512_max_epu32(d04, d13); - d04 = _mm512_min_epu32(d04, tmp); + d13 = _mm512_max_epi32(d04, d13); + d04 = _mm512_min_epi32(d04, tmp); sort_08v_merge_descending(d01, d02, d03, d04, d05, d06, d07, d08); sort_05v_merge_descending(d09, d10, d11, d12, d13); @@ -809,28 +809,28 @@ template<> struct bitonic { sort_06v_descending(d09, d10, d11, d12, d13, d14); tmp = d09; - d09 = _mm512_max_epu32(d08, d09); - d08 = _mm512_min_epu32(d08, tmp); + d09 = _mm512_max_epi32(d08, d09); + d08 = _mm512_min_epi32(d08, tmp); tmp = d10; - d10 = _mm512_max_epu32(d07, d10); - d07 = _mm512_min_epu32(d07, tmp); + d10 = _mm512_max_epi32(d07, d10); + d07 = _mm512_min_epi32(d07, tmp); tmp = d11; - d11 = _mm512_max_epu32(d06, d11); - d06 = _mm512_min_epu32(d06, tmp); + d11 = _mm512_max_epi32(d06, d11); + d06 = _mm512_min_epi32(d06, tmp); tmp = d12; - d12 = _mm512_max_epu32(d05, d12); - d05 = _mm512_min_epu32(d05, tmp); + d12 = _mm512_max_epi32(d05, d12); + d05 = _mm512_min_epi32(d05, tmp); tmp = d13; - d13 = _mm512_max_epu32(d04, d13); - d04 = _mm512_min_epu32(d04, tmp); + d13 = _mm512_max_epi32(d04, d13); + d04 = _mm512_min_epi32(d04, tmp); tmp = d14; - d14 = _mm512_max_epu32(d03, d14); - d03 = _mm512_min_epu32(d03, tmp); + d14 = _mm512_max_epi32(d03, d14); + d03 = _mm512_min_epi32(d03, tmp); sort_08v_merge_ascending(d01, d02, d03, d04, d05, d06, d07, d08); sort_06v_merge_ascending(d09, d10, d11, d12, d13, d14); @@ -842,28 +842,28 @@ template<> struct bitonic { sort_06v_ascending(d09, d10, d11, d12, d13, d14); tmp = d09; - d09 = _mm512_max_epu32(d08, d09); - d08 = _mm512_min_epu32(d08, tmp); + d09 = _mm512_max_epi32(d08, d09); + d08 = _mm512_min_epi32(d08, tmp); tmp = d10; - d10 = _mm512_max_epu32(d07, d10); - d07 = 
_mm512_min_epu32(d07, tmp); + d10 = _mm512_max_epi32(d07, d10); + d07 = _mm512_min_epi32(d07, tmp); tmp = d11; - d11 = _mm512_max_epu32(d06, d11); - d06 = _mm512_min_epu32(d06, tmp); + d11 = _mm512_max_epi32(d06, d11); + d06 = _mm512_min_epi32(d06, tmp); tmp = d12; - d12 = _mm512_max_epu32(d05, d12); - d05 = _mm512_min_epu32(d05, tmp); + d12 = _mm512_max_epi32(d05, d12); + d05 = _mm512_min_epi32(d05, tmp); tmp = d13; - d13 = _mm512_max_epu32(d04, d13); - d04 = _mm512_min_epu32(d04, tmp); + d13 = _mm512_max_epi32(d04, d13); + d04 = _mm512_min_epi32(d04, tmp); tmp = d14; - d14 = _mm512_max_epu32(d03, d14); - d03 = _mm512_min_epu32(d03, tmp); + d14 = _mm512_max_epi32(d03, d14); + d03 = _mm512_min_epi32(d03, tmp); sort_08v_merge_descending(d01, d02, d03, d04, d05, d06, d07, d08); sort_06v_merge_descending(d09, d10, d11, d12, d13, d14); @@ -875,32 +875,32 @@ template<> struct bitonic { sort_07v_descending(d09, d10, d11, d12, d13, d14, d15); tmp = d09; - d09 = _mm512_max_epu32(d08, d09); - d08 = _mm512_min_epu32(d08, tmp); + d09 = _mm512_max_epi32(d08, d09); + d08 = _mm512_min_epi32(d08, tmp); tmp = d10; - d10 = _mm512_max_epu32(d07, d10); - d07 = _mm512_min_epu32(d07, tmp); + d10 = _mm512_max_epi32(d07, d10); + d07 = _mm512_min_epi32(d07, tmp); tmp = d11; - d11 = _mm512_max_epu32(d06, d11); - d06 = _mm512_min_epu32(d06, tmp); + d11 = _mm512_max_epi32(d06, d11); + d06 = _mm512_min_epi32(d06, tmp); tmp = d12; - d12 = _mm512_max_epu32(d05, d12); - d05 = _mm512_min_epu32(d05, tmp); + d12 = _mm512_max_epi32(d05, d12); + d05 = _mm512_min_epi32(d05, tmp); tmp = d13; - d13 = _mm512_max_epu32(d04, d13); - d04 = _mm512_min_epu32(d04, tmp); + d13 = _mm512_max_epi32(d04, d13); + d04 = _mm512_min_epi32(d04, tmp); tmp = d14; - d14 = _mm512_max_epu32(d03, d14); - d03 = _mm512_min_epu32(d03, tmp); + d14 = _mm512_max_epi32(d03, d14); + d03 = _mm512_min_epi32(d03, tmp); tmp = d15; - d15 = _mm512_max_epu32(d02, d15); - d02 = _mm512_min_epu32(d02, tmp); + d15 = _mm512_max_epi32(d02, d15); + d02 = _mm512_min_epi32(d02, tmp); sort_08v_merge_ascending(d01, d02, d03, d04, d05, d06, d07, d08); sort_07v_merge_ascending(d09, d10, d11, d12, d13, d14, d15); @@ -912,32 +912,32 @@ template<> struct bitonic { sort_07v_ascending(d09, d10, d11, d12, d13, d14, d15); tmp = d09; - d09 = _mm512_max_epu32(d08, d09); - d08 = _mm512_min_epu32(d08, tmp); + d09 = _mm512_max_epi32(d08, d09); + d08 = _mm512_min_epi32(d08, tmp); tmp = d10; - d10 = _mm512_max_epu32(d07, d10); - d07 = _mm512_min_epu32(d07, tmp); + d10 = _mm512_max_epi32(d07, d10); + d07 = _mm512_min_epi32(d07, tmp); tmp = d11; - d11 = _mm512_max_epu32(d06, d11); - d06 = _mm512_min_epu32(d06, tmp); + d11 = _mm512_max_epi32(d06, d11); + d06 = _mm512_min_epi32(d06, tmp); tmp = d12; - d12 = _mm512_max_epu32(d05, d12); - d05 = _mm512_min_epu32(d05, tmp); + d12 = _mm512_max_epi32(d05, d12); + d05 = _mm512_min_epi32(d05, tmp); tmp = d13; - d13 = _mm512_max_epu32(d04, d13); - d04 = _mm512_min_epu32(d04, tmp); + d13 = _mm512_max_epi32(d04, d13); + d04 = _mm512_min_epi32(d04, tmp); tmp = d14; - d14 = _mm512_max_epu32(d03, d14); - d03 = _mm512_min_epu32(d03, tmp); + d14 = _mm512_max_epi32(d03, d14); + d03 = _mm512_min_epi32(d03, tmp); tmp = d15; - d15 = _mm512_max_epu32(d02, d15); - d02 = _mm512_min_epu32(d02, tmp); + d15 = _mm512_max_epi32(d02, d15); + d02 = _mm512_min_epi32(d02, tmp); sort_08v_merge_descending(d01, d02, d03, d04, d05, d06, d07, d08); sort_07v_merge_descending(d09, d10, d11, d12, d13, d14, d15); @@ -949,36 +949,36 @@ template<> struct bitonic { 
sort_08v_descending(d09, d10, d11, d12, d13, d14, d15, d16); tmp = d09; - d09 = _mm512_max_epu32(d08, d09); - d08 = _mm512_min_epu32(d08, tmp); + d09 = _mm512_max_epi32(d08, d09); + d08 = _mm512_min_epi32(d08, tmp); tmp = d10; - d10 = _mm512_max_epu32(d07, d10); - d07 = _mm512_min_epu32(d07, tmp); + d10 = _mm512_max_epi32(d07, d10); + d07 = _mm512_min_epi32(d07, tmp); tmp = d11; - d11 = _mm512_max_epu32(d06, d11); - d06 = _mm512_min_epu32(d06, tmp); + d11 = _mm512_max_epi32(d06, d11); + d06 = _mm512_min_epi32(d06, tmp); tmp = d12; - d12 = _mm512_max_epu32(d05, d12); - d05 = _mm512_min_epu32(d05, tmp); + d12 = _mm512_max_epi32(d05, d12); + d05 = _mm512_min_epi32(d05, tmp); tmp = d13; - d13 = _mm512_max_epu32(d04, d13); - d04 = _mm512_min_epu32(d04, tmp); + d13 = _mm512_max_epi32(d04, d13); + d04 = _mm512_min_epi32(d04, tmp); tmp = d14; - d14 = _mm512_max_epu32(d03, d14); - d03 = _mm512_min_epu32(d03, tmp); + d14 = _mm512_max_epi32(d03, d14); + d03 = _mm512_min_epi32(d03, tmp); tmp = d15; - d15 = _mm512_max_epu32(d02, d15); - d02 = _mm512_min_epu32(d02, tmp); + d15 = _mm512_max_epi32(d02, d15); + d02 = _mm512_min_epi32(d02, tmp); tmp = d16; - d16 = _mm512_max_epu32(d01, d16); - d01 = _mm512_min_epu32(d01, tmp); + d16 = _mm512_max_epi32(d01, d16); + d01 = _mm512_min_epi32(d01, tmp); sort_08v_merge_ascending(d01, d02, d03, d04, d05, d06, d07, d08); sort_08v_merge_ascending(d09, d10, d11, d12, d13, d14, d15, d16); @@ -990,48 +990,48 @@ template<> struct bitonic { sort_08v_ascending(d09, d10, d11, d12, d13, d14, d15, d16); tmp = d09; - d09 = _mm512_max_epu32(d08, d09); - d08 = _mm512_min_epu32(d08, tmp); + d09 = _mm512_max_epi32(d08, d09); + d08 = _mm512_min_epi32(d08, tmp); tmp = d10; - d10 = _mm512_max_epu32(d07, d10); - d07 = _mm512_min_epu32(d07, tmp); + d10 = _mm512_max_epi32(d07, d10); + d07 = _mm512_min_epi32(d07, tmp); tmp = d11; - d11 = _mm512_max_epu32(d06, d11); - d06 = _mm512_min_epu32(d06, tmp); + d11 = _mm512_max_epi32(d06, d11); + d06 = _mm512_min_epi32(d06, tmp); tmp = d12; - d12 = _mm512_max_epu32(d05, d12); - d05 = _mm512_min_epu32(d05, tmp); + d12 = _mm512_max_epi32(d05, d12); + d05 = _mm512_min_epi32(d05, tmp); tmp = d13; - d13 = _mm512_max_epu32(d04, d13); - d04 = _mm512_min_epu32(d04, tmp); + d13 = _mm512_max_epi32(d04, d13); + d04 = _mm512_min_epi32(d04, tmp); tmp = d14; - d14 = _mm512_max_epu32(d03, d14); - d03 = _mm512_min_epu32(d03, tmp); + d14 = _mm512_max_epi32(d03, d14); + d03 = _mm512_min_epi32(d03, tmp); tmp = d15; - d15 = _mm512_max_epu32(d02, d15); - d02 = _mm512_min_epu32(d02, tmp); + d15 = _mm512_max_epi32(d02, d15); + d02 = _mm512_min_epi32(d02, tmp); tmp = d16; - d16 = _mm512_max_epu32(d01, d16); - d01 = _mm512_min_epu32(d01, tmp); + d16 = _mm512_max_epi32(d01, d16); + d01 = _mm512_min_epi32(d01, tmp); sort_08v_merge_descending(d01, d02, d03, d04, d05, d06, d07, d08); sort_08v_merge_descending(d09, d10, d11, d12, d13, d14, d15, d16); } - static NOINLINE void sort_01v(uint32_t *ptr) { + static NOINLINE void sort_01v(int32_t *ptr) { __m512i d01 = _mm512_loadu_si512((__m512i const *) ptr + 0);; sort_01v_ascending(d01); _mm512_storeu_si512((__m512i *) ptr + 0, d01); } - static NOINLINE void sort_02v(uint32_t *ptr) { + static NOINLINE void sort_02v(int32_t *ptr) { __m512i d01 = _mm512_loadu_si512((__m512i const *) ptr + 0);; __m512i d02 = _mm512_loadu_si512((__m512i const *) ptr + 1);; sort_02v_ascending(d01, d02); @@ -1039,7 +1039,7 @@ template<> struct bitonic { _mm512_storeu_si512((__m512i *) ptr + 1, d02); } - static NOINLINE void sort_03v(uint32_t *ptr) { + 
static NOINLINE void sort_03v(int32_t *ptr) { __m512i d01 = _mm512_loadu_si512((__m512i const *) ptr + 0);; __m512i d02 = _mm512_loadu_si512((__m512i const *) ptr + 1);; __m512i d03 = _mm512_loadu_si512((__m512i const *) ptr + 2);; @@ -1049,7 +1049,7 @@ template<> struct bitonic { _mm512_storeu_si512((__m512i *) ptr + 2, d03); } - static NOINLINE void sort_04v(uint32_t *ptr) { + static NOINLINE void sort_04v(int32_t *ptr) { __m512i d01 = _mm512_loadu_si512((__m512i const *) ptr + 0);; __m512i d02 = _mm512_loadu_si512((__m512i const *) ptr + 1);; __m512i d03 = _mm512_loadu_si512((__m512i const *) ptr + 2);; @@ -1061,7 +1061,7 @@ template<> struct bitonic { _mm512_storeu_si512((__m512i *) ptr + 3, d04); } - static NOINLINE void sort_05v(uint32_t *ptr) { + static NOINLINE void sort_05v(int32_t *ptr) { __m512i d01 = _mm512_loadu_si512((__m512i const *) ptr + 0);; __m512i d02 = _mm512_loadu_si512((__m512i const *) ptr + 1);; __m512i d03 = _mm512_loadu_si512((__m512i const *) ptr + 2);; @@ -1075,7 +1075,7 @@ template<> struct bitonic { _mm512_storeu_si512((__m512i *) ptr + 4, d05); } - static NOINLINE void sort_06v(uint32_t *ptr) { + static NOINLINE void sort_06v(int32_t *ptr) { __m512i d01 = _mm512_loadu_si512((__m512i const *) ptr + 0);; __m512i d02 = _mm512_loadu_si512((__m512i const *) ptr + 1);; __m512i d03 = _mm512_loadu_si512((__m512i const *) ptr + 2);; @@ -1091,7 +1091,7 @@ template<> struct bitonic { _mm512_storeu_si512((__m512i *) ptr + 5, d06); } - static NOINLINE void sort_07v(uint32_t *ptr) { + static NOINLINE void sort_07v(int32_t *ptr) { __m512i d01 = _mm512_loadu_si512((__m512i const *) ptr + 0);; __m512i d02 = _mm512_loadu_si512((__m512i const *) ptr + 1);; __m512i d03 = _mm512_loadu_si512((__m512i const *) ptr + 2);; @@ -1109,7 +1109,7 @@ template<> struct bitonic { _mm512_storeu_si512((__m512i *) ptr + 6, d07); } - static NOINLINE void sort_08v(uint32_t *ptr) { + static NOINLINE void sort_08v(int32_t *ptr) { __m512i d01 = _mm512_loadu_si512((__m512i const *) ptr + 0);; __m512i d02 = _mm512_loadu_si512((__m512i const *) ptr + 1);; __m512i d03 = _mm512_loadu_si512((__m512i const *) ptr + 2);; @@ -1129,7 +1129,7 @@ template<> struct bitonic { _mm512_storeu_si512((__m512i *) ptr + 7, d08); } - static NOINLINE void sort_09v(uint32_t *ptr) { + static NOINLINE void sort_09v(int32_t *ptr) { __m512i d01 = _mm512_loadu_si512((__m512i const *) ptr + 0);; __m512i d02 = _mm512_loadu_si512((__m512i const *) ptr + 1);; __m512i d03 = _mm512_loadu_si512((__m512i const *) ptr + 2);; @@ -1151,7 +1151,7 @@ template<> struct bitonic { _mm512_storeu_si512((__m512i *) ptr + 8, d09); } - static NOINLINE void sort_10v(uint32_t *ptr) { + static NOINLINE void sort_10v(int32_t *ptr) { __m512i d01 = _mm512_loadu_si512((__m512i const *) ptr + 0);; __m512i d02 = _mm512_loadu_si512((__m512i const *) ptr + 1);; __m512i d03 = _mm512_loadu_si512((__m512i const *) ptr + 2);; @@ -1175,7 +1175,7 @@ template<> struct bitonic { _mm512_storeu_si512((__m512i *) ptr + 9, d10); } - static NOINLINE void sort_11v(uint32_t *ptr) { + static NOINLINE void sort_11v(int32_t *ptr) { __m512i d01 = _mm512_loadu_si512((__m512i const *) ptr + 0);; __m512i d02 = _mm512_loadu_si512((__m512i const *) ptr + 1);; __m512i d03 = _mm512_loadu_si512((__m512i const *) ptr + 2);; @@ -1201,7 +1201,7 @@ template<> struct bitonic { _mm512_storeu_si512((__m512i *) ptr + 10, d11); } - static NOINLINE void sort_12v(uint32_t *ptr) { + static NOINLINE void sort_12v(int32_t *ptr) { __m512i d01 = _mm512_loadu_si512((__m512i const *) ptr + 0);; __m512i 
d02 = _mm512_loadu_si512((__m512i const *) ptr + 1);; __m512i d03 = _mm512_loadu_si512((__m512i const *) ptr + 2);; @@ -1229,7 +1229,7 @@ template<> struct bitonic { _mm512_storeu_si512((__m512i *) ptr + 11, d12); } - static NOINLINE void sort_13v(uint32_t *ptr) { + static NOINLINE void sort_13v(int32_t *ptr) { __m512i d01 = _mm512_loadu_si512((__m512i const *) ptr + 0);; __m512i d02 = _mm512_loadu_si512((__m512i const *) ptr + 1);; __m512i d03 = _mm512_loadu_si512((__m512i const *) ptr + 2);; @@ -1259,7 +1259,7 @@ template<> struct bitonic { _mm512_storeu_si512((__m512i *) ptr + 12, d13); } - static NOINLINE void sort_14v(uint32_t *ptr) { + static NOINLINE void sort_14v(int32_t *ptr) { __m512i d01 = _mm512_loadu_si512((__m512i const *) ptr + 0);; __m512i d02 = _mm512_loadu_si512((__m512i const *) ptr + 1);; __m512i d03 = _mm512_loadu_si512((__m512i const *) ptr + 2);; @@ -1291,7 +1291,7 @@ template<> struct bitonic { _mm512_storeu_si512((__m512i *) ptr + 13, d14); } - static NOINLINE void sort_15v(uint32_t *ptr) { + static NOINLINE void sort_15v(int32_t *ptr) { __m512i d01 = _mm512_loadu_si512((__m512i const *) ptr + 0);; __m512i d02 = _mm512_loadu_si512((__m512i const *) ptr + 1);; __m512i d03 = _mm512_loadu_si512((__m512i const *) ptr + 2);; @@ -1325,7 +1325,7 @@ template<> struct bitonic { _mm512_storeu_si512((__m512i *) ptr + 14, d15); } - static NOINLINE void sort_16v(uint32_t *ptr) { + static NOINLINE void sort_16v(int32_t *ptr) { __m512i d01 = _mm512_loadu_si512((__m512i const *) ptr + 0);; __m512i d02 = _mm512_loadu_si512((__m512i const *) ptr + 1);; __m512i d03 = _mm512_loadu_si512((__m512i const *) ptr + 2);; @@ -1360,7 +1360,7 @@ template<> struct bitonic { _mm512_storeu_si512((__m512i *) ptr + 14, d15); _mm512_storeu_si512((__m512i *) ptr + 15, d16); } - static void sort(uint32_t *ptr, size_t length); + static void sort(int32_t *ptr, size_t length); }; } diff --git a/src/coreclr/src/gc/vxsort/vxsort.h b/src/coreclr/src/gc/vxsort/vxsort.h index 65255f477f4fcc..96668e458ee4d1 100644 --- a/src/coreclr/src/gc/vxsort/vxsort.h +++ b/src/coreclr/src/gc/vxsort/vxsort.h @@ -17,6 +17,7 @@ #include "defs.h" //#include "isa_detection.h" +#include "alignment.h" #include "machine_traits.h" #include "smallsort/bitonic_sort.h" @@ -27,32 +28,6 @@ namespace vxsort { using vxsort::smallsort::bitonic; -template -struct alignment_hint { -public: - static const size_t ALIGN = N; - static const int8_t REALIGN = 0x66; - - alignment_hint() : left_align(REALIGN), right_align(REALIGN) {} - alignment_hint realign_left() { - alignment_hint copy = *this; - copy.left_align = REALIGN; - return copy; - } - - alignment_hint realign_right() { - alignment_hint copy = *this; - copy.right_align = REALIGN; - return copy; - } - - static bool is_aligned(void *p) { - return (size_t)p % ALIGN == 0; - } - - int left_align : 8; - int right_align : 8; -}; template class vxsort { @@ -60,8 +35,8 @@ class vxsort { static_assert(Unroll <= 12, "Unroll can be in the range 1..12"); private: - using Tp = vxsort_machine_traits; - typedef typename Tp::TV TV; + using MT = vxsort_machine_traits; + typedef typename MT::TV TV; typedef alignment_hint AH; static const int ELEMENT_ALIGN = sizeof(T) - 1; @@ -309,7 +284,7 @@ class vxsort { const TV& P, T*& left, T*& right) { - if (Tp::supports_compress_writes()) { + if (MT::supports_compress_writes()) { partition_block_with_compress(dataVec, P, left, right); } else { partition_block_without_compress(dataVec, P, left, right); @@ -320,10 +295,10 @@ class vxsort { const TV& P, T*& left, T*& 
right) { - auto mask = Tp::get_cmpgt_mask(dataVec, P); - dataVec = Tp::partition_vector(dataVec, mask); - Tp::store_vec(reinterpret_cast(left), dataVec); - Tp::store_vec(reinterpret_cast(right), dataVec); + auto mask = MT::get_cmpgt_mask(dataVec, P); + dataVec = MT::partition_vector(dataVec, mask); + MT::store_vec(reinterpret_cast(left), dataVec); + MT::store_vec(reinterpret_cast(right), dataVec); auto popCount = -_mm_popcnt_u64(mask); right += popCount; left += popCount + N; @@ -333,10 +308,10 @@ class vxsort { const TV& P, T*& left, T*& right) { - auto mask = Tp::get_cmpgt_mask(dataVec, P); + auto mask = MT::get_cmpgt_mask(dataVec, P); auto popCount = -_mm_popcnt_u64(mask); - Tp::store_compress_vec(reinterpret_cast(left), dataVec, ~mask); - Tp::store_compress_vec(reinterpret_cast(right + N + popCount), dataVec, mask); + MT::store_compress_vec(reinterpret_cast(left), dataVec, ~mask); + MT::store_compress_vec(reinterpret_cast(right + N + popCount), dataVec, mask); right += popCount; left += popCount + N; } @@ -388,7 +363,7 @@ class vxsort { *right = std::numeric_limits::Max(); // Broadcast the selected pivot - const TV P = Tp::get_vec_pivot(pivot); + const TV P = MT::broadcast(pivot); auto readLeft = left; auto readRight = right; @@ -440,8 +415,8 @@ class vxsort { #endif for (auto u = 0; u < InnerUnroll; u++) { - auto dl = Tp::load_vec(readLeftV + u); - auto dr = Tp::load_vec(readRightV - (u + 1)); + auto dl = MT::load_vec(readLeftV + u); + auto dr = MT::load_vec(readRightV - (u + 1)); partition_block(dl, P, tmpLeft, tmpRight); partition_block(dr, P, tmpLeft, tmpRight); } @@ -468,18 +443,18 @@ class vxsort { TV d01, d02, d03, d04, d05, d06, d07, d08, d09, d10, d11, d12; switch (InnerUnroll) { - case 12: d12 = Tp::load_vec(nextPtr + InnerUnroll - 12); - case 11: d11 = Tp::load_vec(nextPtr + InnerUnroll - 11); - case 10: d10 = Tp::load_vec(nextPtr + InnerUnroll - 10); - case 9: d09 = Tp::load_vec(nextPtr + InnerUnroll - 9); - case 8: d08 = Tp::load_vec(nextPtr + InnerUnroll - 8); - case 7: d07 = Tp::load_vec(nextPtr + InnerUnroll - 7); - case 6: d06 = Tp::load_vec(nextPtr + InnerUnroll - 6); - case 5: d05 = Tp::load_vec(nextPtr + InnerUnroll - 5); - case 4: d04 = Tp::load_vec(nextPtr + InnerUnroll - 4); - case 3: d03 = Tp::load_vec(nextPtr + InnerUnroll - 3); - case 2: d02 = Tp::load_vec(nextPtr + InnerUnroll - 2); - case 1: d01 = Tp::load_vec(nextPtr + InnerUnroll - 1); + case 12: d12 = MT::load_vec(nextPtr + InnerUnroll - 12); + case 11: d11 = MT::load_vec(nextPtr + InnerUnroll - 11); + case 10: d10 = MT::load_vec(nextPtr + InnerUnroll - 10); + case 9: d09 = MT::load_vec(nextPtr + InnerUnroll - 9); + case 8: d08 = MT::load_vec(nextPtr + InnerUnroll - 8); + case 7: d07 = MT::load_vec(nextPtr + InnerUnroll - 7); + case 6: d06 = MT::load_vec(nextPtr + InnerUnroll - 6); + case 5: d05 = MT::load_vec(nextPtr + InnerUnroll - 5); + case 4: d04 = MT::load_vec(nextPtr + InnerUnroll - 4); + case 3: d03 = MT::load_vec(nextPtr + InnerUnroll - 3); + case 2: d02 = MT::load_vec(nextPtr + InnerUnroll - 2); + case 1: d01 = MT::load_vec(nextPtr + InnerUnroll - 1); } switch (InnerUnroll) { @@ -510,7 +485,7 @@ class vxsort { readLeftV += 1; } - auto d = Tp::load_vec(nextPtr); + auto d = MT::load_vec(nextPtr); partition_block(d, P, writeLeft, writeRight); //partition_block_without_compress(d, P, writeLeft, writeRight); } @@ -560,50 +535,50 @@ class vxsort { // were actually needed to be written to the right hand side // e) We write the right portion of the left vector to the right side // now that its write 
position has been updated - auto RT0 = Tp::load_vec(preAlignedRight); - auto LT0 = Tp::load_vec(preAlignedLeft); - auto rtMask = Tp::get_cmpgt_mask(RT0, P); - auto ltMask = Tp::get_cmpgt_mask(LT0, P); + auto RT0 = MT::load_vec(preAlignedRight); + auto LT0 = MT::load_vec(preAlignedLeft); + auto rtMask = MT::get_cmpgt_mask(RT0, P); + auto ltMask = MT::get_cmpgt_mask(LT0, P); const auto rtPopCountRightPart = max(_mm_popcnt_u32(rtMask), rightAlign); const auto ltPopCountRightPart = _mm_popcnt_u32(ltMask); const auto rtPopCountLeftPart = N - rtPopCountRightPart; const auto ltPopCountLeftPart = N - ltPopCountRightPart; - if (Tp::supports_compress_writes()) { - Tp::store_compress_vec((TV *) (tmpRight + N - rtPopCountRightPart), RT0, rtMask); - Tp::store_compress_vec((TV *) tmpLeft, LT0, ~ltMask); + if (MT::supports_compress_writes()) { + MT::store_compress_vec((TV *) (tmpRight + N - rtPopCountRightPart), RT0, rtMask); + MT::store_compress_vec((TV *) tmpLeft, LT0, ~ltMask); tmpRight -= rtPopCountRightPart & rai; readRight += (rightAlign - N) & rai; - Tp::store_compress_vec((TV *) (tmpRight + N - ltPopCountRightPart), LT0, ltMask); + MT::store_compress_vec((TV *) (tmpRight + N - ltPopCountRightPart), LT0, ltMask); tmpRight -= ltPopCountRightPart & lai; tmpLeft += ltPopCountLeftPart & lai; tmpStartLeft += -leftAlign & lai; readLeft += (leftAlign + N) & lai; - Tp::store_compress_vec((TV*) tmpLeft, RT0, ~rtMask); + MT::store_compress_vec((TV*) tmpLeft, RT0, ~rtMask); tmpLeft += rtPopCountLeftPart & rai; tmpStartRight -= rightAlign & rai; } else { - RT0 = Tp::partition_vector(RT0, rtMask); - LT0 = Tp::partition_vector(LT0, ltMask); - Tp::store_vec((TV*) tmpRight, RT0); - Tp::store_vec((TV*) tmpLeft, LT0); + RT0 = MT::partition_vector(RT0, rtMask); + LT0 = MT::partition_vector(LT0, ltMask); + MT::store_vec((TV*) tmpRight, RT0); + MT::store_vec((TV*) tmpLeft, LT0); tmpRight -= rtPopCountRightPart & rai; readRight += (rightAlign - N) & rai; - Tp::store_vec((TV*) tmpRight, LT0); + MT::store_vec((TV*) tmpRight, LT0); tmpRight -= ltPopCountRightPart & lai; tmpLeft += ltPopCountLeftPart & lai; tmpStartLeft += -leftAlign & lai; readLeft += (leftAlign + N) & lai; - Tp::store_vec((TV*) tmpLeft, RT0); + MT::store_vec((TV*) tmpLeft, RT0); tmpLeft += rtPopCountLeftPart & rai; tmpStartRight -= rightAlign & rai; } diff --git a/src/coreclr/src/vm/CMakeLists.txt b/src/coreclr/src/vm/CMakeLists.txt index 4c28c1708ab3dd..aad5f6938ae36f 100644 --- a/src/coreclr/src/vm/CMakeLists.txt +++ b/src/coreclr/src/vm/CMakeLists.txt @@ -550,9 +550,9 @@ if (CLR_CMAKE_TARGET_WIN32 AND CLR_CMAKE_TARGET_ARCH_AMD64) ../gc/vxsort/do_vxsort_avx512.cpp ../gc/vxsort/machine_traits.avx2.cpp ../gc/vxsort/smallsort/bitonic_sort.AVX2.int64_t.generated.cpp - ../gc/vxsort/smallsort/bitonic_sort.AVX2.uint32_t.generated.cpp + ../gc/vxsort/smallsort/bitonic_sort.AVX2.int32_t.generated.cpp ../gc/vxsort/smallsort/bitonic_sort.AVX512.int64_t.generated.cpp - ../gc/vxsort/smallsort/bitonic_sort.AVX512.uint32_t.generated.cpp + ../gc/vxsort/smallsort/bitonic_sort.AVX512.int32_t.generated.cpp ) endif (CLR_CMAKE_TARGET_WIN32 AND CLR_CMAKE_TARGET_ARCH_AMD64) From 8d51d9006b4e194d1766ab67a2b961e6eebce257 Mon Sep 17 00:00:00 2001 From: Peter Sollich Date: Fri, 3 Jul 2020 15:04:56 +0200 Subject: [PATCH 21/31] Add GCConfig setting to turn vectorized sorting off, streamline ISA detection (but require initialization), rename to IsSupportedInstructionSet. 
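The intended protocol is: the GC calls InitSupportedInstructionSet() exactly once during initialization, passing the GCEnabledInstructionSets config value (default -1, i.e. no restriction), and every later query goes through IsSupportedInstructionSet(). A minimal self-contained sketch of that flow follows; DetermineSupportedISAStub() is a hypothetical stand-in for the CPUID-based DetermineSupportedISA() in isa_detection.cpp, and the bit assignments (bit 0 = AVX2, bit 1 = AVX512F) are assumed from the shift in IsSupportedInstructionSet:

    #include <stdint.h>
    #include <assert.h>

    enum class InstructionSet { AVX2 = 0, AVX512F = 1 };

    static bool s_initialized;
    static int  s_supportedISA;    // bit mask of instruction sets the GC may use

    // hypothetical stand-in for the real CPUID-based detection
    static int DetermineSupportedISAStub() { return 0x3; /* AVX2 | AVX512F */ }

    void InitSupportedInstructionSet (int32_t configSetting)
    {
        // intersect hardware capability with the config mask
        s_supportedISA = DetermineSupportedISAStub() & configSetting;
        // AVX512F use implies AVX2 use, so disabling AVX2 disables both
        if ((s_supportedISA & (1 << (int)InstructionSet::AVX2)) == 0)
            s_supportedISA = 0;
        s_initialized = true;
    }

    bool IsSupportedInstructionSet (InstructionSet instructionSet)
    {
        assert (s_initialized);    // initialization is now required
        return (s_supportedISA & (1 << (int)instructionSet)) != 0;
    }

So GCEnabledInstructionSets=0 should disable vectorized sorting entirely, =1 should restrict the GC to AVX2, and the default (-1) leaves the decision to the hardware detection.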
--- src/coreclr/src/gc/gc.cpp | 27 ++++++++++++------- src/coreclr/src/gc/gcconfig.h | 1 + src/coreclr/src/gc/vxsort/do_vxsort.h | 9 +++++-- src/coreclr/src/gc/vxsort/do_vxsort_avx2.cpp | 4 +++ .../src/gc/vxsort/do_vxsort_avx512.cpp | 4 +++ src/coreclr/src/gc/vxsort/isa_detection.cpp | 23 ++++++++++------ .../src/gc/vxsort/isa_detection_dummy.cpp | 12 ++++++--- 7 files changed, 58 insertions(+), 22 deletions(-) diff --git a/src/coreclr/src/gc/gc.cpp b/src/coreclr/src/gc/gc.cpp index 20d7f3eeaaaa18..204c477997c03a 100644 --- a/src/coreclr/src/gc/gc.cpp +++ b/src/coreclr/src/gc/gc.cpp @@ -2088,8 +2088,9 @@ uint8_t* tree_search (uint8_t* tree, uint8_t* old_address); #ifdef USE_VXSORT void do_vxsort(uint8_t** low, uint8_t** high, unsigned int depth) { - assert(SupportsInstructionSet(InstructionSet::AVX2)); - if (SupportsInstructionSet(InstructionSet::AVX512F)) + assert(IsSupportedInstructionSet(InstructionSet::AVX2)); + // use AVX512F only if the list is large enough to pay for downclocking impact + if (IsSupportedInstructionSet(InstructionSet::AVX512F) && ((high -low) > 128*1024)) { do_vxsort_avx512(low, high); } @@ -2107,8 +2108,9 @@ void do_vxsort(uint8_t** low, uint8_t** high, unsigned int depth) void do_vxsort(int32_t* low, int32_t* high, unsigned int depth) { - assert(SupportsInstructionSet(InstructionSet::AVX2)); - if (SupportsInstructionSet(InstructionSet::AVX512F)) + assert(IsSupportedInstructionSet(InstructionSet::AVX2)); + // use AVX512F only if the list is large enough to pay for downclocking impact + if (IsSupportedInstructionSet(InstructionSet::AVX512F) && ((high - low) > 128*1024)) { do_vxsort_avx512(low, high); } @@ -8351,10 +8353,11 @@ void gc_heap::sort_mark_list() } #ifdef USE_VXSORT - // runtime test if AVX2 is indeed available - if (SupportsInstructionSet(InstructionSet::AVX2)) + ptrdiff_t item_count = mark_list_index - mark_list; + // conservatively use AVX2 only for large mark lists, + // and do runtime test if AVX2 is indeed available + if (item_count > 8*1024 && IsSupportedInstructionSet(InstructionSet::AVX2)) { - ptrdiff_t item_count = mark_list_index - mark_list; #if defined(_DEBUG) || defined(WRITE_SORT_DATA) // in debug, make a copy of the mark list // for checking and debugging purposes @@ -10327,6 +10330,10 @@ HRESULT gc_heap::initialize_gc (size_t soh_segment_size, static_cast(GCEventStatus::GetEnabledKeywords(GCEventProvider_Private))); #endif // __linux__ +#ifdef USE_VXSORT + InitSupportedInstructionSet((int32_t)GCConfig::GetGCEnabledInstructionSets()); +#endif + if (!init_semi_shared()) { hres = E_FAIL; @@ -22270,9 +22277,11 @@ void gc_heap::plan_phase (int condemned_gen_number) { #ifndef MULTIPLE_HEAPS #ifdef USE_VXSORT - if (SupportsInstructionSet(InstructionSet::AVX2)) + ptrdiff_t entry_count = mark_list_index - mark_list; + // conservatively use AVX2 only for large mark lists, + // and do runtime test if AVX2 is indeed available + if (entry_count > 8*1024 && IsSupportedInstructionSet(InstructionSet::AVX2)) { - ptrdiff_t entry_count = mark_list_index - mark_list; int32_t* mark_list_32 = (int32_t*)mark_list; uint8_t* low = gc_low; ptrdiff_t range = heap_segment_allocated(ephemeral_heap_segment) - low; diff --git a/src/coreclr/src/gc/gcconfig.h b/src/coreclr/src/gc/gcconfig.h index c1af6250342f29..053a54f0010c7c 100644 --- a/src/coreclr/src/gc/gcconfig.h +++ b/src/coreclr/src/gc/gcconfig.h @@ -129,6 +129,7 @@ class GCConfigStringHolder INT_CONFIG (GCHeapHardLimitSOHPercent, "GCHeapHardLimitSOHPercent", NULL, 0, "Specifies the GC heap SOH usage as a 
percentage of the total memory") \ INT_CONFIG (GCHeapHardLimitLOHPercent, "GCHeapHardLimitLOHPercent", NULL, 0, "Specifies the GC heap LOH usage as a percentage of the total memory") \ INT_CONFIG (GCHeapHardLimitPOHPercent, "GCHeapHardLimitPOHPercent", NULL, 0, "Specifies the GC heap POH usage as a percentage of the total memory") \ + INT_CONFIG (GCEnabledInstructionSets, "GCEnabledInstructionSets", NULL, -1, "Specifies whether GC can use AVX2 or AVX512F") \ // This class is responsible for retreiving configuration information // for how the GC should operate. diff --git a/src/coreclr/src/gc/vxsort/do_vxsort.h b/src/coreclr/src/gc/vxsort/do_vxsort.h index c33a949d325125..972d92496c89fa 100644 --- a/src/coreclr/src/gc/vxsort/do_vxsort.h +++ b/src/coreclr/src/gc/vxsort/do_vxsort.h @@ -1,11 +1,16 @@ -// Enum for the GCToOSInterface::SupportsInstructionSet method +// Licensed to the .NET Foundation under one or more agreements. +// The .NET Foundation licenses this file to you under the MIT license. +// See the LICENSE file in the project root for more information. + +// Enum for the IsSupportedInstructionSet method enum class InstructionSet { AVX2 = 0, AVX512F = 1, }; -bool SupportsInstructionSet(InstructionSet instructionSet); +void InitSupportedInstructionSet(int32_t configSetting); +bool IsSupportedInstructionSet(InstructionSet instructionSet); void do_vxsort_avx2(uint8_t** low, uint8_t** high); void do_vxsort_avx2(int32_t* low, int32_t* high); diff --git a/src/coreclr/src/gc/vxsort/do_vxsort_avx2.cpp b/src/coreclr/src/gc/vxsort/do_vxsort_avx2.cpp index 03ce5b5a9067e4..49f98767f01cdd 100644 --- a/src/coreclr/src/gc/vxsort/do_vxsort_avx2.cpp +++ b/src/coreclr/src/gc/vxsort/do_vxsort_avx2.cpp @@ -1,3 +1,7 @@ +// Licensed to the .NET Foundation under one or more agreements. +// The .NET Foundation licenses this file to you under the MIT license. +// See the LICENSE file in the project root for more information. + #include "common.h" #include "vxsort_targets_enable_avx2.h" diff --git a/src/coreclr/src/gc/vxsort/do_vxsort_avx512.cpp b/src/coreclr/src/gc/vxsort/do_vxsort_avx512.cpp index ea6c7748c71f57..93724bd6c8b527 100644 --- a/src/coreclr/src/gc/vxsort/do_vxsort_avx512.cpp +++ b/src/coreclr/src/gc/vxsort/do_vxsort_avx512.cpp @@ -1,3 +1,7 @@ +// Licensed to the .NET Foundation under one or more agreements. +// The .NET Foundation licenses this file to you under the MIT license. +// See the LICENSE file in the project root for more information. + #include "common.h" #include "vxsort_targets_enable_avx512.h" diff --git a/src/coreclr/src/gc/vxsort/isa_detection.cpp b/src/coreclr/src/gc/vxsort/isa_detection.cpp index 806b7923ef108c..7e5fdb9a8bc7be 100644 --- a/src/coreclr/src/gc/vxsort/isa_detection.cpp +++ b/src/coreclr/src/gc/vxsort/isa_detection.cpp @@ -1,5 +1,7 @@ -// ISA_Detection.cpp : Diese Datei enthält die Funktion "main". Hier beginnt und endet die Ausführung des Programms. -// +// Licensed to the .NET Foundation under one or more agreements. +// The .NET Foundation licenses this file to you under the MIT license. +// See the LICENSE file in the project root for more information. 
+ #include "common.h" #include @@ -121,14 +123,19 @@ SupportedISA DetermineSupportedISA() static bool s_initialized; static SupportedISA s_supportedISA; -bool SupportsInstructionSet(InstructionSet instructionSet) +bool IsSupportedInstructionSet(InstructionSet instructionSet) { + assert(s_initialized); assert(instructionSet == InstructionSet::AVX2 || instructionSet == InstructionSet::AVX512F); - if (!s_initialized) - { - s_supportedISA = DetermineSupportedISA(); - s_initialized = true; - } return ((int)s_supportedISA & (1 << (int)instructionSet)) != 0; } +void InitSupportedInstructionSet(int32_t configSetting) +{ + s_supportedISA = (SupportedISA)((int)DetermineSupportedISA() & configSetting); + // we are assuming that AVX2 can be used if AVX512F can, + // so if AVX2 is disabled, we need to disable AVX512F as well + if (!((int)s_supportedISA & (int)SupportedISA::AVX2)) + s_supportedISA = SupportedISA::None; + s_initialized = true; +} diff --git a/src/coreclr/src/gc/vxsort/isa_detection_dummy.cpp b/src/coreclr/src/gc/vxsort/isa_detection_dummy.cpp index 9049ec60c69c75..1e36ba79ed4ac0 100644 --- a/src/coreclr/src/gc/vxsort/isa_detection_dummy.cpp +++ b/src/coreclr/src/gc/vxsort/isa_detection_dummy.cpp @@ -1,5 +1,7 @@ -// ISA_Detection.cpp : Diese Datei enthält die Funktion "main". Hier beginnt und endet die Ausführung des Programms. -// +// Licensed to the .NET Foundation under one or more agreements. +// The .NET Foundation licenses this file to you under the MIT license. +// See the LICENSE file in the project root for more information. + #include "common.h" #include @@ -7,7 +9,11 @@ #if defined(TARGET_AMD64) && defined(TARGET_WINDOWS) -bool SupportsInstructionSet(InstructionSet instructionSet) +void InitSupportedInstructionSet(int32_t) +{ +} + +bool IsSupportedInstructionSet(InstructionSet) { return false; } From c9a7b76781f3ecb79bb6c45ef94008475e8f0fad Mon Sep 17 00:00:00 2001 From: Peter Sollich Date: Fri, 3 Jul 2020 16:34:19 +0200 Subject: [PATCH 22/31] Several small improvements: - Don't waste time sorting the mark list if background GC is running as we are not going to use it. - Use smaller max mark list size if we cannot use AVX2/AVX512 instruction sets. - Fix mark list overflow detection for server GC. --- src/coreclr/src/gc/gc.cpp | 25 +++++++++++++++++++++++-- 1 file changed, 23 insertions(+), 2 deletions(-) diff --git a/src/coreclr/src/gc/gc.cpp b/src/coreclr/src/gc/gc.cpp index 204c477997c03a..0753235cdbc636 100644 --- a/src/coreclr/src/gc/gc.cpp +++ b/src/coreclr/src/gc/gc.cpp @@ -8314,6 +8314,16 @@ void gc_heap::sort_mark_list() return; } +#ifdef BACKGROUND_GC + // we are not going to use the mark list if background GC is running + // so let's not waste time sorting it + if (gc_heap::background_running_p()) + { + mark_list_index = mark_list_end + 1; + return; + } +#endif //BACKGROUND_GC + // if any other heap had a mark list overflow, we fake one too, // so we don't use an incomplete mark list by mistake for (int i = 0; i < n_heaps; i++) @@ -8797,11 +8807,20 @@ void gc_heap::combine_mark_lists() void gc_heap::grow_mark_list () { + // with vectorized sorting, we can use bigger mark lists +#ifdef USE_VXSORT #ifdef MULTIPLE_HEAPS - const size_t MAX_MARK_LIST_SIZE = 1000 * 1024; + const size_t MAX_MARK_LIST_SIZE = IsSupportedInstructionSet(InstructionSet::AVX2) ? 1000 * 1024 : 200 * 1024; #else //MULTIPLE_HEAPS - const size_t MAX_MARK_LIST_SIZE = 32 * 1024; + const size_t MAX_MARK_LIST_SIZE = IsSupportedInstructionSet(InstructionSet::AVX2) ?
32 * 1024 : 16 * 1024; #endif //MULTIPLE_HEAPS +#else +#ifdef MULTIPLE_HEAPS + const size_t MAX_MARK_LIST_SIZE = 200 * 1024; +#else //MULTIPLE_HEAPS + const size_t MAX_MARK_LIST_SIZE = 16 * 1024; +#endif //MULTIPLE_HEAPS +#endif size_t new_mark_list_size = min (mark_list_size * 2, MAX_MARK_LIST_SIZE); if (new_mark_list_size == mark_list_size) @@ -22261,7 +22280,9 @@ void gc_heap::plan_phase (int condemned_gen_number) if (mark_list_index >= (mark_list_end + 1)) { mark_list_index = mark_list_end + 1; +#ifndef MULTIPLE_HEAPS // in Server GC, we check for mark list overflow in sort_mark_list mark_list_overflow = true; +#endif } #else dprintf (3, ("mark_list length: %Id", From 43e5d38215dbb0ae3733e74d943dbd8820e45c17 Mon Sep 17 00:00:00 2001 From: Peter Sollich Date: Mon, 6 Jul 2020 16:52:28 +0200 Subject: [PATCH 23/31] Address code review feedback - add constants for the thresholds above which we use AVX2/AVX512F instruction sets. Add space before parameter lists as per GC codebase coding conventions. Improve some comments. --- src/coreclr/src/gc/gc.cpp | 119 ++++++++++-------- src/coreclr/src/gc/vxsort/do_vxsort.h | 22 ++-- src/coreclr/src/gc/vxsort/do_vxsort_avx2.cpp | 18 +-- .../src/gc/vxsort/do_vxsort_avx512.cpp | 10 +- src/coreclr/src/gc/vxsort/isa_detection.cpp | 4 +- .../src/gc/vxsort/isa_detection_dummy.cpp | 4 +- 6 files changed, 94 insertions(+), 83 deletions(-) diff --git a/src/coreclr/src/gc/gc.cpp b/src/coreclr/src/gc/gc.cpp index 0753235cdbc636..f15046398b26e4 100644 --- a/src/coreclr/src/gc/gc.cpp +++ b/src/coreclr/src/gc/gc.cpp @@ -2086,42 +2086,51 @@ uint8_t* tree_search (uint8_t* tree, uint8_t* old_address); #elif defined(USE_VXSORT) #define _sort do_vxsort #ifdef USE_VXSORT -void do_vxsort(uint8_t** low, uint8_t** high, unsigned int depth) + +// above this threshold, using AVX2 for sorting will likely pay off +// despite possible downclocking on some devices +const size_t AVX2_THRESHOLD_SIZE = 8 * 1024; + +// above this threshold, using AVX512F for sorting will likely pay off +// despite possible downclocking on current devices +const size_t AVX512F_THRESHOLD_SIZE = 128 * 1024; + +void do_vxsort (uint8_t** low, uint8_t** high, unsigned int depth) { - assert(IsSupportedInstructionSet(InstructionSet::AVX2)); + assert (IsSupportedInstructionSet (InstructionSet::AVX2)); // use AVX512F only if the list is large enough to pay for downclocking impact - if (IsSupportedInstructionSet(InstructionSet::AVX512F) && ((high -low) > 128*1024)) + if (IsSupportedInstructionSet (InstructionSet::AVX512F) && ((high -low) > AVX512F_THRESHOLD_SIZE)) { - do_vxsort_avx512(low, high); + do_vxsort_avx512 (low, high); } else { - do_vxsort_avx2(low, high); + do_vxsort_avx2 (low, high); } #ifdef _DEBUG for (uint8_t** p = low; p < high; p++) { - assert(p[0] <= p[1]); + assert (p[0] <= p[1]); } #endif } -void do_vxsort(int32_t* low, int32_t* high, unsigned int depth) +void do_vxsort (int32_t* low, int32_t* high, unsigned int depth) { - assert(IsSupportedInstructionSet(InstructionSet::AVX2)); + assert (IsSupportedInstructionSet(InstructionSet::AVX2)); // use AVX512F only if the list is large enough to pay for downclocking impact - if (IsSupportedInstructionSet(InstructionSet::AVX512F) && ((high - low) > 128*1024)) + if (IsSupportedInstructionSet (InstructionSet::AVX512F) && ((high - low) > AVX512F_THRESHOLD_SIZE)) { - do_vxsort_avx512(low, high); + do_vxsort_avx512 (low, high); } else { - do_vxsort_avx2(low, high); + do_vxsort_avx2 (low, high); } #ifdef _DEBUG for (int32_t* p = low; p < high; p++) { -
assert(p[0] <= p[1]); + assert (p[0] <= p[1]); } #endif } @@ -8365,8 +8374,8 @@ void gc_heap::sort_mark_list() #ifdef USE_VXSORT ptrdiff_t item_count = mark_list_index - mark_list; // conservatively use AVX2 only for large mark lists, - // and do runtime test if AVX2 is indeed available - if (item_count > 8*1024 && IsSupportedInstructionSet(InstructionSet::AVX2)) + // and do runtime test to check whether AVX2 is indeed available + if (item_count > AVX2_THRESHOLD_SIZE && IsSupportedInstructionSet (InstructionSet::AVX2)) { #if defined(_DEBUG) || defined(WRITE_SORT_DATA) // in debug, make a copy of the mark list @@ -8386,68 +8395,68 @@ void gc_heap::sort_mark_list() ptrdiff_t scaled_range = range >> 3; if ((uint32_t)scaled_range == scaled_range) { - dprintf(3, ("Sorting mark lists as 32-bit offsets")); + dprintf (3, ("Sorting mark lists as 32-bit offsets")); //#define WRITE_SORT_DATA // first step: scale the pointers down to 32-bit offsets uint8_t** mark_list = this->mark_list; int32_t* mark_list_32 = (int32_t*)mark_list; - do_pack_avx2(mark_list, item_count, low); + do_pack_avx2 (mark_list, item_count, low); // sort the 32-bit offsets if (item_count > 0) { ptrdiff_t start = get_cycle_count(); - _sort(&mark_list_32[0], &mark_list_32[item_count - 1], 0); + _sort (&mark_list_32[0], &mark_list_32[item_count - 1], 0); ptrdiff_t elapsed_cycles = get_cycle_count() - start; - int log2_item_count = index_of_highest_set_bit(item_count); + int log2_item_count = index_of_highest_set_bit (item_count); double elapsed_cyles_by_n_log_n = (double)elapsed_cycles / item_count / log2_item_count; -// printf("GC#%d: first phase of sort_mark_list for heap %d took %u cycles to sort %u entries (cost/(n*log2(n) = %5.2f)\n", settings.gc_index, this->heap_number, elapsed_cycles, item_count, elapsed_cyles_by_n_log_n); +// printf ("GC#%d: first phase of sort_mark_list for heap %d took %u cycles to sort %u entries (cost/(n*log2(n) = %5.2f)\n", settings.gc_index, this->heap_number, elapsed_cycles, item_count, elapsed_cyles_by_n_log_n); #ifdef WRITE_SORT_DATA char file_name[256]; - sprintf_s(file_name, _countof(file_name), "sort_data_gc%d_heap%d", settings.gc_index, heap_number); + sprintf_s (file_name, _countof(file_name), "sort_data_gc%d_heap%d", settings.gc_index, heap_number); FILE* f; - errno_t err = fopen_s(&f, file_name, "wb"); + errno_t err = fopen_s (&f, file_name, "wb"); if (err == 0) { size_t magic = 'SDAT'; - if (fwrite(&magic, sizeof(magic), 1, f) != 1) - printf("fwrite failed\n"); - if (fwrite(&elapsed_cycles, sizeof(elapsed_cycles), 1, f) != 1) - printf("fwrite failed\n"); - if (fwrite(&low, sizeof(low), 1, f) != 1) - printf("fwrite failed\n"); - if (fwrite(&item_count, sizeof(item_count), 1, f) != 1) - printf("fwrite failed\n"); - if (fwrite(mark_list_copy, sizeof(mark_list_copy[0]), item_count, f) != item_count) - printf("fwrite failed\n"); - if (fwrite(&magic, sizeof(magic), 1, f) != 1) - printf("fwrite failed\n"); - if (fclose(f) != 0) - printf("fclose failed\n"); + if (fwrite (&magic, sizeof(magic), 1, f) != 1) + printf ("fwrite failed\n"); + if (fwrite (&elapsed_cycles, sizeof(elapsed_cycles), 1, f) != 1) + printf ("fwrite failed\n"); + if (fwrite (&low, sizeof(low), 1, f) != 1) + printf ("fwrite failed\n"); + if (fwrite (&item_count, sizeof(item_count), 1, f) != 1) + printf ("fwrite failed\n"); + if (fwrite (mark_list_copy, sizeof(mark_list_copy[0]), item_count, f) != item_count) + printf ("fwrite failed\n"); + if (fwrite (&magic, sizeof(magic), 1, f) != 1) + printf ("fwrite failed\n"); + if (fclose 
(f) != 0) + printf ("fclose failed\n"); } #endif } - do_unpack_avx2(mark_list_32, item_count, low); + do_unpack_avx2 (mark_list_32, item_count, low); } else { - dprintf(3, ("Sorting mark lists")); + dprintf (3, ("Sorting mark lists")); if (mark_list_index > mark_list) - _sort(mark_list, mark_list_index - 1, 0); + _sort (mark_list, mark_list_index - 1, 0); } #ifdef _DEBUG // in debug, sort the copy as well using the proven sort, so we can check we got the right result if (mark_list_copy_index > mark_list_copy) { - introsort::sort(mark_list_copy, mark_list_copy_index - 1, 0); + introsort::sort (mark_list_copy, mark_list_copy_index - 1, 0); } for (ptrdiff_t i = 0; i < item_count; i++) { @@ -8459,19 +8468,19 @@ void gc_heap::sort_mark_list() else #endif //USE_VXSORT { - dprintf(3, ("Sorting mark lists")); + dprintf (3, ("Sorting mark lists")); if (mark_list_index > mark_list) { ptrdiff_t start = get_cycle_count(); - introsort::sort(mark_list, mark_list_index - 1, 0); + introsort::sort (mark_list, mark_list_index - 1, 0); ptrdiff_t elapsed_cycles = get_cycle_count() - start; size_t item_count = mark_list_index - mark_list; - int log2_item_count = index_of_highest_set_bit(item_count); + int log2_item_count = index_of_highest_set_bit (item_count); double elapsed_cyles_by_n_log_n = (double)elapsed_cycles / item_count / log2_item_count; -// printf("GC#%d: first phase of sort_mark_list for heap %d took %u cycles to sort %u entries (cost/(n*log2(n) = %5.2f)\n", settings.gc_index, this->heap_number, elapsed_cycles, item_count, elapsed_cyles_by_n_log_n); +// printf ("GC#%d: first phase of sort_mark_list for heap %d took %u cycles to sort %u entries (cost/(n*log2(n) = %5.2f)\n", settings.gc_index, this->heap_number, elapsed_cycles, item_count, elapsed_cyles_by_n_log_n); } } @@ -8810,9 +8819,9 @@ void gc_heap::grow_mark_list () // with vectorized sorting, we can use bigger mark lists #ifdef USE_VXSORT #ifdef MULTIPLE_HEAPS - const size_t MAX_MARK_LIST_SIZE = IsSupportedInstructionSet(InstructionSet::AVX2) ? 1000 * 1024 : 200 * 1024; + const size_t MAX_MARK_LIST_SIZE = IsSupportedInstructionSet (InstructionSet::AVX2) ? 1000 * 1024 : 200 * 1024; #else //MULTIPLE_HEAPS - const size_t MAX_MARK_LIST_SIZE = IsSupportedInstructionSet(InstructionSet::AVX2) ? 32 * 1024 : 16 * 1024; + const size_t MAX_MARK_LIST_SIZE = IsSupportedInstructionSet (InstructionSet::AVX2) ? 
32 * 1024 : 16 * 1024; #endif //MULTIPLE_HEAPS #else #ifdef MULTIPLE_HEAPS @@ -10350,7 +10359,7 @@ HRESULT gc_heap::initialize_gc (size_t soh_segment_size, #endif // __linux__ #ifdef USE_VXSORT - InitSupportedInstructionSet((int32_t)GCConfig::GetGCEnabledInstructionSets()); + InitSupportedInstructionSet ((int32_t)GCConfig::GetGCEnabledInstructionSets()); #endif if (!init_semi_shared()) @@ -22300,35 +22309,35 @@ void gc_heap::plan_phase (int condemned_gen_number) #ifdef USE_VXSORT ptrdiff_t entry_count = mark_list_index - mark_list; // conservatively use AVX2 only for large mark lists, - // and do runtime test if AVX2 is indeed available - if (entry_count > 8*1024 && IsSupportedInstructionSet(InstructionSet::AVX2)) + // and do runtime test to check whether AVX2 is indeed available + if (entry_count > AVX2_THRESHOLD_SIZE && IsSupportedInstructionSet (InstructionSet::AVX2)) { int32_t* mark_list_32 = (int32_t*)mark_list; uint8_t* low = gc_low; - ptrdiff_t range = heap_segment_allocated(ephemeral_heap_segment) - low; + ptrdiff_t range = heap_segment_allocated (ephemeral_heap_segment) - low; if ((uint32_t)range == range) { - do_pack_avx2(mark_list, entry_count, low); - _sort(&mark_list_32[0], &mark_list_32[entry_count - 1], 0); - do_unpack_avx2(mark_list_32, entry_count, low); + do_pack_avx2 (mark_list, entry_count, low); + _sort (&mark_list_32[0], &mark_list_32[entry_count - 1], 0); + do_unpack_avx2 (mark_list_32, entry_count, low); #ifdef _DEBUG uint8_t*high = heap_segment_allocated (ephemeral_heap_segment); for (ptrdiff_t i = 0; i < entry_count; i++) { uint8_t* item = mark_list[i]; - assert(low <= item && item < high); + assert (low <= item && item < high); } #endif //_DEBUG } else { - _sort(&mark_list[0], mark_list_index - 1, 0); + _sort (&mark_list[0], mark_list_index - 1, 0); } } else #endif //USE_VXSORT { - introsort::sort(&mark_list[0], mark_list_index - 1, 0); + introsort::sort (&mark_list[0], mark_list_index - 1, 0); } //printf ("using mark list at GC #%d", dd_collection_count (dynamic_data_of (0))); diff --git a/src/coreclr/src/gc/vxsort/do_vxsort.h b/src/coreclr/src/gc/vxsort/do_vxsort.h index 972d92496c89fa..302a7bc3406cb1 100644 --- a/src/coreclr/src/gc/vxsort/do_vxsort.h +++ b/src/coreclr/src/gc/vxsort/do_vxsort.h @@ -9,15 +9,17 @@ enum class InstructionSet AVX512F = 1, }; -void InitSupportedInstructionSet(int32_t configSetting); -bool IsSupportedInstructionSet(InstructionSet instructionSet); +void InitSupportedInstructionSet (int32_t configSetting); +bool IsSupportedInstructionSet (InstructionSet instructionSet); -void do_vxsort_avx2(uint8_t** low, uint8_t** high); -void do_vxsort_avx2(int32_t* low, int32_t* high); -void do_pack_avx2(uint8_t** mem, size_t len, uint8_t* base); -void do_unpack_avx2(int32_t* mem, size_t len, uint8_t* base); +void do_vxsort_avx2 (uint8_t** low, uint8_t** high); +void do_vxsort_avx2 (int32_t* low, int32_t* high); -void do_vxsort_avx512(uint8_t** low, uint8_t** high); -void do_vxsort_avx512(int32_t* low, int32_t* high); -void do_pack_avx512(uint8_t** mem, size_t len, uint8_t* base); -void do_unpack_avx512(int32_t* mem, size_t len, uint8_t* base); +void do_pack_avx2 (uint8_t** mem, size_t len, uint8_t* base); +void do_unpack_avx2 (int32_t* mem, size_t len, uint8_t* base); + +void do_vxsort_avx512 (uint8_t** low, uint8_t** high); +void do_vxsort_avx512 (int32_t* low, int32_t* high); + +void do_pack_avx512 (uint8_t** mem, size_t len, uint8_t* base); +void do_unpack_avx512 (int32_t* mem, size_t len, uint8_t* base); diff --git 
a/src/coreclr/src/gc/vxsort/do_vxsort_avx2.cpp b/src/coreclr/src/gc/vxsort/do_vxsort_avx2.cpp index 49f98767f01cdd..dcbb3280c3d4b6 100644 --- a/src/coreclr/src/gc/vxsort/do_vxsort_avx2.cpp +++ b/src/coreclr/src/gc/vxsort/do_vxsort_avx2.cpp @@ -52,7 +52,7 @@ namespace std #ifndef max template -T max(T a, T b) +T max (T a, T b) { if (a > b) return a; else return b; } @@ -61,27 +61,27 @@ T max(T a, T b) #include "machine_traits.avx2.h" #include "packer.h" -void do_vxsort_avx2(uint8_t** low, uint8_t** high) +void do_vxsort_avx2 (uint8_t** low, uint8_t** high) { auto sorter = vxsort::vxsort(); - sorter.sort((int64_t*)low, (int64_t*)high); + sorter.sort ((int64_t*)low, (int64_t*)high); } -void do_vxsort_avx2(int32_t* low, int32_t* high) +void do_vxsort_avx2 (int32_t* low, int32_t* high) { auto sorter = vxsort::vxsort(); - sorter.sort(low, high); + sorter.sort (low, high); } -void do_pack_avx2(uint8_t** mem, size_t len, uint8_t* base) +void do_pack_avx2 (uint8_t** mem, size_t len, uint8_t* base) { auto packer = vxsort::packer(); - packer.pack((int64_t*)mem, len, (int64_t)base); + packer.pack ((int64_t*)mem, len, (int64_t)base); } -void do_unpack_avx2(int32_t* mem, size_t len, uint8_t* base) +void do_unpack_avx2 (int32_t* mem, size_t len, uint8_t* base) { auto packer = vxsort::packer(); - packer.unpack(mem, len, (int64_t)base); + packer.unpack (mem, len, (int64_t)base); } #include "vxsort_targets_disable.h" diff --git a/src/coreclr/src/gc/vxsort/do_vxsort_avx512.cpp b/src/coreclr/src/gc/vxsort/do_vxsort_avx512.cpp index 93724bd6c8b527..623558ba2512a2 100644 --- a/src/coreclr/src/gc/vxsort/do_vxsort_avx512.cpp +++ b/src/coreclr/src/gc/vxsort/do_vxsort_avx512.cpp @@ -52,7 +52,7 @@ namespace std #ifndef max template -T max(T a, T b) +T max (T a, T b) { if (a > b) return a; else return b; } @@ -61,16 +61,16 @@ T max(T a, T b) #include "vxsort.h" #include "machine_traits.avx512.h" -void do_vxsort_avx512(uint8_t** low, uint8_t** high) +void do_vxsort_avx512 (uint8_t** low, uint8_t** high) { auto sorter = vxsort::vxsort(); - sorter.sort((int64_t*)low, (int64_t*)high); + sorter.sort ((int64_t*)low, (int64_t*)high); } -void do_vxsort_avx512(int32_t* low, int32_t* high) +void do_vxsort_avx512 (int32_t* low, int32_t* high) { auto sorter = vxsort::vxsort(); - sorter.sort(low, high); + sorter.sort (low, high); } #include "vxsort_targets_disable.h" diff --git a/src/coreclr/src/gc/vxsort/isa_detection.cpp b/src/coreclr/src/gc/vxsort/isa_detection.cpp index 7e5fdb9a8bc7be..3ebd4fbb244094 100644 --- a/src/coreclr/src/gc/vxsort/isa_detection.cpp +++ b/src/coreclr/src/gc/vxsort/isa_detection.cpp @@ -123,14 +123,14 @@ SupportedISA DetermineSupportedISA() static bool s_initialized; static SupportedISA s_supportedISA; -bool IsSupportedInstructionSet(InstructionSet instructionSet) +bool IsSupportedInstructionSet (InstructionSet instructionSet) { assert(s_initialized); assert(instructionSet == InstructionSet::AVX2 || instructionSet == InstructionSet::AVX512F); return ((int)s_supportedISA & (1 << (int)instructionSet)) != 0; } -void InitSupportedInstructionSet(int32_t configSetting) +void InitSupportedInstructionSet (int32_t configSetting) { s_supportedISA = (SupportedISA)((int)DetermineSupportedISA() & configSetting); // we are assuming that AVX2 can be used if AVX521F can, diff --git a/src/coreclr/src/gc/vxsort/isa_detection_dummy.cpp b/src/coreclr/src/gc/vxsort/isa_detection_dummy.cpp index 1e36ba79ed4ac0..c25bc54a7f1fd3 100644 --- a/src/coreclr/src/gc/vxsort/isa_detection_dummy.cpp +++ 
b/src/coreclr/src/gc/vxsort/isa_detection_dummy.cpp @@ -9,11 +9,11 @@ #if defined(TARGET_AMD64) && defined(TARGET_WINDOWS) -void InitSupportedInstructionSet(int32_t) +void InitSupportedInstructionSet (int32_t) { } -bool IsSupportedInstructionSet(InstructionSet) +bool IsSupportedInstructionSet (InstructionSet) { return false; } From cd6660a88fb81e8ead5ad815a91c39f64951da99 Mon Sep 17 00:00:00 2001 From: Peter Sollich Date: Mon, 6 Jul 2020 17:10:34 +0200 Subject: [PATCH 24/31] Add license headers and entry in THIRD-PARTY_NOTICES.TXT for Dan's vectorized sorting code. --- THIRD-PARTY-NOTICES.TXT | 25 +++++++++++++++++++ src/coreclr/src/gc/vxsort/alignment.h | 4 +++ src/coreclr/src/gc/vxsort/defs.h | 4 +++ .../src/gc/vxsort/machine_traits.avx2.cpp | 4 +++ .../src/gc/vxsort/machine_traits.avx2.h | 4 +++ .../src/gc/vxsort/machine_traits.avx512.h | 4 +++ src/coreclr/src/gc/vxsort/machine_traits.h | 4 +++ src/coreclr/src/gc/vxsort/packer.h | 4 +++ .../bitonic_sort.AVX2.int32_t.generated.cpp | 4 +++ .../bitonic_sort.AVX2.int32_t.generated.h | 4 +++ .../bitonic_sort.AVX2.int64_t.generated.cpp | 4 +++ .../bitonic_sort.AVX2.int64_t.generated.h | 4 +++ .../bitonic_sort.AVX512.int32_t.generated.cpp | 4 +++ .../bitonic_sort.AVX512.int32_t.generated.h | 4 +++ .../bitonic_sort.AVX512.int64_t.generated.cpp | 4 +++ .../bitonic_sort.AVX512.int64_t.generated.h | 4 +++ .../src/gc/vxsort/smallsort/bitonic_sort.h | 4 +++ .../src/gc/vxsort/smallsort/codegen/avx2.py | 6 +++++ .../src/gc/vxsort/smallsort/codegen/avx512.py | 6 +++++ .../vxsort/smallsort/codegen/bitonic_gen.py | 6 +++++ .../vxsort/smallsort/codegen/bitonic_isa.py | 6 +++++ .../src/gc/vxsort/smallsort/codegen/utils.py | 6 +++++ src/coreclr/src/gc/vxsort/vxsort.h | 4 +++ .../src/gc/vxsort/vxsort_targets_disable.h | 6 ++++- .../gc/vxsort/vxsort_targets_enable_avx2.h | 4 +++ .../gc/vxsort/vxsort_targets_enable_avx512.h | 4 +++ 26 files changed, 136 insertions(+), 1 deletion(-) diff --git a/THIRD-PARTY-NOTICES.TXT b/THIRD-PARTY-NOTICES.TXT index 707bd024f8f867..32e23122d71a0c 100644 --- a/THIRD-PARTY-NOTICES.TXT +++ b/THIRD-PARTY-NOTICES.TXT @@ -860,3 +860,28 @@ The above copyright notice and this permission notice shall be included in all c THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +License notice for vectorized sorting code +------------------------------------------ + +MIT License + +Copyright (c) 2020 Dan Shechter + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + diff --git a/src/coreclr/src/gc/vxsort/alignment.h b/src/coreclr/src/gc/vxsort/alignment.h index c92ba9d6a31186..ea4e4c03346b37 100644 --- a/src/coreclr/src/gc/vxsort/alignment.h +++ b/src/coreclr/src/gc/vxsort/alignment.h @@ -1,3 +1,7 @@ +// Licensed to the .NET Foundation under one or more agreements. +// The .NET Foundation licenses this file to you under the MIT license. +// See the LICENSE file in the project root for more information. + #ifndef VXSORT_ALIGNNMENT_H #define VXSORT_ALIGNNMENT_H diff --git a/src/coreclr/src/gc/vxsort/defs.h b/src/coreclr/src/gc/vxsort/defs.h index 8902ffe5e61f91..3ae328e943f5d0 100644 --- a/src/coreclr/src/gc/vxsort/defs.h +++ b/src/coreclr/src/gc/vxsort/defs.h @@ -1,3 +1,7 @@ +// Licensed to the .NET Foundation under one or more agreements. +// The .NET Foundation licenses this file to you under the MIT license. +// See the LICENSE file in the project root for more information. + #ifndef VXSORT_DEFS_H #define VXSORT_DEFS_H diff --git a/src/coreclr/src/gc/vxsort/machine_traits.avx2.cpp b/src/coreclr/src/gc/vxsort/machine_traits.avx2.cpp index b2d4feb31736ec..9e0741d605fd49 100644 --- a/src/coreclr/src/gc/vxsort/machine_traits.avx2.cpp +++ b/src/coreclr/src/gc/vxsort/machine_traits.avx2.cpp @@ -1,3 +1,7 @@ +// Licensed to the .NET Foundation under one or more agreements. +// The .NET Foundation licenses this file to you under the MIT license. +// See the LICENSE file in the project root for more information. + #include "common.h" //#include diff --git a/src/coreclr/src/gc/vxsort/machine_traits.avx2.h b/src/coreclr/src/gc/vxsort/machine_traits.avx2.h index 9b65c64d0fd095..63ad17ced9b39c 100644 --- a/src/coreclr/src/gc/vxsort/machine_traits.avx2.h +++ b/src/coreclr/src/gc/vxsort/machine_traits.avx2.h @@ -1,3 +1,7 @@ +// Licensed to the .NET Foundation under one or more agreements. +// The .NET Foundation licenses this file to you under the MIT license. +// See the LICENSE file in the project root for more information. + // // Created by dans on 6/1/20. // diff --git a/src/coreclr/src/gc/vxsort/machine_traits.avx512.h b/src/coreclr/src/gc/vxsort/machine_traits.avx512.h index 5811fff4f35231..ac6eeb05bf2249 100644 --- a/src/coreclr/src/gc/vxsort/machine_traits.avx512.h +++ b/src/coreclr/src/gc/vxsort/machine_traits.avx512.h @@ -1,3 +1,7 @@ +// Licensed to the .NET Foundation under one or more agreements. +// The .NET Foundation licenses this file to you under the MIT license. +// See the LICENSE file in the project root for more information. + // // Created by dans on 6/1/20. // diff --git a/src/coreclr/src/gc/vxsort/machine_traits.h b/src/coreclr/src/gc/vxsort/machine_traits.h index c947fa646a76cb..58ff2fa395c4b8 100644 --- a/src/coreclr/src/gc/vxsort/machine_traits.h +++ b/src/coreclr/src/gc/vxsort/machine_traits.h @@ -1,3 +1,7 @@ +// Licensed to the .NET Foundation under one or more agreements. +// The .NET Foundation licenses this file to you under the MIT license. +// See the LICENSE file in the project root for more information. + // // Created by dans on 6/1/20. 
// diff --git a/src/coreclr/src/gc/vxsort/packer.h b/src/coreclr/src/gc/vxsort/packer.h index 53d7a53ad4fcbc..44aeab31b0658c 100644 --- a/src/coreclr/src/gc/vxsort/packer.h +++ b/src/coreclr/src/gc/vxsort/packer.h @@ -1,3 +1,7 @@ +// Licensed to the .NET Foundation under one or more agreements. +// The .NET Foundation licenses this file to you under the MIT license. +// See the LICENSE file in the project root for more information. + #ifndef VXSORT_PACKER_H #define VXSORT_PACKER_H diff --git a/src/coreclr/src/gc/vxsort/smallsort/bitonic_sort.AVX2.int32_t.generated.cpp b/src/coreclr/src/gc/vxsort/smallsort/bitonic_sort.AVX2.int32_t.generated.cpp index 33ec43e005b39e..c73b55510c60d3 100644 --- a/src/coreclr/src/gc/vxsort/smallsort/bitonic_sort.AVX2.int32_t.generated.cpp +++ b/src/coreclr/src/gc/vxsort/smallsort/bitonic_sort.AVX2.int32_t.generated.cpp @@ -1,3 +1,7 @@ +// Licensed to the .NET Foundation under one or more agreements. +// The .NET Foundation licenses this file to you under the MIT license. +// See the LICENSE file in the project root for more information. + #include "common.h" #include "bitonic_sort.AVX2.int32_t.generated.h" diff --git a/src/coreclr/src/gc/vxsort/smallsort/bitonic_sort.AVX2.int32_t.generated.h b/src/coreclr/src/gc/vxsort/smallsort/bitonic_sort.AVX2.int32_t.generated.h index 2b5bb0e8654ce0..909d6cd8adeeb8 100644 --- a/src/coreclr/src/gc/vxsort/smallsort/bitonic_sort.AVX2.int32_t.generated.h +++ b/src/coreclr/src/gc/vxsort/smallsort/bitonic_sort.AVX2.int32_t.generated.h @@ -1,3 +1,7 @@ +// Licensed to the .NET Foundation under one or more agreements. +// The .NET Foundation licenses this file to you under the MIT license. +// See the LICENSE file in the project root for more information. + ///////////////////////////////////////////////////////////////////////////// //// // This file was auto-generated by a tool at 2020-06-22 05:27:48 diff --git a/src/coreclr/src/gc/vxsort/smallsort/bitonic_sort.AVX2.int64_t.generated.cpp b/src/coreclr/src/gc/vxsort/smallsort/bitonic_sort.AVX2.int64_t.generated.cpp index 81c308736a22a6..70e2f7eb295c21 100644 --- a/src/coreclr/src/gc/vxsort/smallsort/bitonic_sort.AVX2.int64_t.generated.cpp +++ b/src/coreclr/src/gc/vxsort/smallsort/bitonic_sort.AVX2.int64_t.generated.cpp @@ -1,3 +1,7 @@ +// Licensed to the .NET Foundation under one or more agreements. +// The .NET Foundation licenses this file to you under the MIT license. +// See the LICENSE file in the project root for more information. + #include "common.h" #include "bitonic_sort.AVX2.int64_t.generated.h" diff --git a/src/coreclr/src/gc/vxsort/smallsort/bitonic_sort.AVX2.int64_t.generated.h b/src/coreclr/src/gc/vxsort/smallsort/bitonic_sort.AVX2.int64_t.generated.h index bc42ebc91f58ca..503165f70d62c3 100644 --- a/src/coreclr/src/gc/vxsort/smallsort/bitonic_sort.AVX2.int64_t.generated.h +++ b/src/coreclr/src/gc/vxsort/smallsort/bitonic_sort.AVX2.int64_t.generated.h @@ -1,3 +1,7 @@ +// Licensed to the .NET Foundation under one or more agreements. +// The .NET Foundation licenses this file to you under the MIT license. +// See the LICENSE file in the project root for more information. 
+ ///////////////////////////////////////////////////////////////////////////// //// // This file was auto-generated by a tool at 2020-06-22 05:27:48 diff --git a/src/coreclr/src/gc/vxsort/smallsort/bitonic_sort.AVX512.int32_t.generated.cpp b/src/coreclr/src/gc/vxsort/smallsort/bitonic_sort.AVX512.int32_t.generated.cpp index 99dbb9efa4068f..9c4a07f6292a4c 100644 --- a/src/coreclr/src/gc/vxsort/smallsort/bitonic_sort.AVX512.int32_t.generated.cpp +++ b/src/coreclr/src/gc/vxsort/smallsort/bitonic_sort.AVX512.int32_t.generated.cpp @@ -1,3 +1,7 @@ +// Licensed to the .NET Foundation under one or more agreements. +// The .NET Foundation licenses this file to you under the MIT license. +// See the LICENSE file in the project root for more information. + #include "common.h" #include "bitonic_sort.AVX512.int32_t.generated.h" diff --git a/src/coreclr/src/gc/vxsort/smallsort/bitonic_sort.AVX512.int32_t.generated.h b/src/coreclr/src/gc/vxsort/smallsort/bitonic_sort.AVX512.int32_t.generated.h index 310dcbf0b88783..3dc239a530580d 100644 --- a/src/coreclr/src/gc/vxsort/smallsort/bitonic_sort.AVX512.int32_t.generated.h +++ b/src/coreclr/src/gc/vxsort/smallsort/bitonic_sort.AVX512.int32_t.generated.h @@ -1,3 +1,7 @@ +// Licensed to the .NET Foundation under one or more agreements. +// The .NET Foundation licenses this file to you under the MIT license. +// See the LICENSE file in the project root for more information. + ///////////////////////////////////////////////////////////////////////////// //// // This file was auto-generated by a tool at 2020-06-22 05:27:48 diff --git a/src/coreclr/src/gc/vxsort/smallsort/bitonic_sort.AVX512.int64_t.generated.cpp b/src/coreclr/src/gc/vxsort/smallsort/bitonic_sort.AVX512.int64_t.generated.cpp index 42e26f9c383511..e0fc247cac278c 100644 --- a/src/coreclr/src/gc/vxsort/smallsort/bitonic_sort.AVX512.int64_t.generated.cpp +++ b/src/coreclr/src/gc/vxsort/smallsort/bitonic_sort.AVX512.int64_t.generated.cpp @@ -1,3 +1,7 @@ +// Licensed to the .NET Foundation under one or more agreements. +// The .NET Foundation licenses this file to you under the MIT license. +// See the LICENSE file in the project root for more information. + #include "common.h" #include "bitonic_sort.AVX512.int64_t.generated.h" diff --git a/src/coreclr/src/gc/vxsort/smallsort/bitonic_sort.AVX512.int64_t.generated.h b/src/coreclr/src/gc/vxsort/smallsort/bitonic_sort.AVX512.int64_t.generated.h index 2457e46fc28b03..e2034f8075640c 100644 --- a/src/coreclr/src/gc/vxsort/smallsort/bitonic_sort.AVX512.int64_t.generated.h +++ b/src/coreclr/src/gc/vxsort/smallsort/bitonic_sort.AVX512.int64_t.generated.h @@ -1,3 +1,7 @@ +// Licensed to the .NET Foundation under one or more agreements. +// The .NET Foundation licenses this file to you under the MIT license. +// See the LICENSE file in the project root for more information. + ///////////////////////////////////////////////////////////////////////////// //// // This file was auto-generated by a tool at 2020-06-22 05:27:48 diff --git a/src/coreclr/src/gc/vxsort/smallsort/bitonic_sort.h b/src/coreclr/src/gc/vxsort/smallsort/bitonic_sort.h index 9978454431c8c7..fbf10cbadcac21 100644 --- a/src/coreclr/src/gc/vxsort/smallsort/bitonic_sort.h +++ b/src/coreclr/src/gc/vxsort/smallsort/bitonic_sort.h @@ -1,3 +1,7 @@ +// Licensed to the .NET Foundation under one or more agreements. +// The .NET Foundation licenses this file to you under the MIT license. +// See the LICENSE file in the project root for more information. 
+ #ifndef BITONIC_SORT_H #define BITONIC_SORT_H diff --git a/src/coreclr/src/gc/vxsort/smallsort/codegen/avx2.py b/src/coreclr/src/gc/vxsort/smallsort/codegen/avx2.py index 1e49c7e47db217..dbb928795ff2fe 100644 --- a/src/coreclr/src/gc/vxsort/smallsort/codegen/avx2.py +++ b/src/coreclr/src/gc/vxsort/smallsort/codegen/avx2.py @@ -1,3 +1,9 @@ +## +## Licensed to the .NET Foundation under one or more agreements. +## The .NET Foundation licenses this file to you under the MIT license. +## See the LICENSE file in the project root for more information. +## + import os from datetime import datetime diff --git a/src/coreclr/src/gc/vxsort/smallsort/codegen/avx512.py b/src/coreclr/src/gc/vxsort/smallsort/codegen/avx512.py index ffbf612b00b023..f2f7a73557957b 100644 --- a/src/coreclr/src/gc/vxsort/smallsort/codegen/avx512.py +++ b/src/coreclr/src/gc/vxsort/smallsort/codegen/avx512.py @@ -1,3 +1,9 @@ +## +## Licensed to the .NET Foundation under one or more agreements. +## The .NET Foundation licenses this file to you under the MIT license. +## See the LICENSE file in the project root for more information. +## + from datetime import datetime from utils import native_size_map, next_power_of_2 diff --git a/src/coreclr/src/gc/vxsort/smallsort/codegen/bitonic_gen.py b/src/coreclr/src/gc/vxsort/smallsort/codegen/bitonic_gen.py index 47e7f8f6756be8..1e7ce947aca8be 100644 --- a/src/coreclr/src/gc/vxsort/smallsort/codegen/bitonic_gen.py +++ b/src/coreclr/src/gc/vxsort/smallsort/codegen/bitonic_gen.py @@ -1,3 +1,9 @@ +## +## Licensed to the .NET Foundation under one or more agreements. +## The .NET Foundation licenses this file to you under the MIT license. +## See the LICENSE file in the project root for more information. +## + #!/usr/bin/env python3 import argparse import os diff --git a/src/coreclr/src/gc/vxsort/smallsort/codegen/bitonic_isa.py b/src/coreclr/src/gc/vxsort/smallsort/codegen/bitonic_isa.py index 1356a24637eb7b..10a1409219fbd0 100644 --- a/src/coreclr/src/gc/vxsort/smallsort/codegen/bitonic_isa.py +++ b/src/coreclr/src/gc/vxsort/smallsort/codegen/bitonic_isa.py @@ -1,3 +1,9 @@ +## +## Licensed to the .NET Foundation under one or more agreements. +## The .NET Foundation licenses this file to you under the MIT license. +## See the LICENSE file in the project root for more information. +## + from abc import ABC, ABCMeta, abstractmethod from utils import next_power_of_2 diff --git a/src/coreclr/src/gc/vxsort/smallsort/codegen/utils.py b/src/coreclr/src/gc/vxsort/smallsort/codegen/utils.py index af53207dc563ab..99a82f10e5efba 100644 --- a/src/coreclr/src/gc/vxsort/smallsort/codegen/utils.py +++ b/src/coreclr/src/gc/vxsort/smallsort/codegen/utils.py @@ -1,3 +1,9 @@ +## +## Licensed to the .NET Foundation under one or more agreements. +## The .NET Foundation licenses this file to you under the MIT license. +## See the LICENSE file in the project root for more information. +## + native_size_map = { "int32_t": 4, "uint32_t": 4, diff --git a/src/coreclr/src/gc/vxsort/vxsort.h b/src/coreclr/src/gc/vxsort/vxsort.h index 96668e458ee4d1..ab4fd776d3f18a 100644 --- a/src/coreclr/src/gc/vxsort/vxsort.h +++ b/src/coreclr/src/gc/vxsort/vxsort.h @@ -1,3 +1,7 @@ +// Licensed to the .NET Foundation under one or more agreements. +// The .NET Foundation licenses this file to you under the MIT license. +// See the LICENSE file in the project root for more information. 
+ #ifndef VXSORT_VXSORT_H #define VXSORT_VXSORT_H diff --git a/src/coreclr/src/gc/vxsort/vxsort_targets_disable.h b/src/coreclr/src/gc/vxsort/vxsort_targets_disable.h index 56fd0ff02325b9..afea66021809cc 100644 --- a/src/coreclr/src/gc/vxsort/vxsort_targets_disable.h +++ b/src/coreclr/src/gc/vxsort/vxsort_targets_disable.h @@ -1,3 +1,7 @@ +// Licensed to the .NET Foundation under one or more agreements. +// The .NET Foundation licenses this file to you under the MIT license. +// See the LICENSE file in the project root for more information. + #ifdef __GNUC__ #ifdef __clang__ @@ -5,4 +9,4 @@ #else #pragma GCC pop_options #endif -#endif \ No newline at end of file +#endif diff --git a/src/coreclr/src/gc/vxsort/vxsort_targets_enable_avx2.h b/src/coreclr/src/gc/vxsort/vxsort_targets_enable_avx2.h index b29cd1ae0aa774..a379304a0aa76e 100644 --- a/src/coreclr/src/gc/vxsort/vxsort_targets_enable_avx2.h +++ b/src/coreclr/src/gc/vxsort/vxsort_targets_enable_avx2.h @@ -1,3 +1,7 @@ +// Licensed to the .NET Foundation under one or more agreements. +// The .NET Foundation licenses this file to you under the MIT license. +// See the LICENSE file in the project root for more information. + #ifdef __GNUC__ #ifdef __clang__ #pragma clang attribute push (__attribute__((target("avx2"))), apply_to = any(function)) diff --git a/src/coreclr/src/gc/vxsort/vxsort_targets_enable_avx512.h b/src/coreclr/src/gc/vxsort/vxsort_targets_enable_avx512.h index fb6930a3014565..5ffcde88f91f1a 100644 --- a/src/coreclr/src/gc/vxsort/vxsort_targets_enable_avx512.h +++ b/src/coreclr/src/gc/vxsort/vxsort_targets_enable_avx512.h @@ -1,3 +1,7 @@ +// Licensed to the .NET Foundation under one or more agreements. +// The .NET Foundation licenses this file to you under the MIT license. +// See the LICENSE file in the project root for more information. 
+ #ifdef __GNUC__ #ifdef __clang__ #pragma clang attribute push (__attribute__((target("avx512f"))), apply_to = any(function)) From c3a49742e283be84d26b3941b1daf8ccd0508685 Mon Sep 17 00:00:00 2001 From: Peter Sollich Date: Wed, 8 Jul 2020 17:27:25 +0200 Subject: [PATCH 25/31] Update license headers --- src/coreclr/src/gc/vxsort/alignment.h | 1 - src/coreclr/src/gc/vxsort/defs.h | 1 - src/coreclr/src/gc/vxsort/do_vxsort.h | 1 - src/coreclr/src/gc/vxsort/do_vxsort_avx2.cpp | 1 - src/coreclr/src/gc/vxsort/do_vxsort_avx512.cpp | 1 - src/coreclr/src/gc/vxsort/isa_detection.cpp | 1 - src/coreclr/src/gc/vxsort/isa_detection_dummy.cpp | 1 - src/coreclr/src/gc/vxsort/machine_traits.avx2.cpp | 1 - src/coreclr/src/gc/vxsort/machine_traits.avx2.h | 1 - src/coreclr/src/gc/vxsort/machine_traits.avx512.h | 1 - src/coreclr/src/gc/vxsort/machine_traits.h | 1 - src/coreclr/src/gc/vxsort/packer.h | 1 - .../gc/vxsort/smallsort/bitonic_sort.AVX2.int32_t.generated.cpp | 1 - .../gc/vxsort/smallsort/bitonic_sort.AVX2.int32_t.generated.h | 1 - .../gc/vxsort/smallsort/bitonic_sort.AVX2.int64_t.generated.cpp | 1 - .../gc/vxsort/smallsort/bitonic_sort.AVX2.int64_t.generated.h | 1 - .../vxsort/smallsort/bitonic_sort.AVX512.int32_t.generated.cpp | 1 - .../gc/vxsort/smallsort/bitonic_sort.AVX512.int32_t.generated.h | 1 - .../vxsort/smallsort/bitonic_sort.AVX512.int64_t.generated.cpp | 1 - .../gc/vxsort/smallsort/bitonic_sort.AVX512.int64_t.generated.h | 1 - src/coreclr/src/gc/vxsort/smallsort/bitonic_sort.h | 1 - src/coreclr/src/gc/vxsort/smallsort/codegen/avx2.py | 1 - src/coreclr/src/gc/vxsort/smallsort/codegen/avx512.py | 1 - src/coreclr/src/gc/vxsort/smallsort/codegen/bitonic_gen.py | 1 - src/coreclr/src/gc/vxsort/smallsort/codegen/bitonic_isa.py | 1 - src/coreclr/src/gc/vxsort/smallsort/codegen/utils.py | 1 - src/coreclr/src/gc/vxsort/vxsort.h | 1 - src/coreclr/src/gc/vxsort/vxsort_targets_disable.h | 1 - src/coreclr/src/gc/vxsort/vxsort_targets_enable_avx2.h | 1 - src/coreclr/src/gc/vxsort/vxsort_targets_enable_avx512.h | 1 - 30 files changed, 30 deletions(-) diff --git a/src/coreclr/src/gc/vxsort/alignment.h b/src/coreclr/src/gc/vxsort/alignment.h index ea4e4c03346b37..df61c3a30f4109 100644 --- a/src/coreclr/src/gc/vxsort/alignment.h +++ b/src/coreclr/src/gc/vxsort/alignment.h @@ -1,6 +1,5 @@ // Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. -// See the LICENSE file in the project root for more information. #ifndef VXSORT_ALIGNNMENT_H #define VXSORT_ALIGNNMENT_H diff --git a/src/coreclr/src/gc/vxsort/defs.h b/src/coreclr/src/gc/vxsort/defs.h index 3ae328e943f5d0..628315e5110a6b 100644 --- a/src/coreclr/src/gc/vxsort/defs.h +++ b/src/coreclr/src/gc/vxsort/defs.h @@ -1,6 +1,5 @@ // Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. -// See the LICENSE file in the project root for more information. #ifndef VXSORT_DEFS_H #define VXSORT_DEFS_H diff --git a/src/coreclr/src/gc/vxsort/do_vxsort.h b/src/coreclr/src/gc/vxsort/do_vxsort.h index 302a7bc3406cb1..50a5e1ef77a77d 100644 --- a/src/coreclr/src/gc/vxsort/do_vxsort.h +++ b/src/coreclr/src/gc/vxsort/do_vxsort.h @@ -1,6 +1,5 @@ // Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. -// See the LICENSE file in the project root for more information. 
// Enum for the IsSupportedInstructionSet method enum class InstructionSet diff --git a/src/coreclr/src/gc/vxsort/do_vxsort_avx2.cpp b/src/coreclr/src/gc/vxsort/do_vxsort_avx2.cpp index dcbb3280c3d4b6..3e4fd10d15f48d 100644 --- a/src/coreclr/src/gc/vxsort/do_vxsort_avx2.cpp +++ b/src/coreclr/src/gc/vxsort/do_vxsort_avx2.cpp @@ -1,6 +1,5 @@ // Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. -// See the LICENSE file in the project root for more information. #include "common.h" diff --git a/src/coreclr/src/gc/vxsort/do_vxsort_avx512.cpp b/src/coreclr/src/gc/vxsort/do_vxsort_avx512.cpp index 623558ba2512a2..aa0a8f99442e8b 100644 --- a/src/coreclr/src/gc/vxsort/do_vxsort_avx512.cpp +++ b/src/coreclr/src/gc/vxsort/do_vxsort_avx512.cpp @@ -1,6 +1,5 @@ // Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. -// See the LICENSE file in the project root for more information. #include "common.h" diff --git a/src/coreclr/src/gc/vxsort/isa_detection.cpp b/src/coreclr/src/gc/vxsort/isa_detection.cpp index 3ebd4fbb244094..18d19db32c3aad 100644 --- a/src/coreclr/src/gc/vxsort/isa_detection.cpp +++ b/src/coreclr/src/gc/vxsort/isa_detection.cpp @@ -1,6 +1,5 @@ // Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. -// See the LICENSE file in the project root for more information. #include "common.h" #include diff --git a/src/coreclr/src/gc/vxsort/isa_detection_dummy.cpp b/src/coreclr/src/gc/vxsort/isa_detection_dummy.cpp index c25bc54a7f1fd3..fd52b7831ba024 100644 --- a/src/coreclr/src/gc/vxsort/isa_detection_dummy.cpp +++ b/src/coreclr/src/gc/vxsort/isa_detection_dummy.cpp @@ -1,6 +1,5 @@ // Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. -// See the LICENSE file in the project root for more information. #include "common.h" #include diff --git a/src/coreclr/src/gc/vxsort/machine_traits.avx2.cpp b/src/coreclr/src/gc/vxsort/machine_traits.avx2.cpp index 9e0741d605fd49..d693d08ea41409 100644 --- a/src/coreclr/src/gc/vxsort/machine_traits.avx2.cpp +++ b/src/coreclr/src/gc/vxsort/machine_traits.avx2.cpp @@ -1,6 +1,5 @@ // Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. -// See the LICENSE file in the project root for more information. #include "common.h" //#include diff --git a/src/coreclr/src/gc/vxsort/machine_traits.avx2.h b/src/coreclr/src/gc/vxsort/machine_traits.avx2.h index 63ad17ced9b39c..c1f4a52e7c9ff2 100644 --- a/src/coreclr/src/gc/vxsort/machine_traits.avx2.h +++ b/src/coreclr/src/gc/vxsort/machine_traits.avx2.h @@ -1,6 +1,5 @@ // Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. -// See the LICENSE file in the project root for more information. // // Created by dans on 6/1/20. diff --git a/src/coreclr/src/gc/vxsort/machine_traits.avx512.h b/src/coreclr/src/gc/vxsort/machine_traits.avx512.h index ac6eeb05bf2249..bcecb76e2953d1 100644 --- a/src/coreclr/src/gc/vxsort/machine_traits.avx512.h +++ b/src/coreclr/src/gc/vxsort/machine_traits.avx512.h @@ -1,6 +1,5 @@ // Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. 
-// See the LICENSE file in the project root for more information. // // Created by dans on 6/1/20. diff --git a/src/coreclr/src/gc/vxsort/machine_traits.h b/src/coreclr/src/gc/vxsort/machine_traits.h index 58ff2fa395c4b8..cd31ed365777a1 100644 --- a/src/coreclr/src/gc/vxsort/machine_traits.h +++ b/src/coreclr/src/gc/vxsort/machine_traits.h @@ -1,6 +1,5 @@ // Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. -// See the LICENSE file in the project root for more information. // // Created by dans on 6/1/20. diff --git a/src/coreclr/src/gc/vxsort/packer.h b/src/coreclr/src/gc/vxsort/packer.h index 44aeab31b0658c..4c7257a58f17e4 100644 --- a/src/coreclr/src/gc/vxsort/packer.h +++ b/src/coreclr/src/gc/vxsort/packer.h @@ -1,6 +1,5 @@ // Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. -// See the LICENSE file in the project root for more information. #ifndef VXSORT_PACKER_H #define VXSORT_PACKER_H diff --git a/src/coreclr/src/gc/vxsort/smallsort/bitonic_sort.AVX2.int32_t.generated.cpp b/src/coreclr/src/gc/vxsort/smallsort/bitonic_sort.AVX2.int32_t.generated.cpp index c73b55510c60d3..17ddcd81505719 100644 --- a/src/coreclr/src/gc/vxsort/smallsort/bitonic_sort.AVX2.int32_t.generated.cpp +++ b/src/coreclr/src/gc/vxsort/smallsort/bitonic_sort.AVX2.int32_t.generated.cpp @@ -1,6 +1,5 @@ // Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. -// See the LICENSE file in the project root for more information. #include "common.h" #include "bitonic_sort.AVX2.int32_t.generated.h" diff --git a/src/coreclr/src/gc/vxsort/smallsort/bitonic_sort.AVX2.int32_t.generated.h b/src/coreclr/src/gc/vxsort/smallsort/bitonic_sort.AVX2.int32_t.generated.h index 909d6cd8adeeb8..79bdbcc870d441 100644 --- a/src/coreclr/src/gc/vxsort/smallsort/bitonic_sort.AVX2.int32_t.generated.h +++ b/src/coreclr/src/gc/vxsort/smallsort/bitonic_sort.AVX2.int32_t.generated.h @@ -1,6 +1,5 @@ // Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. -// See the LICENSE file in the project root for more information. ///////////////////////////////////////////////////////////////////////////// //// diff --git a/src/coreclr/src/gc/vxsort/smallsort/bitonic_sort.AVX2.int64_t.generated.cpp b/src/coreclr/src/gc/vxsort/smallsort/bitonic_sort.AVX2.int64_t.generated.cpp index 70e2f7eb295c21..00360ae70f0c0e 100644 --- a/src/coreclr/src/gc/vxsort/smallsort/bitonic_sort.AVX2.int64_t.generated.cpp +++ b/src/coreclr/src/gc/vxsort/smallsort/bitonic_sort.AVX2.int64_t.generated.cpp @@ -1,6 +1,5 @@ // Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. -// See the LICENSE file in the project root for more information. #include "common.h" #include "bitonic_sort.AVX2.int64_t.generated.h" diff --git a/src/coreclr/src/gc/vxsort/smallsort/bitonic_sort.AVX2.int64_t.generated.h b/src/coreclr/src/gc/vxsort/smallsort/bitonic_sort.AVX2.int64_t.generated.h index 503165f70d62c3..5e9d2fea0dcf09 100644 --- a/src/coreclr/src/gc/vxsort/smallsort/bitonic_sort.AVX2.int64_t.generated.h +++ b/src/coreclr/src/gc/vxsort/smallsort/bitonic_sort.AVX2.int64_t.generated.h @@ -1,6 +1,5 @@ // Licensed to the .NET Foundation under one or more agreements. 
// The .NET Foundation licenses this file to you under the MIT license. -// See the LICENSE file in the project root for more information. ///////////////////////////////////////////////////////////////////////////// //// diff --git a/src/coreclr/src/gc/vxsort/smallsort/bitonic_sort.AVX512.int32_t.generated.cpp b/src/coreclr/src/gc/vxsort/smallsort/bitonic_sort.AVX512.int32_t.generated.cpp index 9c4a07f6292a4c..9efdf598ea49ce 100644 --- a/src/coreclr/src/gc/vxsort/smallsort/bitonic_sort.AVX512.int32_t.generated.cpp +++ b/src/coreclr/src/gc/vxsort/smallsort/bitonic_sort.AVX512.int32_t.generated.cpp @@ -1,6 +1,5 @@ // Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. -// See the LICENSE file in the project root for more information. #include "common.h" #include "bitonic_sort.AVX512.int32_t.generated.h" diff --git a/src/coreclr/src/gc/vxsort/smallsort/bitonic_sort.AVX512.int32_t.generated.h b/src/coreclr/src/gc/vxsort/smallsort/bitonic_sort.AVX512.int32_t.generated.h index 3dc239a530580d..21c992c3e0df32 100644 --- a/src/coreclr/src/gc/vxsort/smallsort/bitonic_sort.AVX512.int32_t.generated.h +++ b/src/coreclr/src/gc/vxsort/smallsort/bitonic_sort.AVX512.int32_t.generated.h @@ -1,6 +1,5 @@ // Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. -// See the LICENSE file in the project root for more information. ///////////////////////////////////////////////////////////////////////////// //// diff --git a/src/coreclr/src/gc/vxsort/smallsort/bitonic_sort.AVX512.int64_t.generated.cpp b/src/coreclr/src/gc/vxsort/smallsort/bitonic_sort.AVX512.int64_t.generated.cpp index e0fc247cac278c..cf8b62809b368a 100644 --- a/src/coreclr/src/gc/vxsort/smallsort/bitonic_sort.AVX512.int64_t.generated.cpp +++ b/src/coreclr/src/gc/vxsort/smallsort/bitonic_sort.AVX512.int64_t.generated.cpp @@ -1,6 +1,5 @@ // Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. -// See the LICENSE file in the project root for more information. #include "common.h" #include "bitonic_sort.AVX512.int64_t.generated.h" diff --git a/src/coreclr/src/gc/vxsort/smallsort/bitonic_sort.AVX512.int64_t.generated.h b/src/coreclr/src/gc/vxsort/smallsort/bitonic_sort.AVX512.int64_t.generated.h index e2034f8075640c..483cf5a1e15853 100644 --- a/src/coreclr/src/gc/vxsort/smallsort/bitonic_sort.AVX512.int64_t.generated.h +++ b/src/coreclr/src/gc/vxsort/smallsort/bitonic_sort.AVX512.int64_t.generated.h @@ -1,6 +1,5 @@ // Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. -// See the LICENSE file in the project root for more information. ///////////////////////////////////////////////////////////////////////////// //// diff --git a/src/coreclr/src/gc/vxsort/smallsort/bitonic_sort.h b/src/coreclr/src/gc/vxsort/smallsort/bitonic_sort.h index fbf10cbadcac21..0e87b374226642 100644 --- a/src/coreclr/src/gc/vxsort/smallsort/bitonic_sort.h +++ b/src/coreclr/src/gc/vxsort/smallsort/bitonic_sort.h @@ -1,6 +1,5 @@ // Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. -// See the LICENSE file in the project root for more information. 
#ifndef BITONIC_SORT_H #define BITONIC_SORT_H diff --git a/src/coreclr/src/gc/vxsort/smallsort/codegen/avx2.py b/src/coreclr/src/gc/vxsort/smallsort/codegen/avx2.py index dbb928795ff2fe..11ab818e48a201 100644 --- a/src/coreclr/src/gc/vxsort/smallsort/codegen/avx2.py +++ b/src/coreclr/src/gc/vxsort/smallsort/codegen/avx2.py @@ -1,7 +1,6 @@ ## ## Licensed to the .NET Foundation under one or more agreements. ## The .NET Foundation licenses this file to you under the MIT license. -## See the LICENSE file in the project root for more information. ## import os diff --git a/src/coreclr/src/gc/vxsort/smallsort/codegen/avx512.py b/src/coreclr/src/gc/vxsort/smallsort/codegen/avx512.py index f2f7a73557957b..f90433c5a7418f 100644 --- a/src/coreclr/src/gc/vxsort/smallsort/codegen/avx512.py +++ b/src/coreclr/src/gc/vxsort/smallsort/codegen/avx512.py @@ -1,7 +1,6 @@ ## ## Licensed to the .NET Foundation under one or more agreements. ## The .NET Foundation licenses this file to you under the MIT license. -## See the LICENSE file in the project root for more information. ## from datetime import datetime diff --git a/src/coreclr/src/gc/vxsort/smallsort/codegen/bitonic_gen.py b/src/coreclr/src/gc/vxsort/smallsort/codegen/bitonic_gen.py index 1e7ce947aca8be..484f7fec5d9a39 100644 --- a/src/coreclr/src/gc/vxsort/smallsort/codegen/bitonic_gen.py +++ b/src/coreclr/src/gc/vxsort/smallsort/codegen/bitonic_gen.py @@ -1,7 +1,6 @@ ## ## Licensed to the .NET Foundation under one or more agreements. ## The .NET Foundation licenses this file to you under the MIT license. -## See the LICENSE file in the project root for more information. ## #!/usr/bin/env python3 diff --git a/src/coreclr/src/gc/vxsort/smallsort/codegen/bitonic_isa.py b/src/coreclr/src/gc/vxsort/smallsort/codegen/bitonic_isa.py index 10a1409219fbd0..d48d7871dedf85 100644 --- a/src/coreclr/src/gc/vxsort/smallsort/codegen/bitonic_isa.py +++ b/src/coreclr/src/gc/vxsort/smallsort/codegen/bitonic_isa.py @@ -1,7 +1,6 @@ ## ## Licensed to the .NET Foundation under one or more agreements. ## The .NET Foundation licenses this file to you under the MIT license. -## See the LICENSE file in the project root for more information. ## from abc import ABC, ABCMeta, abstractmethod diff --git a/src/coreclr/src/gc/vxsort/smallsort/codegen/utils.py b/src/coreclr/src/gc/vxsort/smallsort/codegen/utils.py index 99a82f10e5efba..e96c4e8ffbb489 100644 --- a/src/coreclr/src/gc/vxsort/smallsort/codegen/utils.py +++ b/src/coreclr/src/gc/vxsort/smallsort/codegen/utils.py @@ -1,7 +1,6 @@ ## ## Licensed to the .NET Foundation under one or more agreements. ## The .NET Foundation licenses this file to you under the MIT license. -## See the LICENSE file in the project root for more information. ## native_size_map = { diff --git a/src/coreclr/src/gc/vxsort/vxsort.h b/src/coreclr/src/gc/vxsort/vxsort.h index ab4fd776d3f18a..35812d9356f3f5 100644 --- a/src/coreclr/src/gc/vxsort/vxsort.h +++ b/src/coreclr/src/gc/vxsort/vxsort.h @@ -1,6 +1,5 @@ // Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. -// See the LICENSE file in the project root for more information. 
#ifndef VXSORT_VXSORT_H #define VXSORT_VXSORT_H diff --git a/src/coreclr/src/gc/vxsort/vxsort_targets_disable.h b/src/coreclr/src/gc/vxsort/vxsort_targets_disable.h index afea66021809cc..1c6efb1b246230 100644 --- a/src/coreclr/src/gc/vxsort/vxsort_targets_disable.h +++ b/src/coreclr/src/gc/vxsort/vxsort_targets_disable.h @@ -1,6 +1,5 @@ // Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. -// See the LICENSE file in the project root for more information. #ifdef __GNUC__ diff --git a/src/coreclr/src/gc/vxsort/vxsort_targets_enable_avx2.h b/src/coreclr/src/gc/vxsort/vxsort_targets_enable_avx2.h index a379304a0aa76e..343d7ae1850657 100644 --- a/src/coreclr/src/gc/vxsort/vxsort_targets_enable_avx2.h +++ b/src/coreclr/src/gc/vxsort/vxsort_targets_enable_avx2.h @@ -1,6 +1,5 @@ // Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. -// See the LICENSE file in the project root for more information. #ifdef __GNUC__ #ifdef __clang__ diff --git a/src/coreclr/src/gc/vxsort/vxsort_targets_enable_avx512.h b/src/coreclr/src/gc/vxsort/vxsort_targets_enable_avx512.h index 5ffcde88f91f1a..c5bfe4998a8f3c 100644 --- a/src/coreclr/src/gc/vxsort/vxsort_targets_enable_avx512.h +++ b/src/coreclr/src/gc/vxsort/vxsort_targets_enable_avx512.h @@ -1,6 +1,5 @@ // Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. -// See the LICENSE file in the project root for more information. #ifdef __GNUC__ #ifdef __clang__ From 9a0acaabd72ea7d65cf3ce1d6ef10e7b5bf0ed3b Mon Sep 17 00:00:00 2001 From: Peter Sollich Date: Fri, 10 Jul 2020 12:31:47 +0200 Subject: [PATCH 26/31] Address code review feedback: - fix typo in comment in InitSupportedInstructionSet - move test for full GC to beginning of sort_mark_list - in WKS GC, we can use the tighter range shigh - slow for the surviving objects instead of the full ephemeral range. - make the description for the new config setting GCEnabledInstructionSets more explicit by enumerating the legal values and their meanings. 
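The three legal values form a bit mask: bit 0 is AVX2 and bit 1 is AVX512F, so 3 enables both, consistent with the rule in InitSupportedInstructionSet that AVX512F is only used where AVX2 also is. For illustration, a minimal scalar model of that masking logic (the names below are simplified stand-ins, not code from this patch):

#include <cstdint>

// Bit 0 = AVX2, bit 1 = AVX512F, mirroring the config values 0, 1 and 3.
const int32_t ISA_AVX2    = 1 << 0;
const int32_t ISA_AVX512F = 1 << 1;

static int32_t compute_enabled_isas (int32_t detected, int32_t configSetting)
{
    // keep only the ISAs that are both present on the machine
    // and allowed by the config setting
    int32_t enabled = detected & configSetting;
    // AVX512F use is assumed to imply AVX2 use, so if AVX2 is
    // masked off, AVX512F has to be dropped as well
    if ((enabled & ISA_AVX2) == 0)
        enabled = 0;
    return enabled;
}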
--- src/coreclr/src/gc/gc.cpp | 11 ++++++++--- src/coreclr/src/gc/gcconfig.h | 2 +- src/coreclr/src/gc/vxsort/isa_detection.cpp | 2 +- 3 files changed, 10 insertions(+), 5 deletions(-) diff --git a/src/coreclr/src/gc/gc.cpp b/src/coreclr/src/gc/gc.cpp index f15046398b26e4..9e68050b436b13 100644 --- a/src/coreclr/src/gc/gc.cpp +++ b/src/coreclr/src/gc/gc.cpp @@ -8315,6 +8315,11 @@ inline static void swap_elements(uint8_t** i,uint8_t** j) NOINLINE void gc_heap::sort_mark_list() { + if (settings.condemned_generation >= max_generation) + { + return; + } + // if this heap had a mark list overflow, we don't do anything if (mark_list_index > mark_list_end) { @@ -8363,7 +8368,7 @@ void gc_heap::sort_mark_list() } // give up if this is not an ephemeral GC or the mark list size is unreasonably large - if (settings.condemned_generation > 1 || total_mark_list_size > total_ephemeral_size/256) + if (total_mark_list_size > total_ephemeral_size/256) { mark_list_index = mark_list_end + 1; // let's not count this as a mark list overflow @@ -22313,8 +22318,8 @@ void gc_heap::plan_phase (int condemned_gen_number) if (entry_count > AVX2_THRESHOLD_SIZE && IsSupportedInstructionSet (InstructionSet::AVX2)) { int32_t* mark_list_32 = (int32_t*)mark_list; - uint8_t* low = gc_low; - ptrdiff_t range = heap_segment_allocated (ephemeral_heap_segment) - low; + uint8_t* low = slow; + ptrdiff_t range = shigh - low; if ((uint32_t)range == range) { do_pack_avx2 (mark_list, entry_count, low); diff --git a/src/coreclr/src/gc/gcconfig.h b/src/coreclr/src/gc/gcconfig.h index 053a54f0010c7c..850a0c3f922d34 100644 --- a/src/coreclr/src/gc/gcconfig.h +++ b/src/coreclr/src/gc/gcconfig.h @@ -129,7 +129,7 @@ class GCConfigStringHolder INT_CONFIG (GCHeapHardLimitSOHPercent, "GCHeapHardLimitSOHPercent", NULL, 0, "Specifies the GC heap SOH usage as a percentage of the total memory") \ INT_CONFIG (GCHeapHardLimitLOHPercent, "GCHeapHardLimitLOHPercent", NULL, 0, "Specifies the GC heap LOH usage as a percentage of the total memory") \ INT_CONFIG (GCHeapHardLimitPOHPercent, "GCHeapHardLimitPOHPercent", NULL, 0, "Specifies the GC heap POH usage as a percentage of the total memory") \ - INT_CONFIG (GCEnabledInstructionSets, "GCEnabledInstructionSets", NULL, -1, "Specifies whether GC can use AVX2 or AVX512F") \ + INT_CONFIG (GCEnabledInstructionSets, "GCEnabledInstructionSets", NULL, -1, "Specifies whether GC can use AVX2 or AVX512F - 0 for neither, 1 for AVX2, 3 for AVX512F")\ // This class is responsible for retreiving configuration information // for how the GC should operate. 
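The ((uint32_t)range == range) guard above is what makes the do_pack_avx2/do_unpack_avx2 calls legal, both here and in sort_mark_list earlier in the series: once every surviving pointer is known to lie in a 4 GB window above low (now the tighter slow..shigh window for workstation GC), each 8-byte pointer can be rewritten in place as a 4-byte offset and sorted at twice the density. A scalar sketch of the transformation, ignoring the vectorization and any alignment-based tricks the real packer may apply (the helper names below are hypothetical):

#include <cstdint>
#include <cstddef>

// Scalar model of do_pack_avx2: rewrite each pointer, in place, as a
// 32-bit offset from 'low'. Iterating forward is safe because the
// 4-byte write at index i never overtakes the 8-byte read at index i.
static void pack_scalar (uint8_t** mark_list, size_t count, uint8_t* low)
{
    int32_t* mark_list_32 = (int32_t*)mark_list;
    for (size_t i = 0; i < count; i++)
        mark_list_32[i] = (int32_t)(mark_list[i] - low);
}

// Scalar model of do_unpack_avx2: widen the offsets back into pointers.
// This direction must run backward so the 8-byte writes do not clobber
// offsets that are still unread. The uint32_t cast zero-extends, since
// offsets close to 4 GB have the top bit set.
static void unpack_scalar (uint8_t** mark_list, size_t count, uint8_t* low)
{
    int32_t* mark_list_32 = (int32_t*)mark_list;
    for (size_t i = count; i-- > 0; )
        mark_list[i] = low + (uint32_t)mark_list_32[i];
}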
diff --git a/src/coreclr/src/gc/vxsort/isa_detection.cpp b/src/coreclr/src/gc/vxsort/isa_detection.cpp index 18d19db32c3aad..ac469a615ddedd 100644 --- a/src/coreclr/src/gc/vxsort/isa_detection.cpp +++ b/src/coreclr/src/gc/vxsort/isa_detection.cpp @@ -132,7 +132,7 @@ bool IsSupportedInstructionSet (InstructionSet instructionSet) void InitSupportedInstructionSet (int32_t configSetting) { s_supportedISA = (SupportedISA)((int)DetermineSupportedISA() & configSetting); - // we are assuming that AVX2 can be used if AVX521F can, + // we are assuming that AVX2 can be used if AVX512F can, // so if AVX2 is disabled, we need to disable AVX512F as well if (!((int)s_supportedISA & (int)SupportedISA::AVX2)) s_supportedISA = SupportedISA::None; From 515cae64c9e12fd43ead1ec04208833a5564c62b Mon Sep 17 00:00:00 2001 From: Peter Sollich Date: Fri, 10 Jul 2020 13:03:04 +0200 Subject: [PATCH 27/31] Snapshot for Linux changes --- src/coreclr/src/gc/CMakeLists.txt | 2 +- src/coreclr/src/gc/gc.cpp | 2 +- src/coreclr/src/gc/gcsvr.cpp | 2 +- src/coreclr/src/gc/gcwks.cpp | 2 +- src/coreclr/src/gc/sample/CMakeLists.txt | 2 +- .../src/gc/vxsort/isa_detection_dummy.cpp | 1 - .../src/gc/vxsort/machine_traits.avx2.h | 12 +- .../src/gc/vxsort/machine_traits.avx512.h | 12 +- src/coreclr/src/pal/inc/rt/cpp/immintrin.h | 1291 +++++++++++++++++ src/coreclr/src/vm/CMakeLists.txt | 2 +- 10 files changed, 1319 insertions(+), 9 deletions(-) create mode 100644 src/coreclr/src/pal/inc/rt/cpp/immintrin.h diff --git a/src/coreclr/src/gc/CMakeLists.txt b/src/coreclr/src/gc/CMakeLists.txt index 61f75f4b71917a..921214168ea725 100644 --- a/src/coreclr/src/gc/CMakeLists.txt +++ b/src/coreclr/src/gc/CMakeLists.txt @@ -39,7 +39,7 @@ else() windows/gcenv.windows.cpp) endif(CLR_CMAKE_HOST_UNIX) -if (CLR_CMAKE_TARGET_WIN32 AND CLR_CMAKE_TARGET_ARCH_AMD64) +if (CLR_CMAKE_TARGET_ARCH_AMD64) set ( GC_SOURCES ${GC_SOURCES} vxsort/isa_detection_dummy.cpp diff --git a/src/coreclr/src/gc/gc.cpp b/src/coreclr/src/gc/gc.cpp index bc5b368fd95254..a2a3afd3389e1c 100644 --- a/src/coreclr/src/gc/gc.cpp +++ b/src/coreclr/src/gc/gc.cpp @@ -19,7 +19,7 @@ #include "gcpriv.h" -#if defined(TARGET_AMD64) && defined(TARGET_WINDOWS) +#if defined(TARGET_AMD64) #define USE_VXSORT #else #define USE_INTROSORT diff --git a/src/coreclr/src/gc/gcsvr.cpp b/src/coreclr/src/gc/gcsvr.cpp index 8cdef316ea953a..34d006887a5bda 100644 --- a/src/coreclr/src/gc/gcsvr.cpp +++ b/src/coreclr/src/gc/gcsvr.cpp @@ -21,7 +21,7 @@ #define SERVER_GC 1 -#if defined(TARGET_AMD64) && defined(TARGET_WINDOWS) +#if defined(TARGET_AMD64) #include "vxsort/do_vxsort.h" #endif diff --git a/src/coreclr/src/gc/gcwks.cpp b/src/coreclr/src/gc/gcwks.cpp index 531e8a0afdf283..50264323d6c077 100644 --- a/src/coreclr/src/gc/gcwks.cpp +++ b/src/coreclr/src/gc/gcwks.cpp @@ -21,7 +21,7 @@ #undef SERVER_GC #endif -#if defined(TARGET_AMD64) && defined(TARGET_WINDOWS) +#if defined(TARGET_AMD64) #include "vxsort/do_vxsort.h" #endif diff --git a/src/coreclr/src/gc/sample/CMakeLists.txt b/src/coreclr/src/gc/sample/CMakeLists.txt index 224e4cadd69726..297db2fed38c16 100644 --- a/src/coreclr/src/gc/sample/CMakeLists.txt +++ b/src/coreclr/src/gc/sample/CMakeLists.txt @@ -24,7 +24,7 @@ set(SOURCES ../softwarewritewatch.cpp ) -if (CLR_CMAKE_TARGET_WIN32 AND CLR_CMAKE_TARGET_ARCH_AMD64) +if (CLR_CMAKE_TARGET_ARCH_AMD64) set ( SOURCES ${SOURCES} ../vxsort/isa_detection_dummy.cpp diff --git a/src/coreclr/src/gc/vxsort/isa_detection_dummy.cpp b/src/coreclr/src/gc/vxsort/isa_detection_dummy.cpp index 
9049ec60c69c75..3e18aa66d5e58c 100644 --- a/src/coreclr/src/gc/vxsort/isa_detection_dummy.cpp +++ b/src/coreclr/src/gc/vxsort/isa_detection_dummy.cpp @@ -1,7 +1,6 @@ // ISA_Detection.cpp : Diese Datei enthält die Funktion "main". Hier beginnt und endet die Ausführung des Programms. // #include "common.h" -#include #include "do_vxsort.h" diff --git a/src/coreclr/src/gc/vxsort/machine_traits.avx2.h b/src/coreclr/src/gc/vxsort/machine_traits.avx2.h index 8eb068a704cb02..1eed2d2e873539 100644 --- a/src/coreclr/src/gc/vxsort/machine_traits.avx2.h +++ b/src/coreclr/src/gc/vxsort/machine_traits.avx2.h @@ -30,6 +30,12 @@ static void not_supported() assert(!"operation is unsupported"); } +#ifdef _DEBUG +// in _DEBUG, we #define return to be something more complicated, +// containing a statement, so #define away constexpr for _DEBUG +#define constexpr +#endif //_DEBUG + template <> class vxsort_machine_traits { public: @@ -136,7 +142,7 @@ class vxsort_machine_traits { typedef __m256i TV; typedef uint32_t TMASK; - static constexpr bool supports_compress_writes() { return false; } + static bool supports_compress_writes() { return false; } static INLINE TV load_vec(TV* p) { return _mm256_lddqu_si256(p); @@ -237,6 +243,10 @@ class vxsort_machine_traits { #undef s2d #undef d2s +#ifdef _DEBUG +#undef constexpr +#endif //_DEBUG + #include "vxsort_targets_disable.h" diff --git a/src/coreclr/src/gc/vxsort/machine_traits.avx512.h b/src/coreclr/src/gc/vxsort/machine_traits.avx512.h index fcf15c6974ef89..077dfcca9245c5 100644 --- a/src/coreclr/src/gc/vxsort/machine_traits.avx512.h +++ b/src/coreclr/src/gc/vxsort/machine_traits.avx512.h @@ -11,6 +11,12 @@ #include "defs.h" #include "machine_traits.h" +#ifdef _DEBUG +// in _DEBUG, we #define return to be something more complicated, +// containing a statement, so #define away constexpr for _DEBUG +#define constexpr +#endif //_DEBUG + namespace vxsort { template <> class vxsort_machine_traits { @@ -117,7 +123,7 @@ class vxsort_machine_traits { typedef __m512i TV; typedef __mmask8 TMASK; - static constexpr bool supports_compress_writes() { return true; } + static bool supports_compress_writes() { return true; } static INLINE TV load_vec(TV* p) { return _mm512_loadu_si512(p); @@ -212,6 +218,10 @@ class vxsort_machine_traits { } +#ifdef _DEBUG +#undef constexpr +#endif //_DEBUG + #include "vxsort_targets_disable.h" #endif // VXSORT_VXSORT_AVX512_H diff --git a/src/coreclr/src/pal/inc/rt/cpp/immintrin.h b/src/coreclr/src/pal/inc/rt/cpp/immintrin.h new file mode 100644 index 00000000000000..7d9b02332a1868 --- /dev/null +++ b/src/coreclr/src/pal/inc/rt/cpp/immintrin.h @@ -0,0 +1,1291 @@ +// Licensed to the .NET Foundation under one or more agreements. +// The .NET Foundation licenses this file to you under the MIT license. +// See the LICENSE file in the project root for more information. + +#ifndef _IMMINTRIN_H +#define _IMMINTRIN_H + +/*===---- avxintrin.h - AVX intrinsics -------------------------------------=== + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. 
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ + +typedef double __v4df __attribute__ ((__vector_size__ (32))); +typedef long long __v4di __attribute__ ((__vector_size__ (32))); +typedef float __v8sf __attribute__ ((__vector_size__ (32))); +typedef unsigned char __v16qu __attribute__((__vector_size__(16))); +typedef unsigned long long __v4du __attribute__ ((__vector_size__ (32))); + +typedef float __m256 __attribute__ ((__vector_size__ (32), __aligned__(32))); +typedef double __m256d __attribute__((__vector_size__(32), __aligned__(32))); +typedef float __m256_u __attribute__ ((__vector_size__ (32), __aligned__(1))); +typedef long long __m256i __attribute__((__vector_size__(32), __aligned__(32))); +typedef double __m256d_u __attribute__((__vector_size__(32), __aligned__(1))); +typedef long long __m256i_u __attribute__((__vector_size__(32), __aligned__(1))); +typedef int __v8si __attribute__ ((__vector_size__ (32))); + +/* Define the default attributes for the functions in this file. */ +#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("avx"), __min_vector_width__(256))) +#define __DEFAULT_FN_ATTRS128 __attribute__((__always_inline__, __nodebug__, __target__("avx"), __min_vector_width__(128))) + +#define _CMP_GT_OS 0x0e /* Greater-than (ordered, signaling) */ + +//// Stores integer values from a 256-bit integer vector to an unaligned +/// memory location pointed to by \a __p. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVDQU instruction. +/// +/// \param __p +/// A pointer to a memory location that will receive the integer values. +/// \param __a +/// A 256-bit integer vector containing the values to be moved. +static __inline void __DEFAULT_FN_ATTRS +_mm256_storeu_si256(__m256i_u *__p, __m256i __a) +{ + struct __storeu_si256 { + __m256i_u __v; + } __attribute__((__packed__, __may_alias__)); + ((struct __storeu_si256*)__p)->__v = __a; +} + +/// Stores single-precision floating point values from a 256-bit vector +/// of [8 x float] to an unaligned memory location pointed to by \a __p. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVUPS instruction. +/// +/// \param __p +/// A pointer to a memory location that will receive the float values. +/// \param __a +/// A 256-bit vector of [8 x float] containing the values to be moved. +static __inline void __DEFAULT_FN_ATTRS +_mm256_storeu_ps(float *__p, __m256 __a) +{ + struct __storeu_ps { + __m256_u __v; + } __attribute__((__packed__, __may_alias__)); + ((struct __storeu_ps*)__p)->__v = __a; +} + +/// Loads 256 bits of integer data from an unaligned memory location +/// pointed to by \a __p into a 256-bit integer vector. This intrinsic may +/// perform better than \c _mm256_loadu_si256 when the data crosses a cache +/// line boundary. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VLDDQU instruction. +/// +/// \param __p +/// A pointer to a 256-bit integer vector containing integer values. +/// \returns A 256-bit integer vector containing the moved values. +static __inline __m256i __DEFAULT_FN_ATTRS +_mm256_lddqu_si256(__m256i const *__p) +{ + return (__m256i)__builtin_ia32_lddqu256((char const *)__p); +} + +/// Loads 4 double-precision floating point values from an unaligned +/// memory location pointed to by \a __p into a vector of [4 x double]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVUPD instruction. 
+/// +/// \param __p +/// A pointer to a memory location containing double-precision floating +/// point values. +/// \returns A 256-bit vector of [4 x double] containing the moved values. +static __inline __m256d __DEFAULT_FN_ATTRS +_mm256_loadu_pd(double const *__p) +{ + struct __loadu_pd { + __m256d_u __v; + } __attribute__((__packed__, __may_alias__)); + return ((const struct __loadu_pd*)__p)->__v; +} + +/// Casts a 256-bit integer vector into a 256-bit floating-point vector +/// of [4 x double]. +/// +/// \headerfile +/// +/// This intrinsic has no corresponding instruction. +/// +/// \param __a +/// A 256-bit integer vector. +/// \returns A 256-bit floating-point vector of [4 x double] containing the same +/// bitwise pattern as the parameter. +static __inline __m256d __DEFAULT_FN_ATTRS +_mm256_castsi256_pd(__m256i __a) +{ + return (__m256d)__a; +} + +/// Casts a 256-bit floating-point vector of [8 x float] into a 256-bit +/// integer vector. +/// +/// \headerfile +/// +/// This intrinsic has no corresponding instruction. +/// +/// \param __a +/// A 256-bit floating-point vector of [8 x float]. +/// \returns A 256-bit integer vector containing the same bitwise pattern as the +/// parameter. +static __inline __m256i __DEFAULT_FN_ATTRS +_mm256_castps_si256(__m256 __a) +{ + return (__m256i)__a; +} + +/// Casts a 256-bit floating-point vector of [8 x float] into a 256-bit +/// floating-point vector of [4 x double]. +/// +/// \headerfile +/// +/// This intrinsic has no corresponding instruction. +/// +/// \param __a +/// A 256-bit floating-point vector of [8 x float]. +/// \returns A 256-bit floating-point vector of [4 x double] containing the same +/// bitwise pattern as the parameter. +static __inline __m256d __DEFAULT_FN_ATTRS +_mm256_castps_pd(__m256 __a) +{ + return (__m256d)__a; +} + +/// Merges 64-bit double-precision data values stored in either of the +/// two 256-bit vectors of [4 x double], as specified by the 256-bit vector +/// operand. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VBLENDVPD instruction. +/// +/// \param __a +/// A 256-bit vector of [4 x double]. +/// \param __b +/// A 256-bit vector of [4 x double]. +/// \param __c +/// A 256-bit vector operand, with mask bits 255, 191, 127, and 63 specifying +/// how the values are to be copied. The position of the mask bit corresponds +/// to the most significant bit of a copied value. When a mask bit is 0, the +/// corresponding 64-bit element in operand \a __a is copied to the same +/// position in the destination. When a mask bit is 1, the corresponding +/// 64-bit element in operand \a __b is copied to the same position in the +/// destination. +/// \returns A 256-bit vector of [4 x double] containing the copied values. +static __inline __m256d __DEFAULT_FN_ATTRS +_mm256_blendv_pd(__m256d __a, __m256d __b, __m256d __c) +{ + return (__m256d)__builtin_ia32_blendvpd256( + (__v4df)__a, (__v4df)__b, (__v4df)__c); +} + +/// Casts a 256-bit floating-point vector of [4 x double] into a 256-bit +/// integer vector. +/// +/// \headerfile +/// +/// This intrinsic has no corresponding instruction. +/// +/// \param __a +/// A 256-bit floating-point vector of [4 x double]. +/// \returns A 256-bit integer vector containing the same bitwise pattern as the +/// parameter. +static __inline __m256i __DEFAULT_FN_ATTRS +_mm256_castpd_si256(__m256d __a) +{ + return (__m256i)__a; +} + +/// Constructs a 256-bit floating-point vector of [4 x double] +/// initialized with the specified double-precision floating-point values. 
+/// +/// \headerfile +/// +/// This intrinsic corresponds to the VUNPCKLPD+VINSERTF128 +/// instruction. +/// +/// \param __a +/// A double-precision floating-point value used to initialize bits [255:192] +/// of the result. +/// \param __b +/// A double-precision floating-point value used to initialize bits [191:128] +/// of the result. +/// \param __c +/// A double-precision floating-point value used to initialize bits [127:64] +/// of the result. +/// \param __d +/// A double-precision floating-point value used to initialize bits [63:0] +/// of the result. +/// \returns An initialized 256-bit floating-point vector of [4 x double]. +static __inline __m256d __DEFAULT_FN_ATTRS +_mm256_set_pd(double __a, double __b, double __c, double __d) +{ + return __extension__ (__m256d){ __d, __c, __b, __a }; +} + +/* Create vectors with repeated elements */ +/// Constructs a 256-bit floating-point vector of [4 x double], with each +/// of the four double-precision floating-point vector elements set to the +/// specified double-precision floating-point value. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVDDUP+VINSERTF128 instruction. +/// +/// \param __w +/// A double-precision floating-point value used to initialize each vector +/// element of the result. +/// \returns An initialized 256-bit floating-point vector of [4 x double]. +static __inline __m256d __DEFAULT_FN_ATTRS +_mm256_set1_pd(double __w) +{ + return _mm256_set_pd(__w, __w, __w, __w); +} + +/// This intrinsic corresponds to the VSHUFPD instruction. +/// +/// \param a +/// A 256-bit vector of [4 x double]. +/// \param b +/// A 256-bit vector of [4 x double]. +/// \param mask +/// An immediate value containing 8-bit values specifying which elements to +/// copy from \a a and \a b: \n +/// Bit [0]=0: Bits [63:0] are copied from \a a to bits [63:0] of the +/// destination. \n +/// Bit [0]=1: Bits [127:64] are copied from \a a to bits [63:0] of the +/// destination. \n +/// Bit [1]=0: Bits [63:0] are copied from \a b to bits [127:64] of the +/// destination. \n +/// Bit [1]=1: Bits [127:64] are copied from \a b to bits [127:64] of the +/// destination. \n +/// Bit [2]=0: Bits [191:128] are copied from \a a to bits [191:128] of the +/// destination. \n +/// Bit [2]=1: Bits [255:192] are copied from \a a to bits [191:128] of the +/// destination. \n +/// Bit [3]=0: Bits [191:128] are copied from \a b to bits [255:192] of the +/// destination. \n +/// Bit [3]=1: Bits [255:192] are copied from \a b to bits [255:192] of the +/// destination. +/// \returns A 256-bit vector of [4 x double] containing the shuffled values. +#define _mm256_shuffle_pd(a, b, mask) \ + (__m256d)__builtin_ia32_shufpd256((__v4df)(__m256d)(a), \ + (__v4df)(__m256d)(b), (int)(mask)) + +/// Loads 8 single-precision floating point values from an unaligned +/// memory location pointed to by \a __p into a vector of [8 x float]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVUPS instruction. +/// +/// \param __p +/// A pointer to a memory location containing single-precision floating +/// point values. +/// \returns A 256-bit vector of [8 x float] containing the moved values. +static __inline __m256 __DEFAULT_FN_ATTRS +_mm256_loadu_ps(float const *__p) +{ + struct __loadu_ps { + __m256_u __v; + } __attribute__((__packed__, __may_alias__)); + return ((const struct __loadu_ps*)__p)->__v; +} + +/// Casts a 256-bit integer vector into a 256-bit floating-point vector +/// of [8 x float]. 
+/// +/// \headerfile +/// +/// This intrinsic has no corresponding instruction. +/// +/// \param __a +/// A 256-bit integer vector. +/// \returns A 256-bit floating-point vector of [8 x float] containing the same +/// bitwise pattern as the parameter. +static __inline __m256 __DEFAULT_FN_ATTRS +_mm256_castsi256_ps(__m256i __a) +{ + return (__m256)__a; +} + +/// Constructs a 256-bit floating-point vector of [8 x float] initialized +/// with the specified single-precision floating-point values. +/// +/// \headerfile +/// +/// This intrinsic is a utility function and does not correspond to a specific +/// instruction. +/// +/// \param __a +/// A single-precision floating-point value used to initialize bits [255:224] +/// of the result. +/// \param __b +/// A single-precision floating-point value used to initialize bits [223:192] +/// of the result. +/// \param __c +/// A single-precision floating-point value used to initialize bits [191:160] +/// of the result. +/// \param __d +/// A single-precision floating-point value used to initialize bits [159:128] +/// of the result. +/// \param __e +/// A single-precision floating-point value used to initialize bits [127:96] +/// of the result. +/// \param __f +/// A single-precision floating-point value used to initialize bits [95:64] +/// of the result. +/// \param __g +/// A single-precision floating-point value used to initialize bits [63:32] +/// of the result. +/// \param __h +/// A single-precision floating-point value used to initialize bits [31:0] +/// of the result. +/// \returns An initialized 256-bit floating-point vector of [8 x float]. +static __inline __m256 __DEFAULT_FN_ATTRS +_mm256_set_ps(float __a, float __b, float __c, float __d, + float __e, float __f, float __g, float __h) +{ + return __extension__ (__m256){ __h, __g, __f, __e, __d, __c, __b, __a }; +} + +/// Constructs a 256-bit floating-point vector of [8 x float], with each +/// of the eight single-precision floating-point vector elements set to the +/// specified single-precision floating-point value. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPERMILPS+VINSERTF128 +/// instruction. +/// +/// \param __w +/// A single-precision floating-point value used to initialize each vector +/// element of the result. +/// \returns An initialized 256-bit floating-point vector of [8 x float]. +static __inline __m256 __DEFAULT_FN_ATTRS +_mm256_set1_ps(float __w) +{ + return _mm256_set_ps(__w, __w, __w, __w, __w, __w, __w, __w); +} + +/// Constructs a 256-bit integer vector initialized with the specified +/// 32-bit integral values. +/// +/// \headerfile +/// +/// This intrinsic is a utility function and does not correspond to a specific +/// instruction. +/// +/// \param __i0 +/// A 32-bit integral value used to initialize bits [255:224] of the result. +/// \param __i1 +/// A 32-bit integral value used to initialize bits [223:192] of the result. +/// \param __i2 +/// A 32-bit integral value used to initialize bits [191:160] of the result. +/// \param __i3 +/// A 32-bit integral value used to initialize bits [159:128] of the result. +/// \param __i4 +/// A 32-bit integral value used to initialize bits [127:96] of the result. +/// \param __i5 +/// A 32-bit integral value used to initialize bits [95:64] of the result. +/// \param __i6 +/// A 32-bit integral value used to initialize bits [63:32] of the result. +/// \param __i7 +/// A 32-bit integral value used to initialize bits [31:0] of the result. +/// \returns An initialized 256-bit integer vector. 
+static __inline __m256i __DEFAULT_FN_ATTRS +_mm256_set_epi32(int __i0, int __i1, int __i2, int __i3, + int __i4, int __i5, int __i6, int __i7) +{ + return __extension__ (__m256i)(__v8si){ __i7, __i6, __i5, __i4, __i3, __i2, __i1, __i0 }; +} + +/// Constructs a 256-bit integer vector of [8 x i32], with each of the +/// 32-bit integral vector elements set to the specified 32-bit integral +/// value. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPERMILPS+VINSERTF128 +/// instruction. +/// +/// \param __i +/// A 32-bit integral value used to initialize each vector element of the +/// result. +/// \returns An initialized 256-bit integer vector of [8 x i32]. +static __inline __m256i __DEFAULT_FN_ATTRS +_mm256_set1_epi32(int __i) +{ + return _mm256_set_epi32(__i, __i, __i, __i, __i, __i, __i, __i); +} + +/// Constructs a 256-bit integer vector initialized with the specified +/// 64-bit integral values. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPUNPCKLQDQ+VINSERTF128 +/// instruction. +/// +/// \param __a +/// A 64-bit integral value used to initialize bits [255:192] of the result. +/// \param __b +/// A 64-bit integral value used to initialize bits [191:128] of the result. +/// \param __c +/// A 64-bit integral value used to initialize bits [127:64] of the result. +/// \param __d +/// A 64-bit integral value used to initialize bits [63:0] of the result. +/// \returns An initialized 256-bit integer vector. +static __inline __m256i __DEFAULT_FN_ATTRS +_mm256_set_epi64x(long long __a, long long __b, long long __c, long long __d) +{ + return __extension__ (__m256i)(__v4di){ __d, __c, __b, __a }; +} + +/// Constructs a 256-bit integer vector of [4 x i64], with each of the +/// 64-bit integral vector elements set to the specified 64-bit integral +/// value. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVDDUP+VINSERTF128 instruction. +/// +/// \param __q +/// A 64-bit integral value used to initialize each vector element of the +/// result. +/// \returns An initialized 256-bit integer vector of [4 x i64]. +static __inline __m256i __DEFAULT_FN_ATTRS +_mm256_set1_epi64x(long long __q) +{ + return _mm256_set_epi64x(__q, __q, __q, __q); +} + +/// Extracts the sign bits of single-precision floating point elements +/// in a 256-bit vector of [8 x float] and writes them to the lower order +/// bits of the return value. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVMSKPS instruction. +/// +/// \param __a +/// A 256-bit vector of [8 x float] containing the single-precision floating +/// point values with sign bits to be extracted. +/// \returns The sign bits from the operand, written to bits [7:0]. +static __inline int __DEFAULT_FN_ATTRS +_mm256_movemask_ps(__m256 __a) +{ + return __builtin_ia32_movmskps256((__v8sf)__a); +} + +/* Vector extract sign mask */ +/// Extracts the sign bits of double-precision floating point elements +/// in a 256-bit vector of [4 x double] and writes them to the lower order +/// bits of the return value. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVMSKPD instruction. +/// +/// \param __a +/// A 256-bit vector of [4 x double] containing the double-precision +/// floating point values with sign bits to be extracted. +/// \returns The sign bits from the operand, written to bits [3:0]. 
+static __inline int __DEFAULT_FN_ATTRS +_mm256_movemask_pd(__m256d __a) +{ + return __builtin_ia32_movmskpd256((__v4df)__a); +} + +/* Vector Blend */ +/// Merges 64-bit double-precision data values stored in either of the +/// two 256-bit vectors of [4 x double], as specified by the immediate +/// integer operand. +/// +/// \headerfile +/// +/// \code +/// __m256d _mm256_blend_pd(__m256d V1, __m256d V2, const int M); +/// \endcode +/// +/// This intrinsic corresponds to the VBLENDPD instruction. +/// +/// \param V1 +/// A 256-bit vector of [4 x double]. +/// \param V2 +/// A 256-bit vector of [4 x double]. +/// \param M +/// An immediate integer operand, with mask bits [3:0] specifying how the +/// values are to be copied. The position of the mask bit corresponds to the +/// index of a copied value. When a mask bit is 0, the corresponding 64-bit +/// element in operand \a V1 is copied to the same position in the +/// destination. When a mask bit is 1, the corresponding 64-bit element in +/// operand \a V2 is copied to the same position in the destination. +/// \returns A 256-bit vector of [4 x double] containing the copied values. +#define _mm256_blend_pd(V1, V2, M) \ + (__m256d)__builtin_ia32_blendpd256((__v4df)(__m256d)(V1), \ + (__v4df)(__m256d)(V2), (int)(M)) + + +/// Compares each of the corresponding double-precision values of two +/// 256-bit vectors of [4 x double], using the operation specified by the +/// immediate integer operand. +/// +/// Returns a [4 x double] vector consisting of four doubles corresponding to +/// the four comparison results: zero if the comparison is false, and all 1's +/// if the comparison is true. +/// +/// \headerfile +/// +/// \code +/// __m256d _mm256_cmp_pd(__m256d a, __m256d b, const int c); +/// \endcode +/// +/// This intrinsic corresponds to the VCMPPD instruction. +/// +/// \param a +/// A 256-bit vector of [4 x double]. +/// \param b +/// A 256-bit vector of [4 x double]. 
+/// \param c +/// An immediate integer operand, with bits [4:0] specifying which comparison +/// operation to use: \n +/// 0x00: Equal (ordered, non-signaling) \n +/// 0x01: Less-than (ordered, signaling) \n +/// 0x02: Less-than-or-equal (ordered, signaling) \n +/// 0x03: Unordered (non-signaling) \n +/// 0x04: Not-equal (unordered, non-signaling) \n +/// 0x05: Not-less-than (unordered, signaling) \n +/// 0x06: Not-less-than-or-equal (unordered, signaling) \n +/// 0x07: Ordered (non-signaling) \n +/// 0x08: Equal (unordered, non-signaling) \n +/// 0x09: Not-greater-than-or-equal (unordered, signaling) \n +/// 0x0A: Not-greater-than (unordered, signaling) \n +/// 0x0B: False (ordered, non-signaling) \n +/// 0x0C: Not-equal (ordered, non-signaling) \n +/// 0x0D: Greater-than-or-equal (ordered, signaling) \n +/// 0x0E: Greater-than (ordered, signaling) \n +/// 0x0F: True (unordered, non-signaling) \n +/// 0x10: Equal (ordered, signaling) \n +/// 0x11: Less-than (ordered, non-signaling) \n +/// 0x12: Less-than-or-equal (ordered, non-signaling) \n +/// 0x13: Unordered (signaling) \n +/// 0x14: Not-equal (unordered, signaling) \n +/// 0x15: Not-less-than (unordered, non-signaling) \n +/// 0x16: Not-less-than-or-equal (unordered, non-signaling) \n +/// 0x17: Ordered (signaling) \n +/// 0x18: Equal (unordered, signaling) \n +/// 0x19: Not-greater-than-or-equal (unordered, non-signaling) \n +/// 0x1A: Not-greater-than (unordered, non-signaling) \n +/// 0x1B: False (ordered, signaling) \n +/// 0x1C: Not-equal (ordered, signaling) \n +/// 0x1D: Greater-than-or-equal (ordered, non-signaling) \n +/// 0x1E: Greater-than (ordered, non-signaling) \n +/// 0x1F: True (unordered, signaling) +/// \returns A 256-bit vector of [4 x double] containing the comparison results. +#define _mm256_cmp_pd(a, b, c) \ + (__m256d)__builtin_ia32_cmppd256((__v4df)(__m256d)(a), \ + (__v4df)(__m256d)(b), (c)) + +/// Compares each of the corresponding values of two 256-bit vectors of +/// [8 x float], using the operation specified by the immediate integer +/// operand. +/// +/// Returns a [8 x float] vector consisting of eight floats corresponding to +/// the eight comparison results: zero if the comparison is false, and all +/// 1's if the comparison is true. +/// +/// \headerfile +/// +/// \code +/// __m256 _mm256_cmp_ps(__m256 a, __m256 b, const int c); +/// \endcode +/// +/// This intrinsic corresponds to the VCMPPS instruction. +/// +/// \param a +/// A 256-bit vector of [8 x float]. +/// \param b +/// A 256-bit vector of [8 x float]. 
+/// \param c +/// An immediate integer operand, with bits [4:0] specifying which comparison +/// operation to use: \n +/// 0x00: Equal (ordered, non-signaling) \n +/// 0x01: Less-than (ordered, signaling) \n +/// 0x02: Less-than-or-equal (ordered, signaling) \n +/// 0x03: Unordered (non-signaling) \n +/// 0x04: Not-equal (unordered, non-signaling) \n +/// 0x05: Not-less-than (unordered, signaling) \n +/// 0x06: Not-less-than-or-equal (unordered, signaling) \n +/// 0x07: Ordered (non-signaling) \n +/// 0x08: Equal (unordered, non-signaling) \n +/// 0x09: Not-greater-than-or-equal (unordered, signaling) \n +/// 0x0A: Not-greater-than (unordered, signaling) \n +/// 0x0B: False (ordered, non-signaling) \n +/// 0x0C: Not-equal (ordered, non-signaling) \n +/// 0x0D: Greater-than-or-equal (ordered, signaling) \n +/// 0x0E: Greater-than (ordered, signaling) \n +/// 0x0F: True (unordered, non-signaling) \n +/// 0x10: Equal (ordered, signaling) \n +/// 0x11: Less-than (ordered, non-signaling) \n +/// 0x12: Less-than-or-equal (ordered, non-signaling) \n +/// 0x13: Unordered (signaling) \n +/// 0x14: Not-equal (unordered, signaling) \n +/// 0x15: Not-less-than (unordered, non-signaling) \n +/// 0x16: Not-less-than-or-equal (unordered, non-signaling) \n +/// 0x17: Ordered (signaling) \n +/// 0x18: Equal (unordered, signaling) \n +/// 0x19: Not-greater-than-or-equal (unordered, non-signaling) \n +/// 0x1A: Not-greater-than (unordered, non-signaling) \n +/// 0x1B: False (ordered, signaling) \n +/// 0x1C: Not-equal (ordered, signaling) \n +/// 0x1D: Greater-than-or-equal (ordered, non-signaling) \n +/// 0x1E: Greater-than (ordered, non-signaling) \n +/// 0x1F: True (unordered, signaling) +/// \returns A 256-bit vector of [8 x float] containing the comparison results. +#define _mm256_cmp_ps(a, b, c) \ + (__m256)__builtin_ia32_cmpps256((__v8sf)(__m256)(a), \ + (__v8sf)(__m256)(b), (c)) + +/* Cast between vector types */ +/// Casts a 256-bit floating-point vector of [4 x double] into a 256-bit +/// floating-point vector of [8 x float]. +/// +/// \headerfile +/// +/// This intrinsic has no corresponding instruction. +/// +/// \param __a +/// A 256-bit floating-point vector of [4 x double]. +/// \returns A 256-bit floating-point vector of [8 x float] containing the same +/// bitwise pattern as the parameter. +static __inline __m256 __DEFAULT_FN_ATTRS +_mm256_castpd_ps(__m256d __a) +{ + return (__m256)__a; +} + +/// Stores double-precision floating point values from a 256-bit vector +/// of [4 x double] to an unaligned memory location pointed to by \a __p. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVUPD instruction. +/// +/// \param __p +/// A pointer to a memory location that will receive the double-precision +/// floating point values. +/// \param __a +/// A 256-bit vector of [4 x double] containing the values to be moved. +static __inline void __DEFAULT_FN_ATTRS +_mm256_storeu_pd(double *__p, __m256d __a) +{ + struct __storeu_pd { + __m256d_u __v; + } __attribute__((__packed__, __may_alias__)); + ((struct __storeu_pd*)__p)->__v = __a; +} + +#undef __DEFAULT_FN_ATTRS128 + +/*===---- avx2intrin.h - AVX2 intrinsics -----------------------------------=== + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. 
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ + +/* Define the default attributes for the functions in this file. */ +#define __DEFAULT_FN_ATTRS256 __attribute__((__always_inline__, __nodebug__, __target__("avx2"), __min_vector_width__(256))) +#define __DEFAULT_FN_ATTRS128 __attribute__((__always_inline__, __nodebug__, __target__("avx2"), __min_vector_width__(128))) + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_min_epu32(__m256i __a, __m256i __b) +{ + return (__m256i)__builtin_ia32_pminud256((__v8si)__a, (__v8si)__b); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_max_epu32(__m256i __a, __m256i __b) +{ + return (__m256i)__builtin_ia32_pmaxud256((__v8si)__a, (__v8si)__b); +} + +#define _mm256_blend_epi32(V1, V2, M) \ + (__m256i)__builtin_ia32_pblendd256((__v8si)(__m256i)(V1), \ + (__v8si)(__m256i)(V2), (int)(M)) + + +#define _mm256_shuffle_epi32(a, imm) \ + (__m256i)__builtin_ia32_pshufd256((__v8si)(__m256i)(a), (int)(imm)) + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_cmpgt_epi64(__m256i __a, __m256i __b) +{ + return (__m256i)((__v4di)__a > (__v4di)__b); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_cvtepu8_epi32(__m128i __V) +{ + return (__m256i)__builtin_convertvector(__builtin_shufflevector((__v16qu)__V, (__v16qu)__V, 0, 1, 2, 3, 4, 5, 6, 7), __v8si); +} + +#define _mm256_permute4x64_pd(V, M) \ + (__m256d)__builtin_ia32_permdf256((__v4df)(__m256d)(V), (int)(M)) + + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_xor_si256(__m256i __a, __m256i __b) +{ + return (__m256i)((__v4du)__a ^ (__v4du)__b); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_cmpgt_epi32(__m256i __a, __m256i __b) +{ + return (__m256i)((__v8si)__a > (__v8si)__b); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_permutevar8x32_ps(__m256 __a, __m256i __b) +{ + return (__m256)__builtin_ia32_permvarsf256((__v8sf)__a, (__v8si)__b); +} + +#undef __DEFAULT_FN_ATTRS +#undef __DEFAULT_FN_ATTRS128 + +/*===---- avx512fintrin.h - AVX512F intrinsics -----------------------------=== + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ + +typedef float __m512 __attribute__((__vector_size__(64), __aligned__(64))); +typedef long long __m512i __attribute__((__vector_size__(64), __aligned__(64))); +typedef double __m512d __attribute__((__vector_size__(64), __aligned__(64))); +typedef double __m512d_u __attribute__((__vector_size__(64), __aligned__(1))); +typedef long long __m512i __attribute__((__vector_size__(64), __aligned__(64))); +typedef long long __m512i_u __attribute__((__vector_size__(64), __aligned__(1))); +typedef float __m512_u __attribute__((__vector_size__(64), __aligned__(1))); + +typedef long long __v8di __attribute__((__vector_size__(64))); +typedef double __v8df __attribute__((__vector_size__(64))); +typedef int __v16si __attribute__((__vector_size__(64))); +typedef float __v16sf __attribute__((__vector_size__(64))); + +typedef unsigned char __mmask8; +typedef unsigned short __mmask16; + +#define _MM_FROUND_CUR_DIRECTION 0x04 + +/* Define the default attributes for the functions in this file. 
*/ +#define __DEFAULT_FN_ATTRS512 __attribute__((__always_inline__, __nodebug__, __target__("avx512f"), __min_vector_width__(512))) +#define __DEFAULT_FN_ATTRS128 __attribute__((__always_inline__, __nodebug__, __target__("avx512f"), __min_vector_width__(128))) +#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("avx512f"))) + +/* Constants for integer comparison predicates */ +typedef enum { + _MM_CMPINT_EQ, /* Equal */ + _MM_CMPINT_LT, /* Less than */ + _MM_CMPINT_LE, /* Less than or Equal */ + _MM_CMPINT_UNUSED, + _MM_CMPINT_NE, /* Not Equal */ + _MM_CMPINT_NLT, /* Not Less than */ +#define _MM_CMPINT_GE _MM_CMPINT_NLT /* Greater than or Equal */ + _MM_CMPINT_NLE /* Not Less than or Equal */ +#define _MM_CMPINT_GT _MM_CMPINT_NLE /* Greater than */ +} _MM_CMPINT_ENUM; + +typedef enum +{ + _MM_PERM_AAAA = 0x00, _MM_PERM_AAAB = 0x01, _MM_PERM_AAAC = 0x02, + _MM_PERM_AAAD = 0x03, _MM_PERM_AABA = 0x04, _MM_PERM_AABB = 0x05, + _MM_PERM_AABC = 0x06, _MM_PERM_AABD = 0x07, _MM_PERM_AACA = 0x08, + _MM_PERM_AACB = 0x09, _MM_PERM_AACC = 0x0A, _MM_PERM_AACD = 0x0B, + _MM_PERM_AADA = 0x0C, _MM_PERM_AADB = 0x0D, _MM_PERM_AADC = 0x0E, + _MM_PERM_AADD = 0x0F, _MM_PERM_ABAA = 0x10, _MM_PERM_ABAB = 0x11, + _MM_PERM_ABAC = 0x12, _MM_PERM_ABAD = 0x13, _MM_PERM_ABBA = 0x14, + _MM_PERM_ABBB = 0x15, _MM_PERM_ABBC = 0x16, _MM_PERM_ABBD = 0x17, + _MM_PERM_ABCA = 0x18, _MM_PERM_ABCB = 0x19, _MM_PERM_ABCC = 0x1A, + _MM_PERM_ABCD = 0x1B, _MM_PERM_ABDA = 0x1C, _MM_PERM_ABDB = 0x1D, + _MM_PERM_ABDC = 0x1E, _MM_PERM_ABDD = 0x1F, _MM_PERM_ACAA = 0x20, + _MM_PERM_ACAB = 0x21, _MM_PERM_ACAC = 0x22, _MM_PERM_ACAD = 0x23, + _MM_PERM_ACBA = 0x24, _MM_PERM_ACBB = 0x25, _MM_PERM_ACBC = 0x26, + _MM_PERM_ACBD = 0x27, _MM_PERM_ACCA = 0x28, _MM_PERM_ACCB = 0x29, + _MM_PERM_ACCC = 0x2A, _MM_PERM_ACCD = 0x2B, _MM_PERM_ACDA = 0x2C, + _MM_PERM_ACDB = 0x2D, _MM_PERM_ACDC = 0x2E, _MM_PERM_ACDD = 0x2F, + _MM_PERM_ADAA = 0x30, _MM_PERM_ADAB = 0x31, _MM_PERM_ADAC = 0x32, + _MM_PERM_ADAD = 0x33, _MM_PERM_ADBA = 0x34, _MM_PERM_ADBB = 0x35, + _MM_PERM_ADBC = 0x36, _MM_PERM_ADBD = 0x37, _MM_PERM_ADCA = 0x38, + _MM_PERM_ADCB = 0x39, _MM_PERM_ADCC = 0x3A, _MM_PERM_ADCD = 0x3B, + _MM_PERM_ADDA = 0x3C, _MM_PERM_ADDB = 0x3D, _MM_PERM_ADDC = 0x3E, + _MM_PERM_ADDD = 0x3F, _MM_PERM_BAAA = 0x40, _MM_PERM_BAAB = 0x41, + _MM_PERM_BAAC = 0x42, _MM_PERM_BAAD = 0x43, _MM_PERM_BABA = 0x44, + _MM_PERM_BABB = 0x45, _MM_PERM_BABC = 0x46, _MM_PERM_BABD = 0x47, + _MM_PERM_BACA = 0x48, _MM_PERM_BACB = 0x49, _MM_PERM_BACC = 0x4A, + _MM_PERM_BACD = 0x4B, _MM_PERM_BADA = 0x4C, _MM_PERM_BADB = 0x4D, + _MM_PERM_BADC = 0x4E, _MM_PERM_BADD = 0x4F, _MM_PERM_BBAA = 0x50, + _MM_PERM_BBAB = 0x51, _MM_PERM_BBAC = 0x52, _MM_PERM_BBAD = 0x53, + _MM_PERM_BBBA = 0x54, _MM_PERM_BBBB = 0x55, _MM_PERM_BBBC = 0x56, + _MM_PERM_BBBD = 0x57, _MM_PERM_BBCA = 0x58, _MM_PERM_BBCB = 0x59, + _MM_PERM_BBCC = 0x5A, _MM_PERM_BBCD = 0x5B, _MM_PERM_BBDA = 0x5C, + _MM_PERM_BBDB = 0x5D, _MM_PERM_BBDC = 0x5E, _MM_PERM_BBDD = 0x5F, + _MM_PERM_BCAA = 0x60, _MM_PERM_BCAB = 0x61, _MM_PERM_BCAC = 0x62, + _MM_PERM_BCAD = 0x63, _MM_PERM_BCBA = 0x64, _MM_PERM_BCBB = 0x65, + _MM_PERM_BCBC = 0x66, _MM_PERM_BCBD = 0x67, _MM_PERM_BCCA = 0x68, + _MM_PERM_BCCB = 0x69, _MM_PERM_BCCC = 0x6A, _MM_PERM_BCCD = 0x6B, + _MM_PERM_BCDA = 0x6C, _MM_PERM_BCDB = 0x6D, _MM_PERM_BCDC = 0x6E, + _MM_PERM_BCDD = 0x6F, _MM_PERM_BDAA = 0x70, _MM_PERM_BDAB = 0x71, + _MM_PERM_BDAC = 0x72, _MM_PERM_BDAD = 0x73, _MM_PERM_BDBA = 0x74, + _MM_PERM_BDBB = 0x75, _MM_PERM_BDBC = 0x76, _MM_PERM_BDBD = 0x77, + 
_MM_PERM_BDCA = 0x78, _MM_PERM_BDCB = 0x79, _MM_PERM_BDCC = 0x7A, + _MM_PERM_BDCD = 0x7B, _MM_PERM_BDDA = 0x7C, _MM_PERM_BDDB = 0x7D, + _MM_PERM_BDDC = 0x7E, _MM_PERM_BDDD = 0x7F, _MM_PERM_CAAA = 0x80, + _MM_PERM_CAAB = 0x81, _MM_PERM_CAAC = 0x82, _MM_PERM_CAAD = 0x83, + _MM_PERM_CABA = 0x84, _MM_PERM_CABB = 0x85, _MM_PERM_CABC = 0x86, + _MM_PERM_CABD = 0x87, _MM_PERM_CACA = 0x88, _MM_PERM_CACB = 0x89, + _MM_PERM_CACC = 0x8A, _MM_PERM_CACD = 0x8B, _MM_PERM_CADA = 0x8C, + _MM_PERM_CADB = 0x8D, _MM_PERM_CADC = 0x8E, _MM_PERM_CADD = 0x8F, + _MM_PERM_CBAA = 0x90, _MM_PERM_CBAB = 0x91, _MM_PERM_CBAC = 0x92, + _MM_PERM_CBAD = 0x93, _MM_PERM_CBBA = 0x94, _MM_PERM_CBBB = 0x95, + _MM_PERM_CBBC = 0x96, _MM_PERM_CBBD = 0x97, _MM_PERM_CBCA = 0x98, + _MM_PERM_CBCB = 0x99, _MM_PERM_CBCC = 0x9A, _MM_PERM_CBCD = 0x9B, + _MM_PERM_CBDA = 0x9C, _MM_PERM_CBDB = 0x9D, _MM_PERM_CBDC = 0x9E, + _MM_PERM_CBDD = 0x9F, _MM_PERM_CCAA = 0xA0, _MM_PERM_CCAB = 0xA1, + _MM_PERM_CCAC = 0xA2, _MM_PERM_CCAD = 0xA3, _MM_PERM_CCBA = 0xA4, + _MM_PERM_CCBB = 0xA5, _MM_PERM_CCBC = 0xA6, _MM_PERM_CCBD = 0xA7, + _MM_PERM_CCCA = 0xA8, _MM_PERM_CCCB = 0xA9, _MM_PERM_CCCC = 0xAA, + _MM_PERM_CCCD = 0xAB, _MM_PERM_CCDA = 0xAC, _MM_PERM_CCDB = 0xAD, + _MM_PERM_CCDC = 0xAE, _MM_PERM_CCDD = 0xAF, _MM_PERM_CDAA = 0xB0, + _MM_PERM_CDAB = 0xB1, _MM_PERM_CDAC = 0xB2, _MM_PERM_CDAD = 0xB3, + _MM_PERM_CDBA = 0xB4, _MM_PERM_CDBB = 0xB5, _MM_PERM_CDBC = 0xB6, + _MM_PERM_CDBD = 0xB7, _MM_PERM_CDCA = 0xB8, _MM_PERM_CDCB = 0xB9, + _MM_PERM_CDCC = 0xBA, _MM_PERM_CDCD = 0xBB, _MM_PERM_CDDA = 0xBC, + _MM_PERM_CDDB = 0xBD, _MM_PERM_CDDC = 0xBE, _MM_PERM_CDDD = 0xBF, + _MM_PERM_DAAA = 0xC0, _MM_PERM_DAAB = 0xC1, _MM_PERM_DAAC = 0xC2, + _MM_PERM_DAAD = 0xC3, _MM_PERM_DABA = 0xC4, _MM_PERM_DABB = 0xC5, + _MM_PERM_DABC = 0xC6, _MM_PERM_DABD = 0xC7, _MM_PERM_DACA = 0xC8, + _MM_PERM_DACB = 0xC9, _MM_PERM_DACC = 0xCA, _MM_PERM_DACD = 0xCB, + _MM_PERM_DADA = 0xCC, _MM_PERM_DADB = 0xCD, _MM_PERM_DADC = 0xCE, + _MM_PERM_DADD = 0xCF, _MM_PERM_DBAA = 0xD0, _MM_PERM_DBAB = 0xD1, + _MM_PERM_DBAC = 0xD2, _MM_PERM_DBAD = 0xD3, _MM_PERM_DBBA = 0xD4, + _MM_PERM_DBBB = 0xD5, _MM_PERM_DBBC = 0xD6, _MM_PERM_DBBD = 0xD7, + _MM_PERM_DBCA = 0xD8, _MM_PERM_DBCB = 0xD9, _MM_PERM_DBCC = 0xDA, + _MM_PERM_DBCD = 0xDB, _MM_PERM_DBDA = 0xDC, _MM_PERM_DBDB = 0xDD, + _MM_PERM_DBDC = 0xDE, _MM_PERM_DBDD = 0xDF, _MM_PERM_DCAA = 0xE0, + _MM_PERM_DCAB = 0xE1, _MM_PERM_DCAC = 0xE2, _MM_PERM_DCAD = 0xE3, + _MM_PERM_DCBA = 0xE4, _MM_PERM_DCBB = 0xE5, _MM_PERM_DCBC = 0xE6, + _MM_PERM_DCBD = 0xE7, _MM_PERM_DCCA = 0xE8, _MM_PERM_DCCB = 0xE9, + _MM_PERM_DCCC = 0xEA, _MM_PERM_DCCD = 0xEB, _MM_PERM_DCDA = 0xEC, + _MM_PERM_DCDB = 0xED, _MM_PERM_DCDC = 0xEE, _MM_PERM_DCDD = 0xEF, + _MM_PERM_DDAA = 0xF0, _MM_PERM_DDAB = 0xF1, _MM_PERM_DDAC = 0xF2, + _MM_PERM_DDAD = 0xF3, _MM_PERM_DDBA = 0xF4, _MM_PERM_DDBB = 0xF5, + _MM_PERM_DDBC = 0xF6, _MM_PERM_DDBD = 0xF7, _MM_PERM_DDCA = 0xF8, + _MM_PERM_DDCB = 0xF9, _MM_PERM_DDCC = 0xFA, _MM_PERM_DDCD = 0xFB, + _MM_PERM_DDDA = 0xFC, _MM_PERM_DDDB = 0xFD, _MM_PERM_DDDC = 0xFE, + _MM_PERM_DDDD = 0xFF +} _MM_PERM_ENUM; + +/* SIMD load ops */ + +static __inline __m512i __DEFAULT_FN_ATTRS512 +_mm512_loadu_si512 (void const *__P) +{ + struct __loadu_si512 { + __m512i_u __v; + } __attribute__((__packed__, __may_alias__)); + return ((const struct __loadu_si512*)__P)->__v; +} + +static __inline __m512d __DEFAULT_FN_ATTRS512 +_mm512_loadu_pd(void const *__p) +{ + struct __loadu_pd { + __m512d_u __v; + } __attribute__((__packed__, __may_alias__)); + return ((const struct 
__loadu_pd*)__p)->__v; +} + +static __inline __m512 __DEFAULT_FN_ATTRS512 +_mm512_loadu_ps(void const *__p) +{ + struct __loadu_ps { + __m512_u __v; + } __attribute__((__packed__, __may_alias__)); + return ((const struct __loadu_ps*)__p)->__v; +} + +static __inline void __DEFAULT_FN_ATTRS512 +_mm512_storeu_pd(void *__P, __m512d __A) +{ + struct __storeu_pd { + __m512d_u __v; + } __attribute__((__packed__, __may_alias__)); + ((struct __storeu_pd*)__P)->__v = __A; +} + +static __inline void __DEFAULT_FN_ATTRS512 +_mm512_storeu_ps(void *__P, __m512 __A) +{ + struct __storeu_ps { + __m512_u __v; + } __attribute__((__packed__, __may_alias__)); + ((struct __storeu_ps*)__P)->__v = __A; +} + +static __inline void __DEFAULT_FN_ATTRS512 +_mm512_storeu_si512 (void *__P, __m512i __A) +{ + struct __storeu_si512 { + __m512i_u __v; + } __attribute__((__packed__, __may_alias__)); + ((struct __storeu_si512*)__P)->__v = __A; +} + +static __inline __m512i __DEFAULT_FN_ATTRS512 +_mm512_min_epi64(__m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_pminsq512((__v8di)__A, (__v8di)__B); +} + +static __inline __m512i __DEFAULT_FN_ATTRS512 +_mm512_max_epi64(__m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_pmaxsq512((__v8di)__A, (__v8di)__B); +} + +static __inline __m512d __DEFAULT_FN_ATTRS512 +_mm512_castsi512_pd (__m512i __A) +{ + return (__m512d) (__A); +} + +static __inline __m512i __DEFAULT_FN_ATTRS512 +_mm512_min_epu32(__m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_pminud512((__v16si)__A, (__v16si)__B); +} + +static __inline __m512i __DEFAULT_FN_ATTRS512 +_mm512_max_epu32(__m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_pmaxud512((__v16si)__A, (__v16si)__B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_max_epi64 (__m512i __W, __mmask8 __M, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectq_512((__mmask8)__M, + (__v8di)_mm512_max_epi64(__A, __B), + (__v8di)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_max_epu32 (__m512i __W, __mmask16 __M, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectd_512((__mmask16)__M, + (__v16si)_mm512_max_epu32(__A, __B), + (__v16si)__W); +} + + +#define _mm512_shuffle_epi32(A, I) \ + (__m512i)__builtin_ia32_pshufd512((__v16si)(__m512i)(A), (int)(I)) + +#define _mm512_permutex_pd(X, C) \ + (__m512d)__builtin_ia32_permdf512((__v8df)(__m512d)(X), (int)(C)) + +#define _mm512_permutex_epi64(X, C) \ + (__m512i)__builtin_ia32_permdi512((__v8di)(__m512i)(X), (int)(C)) + +static __inline__ void __DEFAULT_FN_ATTRS512 +_mm512_mask_compressstoreu_pd (void *__P, __mmask8 __U, __m512d __A) +{ + __builtin_ia32_compressstoredf512_mask ((__v8df *) __P, (__v8df) __A, + (__mmask8) __U); +} + +static __inline__ void __DEFAULT_FN_ATTRS512 +_mm512_mask_compressstoreu_ps (void *__P, __mmask16 __U, __m512 __A) +{ + __builtin_ia32_compressstoresf512_mask ((__v16sf *) __P, (__v16sf) __A, + (__mmask16) __U); +} + +static __inline __m512d __DEFAULT_FN_ATTRS512 +_mm512_set1_pd(double __w) +{ + return __extension__ (__m512d){ __w, __w, __w, __w, __w, __w, __w, __w }; +} + +static __inline __m512 __DEFAULT_FN_ATTRS512 +_mm512_set1_ps(float __w) +{ + return __extension__ (__m512){ __w, __w, __w, __w, __w, __w, __w, __w, + __w, __w, __w, __w, __w, __w, __w, __w }; +} + +static __inline __m512i __DEFAULT_FN_ATTRS512 +_mm512_set1_epi32(int __s) +{ + return __extension__ (__m512i)(__v16si){ + __s, __s, __s, __s, __s, __s, __s, __s, + __s, __s, __s, __s, __s, __s, __s, __s }; +} + 
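+// ---------------------------------------------------------------------------
+// Editor's note, not part of the original commit: vxsort needs the set1
+// broadcast helpers defined above and below because its AVX-512 partition
+// loop splats the pivot into a single vector register once, then compares
+// whole vectors of keys against it and compress-stores each side. A minimal
+// sketch of that pattern, using only intrinsics defined in this shim
+// (pivot, read_ptr, left_ptr and right_ptr are illustrative names):
+//
+//   __m512i  P = _mm512_set1_epi64(pivot);              // broadcast pivot
+//   __m512i  V = _mm512_loadu_si512(read_ptr);          // load 8 keys
+//   __mmask8 m = _mm512_cmpgt_epi64_mask(V, P);         // mask of keys > pivot
+//   _mm512_mask_compressstoreu_epi64(right_ptr, m, V);  // pack greater side
+//   _mm512_mask_compressstoreu_epi64(left_ptr, (__mmask8)~m, V);  // pack rest
+// ---------------------------------------------------------------------------
+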
+static __inline __m512i __DEFAULT_FN_ATTRS512 +_mm512_set1_epi64(long long __d) +{ + return __extension__(__m512i)(__v8di){ __d, __d, __d, __d, __d, __d, __d, __d }; +} + +static __inline__ void __DEFAULT_FN_ATTRS512 +_mm512_mask_compressstoreu_epi32 (void *__P, __mmask16 __U, __m512i __A) +{ + __builtin_ia32_compressstoresi512_mask ((__v16si *) __P, (__v16si) __A, + (__mmask16) __U); +} + +static __inline__ void __DEFAULT_FN_ATTRS512 +_mm512_mask_compressstoreu_epi64 (void *__P, __mmask8 __U, __m512i __A) +{ + __builtin_ia32_compressstoredi512_mask ((__v8di *) __P, (__v8di) __A, + (__mmask8) __U); +} + +#define _mm512_permute_pd(X, C) \ + (__m512d)__builtin_ia32_vpermilpd512((__v8df)(__m512d)(X), (int)(C)) + +static __inline __m512i __DEFAULT_FN_ATTRS512 +_mm512_castpd_si512 (__m512d __A) +{ + return (__m512i) (__A); +} + +#define _mm512_shuffle_f64x2(A, B, imm) \ + (__m512d)__builtin_ia32_shuf_f64x2((__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(B), (int)(imm)) + + +#define _mm512_shuffle_i64x2(A, B, imm) \ + (__m512i)__builtin_ia32_shuf_i64x2((__v8di)(__m512i)(A), \ + (__v8di)(__m512i)(B), (int)(imm)) + +#define _mm256_cmp_ps_mask(a, b, p) \ + (__mmask8)__builtin_ia32_cmpps256_mask((__v8sf)(__m256)(a), \ + (__v8sf)(__m256)(b), (int)(p), \ + (__mmask8)-1) + +#define _mm512_cmp_ps_mask(A, B, P) \ + _mm512_cmp_round_ps_mask((A), (B), (P), _MM_FROUND_CUR_DIRECTION) + + +/* Integer compare */ + +#define _mm512_cmpeq_epi32_mask(A, B) \ + _mm512_cmp_epi32_mask((A), (B), _MM_CMPINT_EQ) +#define _mm512_mask_cmpeq_epi32_mask(k, A, B) \ + _mm512_mask_cmp_epi32_mask((k), (A), (B), _MM_CMPINT_EQ) +#define _mm512_cmpge_epi32_mask(A, B) \ + _mm512_cmp_epi32_mask((A), (B), _MM_CMPINT_GE) +#define _mm512_mask_cmpge_epi32_mask(k, A, B) \ + _mm512_mask_cmp_epi32_mask((k), (A), (B), _MM_CMPINT_GE) +#define _mm512_cmpgt_epi32_mask(A, B) \ + _mm512_cmp_epi32_mask((A), (B), _MM_CMPINT_GT) +#define _mm512_mask_cmpgt_epi32_mask(k, A, B) \ + _mm512_mask_cmp_epi32_mask((k), (A), (B), _MM_CMPINT_GT) +#define _mm512_cmple_epi32_mask(A, B) \ + _mm512_cmp_epi32_mask((A), (B), _MM_CMPINT_LE) +#define _mm512_mask_cmple_epi32_mask(k, A, B) \ + _mm512_mask_cmp_epi32_mask((k), (A), (B), _MM_CMPINT_LE) +#define _mm512_cmplt_epi32_mask(A, B) \ + _mm512_cmp_epi32_mask((A), (B), _MM_CMPINT_LT) +#define _mm512_mask_cmplt_epi32_mask(k, A, B) \ + _mm512_mask_cmp_epi32_mask((k), (A), (B), _MM_CMPINT_LT) +#define _mm512_cmpneq_epi32_mask(A, B) \ + _mm512_cmp_epi32_mask((A), (B), _MM_CMPINT_NE) +#define _mm512_mask_cmpneq_epi32_mask(k, A, B) \ + _mm512_mask_cmp_epi32_mask((k), (A), (B), _MM_CMPINT_NE) + +#define _mm512_cmpeq_epu32_mask(A, B) \ + _mm512_cmp_epu32_mask((A), (B), _MM_CMPINT_EQ) +#define _mm512_mask_cmpeq_epu32_mask(k, A, B) \ + _mm512_mask_cmp_epu32_mask((k), (A), (B), _MM_CMPINT_EQ) +#define _mm512_cmpge_epu32_mask(A, B) \ + _mm512_cmp_epu32_mask((A), (B), _MM_CMPINT_GE) +#define _mm512_mask_cmpge_epu32_mask(k, A, B) \ + _mm512_mask_cmp_epu32_mask((k), (A), (B), _MM_CMPINT_GE) +#define _mm512_cmpgt_epu32_mask(A, B) \ + _mm512_cmp_epu32_mask((A), (B), _MM_CMPINT_GT) +#define _mm512_mask_cmpgt_epu32_mask(k, A, B) \ + _mm512_mask_cmp_epu32_mask((k), (A), (B), _MM_CMPINT_GT) +#define _mm512_cmple_epu32_mask(A, B) \ + _mm512_cmp_epu32_mask((A), (B), _MM_CMPINT_LE) +#define _mm512_mask_cmple_epu32_mask(k, A, B) \ + _mm512_mask_cmp_epu32_mask((k), (A), (B), _MM_CMPINT_LE) +#define _mm512_cmplt_epu32_mask(A, B) \ + _mm512_cmp_epu32_mask((A), (B), _MM_CMPINT_LT) +#define _mm512_mask_cmplt_epu32_mask(k, A, B) \ + 
_mm512_mask_cmp_epu32_mask((k), (A), (B), _MM_CMPINT_LT) +#define _mm512_cmpneq_epu32_mask(A, B) \ + _mm512_cmp_epu32_mask((A), (B), _MM_CMPINT_NE) +#define _mm512_mask_cmpneq_epu32_mask(k, A, B) \ + _mm512_mask_cmp_epu32_mask((k), (A), (B), _MM_CMPINT_NE) + +#define _mm512_cmpeq_epi64_mask(A, B) \ + _mm512_cmp_epi64_mask((A), (B), _MM_CMPINT_EQ) +#define _mm512_mask_cmpeq_epi64_mask(k, A, B) \ + _mm512_mask_cmp_epi64_mask((k), (A), (B), _MM_CMPINT_EQ) +#define _mm512_cmpge_epi64_mask(A, B) \ + _mm512_cmp_epi64_mask((A), (B), _MM_CMPINT_GE) +#define _mm512_mask_cmpge_epi64_mask(k, A, B) \ + _mm512_mask_cmp_epi64_mask((k), (A), (B), _MM_CMPINT_GE) +#define _mm512_cmpgt_epi64_mask(A, B) \ + _mm512_cmp_epi64_mask((A), (B), _MM_CMPINT_GT) +#define _mm512_mask_cmpgt_epi64_mask(k, A, B) \ + _mm512_mask_cmp_epi64_mask((k), (A), (B), _MM_CMPINT_GT) +#define _mm512_cmple_epi64_mask(A, B) \ + _mm512_cmp_epi64_mask((A), (B), _MM_CMPINT_LE) +#define _mm512_mask_cmple_epi64_mask(k, A, B) \ + _mm512_mask_cmp_epi64_mask((k), (A), (B), _MM_CMPINT_LE) +#define _mm512_cmplt_epi64_mask(A, B) \ + _mm512_cmp_epi64_mask((A), (B), _MM_CMPINT_LT) +#define _mm512_mask_cmplt_epi64_mask(k, A, B) \ + _mm512_mask_cmp_epi64_mask((k), (A), (B), _MM_CMPINT_LT) +#define _mm512_cmpneq_epi64_mask(A, B) \ + _mm512_cmp_epi64_mask((A), (B), _MM_CMPINT_NE) +#define _mm512_mask_cmpneq_epi64_mask(k, A, B) \ + _mm512_mask_cmp_epi64_mask((k), (A), (B), _MM_CMPINT_NE) + +#define _mm512_cmpeq_epu64_mask(A, B) \ + _mm512_cmp_epu64_mask((A), (B), _MM_CMPINT_EQ) +#define _mm512_mask_cmpeq_epu64_mask(k, A, B) \ + _mm512_mask_cmp_epu64_mask((k), (A), (B), _MM_CMPINT_EQ) +#define _mm512_cmpge_epu64_mask(A, B) \ + _mm512_cmp_epu64_mask((A), (B), _MM_CMPINT_GE) +#define _mm512_mask_cmpge_epu64_mask(k, A, B) \ + _mm512_mask_cmp_epu64_mask((k), (A), (B), _MM_CMPINT_GE) +#define _mm512_cmpgt_epu64_mask(A, B) \ + _mm512_cmp_epu64_mask((A), (B), _MM_CMPINT_GT) +#define _mm512_mask_cmpgt_epu64_mask(k, A, B) \ + _mm512_mask_cmp_epu64_mask((k), (A), (B), _MM_CMPINT_GT) +#define _mm512_cmple_epu64_mask(A, B) \ + _mm512_cmp_epu64_mask((A), (B), _MM_CMPINT_LE) +#define _mm512_mask_cmple_epu64_mask(k, A, B) \ + _mm512_mask_cmp_epu64_mask((k), (A), (B), _MM_CMPINT_LE) +#define _mm512_cmplt_epu64_mask(A, B) \ + _mm512_cmp_epu64_mask((A), (B), _MM_CMPINT_LT) +#define _mm512_mask_cmplt_epu64_mask(k, A, B) \ + _mm512_mask_cmp_epu64_mask((k), (A), (B), _MM_CMPINT_LT) +#define _mm512_cmpneq_epu64_mask(A, B) \ + _mm512_cmp_epu64_mask((A), (B), _MM_CMPINT_NE) +#define _mm512_mask_cmpneq_epu64_mask(k, A, B) \ + _mm512_mask_cmp_epu64_mask((k), (A), (B), _MM_CMPINT_NE) + +#define _mm512_cmp_pd_mask(A, B, P) \ + _mm512_cmp_round_pd_mask((A), (B), (P), _MM_FROUND_CUR_DIRECTION) + +#define _mm512_cmp_epu64_mask(a, b, p) \ + (__mmask8)__builtin_ia32_ucmpq512_mask((__v8di)(__m512i)(a), \ + (__v8di)(__m512i)(b), (int)(p), \ + (__mmask8)-1) + +#define _mm512_cmp_epi32_mask(a, b, p) \ + (__mmask16)__builtin_ia32_cmpd512_mask((__v16si)(__m512i)(a), \ + (__v16si)(__m512i)(b), (int)(p), \ + (__mmask16)-1) + +#define _mm512_cmp_epi64_mask(a, b, p) \ + (__mmask8)__builtin_ia32_cmpq512_mask((__v8di)(__m512i)(a), \ + (__v8di)(__m512i)(b), (int)(p), \ + (__mmask8)-1) + +#define _mm512_cmp_epu32_mask(a, b, p) \ + (__mmask16)__builtin_ia32_ucmpd512_mask((__v16si)(__m512i)(a), \ + (__v16si)(__m512i)(b), (int)(p), \ + (__mmask16)-1) + +#define _mm512_cmp_round_ps_mask(A, B, P, R) \ + (__mmask16)__builtin_ia32_cmpps512_mask((__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(B), (int)(P), \ + 
(__mmask16)-1, (int)(R)) + +#define _mm512_mask_cmp_round_ps_mask(U, A, B, P, R) \ + (__mmask16)__builtin_ia32_cmpps512_mask((__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(B), (int)(P), \ + (__mmask16)(U), (int)(R)) + +#define _mm512_cmp_round_pd_mask(A, B, P, R) \ + (__mmask8)__builtin_ia32_cmppd512_mask((__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(B), (int)(P), \ + (__mmask8)-1, (int)(R)) + + +#undef __DEFAULT_FN_ATTRS + +/*===---- popcntintrin.h - POPCNT intrinsics -------------------------------=== + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ + +/* Define the default attributes for the functions in this file. */ +#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("popcnt"))) + +/// Counts the number of bits in the source operand having a value of 1. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the POPCNT instruction. +/// +/// \param __A +/// An unsigned 32-bit integer operand. +/// \returns A 32-bit integer containing the number of bits with value 1 in the +/// source operand. +static __inline__ int __DEFAULT_FN_ATTRS +_mm_popcnt_u32(unsigned int __A) +{ + return __builtin_popcount(__A); +} + +#ifdef __x86_64__ +/// Counts the number of bits in the source operand having a value of 1. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the POPCNT instruction. +/// +/// \param __A +/// An unsigned 64-bit integer operand. +/// \returns A 64-bit integer containing the number of bits with value 1 in the +/// source operand. +static __inline__ long long __DEFAULT_FN_ATTRS +_mm_popcnt_u64(unsigned long long __A) +{ + return __builtin_popcountll(__A); +} +#endif /* __x86_64__ */ + +#endif //_IMMINTRIN_H \ No newline at end of file diff --git a/src/coreclr/src/vm/CMakeLists.txt b/src/coreclr/src/vm/CMakeLists.txt index 4c28c1708ab3dd..cf6c6a65eaa973 100644 --- a/src/coreclr/src/vm/CMakeLists.txt +++ b/src/coreclr/src/vm/CMakeLists.txt @@ -542,7 +542,7 @@ set(GC_SOURCES_WKS ../gc/softwarewritewatch.cpp ../gc/handletablecache.cpp) -if (CLR_CMAKE_TARGET_WIN32 AND CLR_CMAKE_TARGET_ARCH_AMD64) +if (CLR_CMAKE_TARGET_ARCH_AMD64) set ( GC_SOURCES_WKS ${GC_SOURCES_WKS} ../gc/vxsort/isa_detection.cpp From 2409c50267541b10a584db9774d9c8a156529c97 Mon Sep 17 00:00:00 2001 From: Peter Sollich Date: Fri, 10 Jul 2020 14:55:07 +0200 Subject: [PATCH 28/31] Add more definitions to immintrin.h --- src/coreclr/src/pal/inc/rt/cpp/immintrin.h | 175 +++++++++++++++++++++ 1 file changed, 175 insertions(+) diff --git a/src/coreclr/src/pal/inc/rt/cpp/immintrin.h b/src/coreclr/src/pal/inc/rt/cpp/immintrin.h index 7d9b02332a1868..04ab0144b15e0d 100644 --- a/src/coreclr/src/pal/inc/rt/cpp/immintrin.h +++ b/src/coreclr/src/pal/inc/rt/cpp/immintrin.h @@ -19,6 +19,9 @@ typedef long long __v4di __attribute__ ((__vector_size__ (32))); typedef float __v8sf __attribute__ ((__vector_size__ (32))); typedef unsigned char __v16qu __attribute__((__vector_size__(16))); typedef unsigned long long __v4du __attribute__ ((__vector_size__ (32))); +typedef unsigned int __v4su __attribute__((__vector_size__(16))); +typedef int __v4si __attribute__((__vector_size__(16))); +typedef unsigned int __v8su __attribute__ ((__vector_size__ (32))); typedef float __m256 __attribute__ ((__vector_size__ (32), __aligned__(32))); typedef double 
__m256d __attribute__((__vector_size__(32), __aligned__(32))); @@ -698,6 +701,80 @@ _mm256_storeu_pd(double *__p, __m256d __a) ((struct __storeu_pd*)__p)->__v = __a; } +/* Arithmetic */ +/// Adds two 256-bit vectors of [4 x double]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VADDPD instruction. +/// +/// \param __a +/// A 256-bit vector of [4 x double] containing one of the source operands. +/// \param __b +/// A 256-bit vector of [4 x double] containing one of the source operands. +/// \returns A 256-bit vector of [4 x double] containing the sums of both +/// operands. +static __inline __m256d __DEFAULT_FN_ATTRS +_mm256_add_pd(__m256d __a, __m256d __b) +{ + return (__m256d)((__v4df)__a+(__v4df)__b); +} + +/// Subtracts two 256-bit vectors of [4 x double]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VSUBPD instruction. +/// +/// \param __a +/// A 256-bit vector of [4 x double] containing the minuend. +/// \param __b +/// A 256-bit vector of [4 x double] containing the subtrahend. +/// \returns A 256-bit vector of [4 x double] containing the differences between +/// both operands. +static __inline __m256d __DEFAULT_FN_ATTRS +_mm256_sub_pd(__m256d __a, __m256d __b) +{ + return (__m256d)((__v4df)__a-(__v4df)__b); +} + +/// Adds two 256-bit vectors of [8 x float]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VADDPS instruction. +/// +/// \param __a +/// A 256-bit vector of [8 x float] containing one of the source operands. +/// \param __b +/// A 256-bit vector of [8 x float] containing one of the source operands. +/// \returns A 256-bit vector of [8 x float] containing the sums of both +/// operands. +static __inline __m256 __DEFAULT_FN_ATTRS +_mm256_add_ps(__m256 __a, __m256 __b) +{ + return (__m256)((__v8sf)__a+(__v8sf)__b); +} + +/// Subtracts two 256-bit vectors of [8 x float]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VSUBPS instruction. +/// +/// \param __a +/// A 256-bit vector of [8 x float] containing the minuend. +/// \param __b +/// A 256-bit vector of [8 x float] containing the subtrahend. +/// \returns A 256-bit vector of [8 x float] containing the differences between +/// both operands. 
+static __inline __m256 __DEFAULT_FN_ATTRS +_mm256_sub_ps(__m256 __a, __m256 __b) +{ + return (__m256)((__v8sf)__a-(__v8sf)__b); +} + + #undef __DEFAULT_FN_ATTRS128 /*===---- avx2intrin.h - AVX2 intrinsics -----------------------------------=== @@ -725,6 +802,18 @@ _mm256_max_epu32(__m256i __a, __m256i __b) return (__m256i)__builtin_ia32_pmaxud256((__v8si)__a, (__v8si)__b); } +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_min_epi32(__m256i __a, __m256i __b) +{ + return (__m256i)__builtin_ia32_pminsd256((__v8si)__a, (__v8si)__b); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_max_epi32(__m256i __a, __m256i __b) +{ + return (__m256i)__builtin_ia32_pmaxsd256((__v8si)__a, (__v8si)__b); +} + #define _mm256_blend_epi32(V1, V2, M) \ (__m256i)__builtin_ia32_pblendd256((__v8si)(__m256i)(V1), \ (__v8si)(__m256i)(V2), (int)(M)) @@ -767,6 +856,72 @@ _mm256_permutevar8x32_ps(__m256 __a, __m256i __b) return (__m256)__builtin_ia32_permvarsf256((__v8sf)__a, (__v8si)__b); } +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_add_epi64(__m256i __a, __m256i __b) +{ + return (__m256i)((__v4du)__a + (__v4du)__b); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_sub_epi64(__m256i __a, __m256i __b) +{ + return (__m256i)((__v4du)__a - (__v4du)__b); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_slli_epi64(__m256i __a, int __count) +{ + return __builtin_ia32_psllqi256((__v4di)__a, __count); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_srli_epi64(__m256i __a, int __count) +{ + return __builtin_ia32_psrlqi256((__v4di)__a, __count); +} + +#define _mm256_extracti128_si256(V, M) \ + (__m128i)__builtin_ia32_extract128i256((__v4di)(__m256i)(V), (int)(M)) + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_cvtepu32_epi64(__m128i __V) +{ + return (__m256i)__builtin_convertvector((__v4su)__V, __v4di); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_cvtepi32_epi64(__m128i __V) +{ + return (__m256i)__builtin_convertvector((__v4si)__V, __v4di); +} + +#define _mm256_permute4x64_epi64(V, M) \ + (__m256i)__builtin_ia32_permdi256((__v4di)(__m256i)(V), (int)(M)) + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_add_epi32(__m256i __a, __m256i __b) +{ + return (__m256i)((__v8su)__a + (__v8su)__b); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_sub_epi32(__m256i __a, __m256i __b) +{ + return (__m256i)((__v8su)__a - (__v8su)__b); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_slli_epi32(__m256i __a, int __count) +{ + return (__m256i)__builtin_ia32_pslldi256((__v8si)__a, __count); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_srli_epi32(__m256i __a, int __count) +{ + return (__m256i)__builtin_ia32_psrldi256((__v8si)__a, __count); +} + #undef __DEFAULT_FN_ATTRS #undef __DEFAULT_FN_ATTRS128 @@ -961,6 +1116,18 @@ _mm512_storeu_si512 (void *__P, __m512i __A) ((struct __storeu_si512*)__P)->__v = __A; } +static __inline __m512i __DEFAULT_FN_ATTRS512 +_mm512_min_epi32(__m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_pminsd512((__v16si)__A, (__v16si)__B); +} + +static __inline __m512i __DEFAULT_FN_ATTRS512 +_mm512_max_epi32(__m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_pmaxsd512((__v16si)__A, (__v16si)__B); +} + static __inline __m512i __DEFAULT_FN_ATTRS512 _mm512_min_epi64(__m512i __A, __m512i __B) { @@ -991,6 +1158,14 @@ _mm512_max_epu32(__m512i __A, __m512i __B) return (__m512i)__builtin_ia32_pmaxud512((__v16si)__A, (__v16si)__B); } +static __inline__ __m512i 
__DEFAULT_FN_ATTRS512 +_mm512_mask_max_epi32 (__m512i __W, __mmask16 __M, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectd_512((__mmask16)__M, + (__v16si)_mm512_max_epi32(__A, __B), + (__v16si)__W); +} + static __inline__ __m512i __DEFAULT_FN_ATTRS512 _mm512_mask_max_epi64 (__m512i __W, __mmask8 __M, __m512i __A, __m512i __B) { From 08b4c4c1a1add5a9c073e79a80fa8853afac739c Mon Sep 17 00:00:00 2001 From: Peter Sollich Date: Fri, 10 Jul 2020 15:39:49 +0200 Subject: [PATCH 29/31] Fix cmake warnings about mismatched endif clauses. --- src/coreclr/src/gc/CMakeLists.txt | 2 +- src/coreclr/src/gc/sample/CMakeLists.txt | 2 +- src/coreclr/src/vm/CMakeLists.txt | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/coreclr/src/gc/CMakeLists.txt b/src/coreclr/src/gc/CMakeLists.txt index 1a932482f2e038..0645e92911db38 100644 --- a/src/coreclr/src/gc/CMakeLists.txt +++ b/src/coreclr/src/gc/CMakeLists.txt @@ -51,7 +51,7 @@ if (CLR_CMAKE_TARGET_ARCH_AMD64) vxsort/smallsort/bitonic_sort.AVX512.int64_t.generated.cpp vxsort/smallsort/bitonic_sort.AVX512.int32_t.generated.cpp ) -endif (CLR_CMAKE_TARGET_WIN32 AND CLR_CMAKE_TARGET_ARCH_AMD64) +endif (CLR_CMAKE_TARGET_ARCH_AMD64) if (CLR_CMAKE_TARGET_WIN32) set(GC_HEADERS diff --git a/src/coreclr/src/gc/sample/CMakeLists.txt b/src/coreclr/src/gc/sample/CMakeLists.txt index 5cd20e017bb380..69016e073057fc 100644 --- a/src/coreclr/src/gc/sample/CMakeLists.txt +++ b/src/coreclr/src/gc/sample/CMakeLists.txt @@ -36,7 +36,7 @@ if (CLR_CMAKE_TARGET_ARCH_AMD64) ../vxsort/smallsort/bitonic_sort.AVX512.int64_t.generated.cpp ../vxsort/smallsort/bitonic_sort.AVX512.int32_t.generated.cpp ) -endif (CLR_CMAKE_TARGET_WIN32 AND CLR_CMAKE_TARGET_ARCH_AMD64) +endif (CLR_CMAKE_TARGET_ARCH_AMD64) if(CLR_CMAKE_TARGET_WIN32) set (GC_LINK_LIBRARIES diff --git a/src/coreclr/src/vm/CMakeLists.txt b/src/coreclr/src/vm/CMakeLists.txt index a79cdda5b1f4f9..ada07d14c12987 100644 --- a/src/coreclr/src/vm/CMakeLists.txt +++ b/src/coreclr/src/vm/CMakeLists.txt @@ -554,7 +554,7 @@ if (CLR_CMAKE_TARGET_ARCH_AMD64) ../gc/vxsort/smallsort/bitonic_sort.AVX512.int64_t.generated.cpp ../gc/vxsort/smallsort/bitonic_sort.AVX512.int32_t.generated.cpp ) -endif (CLR_CMAKE_TARGET_WIN32 AND CLR_CMAKE_TARGET_ARCH_AMD64) +endif (CLR_CMAKE_TARGET_ARCH_AMD64) set(GC_HEADERS_WKS ${GC_HEADERS_DAC_AND_WKS_COMMON} From f87af51a52e93fa471326580d6678c45becce414 Mon Sep 17 00:00:00 2001 From: Peter Sollich Date: Mon, 13 Jul 2020 15:00:19 +0200 Subject: [PATCH 30/31] Disable Linux support for now due to multiple compile & link errors. 
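
The PAL shim for immintrin.h added earlier in this series turned out not to
cover everything the vxsort sources need, so non-Windows builds currently
fail with multiple compile and link errors. This change deletes the shim and
gates vxsort on Windows x64 again, both in the CMake files
(CLR_CMAKE_TARGET_ARCH_AMD64 AND CLR_CMAKE_TARGET_WIN32) and in gc.cpp.

A sketch of the resulting dispatch, for clarity: the #if gate below is
verbatim from the gc.cpp hunk in this patch, while the caller and the two
sort entry points are illustrative stand-ins, not the real CoreCLR
signatures.

#include <stdint.h>

#if defined(TARGET_AMD64) && defined(TARGET_WINDOWS)
#define USE_VXSORT
#else
#define USE_INTROSORT
#endif

void do_vxsort(uint8_t** low, uint8_t** high);   // stand-in declaration
void introsort(uint8_t** low, uint8_t** high);   // stand-in declaration

void sort_mark_list(uint8_t** low, uint8_t** high)
{
#ifdef USE_VXSORT
    do_vxsort(low, high);    // vectorized path: Windows x64 only, for now
#else
    introsort(low, high);    // portable fallback used everywhere else
#endif
}
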
--- src/coreclr/src/gc/CMakeLists.txt | 4 +- src/coreclr/src/gc/gc.cpp | 2 +- src/coreclr/src/gc/sample/CMakeLists.txt | 4 +- src/coreclr/src/pal/inc/rt/cpp/immintrin.h | 1466 -------------------- src/coreclr/src/vm/CMakeLists.txt | 4 +- 5 files changed, 7 insertions(+), 1473 deletions(-) delete mode 100644 src/coreclr/src/pal/inc/rt/cpp/immintrin.h diff --git a/src/coreclr/src/gc/CMakeLists.txt b/src/coreclr/src/gc/CMakeLists.txt index 0645e92911db38..c46f46fdfbae17 100644 --- a/src/coreclr/src/gc/CMakeLists.txt +++ b/src/coreclr/src/gc/CMakeLists.txt @@ -39,7 +39,7 @@ else() windows/gcenv.windows.cpp) endif(CLR_CMAKE_HOST_UNIX) -if (CLR_CMAKE_TARGET_ARCH_AMD64) +if (CLR_CMAKE_TARGET_ARCH_AMD64 AND CLR_CMAKE_TARGET_WIN32) set ( GC_SOURCES ${GC_SOURCES} vxsort/isa_detection_dummy.cpp @@ -51,7 +51,7 @@ if (CLR_CMAKE_TARGET_ARCH_AMD64) vxsort/smallsort/bitonic_sort.AVX512.int64_t.generated.cpp vxsort/smallsort/bitonic_sort.AVX512.int32_t.generated.cpp ) -endif (CLR_CMAKE_TARGET_ARCH_AMD64) +endif (CLR_CMAKE_TARGET_ARCH_AMD64 AND CLR_CMAKE_TARGET_WIN32) if (CLR_CMAKE_TARGET_WIN32) set(GC_HEADERS diff --git a/src/coreclr/src/gc/gc.cpp b/src/coreclr/src/gc/gc.cpp index 97012072127851..9e68050b436b13 100644 --- a/src/coreclr/src/gc/gc.cpp +++ b/src/coreclr/src/gc/gc.cpp @@ -19,7 +19,7 @@ #include "gcpriv.h" -#if defined(TARGET_AMD64) +#if defined(TARGET_AMD64) && defined(TARGET_WINDOWS) #define USE_VXSORT #else #define USE_INTROSORT diff --git a/src/coreclr/src/gc/sample/CMakeLists.txt b/src/coreclr/src/gc/sample/CMakeLists.txt index 69016e073057fc..40bb0b5dcd5430 100644 --- a/src/coreclr/src/gc/sample/CMakeLists.txt +++ b/src/coreclr/src/gc/sample/CMakeLists.txt @@ -24,7 +24,7 @@ set(SOURCES ../softwarewritewatch.cpp ) -if (CLR_CMAKE_TARGET_ARCH_AMD64) +if (CLR_CMAKE_TARGET_ARCH_AMD64 AND CLR_CMAKE_TARGET_WIN32) set ( SOURCES ${SOURCES} ../vxsort/isa_detection_dummy.cpp @@ -36,7 +36,7 @@ if (CLR_CMAKE_TARGET_ARCH_AMD64) ../vxsort/smallsort/bitonic_sort.AVX512.int64_t.generated.cpp ../vxsort/smallsort/bitonic_sort.AVX512.int32_t.generated.cpp ) -endif (CLR_CMAKE_TARGET_ARCH_AMD64) +endif (CLR_CMAKE_TARGET_ARCH_AMD64 AND CLR_CMAKE_TARGET_WIN32) if(CLR_CMAKE_TARGET_WIN32) set (GC_LINK_LIBRARIES diff --git a/src/coreclr/src/pal/inc/rt/cpp/immintrin.h b/src/coreclr/src/pal/inc/rt/cpp/immintrin.h deleted file mode 100644 index 04ab0144b15e0d..00000000000000 --- a/src/coreclr/src/pal/inc/rt/cpp/immintrin.h +++ /dev/null @@ -1,1466 +0,0 @@ -// Licensed to the .NET Foundation under one or more agreements. -// The .NET Foundation licenses this file to you under the MIT license. -// See the LICENSE file in the project root for more information. - -#ifndef _IMMINTRIN_H -#define _IMMINTRIN_H - -/*===---- avxintrin.h - AVX intrinsics -------------------------------------=== - * - * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. - * See https://llvm.org/LICENSE.txt for license information. 
- * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception - * - *===-----------------------------------------------------------------------=== - */ - -typedef double __v4df __attribute__ ((__vector_size__ (32))); -typedef long long __v4di __attribute__ ((__vector_size__ (32))); -typedef float __v8sf __attribute__ ((__vector_size__ (32))); -typedef unsigned char __v16qu __attribute__((__vector_size__(16))); -typedef unsigned long long __v4du __attribute__ ((__vector_size__ (32))); -typedef unsigned int __v4su __attribute__((__vector_size__(16))); -typedef int __v4si __attribute__((__vector_size__(16))); -typedef unsigned int __v8su __attribute__ ((__vector_size__ (32))); - -typedef float __m256 __attribute__ ((__vector_size__ (32), __aligned__(32))); -typedef double __m256d __attribute__((__vector_size__(32), __aligned__(32))); -typedef float __m256_u __attribute__ ((__vector_size__ (32), __aligned__(1))); -typedef long long __m256i __attribute__((__vector_size__(32), __aligned__(32))); -typedef double __m256d_u __attribute__((__vector_size__(32), __aligned__(1))); -typedef long long __m256i_u __attribute__((__vector_size__(32), __aligned__(1))); -typedef int __v8si __attribute__ ((__vector_size__ (32))); - -/* Define the default attributes for the functions in this file. */ -#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("avx"), __min_vector_width__(256))) -#define __DEFAULT_FN_ATTRS128 __attribute__((__always_inline__, __nodebug__, __target__("avx"), __min_vector_width__(128))) - -#define _CMP_GT_OS 0x0e /* Greater-than (ordered, signaling) */ - -//// Stores integer values from a 256-bit integer vector to an unaligned -/// memory location pointed to by \a __p. -/// -/// \headerfile -/// -/// This intrinsic corresponds to the VMOVDQU instruction. -/// -/// \param __p -/// A pointer to a memory location that will receive the integer values. -/// \param __a -/// A 256-bit integer vector containing the values to be moved. -static __inline void __DEFAULT_FN_ATTRS -_mm256_storeu_si256(__m256i_u *__p, __m256i __a) -{ - struct __storeu_si256 { - __m256i_u __v; - } __attribute__((__packed__, __may_alias__)); - ((struct __storeu_si256*)__p)->__v = __a; -} - -/// Stores single-precision floating point values from a 256-bit vector -/// of [8 x float] to an unaligned memory location pointed to by \a __p. -/// -/// \headerfile -/// -/// This intrinsic corresponds to the VMOVUPS instruction. -/// -/// \param __p -/// A pointer to a memory location that will receive the float values. -/// \param __a -/// A 256-bit vector of [8 x float] containing the values to be moved. -static __inline void __DEFAULT_FN_ATTRS -_mm256_storeu_ps(float *__p, __m256 __a) -{ - struct __storeu_ps { - __m256_u __v; - } __attribute__((__packed__, __may_alias__)); - ((struct __storeu_ps*)__p)->__v = __a; -} - -/// Loads 256 bits of integer data from an unaligned memory location -/// pointed to by \a __p into a 256-bit integer vector. This intrinsic may -/// perform better than \c _mm256_loadu_si256 when the data crosses a cache -/// line boundary. -/// -/// \headerfile -/// -/// This intrinsic corresponds to the VLDDQU instruction. -/// -/// \param __p -/// A pointer to a 256-bit integer vector containing integer values. -/// \returns A 256-bit integer vector containing the moved values. 
-static __inline __m256i __DEFAULT_FN_ATTRS -_mm256_lddqu_si256(__m256i const *__p) -{ - return (__m256i)__builtin_ia32_lddqu256((char const *)__p); -} - -/// Loads 4 double-precision floating point values from an unaligned -/// memory location pointed to by \a __p into a vector of [4 x double]. -/// -/// \headerfile -/// -/// This intrinsic corresponds to the VMOVUPD instruction. -/// -/// \param __p -/// A pointer to a memory location containing double-precision floating -/// point values. -/// \returns A 256-bit vector of [4 x double] containing the moved values. -static __inline __m256d __DEFAULT_FN_ATTRS -_mm256_loadu_pd(double const *__p) -{ - struct __loadu_pd { - __m256d_u __v; - } __attribute__((__packed__, __may_alias__)); - return ((const struct __loadu_pd*)__p)->__v; -} - -/// Casts a 256-bit integer vector into a 256-bit floating-point vector -/// of [4 x double]. -/// -/// \headerfile -/// -/// This intrinsic has no corresponding instruction. -/// -/// \param __a -/// A 256-bit integer vector. -/// \returns A 256-bit floating-point vector of [4 x double] containing the same -/// bitwise pattern as the parameter. -static __inline __m256d __DEFAULT_FN_ATTRS -_mm256_castsi256_pd(__m256i __a) -{ - return (__m256d)__a; -} - -/// Casts a 256-bit floating-point vector of [8 x float] into a 256-bit -/// integer vector. -/// -/// \headerfile -/// -/// This intrinsic has no corresponding instruction. -/// -/// \param __a -/// A 256-bit floating-point vector of [8 x float]. -/// \returns A 256-bit integer vector containing the same bitwise pattern as the -/// parameter. -static __inline __m256i __DEFAULT_FN_ATTRS -_mm256_castps_si256(__m256 __a) -{ - return (__m256i)__a; -} - -/// Casts a 256-bit floating-point vector of [8 x float] into a 256-bit -/// floating-point vector of [4 x double]. -/// -/// \headerfile -/// -/// This intrinsic has no corresponding instruction. -/// -/// \param __a -/// A 256-bit floating-point vector of [8 x float]. -/// \returns A 256-bit floating-point vector of [4 x double] containing the same -/// bitwise pattern as the parameter. -static __inline __m256d __DEFAULT_FN_ATTRS -_mm256_castps_pd(__m256 __a) -{ - return (__m256d)__a; -} - -/// Merges 64-bit double-precision data values stored in either of the -/// two 256-bit vectors of [4 x double], as specified by the 256-bit vector -/// operand. -/// -/// \headerfile -/// -/// This intrinsic corresponds to the VBLENDVPD instruction. -/// -/// \param __a -/// A 256-bit vector of [4 x double]. -/// \param __b -/// A 256-bit vector of [4 x double]. -/// \param __c -/// A 256-bit vector operand, with mask bits 255, 191, 127, and 63 specifying -/// how the values are to be copied. The position of the mask bit corresponds -/// to the most significant bit of a copied value. When a mask bit is 0, the -/// corresponding 64-bit element in operand \a __a is copied to the same -/// position in the destination. When a mask bit is 1, the corresponding -/// 64-bit element in operand \a __b is copied to the same position in the -/// destination. -/// \returns A 256-bit vector of [4 x double] containing the copied values. -static __inline __m256d __DEFAULT_FN_ATTRS -_mm256_blendv_pd(__m256d __a, __m256d __b, __m256d __c) -{ - return (__m256d)__builtin_ia32_blendvpd256( - (__v4df)__a, (__v4df)__b, (__v4df)__c); -} - -/// Casts a 256-bit floating-point vector of [4 x double] into a 256-bit -/// integer vector. -/// -/// \headerfile -/// -/// This intrinsic has no corresponding instruction. 
-/// -/// \param __a -/// A 256-bit floating-point vector of [4 x double]. -/// \returns A 256-bit integer vector containing the same bitwise pattern as the -/// parameter. -static __inline __m256i __DEFAULT_FN_ATTRS -_mm256_castpd_si256(__m256d __a) -{ - return (__m256i)__a; -} - -/// Constructs a 256-bit floating-point vector of [4 x double] -/// initialized with the specified double-precision floating-point values. -/// -/// \headerfile -/// -/// This intrinsic corresponds to the VUNPCKLPD+VINSERTF128 -/// instruction. -/// -/// \param __a -/// A double-precision floating-point value used to initialize bits [255:192] -/// of the result. -/// \param __b -/// A double-precision floating-point value used to initialize bits [191:128] -/// of the result. -/// \param __c -/// A double-precision floating-point value used to initialize bits [127:64] -/// of the result. -/// \param __d -/// A double-precision floating-point value used to initialize bits [63:0] -/// of the result. -/// \returns An initialized 256-bit floating-point vector of [4 x double]. -static __inline __m256d __DEFAULT_FN_ATTRS -_mm256_set_pd(double __a, double __b, double __c, double __d) -{ - return __extension__ (__m256d){ __d, __c, __b, __a }; -} - -/* Create vectors with repeated elements */ -/// Constructs a 256-bit floating-point vector of [4 x double], with each -/// of the four double-precision floating-point vector elements set to the -/// specified double-precision floating-point value. -/// -/// \headerfile -/// -/// This intrinsic corresponds to the VMOVDDUP+VINSERTF128 instruction. -/// -/// \param __w -/// A double-precision floating-point value used to initialize each vector -/// element of the result. -/// \returns An initialized 256-bit floating-point vector of [4 x double]. -static __inline __m256d __DEFAULT_FN_ATTRS -_mm256_set1_pd(double __w) -{ - return _mm256_set_pd(__w, __w, __w, __w); -} - -/// This intrinsic corresponds to the VSHUFPD instruction. -/// -/// \param a -/// A 256-bit vector of [4 x double]. -/// \param b -/// A 256-bit vector of [4 x double]. -/// \param mask -/// An immediate value containing 8-bit values specifying which elements to -/// copy from \a a and \a b: \n -/// Bit [0]=0: Bits [63:0] are copied from \a a to bits [63:0] of the -/// destination. \n -/// Bit [0]=1: Bits [127:64] are copied from \a a to bits [63:0] of the -/// destination. \n -/// Bit [1]=0: Bits [63:0] are copied from \a b to bits [127:64] of the -/// destination. \n -/// Bit [1]=1: Bits [127:64] are copied from \a b to bits [127:64] of the -/// destination. \n -/// Bit [2]=0: Bits [191:128] are copied from \a a to bits [191:128] of the -/// destination. \n -/// Bit [2]=1: Bits [255:192] are copied from \a a to bits [191:128] of the -/// destination. \n -/// Bit [3]=0: Bits [191:128] are copied from \a b to bits [255:192] of the -/// destination. \n -/// Bit [3]=1: Bits [255:192] are copied from \a b to bits [255:192] of the -/// destination. -/// \returns A 256-bit vector of [4 x double] containing the shuffled values. -#define _mm256_shuffle_pd(a, b, mask) \ - (__m256d)__builtin_ia32_shufpd256((__v4df)(__m256d)(a), \ - (__v4df)(__m256d)(b), (int)(mask)) - -/// Loads 8 single-precision floating point values from an unaligned -/// memory location pointed to by \a __p into a vector of [8 x float]. -/// -/// \headerfile -/// -/// This intrinsic corresponds to the VMOVUPS instruction. -/// -/// \param __p -/// A pointer to a memory location containing single-precision floating -/// point values. 
-/// \returns A 256-bit vector of [8 x float] containing the moved values. -static __inline __m256 __DEFAULT_FN_ATTRS -_mm256_loadu_ps(float const *__p) -{ - struct __loadu_ps { - __m256_u __v; - } __attribute__((__packed__, __may_alias__)); - return ((const struct __loadu_ps*)__p)->__v; -} - -/// Casts a 256-bit integer vector into a 256-bit floating-point vector -/// of [8 x float]. -/// -/// \headerfile -/// -/// This intrinsic has no corresponding instruction. -/// -/// \param __a -/// A 256-bit integer vector. -/// \returns A 256-bit floating-point vector of [8 x float] containing the same -/// bitwise pattern as the parameter. -static __inline __m256 __DEFAULT_FN_ATTRS -_mm256_castsi256_ps(__m256i __a) -{ - return (__m256)__a; -} - -/// Constructs a 256-bit floating-point vector of [8 x float] initialized -/// with the specified single-precision floating-point values. -/// -/// \headerfile -/// -/// This intrinsic is a utility function and does not correspond to a specific -/// instruction. -/// -/// \param __a -/// A single-precision floating-point value used to initialize bits [255:224] -/// of the result. -/// \param __b -/// A single-precision floating-point value used to initialize bits [223:192] -/// of the result. -/// \param __c -/// A single-precision floating-point value used to initialize bits [191:160] -/// of the result. -/// \param __d -/// A single-precision floating-point value used to initialize bits [159:128] -/// of the result. -/// \param __e -/// A single-precision floating-point value used to initialize bits [127:96] -/// of the result. -/// \param __f -/// A single-precision floating-point value used to initialize bits [95:64] -/// of the result. -/// \param __g -/// A single-precision floating-point value used to initialize bits [63:32] -/// of the result. -/// \param __h -/// A single-precision floating-point value used to initialize bits [31:0] -/// of the result. -/// \returns An initialized 256-bit floating-point vector of [8 x float]. -static __inline __m256 __DEFAULT_FN_ATTRS -_mm256_set_ps(float __a, float __b, float __c, float __d, - float __e, float __f, float __g, float __h) -{ - return __extension__ (__m256){ __h, __g, __f, __e, __d, __c, __b, __a }; -} - -/// Constructs a 256-bit floating-point vector of [8 x float], with each -/// of the eight single-precision floating-point vector elements set to the -/// specified single-precision floating-point value. -/// -/// \headerfile -/// -/// This intrinsic corresponds to the VPERMILPS+VINSERTF128 -/// instruction. -/// -/// \param __w -/// A single-precision floating-point value used to initialize each vector -/// element of the result. -/// \returns An initialized 256-bit floating-point vector of [8 x float]. -static __inline __m256 __DEFAULT_FN_ATTRS -_mm256_set1_ps(float __w) -{ - return _mm256_set_ps(__w, __w, __w, __w, __w, __w, __w, __w); -} - -/// Constructs a 256-bit integer vector initialized with the specified -/// 32-bit integral values. -/// -/// \headerfile -/// -/// This intrinsic is a utility function and does not correspond to a specific -/// instruction. -/// -/// \param __i0 -/// A 32-bit integral value used to initialize bits [255:224] of the result. -/// \param __i1 -/// A 32-bit integral value used to initialize bits [223:192] of the result. -/// \param __i2 -/// A 32-bit integral value used to initialize bits [191:160] of the result. -/// \param __i3 -/// A 32-bit integral value used to initialize bits [159:128] of the result. 
-/// \param __i4 -/// A 32-bit integral value used to initialize bits [127:96] of the result. -/// \param __i5 -/// A 32-bit integral value used to initialize bits [95:64] of the result. -/// \param __i6 -/// A 32-bit integral value used to initialize bits [63:32] of the result. -/// \param __i7 -/// A 32-bit integral value used to initialize bits [31:0] of the result. -/// \returns An initialized 256-bit integer vector. -static __inline __m256i __DEFAULT_FN_ATTRS -_mm256_set_epi32(int __i0, int __i1, int __i2, int __i3, - int __i4, int __i5, int __i6, int __i7) -{ - return __extension__ (__m256i)(__v8si){ __i7, __i6, __i5, __i4, __i3, __i2, __i1, __i0 }; -} - -/// Constructs a 256-bit integer vector of [8 x i32], with each of the -/// 32-bit integral vector elements set to the specified 32-bit integral -/// value. -/// -/// \headerfile -/// -/// This intrinsic corresponds to the VPERMILPS+VINSERTF128 -/// instruction. -/// -/// \param __i -/// A 32-bit integral value used to initialize each vector element of the -/// result. -/// \returns An initialized 256-bit integer vector of [8 x i32]. -static __inline __m256i __DEFAULT_FN_ATTRS -_mm256_set1_epi32(int __i) -{ - return _mm256_set_epi32(__i, __i, __i, __i, __i, __i, __i, __i); -} - -/// Constructs a 256-bit integer vector initialized with the specified -/// 64-bit integral values. -/// -/// \headerfile -/// -/// This intrinsic corresponds to the VPUNPCKLQDQ+VINSERTF128 -/// instruction. -/// -/// \param __a -/// A 64-bit integral value used to initialize bits [255:192] of the result. -/// \param __b -/// A 64-bit integral value used to initialize bits [191:128] of the result. -/// \param __c -/// A 64-bit integral value used to initialize bits [127:64] of the result. -/// \param __d -/// A 64-bit integral value used to initialize bits [63:0] of the result. -/// \returns An initialized 256-bit integer vector. -static __inline __m256i __DEFAULT_FN_ATTRS -_mm256_set_epi64x(long long __a, long long __b, long long __c, long long __d) -{ - return __extension__ (__m256i)(__v4di){ __d, __c, __b, __a }; -} - -/// Constructs a 256-bit integer vector of [4 x i64], with each of the -/// 64-bit integral vector elements set to the specified 64-bit integral -/// value. -/// -/// \headerfile -/// -/// This intrinsic corresponds to the VMOVDDUP+VINSERTF128 instruction. -/// -/// \param __q -/// A 64-bit integral value used to initialize each vector element of the -/// result. -/// \returns An initialized 256-bit integer vector of [4 x i64]. -static __inline __m256i __DEFAULT_FN_ATTRS -_mm256_set1_epi64x(long long __q) -{ - return _mm256_set_epi64x(__q, __q, __q, __q); -} - -/// Extracts the sign bits of single-precision floating point elements -/// in a 256-bit vector of [8 x float] and writes them to the lower order -/// bits of the return value. -/// -/// \headerfile -/// -/// This intrinsic corresponds to the VMOVMSKPS instruction. -/// -/// \param __a -/// A 256-bit vector of [8 x float] containing the single-precision floating -/// point values with sign bits to be extracted. -/// \returns The sign bits from the operand, written to bits [7:0]. -static __inline int __DEFAULT_FN_ATTRS -_mm256_movemask_ps(__m256 __a) -{ - return __builtin_ia32_movmskps256((__v8sf)__a); -} - -/* Vector extract sign mask */ -/// Extracts the sign bits of double-precision floating point elements -/// in a 256-bit vector of [4 x double] and writes them to the lower order -/// bits of the return value. 
-/// -/// \headerfile -/// -/// This intrinsic corresponds to the VMOVMSKPD instruction. -/// -/// \param __a -/// A 256-bit vector of [4 x double] containing the double-precision -/// floating point values with sign bits to be extracted. -/// \returns The sign bits from the operand, written to bits [3:0]. -static __inline int __DEFAULT_FN_ATTRS -_mm256_movemask_pd(__m256d __a) -{ - return __builtin_ia32_movmskpd256((__v4df)__a); -} - -/* Vector Blend */ -/// Merges 64-bit double-precision data values stored in either of the -/// two 256-bit vectors of [4 x double], as specified by the immediate -/// integer operand. -/// -/// \headerfile -/// -/// \code -/// __m256d _mm256_blend_pd(__m256d V1, __m256d V2, const int M); -/// \endcode -/// -/// This intrinsic corresponds to the VBLENDPD instruction. -/// -/// \param V1 -/// A 256-bit vector of [4 x double]. -/// \param V2 -/// A 256-bit vector of [4 x double]. -/// \param M -/// An immediate integer operand, with mask bits [3:0] specifying how the -/// values are to be copied. The position of the mask bit corresponds to the -/// index of a copied value. When a mask bit is 0, the corresponding 64-bit -/// element in operand \a V1 is copied to the same position in the -/// destination. When a mask bit is 1, the corresponding 64-bit element in -/// operand \a V2 is copied to the same position in the destination. -/// \returns A 256-bit vector of [4 x double] containing the copied values. -#define _mm256_blend_pd(V1, V2, M) \ - (__m256d)__builtin_ia32_blendpd256((__v4df)(__m256d)(V1), \ - (__v4df)(__m256d)(V2), (int)(M)) - - -/// Compares each of the corresponding double-precision values of two -/// 256-bit vectors of [4 x double], using the operation specified by the -/// immediate integer operand. -/// -/// Returns a [4 x double] vector consisting of four doubles corresponding to -/// the four comparison results: zero if the comparison is false, and all 1's -/// if the comparison is true. -/// -/// \headerfile -/// -/// \code -/// __m256d _mm256_cmp_pd(__m256d a, __m256d b, const int c); -/// \endcode -/// -/// This intrinsic corresponds to the VCMPPD instruction. -/// -/// \param a -/// A 256-bit vector of [4 x double]. -/// \param b -/// A 256-bit vector of [4 x double]. 
-/// \param c -/// An immediate integer operand, with bits [4:0] specifying which comparison -/// operation to use: \n -/// 0x00: Equal (ordered, non-signaling) \n -/// 0x01: Less-than (ordered, signaling) \n -/// 0x02: Less-than-or-equal (ordered, signaling) \n -/// 0x03: Unordered (non-signaling) \n -/// 0x04: Not-equal (unordered, non-signaling) \n -/// 0x05: Not-less-than (unordered, signaling) \n -/// 0x06: Not-less-than-or-equal (unordered, signaling) \n -/// 0x07: Ordered (non-signaling) \n -/// 0x08: Equal (unordered, non-signaling) \n -/// 0x09: Not-greater-than-or-equal (unordered, signaling) \n -/// 0x0A: Not-greater-than (unordered, signaling) \n -/// 0x0B: False (ordered, non-signaling) \n -/// 0x0C: Not-equal (ordered, non-signaling) \n -/// 0x0D: Greater-than-or-equal (ordered, signaling) \n -/// 0x0E: Greater-than (ordered, signaling) \n -/// 0x0F: True (unordered, non-signaling) \n -/// 0x10: Equal (ordered, signaling) \n -/// 0x11: Less-than (ordered, non-signaling) \n -/// 0x12: Less-than-or-equal (ordered, non-signaling) \n -/// 0x13: Unordered (signaling) \n -/// 0x14: Not-equal (unordered, signaling) \n -/// 0x15: Not-less-than (unordered, non-signaling) \n -/// 0x16: Not-less-than-or-equal (unordered, non-signaling) \n -/// 0x17: Ordered (signaling) \n -/// 0x18: Equal (unordered, signaling) \n -/// 0x19: Not-greater-than-or-equal (unordered, non-signaling) \n -/// 0x1A: Not-greater-than (unordered, non-signaling) \n -/// 0x1B: False (ordered, signaling) \n -/// 0x1C: Not-equal (ordered, signaling) \n -/// 0x1D: Greater-than-or-equal (ordered, non-signaling) \n -/// 0x1E: Greater-than (ordered, non-signaling) \n -/// 0x1F: True (unordered, signaling) -/// \returns A 256-bit vector of [4 x double] containing the comparison results. -#define _mm256_cmp_pd(a, b, c) \ - (__m256d)__builtin_ia32_cmppd256((__v4df)(__m256d)(a), \ - (__v4df)(__m256d)(b), (c)) - -/// Compares each of the corresponding values of two 256-bit vectors of -/// [8 x float], using the operation specified by the immediate integer -/// operand. -/// -/// Returns a [8 x float] vector consisting of eight floats corresponding to -/// the eight comparison results: zero if the comparison is false, and all -/// 1's if the comparison is true. -/// -/// \headerfile -/// -/// \code -/// __m256 _mm256_cmp_ps(__m256 a, __m256 b, const int c); -/// \endcode -/// -/// This intrinsic corresponds to the VCMPPS instruction. -/// -/// \param a -/// A 256-bit vector of [8 x float]. -/// \param b -/// A 256-bit vector of [8 x float]. 
-/// \param c -/// An immediate integer operand, with bits [4:0] specifying which comparison -/// operation to use: \n -/// 0x00: Equal (ordered, non-signaling) \n -/// 0x01: Less-than (ordered, signaling) \n -/// 0x02: Less-than-or-equal (ordered, signaling) \n -/// 0x03: Unordered (non-signaling) \n -/// 0x04: Not-equal (unordered, non-signaling) \n -/// 0x05: Not-less-than (unordered, signaling) \n -/// 0x06: Not-less-than-or-equal (unordered, signaling) \n -/// 0x07: Ordered (non-signaling) \n -/// 0x08: Equal (unordered, non-signaling) \n -/// 0x09: Not-greater-than-or-equal (unordered, signaling) \n -/// 0x0A: Not-greater-than (unordered, signaling) \n -/// 0x0B: False (ordered, non-signaling) \n -/// 0x0C: Not-equal (ordered, non-signaling) \n -/// 0x0D: Greater-than-or-equal (ordered, signaling) \n -/// 0x0E: Greater-than (ordered, signaling) \n -/// 0x0F: True (unordered, non-signaling) \n -/// 0x10: Equal (ordered, signaling) \n -/// 0x11: Less-than (ordered, non-signaling) \n -/// 0x12: Less-than-or-equal (ordered, non-signaling) \n -/// 0x13: Unordered (signaling) \n -/// 0x14: Not-equal (unordered, signaling) \n -/// 0x15: Not-less-than (unordered, non-signaling) \n -/// 0x16: Not-less-than-or-equal (unordered, non-signaling) \n -/// 0x17: Ordered (signaling) \n -/// 0x18: Equal (unordered, signaling) \n -/// 0x19: Not-greater-than-or-equal (unordered, non-signaling) \n -/// 0x1A: Not-greater-than (unordered, non-signaling) \n -/// 0x1B: False (ordered, signaling) \n -/// 0x1C: Not-equal (ordered, signaling) \n -/// 0x1D: Greater-than-or-equal (ordered, non-signaling) \n -/// 0x1E: Greater-than (ordered, non-signaling) \n -/// 0x1F: True (unordered, signaling) -/// \returns A 256-bit vector of [8 x float] containing the comparison results. -#define _mm256_cmp_ps(a, b, c) \ - (__m256)__builtin_ia32_cmpps256((__v8sf)(__m256)(a), \ - (__v8sf)(__m256)(b), (c)) - -/* Cast between vector types */ -/// Casts a 256-bit floating-point vector of [4 x double] into a 256-bit -/// floating-point vector of [8 x float]. -/// -/// \headerfile -/// -/// This intrinsic has no corresponding instruction. -/// -/// \param __a -/// A 256-bit floating-point vector of [4 x double]. -/// \returns A 256-bit floating-point vector of [8 x float] containing the same -/// bitwise pattern as the parameter. -static __inline __m256 __DEFAULT_FN_ATTRS -_mm256_castpd_ps(__m256d __a) -{ - return (__m256)__a; -} - -/// Stores double-precision floating point values from a 256-bit vector -/// of [4 x double] to an unaligned memory location pointed to by \a __p. -/// -/// \headerfile -/// -/// This intrinsic corresponds to the VMOVUPD instruction. -/// -/// \param __p -/// A pointer to a memory location that will receive the double-precision -/// floating point values. -/// \param __a -/// A 256-bit vector of [4 x double] containing the values to be moved. -static __inline void __DEFAULT_FN_ATTRS -_mm256_storeu_pd(double *__p, __m256d __a) -{ - struct __storeu_pd { - __m256d_u __v; - } __attribute__((__packed__, __may_alias__)); - ((struct __storeu_pd*)__p)->__v = __a; -} - -/* Arithmetic */ -/// Adds two 256-bit vectors of [4 x double]. -/// -/// \headerfile -/// -/// This intrinsic corresponds to the VADDPD instruction. -/// -/// \param __a -/// A 256-bit vector of [4 x double] containing one of the source operands. -/// \param __b -/// A 256-bit vector of [4 x double] containing one of the source operands. -/// \returns A 256-bit vector of [4 x double] containing the sums of both -/// operands. 
-static __inline __m256d __DEFAULT_FN_ATTRS -_mm256_add_pd(__m256d __a, __m256d __b) -{ - return (__m256d)((__v4df)__a+(__v4df)__b); -} - -/// Subtracts two 256-bit vectors of [4 x double]. -/// -/// \headerfile -/// -/// This intrinsic corresponds to the VSUBPD instruction. -/// -/// \param __a -/// A 256-bit vector of [4 x double] containing the minuend. -/// \param __b -/// A 256-bit vector of [4 x double] containing the subtrahend. -/// \returns A 256-bit vector of [4 x double] containing the differences between -/// both operands. -static __inline __m256d __DEFAULT_FN_ATTRS -_mm256_sub_pd(__m256d __a, __m256d __b) -{ - return (__m256d)((__v4df)__a-(__v4df)__b); -} - -/// Adds two 256-bit vectors of [8 x float]. -/// -/// \headerfile -/// -/// This intrinsic corresponds to the VADDPS instruction. -/// -/// \param __a -/// A 256-bit vector of [8 x float] containing one of the source operands. -/// \param __b -/// A 256-bit vector of [8 x float] containing one of the source operands. -/// \returns A 256-bit vector of [8 x float] containing the sums of both -/// operands. -static __inline __m256 __DEFAULT_FN_ATTRS -_mm256_add_ps(__m256 __a, __m256 __b) -{ - return (__m256)((__v8sf)__a+(__v8sf)__b); -} - -/// Subtracts two 256-bit vectors of [8 x float]. -/// -/// \headerfile -/// -/// This intrinsic corresponds to the VSUBPS instruction. -/// -/// \param __a -/// A 256-bit vector of [8 x float] containing the minuend. -/// \param __b -/// A 256-bit vector of [8 x float] containing the subtrahend. -/// \returns A 256-bit vector of [8 x float] containing the differences between -/// both operands. -static __inline __m256 __DEFAULT_FN_ATTRS -_mm256_sub_ps(__m256 __a, __m256 __b) -{ - return (__m256)((__v8sf)__a-(__v8sf)__b); -} - - -#undef __DEFAULT_FN_ATTRS128 - -/*===---- avx2intrin.h - AVX2 intrinsics -----------------------------------=== - * - * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. - * See https://llvm.org/LICENSE.txt for license information. - * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception - * - *===-----------------------------------------------------------------------=== - */ - -/* Define the default attributes for the functions in this file. 
*/ -#define __DEFAULT_FN_ATTRS256 __attribute__((__always_inline__, __nodebug__, __target__("avx2"), __min_vector_width__(256))) -#define __DEFAULT_FN_ATTRS128 __attribute__((__always_inline__, __nodebug__, __target__("avx2"), __min_vector_width__(128))) - -static __inline__ __m256i __DEFAULT_FN_ATTRS256 -_mm256_min_epu32(__m256i __a, __m256i __b) -{ - return (__m256i)__builtin_ia32_pminud256((__v8si)__a, (__v8si)__b); -} - -static __inline__ __m256i __DEFAULT_FN_ATTRS256 -_mm256_max_epu32(__m256i __a, __m256i __b) -{ - return (__m256i)__builtin_ia32_pmaxud256((__v8si)__a, (__v8si)__b); -} - -static __inline__ __m256i __DEFAULT_FN_ATTRS256 -_mm256_min_epi32(__m256i __a, __m256i __b) -{ - return (__m256i)__builtin_ia32_pminsd256((__v8si)__a, (__v8si)__b); -} - -static __inline__ __m256i __DEFAULT_FN_ATTRS256 -_mm256_max_epi32(__m256i __a, __m256i __b) -{ - return (__m256i)__builtin_ia32_pmaxsd256((__v8si)__a, (__v8si)__b); -} - -#define _mm256_blend_epi32(V1, V2, M) \ - (__m256i)__builtin_ia32_pblendd256((__v8si)(__m256i)(V1), \ - (__v8si)(__m256i)(V2), (int)(M)) - - -#define _mm256_shuffle_epi32(a, imm) \ - (__m256i)__builtin_ia32_pshufd256((__v8si)(__m256i)(a), (int)(imm)) - -static __inline__ __m256i __DEFAULT_FN_ATTRS256 -_mm256_cmpgt_epi64(__m256i __a, __m256i __b) -{ - return (__m256i)((__v4di)__a > (__v4di)__b); -} - -static __inline__ __m256i __DEFAULT_FN_ATTRS256 -_mm256_cvtepu8_epi32(__m128i __V) -{ - return (__m256i)__builtin_convertvector(__builtin_shufflevector((__v16qu)__V, (__v16qu)__V, 0, 1, 2, 3, 4, 5, 6, 7), __v8si); -} - -#define _mm256_permute4x64_pd(V, M) \ - (__m256d)__builtin_ia32_permdf256((__v4df)(__m256d)(V), (int)(M)) - - -static __inline__ __m256i __DEFAULT_FN_ATTRS256 -_mm256_xor_si256(__m256i __a, __m256i __b) -{ - return (__m256i)((__v4du)__a ^ (__v4du)__b); -} - -static __inline__ __m256i __DEFAULT_FN_ATTRS256 -_mm256_cmpgt_epi32(__m256i __a, __m256i __b) -{ - return (__m256i)((__v8si)__a > (__v8si)__b); -} - -static __inline__ __m256 __DEFAULT_FN_ATTRS256 -_mm256_permutevar8x32_ps(__m256 __a, __m256i __b) -{ - return (__m256)__builtin_ia32_permvarsf256((__v8sf)__a, (__v8si)__b); -} - -static __inline__ __m256i __DEFAULT_FN_ATTRS256 -_mm256_add_epi64(__m256i __a, __m256i __b) -{ - return (__m256i)((__v4du)__a + (__v4du)__b); -} - -static __inline__ __m256i __DEFAULT_FN_ATTRS256 -_mm256_sub_epi64(__m256i __a, __m256i __b) -{ - return (__m256i)((__v4du)__a - (__v4du)__b); -} - -static __inline__ __m256i __DEFAULT_FN_ATTRS256 -_mm256_slli_epi64(__m256i __a, int __count) -{ - return __builtin_ia32_psllqi256((__v4di)__a, __count); -} - -static __inline__ __m256i __DEFAULT_FN_ATTRS256 -_mm256_srli_epi64(__m256i __a, int __count) -{ - return __builtin_ia32_psrlqi256((__v4di)__a, __count); -} - -#define _mm256_extracti128_si256(V, M) \ - (__m128i)__builtin_ia32_extract128i256((__v4di)(__m256i)(V), (int)(M)) - -static __inline__ __m256i __DEFAULT_FN_ATTRS256 -_mm256_cvtepu32_epi64(__m128i __V) -{ - return (__m256i)__builtin_convertvector((__v4su)__V, __v4di); -} - -static __inline__ __m256i __DEFAULT_FN_ATTRS256 -_mm256_cvtepi32_epi64(__m128i __V) -{ - return (__m256i)__builtin_convertvector((__v4si)__V, __v4di); -} - -#define _mm256_permute4x64_epi64(V, M) \ - (__m256i)__builtin_ia32_permdi256((__v4di)(__m256i)(V), (int)(M)) - -static __inline__ __m256i __DEFAULT_FN_ATTRS256 -_mm256_add_epi32(__m256i __a, __m256i __b) -{ - return (__m256i)((__v8su)__a + (__v8su)__b); -} - -static __inline__ __m256i __DEFAULT_FN_ATTRS256 -_mm256_sub_epi32(__m256i __a, __m256i __b) -{ 
- return (__m256i)((__v8su)__a - (__v8su)__b); -} - -static __inline__ __m256i __DEFAULT_FN_ATTRS256 -_mm256_slli_epi32(__m256i __a, int __count) -{ - return (__m256i)__builtin_ia32_pslldi256((__v8si)__a, __count); -} - -static __inline__ __m256i __DEFAULT_FN_ATTRS256 -_mm256_srli_epi32(__m256i __a, int __count) -{ - return (__m256i)__builtin_ia32_psrldi256((__v8si)__a, __count); -} - -#undef __DEFAULT_FN_ATTRS -#undef __DEFAULT_FN_ATTRS128 - -/*===---- avx512fintrin.h - AVX512F intrinsics -----------------------------=== - * - * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. - * See https://llvm.org/LICENSE.txt for license information. - * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception - * - *===-----------------------------------------------------------------------=== - */ - -typedef float __m512 __attribute__((__vector_size__(64), __aligned__(64))); -typedef long long __m512i __attribute__((__vector_size__(64), __aligned__(64))); -typedef double __m512d __attribute__((__vector_size__(64), __aligned__(64))); -typedef double __m512d_u __attribute__((__vector_size__(64), __aligned__(1))); -typedef long long __m512i __attribute__((__vector_size__(64), __aligned__(64))); -typedef long long __m512i_u __attribute__((__vector_size__(64), __aligned__(1))); -typedef float __m512_u __attribute__((__vector_size__(64), __aligned__(1))); - -typedef long long __v8di __attribute__((__vector_size__(64))); -typedef double __v8df __attribute__((__vector_size__(64))); -typedef int __v16si __attribute__((__vector_size__(64))); -typedef float __v16sf __attribute__((__vector_size__(64))); - -typedef unsigned char __mmask8; -typedef unsigned short __mmask16; - -#define _MM_FROUND_CUR_DIRECTION 0x04 - -/* Define the default attributes for the functions in this file. 
*/ -#define __DEFAULT_FN_ATTRS512 __attribute__((__always_inline__, __nodebug__, __target__("avx512f"), __min_vector_width__(512))) -#define __DEFAULT_FN_ATTRS128 __attribute__((__always_inline__, __nodebug__, __target__("avx512f"), __min_vector_width__(128))) -#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("avx512f"))) - -/* Constants for integer comparison predicates */ -typedef enum { - _MM_CMPINT_EQ, /* Equal */ - _MM_CMPINT_LT, /* Less than */ - _MM_CMPINT_LE, /* Less than or Equal */ - _MM_CMPINT_UNUSED, - _MM_CMPINT_NE, /* Not Equal */ - _MM_CMPINT_NLT, /* Not Less than */ -#define _MM_CMPINT_GE _MM_CMPINT_NLT /* Greater than or Equal */ - _MM_CMPINT_NLE /* Not Less than or Equal */ -#define _MM_CMPINT_GT _MM_CMPINT_NLE /* Greater than */ -} _MM_CMPINT_ENUM; - -typedef enum -{ - _MM_PERM_AAAA = 0x00, _MM_PERM_AAAB = 0x01, _MM_PERM_AAAC = 0x02, - _MM_PERM_AAAD = 0x03, _MM_PERM_AABA = 0x04, _MM_PERM_AABB = 0x05, - _MM_PERM_AABC = 0x06, _MM_PERM_AABD = 0x07, _MM_PERM_AACA = 0x08, - _MM_PERM_AACB = 0x09, _MM_PERM_AACC = 0x0A, _MM_PERM_AACD = 0x0B, - _MM_PERM_AADA = 0x0C, _MM_PERM_AADB = 0x0D, _MM_PERM_AADC = 0x0E, - _MM_PERM_AADD = 0x0F, _MM_PERM_ABAA = 0x10, _MM_PERM_ABAB = 0x11, - _MM_PERM_ABAC = 0x12, _MM_PERM_ABAD = 0x13, _MM_PERM_ABBA = 0x14, - _MM_PERM_ABBB = 0x15, _MM_PERM_ABBC = 0x16, _MM_PERM_ABBD = 0x17, - _MM_PERM_ABCA = 0x18, _MM_PERM_ABCB = 0x19, _MM_PERM_ABCC = 0x1A, - _MM_PERM_ABCD = 0x1B, _MM_PERM_ABDA = 0x1C, _MM_PERM_ABDB = 0x1D, - _MM_PERM_ABDC = 0x1E, _MM_PERM_ABDD = 0x1F, _MM_PERM_ACAA = 0x20, - _MM_PERM_ACAB = 0x21, _MM_PERM_ACAC = 0x22, _MM_PERM_ACAD = 0x23, - _MM_PERM_ACBA = 0x24, _MM_PERM_ACBB = 0x25, _MM_PERM_ACBC = 0x26, - _MM_PERM_ACBD = 0x27, _MM_PERM_ACCA = 0x28, _MM_PERM_ACCB = 0x29, - _MM_PERM_ACCC = 0x2A, _MM_PERM_ACCD = 0x2B, _MM_PERM_ACDA = 0x2C, - _MM_PERM_ACDB = 0x2D, _MM_PERM_ACDC = 0x2E, _MM_PERM_ACDD = 0x2F, - _MM_PERM_ADAA = 0x30, _MM_PERM_ADAB = 0x31, _MM_PERM_ADAC = 0x32, - _MM_PERM_ADAD = 0x33, _MM_PERM_ADBA = 0x34, _MM_PERM_ADBB = 0x35, - _MM_PERM_ADBC = 0x36, _MM_PERM_ADBD = 0x37, _MM_PERM_ADCA = 0x38, - _MM_PERM_ADCB = 0x39, _MM_PERM_ADCC = 0x3A, _MM_PERM_ADCD = 0x3B, - _MM_PERM_ADDA = 0x3C, _MM_PERM_ADDB = 0x3D, _MM_PERM_ADDC = 0x3E, - _MM_PERM_ADDD = 0x3F, _MM_PERM_BAAA = 0x40, _MM_PERM_BAAB = 0x41, - _MM_PERM_BAAC = 0x42, _MM_PERM_BAAD = 0x43, _MM_PERM_BABA = 0x44, - _MM_PERM_BABB = 0x45, _MM_PERM_BABC = 0x46, _MM_PERM_BABD = 0x47, - _MM_PERM_BACA = 0x48, _MM_PERM_BACB = 0x49, _MM_PERM_BACC = 0x4A, - _MM_PERM_BACD = 0x4B, _MM_PERM_BADA = 0x4C, _MM_PERM_BADB = 0x4D, - _MM_PERM_BADC = 0x4E, _MM_PERM_BADD = 0x4F, _MM_PERM_BBAA = 0x50, - _MM_PERM_BBAB = 0x51, _MM_PERM_BBAC = 0x52, _MM_PERM_BBAD = 0x53, - _MM_PERM_BBBA = 0x54, _MM_PERM_BBBB = 0x55, _MM_PERM_BBBC = 0x56, - _MM_PERM_BBBD = 0x57, _MM_PERM_BBCA = 0x58, _MM_PERM_BBCB = 0x59, - _MM_PERM_BBCC = 0x5A, _MM_PERM_BBCD = 0x5B, _MM_PERM_BBDA = 0x5C, - _MM_PERM_BBDB = 0x5D, _MM_PERM_BBDC = 0x5E, _MM_PERM_BBDD = 0x5F, - _MM_PERM_BCAA = 0x60, _MM_PERM_BCAB = 0x61, _MM_PERM_BCAC = 0x62, - _MM_PERM_BCAD = 0x63, _MM_PERM_BCBA = 0x64, _MM_PERM_BCBB = 0x65, - _MM_PERM_BCBC = 0x66, _MM_PERM_BCBD = 0x67, _MM_PERM_BCCA = 0x68, - _MM_PERM_BCCB = 0x69, _MM_PERM_BCCC = 0x6A, _MM_PERM_BCCD = 0x6B, - _MM_PERM_BCDA = 0x6C, _MM_PERM_BCDB = 0x6D, _MM_PERM_BCDC = 0x6E, - _MM_PERM_BCDD = 0x6F, _MM_PERM_BDAA = 0x70, _MM_PERM_BDAB = 0x71, - _MM_PERM_BDAC = 0x72, _MM_PERM_BDAD = 0x73, _MM_PERM_BDBA = 0x74, - _MM_PERM_BDBB = 0x75, _MM_PERM_BDBC = 0x76, _MM_PERM_BDBD = 0x77, - 
_MM_PERM_BDCA = 0x78, _MM_PERM_BDCB = 0x79, _MM_PERM_BDCC = 0x7A, - _MM_PERM_BDCD = 0x7B, _MM_PERM_BDDA = 0x7C, _MM_PERM_BDDB = 0x7D, - _MM_PERM_BDDC = 0x7E, _MM_PERM_BDDD = 0x7F, _MM_PERM_CAAA = 0x80, - _MM_PERM_CAAB = 0x81, _MM_PERM_CAAC = 0x82, _MM_PERM_CAAD = 0x83, - _MM_PERM_CABA = 0x84, _MM_PERM_CABB = 0x85, _MM_PERM_CABC = 0x86, - _MM_PERM_CABD = 0x87, _MM_PERM_CACA = 0x88, _MM_PERM_CACB = 0x89, - _MM_PERM_CACC = 0x8A, _MM_PERM_CACD = 0x8B, _MM_PERM_CADA = 0x8C, - _MM_PERM_CADB = 0x8D, _MM_PERM_CADC = 0x8E, _MM_PERM_CADD = 0x8F, - _MM_PERM_CBAA = 0x90, _MM_PERM_CBAB = 0x91, _MM_PERM_CBAC = 0x92, - _MM_PERM_CBAD = 0x93, _MM_PERM_CBBA = 0x94, _MM_PERM_CBBB = 0x95, - _MM_PERM_CBBC = 0x96, _MM_PERM_CBBD = 0x97, _MM_PERM_CBCA = 0x98, - _MM_PERM_CBCB = 0x99, _MM_PERM_CBCC = 0x9A, _MM_PERM_CBCD = 0x9B, - _MM_PERM_CBDA = 0x9C, _MM_PERM_CBDB = 0x9D, _MM_PERM_CBDC = 0x9E, - _MM_PERM_CBDD = 0x9F, _MM_PERM_CCAA = 0xA0, _MM_PERM_CCAB = 0xA1, - _MM_PERM_CCAC = 0xA2, _MM_PERM_CCAD = 0xA3, _MM_PERM_CCBA = 0xA4, - _MM_PERM_CCBB = 0xA5, _MM_PERM_CCBC = 0xA6, _MM_PERM_CCBD = 0xA7, - _MM_PERM_CCCA = 0xA8, _MM_PERM_CCCB = 0xA9, _MM_PERM_CCCC = 0xAA, - _MM_PERM_CCCD = 0xAB, _MM_PERM_CCDA = 0xAC, _MM_PERM_CCDB = 0xAD, - _MM_PERM_CCDC = 0xAE, _MM_PERM_CCDD = 0xAF, _MM_PERM_CDAA = 0xB0, - _MM_PERM_CDAB = 0xB1, _MM_PERM_CDAC = 0xB2, _MM_PERM_CDAD = 0xB3, - _MM_PERM_CDBA = 0xB4, _MM_PERM_CDBB = 0xB5, _MM_PERM_CDBC = 0xB6, - _MM_PERM_CDBD = 0xB7, _MM_PERM_CDCA = 0xB8, _MM_PERM_CDCB = 0xB9, - _MM_PERM_CDCC = 0xBA, _MM_PERM_CDCD = 0xBB, _MM_PERM_CDDA = 0xBC, - _MM_PERM_CDDB = 0xBD, _MM_PERM_CDDC = 0xBE, _MM_PERM_CDDD = 0xBF, - _MM_PERM_DAAA = 0xC0, _MM_PERM_DAAB = 0xC1, _MM_PERM_DAAC = 0xC2, - _MM_PERM_DAAD = 0xC3, _MM_PERM_DABA = 0xC4, _MM_PERM_DABB = 0xC5, - _MM_PERM_DABC = 0xC6, _MM_PERM_DABD = 0xC7, _MM_PERM_DACA = 0xC8, - _MM_PERM_DACB = 0xC9, _MM_PERM_DACC = 0xCA, _MM_PERM_DACD = 0xCB, - _MM_PERM_DADA = 0xCC, _MM_PERM_DADB = 0xCD, _MM_PERM_DADC = 0xCE, - _MM_PERM_DADD = 0xCF, _MM_PERM_DBAA = 0xD0, _MM_PERM_DBAB = 0xD1, - _MM_PERM_DBAC = 0xD2, _MM_PERM_DBAD = 0xD3, _MM_PERM_DBBA = 0xD4, - _MM_PERM_DBBB = 0xD5, _MM_PERM_DBBC = 0xD6, _MM_PERM_DBBD = 0xD7, - _MM_PERM_DBCA = 0xD8, _MM_PERM_DBCB = 0xD9, _MM_PERM_DBCC = 0xDA, - _MM_PERM_DBCD = 0xDB, _MM_PERM_DBDA = 0xDC, _MM_PERM_DBDB = 0xDD, - _MM_PERM_DBDC = 0xDE, _MM_PERM_DBDD = 0xDF, _MM_PERM_DCAA = 0xE0, - _MM_PERM_DCAB = 0xE1, _MM_PERM_DCAC = 0xE2, _MM_PERM_DCAD = 0xE3, - _MM_PERM_DCBA = 0xE4, _MM_PERM_DCBB = 0xE5, _MM_PERM_DCBC = 0xE6, - _MM_PERM_DCBD = 0xE7, _MM_PERM_DCCA = 0xE8, _MM_PERM_DCCB = 0xE9, - _MM_PERM_DCCC = 0xEA, _MM_PERM_DCCD = 0xEB, _MM_PERM_DCDA = 0xEC, - _MM_PERM_DCDB = 0xED, _MM_PERM_DCDC = 0xEE, _MM_PERM_DCDD = 0xEF, - _MM_PERM_DDAA = 0xF0, _MM_PERM_DDAB = 0xF1, _MM_PERM_DDAC = 0xF2, - _MM_PERM_DDAD = 0xF3, _MM_PERM_DDBA = 0xF4, _MM_PERM_DDBB = 0xF5, - _MM_PERM_DDBC = 0xF6, _MM_PERM_DDBD = 0xF7, _MM_PERM_DDCA = 0xF8, - _MM_PERM_DDCB = 0xF9, _MM_PERM_DDCC = 0xFA, _MM_PERM_DDCD = 0xFB, - _MM_PERM_DDDA = 0xFC, _MM_PERM_DDDB = 0xFD, _MM_PERM_DDDC = 0xFE, - _MM_PERM_DDDD = 0xFF -} _MM_PERM_ENUM; - -/* SIMD load ops */ - -static __inline __m512i __DEFAULT_FN_ATTRS512 -_mm512_loadu_si512 (void const *__P) -{ - struct __loadu_si512 { - __m512i_u __v; - } __attribute__((__packed__, __may_alias__)); - return ((const struct __loadu_si512*)__P)->__v; -} - -static __inline __m512d __DEFAULT_FN_ATTRS512 -_mm512_loadu_pd(void const *__p) -{ - struct __loadu_pd { - __m512d_u __v; - } __attribute__((__packed__, __may_alias__)); - return ((const struct 
__loadu_pd*)__p)->__v; -} - -static __inline __m512 __DEFAULT_FN_ATTRS512 -_mm512_loadu_ps(void const *__p) -{ - struct __loadu_ps { - __m512_u __v; - } __attribute__((__packed__, __may_alias__)); - return ((const struct __loadu_ps*)__p)->__v; -} - -static __inline void __DEFAULT_FN_ATTRS512 -_mm512_storeu_pd(void *__P, __m512d __A) -{ - struct __storeu_pd { - __m512d_u __v; - } __attribute__((__packed__, __may_alias__)); - ((struct __storeu_pd*)__P)->__v = __A; -} - -static __inline void __DEFAULT_FN_ATTRS512 -_mm512_storeu_ps(void *__P, __m512 __A) -{ - struct __storeu_ps { - __m512_u __v; - } __attribute__((__packed__, __may_alias__)); - ((struct __storeu_ps*)__P)->__v = __A; -} - -static __inline void __DEFAULT_FN_ATTRS512 -_mm512_storeu_si512 (void *__P, __m512i __A) -{ - struct __storeu_si512 { - __m512i_u __v; - } __attribute__((__packed__, __may_alias__)); - ((struct __storeu_si512*)__P)->__v = __A; -} - -static __inline __m512i __DEFAULT_FN_ATTRS512 -_mm512_min_epi32(__m512i __A, __m512i __B) -{ - return (__m512i)__builtin_ia32_pminsd512((__v16si)__A, (__v16si)__B); -} - -static __inline __m512i __DEFAULT_FN_ATTRS512 -_mm512_max_epi32(__m512i __A, __m512i __B) -{ - return (__m512i)__builtin_ia32_pmaxsd512((__v16si)__A, (__v16si)__B); -} - -static __inline __m512i __DEFAULT_FN_ATTRS512 -_mm512_min_epi64(__m512i __A, __m512i __B) -{ - return (__m512i)__builtin_ia32_pminsq512((__v8di)__A, (__v8di)__B); -} - -static __inline __m512i __DEFAULT_FN_ATTRS512 -_mm512_max_epi64(__m512i __A, __m512i __B) -{ - return (__m512i)__builtin_ia32_pmaxsq512((__v8di)__A, (__v8di)__B); -} - -static __inline __m512d __DEFAULT_FN_ATTRS512 -_mm512_castsi512_pd (__m512i __A) -{ - return (__m512d) (__A); -} - -static __inline __m512i __DEFAULT_FN_ATTRS512 -_mm512_min_epu32(__m512i __A, __m512i __B) -{ - return (__m512i)__builtin_ia32_pminud512((__v16si)__A, (__v16si)__B); -} - -static __inline __m512i __DEFAULT_FN_ATTRS512 -_mm512_max_epu32(__m512i __A, __m512i __B) -{ - return (__m512i)__builtin_ia32_pmaxud512((__v16si)__A, (__v16si)__B); -} - -static __inline__ __m512i __DEFAULT_FN_ATTRS512 -_mm512_mask_max_epi32 (__m512i __W, __mmask16 __M, __m512i __A, __m512i __B) -{ - return (__m512i)__builtin_ia32_selectd_512((__mmask16)__M, - (__v16si)_mm512_max_epi32(__A, __B), - (__v16si)__W); -} - -static __inline__ __m512i __DEFAULT_FN_ATTRS512 -_mm512_mask_max_epi64 (__m512i __W, __mmask8 __M, __m512i __A, __m512i __B) -{ - return (__m512i)__builtin_ia32_selectq_512((__mmask8)__M, - (__v8di)_mm512_max_epi64(__A, __B), - (__v8di)__W); -} - -static __inline__ __m512i __DEFAULT_FN_ATTRS512 -_mm512_mask_max_epu32 (__m512i __W, __mmask16 __M, __m512i __A, __m512i __B) -{ - return (__m512i)__builtin_ia32_selectd_512((__mmask16)__M, - (__v16si)_mm512_max_epu32(__A, __B), - (__v16si)__W); -} - - -#define _mm512_shuffle_epi32(A, I) \ - (__m512i)__builtin_ia32_pshufd512((__v16si)(__m512i)(A), (int)(I)) - -#define _mm512_permutex_pd(X, C) \ - (__m512d)__builtin_ia32_permdf512((__v8df)(__m512d)(X), (int)(C)) - -#define _mm512_permutex_epi64(X, C) \ - (__m512i)__builtin_ia32_permdi512((__v8di)(__m512i)(X), (int)(C)) - -static __inline__ void __DEFAULT_FN_ATTRS512 -_mm512_mask_compressstoreu_pd (void *__P, __mmask8 __U, __m512d __A) -{ - __builtin_ia32_compressstoredf512_mask ((__v8df *) __P, (__v8df) __A, - (__mmask8) __U); -} - -static __inline__ void __DEFAULT_FN_ATTRS512 -_mm512_mask_compressstoreu_ps (void *__P, __mmask16 __U, __m512 __A) -{ - __builtin_ia32_compressstoresf512_mask ((__v16sf *) __P, (__v16sf) __A, - 
(__mmask16) __U); -} - -static __inline __m512d __DEFAULT_FN_ATTRS512 -_mm512_set1_pd(double __w) -{ - return __extension__ (__m512d){ __w, __w, __w, __w, __w, __w, __w, __w }; -} - -static __inline __m512 __DEFAULT_FN_ATTRS512 -_mm512_set1_ps(float __w) -{ - return __extension__ (__m512){ __w, __w, __w, __w, __w, __w, __w, __w, - __w, __w, __w, __w, __w, __w, __w, __w }; -} - -static __inline __m512i __DEFAULT_FN_ATTRS512 -_mm512_set1_epi32(int __s) -{ - return __extension__ (__m512i)(__v16si){ - __s, __s, __s, __s, __s, __s, __s, __s, - __s, __s, __s, __s, __s, __s, __s, __s }; -} - -static __inline __m512i __DEFAULT_FN_ATTRS512 -_mm512_set1_epi64(long long __d) -{ - return __extension__(__m512i)(__v8di){ __d, __d, __d, __d, __d, __d, __d, __d }; -} - -static __inline__ void __DEFAULT_FN_ATTRS512 -_mm512_mask_compressstoreu_epi32 (void *__P, __mmask16 __U, __m512i __A) -{ - __builtin_ia32_compressstoresi512_mask ((__v16si *) __P, (__v16si) __A, - (__mmask16) __U); -} - -static __inline__ void __DEFAULT_FN_ATTRS512 -_mm512_mask_compressstoreu_epi64 (void *__P, __mmask8 __U, __m512i __A) -{ - __builtin_ia32_compressstoredi512_mask ((__v8di *) __P, (__v8di) __A, - (__mmask8) __U); -} - -#define _mm512_permute_pd(X, C) \ - (__m512d)__builtin_ia32_vpermilpd512((__v8df)(__m512d)(X), (int)(C)) - -static __inline __m512i __DEFAULT_FN_ATTRS512 -_mm512_castpd_si512 (__m512d __A) -{ - return (__m512i) (__A); -} - -#define _mm512_shuffle_f64x2(A, B, imm) \ - (__m512d)__builtin_ia32_shuf_f64x2((__v8df)(__m512d)(A), \ - (__v8df)(__m512d)(B), (int)(imm)) - - -#define _mm512_shuffle_i64x2(A, B, imm) \ - (__m512i)__builtin_ia32_shuf_i64x2((__v8di)(__m512i)(A), \ - (__v8di)(__m512i)(B), (int)(imm)) - -#define _mm256_cmp_ps_mask(a, b, p) \ - (__mmask8)__builtin_ia32_cmpps256_mask((__v8sf)(__m256)(a), \ - (__v8sf)(__m256)(b), (int)(p), \ - (__mmask8)-1) - -#define _mm512_cmp_ps_mask(A, B, P) \ - _mm512_cmp_round_ps_mask((A), (B), (P), _MM_FROUND_CUR_DIRECTION) - - -/* Integer compare */ - -#define _mm512_cmpeq_epi32_mask(A, B) \ - _mm512_cmp_epi32_mask((A), (B), _MM_CMPINT_EQ) -#define _mm512_mask_cmpeq_epi32_mask(k, A, B) \ - _mm512_mask_cmp_epi32_mask((k), (A), (B), _MM_CMPINT_EQ) -#define _mm512_cmpge_epi32_mask(A, B) \ - _mm512_cmp_epi32_mask((A), (B), _MM_CMPINT_GE) -#define _mm512_mask_cmpge_epi32_mask(k, A, B) \ - _mm512_mask_cmp_epi32_mask((k), (A), (B), _MM_CMPINT_GE) -#define _mm512_cmpgt_epi32_mask(A, B) \ - _mm512_cmp_epi32_mask((A), (B), _MM_CMPINT_GT) -#define _mm512_mask_cmpgt_epi32_mask(k, A, B) \ - _mm512_mask_cmp_epi32_mask((k), (A), (B), _MM_CMPINT_GT) -#define _mm512_cmple_epi32_mask(A, B) \ - _mm512_cmp_epi32_mask((A), (B), _MM_CMPINT_LE) -#define _mm512_mask_cmple_epi32_mask(k, A, B) \ - _mm512_mask_cmp_epi32_mask((k), (A), (B), _MM_CMPINT_LE) -#define _mm512_cmplt_epi32_mask(A, B) \ - _mm512_cmp_epi32_mask((A), (B), _MM_CMPINT_LT) -#define _mm512_mask_cmplt_epi32_mask(k, A, B) \ - _mm512_mask_cmp_epi32_mask((k), (A), (B), _MM_CMPINT_LT) -#define _mm512_cmpneq_epi32_mask(A, B) \ - _mm512_cmp_epi32_mask((A), (B), _MM_CMPINT_NE) -#define _mm512_mask_cmpneq_epi32_mask(k, A, B) \ - _mm512_mask_cmp_epi32_mask((k), (A), (B), _MM_CMPINT_NE) - -#define _mm512_cmpeq_epu32_mask(A, B) \ - _mm512_cmp_epu32_mask((A), (B), _MM_CMPINT_EQ) -#define _mm512_mask_cmpeq_epu32_mask(k, A, B) \ - _mm512_mask_cmp_epu32_mask((k), (A), (B), _MM_CMPINT_EQ) -#define _mm512_cmpge_epu32_mask(A, B) \ - _mm512_cmp_epu32_mask((A), (B), _MM_CMPINT_GE) -#define _mm512_mask_cmpge_epu32_mask(k, A, B) \ - 
_mm512_mask_cmp_epu32_mask((k), (A), (B), _MM_CMPINT_GE) -#define _mm512_cmpgt_epu32_mask(A, B) \ - _mm512_cmp_epu32_mask((A), (B), _MM_CMPINT_GT) -#define _mm512_mask_cmpgt_epu32_mask(k, A, B) \ - _mm512_mask_cmp_epu32_mask((k), (A), (B), _MM_CMPINT_GT) -#define _mm512_cmple_epu32_mask(A, B) \ - _mm512_cmp_epu32_mask((A), (B), _MM_CMPINT_LE) -#define _mm512_mask_cmple_epu32_mask(k, A, B) \ - _mm512_mask_cmp_epu32_mask((k), (A), (B), _MM_CMPINT_LE) -#define _mm512_cmplt_epu32_mask(A, B) \ - _mm512_cmp_epu32_mask((A), (B), _MM_CMPINT_LT) -#define _mm512_mask_cmplt_epu32_mask(k, A, B) \ - _mm512_mask_cmp_epu32_mask((k), (A), (B), _MM_CMPINT_LT) -#define _mm512_cmpneq_epu32_mask(A, B) \ - _mm512_cmp_epu32_mask((A), (B), _MM_CMPINT_NE) -#define _mm512_mask_cmpneq_epu32_mask(k, A, B) \ - _mm512_mask_cmp_epu32_mask((k), (A), (B), _MM_CMPINT_NE) - -#define _mm512_cmpeq_epi64_mask(A, B) \ - _mm512_cmp_epi64_mask((A), (B), _MM_CMPINT_EQ) -#define _mm512_mask_cmpeq_epi64_mask(k, A, B) \ - _mm512_mask_cmp_epi64_mask((k), (A), (B), _MM_CMPINT_EQ) -#define _mm512_cmpge_epi64_mask(A, B) \ - _mm512_cmp_epi64_mask((A), (B), _MM_CMPINT_GE) -#define _mm512_mask_cmpge_epi64_mask(k, A, B) \ - _mm512_mask_cmp_epi64_mask((k), (A), (B), _MM_CMPINT_GE) -#define _mm512_cmpgt_epi64_mask(A, B) \ - _mm512_cmp_epi64_mask((A), (B), _MM_CMPINT_GT) -#define _mm512_mask_cmpgt_epi64_mask(k, A, B) \ - _mm512_mask_cmp_epi64_mask((k), (A), (B), _MM_CMPINT_GT) -#define _mm512_cmple_epi64_mask(A, B) \ - _mm512_cmp_epi64_mask((A), (B), _MM_CMPINT_LE) -#define _mm512_mask_cmple_epi64_mask(k, A, B) \ - _mm512_mask_cmp_epi64_mask((k), (A), (B), _MM_CMPINT_LE) -#define _mm512_cmplt_epi64_mask(A, B) \ - _mm512_cmp_epi64_mask((A), (B), _MM_CMPINT_LT) -#define _mm512_mask_cmplt_epi64_mask(k, A, B) \ - _mm512_mask_cmp_epi64_mask((k), (A), (B), _MM_CMPINT_LT) -#define _mm512_cmpneq_epi64_mask(A, B) \ - _mm512_cmp_epi64_mask((A), (B), _MM_CMPINT_NE) -#define _mm512_mask_cmpneq_epi64_mask(k, A, B) \ - _mm512_mask_cmp_epi64_mask((k), (A), (B), _MM_CMPINT_NE) - -#define _mm512_cmpeq_epu64_mask(A, B) \ - _mm512_cmp_epu64_mask((A), (B), _MM_CMPINT_EQ) -#define _mm512_mask_cmpeq_epu64_mask(k, A, B) \ - _mm512_mask_cmp_epu64_mask((k), (A), (B), _MM_CMPINT_EQ) -#define _mm512_cmpge_epu64_mask(A, B) \ - _mm512_cmp_epu64_mask((A), (B), _MM_CMPINT_GE) -#define _mm512_mask_cmpge_epu64_mask(k, A, B) \ - _mm512_mask_cmp_epu64_mask((k), (A), (B), _MM_CMPINT_GE) -#define _mm512_cmpgt_epu64_mask(A, B) \ - _mm512_cmp_epu64_mask((A), (B), _MM_CMPINT_GT) -#define _mm512_mask_cmpgt_epu64_mask(k, A, B) \ - _mm512_mask_cmp_epu64_mask((k), (A), (B), _MM_CMPINT_GT) -#define _mm512_cmple_epu64_mask(A, B) \ - _mm512_cmp_epu64_mask((A), (B), _MM_CMPINT_LE) -#define _mm512_mask_cmple_epu64_mask(k, A, B) \ - _mm512_mask_cmp_epu64_mask((k), (A), (B), _MM_CMPINT_LE) -#define _mm512_cmplt_epu64_mask(A, B) \ - _mm512_cmp_epu64_mask((A), (B), _MM_CMPINT_LT) -#define _mm512_mask_cmplt_epu64_mask(k, A, B) \ - _mm512_mask_cmp_epu64_mask((k), (A), (B), _MM_CMPINT_LT) -#define _mm512_cmpneq_epu64_mask(A, B) \ - _mm512_cmp_epu64_mask((A), (B), _MM_CMPINT_NE) -#define _mm512_mask_cmpneq_epu64_mask(k, A, B) \ - _mm512_mask_cmp_epu64_mask((k), (A), (B), _MM_CMPINT_NE) - -#define _mm512_cmp_pd_mask(A, B, P) \ - _mm512_cmp_round_pd_mask((A), (B), (P), _MM_FROUND_CUR_DIRECTION) - -#define _mm512_cmp_epu64_mask(a, b, p) \ - (__mmask8)__builtin_ia32_ucmpq512_mask((__v8di)(__m512i)(a), \ - (__v8di)(__m512i)(b), (int)(p), \ - (__mmask8)-1) - -#define _mm512_cmp_epi32_mask(a, b, p) \ - 
(__mmask16)__builtin_ia32_cmpd512_mask((__v16si)(__m512i)(a), \ - (__v16si)(__m512i)(b), (int)(p), \ - (__mmask16)-1) - -#define _mm512_cmp_epi64_mask(a, b, p) \ - (__mmask8)__builtin_ia32_cmpq512_mask((__v8di)(__m512i)(a), \ - (__v8di)(__m512i)(b), (int)(p), \ - (__mmask8)-1) - -#define _mm512_cmp_epu32_mask(a, b, p) \ - (__mmask16)__builtin_ia32_ucmpd512_mask((__v16si)(__m512i)(a), \ - (__v16si)(__m512i)(b), (int)(p), \ - (__mmask16)-1) - -#define _mm512_cmp_round_ps_mask(A, B, P, R) \ - (__mmask16)__builtin_ia32_cmpps512_mask((__v16sf)(__m512)(A), \ - (__v16sf)(__m512)(B), (int)(P), \ - (__mmask16)-1, (int)(R)) - -#define _mm512_mask_cmp_round_ps_mask(U, A, B, P, R) \ - (__mmask16)__builtin_ia32_cmpps512_mask((__v16sf)(__m512)(A), \ - (__v16sf)(__m512)(B), (int)(P), \ - (__mmask16)(U), (int)(R)) - -#define _mm512_cmp_round_pd_mask(A, B, P, R) \ - (__mmask8)__builtin_ia32_cmppd512_mask((__v8df)(__m512d)(A), \ - (__v8df)(__m512d)(B), (int)(P), \ - (__mmask8)-1, (int)(R)) - - -#undef __DEFAULT_FN_ATTRS - -/*===---- popcntintrin.h - POPCNT intrinsics -------------------------------=== - * - * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. - * See https://llvm.org/LICENSE.txt for license information. - * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception - * - *===-----------------------------------------------------------------------=== - */ - -/* Define the default attributes for the functions in this file. */ -#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("popcnt"))) - -/// Counts the number of bits in the source operand having a value of 1. -/// -/// \headerfile -/// -/// This intrinsic corresponds to the POPCNT instruction. -/// -/// \param __A -/// An unsigned 32-bit integer operand. -/// \returns A 32-bit integer containing the number of bits with value 1 in the -/// source operand. -static __inline__ int __DEFAULT_FN_ATTRS -_mm_popcnt_u32(unsigned int __A) -{ - return __builtin_popcount(__A); -} - -#ifdef __x86_64__ -/// Counts the number of bits in the source operand having a value of 1. -/// -/// \headerfile -/// -/// This intrinsic corresponds to the POPCNT instruction. -/// -/// \param __A -/// An unsigned 64-bit integer operand. -/// \returns A 64-bit integer containing the number of bits with value 1 in the -/// source operand. 
-static __inline__ long long __DEFAULT_FN_ATTRS -_mm_popcnt_u64(unsigned long long __A) -{ - return __builtin_popcountll(__A); -} -#endif /* __x86_64__ */ - -#endif //_IMMINTRIN_H \ No newline at end of file diff --git a/src/coreclr/src/vm/CMakeLists.txt b/src/coreclr/src/vm/CMakeLists.txt index ada07d14c12987..76616162a32301 100644 --- a/src/coreclr/src/vm/CMakeLists.txt +++ b/src/coreclr/src/vm/CMakeLists.txt @@ -542,7 +542,7 @@ set(GC_SOURCES_WKS ../gc/softwarewritewatch.cpp ../gc/handletablecache.cpp) -if (CLR_CMAKE_TARGET_ARCH_AMD64) +if (CLR_CMAKE_TARGET_ARCH_AMD64 AND CLR_CMAKE_TARGET_WIN32) set ( GC_SOURCES_WKS ${GC_SOURCES_WKS} ../gc/vxsort/isa_detection.cpp @@ -554,7 +554,7 @@ if (CLR_CMAKE_TARGET_ARCH_AMD64) ../gc/vxsort/smallsort/bitonic_sort.AVX512.int64_t.generated.cpp ../gc/vxsort/smallsort/bitonic_sort.AVX512.int32_t.generated.cpp ) -endif (CLR_CMAKE_TARGET_ARCH_AMD64) +endif (CLR_CMAKE_TARGET_ARCH_AMD64 AND CLR_CMAKE_TARGET_WIN32) set(GC_HEADERS_WKS ${GC_HEADERS_DAC_AND_WKS_COMMON} From 1301d661d1ff0aed045a59a256d6a412bf7aad9f Mon Sep 17 00:00:00 2001 From: Peter Sollich Date: Wed, 15 Jul 2020 16:20:45 +0200 Subject: [PATCH 31/31] Address code review feedback: - add instructions to bitonic_gen.py - centralize range and instruction set checks in do_vxsort - add parentheses around expressions. - removed some printfs, converted others to dprintf - strengthened assert --- src/coreclr/src/gc/gc.cpp | 311 ++++++++---------- src/coreclr/src/gc/gcsvr.cpp | 2 +- src/coreclr/src/gc/gcwks.cpp | 2 +- .../vxsort/smallsort/codegen/bitonic_gen.py | 9 + 4 files changed, 143 insertions(+), 181 deletions(-) diff --git a/src/coreclr/src/gc/gc.cpp b/src/coreclr/src/gc/gc.cpp index 9e68050b436b13..c0a0190bfe513d 100644 --- a/src/coreclr/src/gc/gc.cpp +++ b/src/coreclr/src/gc/gc.cpp @@ -2084,58 +2084,9 @@ uint8_t* tree_search (uint8_t* tree, uint8_t* old_address); #ifdef USE_INTROSORT #define _sort introsort::sort #elif defined(USE_VXSORT) -#define _sort do_vxsort -#ifdef USE_VXSORT - -// above this threshold, using AVX2 for sorting will likely pay off -// despite possible downclocking on some devices -const size_t AVX2_THRESHOLD_SIZE = 8 * 1024; - -// above this threshold, using AVX51F for sorting will likely pay off -// despite possible downclocking on current devices -const size_t AVX512F_THRESHOLD_SIZE = 128 * 1024; - -void do_vxsort (uint8_t** low, uint8_t** high, unsigned int depth) -{ - assert (IsSupportedInstructionSet (InstructionSet::AVX2)); - // use AVX512F only if the list is large enough to pay for downclocking impact - if (IsSupportedInstructionSet (InstructionSet::AVX512F) && ((high -low) > AVX512F_THRESHOLD_SIZE)) - { - do_vxsort_avx512 (low, high); - } - else - { - do_vxsort_avx2 (low, high); - } -#ifdef _DEBUG - for (uint8_t** p = low; p < high; p++) - { - assert (p[0] <= p[1]); - } -#endif -} - -void do_vxsort (int32_t* low, int32_t* high, unsigned int depth) -{ - assert (IsSupportedInstructionSet(InstructionSet::AVX2)); - // use AVX512F only if the list is large enough to pay for downclocking impact - if (IsSupportedInstructionSet (InstructionSet::AVX512F) && ((high - low) > AVX512F_THRESHOLD_SIZE)) - { - do_vxsort_avx512 (low, high); - } - else - { - do_vxsort_avx2 (low, high); - } -#ifdef _DEBUG - for (int32_t* p = low; p < high; p++) - { - assert (p[0] <= p[1]); - } -#endif -} -#endif //USE_VXSORT - +// in this case we have do_vxsort which takes an additional range that +// all items to be sorted are contained in +// so do not #define _sort #else //USE_INTROSORT #define 
@@ -8308,7 +8259,81 @@ inline static void swap_elements(uint8_t** i,uint8_t** j)
 };
 
-#endif //USE_INTROSORT
+#endif //defined(USE_INTROSORT) || defined(USE_VXSORT)
+
+#ifdef USE_VXSORT
+static void do_vxsort (uint8_t** item_array, ptrdiff_t item_count, uint8_t* range_low, uint8_t* range_high)
+{
+    // above this threshold, using AVX2 for sorting will likely pay off
+    // despite possible downclocking on some devices
+    const size_t AVX2_THRESHOLD_SIZE = 8 * 1024;
+
+    // above this threshold, using AVX512F for sorting will likely pay off
+    // despite possible downclocking on current devices
+    const size_t AVX512F_THRESHOLD_SIZE = 128 * 1024;
+
+    if (item_count <= 1)
+        return;
+
+    if (IsSupportedInstructionSet (InstructionSet::AVX2) && (item_count > AVX2_THRESHOLD_SIZE))
+    {
+        // is the range small enough for a 32-bit sort?
+        // the 32-bit sort is almost twice as fast
+        ptrdiff_t range = range_high - range_low;
+        assert(sizeof(uint8_t*) == (1 << 3));
+        ptrdiff_t scaled_range = range >> 3;
+        if ((uint32_t)scaled_range == scaled_range)
+        {
+            dprintf (3, ("Sorting mark lists as 32-bit offsets"));
+
+            do_pack_avx2 (item_array, item_count, range_low);
+
+            int32_t* item_array_32 = (int32_t*)item_array;
+
+            // use AVX512F only if the list is large enough to pay for downclocking impact
+            if (IsSupportedInstructionSet (InstructionSet::AVX512F) && (item_count > AVX512F_THRESHOLD_SIZE))
+            {
+                do_vxsort_avx512 (item_array_32, &item_array_32[item_count - 1]);
+            }
+            else
+            {
+                do_vxsort_avx2 (item_array_32, &item_array_32[item_count - 1]);
+            }
+
+            do_unpack_avx2 (item_array_32, item_count, range_low);
+        }
+        else
+        {
+            dprintf (3, ("Sorting mark lists"));
+
+            // use AVX512F only if the list is large enough to pay for downclocking impact
+            if (IsSupportedInstructionSet (InstructionSet::AVX512F) && (item_count > AVX512F_THRESHOLD_SIZE))
+            {
+                do_vxsort_avx512 (item_array, &item_array[item_count - 1]);
+            }
+            else
+            {
+                do_vxsort_avx2 (item_array, &item_array[item_count - 1]);
+            }
+        }
+    }
+    else
+    {
+        dprintf (3, ("Sorting mark lists"));
+        introsort::sort (item_array, &item_array[item_count - 1], 0);
+    }
+#ifdef _DEBUG
+    // check the array is sorted
+    for (ptrdiff_t i = 0; i < item_count - 1; i++)
+    {
+        assert (item_array[i] <= item_array[i + 1]);
+    }
+    // check that the ends of the array are indeed in range
+    // together with the above this implies all elements are in range
+    assert ((range_low <= item_array[0]) && (item_array[item_count - 1] <= range_high));
+#endif
+}
+#endif //USE_VXSORT
 
 #ifdef MULTIPLE_HEAPS
 #ifdef PARALLEL_MARK_LIST_SORT
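
The 32-bit path above relies on do_pack_avx2/do_unpack_avx2 to rewrite the pointer array in place as 32-bit offsets from range_low, halving the data the sort has to move. A scalar sketch of that round trip may make the trick easier to follow (hypothetical names; the patch itself uses the AVX2 implementations, and the in-place aliasing is safe because pack always writes behind its reads and unpack runs backwards):

#include <cassert>
#include <cstddef>
#include <cstdint>

// pack: overwrite an array of n 8-byte-aligned pointers in place
// with n 32-bit offsets from base, scaled down by the alignment
static void pack_scalar (uint8_t** items, ptrdiff_t n, uint8_t* base)
{
    int32_t* packed = (int32_t*)items;
    for (ptrdiff_t i = 0; i < n; i++)
    {
        ptrdiff_t offset = (items[i] - base) >> 3; // pointers are 8-byte aligned
        assert ((uint32_t)offset == offset);       // same fits-in-32-bits check as above
        packed[i] = (int32_t)offset;
    }
}

// unpack: widen the offsets back into pointers, last element first,
// so no 32-bit value is overwritten before it has been read
static void unpack_scalar (int32_t* packed, ptrdiff_t n, uint8_t* base)
{
    uint8_t** items = (uint8_t**)packed;
    for (ptrdiff_t i = n - 1; i >= 0; i--)
    {
        items[i] = base + ((ptrdiff_t)packed[i] << 3);
    }
}
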
@@ -8368,7 +8393,7 @@ void gc_heap::sort_mark_list()
     }
 
     // give up if this is not an ephemeral GC or the mark list size is unreasonably large
-    if (total_mark_list_size > total_ephemeral_size/256)
+    if (total_mark_list_size > (total_ephemeral_size / 256))
     {
         mark_list_index = mark_list_end + 1;
         // let's not count this as a mark list overflow
@@ -8378,116 +8403,72 @@
 #ifdef USE_VXSORT
     ptrdiff_t item_count = mark_list_index - mark_list;
-    // conservatively use AVX2 only for large mark lists,
-    // and do runtime test to check whether AVX2 is indeed available
-    if (item_count > AVX2_THRESHOLD_SIZE && IsSupportedInstructionSet (InstructionSet::AVX2))
-    {
+//#define WRITE_SORT_DATA
 #if defined(_DEBUG) || defined(WRITE_SORT_DATA)
     // in debug, make a copy of the mark list
     // for checking and debugging purposes
-        uint8_t** mark_list_copy = &g_mark_list_copy[heap_number * mark_list_size];
-        uint8_t** mark_list_copy_index = &mark_list_copy[item_count];
-        for (ptrdiff_t i = 0; i < item_count; i++)
-        {
-            uint8_t* item = mark_list[i];
-            mark_list_copy[i] = item;
-        }
+    uint8_t** mark_list_copy = &g_mark_list_copy[heap_number * mark_list_size];
+    uint8_t** mark_list_copy_index = &mark_list_copy[item_count];
+    for (ptrdiff_t i = 0; i < item_count; i++)
+    {
+        uint8_t* item = mark_list[i];
+        mark_list_copy[i] = item;
+    }
 #endif // defined(_DEBUG) || defined(WRITE_SORT_DATA)
 
-        // is the range small enough for a 32-bit sort?
-        ptrdiff_t range = high - low;
-        assert(sizeof(uint8_t*) == (1<<3));
-        ptrdiff_t scaled_range = range >> 3;
-        if ((uint32_t)scaled_range == scaled_range)
-        {
-            dprintf (3, ("Sorting mark lists as 32-bit offsets"));
-
-//#define WRITE_SORT_DATA
+    ptrdiff_t start = get_cycle_count();
 
-            // first step: scale the pointers down to 32-bit offsets
-            uint8_t** mark_list = this->mark_list;
-            int32_t* mark_list_32 = (int32_t*)mark_list;
-            do_pack_avx2 (mark_list, item_count, low);
-            // sort the 32-bit offsets
-            if (item_count > 0)
-            {
-                ptrdiff_t start = get_cycle_count();
+    do_vxsort (mark_list, item_count, low, high);
 
-                _sort (&mark_list_32[0], &mark_list_32[item_count - 1], 0);
-
-                ptrdiff_t elapsed_cycles = get_cycle_count() - start;
-                int log2_item_count = index_of_highest_set_bit (item_count);
-                double elapsed_cyles_by_n_log_n = (double)elapsed_cycles / item_count / log2_item_count;
-
-//              printf ("GC#%d: first phase of sort_mark_list for heap %d took %u cycles to sort %u entries (cost/(n*log2(n) = %5.2f)\n", settings.gc_index, this->heap_number, elapsed_cycles, item_count, elapsed_cyles_by_n_log_n);
+    ptrdiff_t elapsed_cycles = get_cycle_count() - start;
 
 #ifdef WRITE_SORT_DATA
-                char file_name[256];
-                sprintf_s (file_name, _countof(file_name), "sort_data_gc%d_heap%d", settings.gc_index, heap_number);
-
-                FILE* f;
-                errno_t err = fopen_s (&f, file_name, "wb");
-
-                if (err == 0)
-                {
-                    size_t magic = 'SDAT';
-                    if (fwrite (&magic, sizeof(magic), 1, f) != 1)
-                        printf ("fwrite failed\n");
-                    if (fwrite (&elapsed_cycles, sizeof(elapsed_cycles), 1, f) != 1)
-                        printf ("fwrite failed\n");
-                    if (fwrite (&low, sizeof(low), 1, f) != 1)
-                        printf ("fwrite failed\n");
-                    if (fwrite (&item_count, sizeof(item_count), 1, f) != 1)
-                        printf ("fwrite failed\n");
-                    if (fwrite (mark_list_copy, sizeof(mark_list_copy[0]), item_count, f) != item_count)
-                        printf ("fwrite failed\n");
-                    if (fwrite (&magic, sizeof(magic), 1, f) != 1)
-                        printf ("fwrite failed\n");
-                    if (fclose (f) != 0)
-                        printf ("fclose failed\n");
-                }
+    char file_name[256];
+    sprintf_s (file_name, _countof(file_name), "sort_data_gc%d_heap%d", settings.gc_index, heap_number);
+
+    FILE* f;
+    errno_t err = fopen_s (&f, file_name, "wb");
+
+    if (err == 0)
+    {
+        size_t magic = 'SDAT';
+        if (fwrite (&magic, sizeof(magic), 1, f) != 1)
+            dprintf (3, ("fwrite failed\n"));
+        if (fwrite (&elapsed_cycles, sizeof(elapsed_cycles), 1, f) != 1)
+            dprintf (3, ("fwrite failed\n"));
+        if (fwrite (&low, sizeof(low), 1, f) != 1)
+            dprintf (3, ("fwrite failed\n"));
+        if (fwrite (&item_count, sizeof(item_count), 1, f) != 1)
+            dprintf (3, ("fwrite failed\n"));
+        if (fwrite (mark_list_copy, sizeof(mark_list_copy[0]), item_count, f) != item_count)
+            dprintf (3, ("fwrite failed\n"));
+        if (fwrite (&magic, sizeof(magic), 1, f) != 1)
+            dprintf (3, ("fwrite failed\n"));
+        if (fclose (f) != 0)
+            dprintf (3, ("fclose failed\n"));
+    }
 #endif
-            }
-            do_unpack_avx2 (mark_list_32, item_count, low);
-        }
-        else
-        {
-            dprintf (3, ("Sorting mark lists"));
-            if (mark_list_index > mark_list)
-                _sort (mark_list, mark_list_index - 1, 0);
-        }
 
 #ifdef _DEBUG
-            // in debug, sort the copy as well using the proven sort, so we can check we got the right result
-            if (mark_list_copy_index > mark_list_copy)
-            {
-                introsort::sort (mark_list_copy, mark_list_copy_index - 1, 0);
-            }
-            for (ptrdiff_t i = 0; i < item_count; i++)
-            {
-                uint8_t* item = mark_list[i];
-                assert(mark_list_copy[i] == item);
-            }
-#endif //_DEBUG
+    // in debug, sort the copy as well using the proven sort, so we can check we got the right result
+    if (mark_list_copy_index > mark_list_copy)
+    {
+        introsort::sort (mark_list_copy, mark_list_copy_index - 1, 0);
     }
-    else
-#endif //USE_VXSORT
+    for (ptrdiff_t i = 0; i < item_count; i++)
     {
-        dprintf (3, ("Sorting mark lists"));
-        if (mark_list_index > mark_list)
-        {
-            ptrdiff_t start = get_cycle_count();
-
-            introsort::sort (mark_list, mark_list_index - 1, 0);
-
-            ptrdiff_t elapsed_cycles = get_cycle_count() - start;
-            size_t item_count = mark_list_index - mark_list;
-            int log2_item_count = index_of_highest_set_bit (item_count);
-            double elapsed_cyles_by_n_log_n = (double)elapsed_cycles / item_count / log2_item_count;
+        uint8_t* item = mark_list[i];
+        assert (mark_list_copy[i] == item);
+    }
+#endif //_DEBUG
 
-//          printf ("GC#%d: first phase of sort_mark_list for heap %d took %u cycles to sort %u entries (cost/(n*log2(n) = %5.2f)\n", settings.gc_index, this->heap_number, elapsed_cycles, item_count, elapsed_cyles_by_n_log_n);
-        }
+#else //USE_VXSORT
+    dprintf (3, ("Sorting mark lists"));
+    if (mark_list_index > mark_list)
+    {
+        introsort::sort (mark_list, mark_list_index - 1, 0);
     }
+#endif
 
 // printf("first phase of sort_mark_list for heap %d took %u cycles to sort %u entries\n", this->heap_number, GetCycleCount32() - start, mark_list_index - mark_list);
 // start = GetCycleCount32();
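
For anyone replaying these dumps offline, the WRITE_SORT_DATA block writes each file as: magic, elapsed cycles, the low bound, the item count, the raw pointer entries, and the magic again. A matching reader might look like the sketch below (a hypothetical analysis tool, not part of the runtime; it assumes the file is read on a machine with the same pointer and size_t widths as the writer):

#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <vector>

// reads one sort_data_gc<N>_heap<M> file produced by the block above;
// returns false on I/O failure or if the framing magics do not match
static bool read_sort_data (const char* path, std::vector<uint8_t*>& entries)
{
    FILE* f = fopen (path, "rb");
    if (f == nullptr)
        return false;

    size_t magic = 0, end_magic = 1;
    ptrdiff_t elapsed_cycles = 0, item_count = 0;
    uint8_t* low = nullptr;

    bool ok = (fread (&magic, sizeof(magic), 1, f) == 1)
           && (fread (&elapsed_cycles, sizeof(elapsed_cycles), 1, f) == 1)
           && (fread (&low, sizeof(low), 1, f) == 1)
           && (fread (&item_count, sizeof(item_count), 1, f) == 1)
           && (item_count >= 0);

    if (ok)
    {
        entries.resize (item_count);
        ok = (fread (entries.data(), sizeof(entries[0]), item_count, f) == (size_t)item_count)
          && (fread (&end_magic, sizeof(end_magic), 1, f) == 1)
          && (magic == end_magic);
    }

    fclose (f);
    return ok;
}
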
@@ -22312,38 +22293,10 @@ void gc_heap::plan_phase (int condemned_gen_number)
         {
 #ifndef MULTIPLE_HEAPS
 #ifdef USE_VXSORT
-            ptrdiff_t entry_count = mark_list_index - mark_list;
-            // conservatively use AVX2 only for large mark lists,
-            // and do runtime test to check whether AVX2 is indeed available
-            if (entry_count > AVX2_THRESHOLD_SIZE && IsSupportedInstructionSet (InstructionSet::AVX2))
-            {
-                int32_t* mark_list_32 = (int32_t*)mark_list;
-                uint8_t* low = slow;
-                ptrdiff_t range = shigh - low;
-                if ((uint32_t)range == range)
-                {
-                    do_pack_avx2 (mark_list, entry_count, low);
-                    _sort (&mark_list_32[0], &mark_list_32[entry_count - 1], 0);
-                    do_unpack_avx2 (mark_list_32, entry_count, low);
-#ifdef _DEBUG
-                    uint8_t*high = heap_segment_allocated (ephemeral_heap_segment);
-                    for (ptrdiff_t i = 0; i < entry_count; i++)
-                    {
-                        uint8_t* item = mark_list[i];
-                        assert (low <= item && item < high);
-                    }
-#endif //_DEBUG
-                }
-                else
-                {
-                    _sort (&mark_list[0], mark_list_index - 1, 0);
-                }
-            }
-            else
+            do_vxsort (mark_list, mark_list_index - mark_list, slow, shigh);
+#else //USE_VXSORT
+            _sort (&mark_list[0], mark_list_index - 1, 0);
 #endif //USE_VXSORT
-            {
-                introsort::sort (&mark_list[0], mark_list_index - 1, 0);
-            }
 
             //printf ("using mark list at GC #%d", dd_collection_count (dynamic_data_of (0)));
             //verify_qsort_array (&mark_list[0], mark_list_index-1);
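
One convention worth noting at these call sites: _sort and introsort::sort take a pointer to the last element (hence mark_list_index - 1), while do_vxsort takes an element count plus the low/high address range, not a one-past-the-end iterator. A small adapter sketch for code holding an STL-style [begin, end) range (entirely hypothetical; std::sort stands in for the patch's do_vxsort so the example is self-contained):

#include <algorithm>
#include <cstddef>
#include <cstdint>

// stand-in for the patch's do_vxsort, with the same parameter shape;
// the real function dispatches to the AVX2/AVX-512 paths shown above
static void do_vxsort_standin (uint8_t** item_array, ptrdiff_t item_count,
                               uint8_t* /*range_low*/, uint8_t* /*range_high*/)
{
    std::sort (item_array, item_array + item_count);
}

// hypothetical adapter: maps a half-open [begin, end) range onto the
// (array, count, range) parameters used at the call sites above
static void sort_half_open (uint8_t** begin, uint8_t** end,
                            uint8_t* range_low, uint8_t* range_high)
{
    do_vxsort_standin (begin, end - begin, range_low, range_high);
}
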
diff --git a/src/coreclr/src/gc/gcsvr.cpp b/src/coreclr/src/gc/gcsvr.cpp
index 34d006887a5bda..8cdef316ea953a 100644
--- a/src/coreclr/src/gc/gcsvr.cpp
+++ b/src/coreclr/src/gc/gcsvr.cpp
@@ -21,7 +21,7 @@
 
 #define SERVER_GC 1
 
-#if defined(TARGET_AMD64)
+#if defined(TARGET_AMD64) && defined(TARGET_WINDOWS)
 #include "vxsort/do_vxsort.h"
 #endif
 
diff --git a/src/coreclr/src/gc/gcwks.cpp b/src/coreclr/src/gc/gcwks.cpp
index 50264323d6c077..531e8a0afdf283 100644
--- a/src/coreclr/src/gc/gcwks.cpp
+++ b/src/coreclr/src/gc/gcwks.cpp
@@ -21,7 +21,7 @@
 #undef SERVER_GC
 #endif
 
-#if defined(TARGET_AMD64)
+#if defined(TARGET_AMD64) && defined(TARGET_WINDOWS)
 #include "vxsort/do_vxsort.h"
 #endif
 
diff --git a/src/coreclr/src/gc/vxsort/smallsort/codegen/bitonic_gen.py b/src/coreclr/src/gc/vxsort/smallsort/codegen/bitonic_gen.py
index 484f7fec5d9a39..4681e4986c3f88 100644
--- a/src/coreclr/src/gc/vxsort/smallsort/codegen/bitonic_gen.py
+++ b/src/coreclr/src/gc/vxsort/smallsort/codegen/bitonic_gen.py
@@ -4,6 +4,15 @@
 ##
 
 #!/usr/bin/env python3
+#
+# This is a tool to generate the bitonic sorter code that is used for small arrays.
+#
+# usage: bitonic_gen.py [-h] [--vector-isa VECTOR_ISA [VECTOR_ISA ...]]
+#                       [--break-inline BREAK_INLINE] [--output-dir OUTPUT_DIR]
+#
+# the checked-in files in src/coreclr/src/gc/vxsort/smallsort can be generated with:
+#     python bitonic_gen.py --output-dir c:\temp --vector-isa AVX2 AVX512
+#
 import argparse
 import os
 from enum import Enum