{
uint32 i = 0;
-#ifdef USE_SSE2
+#ifndef USE_NO_SIMD
/*
- * A 16-byte register only has four 4-byte lanes. For better
- * instruction-level parallelism, each loop iteration operates on a block
- * of four registers. Testing has showed this is ~40% faster than using a
- * block of two registers.
+ * For better instruction-level parallelism, each loop iteration operates
+ * on a block of four registers. Testing for SSE2 has shown this is ~40%
+ * faster than using a block of two registers.
*/
- const __m128i keys = _mm_set1_epi32(key); /* load 4 copies of key */
- uint32 iterations = nelem & ~0xF; /* round down to multiple of 16 */
+ const Vector32 keys = vector32_broadcast(key); /* load copies of key */
+ const uint32 nelem_per_vector = sizeof(Vector32) / sizeof(uint32);
+ const uint32 nelem_per_iteration = 4 * nelem_per_vector;
+
+ /* round down to multiple of elements per iteration */
+ const uint32 tail_idx = nelem & ~(nelem_per_iteration - 1);
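
/*
 * Example with concrete numbers (illustrative, not part of the patch): on
 * SSE2, Vector32 is 16 bytes, so nelem_per_vector is 4 and
 * nelem_per_iteration is 16. The mask trick requires nelem_per_iteration
 * to be a power of two; a hypothetical nelem of 100 gives
 * tail_idx = 100 & ~15 = 96, leaving four elements for the scalar tail
 * loop below.
 */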
#if defined(USE_ASSERT_CHECKING)
    bool assert_result = false;

    /* pre-compute the result for assert checking */
    for (uint32 j = 0; j < nelem; j++)
        if (key == base[j])
            assert_result = true;
#endif
- for (i = 0; i < iterations; i += 16)
+ for (i = 0; i < tail_idx; i += nelem_per_iteration)
{
- /* load the next block into 4 registers holding 4 values each */
- const __m128i vals1 = _mm_loadu_si128((__m128i *) & base[i]);
- const __m128i vals2 = _mm_loadu_si128((__m128i *) & base[i + 4]);
- const __m128i vals3 = _mm_loadu_si128((__m128i *) & base[i + 8]);
- const __m128i vals4 = _mm_loadu_si128((__m128i *) & base[i + 12]);
+ Vector32 vals1,
+ vals2,
+ vals3,
+ vals4,
+ result1,
+ result2,
+ result3,
+ result4,
+ tmp1,
+ tmp2,
+ result;
+
+ /* load the next block into 4 registers */
+ vector32_load(&vals1, &base[i]);
+ vector32_load(&vals2, &base[i + nelem_per_vector]);
+ vector32_load(&vals3, &base[i + nelem_per_vector * 2]);
+ vector32_load(&vals4, &base[i + nelem_per_vector * 3]);
/* compare each value to the key */
- const __m128i result1 = _mm_cmpeq_epi32(keys, vals1);
- const __m128i result2 = _mm_cmpeq_epi32(keys, vals2);
- const __m128i result3 = _mm_cmpeq_epi32(keys, vals3);
- const __m128i result4 = _mm_cmpeq_epi32(keys, vals4);
+ result1 = vector32_eq(keys, vals1);
+ result2 = vector32_eq(keys, vals2);
+ result3 = vector32_eq(keys, vals3);
+ result4 = vector32_eq(keys, vals4);
/* combine the results into a single variable */
- const __m128i tmp1 = _mm_or_si128(result1, result2);
- const __m128i tmp2 = _mm_or_si128(result3, result4);
- const __m128i result = _mm_or_si128(tmp1, tmp2);
+ tmp1 = vector32_or(result1, result2);
+ tmp2 = vector32_or(result3, result4);
+ result = vector32_or(tmp1, tmp2);
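
/*
 * On SSE2, Vector8 and Vector32 are both typedef'd as __m128i (see the
 * simd.h portion below), so the (Vector8) cast in the test below is free;
 * any set byte in the combined result means some lane matched.
 */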
/* see if there was a match */
- if (_mm_movemask_epi8(result) != 0)
+ if (vector8_is_highbit_set((Vector8) result))
{
-#if defined(USE_ASSERT_CHECKING)
Assert(assert_result == true);
-#endif
return true;
}
}
-#endif /* USE_SSE2 */
+#endif /* ! USE_NO_SIMD */
/* Process the remaining elements one at a time. */
for (; i < nelem; i++)
{
if (key == base[i])
{
-#if defined(USE_SSE2) && defined(USE_ASSERT_CHECKING)
+#ifndef USE_NO_SIMD
Assert(assert_result == true);
#endif
return true;
}
}
-#if defined(USE_SSE2) && defined(USE_ASSERT_CHECKING)
+#ifndef USE_NO_SIMD
Assert(assert_result == false);
#endif
return false;
}
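
/*
 * Usage sketch, assuming the body above belongs to a helper with the
 * signature bool pg_lfind32(uint32 key, uint32 *base, uint32 nelem); the
 * signature itself is not shown in this excerpt.
 *
 *     uint32 vals[] = {1, 2, 3, 42, 99};
 *     bool   found = pg_lfind32(42, vals, 5);    (true: 42 is present)
 *
 * The vector primitives that follow are from the simd.h header (note its
 * closing SIMD_H include guard at the end of this excerpt).
 */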
#include <emmintrin.h>
#define USE_SSE2
typedef __m128i Vector8;
+typedef __m128i Vector32;
#else
/*
* If no SIMD instructions are available, we can in some cases emulate vector
- * operations using bitwise operations on unsigned integers.
+ * operations using bitwise operations on unsigned integers. Note that many
+ * of the functions in this file presently do not have non-SIMD
+ * implementations. In particular, none of the functions involving Vector32
+ * are implemented without SIMD since it's likely not worthwhile to represent
+ * two 32-bit integers using a uint64.
*/
#define USE_NO_SIMD
typedef uint64 Vector8;
#endif
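
/*
 * Sketch of the emulation idea (assumes the uint64 Vector8 above): the
 * integer is treated as eight byte-sized lanes, and plain arithmetic
 * stands in for vector instructions. For example,
 *
 *     ~UINT64CONST(0) / 0xFF * c
 *
 * multiplies c into every lane (0x0101010101010101 * c), so broadcasting
 * 0x5C yields 0x5C5C5C5C5C5C5C5C.
 */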
-
/* load/store operations */
static inline void vector8_load(Vector8 *v, const uint8 *s);
+#ifndef USE_NO_SIMD
+static inline void vector32_load(Vector32 *v, const uint32 *s);
+#endif
/* assignment operations */
static inline Vector8 vector8_broadcast(const uint8 c);
+#ifndef USE_NO_SIMD
+static inline Vector32 vector32_broadcast(const uint32 c);
+#endif
/* element-wise comparisons to a scalar */
static inline bool vector8_has(const Vector8 v, const uint8 c);
/* arithmetic operations */
static inline Vector8 vector8_or(const Vector8 v1, const Vector8 v2);
-
-/* Different semantics for SIMD architectures. */
#ifndef USE_NO_SIMD
+static inline Vector32 vector32_or(const Vector32 v1, const Vector32 v2);
+static inline Vector8 vector8_ssub(const Vector8 v1, const Vector8 v2);
+#endif
-/* comparisons between vectors */
+/*
+ * comparisons between vectors
+ *
+ * Note: These return a vector rather than a boolean, which is why we don't
+ * have non-SIMD implementations.
+ */
+#ifndef USE_NO_SIMD
static inline Vector8 vector8_eq(const Vector8 v1, const Vector8 v2);
-
-#endif /* ! USE_NO_SIMD */
+static inline Vector32 vector32_eq(const Vector32 v1, const Vector32 v2);
+#endif
/*
 * Load a chunk of memory into the given vector.
 */
static inline void
vector8_load(Vector8 *v, const uint8 *s)
{
#ifdef USE_SSE2
    *v = _mm_loadu_si128((const __m128i *) s);
#else
    memcpy(v, s, sizeof(Vector8));
#endif
}
+#ifndef USE_NO_SIMD
+static inline void
+vector32_load(Vector32 *v, const uint32 *s)
+{
+#ifdef USE_SSE2
+ *v = _mm_loadu_si128((const __m128i *) s);
+#endif
+}
+#endif /* ! USE_NO_SIMD */
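
/*
 * Note: _mm_loadu_si128() tolerates unaligned addresses, which is why the
 * search loop above can load from arbitrary offsets of 'base' without any
 * alignment fixup.
 */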
/*
 * Create a vector with all elements set to the same value.
 */
static inline Vector8
vector8_broadcast(const uint8 c)
{
#ifdef USE_SSE2
    return _mm_set1_epi8(c);
#else
    return ~UINT64CONST(0) / 0xFF * c;
#endif
}
+#ifndef USE_NO_SIMD
+static inline Vector32
+vector32_broadcast(const uint32 c)
+{
+#ifdef USE_SSE2
+ return _mm_set1_epi32(c);
+#endif
+}
+#endif /* ! USE_NO_SIMD */
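
/*
 * For example, vector32_broadcast(0xDEADBEEF) on SSE2 produces a register
 * holding {0xDEADBEEF, 0xDEADBEEF, 0xDEADBEEF, 0xDEADBEEF}.
 */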
+
/*
* Return true if any elements in the vector are equal to the given scalar.
*/
static inline bool
vector8_has(const Vector8 v, const uint8 c)
{
    bool result;

    /* pre-compute the result for assert checking */
#ifdef USE_ASSERT_CHECKING
    bool assert_result = false;

    for (Size i = 0; i < sizeof(Vector8); i++)
        if (((const uint8 *) &v)[i] == c)
            assert_result = true;
#endif

#if defined(USE_NO_SIMD)
/* any bytes in v equal to c will evaluate to zero via XOR */
result = vector8_has_zero(v ^ vector8_broadcast(c));
#elif defined(USE_SSE2)
- result = _mm_movemask_epi8(_mm_cmpeq_epi8(v, vector8_broadcast(c)));
+ result = vector8_is_highbit_set(vector8_eq(v, vector8_broadcast(c)));
#endif
Assert(assert_result == result);
    return result;
}

/*
 * Convenience function equivalent to vector8_has(v, 0)
 */
static inline bool
vector8_has_zero(const Vector8 v)
{
#if defined(USE_NO_SIMD)
/*
- * We cannot call vector8_has() here, because that would lead to a circular
- * definition.
+ * We cannot call vector8_has() here, because that would lead to a
+ * circular definition.
*/
return vector8_has_le(v, 0);
#elif defined(USE_SSE2)
    return vector8_has(v, 0);
#endif
}

/*
 * Return true if any elements in the vector are less than or equal to the
 * given scalar.
 */
static inline bool
vector8_has_le(const Vector8 v, const uint8 c)
{
bool result = false;
-#if defined(USE_SSE2)
- __m128i sub;
-#endif
/* pre-compute the result for assert checking */
#ifdef USE_ASSERT_CHECKING
    bool assert_result = false;

    for (Size i = 0; i < sizeof(Vector8); i++)
        if (((const uint8 *) &v)[i] <= c)
            assert_result = true;
#endif

#if defined(USE_SSE2)
/*
* Use saturating subtraction to find bytes <= c, which will present as
- * NUL bytes in 'sub'.
+ * NUL bytes. This approach is a workaround for the lack of unsigned
+ * comparison instructions on some architectures.
*/
- sub = _mm_subs_epu8(v, vector8_broadcast(c));
- result = vector8_has_zero(sub);
+ result = vector8_has_zero(vector8_ssub(v, vector8_broadcast(c)));
#endif
Assert(assert_result == result);
    return result;
}
+#ifndef USE_NO_SIMD
+static inline Vector32
+vector32_or(const Vector32 v1, const Vector32 v2)
+{
+#ifdef USE_SSE2
+ return _mm_or_si128(v1, v2);
+#endif
+}
+#endif /* ! USE_NO_SIMD */
-/* Different semantics for SIMD architectures. */
+/*
+ * Return the result of subtracting the respective elements of the input
+ * vectors using saturation (i.e., if the operation would yield a value less
+ * than zero, zero is returned instead). For more information on saturation
+ * arithmetic, see https://en.wikipedia.org/wiki/Saturation_arithmetic
+ */
#ifndef USE_NO_SIMD
+static inline Vector8
+vector8_ssub(const Vector8 v1, const Vector8 v2)
+{
+#ifdef USE_SSE2
+ return _mm_subs_epu8(v1, v2);
+#endif
+}
+#endif /* ! USE_NO_SIMD */
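
/*
 * Worked example: vector8_ssub(vector8_broadcast(0x1A),
 * vector8_broadcast(0x20)) saturates every lane to zero (0x1A < 0x20),
 * while a lane holding 0x41 would yield 0x21. vector8_has_le() above
 * depends on exactly this zero-marking behavior.
 */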
/*
* Return a vector with all bits set in each lane where the corresponding
* lanes in the inputs are equal.
*/
+#ifndef USE_NO_SIMD
static inline Vector8
vector8_eq(const Vector8 v1, const Vector8 v2)
{
#ifdef USE_SSE2
    return _mm_cmpeq_epi8(v1, v2);
#endif
}
+#endif /* ! USE_NO_SIMD */
+#ifndef USE_NO_SIMD
+static inline Vector32
+vector32_eq(const Vector32 v1, const Vector32 v2)
+{
+#ifdef USE_SSE2
+ return _mm_cmpeq_epi32(v1, v2);
+#endif
+}
#endif /* ! USE_NO_SIMD */
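
/*
 * For example, comparing {1, 2, 3, 4} against vector32_broadcast(3) with
 * vector32_eq() yields {0, 0, 0xFFFFFFFF, 0}; the search loop above then
 * detects the match via vector8_is_highbit_set().
 */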
#endif /* SIMD_H */