/*-------------------------------------------------------------------------
*
* simd.h
* Support for platform-specific vector operations.
*
* Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* src/include/port/simd.h
*
* NOTES
* - VectorN in this file refers to a register where the element operands
 * are N bits wide. The vector width is platform-specific, so users who care
 * about it will need to inspect "sizeof(VectorN)".
*
*-------------------------------------------------------------------------
*/
#ifndef SIMD_H
#define SIMD_H
#if (defined(__x86_64__) || defined(_M_AMD64))
/*
* SSE2 instructions are part of the spec for the 64-bit x86 ISA. We assume
* that compilers targeting this architecture understand SSE2 intrinsics.
*
* We use emmintrin.h rather than the comprehensive header immintrin.h in
* order to exclude extensions beyond SSE2. This is because MSVC, at least,
* will allow the use of intrinsics that haven't been enabled at compile
* time.
*/
#include <emmintrin.h>
#define USE_SSE2
typedef __m128i Vector8;
typedef __m128i Vector32;
#elif defined(__aarch64__) && defined(__ARM_NEON)
/*
* We use the Neon instructions if the compiler provides access to them (as
* indicated by __ARM_NEON) and we are on aarch64. While Neon support is
* technically optional for aarch64, it appears that all available 64-bit
* hardware does have it. Neon exists in some 32-bit hardware too, but we
* could not realistically use it there without a run-time check, which seems
* not worth the trouble for now.
*/
#include <arm_neon.h>
#define USE_NEON
typedef uint8x16_t Vector8;
typedef uint32x4_t Vector32;
#else
/*
* If no SIMD instructions are available, we can in some cases emulate vector
* operations using bitwise operations on unsigned integers. Note that many
* of the functions in this file presently do not have non-SIMD
* implementations. In particular, none of the functions involving Vector32
* are implemented without SIMD since it's likely not worthwhile to represent
* two 32-bit integers using a uint64.
*/
#define USE_NO_SIMD
typedef uint64 Vector8;
#endif
/* load/store operations */
static inline void vector8_load(Vector8 *v, const uint8 *s);
#ifndef USE_NO_SIMD
static inline void vector32_load(Vector32 *v, const uint32 *s);
#endif
/* assignment operations */
static inline Vector8 vector8_broadcast(const uint8 c);
#ifndef USE_NO_SIMD
static inline Vector32 vector32_broadcast(const uint32 c);
#endif
/* element-wise comparisons to a scalar */
static inline bool vector8_has(const Vector8 v, const uint8 c);
static inline bool vector8_has_zero(const Vector8 v);
static inline bool vector8_has_le(const Vector8 v, const uint8 c);
static inline bool vector8_is_highbit_set(const Vector8 v);
#ifndef USE_NO_SIMD
static inline bool vector32_is_highbit_set(const Vector32 v);
static inline uint32 vector8_highbit_mask(const Vector8 v);
#endif
/* arithmetic operations */
static inline Vector8 vector8_or(const Vector8 v1, const Vector8 v2);
#ifndef USE_NO_SIMD
static inline Vector32 vector32_or(const Vector32 v1, const Vector32 v2);
#endif
/*
* comparisons between vectors
*
 * Note: These return a vector rather than a boolean, which is why we don't
* have non-SIMD implementations.
*/
#ifndef USE_NO_SIMD
static inline Vector8 vector8_eq(const Vector8 v1, const Vector8 v2);
static inline Vector8 vector8_min(const Vector8 v1, const Vector8 v2);
static inline Vector32 vector32_eq(const Vector32 v1, const Vector32 v2);
#endif
/*
* Load a chunk of memory into the given vector.
*/
static inline void
vector8_load(Vector8 *v, const uint8 *s)
{
#if defined(USE_SSE2)
*v = _mm_loadu_si128((const __m128i *) s);
#elif defined(USE_NEON)
*v = vld1q_u8(s);
#else
memcpy(v, s, sizeof(Vector8));
#endif
}
#ifndef USE_NO_SIMD
static inline void
vector32_load(Vector32 *v, const uint32 *s)
{
#ifdef USE_SSE2
*v = _mm_loadu_si128((const __m128i *) s);
#elif defined(USE_NEON)
*v = vld1q_u32(s);
#endif
}
#endif /* ! USE_NO_SIMD */
/*
* Store a vector into the given memory address.
*/
#ifndef USE_NO_SIMD
static inline void
vector8_store(uint8 *s, Vector8 v)
{
#ifdef USE_SSE2
_mm_storeu_si128((Vector8 *) s, v);
#elif defined(USE_NEON)
vst1q_u8(s, v);
#endif
}
#endif /* ! USE_NO_SIMD */
/*
* Create a vector with all elements set to the same value.
*/
static inline Vector8
vector8_broadcast(const uint8 c)
{
#if defined(USE_SSE2)
return _mm_set1_epi8(c);
#elif defined(USE_NEON)
return vdupq_n_u8(c);
#else
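	/* ~0 / 0xFF yields 0x0101010101010101; multiplying copies c to every byte */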
return ~UINT64CONST(0) / 0xFF * c;
#endif
}
#ifndef USE_NO_SIMD
static inline Vector32
vector32_broadcast(const uint32 c)
{
#ifdef USE_SSE2
return _mm_set1_epi32(c);
#elif defined(USE_NEON)
return vdupq_n_u32(c);
#endif
}
#endif /* ! USE_NO_SIMD */
/*
* Return true if any elements in the vector are equal to the given scalar.
*/
static inline bool
vector8_has(const Vector8 v, const uint8 c)
{
bool result;
/* pre-compute the result for assert checking */
#ifdef USE_ASSERT_CHECKING
bool assert_result = false;
for (Size i = 0; i < sizeof(Vector8); i++)
{
if (((const uint8 *) &v)[i] == c)
{
assert_result = true;
break;
}
}
#endif /* USE_ASSERT_CHECKING */
#if defined(USE_NO_SIMD)
/* any bytes in v equal to c will evaluate to zero via XOR */
result = vector8_has_zero(v ^ vector8_broadcast(c));
#else
result = vector8_is_highbit_set(vector8_eq(v, vector8_broadcast(c)));
#endif
Assert(assert_result == result);
return result;
}
/*
* Convenience function equivalent to vector8_has(v, 0)
*/
static inline bool
vector8_has_zero(const Vector8 v)
{
#if defined(USE_NO_SIMD)
/*
* We cannot call vector8_has() here, because that would lead to a
* circular definition.
*/
return vector8_has_le(v, 0);
#else
return vector8_has(v, 0);
#endif
}
/*
* Return true if any elements in the vector are less than or equal to the
* given scalar.
*/
static inline bool
vector8_has_le(const Vector8 v, const uint8 c)
{
bool result = false;
#ifdef USE_SSE2
Vector8 umin;
Vector8 cmpe;
#endif
/* pre-compute the result for assert checking */
#ifdef USE_ASSERT_CHECKING
bool assert_result = false;
for (Size i = 0; i < sizeof(Vector8); i++)
{
if (((const uint8 *) &v)[i] <= c)
{
assert_result = true;
break;
}
}
#endif /* USE_ASSERT_CHECKING */
#if defined(USE_NO_SIMD)
/*
* To find bytes <= c, we can use bitwise operations to find bytes < c+1,
* but it only works if c+1 <= 128 and if the highest bit in v is not set.
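 * The idea: subtracting (c + 1) borrows into a byte's 0x80 bit when that
 * byte is too small, and "& ~v" masks out bytes whose high bit was already
 * set in the input, so a nonzero result means some byte was <= c.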
* Adapted from
* https://graphics.stanford.edu/~seander/bithacks.html#HasLessInWord
*/
if ((int64) v >= 0 && c < 0x80)
result = (v - vector8_broadcast(c + 1)) & ~v & vector8_broadcast(0x80);
else
{
/* one byte at a time */
for (Size i = 0; i < sizeof(Vector8); i++)
{
if (((const uint8 *) &v)[i] <= c)
{
result = true;
break;
}
}
}
#elif defined(USE_SSE2)
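	/* an element is <= c exactly when min(element, c) equals the element */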
umin = vector8_min(v, vector8_broadcast(c));
cmpe = vector8_eq(umin, v);
result = vector8_is_highbit_set(cmpe);
#elif defined(USE_NEON)
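	/* vminvq_u8 computes the horizontal minimum across all 16 lanes */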
result = vminvq_u8(v) <= c;
#endif
Assert(assert_result == result);
return result;
}
/*
 * Return true if any elements in the vector are greater than or equal to the
* given scalar.
*/
#ifndef USE_NO_SIMD
static inline bool
vector8_has_ge(const Vector8 v, const uint8 c)
{
#ifdef USE_SSE2
Vector8 umax = _mm_max_epu8(v, vector8_broadcast(c));
Vector8 cmpe = vector8_eq(umax, v);
return vector8_is_highbit_set(cmpe);
#elif defined(USE_NEON)
return vmaxvq_u8(v) >= c;
#endif
}
#endif /* ! USE_NO_SIMD */
/*
* Return true if the high bit of any element is set
*/
static inline bool
vector8_is_highbit_set(const Vector8 v)
{
#ifdef USE_SSE2
return _mm_movemask_epi8(v) != 0;
#elif defined(USE_NEON)
return vmaxvq_u8(v) > 0x7F;
#else
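	/* nonzero (hence true) iff some byte's high bit survives the 0x80 mask */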
return v & vector8_broadcast(0x80);
#endif
}
/*
* Exactly like vector8_is_highbit_set except for the input type, so it
* looks at each byte separately.
*
* XXX x86 uses the same underlying type for 8-bit, 16-bit, and 32-bit
* integer elements, but Arm does not, hence the need for a separate
* function. We could instead adopt the behavior of Arm's vmaxvq_u32(), i.e.
* check each 32-bit element, but that would require an additional mask
* operation on x86.
*/
#ifndef USE_NO_SIMD
static inline bool
vector32_is_highbit_set(const Vector32 v)
{
#if defined(USE_NEON)
return vector8_is_highbit_set((Vector8) v);
#else
return vector8_is_highbit_set(v);
#endif
}
#endif /* ! USE_NO_SIMD */
/*
* Return a bitmask formed from the high-bit of each element.
*/
#ifndef USE_NO_SIMD
static inline uint32
vector8_highbit_mask(const Vector8 v)
{
#ifdef USE_SSE2
return (uint32) _mm_movemask_epi8(v);
#elif defined(USE_NEON)
/*
* Note: It would be faster to use vget_lane_u64 and vshrn_n_u16, but that
* returns a uint64, making it inconvenient to combine mask values from
* multiple vectors.
*/
static const uint8 mask[16] = {
1 << 0, 1 << 1, 1 << 2, 1 << 3,
1 << 4, 1 << 5, 1 << 6, 1 << 7,
1 << 0, 1 << 1, 1 << 2, 1 << 3,
1 << 4, 1 << 5, 1 << 6, 1 << 7,
};
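	/*
	 * vshrq_n_s8 arithmetic-shifts each byte so its high bit fills the whole
	 * byte; ANDing with "mask" then keeps one distinct positional bit per
	 * lane.  vextq_u8 rotates the high half down, vzip1q_u8 interleaves the
	 * halves so each 16-bit element pairs one low-half byte with one
	 * high-half byte, and the horizontal add assembles the 16-bit mask
	 * without carries.
	 */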
uint8x16_t masked = vandq_u8(vld1q_u8(mask), (uint8x16_t) vshrq_n_s8((int8x16_t) v, 7));
uint8x16_t maskedhi = vextq_u8(masked, masked, 8);
return (uint32) vaddvq_u16((uint16x8_t) vzip1q_u8(masked, maskedhi));
#endif
}
#endif /* ! USE_NO_SIMD */
/*
* Return the bitwise OR of the inputs
*/
static inline Vector8
vector8_or(const Vector8 v1, const Vector8 v2)
{
#ifdef USE_SSE2
return _mm_or_si128(v1, v2);
#elif defined(USE_NEON)
return vorrq_u8(v1, v2);
#else
return v1 | v2;
#endif
}
#ifndef USE_NO_SIMD
static inline Vector32
vector32_or(const Vector32 v1, const Vector32 v2)
{
#ifdef USE_SSE2
return _mm_or_si128(v1, v2);
#elif defined(USE_NEON)
return vorrq_u32(v1, v2);
#endif
}
#endif /* ! USE_NO_SIMD */
/*
* Return the bitwise AND of the inputs.
*/
#ifndef USE_NO_SIMD
static inline Vector8
vector8_and(const Vector8 v1, const Vector8 v2)
{
#ifdef USE_SSE2
return _mm_and_si128(v1, v2);
#elif defined(USE_NEON)
return vandq_u8(v1, v2);
#endif
}
#endif /* ! USE_NO_SIMD */
/*
* Return the result of adding the respective elements of the input vectors.
*/
#ifndef USE_NO_SIMD
static inline Vector8
vector8_add(const Vector8 v1, const Vector8 v2)
{
#ifdef USE_SSE2
return _mm_add_epi8(v1, v2);
#elif defined(USE_NEON)
return vaddq_u8(v1, v2);
#endif
}
#endif /* ! USE_NO_SIMD */
/*
* Return the result of subtracting the respective elements of the input
* vectors using signed saturation (i.e., if the operation would yield a value
* less than -128, -128 is returned instead). For more information on
* saturation arithmetic, see
* https://en.wikipedia.org/wiki/Saturation_arithmetic
*/
#ifndef USE_NO_SIMD
static inline Vector8
vector8_issub(const Vector8 v1, const Vector8 v2)
{
#ifdef USE_SSE2
return _mm_subs_epi8(v1, v2);
#elif defined(USE_NEON)
return (Vector8) vqsubq_s8((int8x16_t) v1, (int8x16_t) v2);
#endif
}
#endif /* ! USE_NO_SIMD */
/*
* Return a vector with all bits set in each lane where the corresponding
* lanes in the inputs are equal.
*/
#ifndef USE_NO_SIMD
static inline Vector8
vector8_eq(const Vector8 v1, const Vector8 v2)
{
#ifdef USE_SSE2
return _mm_cmpeq_epi8(v1, v2);
#elif defined(USE_NEON)
return vceqq_u8(v1, v2);
#endif
}
#endif /* ! USE_NO_SIMD */
#ifndef USE_NO_SIMD
static inline Vector32
vector32_eq(const Vector32 v1, const Vector32 v2)
{
#ifdef USE_SSE2
return _mm_cmpeq_epi32(v1, v2);
#elif defined(USE_NEON)
return vceqq_u32(v1, v2);
#endif
}
#endif /* ! USE_NO_SIMD */
/*
* Return a vector with all bits set for each lane of v1 that is greater than
* the corresponding lane of v2. NB: The comparison treats the elements as
* signed.
*/
#ifndef USE_NO_SIMD
static inline Vector8
vector8_gt(const Vector8 v1, const Vector8 v2)
{
#ifdef USE_SSE2
return _mm_cmpgt_epi8(v1, v2);
#elif defined(USE_NEON)
return vcgtq_s8((int8x16_t) v1, (int8x16_t) v2);
#endif
}
#endif /* ! USE_NO_SIMD */
/*
* Given two vectors, return a vector with the minimum element of each.
*/
#ifndef USE_NO_SIMD
static inline Vector8
vector8_min(const Vector8 v1, const Vector8 v2)
{
#ifdef USE_SSE2
return _mm_min_epu8(v1, v2);
#elif defined(USE_NEON)
return vminq_u8(v1, v2);
#endif
}
#endif /* ! USE_NO_SIMD */
/*
* Interleave elements of low halves (e.g., for SSE2, bits 0-63) of given
* vectors. Bytes 0, 2, 4, etc. use v1, and bytes 1, 3, 5, etc. use v2.
*/
#ifndef USE_NO_SIMD
static inline Vector8
vector8_interleave_low(const Vector8 v1, const Vector8 v2)
{
#ifdef USE_SSE2
return _mm_unpacklo_epi8(v1, v2);
#elif defined(USE_NEON)
return vzip1q_u8(v1, v2);
#endif
}
#endif /* ! USE_NO_SIMD */
/*
* Interleave elements of high halves (e.g., for SSE2, bits 64-127) of given
* vectors. Bytes 0, 2, 4, etc. use v1, and bytes 1, 3, 5, etc. use v2.
*/
#ifndef USE_NO_SIMD
static inline Vector8
vector8_interleave_high(const Vector8 v1, const Vector8 v2)
{
#ifdef USE_SSE2
return _mm_unpackhi_epi8(v1, v2);
#elif defined(USE_NEON)
return vzip2q_u8(v1, v2);
#endif
}
#endif /* ! USE_NO_SIMD */
/*
* Pack 16-bit elements in the given vectors into a single vector of 8-bit
* elements. The first half of the return vector (e.g., for SSE2, bits 0-63)
* uses v1, and the second half (e.g., for SSE2, bits 64-127) uses v2.
*
 * NB: The upper 8 bits of each 16-bit element must be zero, else this will
* produce different results on different architectures.
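 * (SSE2's _mm_packus_epi16 saturates out-of-range elements, whereas Neon's
 * vuzp1q_u8 simply keeps the even-numbered bytes.)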
*/
#ifndef USE_NO_SIMD
static inline Vector8
vector8_pack_16(const Vector8 v1, const Vector8 v2)
{
Vector8 mask PG_USED_FOR_ASSERTS_ONLY;
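	/* mask selects the high (odd-numbered) byte of each 16-bit element */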
mask = vector8_interleave_low(vector8_broadcast(0), vector8_broadcast(0xff));
Assert(!vector8_has_ge(vector8_and(v1, mask), 1));
Assert(!vector8_has_ge(vector8_and(v2, mask), 1));
#ifdef USE_SSE2
return _mm_packus_epi16(v1, v2);
#elif defined(USE_NEON)
return vuzp1q_u8(v1, v2);
#endif
}
#endif /* ! USE_NO_SIMD */
/*
* Unsigned shift left of each 32-bit element in the vector by "i" bits.
*
* XXX AArch64 requires an integer literal, so we have to list all expected
* values of "i" from all callers in a switch statement. If you add a new
* caller, be sure your expected values of "i" are handled.
*/
#ifndef USE_NO_SIMD
static inline Vector8
vector8_shift_left(const Vector8 v1, int i)
{
#ifdef USE_SSE2
return _mm_slli_epi32(v1, i);
#elif defined(USE_NEON)
switch (i)
{
case 4:
return (Vector8) vshlq_n_u32((Vector32) v1, 4);
default:
Assert(false);
return vector8_broadcast(0);
}
#endif
}
#endif /* ! USE_NO_SIMD */
/*
* Unsigned shift right of each 32-bit element in the vector by "i" bits.
*
* XXX AArch64 requires an integer literal, so we have to list all expected
* values of "i" from all callers in a switch statement. If you add a new
* caller, be sure your expected values of "i" are handled.
*/
#ifndef USE_NO_SIMD
static inline Vector8
vector8_shift_right(const Vector8 v1, int i)
{
#ifdef USE_SSE2
return _mm_srli_epi32(v1, i);
#elif defined(USE_NEON)
switch (i)
{
case 4:
return (Vector8) vshrq_n_u32((Vector32) v1, 4);
case 8:
return (Vector8) vshrq_n_u32((Vector32) v1, 8);
default:
Assert(false);
return vector8_broadcast(0);
}
#endif
}
#endif /* ! USE_NO_SIMD */
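/*
 * Usage sketch: scan a buffer for a byte one full vector at a time, handling
 * any remainder with scalar code.  This is only an illustration of the API
 * above (the SIMD_H_EXAMPLE guard is hypothetical and never defined by
 * PostgreSQL), so it is compiled out by default.
 */
#ifdef SIMD_H_EXAMPLE
static inline bool
example_contains_byte(const uint8 *buf, Size len, uint8 c)
{
	Size		i = 0;

	/* process sizeof(Vector8) bytes per iteration */
	for (; i + sizeof(Vector8) <= len; i += sizeof(Vector8))
	{
		Vector8		chunk;

		vector8_load(&chunk, &buf[i]);
		if (vector8_has(chunk, c))
			return true;
	}

	/* remaining tail, one byte at a time */
	for (; i < len; i++)
	{
		if (buf[i] == c)
			return true;
	}
	return false;
}
#endif							/* SIMD_H_EXAMPLE */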
#endif /* SIMD_H */