Bitdefender Hypervisor Memory Introspection
intrinsics.h
1 /*
2  * Copyright (c) 2020 Bitdefender
3  * SPDX-License-Identifier: Apache-2.0
4  */
5 #ifndef _INTRINSICS_H_
6 #define _INTRINSICS_H_
7 
8 #include <stddef.h>
9 #include <stdint.h>
10 
11 #ifndef INT_COMPILER_MSVC
12 
13 //
14 // Bit Twiddling
15 //
16 static inline uint8_t
17 _rotl8(uint8_t Value, uint8_t Shift)
18 {
19  Shift &= 0x7;
20  return Shift ? (Value << Shift) | (Value >> (8 - Shift)) : Value;
21 }
22 
23 static inline uint8_t
24 _rotr8(uint8_t Value, uint8_t Shift)
25 {
26  Shift &= 0x7;
27  return Shift ? (Value >> Shift) | (Value << (8 - Shift)) : Value;
28 }
29 
30 static inline uint16_t
31 _rotl16(uint16_t Value, uint8_t Shift)
32 {
33  Shift &= 0xf;
34  return Shift ? (Value << Shift) | (Value >> (16 - Shift)) : Value;
35 }
36 
37 static inline uint16_t
38 _rotr16(uint16_t Value, uint8_t Shift)
39 {
40  Shift &= 0xf;
41  return Shift ? (Value >> Shift) | (Value << (16 - Shift)) : Value;
42 }
43 
44 
45 //
46 // Bit Counting and Testing
47 //
48 static inline uint8_t
49 _BitScanForward(uint32_t *Index, uint32_t Mask)
50 {
51  if (!Mask)
52  return 0;
53  *Index = __builtin_ctzl(Mask);
54  return 1;
55 }
56 
57 static inline uint8_t
58 _BitScanReverse(uint32_t *Index, uint32_t Mask)
59 {
60  if (!Mask)
61  return 0;
62  *Index = 31 - __builtin_clz(Mask);
63  return 1;
64 }
65 
66 static inline uint32_t
67 _mm_popcnt_u32(uint32_t Value)
68 {
69  return __builtin_popcount(Value);
70 }
71 
72 static inline uint8_t
73 _bittest(int32_t const *BitBase, int32_t BitPos)
74 {
75  return (*BitBase >> BitPos) & 1;
76 }
77 
78 static inline uint8_t
79 _bittestandcomplement(int32_t *BitBase, int32_t BitPos)
80 {
81  uint8_t _Res = (*BitBase >> BitPos) & 1;
82  *BitBase = *BitBase ^ (1 << BitPos);
83  return _Res;
84 }
85 
86 static inline uint8_t
87 _bittestandreset(int32_t *BitBase, int32_t BitPos)
88 {
89  uint8_t _Res = (*BitBase >> BitPos) & 1;
90  *BitBase = *BitBase & ~(1 << BitPos);
91  return _Res;
92 }
93 
94 static inline uint8_t
95 _bittestandset(int32_t *BitBase, int32_t BitPos)
96 {
97  uint8_t _Res = (*BitBase >> BitPos) & 1;
98  *BitBase = *BitBase | (1 << BitPos);
99  return _Res;
100 }
101 
102 
103 static inline uint8_t
104 _BitScanForward64(uint32_t *Index, uint64_t Mask)
105 {
106  if (!Mask)
107  return 0;
108  *Index = __builtin_ctzll(Mask);
109  return 1;
110 }
111 
112 static inline uint8_t
113 _BitScanReverse64(uint32_t *Index, uint64_t Mask)
114 {
115  if (!Mask)
116  return 0;
117  *Index = 63 - __builtin_clzll(Mask);
118  return 1;
119 }
120 
121 static inline uint64_t
122 _mm_popcnt_u64(uint64_t Value)
123 {
124  return __builtin_popcountll(Value);
125 }
126 
127 static inline uint8_t
128 _bittest64(int64_t const *BitBase, int64_t BitPos)
129 {
130  return (*BitBase >> BitPos) & 1;
131 }
132 
133 static inline uint8_t
134 _bittestandcomplement64(int64_t *BitBase, int64_t BitPos)
135 {
136  uint8_t Res = (*BitBase >> BitPos) & 1;
137  *BitBase = *BitBase ^ (1ll << BitPos);
138  return Res;
139 }
140 
141 static inline uint8_t
142 _bittestandreset64(int64_t *BitBase, int64_t BitPos)
143 {
144  uint8_t Res = (*BitBase >> BitPos) & 1;
145  *BitBase = *BitBase & ~(1ll << BitPos);
146  return Res;
147 }
148 
149 static inline uint8_t
150 _bittestandset64(int64_t *BitBase, int64_t BitPos)
151 {
152  uint8_t Res = (*BitBase >> BitPos) & 1;
153  *BitBase = *BitBase | (1ll << BitPos);
154  return Res;
155 }
156 
157 
158 //
159 // readgs
160 // (Pointers in address space #256 and #257 are relative to the GS and FS
161 // segment registers, respectively.)
162 //
163 #ifdef INT_COMPILER_CLANG
164 #define __ptr_to_addr_space(__addr_space_nbr, __type, offset) \
165  ((volatile __type __attribute__((__address_space__(__addr_space_nbr)))*) \
166  (offset))
167 
168 static inline uint8_t
169 __readgsbyte(uint64_t offset)
170 {
171  return *__ptr_to_addr_space(256, uint8_t, offset);
172 }
173 
174 static inline uint16_t
175 __readgsword(uint64_t offset)
176 {
177  return *__ptr_to_addr_space(256, uint16_t, offset);
178 }
179 
180 static inline uint32_t
181 __readgsdword(uint64_t offset)
182 {
183  return *__ptr_to_addr_space(256, uint32_t, offset);
184 }
185 
186 static inline uint64_t
187 __readgsqword(uint64_t offset)
188 {
189  return *__ptr_to_addr_space(256, uint64_t, offset);
190 }
191 
192 #undef __ptr_to_addr_space
193 
194 #endif
195 
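// Usage sketch (illustrative only): these helpers read through the GS segment
// base. On x64 Windows, for example, the TEB keeps a self pointer at gs:[0x30];
// the offset below is just an example value.
//
//     uint64_t teb_self = __readgsqword(0x30);    // NT_TIB.Self on x64 Windows
//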
196 //
197 // movs, stos
198 //
199 static inline void
200 __movsb(uint8_t *dst, uint8_t const *src, size_t n)
201 {
202  __asm__ __volatile__("rep movsb" : "+D"(dst), "+S"(src), "+c"(n) : : "memory");
203 }
204 
205 static inline void
206 __movsd(uint32_t *dst, uint32_t const *src, size_t n)
207 {
208  __asm__ __volatile__("rep movsl" : "+D"(dst), "+S"(src), "+c"(n) : : "memory");
209 }
210 
211 static inline void
212 __movsw(uint16_t *dst, uint16_t const *src, size_t n)
213 {
214  __asm__ __volatile__("rep movsw" : "+D"(dst), "+S"(src), "+c"(n) : : "memory");
215 }
216 
217 static inline void
218 __stosb(uint8_t *dst, uint8_t x, size_t n)
219 {
220  __asm__ __volatile__("rep stosb" : "+D"(dst), "+c"(n) : "a"(x) : "memory");
221 }
222 
223 static inline void
224 __stosd(uint32_t *dst, uint32_t x, size_t n)
225 {
226  __asm__ __volatile__("rep stosl" : "+D"(dst), "+c"(n) : "a"(x) : "memory");
227 }
228 
229 static inline void
230 __stosw(uint16_t *dst, uint16_t x, size_t n)
231 {
232  __asm__ __volatile__("rep stosw" : "+D"(dst), "+c"(n) : "a"(x) : "memory");
233 }
234 
235 static inline void
236 __movsq(uint64_t *dst, uint64_t const *src, size_t n)
237 {
238  __asm__ __volatile__("rep movsq" : "+D"(dst), "+S"(src), "+c"(n) : : "memory");
239 }
240 
241 static inline void
242 __stosq(uint64_t *dst, uint64_t x, size_t n)
243 {
244  __asm__ __volatile__("rep stosq" : "+D"(dst), "+c"(n) : "a"(x) : "memory");
245 }
246 
247 //
248 // Misc
249 //
250 static inline void *
251 _AddressOfReturnAddress(void)
252 {
253  return (void *)((int8_t *)__builtin_frame_address(0) + sizeof(void *));
254 }
255 
256 static inline void *
257 _ReturnAddress(void)
258 {
259  return __builtin_return_address(0);
260 }
261 
262 static inline void
263 __cpuid(int32_t info[4], int32_t level)
264 {
265  __asm__("cpuid" : "=a"(info[0]), "=b" (info[1]), "=c"(info[2]), "=d"(info[3])
266  : "a"(level));
267 }
268 
269 static inline void
270 __cpuidex(int32_t info[4], int32_t level, int32_t ecx)
271 {
272  __asm__("cpuid" : "=a"(info[0]), "=b" (info[1]), "=c"(info[2]), "=d"(info[3])
273  : "a"(level), "c"(ecx));
274 }
275 
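// Usage sketch (illustrative only): CPUID leaf 0 returns the highest basic
// leaf in EAX and the vendor string in EBX, EDX, ECX (in that order), e.g.
// "GenuineIntel" or "AuthenticAMD".
//
//     int32_t info[4];
//     char vendor[13] = { 0 };
//     __cpuid(info, 0);
//     memcpy(&vendor[0], &info[1], 4);    // EBX
//     memcpy(&vendor[4], &info[3], 4);    // EDX
//     memcpy(&vendor[8], &info[2], 4);    // ECX (memcpy from <string.h>)
//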
276 static inline uint64_t
277 _xgetbv(uint32_t xcr_no)
278 {
279  uint32_t __eax, __edx;
280  __asm__("xgetbv" : "=a" (__eax), "=d" (__edx) : "c" (xcr_no));
281  return ((uint64_t)__edx << 32) | __eax;
282 }
283 
284 static inline void
285 __halt(void)
286 {
287  __asm__ volatile ("hlt");
288 }
289 
290 // __builtin_prefetch expects a compile-time constant locality hint, which it
291 // sometimes fails to see through a __forceinline wrapper, hence the macro.
292 // GCC's __builtin_prefetch takes three arguments: address, readwrite and locality hint.
293 // It generates a PREFETCH or PREFETCHW instruction depending on readwrite.
294 // For now only readwrite=0 is used (the MSVC default).
295 #define _mm_prefetch(p, i) __builtin_prefetch(p, 0, i)
296 
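// Usage sketch (illustrative only): the hint is forwarded as
// __builtin_prefetch's locality argument, so it must be a compile-time
// constant in the 0 (no temporal locality) to 3 (keep in all cache levels)
// range; "entry" below is a placeholder pointer.
//
//     _mm_prefetch((char const *)entry, 3);
//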
297 #ifndef INT_COMPILER_CLANG
298 
299 static inline void
300 _mm_pause(void)
301 {
302  __asm__ __volatile__("pause");
303 }
304 
305 static inline uint64_t
306 __rdtsc(void)
307 {
308  return __builtin_ia32_rdtsc();
309 }
310 
311 #endif
312 
313 
314 //
315 // Privileged intrinsics
316 //
317 static inline uint64_t
318 __readmsr(uint32_t reg)
319 {
320  // Loads the contents of a 64-bit model specific register (MSR) specified in
321  // the ECX register into registers EDX:EAX. The EDX register is loaded with
322  // the high-order 32 bits of the MSR and the EAX register is loaded with the
323  // low-order 32 bits. If less than 64 bits are implemented in the MSR being
324  // read, the values returned to EDX:EAX in unimplemented bit locations are
325  // undefined.
326  uint32_t edx;
327  uint32_t eax;
328  __asm__("rdmsr" : "=d"(edx), "=a"(eax) : "c"(reg));
329  return (((uint64_t)edx) << 32) | (uint64_t)eax;
330 }
331 
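// Usage sketch (illustrative only): read IA32_EFER (MSR 0xC0000080) and test
// the LMA bit (bit 10), which is set while the processor runs in long mode.
// RDMSR is privileged and faults outside CPL 0.
//
//     uint64_t efer = __readmsr(0xC0000080);
//     uint8_t  long_mode_active = (efer >> 10) & 1;
//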
332 static inline uint64_t
333 __readcr0(void)
334 {
335  uint64_t cr0_val;
336  __asm__ __volatile__("mov %%cr0, %0" : "=q"(cr0_val) : : "memory");
337  return cr0_val;
338 }
339 
340 static inline uint64_t
341 __readcr3(void)
342 {
343  uint64_t cr3_val;
344  __asm__ __volatile__("mov %%cr3, %0" : "=q"(cr3_val) : : "memory");
345  return cr3_val;
346 }
347 
348 static inline uint64_t
349 __readcr4(void)
350 {
351  uint64_t cr4_val;
352  __asm__ __volatile__("mov %%cr4, %0" : "=q"(cr4_val) : : "memory");
353  return cr4_val;
354 }
355 
356 static inline uint64_t
357 __readcr8(void)
358 {
359  uint64_t cr8_val;
360  __asm__ __volatile__("mov %%cr8, %0" : "=q"(cr8_val) : : "memory");
361  return cr8_val;
362 }
363 
364 static inline void
365 __writecr0(uint64_t cr0_val)
366 {
367  __asm__("mov %0, %%cr0" : : "q"(cr0_val) : "memory");
368 }
369 
370 static inline void
371 __writecr3(uint64_t cr3_val)
372 {
373  __asm__("mov %0, %%cr3" : : "q"(cr3_val) : "memory");
374 }
375 
376 static inline void
377 __writecr4(uint64_t cr4_val)
378 {
379  __asm__("mov %0, %%cr4" : : "q"(cr4_val) : "memory");
380 }
381 
382 static inline void
383 __writecr8(uint64_t cr8_val)
384 {
385  __asm__("mov %0, %%cr8" : : "q"(cr8_val) : "memory");
386 }
387 
388 static inline void
389 __invlpg(void *Address)
390 {
391  __asm__ __volatile__("invlpg (%0)" : : "b"(Address) : "memory");
392 }
393 
394 static inline uint8_t
395 _interlockedbittestandset(int32_t volatile *BitBase, int32_t BitPos)
396 {
397  int32_t _PrevVal = __atomic_fetch_or(BitBase, 1l << BitPos, __ATOMIC_SEQ_CST);
398  return (_PrevVal >> BitPos) & 1;
399 }
400 
401 static inline uint8_t
402 _interlockedbittestandreset(int32_t volatile *BitBase, int32_t BitPos)
403 {
404  int32_t _PrevVal = __atomic_fetch_and(BitBase, ~(1l << BitPos), __ATOMIC_SEQ_CST);
405  return (_PrevVal >> BitPos) & 1;
406 }
407 
408 static inline uint8_t
409 _interlockedbittestandset64(int64_t volatile *BitBase, int64_t BitPos)
410 {
411  int64_t _PrevVal = __atomic_fetch_or(BitBase, 1ll << BitPos, __ATOMIC_SEQ_CST);
412  return (_PrevVal >> BitPos) & 1;
413 }
414 
415 
416 //
417 // Interlocked Exchange Add
418 //
419 static inline int8_t
420 _InterlockedExchangeAdd8(int8_t volatile *Addend, int8_t Value)
421 {
422  return __atomic_fetch_add(Addend, Value, __ATOMIC_SEQ_CST);
423 }
424 
425 static inline int16_t
426 _InterlockedExchangeAdd16(int16_t volatile *Addend, int16_t Value)
427 {
428  return __atomic_fetch_add(Addend, Value, __ATOMIC_SEQ_CST);
429 }
430 
431 
432 static inline int64_t
433 _InterlockedExchangeAdd64(int64_t volatile *Addend, int64_t Value)
434 {
435  return __atomic_fetch_add(Addend, Value, __ATOMIC_SEQ_CST);
436 }
437 
438 
439 //
440 // Interlocked Increment
441 //
442 static inline int32_t
443 _InterlockedIncrement(int32_t volatile *Value)
444 {
445  return __atomic_add_fetch(Value, 1, __ATOMIC_SEQ_CST);
446 }
447 
448 static inline int16_t
449 _InterlockedIncrement16(int16_t volatile *Value)
450 {
451  return __atomic_add_fetch(Value, 1, __ATOMIC_SEQ_CST);
452 }
453 
454 static inline int64_t
455 _InterlockedIncrement64(int64_t volatile *Value)
456 {
457  return __atomic_add_fetch(Value, 1, __ATOMIC_SEQ_CST);
458 }
459 
460 
461 //
462 // Interlocked Decrement
463 //
464 static inline int32_t
465 _InterlockedDecrement(int32_t volatile *Value)
466 {
467  return __atomic_sub_fetch(Value, 1, __ATOMIC_SEQ_CST);
468 }
469 
470 static inline int16_t
471 _InterlockedDecrement16(int16_t volatile *Value)
472 {
473  return __atomic_sub_fetch(Value, 1, __ATOMIC_SEQ_CST);
474 }
475 
476 static inline int64_t
477 _InterlockedDecrement64(int64_t volatile *Value)
478 {
479  return __atomic_sub_fetch(Value, 1, __ATOMIC_SEQ_CST);
480 }
481 
482 
483 //
484 // Interlocked And
485 //
486 static inline int8_t
487 _InterlockedAnd8(int8_t volatile *Value, int8_t Mask)
488 {
489  return __atomic_and_fetch(Value, Mask, __ATOMIC_SEQ_CST);
490 }
491 
492 static inline int16_t
493 _InterlockedAnd16(int16_t volatile *Value, int16_t Mask)
494 {
495  return __atomic_and_fetch(Value, Mask, __ATOMIC_SEQ_CST);
496 }
497 
498 static inline int32_t
499 _InterlockedAnd(int32_t volatile *Value, int32_t Mask)
500 {
501  return __atomic_and_fetch(Value, Mask, __ATOMIC_SEQ_CST);
502 }
503 
504 static inline int64_t
505 _InterlockedAnd64(int64_t volatile *Value, int64_t Mask)
506 {
507  return __atomic_and_fetch(Value, Mask, __ATOMIC_SEQ_CST);
508 }
509 
510 
511 //
512 // Interlocked Or
513 //
514 static inline int8_t
515 _InterlockedOr8(int8_t volatile *Value, int8_t Mask)
516 {
517  return __atomic_or_fetch(Value, Mask, __ATOMIC_SEQ_CST);
518 }
519 
520 static inline int16_t
521 _InterlockedOr16(int16_t volatile *Value, int16_t Mask)
522 {
523  return __atomic_or_fetch(Value, Mask, __ATOMIC_SEQ_CST);
524 }
525 
526 static inline int32_t
527 _InterlockedOr(int32_t volatile *Value, int32_t Mask)
528 {
529  return __atomic_or_fetch(Value, Mask, __ATOMIC_SEQ_CST);
530 }
531 
532 static inline int64_t
533 _InterlockedOr64(int64_t volatile *Value, int64_t Mask)
534 {
535  return __atomic_or_fetch(Value, Mask, __ATOMIC_SEQ_CST);
536 }
537 
538 
539 //
540 // Interlocked Xor
541 //
542 static inline int8_t
543 _InterlockedXor8(int8_t volatile *Value, int8_t Mask)
544 {
545  return __atomic_xor_fetch(Value, Mask, __ATOMIC_SEQ_CST);
546 }
547 
548 static inline int16_t
549 _InterlockedXor16(int16_t volatile *Value, int16_t Mask)
550 {
551  return __atomic_xor_fetch(Value, Mask, __ATOMIC_SEQ_CST);
552 }
553 
554 static inline int32_t
555 _InterlockedXor(int32_t volatile *Value, int32_t Mask)
556 {
557  return __atomic_xor_fetch(Value, Mask, __ATOMIC_SEQ_CST);
558 }
559 
560 static inline int64_t
561 _InterlockedXor64(int64_t volatile *Value, int64_t Mask)
562 {
563  return __atomic_xor_fetch(Value, Mask, __ATOMIC_SEQ_CST);
564 }
565 
566 
567 //
568 // Interlocked Exchange
569 //
570 static inline int32_t
571 _InterlockedExchange(int32_t volatile *Target, int32_t Value)
572 {
573  __atomic_exchange(Target, &Value, &Value, __ATOMIC_SEQ_CST);
574  return Value;
575 }
576 
577 static inline int8_t
578 _InterlockedExchange8(int8_t volatile *Target, int8_t Value)
579 {
580  __atomic_exchange(Target, &Value, &Value, __ATOMIC_SEQ_CST);
581  return Value;
582 }
583 
584 static inline int16_t
585 _InterlockedExchange16(int16_t volatile *Target, int16_t Value)
586 {
587  __atomic_exchange(Target, &Value, &Value, __ATOMIC_SEQ_CST);
588  return Value;
589 }
590 
591 static inline int64_t
592 _InterlockedExchange64(int64_t volatile *Target, int64_t Value)
593 {
594  __atomic_exchange(Target, &Value, &Value, __ATOMIC_SEQ_CST);
595  return Value;
596 }
597 
598 
599 //
600 // Interlocked Compare Exchange
601 //
602 static inline int8_t
603 _InterlockedCompareExchange8(int8_t volatile *Destination, int8_t Exchange, int8_t Comparand)
604 {
605  __atomic_compare_exchange(Destination, &Comparand, &Exchange, 0,
606  __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
607  return Comparand;
608 }
609 
610 static inline int16_t
611 _InterlockedCompareExchange16(int16_t volatile *Destination, int16_t Exchange, int16_t Comparand)
612 {
613  __atomic_compare_exchange(Destination, &Comparand, &Exchange, 0,
614  __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
615  return Comparand;
616 }
617 
618 static inline int32_t
619 _InterlockedCompareExchange(int32_t volatile *Destination, int32_t Exchange, int32_t Comparand)
620 {
621  __atomic_compare_exchange(Destination, &Comparand, &Exchange, 0,
622  __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
623  return Comparand;
624 }
625 
626 static inline int64_t
627 _InterlockedCompareExchange64(int64_t volatile *Destination, int64_t Exchange, int64_t Comparand)
628 {
629  __atomic_compare_exchange(Destination, &Comparand, &Exchange, 0,
630  __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
631  return Comparand;
632 }
633 
634 static inline void *
635 _InterlockedCompareExchangePointer(void volatile **Destination, void *Exchange, void *Comparand)
636 {
637  return (void *)_InterlockedCompareExchange64((int64_t volatile *)Destination, (int64_t)Exchange, (int64_t)Comparand);
638 }
639 
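// Usage sketch (illustrative only): the compare-exchange helpers return the
// value *Destination held before the call, so a retry loop compares that
// return value against the expected one ("counter" is a placeholder
// int32_t volatile pointer).
//
//     int32_t old, desired;
//     do
//     {
//         old = *counter;
//         desired = old + 2;
//     } while (_InterlockedCompareExchange(counter, desired, old) != old);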
640 
641 //
642 // Barriers
643 //
644 static inline void
645 __attribute__((__deprecated__("use other intrinsics or C++11 atomics instead")))
646 _ReadWriteBarrier(void)
647 {
648  __asm__ volatile ("" : : : "memory");
649 }
650 
651 static inline void
652 __attribute__((__deprecated__("use other intrinsics or C++11 atomics instead")))
653 _ReadBarrier(void)
654 {
655  __asm__ volatile ("" : : : "memory");
656 }
657 
658 static inline void
659 __attribute__((__deprecated__("use other intrinsics or C++11 atomics instead")))
660 _WriteBarrier(void)
661 {
662  __asm__ volatile ("" : : : "memory");
663 }
664 
665 static inline void
666 __faststorefence(void)
667 {
668  __asm__ volatile("lock orq $0, (%%rsp)" : : : "memory");
669 }
670 
671 #endif // INT_COMPILER_MSVC
672 
673 #endif // _INTRINSICS_H_