12ced3ebb4b2f8035d8ea49f93e7930a90190270
[reactos.git] / reactos / include / crt / mingw32 / intrin_arm.h
1 /*
2 Compatibility <intrin.h> header for GCC -- GCC equivalents of intrinsic
3 Microsoft Visual C++ functions. Originally developed for the ReactOS
4 (<http://www.reactos.org/>) and TinyKrnl (<http://www.tinykrnl.org/>)
5 projects.
6
7 Copyright (c) 2006 KJK::Hyperion <hackbunny@reactos.com>
8
9 Permission is hereby granted, free of charge, to any person obtaining a
10 copy of this software and associated documentation files (the "Software"),
11 to deal in the Software without restriction, including without limitation
12 the rights to use, copy, modify, merge, publish, distribute, sublicense,
13 and/or sell copies of the Software, and to permit persons to whom the
14 Software is furnished to do so, subject to the following conditions:
15
16 The above copyright notice and this permission notice shall be included in
17 all copies or substantial portions of the Software.
18
19 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
22 AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
23 LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
24 FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
25 DEALINGS IN THE SOFTWARE.
26 */
27
#ifndef KJK_INTRIN_ARM_H_
#define KJK_INTRIN_ARM_H_

/* This header relies on GCC extensions (inline asm, __builtin_*, __sync_*). */
#ifndef __GNUC__
#error Unsupported compiler
#endif

/* Return address of the current function (GCC builtin). */
#define _ReturnAddress() (__builtin_return_address(0))
/* Full hardware memory barrier; on GCC this also acts as a compiler barrier. */
#define _ReadWriteBarrier() __sync_synchronize()
37
/* Emit the ARM YIELD hint, signalling the core that it may yield execution. */
__INTRIN_INLINE void __yield(void) { __asm__ __volatile__("yield"); }
39
/* Trigger a software breakpoint; 'value' becomes the BKPT immediate ("M" constraint). */
__INTRIN_INLINE void __break(unsigned int value) { __asm__ __volatile__("bkpt %0": : "M" (value)); }
41
42 __INTRIN_INLINE unsigned short _byteswap_ushort(unsigned short value)
43 {
44 return (value >> 8) || (value << 8);
45 }
46
47 __INTRIN_INLINE unsigned _CountLeadingZeros(long Mask)
48 {
49 return Mask ? __builtin_clz(Mask) : 32;
50 }
51
52 __INTRIN_INLINE unsigned _CountTrailingZeros(long Mask)
53 {
54 return Mask ? __builtin_ctz(Mask) : 32;
55 }
56
57 __INTRIN_INLINE unsigned char _BitScanForward(unsigned long * const Index, const unsigned long Mask)
58 {
59 *Index = __builtin_ctz(Mask);
60 return Mask ? 1 : 0;
61 }
62
63 __INTRIN_INLINE char _InterlockedCompareExchange8(volatile char * const Destination, const char Exchange, const char Comperand)
64 {
65 return __sync_val_compare_and_swap(Destination, Comperand, Exchange);
66 }
67
68 __INTRIN_INLINE short _InterlockedCompareExchange16(volatile short * const Destination, const short Exchange, const short Comperand)
69 {
70 return __sync_val_compare_and_swap(Destination, Comperand, Exchange);
71 }
72
73 __INTRIN_INLINE long _InterlockedExchangeAdd16(volatile short * const Addend, const short Value)
74 {
75 return __sync_fetch_and_add(Addend, Value);
76 }
77
/*
 * 32-bit compare-exchange for cores without LDREX/STREX, emulated with SWP.
 * Sequence: load *dest (%1); if it differs from comp (%4) skip to the end.
 * Otherwise SWP unconditionally exchanges exch (%3) into *dest, capturing the
 * swapped-out value in %0; if that value no longer matches what was loaded
 * (another writer raced in between), swap the old value back and retry from
 * the top.  Returns the value observed at *dest (== comp on success).
 * NOTE(review): the swap-back leaves a small window where the intermediate
 * value is visible to other observers — a known limitation of SWP-based CAS
 * emulation; confirm this is acceptable on the targeted cores.
 */
__INTRIN_INLINE long _InterlockedCompareExchange(volatile long * const dest, const long exch, const long comp)
{
	long a, b;

	__asm__ __volatile__ (	"0:\n\t"
				"ldr %1, [%2]\n\t"
				"cmp %1, %4\n\t"
				"bne 1f\n\t"
				"swp %0, %3, [%2]\n\t"
				"cmp %0, %1\n\t"
				"swpne %3, %0, [%2]\n\t"
				"bne 0b\n\t"
				"1:"
				: "=&r" (a), "=&r" (b)
				: "r" (dest), "r" (exch), "r" (comp)
				: "cc", "memory");

	return a;
}
97
/*
 * 64-bit compare-exchange: if *dest equals comp, store exch; return the
 * prior value of *dest.
 * FIXME (as marked below): this is a plain, NON-atomic read-modify-write —
 * no 64-bit atomic primitive is used.  Callers needing real atomicity must
 * provide external synchronization until this is implemented properly.
 */
__INTRIN_INLINE long long _InterlockedCompareExchange64(volatile long long * const dest, const long long exch, const long long comp)
{
	//
	// FIXME
	//
	long long result;
	result = *dest;
	if (*dest == comp) *dest = exch;
	return result;
}
108
109 __INTRIN_INLINE void * _InterlockedCompareExchangePointer(void * volatile * const Destination, void * const Exchange, void * const Comperand)
110 {
111 return (void*)_InterlockedCompareExchange((volatile long* const)Destination, (const long)Exchange, (const long)Comperand);
112 }
113
114
/*
 * Atomically add 'add' to *dest using SWP emulation; return the prior value.
 * Sequence: load the old value (%0), compute old+add (%1), SWP the sum into
 * *dest capturing the displaced value (%2); if the displaced value differs
 * from the loaded one (another writer raced in between), swap it back and
 * retry from the top.
 * NOTE(review): same SWP swap-back visibility window as
 * _InterlockedCompareExchange above.
 */
__INTRIN_INLINE long _InterlockedExchangeAdd(volatile long * const dest, const long add)
{
	long a, b, c;

	__asm__ __volatile__ (	"0:\n\t"
				"ldr %0, [%3]\n\t"
				"add %1, %0, %4\n\t"
				"swp %2, %1, [%3]\n\t"
				"cmp %0, %2\n\t"
				"swpne %1, %2, [%3]\n\t"
				"bne 0b"
				: "=&r" (a), "=&r" (b), "=&r" (c)
				: "r" (dest), "r" (add)
				: "cc", "memory");

	return a;
}
132
133 __INTRIN_INLINE long _InterlockedExchange(volatile long * const dest, const long exch)
134 {
135 long a;
136
137 __asm__ __volatile__ ( "swp %0, %2, [%1]"
138 : "=&r" (a)
139 : "r" (dest), "r" (exch));
140
141 return a;
142 }
143
144
145 __INTRIN_INLINE void * _InterlockedExchangePointer(void * volatile * const Target, void * const Value)
146 {
147 return (void *)_InterlockedExchange((volatile long * const)Target, (const long)Value);
148 }
149
150
151
152 __INTRIN_INLINE unsigned char _BitScanReverse(unsigned long * const Index, const unsigned long Mask)
153 {
154 *Index = 31 - __builtin_clz(Mask);
155 return Mask ? 1 : 0;
156 }
157
158 __INTRIN_INLINE char _InterlockedAnd8(volatile char * const value, const char mask)
159 {
160 char x;
161 char y;
162
163 y = *value;
164
165 do
166 {
167 x = y;
168 y = _InterlockedCompareExchange8(value, x & mask, x);
169 }
170 while(y != x);
171
172 return y;
173 }
174
175 __INTRIN_INLINE short _InterlockedAnd16(volatile short * const value, const short mask)
176 {
177 short x;
178 short y;
179
180 y = *value;
181
182 do
183 {
184 x = y;
185 y = _InterlockedCompareExchange16(value, x & mask, x);
186 }
187 while(y != x);
188
189 return y;
190 }
191
192 __INTRIN_INLINE long _InterlockedAnd(volatile long * const value, const long mask)
193 {
194 long x;
195 long y;
196
197 y = *value;
198
199 do
200 {
201 x = y;
202 y = _InterlockedCompareExchange(value, x & mask, x);
203 }
204 while(y != x);
205
206 return y;
207 }
208
209 __INTRIN_INLINE char _InterlockedOr8(volatile char * const value, const char mask)
210 {
211 char x;
212 char y;
213
214 y = *value;
215
216 do
217 {
218 x = y;
219 y = _InterlockedCompareExchange8(value, x | mask, x);
220 }
221 while(y != x);
222
223 return y;
224 }
225
226 __INTRIN_INLINE short _InterlockedOr16(volatile short * const value, const short mask)
227 {
228 short x;
229 short y;
230
231 y = *value;
232
233 do
234 {
235 x = y;
236 y = _InterlockedCompareExchange16(value, x | mask, x);
237 }
238 while(y != x);
239
240 return y;
241 }
242
243 __INTRIN_INLINE long _InterlockedOr(volatile long * const value, const long mask)
244 {
245 long x;
246 long y;
247
248 y = *value;
249
250 do
251 {
252 x = y;
253 y = _InterlockedCompareExchange(value, x | mask, x);
254 }
255 while(y != x);
256
257 return y;
258 }
259
260 __INTRIN_INLINE char _InterlockedXor8(volatile char * const value, const char mask)
261 {
262 char x;
263 char y;
264
265 y = *value;
266
267 do
268 {
269 x = y;
270 y = _InterlockedCompareExchange8(value, x ^ mask, x);
271 }
272 while(y != x);
273
274 return y;
275 }
276
277 __INTRIN_INLINE short _InterlockedXor16(volatile short * const value, const short mask)
278 {
279 short x;
280 short y;
281
282 y = *value;
283
284 do
285 {
286 x = y;
287 y = _InterlockedCompareExchange16(value, x ^ mask, x);
288 }
289 while(y != x);
290
291 return y;
292 }
293
294 __INTRIN_INLINE long _InterlockedXor(volatile long * const value, const long mask)
295 {
296 long x;
297 long y;
298
299 y = *value;
300
301 do
302 {
303 x = y;
304 y = _InterlockedCompareExchange(value, x ^ mask, x);
305 }
306 while(y != x);
307
308 return y;
309 }
310
311 __INTRIN_INLINE long _InterlockedDecrement(volatile long * const lpAddend)
312 {
313 return _InterlockedExchangeAdd(lpAddend, -1) - 1;
314 }
315
316 __INTRIN_INLINE long _InterlockedIncrement(volatile long * const lpAddend)
317 {
318 return _InterlockedExchangeAdd(lpAddend, 1) + 1;
319 }
320
321 __INTRIN_INLINE long _InterlockedDecrement16(volatile short * const lpAddend)
322 {
323 return _InterlockedExchangeAdd16(lpAddend, -1) - 1;
324 }
325
326 __INTRIN_INLINE long _InterlockedIncrement16(volatile short * const lpAddend)
327 {
328 return _InterlockedExchangeAdd16(lpAddend, 1) + 1;
329 }
330
/*
 * Add Value to a 64-bit statistics counter and return Value unchanged.
 * NOTE(review): this is a plain, non-atomic read-modify-write of *Addend —
 * confirm whether the callers tolerate lost updates on SMP, or whether this
 * needs the same FIXME treatment as _InterlockedCompareExchange64 above.
 */
__INTRIN_INLINE long _InterlockedAddLargeStatistic(volatile long long * const Addend, const long Value)
{
	*Addend += Value;
	return Value;
}
336
/* Disable IRQ interrupts (CPSID i); the "memory" clobber keeps the compiler
   from moving memory accesses across the interrupt-state change. */
__INTRIN_INLINE void _disable(void)
{
	__asm__ __volatile__
	(
	 "cpsid i @ __cli" : : : "memory", "cc"
	);
}
344
/* Enable IRQ interrupts (CPSIE i); the "memory" clobber keeps the compiler
   from moving memory accesses across the interrupt-state change. */
__INTRIN_INLINE void _enable(void)
{
	__asm__ __volatile__
	(
	 "cpsie i @ __sti" : : : "memory", "cc"
	);
}
352
353 __INTRIN_INLINE unsigned char _interlockedbittestandset(volatile long * a, const long b)
354 {
355 return (_InterlockedOr(a, 1 << b) >> b) & 1;
356 }
357
358 __INTRIN_INLINE unsigned char _interlockedbittestandreset(volatile long * a, const long b)
359 {
360 return (_InterlockedAnd(a, ~(1 << b)) >> b) & 1;
361 }
362
363 #ifndef __MSVCRT__
364 __INTRIN_INLINE unsigned int _rotl(const unsigned int value, int shift)
365 {
366 return (((value) << ((int)(shift))) | ((value) >> (32 - (int)(shift))));
367 }
368 #endif
369
/*
 * Count leading zeros with the ARM CLZ instruction (ARMv5 and later).
 * GCC statement-expression macro; evaluates 'a' exactly once.
 * NOTE(review): relies on the ULONG typedef being in scope at every
 * expansion site — confirm against the including headers.
 */
#define _clz(a) \
({ ULONG __value, __arg = (a); \
asm ("clz\t%0, %1": "=r" (__value): "r" (__arg)); \
__value; })
374
375 #endif
376 /* EOF */