implement _byteswap_ushort for arm
[reactos.git] / reactos / include / crt / mingw32 / intrin_arm.h
1 /*
2 Compatibility <intrin.h> header for GCC -- GCC equivalents of intrinsic
3 Microsoft Visual C++ functions. Originally developed for the ReactOS
4 (<http://www.reactos.org/>) and TinyKrnl (<http://www.tinykrnl.org/>)
5 projects.
6
7 Copyright (c) 2006 KJK::Hyperion <hackbunny@reactos.com>
8
9 Permission is hereby granted, free of charge, to any person obtaining a
10 copy of this software and associated documentation files (the "Software"),
11 to deal in the Software without restriction, including without limitation
12 the rights to use, copy, modify, merge, publish, distribute, sublicense,
13 and/or sell copies of the Software, and to permit persons to whom the
14 Software is furnished to do so, subject to the following conditions:
15
16 The above copyright notice and this permission notice shall be included in
17 all copies or substantial portions of the Software.
18
19 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
22 AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
23 LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
24 FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
25 DEALINGS IN THE SOFTWARE.
26 */
27
28 #ifndef KJK_INTRIN_ARM_H_
29 #define KJK_INTRIN_ARM_H_
30
31 #ifndef __GNUC__
32 #error Unsupported compiler
33 #endif
34
/* Return address of the current function's caller (MSVC _ReturnAddress). */
#define _ReturnAddress() (__builtin_return_address(0))
/* NOTE(review): MSVC's _ReadWriteBarrier is a compiler-only barrier; this
   maps to __sync_synchronize(), a full hardware fence — stronger, but safe. */
#define _ReadWriteBarrier() __sync_synchronize()
37
38 __INTRIN_INLINE unsigned short _byteswap_ushort(unsigned short value)
39 {
40 return __builtin_bswap32(value) >> 16;
41 }
42
43 __INTRIN_INLINE unsigned _CountLeadingZeros(long Mask)
44 {
45 return Mask ? __builtin_clz(Mask) : 32;
46 }
47
48 __INTRIN_INLINE unsigned _CountTrailingZeros(long Mask)
49 {
50 return Mask ? __builtin_ctz(Mask) : 32;
51 }
52
53 __INTRIN_INLINE unsigned char _BitScanForward(unsigned long * const Index, const unsigned long Mask)
54 {
55 *Index = __builtin_ctz(Mask);
56 return Mask ? 1 : 0;
57 }
58
59 __INTRIN_INLINE char _InterlockedCompareExchange8(volatile char * const Destination, const char Exchange, const char Comperand)
60 {
61 return __sync_val_compare_and_swap(Destination, Comperand, Exchange);
62 }
63
64 __INTRIN_INLINE short _InterlockedCompareExchange16(volatile short * const Destination, const short Exchange, const short Comperand)
65 {
66 return __sync_val_compare_and_swap(Destination, Comperand, Exchange);
67 }
68
69 __INTRIN_INLINE long _InterlockedExchangeAdd16(volatile short * const Addend, const short Value)
70 {
71 return __sync_fetch_and_add(Addend, Value);
72 }
73
74 __INTRIN_INLINE long _InterlockedCompareExchange(volatile long * const dest, const long exch, const long comp)
75 {
76 long a, b;
77
78 __asm__ __volatile__ ( "0:\n\t"
79 "ldr %1, [%2]\n\t"
80 "cmp %1, %4\n\t"
81 "bne 1f\n\t"
82 "swp %0, %3, [%2]\n\t"
83 "cmp %0, %1\n\t"
84 "swpne %3, %0, [%2]\n\t"
85 "bne 0b\n\t"
86 "1:"
87 : "=&r" (a), "=&r" (b)
88 : "r" (dest), "r" (exch), "r" (comp)
89 : "cc", "memory");
90
91 return a;
92 }
93
/* MSVC _InterlockedCompareExchange64.
 * FIXME: this plain read/compare/store sequence is NOT atomic — another
 * thread or an interrupt can modify *dest between the read and the write.
 * Kept as-is pending a real 64-bit CAS for this architecture. */
__INTRIN_INLINE long long _InterlockedCompareExchange64(volatile long long * const dest, const long long exch, const long long comp)
{
    /* Return the value observed before the (non-atomic) exchange attempt. */
    long long result;
    result = *dest;
    if (*dest == comp) *dest = exch;
    return result;
}
104
105 __INTRIN_INLINE void * _InterlockedCompareExchangePointer(void * volatile * const Destination, void * const Exchange, void * const Comperand)
106 {
107 return (void*)_InterlockedCompareExchange((volatile long* const)Destination, (const long)Exchange, (const long)Comperand);
108 }
109
110
/* MSVC _InterlockedExchangeAdd, emulated with SWP (no LDREX/STREX):
 *   1. load the current value of *dest
 *   2. compute value + add and SWP it into *dest, fetching what was there
 *   3. if the fetched value is not the one loaded in step 1, another writer
 *      interfered: SWP the interloper's value back and retry from the top
 * Returns the value *dest held before the addition.
 * NOTE(review): like the other SWP loops in this file, a lost race briefly
 * publishes a stale sum before the store-back — only safe on uniprocessor
 * targets; confirm for the platform. */
__INTRIN_INLINE long _InterlockedExchangeAdd(volatile long * const dest, const long add)
{
    long a, b, c;

    __asm__ __volatile__ ( "0:\n\t"
                           "ldr %0, [%3]\n\t"
                           "add %1, %0, %4\n\t"
                           "swp %2, %1, [%3]\n\t"
                           "cmp %0, %2\n\t"
                           "swpne %1, %2, [%3]\n\t"
                           "bne 0b"
                           : "=&r" (a), "=&r" (b), "=&r" (c)
                           : "r" (dest), "r" (add)
                           : "cc", "memory");

    return a;
}
128
129 __INTRIN_INLINE long _InterlockedExchange(volatile long * const dest, const long exch)
130 {
131 long a;
132
133 __asm__ __volatile__ ( "swp %0, %2, [%1]"
134 : "=&r" (a)
135 : "r" (dest), "r" (exch));
136
137 return a;
138 }
139
140
141 __INTRIN_INLINE void * _InterlockedExchangePointer(void * volatile * const Target, void * const Value)
142 {
143 return (void *)_InterlockedExchange((volatile long * const)Target, (const long)Value);
144 }
145
146
147
148 __INTRIN_INLINE unsigned char _BitScanReverse(unsigned long * const Index, const unsigned long Mask)
149 {
150 *Index = 31 - __builtin_clz(Mask);
151 return Mask ? 1 : 0;
152 }
153
154 __INTRIN_INLINE char _InterlockedAnd8(volatile char * const value, const char mask)
155 {
156 char x;
157 char y;
158
159 y = *value;
160
161 do
162 {
163 x = y;
164 y = _InterlockedCompareExchange8(value, x & mask, x);
165 }
166 while(y != x);
167
168 return y;
169 }
170
171 __INTRIN_INLINE short _InterlockedAnd16(volatile short * const value, const short mask)
172 {
173 short x;
174 short y;
175
176 y = *value;
177
178 do
179 {
180 x = y;
181 y = _InterlockedCompareExchange16(value, x & mask, x);
182 }
183 while(y != x);
184
185 return y;
186 }
187
188 __INTRIN_INLINE long _InterlockedAnd(volatile long * const value, const long mask)
189 {
190 long x;
191 long y;
192
193 y = *value;
194
195 do
196 {
197 x = y;
198 y = _InterlockedCompareExchange(value, x & mask, x);
199 }
200 while(y != x);
201
202 return y;
203 }
204
205 __INTRIN_INLINE char _InterlockedOr8(volatile char * const value, const char mask)
206 {
207 char x;
208 char y;
209
210 y = *value;
211
212 do
213 {
214 x = y;
215 y = _InterlockedCompareExchange8(value, x | mask, x);
216 }
217 while(y != x);
218
219 return y;
220 }
221
222 __INTRIN_INLINE short _InterlockedOr16(volatile short * const value, const short mask)
223 {
224 short x;
225 short y;
226
227 y = *value;
228
229 do
230 {
231 x = y;
232 y = _InterlockedCompareExchange16(value, x | mask, x);
233 }
234 while(y != x);
235
236 return y;
237 }
238
239 __INTRIN_INLINE long _InterlockedOr(volatile long * const value, const long mask)
240 {
241 long x;
242 long y;
243
244 y = *value;
245
246 do
247 {
248 x = y;
249 y = _InterlockedCompareExchange(value, x | mask, x);
250 }
251 while(y != x);
252
253 return y;
254 }
255
256 __INTRIN_INLINE char _InterlockedXor8(volatile char * const value, const char mask)
257 {
258 char x;
259 char y;
260
261 y = *value;
262
263 do
264 {
265 x = y;
266 y = _InterlockedCompareExchange8(value, x ^ mask, x);
267 }
268 while(y != x);
269
270 return y;
271 }
272
273 __INTRIN_INLINE short _InterlockedXor16(volatile short * const value, const short mask)
274 {
275 short x;
276 short y;
277
278 y = *value;
279
280 do
281 {
282 x = y;
283 y = _InterlockedCompareExchange16(value, x ^ mask, x);
284 }
285 while(y != x);
286
287 return y;
288 }
289
290 __INTRIN_INLINE long _InterlockedXor(volatile long * const value, const long mask)
291 {
292 long x;
293 long y;
294
295 y = *value;
296
297 do
298 {
299 x = y;
300 y = _InterlockedCompareExchange(value, x ^ mask, x);
301 }
302 while(y != x);
303
304 return y;
305 }
306
307 __INTRIN_INLINE long _InterlockedDecrement(volatile long * const lpAddend)
308 {
309 return _InterlockedExchangeAdd(lpAddend, -1) - 1;
310 }
311
312 __INTRIN_INLINE long _InterlockedIncrement(volatile long * const lpAddend)
313 {
314 return _InterlockedExchangeAdd(lpAddend, 1) + 1;
315 }
316
317 __INTRIN_INLINE long _InterlockedDecrement16(volatile short * const lpAddend)
318 {
319 return _InterlockedExchangeAdd16(lpAddend, -1) - 1;
320 }
321
322 __INTRIN_INLINE long _InterlockedIncrement16(volatile short * const lpAddend)
323 {
324 return _InterlockedExchangeAdd16(lpAddend, 1) + 1;
325 }
326
/* Add Value to a 64-bit statistics counter and return Value unchanged.
 * NOTE(review): this plain read-modify-write is NOT atomic on this
 * architecture — another thread or interrupt can race the 64-bit update;
 * confirm whether callers tolerate approximate statistics. */
__INTRIN_INLINE long _InterlockedAddLargeStatistic(volatile long long * const Addend, const long Value)
{
    *Addend += Value;
    return Value;
}
332
/* Disable IRQ interrupts (MSVC/DDK _disable) via "cpsid i".
 * The "memory"/"cc" clobbers stop the compiler from moving memory accesses
 * across the interrupt-state change.
 * NOTE(review): CPS requires a privileged mode and is an ARMv6+ encoding —
 * confirm against the minimum supported architecture. */
__INTRIN_INLINE void _disable(void)
{
    __asm__ __volatile__
    (
     "cpsid i @ __cli" : : : "memory", "cc"
    );
}
340
/* Enable IRQ interrupts (MSVC/DDK _enable) via "cpsie i".
 * The "memory"/"cc" clobbers stop the compiler from moving memory accesses
 * across the interrupt-state change.
 * NOTE(review): CPS requires a privileged mode and is an ARMv6+ encoding —
 * confirm against the minimum supported architecture. */
__INTRIN_INLINE void _enable(void)
{
    __asm__ __volatile__
    (
     "cpsie i @ __sti" : : : "memory", "cc"
    );
}
348
349 __INTRIN_INLINE unsigned char _interlockedbittestandset(volatile long * a, const long b)
350 {
351 return (_InterlockedOr(a, 1 << b) >> b) & 1;
352 }
353
354 __INTRIN_INLINE unsigned char _interlockedbittestandreset(volatile long * a, const long b)
355 {
356 return (_InterlockedAnd(a, ~(1 << b)) >> b) & 1;
357 }
358
359 #ifndef __MSVCRT__
360 __INTRIN_INLINE unsigned int _rotl(const unsigned int value, int shift)
361 {
362 return (((value) << ((int)(shift))) | ((value) >> (32 - (int)(shift))));
363 }
364 #endif
365
/* Count leading zeros with the ARM CLZ instruction (ARMv5+).
 * GNU statement expression: evaluates (a) exactly once into __arg.
 * NOTE(review): CLZ itself defines clz(0) == 32, so unlike the
 * _CountLeadingZeros builtin-based helper above, no zero guard is needed.
 * ULONG comes from the NT headers that include this file. */
#define _clz(a) \
({ ULONG __value, __arg = (a); \
asm ("clz\t%0, %1": "=r" (__value): "r" (__arg)); \
__value; })
370
371 #endif
372 /* EOF */