cfc2a280dcaa2276357a837d57a5429896f0818e
[reactos.git] / reactos / include / psdk / intrin_ppc.h
1 /*
2 Compatibility <intrin.h> header for GCC -- GCC equivalents of intrinsic
3 Microsoft Visual C++ functions. Originally developed for the ReactOS
4 (<http://www.reactos.org/>) and TinyKrnl (<http://www.tinykrnl.org/>)
5 projects.
6
7 Copyright (c) 2006 KJK::Hyperion <hackbunny@reactos.com>
8
9 Permission is hereby granted, free of charge, to any person obtaining a
10 copy of this software and associated documentation files (the "Software"),
11 to deal in the Software without restriction, including without limitation
12 the rights to use, copy, modify, merge, publish, distribute, sublicense,
13 and/or sell copies of the Software, and to permit persons to whom the
14 Software is furnished to do so, subject to the following conditions:
15
16 The above copyright notice and this permission notice shall be included in
17 all copies or substantial portions of the Software.
18
19 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
22 AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
23 LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
24 FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
25 DEALINGS IN THE SOFTWARE.
26 */
27
28 #ifndef KJK_INTRIN_PPC_H_
29 #define KJK_INTRIN_PPC_H_
30
31 //#define PPC_QUAL static __inline__ __attribute__((always_inline))
32 #define PPC_QUAL extern __inline__
33
34 #ifndef __GNUC__
35 #error Unsupported compiler
36 #endif
37
38 /*** Stack frame juggling ***/
39 #define _ReturnAddress() (__builtin_return_address(0))
40 #define _AddressOfReturnAddress() (&(((void **)(__builtin_frame_address(0)))[1]))
41 /* TODO: __getcallerseflags but how??? */
42
43
44 /*** Atomic operations ***/
45 /* TODO: _ReadBarrier */
46 /* TODO: _WriteBarrier */
47
48 #if (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__) > 40100
49 #define _ReadWriteBarrier() __sync_synchronize()
50 #else
51 /* TODO: _ReadWriteBarrier() */
52 #endif
53
54 #if (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__) > 40100
55
56 PPC_QUAL char _InterlockedCompareExchange8(volatile char * const Destination, const char Exchange, const char Comperand)
57 {
58 return __sync_val_compare_and_swap(Destination, Comperand, Exchange);
59 }
60
61 PPC_QUAL short _InterlockedCompareExchange16(volatile short * const Destination, const short Exchange, const short Comperand)
62 {
63 return __sync_val_compare_and_swap(Destination, Comperand, Exchange);
64 }
65
66 PPC_QUAL long _InterlockedCompareExchange(volatile long * const Destination, const long Exchange, const long Comperand)
67 {
68 return __sync_val_compare_and_swap(Destination, Comperand, Exchange);
69 }
70
71 PPC_QUAL long long _InterlockedCompareExchange64(volatile long long * const Destination, const long long Exchange, const long long Comperand)
72 {
73 return __sync_val_compare_and_swap(Destination, Comperand, Exchange);
74 }
75
76 PPC_QUAL void * _InterlockedCompareExchangePointer(void * volatile * const Destination, void * const Exchange, void * const Comperand)
77 {
78 return __sync_val_compare_and_swap(Destination, Comperand, Exchange);
79 }
80
81 PPC_QUAL long _InterlockedExchange(volatile long * const Target, const long Value)
82 {
83 /* NOTE: __sync_lock_test_and_set would be an acquire barrier, so we force a full barrier */
84 __sync_synchronize();
85 return __sync_lock_test_and_set(Target, Value);
86 }
87
88 PPC_QUAL void * _InterlockedExchangePointer(void * volatile * const Target, void * const Value)
89 {
90 /* NOTE: ditto */
91 __sync_synchronize();
92 return __sync_lock_test_and_set(Target, Value);
93 }
94
95 PPC_QUAL long _InterlockedExchangeAdd(volatile long * const Addend, const long Value)
96 {
97 return __sync_fetch_and_add(Addend, Value);
98 }
99
100 PPC_QUAL char _InterlockedAnd8(volatile char * const value, const char mask)
101 {
102 return __sync_fetch_and_and(value, mask);
103 }
104
105 PPC_QUAL short _InterlockedAnd16(volatile short * const value, const short mask)
106 {
107 return __sync_fetch_and_and(value, mask);
108 }
109
110 PPC_QUAL long _InterlockedAnd(volatile long * const value, const long mask)
111 {
112 return __sync_fetch_and_and(value, mask);
113 }
114
115 PPC_QUAL char _InterlockedOr8(volatile char * const value, const char mask)
116 {
117 return __sync_fetch_and_or(value, mask);
118 }
119
120 PPC_QUAL short _InterlockedOr16(volatile short * const value, const short mask)
121 {
122 return __sync_fetch_and_or(value, mask);
123 }
124
125 PPC_QUAL long _InterlockedOr(volatile long * const value, const long mask)
126 {
127 return __sync_fetch_and_or(value, mask);
128 }
129
130 PPC_QUAL char _InterlockedXor8(volatile char * const value, const char mask)
131 {
132 return __sync_fetch_and_xor(value, mask);
133 }
134
135 PPC_QUAL short _InterlockedXor16(volatile short * const value, const short mask)
136 {
137 return __sync_fetch_and_xor(value, mask);
138 }
139
140 PPC_QUAL long _InterlockedXor(volatile long * const value, const long mask)
141 {
142 return __sync_fetch_and_xor(value, mask);
143 }
144
145 #else
146
/* 8-bit compare-and-swap built on a lbarx/stbcx. reservation pair
 * (pre-GCC-4.1 fallback).
 * NOTE(review): "subf. %0,%2,%0" leaves (loaded - Comperand) in retval,
 * so on success this returns 0 and on failure the *difference*, not the
 * prior value of *Destination as the MSVC intrinsic specifies -- verify
 * callers.  lbarx also requires a core implementing the byte reservation
 * forms. */
PPC_QUAL char _InterlockedCompareExchange8(volatile char * const Destination, const char Exchange, const char Comperand)
{
    char retval = Comperand;
    __asm__ __volatile__ (
        "sync\n"
        "1: lbarx %0,0,%1\n"      /* load byte and take reservation */
        " subf. %0,%2,%0\n"       /* compare by subtracting Comperand */
        " bne 2f\n"               /* mismatch: give up */
        " stbcx. %3,0,%1\n"       /* conditional store of Exchange */
        " bne- 1b\n"              /* reservation lost: retry */
        "2: isync"
        : "=b" (retval)
        : "b" (Destination), "r" (Comperand), "r" (Exchange)
        : "cr0", "memory");
    return retval;
}
163
/* 16-bit compare-and-swap built on a lharx/sthcx. reservation pair
 * (pre-GCC-4.1 fallback).
 * NOTE(review): same return-value caveat as the 8-bit variant (returns
 * loaded - Comperand, not the prior value).  Also "subf." sets cr0 but
 * cr0 is NOT in the clobber list here, although the 8-bit variant lists
 * it -- confirm and align. */
PPC_QUAL short _InterlockedCompareExchange16(volatile short * const Destination, const short Exchange, const short Comperand)
{
    short retval = Comperand;
    __asm__ __volatile__ (
        "sync\n"
        "1: lharx %0,0,%1\n"      /* load halfword and take reservation */
        " subf. %0,%2,%0\n"       /* compare by subtracting Comperand */
        " bne 2f\n"
        " sthcx. %3,0,%1\n"       /* conditional store of Exchange */
        " bne- 1b\n"
        "2: isync"
        : "=r" (retval)
        : "r" (Destination), "r" (Comperand), "r" (Exchange)
        : "memory");
    return retval;
}
180
181 PPC_QUAL long _InterlockedCompareExchange(volatile long * const Destination, const long Exchange, const long Comperand)
182 {
183 short retval = Comperand;
184 __asm__ __volatile__ (
185 "sync\n"
186 "1: lwarx %0,0,%1\n"
187 " subf. %0,%2,%0\n"
188 " bne 2f\n"
189 " stwcx. %3,0,%1\n"
190 " bne- 1b\n"
191 "2: isync"
192 : "=r" (retval)
193 : "r" (Destination), "r" (Comperand), "r" (Exchange)
194 : "memory");
195 return retval;
196 }
197
/* WARNING: non-functional stub.  This never reads, compares or updates
 * *Destination -- it merely splits Comperand into halves and reassembles
 * them, so it always returns Comperand and performs no atomic operation.
 * The #if 0 block is leftover x86 cmpxchg8b code.  32-bit PowerPC has no
 * 64-bit reservation pair, so a real implementation would need interrupt
 * masking or a lock. */
PPC_QUAL long long _InterlockedCompareExchange64(volatile long long * const Destination, const long long Exchange, const long long Comperand)
{
    unsigned long lo32Retval = (unsigned long)((Comperand >> 0) & 0xFFFFFFFF);
    long hi32Retval = (unsigned long)((Comperand >> 32) & 0xFFFFFFFF);

    /* NOTE(review): the Exchange halves below are computed but unused. */
    unsigned long lo32Exchange = (unsigned long)((Exchange >> 0) & 0xFFFFFFFF);
    long hi32Exchange = (unsigned long)((Exchange >> 32) & 0xFFFFFFFF);

#if 0
    /* Disabled x86 implementation, kept for reference only. */
    __asm__
    (
        "cmpxchg8b %[Destination]" :
        "a" (lo32Retval), "d" (hi32Retval) :
        [Destination] "rm" (Destination), "b" (lo32Exchange), "c" (hi32Exchange) :
        "memory"
    );
#endif
    {
        /* Reassemble the two 32-bit halves into a 64-bit result using
         * old-style GCC designated initializers. */
        union u_
        {
            long long ll;
            struct s_
            {
                unsigned long lo32;
                long hi32;
            }
            s;
        }
        u = { s : { lo32 : lo32Retval, hi32 : hi32Retval } };

        return u.ll;
    }
}
231
232 PPC_QUAL void * _InterlockedCompareExchangePointer(void * volatile * const Destination, void * const Exchange, void * const Comperand)
233 {
234 return (void *)_InterlockedCompareExchange
235 ((long *)Destination, (long) Exchange, (long) Comperand);
236 }
237
/* Atomic 32-bit exchange via lwarx/stwcx.; returns the prior value of
 * *Target.  The leading sync orders prior stores before the swap.
 * NOTE(review): there is no trailing isync, so this has no acquire
 * fence after the swap, unlike the CAS above -- confirm intended. */
PPC_QUAL long _InterlockedExchange(volatile long * const Target, const long Value)
{
    long retval;
    __asm__ __volatile__ (
        "sync\n"
        "1: lwarx %0,0,%1\n"      /* load old value, take reservation */
        " stwcx. %2,0,%1\n"       /* conditional store of Value */
        " bne- 1b\n"              /* reservation lost: retry */
        : "=b" (retval)
        : "b" (Target), "b" (Value)
        : "cr0", "memory");
    return retval;
}
251
/* Atomic pointer exchange via lwarx/stwcx. (pointers are 32-bit here);
 * returns the prior pointer.  Same structure -- and same missing
 * trailing isync -- as _InterlockedExchange above. */
PPC_QUAL void * _InterlockedExchangePointer(void * volatile * const Target, void * const Value)
{
    void * retval;
    __asm__ __volatile__ (
        "sync\n"
        "1: lwarx %0,0,%1\n"      /* load old pointer, take reservation */
        " stwcx. %2,0,%1\n"       /* conditional store of Value */
        " bne- 1b\n"              /* reservation lost: retry */
        : "=b" (retval)
        : "b" (Target), "b" (Value)
        : "cr0", "memory");
    return retval;
}
265
266 PPC_QUAL long _InterlockedExchangeAdd(volatile long * const Addend, const long Value)
267 {
268 long x;
269 long y = *Addend;
270 long addend = y;
271
272 do
273 {
274 x = y;
275 y = _InterlockedCompareExchange(Addend, addend + Value, x);
276 }
277 while(y != x);
278
279 return y;
280 }
281
282 PPC_QUAL char _InterlockedAnd8(volatile char * const value, const char mask)
283 {
284 char x;
285 char y;
286
287 y = *value;
288
289 do
290 {
291 x = y;
292 y = _InterlockedCompareExchange8(value, x & mask, x);
293 }
294 while(y != x);
295
296 return y;
297 }
298
299 PPC_QUAL short _InterlockedAnd16(volatile short * const value, const short mask)
300 {
301 short x;
302 short y;
303
304 y = *value;
305
306 do
307 {
308 x = y;
309 y = _InterlockedCompareExchange16(value, x & mask, x);
310 }
311 while(y != x);
312
313 return y;
314 }
315
316 PPC_QUAL long _InterlockedAnd(volatile long * const value, const long mask)
317 {
318 long x;
319 long y;
320
321 y = *value;
322
323 do
324 {
325 x = y;
326 y = _InterlockedCompareExchange(value, x & mask, x);
327 }
328 while(y != x);
329
330 return y;
331 }
332
333 PPC_QUAL char _InterlockedOr8(volatile char * const value, const char mask)
334 {
335 char x;
336 char y;
337
338 y = *value;
339
340 do
341 {
342 x = y;
343 y = _InterlockedCompareExchange8(value, x | mask, x);
344 }
345 while(y != x);
346
347 return y;
348 }
349
350 PPC_QUAL short _InterlockedOr16(volatile short * const value, const short mask)
351 {
352 short x;
353 short y;
354
355 y = *value;
356
357 do
358 {
359 x = y;
360 y = _InterlockedCompareExchange16(value, x | mask, x);
361 }
362 while(y != x);
363
364 return y;
365 }
366
367 PPC_QUAL long _InterlockedOr(volatile long * const value, const long mask)
368 {
369 long x;
370 long y;
371
372 y = *value;
373
374 do
375 {
376 x = y;
377 y = _InterlockedCompareExchange(value, x | mask, x);
378 }
379 while(y != x);
380
381 return y;
382 }
383
384 PPC_QUAL char _InterlockedXor8(volatile char * const value, const char mask)
385 {
386 char x;
387 char y;
388
389 y = *value;
390
391 do
392 {
393 x = y;
394 y = _InterlockedCompareExchange8(value, x ^ mask, x);
395 }
396 while(y != x);
397
398 return y;
399 }
400
401 PPC_QUAL short _InterlockedXor16(volatile short * const value, const short mask)
402 {
403 short x;
404 short y;
405
406 y = *value;
407
408 do
409 {
410 x = y;
411 y = _InterlockedCompareExchange16(value, x ^ mask, x);
412 }
413 while(y != x);
414
415 return y;
416 }
417
418 PPC_QUAL long _InterlockedXor(volatile long * const value, const long mask)
419 {
420 long x;
421 long y;
422
423 y = *value;
424
425 do
426 {
427 x = y;
428 y = _InterlockedCompareExchange(value, x ^ mask, x);
429 }
430 while(y != x);
431
432 return y;
433 }
434
435 PPC_QUAL unsigned char _interlockedbittestandreset(volatile long * const a, const long b)
436 {
437 long x;
438 long y;
439 long mask = ~(1<<b);
440
441 y = *a;
442
443 do
444 {
445 x = y;
446 y = _InterlockedCompareExchange(a, x & mask, x);
447 }
448 while(y != x);
449
450 return (y & ~mask) != 0;
451 }
452
453 PPC_QUAL unsigned char _interlockedbittestandset(volatile long * const a, const long b)
454 {
455 long x;
456 long y;
457 long mask = 1<<b;
458
459 y = *a;
460
461 do
462 {
463 x = y;
464 y = _InterlockedCompareExchange(a, x | mask, x);
465 }
466 while(y != x);
467
468 return (y & ~mask) != 0;
469 }
470 #endif
471
472 PPC_QUAL long _InterlockedDecrement(volatile long * const lpAddend)
473 {
474 return _InterlockedExchangeAdd(lpAddend, -1) - 1;
475 }
476
477 PPC_QUAL long _InterlockedIncrement(volatile long * const lpAddend)
478 {
479 return _InterlockedExchangeAdd(lpAddend, 1) + 1;
480 }
481
482 /*** String operations ***/
483 /* NOTE: we don't set a memory clobber in the __stosX functions because Visual C++ doesn't */
484 /* Note that the PPC store multiple operations may raise an exception in LE
485 * mode */
486 PPC_QUAL void __stosb(unsigned char * Dest, const unsigned char Data, unsigned long Count)
487 {
488 memset(Dest, Data, Count);
489 }
490
491 PPC_QUAL void __stosw(unsigned short * Dest, const unsigned short Data, unsigned long Count)
492 {
493 while(Count--)
494 *Dest++ = Data;
495 }
496
497 PPC_QUAL void __stosd(unsigned long * Dest, const unsigned long Data, unsigned long Count)
498 {
499 while(Count--)
500 *Dest++ = Data;
501 }
502
503 PPC_QUAL void __movsb(unsigned char * Destination, const unsigned char * Source, unsigned long Count)
504 {
505 memcpy(Destination, Source, Count);
506 }
507
508 PPC_QUAL void __movsw(unsigned short * Destination, const unsigned short * Source, unsigned long Count)
509 {
510 memcpy(Destination, Source, Count * sizeof(*Source));
511 }
512
513 PPC_QUAL void __movsd(unsigned long * Destination, const unsigned long * Source, unsigned long Count)
514 {
515 memcpy(Destination, Source, Count * sizeof(*Source));
516 }
517
518
519 /*** FS segment addressing ***/
520 /* On PowerPC, r13 points to TLS data, including the TEB at 0(r13) from what I
521 * can tell */
522 PPC_QUAL void __writefsbyte(const unsigned long Offset, const unsigned char Data)
523 {
524 char *addr;
525 __asm__("\tadd %0,13,%1\n\tstb %2,0(%0)" : "=r" (addr) : "r" (Offset), "r" (Data));
526 }
527
528 PPC_QUAL void __writefsword(const unsigned long Offset, const unsigned short Data)
529 {
530 char *addr;
531 __asm__("\tadd %0,13,%1\n\tsth %2,0(%0)" : "=r" (addr) : "r" (Offset), "r" (Data));
532 }
533
534 PPC_QUAL void __writefsdword(const unsigned long Offset, const unsigned long Data)
535 {
536 char *addr;
537 __asm__("\tadd %0,13,%1\n\tstw %2,0(%0)" : "=r" (addr) : "r" (Offset), "r" (Data));
538 }
539
/* Load one byte at Offset past r13 (the TLS/TEB base register).
 * NOTE(review): result is declared unsigned short for a byte load
 * (harmless, lbz zero-extends), and the asm has no "memory" clobber,
 * so GCC may CSE repeated reads -- confirm acceptable. */
PPC_QUAL unsigned char __readfsbyte(const unsigned long Offset)
{
    unsigned short result;
    __asm__("\tadd 7,13,%1\n"
            "\tlbz %0,0(7)\n"
            : "=r" (result)
            : "r" (Offset)
            : "r7");
    return result;
}
550
/* Load one halfword at Offset past r13 (the TLS/TEB base register).
 * NOTE(review): no "memory" clobber, so repeated reads may be CSE'd. */
PPC_QUAL unsigned short __readfsword(const unsigned long Offset)
{
    unsigned short result;
    __asm__("\tadd 7,13,%1\n"
            "\tlhz %0,0(7)\n"
            : "=r" (result)
            : "r" (Offset)
            : "r7");
    return result;
}
561
/* Load one word at Offset past r13 (the TLS/TEB base register).
 * NOTE(review): no "memory" clobber, so repeated reads may be CSE'd. */
PPC_QUAL unsigned long __readfsdword(const unsigned long Offset)
{
    unsigned long result;
    __asm__("\tadd 7,13,%1\n"
            "\tlwz %0,0(7)\n"
            : "=r" (result)
            : "r" (Offset)
            : "r7");
    return result;
}
572
573 PPC_QUAL void __incfsbyte(const unsigned long Offset)
574 {
575 __writefsbyte(Offset, __readfsbyte(Offset)+1);
576 }
577
578 PPC_QUAL void __incfsword(const unsigned long Offset)
579 {
580 __writefsword(Offset, __readfsword(Offset)+1);
581 }
582
583 PPC_QUAL void __incfsdword(const unsigned long Offset)
584 {
585 __writefsdword(Offset, __readfsdword(Offset)+1);
586 }
587
588 /* NOTE: the bizarre implementation of __addfsxxx mimics the broken Visual C++ behavior */
589 /* PPC Note: Not sure about the bizarre behavior. We'll try to emulate it later */
590 PPC_QUAL void __addfsbyte(const unsigned long Offset, const unsigned char Data)
591 {
592 __writefsbyte(Offset, __readfsbyte(Offset) + Data);
593 }
594
595 PPC_QUAL void __addfsword(const unsigned long Offset, const unsigned short Data)
596 {
597 __writefsword(Offset, __readfsword(Offset) + Data);
598 }
599
600 PPC_QUAL void __addfsdword(const unsigned long Offset, const unsigned int Data)
601 {
602 __writefsdword(Offset, __readfsdword(Offset) + Data);
603 }
604
605
606 /*** Bit manipulation ***/
607 PPC_QUAL unsigned char _BitScanForward(unsigned long * const Index, const unsigned long Mask)
608 {
609 if(Mask == 0) return 0;
610 else {
611 unsigned long mask = Mask;
612 mask &= -mask;
613 *Index =
614 ((mask & 0xffff0000) ? 16 : 0) +
615 ((mask & 0xff00ff00) ? 8 : 0) +
616 ((mask & 0xf0f0f0f0) ? 4 : 0) +
617 ((mask & 0xcccccccc) ? 2 : 0) +
618 ((mask & 0xaaaaaaaa) ? 1 : 0);
619 return 1;
620 }
621 }
622
623 /* Thanks http://www.jjj.de/bitwizardry/files/bithigh.h */
624 PPC_QUAL unsigned char _BitScanReverse(unsigned long * const Index, const unsigned long Mask)
625 {
626 unsigned long check = 16, checkmask;
627 if(Mask == 0) return 0;
628 else {
629 unsigned long mask = Mask;
630 *Index = 0;
631 while(check) {
632 checkmask = ((1<<check)-1) << check;
633 if( mask & checkmask ) {
634 mask >>= check;
635 *Index += check;
636 }
637 check >>= 1;
638 }
639 return 1;
640 }
641 }
642
643 /* NOTE: again, the bizarre implementation follows Visual C++ */
644 PPC_QUAL unsigned char _bittest(const long * const a, const long b)
645 {
646 return ((*a) & (1<<b)) != 0;
647 }
648
649 PPC_QUAL unsigned char _bittestandcomplement(long * const a, const long b)
650 {
651 unsigned char ret = ((*a) & (1<<b)) != 0;
652 (*a) ^= (1<<b);
653 return ret;
654 }
655
656 PPC_QUAL unsigned char _bittestandreset(long * const a, const long b)
657 {
658 unsigned char ret = ((*a) & (1<<b)) != 0;
659 (*a) &= ~(1<<b);
660 return ret;
661 }
662
663 PPC_QUAL unsigned char _bittestandset(long * const a, const long b)
664 {
665 unsigned char ret = ((*a) & (1<<b)) != 0;
666 (*a) |= (1<<b);
667 return ret;
668 }
669
670 PPC_QUAL unsigned char _rotl8(const unsigned char value, const unsigned char shift)
671 {
672 return (value << shift) | (value >> (8-shift));
673 }
674
675 PPC_QUAL unsigned short _rotl16(const unsigned short value, const unsigned char shift)
676 {
677 return (value << shift) | (value >> (16-shift));
678 }
679
680 PPC_QUAL unsigned char _rotr8(const unsigned char value, const unsigned char shift)
681 {
682 return (value >> shift) | (value << (8-shift));
683 }
684
685 PPC_QUAL unsigned short _rotr16(const unsigned short value, const unsigned char shift)
686 {
687 return (value >> shift) | (value << (16-shift));
688 }
689
690 PPC_QUAL unsigned long long __ll_lshift(const unsigned long long Mask, int Bit)
691 {
692 return Mask << Bit;
693 }
694
695 PPC_QUAL long long __ll_rshift(const long long Mask, const int Bit)
696 {
697 return Mask >> Bit;
698 }
699
700 PPC_QUAL unsigned long long __ull_rshift(const unsigned long long Mask, int Bit)
701 {
702 return Mask >> Bit;
703 }
704
705
706 /*** 64-bit math ***/
707 PPC_QUAL long long __emul(const int a, const int b)
708 {
709 return a * b;
710 }
711
712 PPC_QUAL unsigned long long __emulu(const unsigned int a, const unsigned int b)
713 {
714 return a * b;
715 }
716
717
718 /*** Port I/O ***/
/* Port "input": loads a byte from the physical address in Port by
 * temporarily clearing MSR[DR] (data address translation).
 * NOTE(review): the asm is not __volatile__ and clobbers r5/r6 without
 * listing them; interrupts are not masked around the MSR dance.
 * Confirm this is only used in contexts where that is safe. */
PPC_QUAL unsigned char __inbyte(const unsigned short Port)
{
    int ret;
    __asm__(
        "mfmsr 5\n\t"
        "andi. 6,5,0xffef\n\t"/* turn off MSR[DR] */
        "mtmsr 6\n\t"
        "isync\n\t"
        "sync\n\t"
        "lbz %0,0(%1)\n\t" /* Get actual value at phys addr r3 */
        "mtmsr 5\n\t" : "=r" (ret) : "b" (Port)
    );
    return ret;
}
733
/* Port "input": loads a halfword from the physical address in Port by
 * temporarily clearing MSR[DR].  Same caveats as __inbyte (unlisted
 * r5/r6 clobbers, non-volatile asm, no interrupt masking). */
PPC_QUAL unsigned short __inword(const unsigned short Port)
{
    int ret;
    __asm__(
        "mfmsr 5\n\t"
        "andi. 6,5,0xffef\n\t"/* turn off MSR[DR] */
        "mtmsr 6\n\t"
        "isync\n\t"
        "sync\n\t"
        "lhz %0,0(%1)\n\t" /* Get actual value at phys addr r3 */
        "mtmsr 5\n\t" : "=r" (ret) : "b" (Port)
    );
    return ret;
}
748
/* Port "input": loads a word from the physical address in Port by
 * temporarily clearing MSR[DR].  Same caveats as __inbyte (unlisted
 * r5/r6 clobbers, non-volatile asm, no interrupt masking). */
PPC_QUAL unsigned long __indword(const unsigned short Port)
{
    int ret;
    __asm__(
        "mfmsr 5\n\t"
        "andi. 6,5,0xffef\n\t"/* turn off MSR[DR] */
        "mtmsr 6\n\t"
        "isync\n\t"
        "sync\n\t"
        "lwz %0,0(%1)\n\t" /* Get actual value at phys addr r3 */
        "mtmsr 5\n\t" : "=r" (ret) : "b" (Port)
    );
    return ret;
}
763
764 PPC_QUAL void __inbytestring(unsigned short Port, unsigned char * Buffer, unsigned long Count)
765 {
766 while(Count--) {
767 *Buffer++ = __inbyte(Port);
768 }
769 }
770
771 PPC_QUAL void __inwordstring(unsigned short Port, unsigned short * Buffer, unsigned long Count)
772 {
773 while(Count--) {
774 *Buffer++ = __inword(Port);
775 }
776 }
777
778 PPC_QUAL void __indwordstring(unsigned short Port, unsigned long * Buffer, unsigned long Count)
779 {
780 while(Count--) {
781 *Buffer++ = __indword(Port);
782 }
783 }
784
/* Port "output": stores Data as a byte at the physical address in Port
 * by temporarily clearing MSR[DR], flushing the cache line (dcbst) and
 * fencing with sync/eieio.
 * NOTE(review): the asm is not __volatile__, has no outputs and no
 * "memory" clobber, so GCC may delete it entirely; r5/r6 are clobbered
 * without being listed, and the dcbst operand looks like it should be
 * the address (%0), not the data (%1) -- verify. */
PPC_QUAL void __outbyte(unsigned short const Port, const unsigned char Data)
{
    __asm__(
        "mfmsr 5\n\t"
        "andi. 6,5,0xffef\n\t"/* turn off MSR[DR] */
        "mtmsr 6\n\t"
        "sync\n\t"
        "eieio\n\t"
        "stb %1,0(%0)\n\t" /* Set actual value at phys addr r3 */
        "dcbst 0,%1\n\t"
        "mtmsr 5\n\t"
        "sync\n\t"
        "eieio\n\t" : : "b" (Port), "r" (Data)
    );
}
800
/* Port "output": stores Data as a halfword at the physical address in
 * Port with MSR[DR] cleared.  Same caveats as __outbyte (non-volatile
 * no-output asm, unlisted r5/r6 clobbers, suspicious dcbst operand). */
PPC_QUAL void __outword(unsigned short const Port, const unsigned short Data)
{
    __asm__(
        "mfmsr 5\n\t"
        "andi. 6,5,0xffef\n\t"/* turn off MSR[DR] */
        "mtmsr 6\n\t"
        "sync\n\t"
        "eieio\n\t"
        "sth %1,0(%0)\n\t" /* Set actual value at phys addr r3 */
        "dcbst 0,%1\n\t"
        "mtmsr 5\n\t"
        "sync\n\t"
        "eieio\n\t" : : "b" (Port), "b" (Data)
    );
}
816
/* Port "output": stores Data as a word at the physical address in Port
 * with MSR[DR] cleared.  Same caveats as __outbyte (non-volatile
 * no-output asm, unlisted r5/r6 clobbers, suspicious dcbst operand). */
PPC_QUAL void __outdword(unsigned short const Port, const unsigned long Data)
{
    __asm__(
        "mfmsr 5\n\t"
        "andi. 6,5,0xffef\n\t"/* turn off MSR[DR] */
        "mtmsr 6\n\t"
        "sync\n\t"
        "eieio\n\t"
        "stw %1,0(%0)\n\t" /* Set actual value at phys addr r3 */
        "dcbst 0,%1\n\t"
        "mtmsr 5\n\t"
        "sync\n\t"
        "eieio\n\t" : : "b" (Port), "b" (Data)
    );
}
832
833 PPC_QUAL void __outbytestring(unsigned short const Port, const unsigned char * const Buffer, const unsigned long Count)
834 {
835 unsigned long count = Count;
836 const unsigned char *buffer = Buffer;
837 while(count--) {
838 __outbyte(Port, *buffer++);
839 }
840 }
841
842 PPC_QUAL void __outwordstring(unsigned short const Port, const unsigned short * const Buffer, const unsigned long Count)
843 {
844 unsigned long count = Count;
845 const unsigned short *buffer = Buffer;
846 while(count--) {
847 __outword(Port, *buffer++);
848 }
849 }
850
851 PPC_QUAL void __outdwordstring(unsigned short const Port, const unsigned long * const Buffer, const unsigned long Count)
852 {
853 unsigned long count = Count;
854 const unsigned long *buffer = Buffer;
855 while(count--) {
856 __outdword(Port, *buffer++);
857 }
858 }
859
860
861 /*** System information ***/
/* x86 CPUID stub: reads the PowerPC Processor Version Register but
 * NOTE(review): never writes CPUInfo (callers receive uninitialized
 * output) and discards lo32; InfoType is ignored.  The asm also names
 * no output operand for mfpvr -- verify it assembles as intended. */
PPC_QUAL void __cpuid(int CPUInfo[], const int InfoType)
{
    unsigned long lo32;
    __asm__("mfpvr" : "=b" (lo32));
}
867
/* x86 rdtsc substitute: returns the negated decrementer register.  DEC
 * counts down, so -DEC grows over time like a timestamp counter; only
 * 32 bits of range despite the 64-bit return type. */
PPC_QUAL unsigned long long __rdtsc(void)
{
    unsigned long lo32;
    __asm__("mfdec %0" : "=b" (lo32));
    return -lo32;
}
874
875
876 /*** Interrupts ***/
877 /* Finally decided to do this by enabling single step trap */
/* Software breakpoint -- still a stub (no-op).  Per the comment above,
 * the plan was to raise a trap by enabling single-step in the MSR. */
PPC_QUAL void __debugbreak(void)
{

}
882
/* x86 `int 0x2c` equivalent -- still a stub (no-op). */
PPC_QUAL void __int2c(void)
{
    /* Not sure yet */
}
887
888 #ifndef _ENABLE_DISABLE_DEFINED
889 #define _ENABLE_DISABLE_DEFINED
/* Disable external interrupts by clearing MSR[EE] (bit mask 0x8000).
 * NOTE(review): this ANDs the whole MSR with 0x00007fff, which also
 * clears every MSR bit above bit 15, and it clobbers r0/r8 without
 * listing them -- verify both before relying on this. */
PPC_QUAL void _disable(void)
{
    __asm__ __volatile__("mfmsr 0\n\t" \
                         "li 8,0x7fff\n\t" \
                         "and 0,8,0\n\t" \
                         "mtmsr 0\n\t");
}
897
/* Enable external interrupts by setting MSR[EE] (bit mask 0x8000).
 * NOTE(review): `lis 8,0x8000@ha` loads 0x8000@ha (== 1) into the high
 * halfword, producing 0x00010000 rather than the EE bit 0x8000; r0/r8
 * are also clobbered without being listed -- verify. */
PPC_QUAL void _enable(void)
{
    __asm__ __volatile__("mfmsr 0\n\t" \
                         "lis 8,0x8000@ha\n\t" \
                         "or 0,8,0\n\t" \
                         "mtmsr 0\n\t");
}
905
906 /*** Protected memory management ***/
/* Return the SDR1 register (page-table base/size for the hashed page
 * table MMU). */
PPC_QUAL unsigned long __readsdr1(void)
{
    unsigned long value;
    __asm__("mfsdr1 %0" : "=b" (value));
    return value;
}
913
/* Write the SDR1 register.
 * NOTE(review): Data is declared unsigned long long although mtsdr1
 * moves a single 32-bit GPR on this target -- confirm intended. */
PPC_QUAL void __writesdr1(const unsigned long long Data)
{
    __asm__("mtsdr1 %0" : : "b" (Data));
}
918
919 /*** System operations ***/
920 /* This likely has a different meaning from the X86 equivalent. We'll keep
921 * the name cause it fits */
/* Return the PowerPC Machine State Register (not the x86 MSR concept;
 * the name is kept for interface compatibility).  Only 32 bits are
 * read despite the 64-bit return type. */
PPC_QUAL unsigned long long __readmsr()
{
    unsigned long temp;
    __asm__("mfmsr %0" : "=b" (temp));
    return temp;
}
928
/* Write the PowerPC Machine State Register with Value. */
PPC_QUAL void __writemsr(const unsigned long Value)
{
    __asm__("mtmsr %0" : : "b" (Value));
}
933
934 /* We'll make sure of the following:
935 * IO operations have completed
936 * Write operations through cache have completed
937 * We've reloaded anything in the data or instruction cache that might have
938 * changed in real ram.
939 */
/* x86 wbinvd equivalent: fence I/O (eieio), flush the data cache,
 * then sync/isync so subsequent code sees memory afresh.
 * NOTE(review): "dcs" is an original-POWER/601-era mnemonic, not a
 * standard PowerPC one -- confirm the assembler accepts it on the
 * targets this builds for. */
PPC_QUAL void __wbinvd(void)
{
    __asm__("eieio\n\t"
            "dcs\n\t"
            "sync\n\t"
            "isync\n\t");
}
947 #endif
948
/* WARNING: non-functional stub.  The x86 lock add/adc implementation is
 * disabled under #if 0, so this never modifies *Addend; it just returns
 * Value.  A real PPC version would need a CAS loop (or interrupt
 * masking) over the 64-bit counter. */
PPC_QUAL long _InterlockedAddLargeStatistic(volatile long long * const Addend, const long Value)
{
#if 0
    /* Disabled x86 implementation, kept for reference only. */
    __asm__
    (
        "lock; add %[Value], %[Lo32];"
        "jae LABEL%=;"
        "lock; adc $0, %[Hi32];"
        "LABEL%=:;" :
        [Lo32] "=m" (*((volatile long *)(Addend) + 0)), [Hi32] "=m" (*((volatile long *)(Addend) + 1)) :
        [Value] "ir" (Value)
    );
#endif
    return Value;
}
964
965 /*** Miscellaneous ***/
966 /* BUGBUG: only good for use in macros. Cannot be taken the address of */
967 #define __noop(...) ((void)0)
968
969 /* TODO: __assume. GCC only supports the weaker __builtin_expect */
970
971 #endif
972 /* EOF */