/*
 * STACK_ALIGN(type, name, cnt, alignment)
 *
 * Declare an on-stack buffer of `cnt` elements of `type`, aligned to
 * `alignment` bytes.  A raw u8 array is over-allocated by up to one full
 * alignment unit, and `name` is exposed as a `type *` rounded up to the
 * next `alignment` boundary.  `alignment` must be a power of two (the
 * rounding uses `& (alignment - 1)`).
 */
#define STACK_ALIGN(type,name,cnt,alignment)		u8 _al__##name[((sizeof(type)*(cnt)) + (alignment) + (((sizeof(type)*(cnt))%(alignment)) > 0 ? ((alignment) - ((sizeof(type)*(cnt))%(alignment))) : 0))]; \
	type *name = (type*)(((u32)(_al__##name)) + ((alignment) - (((u32)(_al__##name))&((alignment)-1))))
1212
/* Drain all pending stores (PowerPC `sync` barrier). */
#define _sync() __asm__ __volatile__ ("sync")
/* Single no-op instruction. */
#define _nop() __asm__ __volatile__ ("nop")
/* Raise a system-call exception (`sc`). */
#define ppcsync() __asm__ __volatile__ ("sc")
/* Halt the CPU: flush outstanding stores, then spin forever. */
#define ppchalt() ({					\
	__asm__ __volatile__ ("sync");			\
	while(1) {					\
		__asm__ __volatile__ ("nop");		\
		__asm__ __volatile__ ("li 3,0");	\
		__asm__ __volatile__ ("nop");		\
	}						\
})
2424
/* Read the Processor Version Register (PVR). */
#define mfpvr() ({register u32 _rval; \
		__asm__ __volatile__ ("mfpvr %0" : "=r"(_rval)); _rval;})
2727
/* Read Device Control Register `_rn` (`_rn` must be a compile-time constant,
 * since it is stringified into the instruction). */
#define mfdcr(_rn) ({register u32 _rval; \
		__asm__ __volatile__ ("mfdcr %0," __stringify(_rn) \
             : "=r" (_rval)); _rval;})
/* Write `val` to Device Control Register `rn` (constant register number). */
#define mtdcr(rn,val) __asm__ __volatile__ ("mtdcr " __stringify(rn) ",%0" : : "r" (val))
3232
/* Read the Machine State Register. */
#define mfmsr() ({register u32 _rval; \
		__asm__ __volatile__ ("mfmsr %0" : "=r" (_rval)); _rval;})
/* Write `val` to the Machine State Register. */
#define mtmsr(val) __asm__ __volatile__ ("mtmsr %0" : : "r" (val))
3636
/* Read the Decrementer register. */
#define mfdec() ({register u32 _rval; \
		__asm__ __volatile__ ("mfdec %0" : "=r" (_rval)); _rval;})
/* Write `_val` to the Decrementer register. */
#define mtdec(_val) __asm__ __volatile__ ("mtdec %0" : : "r" (_val))
4040
/* Read Special Purpose Register `_rn` (constant register number, stringified
 * into the instruction). */
#define mfspr(_rn) \
({	register u32 _rval = 0; \
	__asm__ __volatile__ ("mfspr %0," __stringify(_rn) \
	: "=r" (_rval));\
	_rval; \
})

/* Write `_val` to Special Purpose Register `_rn`. */
#define mtspr(_rn,_val) __asm__ __volatile__ ("mtspr " __stringify(_rn) ",%0" : : "r" (_val))
4949
/* Read / write the WPAR special purpose register. */
#define mfwpar() mfspr(WPAR)
#define mtwpar(_val) mtspr(WPAR,_val)
9090 __asm__ volatile ("stwbrx %0,%1,%2" : : "r"(value), "b%"(index), "r"(base) : "memory")
9191
/* Count leading zero bits of the 32-bit value `_val` (`cntlzw`); yields 0..32. */
#define cntlzw(_val) ({register u32 _rval; \
					  __asm__ __volatile__ ("cntlzw %0, %1" : "=r"((_rval)) : "r"((_val))); _rval;})
9494
/* Copy the Machine State Register into `_msr_value` (an lvalue). */
#define _CPU_MSR_GET( _msr_value ) \
  do { \
    _msr_value = 0; \
    __asm__ __volatile__ ("mfmsr %0" : "=&r" ((_msr_value)) : "0" ((_msr_value))); \
  } while (0)
100100
/* Load the Machine State Register from `_msr_value`. */
#define _CPU_MSR_SET( _msr_value ) \
{ __asm__ __volatile__ ("mtmsr %0" : "=&r" ((_msr_value)) : "0" ((_msr_value))); }
103103
104104#define _CPU_ISR_Enable () \
105105 { register u32 _val = 0; \
156156
/* Enable floating-point instructions: set the 0x2000 bit (MSR[FP]) via a
 * read-modify-write of the MSR. */
#define _CPU_FPR_Enable() \
{ register u32 _val = 0; \
	__asm__ __volatile__ ("mfmsr %0; ori %0,%0,0x2000; mtmsr %0" : \
		"=&r" (_val) : "0" (_val));\
}
162162
/* Disable floating-point instructions: `rlwinm 0,19,17` clears the single
 * MSR bit that _CPU_FPR_Enable sets (0x2000, MSR[FP]). */
#define _CPU_FPR_Disable() \
{ register u32 _val = 0; \
	__asm__ __volatile__ ("mfmsr %0; rlwinm %0,%0,0,19,17; mtmsr %0" : \
		"=&r" (_val) : "0" (_val));\
}
168168
@@ -201,13 +201,13 @@ static inline u64 bswap64(u64 val)
201201static inline u32 read32 (u32 addr )
202202{
203203 u32 x ;
204- asm volatile ("lwz %0,0(%1) ; sync" : "=r" (x ) : "b" (0xc0000000 | addr ));
204+ __asm__ __volatile__ ("lwz %0,0(%1) ; sync" : "=r" (x ) : "b" (0xc0000000 | addr ));
205205 return x ;
206206}
207207
208208static inline void write32 (u32 addr , u32 x )
209209{
210- asm ("stw %0,0(%1) ; eieio" : : "r" (x ), "b" (0xc0000000 | addr ));
210+ __asm__ ("stw %0,0(%1) ; eieio" : : "r" (x ), "b" (0xc0000000 | addr ));
211211}
212212
213213static inline void mask32 (u32 addr , u32 clear , u32 set )
@@ -218,30 +218,30 @@ static inline void mask32(u32 addr, u32 clear, u32 set)
218218static inline u16 read16 (u32 addr )
219219{
220220 u16 x ;
221- asm volatile ("lhz %0,0(%1) ; sync" : "=r" (x ) : "b" (0xc0000000 | addr ));
221+ __asm__ __volatile__ ("lhz %0,0(%1) ; sync" : "=r" (x ) : "b" (0xc0000000 | addr ));
222222 return x ;
223223}
224224
225225static inline void write16 (u32 addr , u16 x )
226226{
227- asm ("sth %0,0(%1) ; eieio" : : "r" (x ), "b" (0xc0000000 | addr ));
227+ __asm__ ("sth %0,0(%1) ; eieio" : : "r" (x ), "b" (0xc0000000 | addr ));
228228}
229229
230230static inline u8 read8 (u32 addr )
231231{
232232 u8 x ;
233- asm volatile ("lbz %0,0(%1) ; sync" : "=r" (x ) : "b" (0xc0000000 | addr ));
233+ __asm__ __volatile__ ("lbz %0,0(%1) ; sync" : "=r" (x ) : "b" (0xc0000000 | addr ));
234234 return x ;
235235}
236236
237237static inline void write8 (u32 addr , u8 x )
238238{
239- asm ("stb %0,0(%1) ; eieio" : : "r" (x ), "b" (0xc0000000 | addr ));
239+ __asm__ ("stb %0,0(%1) ; eieio" : : "r" (x ), "b" (0xc0000000 | addr ));
240240}
241241
242242static inline void writef32 (u32 addr , f32 x )
243243{
244- asm ("stfs %0,0(%1) ; eieio" : : "f" (x ), "b" (0xc0000000 | addr ));
244+ __asm__ ("stfs %0,0(%1) ; eieio" : : "f" (x ), "b" (0xc0000000 | addr ));
245245}
246246
247247#ifdef __cplusplus
0 commit comments