#ifndef __ATOMIC_PRIMITIVES_LINUX_H_INCLUDED__
#define __ATOMIC_PRIMITIVES_LINUX_H_INCLUDED__

/* Build for SMP by default: on SMP the read-modify-write instructions
 * below need the x86 "lock" prefix to stay atomic across CPUs. */
#ifndef SMP
#define SMP
#endif

#ifdef SMP
#define CPU_LOCK "lock; "
#else
#define CPU_LOCK
#endif

/* Mark the asm statements volatile so GCC does not optimize them away. */
#define VOLATILE_PREF __volatile__

/* 32-bit atomic counter; all operations below act on 'counter'. */
typedef struct { volatile int counter; } atomic_t;

/* Atomically increment *vptr. */
static __inline__ void atomic_inc(atomic_t *vptr)
{
    __asm__ VOLATILE_PREF (CPU_LOCK "incl %0"
            : "+m" (vptr->counter)
            :
            : "cc" );
}
/* Atomically decrement *vptr. */
static __inline__ void atomic_dec(atomic_t *vptr)
{
    __asm__ VOLATILE_PREF (CPU_LOCK "decl %0"
            : "+m" (vptr->counter)
            :
            : "cc" );
}
/* Atomically add 'additive' to *vptr. */
static __inline__ void atomic_add(atomic_t *vptr, const int additive)
{
    __asm__ VOLATILE_PREF (CPU_LOCK "addl %1, %0"
            : "+m" (vptr->counter)
            : "ir" (additive)
            : "cc" );
}
/* Atomically AND 'par' into *vptr. */
static __inline__ void atomic_and(atomic_t *vptr, const int par)
{
    __asm__ VOLATILE_PREF (CPU_LOCK "andl %1, %0"
            : "+m" (vptr->counter)
            : "ir" (par)
            : "cc" );
}
/* Atomically increment *vptr and return the new value. */
static __inline__ int atomic_inc_return(atomic_t *vptr)
{
    int ret;
    __asm__ VOLATILE_PREF ("movl $1, %1; " CPU_LOCK "xaddl %1, %0; incl %1"
            : "+m" (vptr->counter), "=&a" (ret)
            :
            : "cc" );
    return ret;
}
/* Atomically decrement *vptr and return the new value. */
static __inline__ int atomic_dec_return(atomic_t *vptr)
{
    int ret;
    __asm__ VOLATILE_PREF ("movl $-1, %1; " CPU_LOCK "xaddl %1, %0; decl %1"
            : "+m" (vptr->counter), "=&a" (ret)
            :
            : "cc" );
    return ret;
}

/* Atomically add 'additive' to *vptr.  Note that, unlike atomic_inc_return
 * and atomic_dec_return, this returns the value *vptr held BEFORE the
 * addition (xadd's fetch-and-add semantics). */
static __inline__ int atomic_add_return(atomic_t *vptr, int additive)
{
    __asm__ VOLATILE_PREF (CPU_LOCK "xaddl %1, %0"
            : "+m" (vptr->counter), "+a" (additive)
            :
            : "cc" );
    return additive;
}

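/*
 * Usage sketch (illustrative only; the names are hypothetical): because
 * atomic_add_return hands back the counter's value from before the
 * addition, it can hand out consecutive ticket numbers to concurrent
 * callers.
 *
 *   atomic_t next_ticket = { 0 };
 *
 *   int take_ticket(void)
 *   {
 *       return atomic_add_return(&next_ticket, 1);   // 0, 1, 2, ... each exactly once
 *   }
 */
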
/* Atomically compare *vptr with 'comparand' and, if they are equal, store
 * 'new_val'.  Returns the value *vptr held before the operation. */
static __inline__ int atomic_cmpexch_return(atomic_t *vptr, const int new_val, int comparand)
{
    __asm__ VOLATILE_PREF (CPU_LOCK "cmpxchgl %2, %0"
            : "+m" (vptr->counter), "+a" (comparand)
            : "r" (new_val)
            : "cc", "memory" );
    return comparand;
}

#define atomic_cmpexch atomic_cmpexch_return

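/*
 * Usage sketch (illustrative only, not part of this header's interface):
 * a typical compare-and-swap retry loop built on atomic_cmpexch_return,
 * here doubling a counter without losing concurrent updates.  The variable
 * names are hypothetical.
 *
 *   atomic_t value;
 *   int old, seen;
 *   do {
 *       old  = value.counter;                                // snapshot
 *       seen = atomic_cmpexch_return(&value, old * 2, old);  // try to publish
 *   } while (seen != old);                                   // someone raced us: retry
 */
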
/*
 * C++ convenience overloads: the same operations on a plain volatile int,
 * forwarding to the atomic_t versions above.
 */
inline void atomic_inc(volatile int *vptr)
{
    atomic_inc((atomic_t *)vptr);
}

inline void atomic_dec(volatile int *vptr)
{
    atomic_dec((atomic_t *)vptr);
}

inline void atomic_add(volatile int *vptr, const int additive)
{
    atomic_add((atomic_t *)vptr, additive);
}

inline int atomic_inc_return(volatile int *vptr)
{
    return atomic_inc_return((atomic_t *)vptr);
}

inline int atomic_dec_return(volatile int *vptr)
{
    return atomic_dec_return((atomic_t *)vptr);
}

inline int atomic_add_return(volatile int *vptr, int additive)
{
    return atomic_add_return((atomic_t *)vptr, additive);
}

inline int atomic_cmpexch_return(volatile int *v, const int new_val, int comparand)
{
    return atomic_cmpexch_return((atomic_t *)v, new_val, comparand);
}

inline void atomic_and(volatile int *vptr, const int par)
{
    atomic_and((atomic_t *)vptr, par);
}

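/*
 * Usage sketch (illustrative only; 'struct buffer', buffer_get and
 * buffer_put are hypothetical): atomic_inc takes a reference, and
 * atomic_dec_return's result tells the last releaser to free the object.
 *
 *   struct buffer { atomic_t refs; char data[4096]; };
 *
 *   void buffer_get(struct buffer *b) { atomic_inc(&b->refs); }
 *
 *   void buffer_put(struct buffer *b)
 *   {
 *       if (atomic_dec_return(&b->refs) == 0)   // dropped the last reference
 *           free(b);
 *   }
 */
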
#endif // #ifndef __ATOMIC_PRIMITIVES_LINUX_H_INCLUDED__