#ifndef __ATOMIC_PRIMITIVES_W32_H_INCLUDED__
#define __ATOMIC_PRIMITIVES_W32_H_INCLUDED__

// Define _ATOMIC_UTIL_USE_ASM_ to use the x86 inline-assembly implementation
// (MSVC __asm); comment it out to fall back to the Win32 Interlocked* API.
#define _ATOMIC_UTIL_USE_ASM_

#ifdef _ATOMIC_UTIL_USE_ASM_

// _ATOMIC_UTIL_SMP_ selects whether the CPU LOCK prefix is emitted:
// on multiprocessor (SMP) builds CPU_LOCK expands to "lock", on
// uniprocessor builds it expands to nothing.
#define _ATOMIC_UTIL_SMP_
#ifdef _ATOMIC_UTIL_SMP_
#define CPU_LOCK lock
#else
#define CPU_LOCK
#endif // _ATOMIC_UTIL_SMP_

// Atomically increments *vptr.
inline void atomic_inc(volatile int *vptr)
{
    __asm
    {
        mov eax, vptr
        CPU_LOCK inc dword ptr [eax]
    }
}
// Atomically decrements *vptr.
inline void atomic_dec(volatile int *vptr)
{
    __asm
    {
        mov eax, vptr
        CPU_LOCK dec dword ptr [eax]
    }
}
// Atomically adds 'additive' to *vptr.
inline void atomic_add(volatile int *vptr, const int additive)
{
    __asm
    {
        mov eax, vptr
        mov ebx, additive
        CPU_LOCK add [eax], ebx
    }
}
// Atomically ANDs *vptr with 'param'.
inline void atomic_and(volatile int *vptr, const int param)
{
    __asm
    {
        mov eax, vptr
        mov ebx, param
        CPU_LOCK and [eax], ebx
    }
}
// Atomically increments *vptr and returns the new value.
// XADD leaves the old value in EAX; the final INC turns it into the new one.
// The result is returned in EAX (MSVC __asm return convention).
inline int atomic_inc_return(volatile int *vptr)
{
    __asm
    {
        mov ebx, vptr
        mov eax, 1
        CPU_LOCK xadd [ebx], eax
        inc eax
    }
}
// Atomically decrements *vptr and returns the new value (in EAX).
inline int atomic_dec_return(volatile int *vptr)
{
    __asm
    {
        mov ebx, vptr
        mov eax, -1
        CPU_LOCK xadd [ebx], eax
        dec eax
    }
}
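
// Illustrative sketch only (the helper below is hypothetical and not part of
// the original interface): the classic reference-count release pattern, where
// atomic_dec_return's "returns the new value" semantics let the caller detect
// that it just dropped the last reference.
inline int example_release_ref(volatile int *refcount)
{
    // Nonzero means the caller released the final reference and should
    // destroy the shared object.
    return atomic_dec_return(refcount) == 0;
}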
// Atomically adds 'additive' to *vptr. Note: unlike atomic_inc_return and
// atomic_dec_return, this returns the value *vptr held before the addition
// (XADD's result is left in EAX unadjusted).
inline int atomic_add_return(volatile int *vptr, const int additive)
{
    __asm
    {
        mov ebx, vptr
        mov eax, additive
        CPU_LOCK xadd [ebx], eax
    }
}
// Atomic compare-and-exchange: if *vptr == comparand, stores new_val into
// *vptr. Returns the previous value of *vptr (left in EAX by CMPXCHG), so a
// return value equal to 'comparand' means the exchange took place.
inline int atomic_cmpexch_return(volatile int *vptr, const int new_val, const int comparand)
{
    __asm
    {
        mov eax, comparand
        mov ebx, new_val
        mov ecx, vptr
        CPU_LOCK cmpxchg [ecx], ebx
    }
}
// 16-bit variant of atomic_cmpexch_return, operating on a short.
inline short atomic_cmpexch_return_w(volatile short *vptr, const short new_val, const short comparand)
{
    __asm
    {
        mov ax, comparand
        mov bx, new_val
        mov ecx, vptr
        CPU_LOCK cmpxchg [ecx], bx
    }
}

#define atomic_cmpexch atomic_cmpexch_return
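
// Illustrative sketch only (both helpers below are hypothetical and not part
// of the original interface): a minimal test-and-set spin lock built on
// atomic_cmpexch, which returns the previous value of the target word.
// Getting 0 back means the 0 -> 1 swap succeeded and the lock is ours.
inline void example_spin_acquire(volatile int *lock_word)
{
    while (atomic_cmpexch(lock_word, 1, 0) != 0)
        ;   // busy-wait until the word is observed unlocked (0)
}

inline void example_spin_release(volatile int *lock_word)
{
    *lock_word = 0;   // a plain aligned store releases the lock on x86
}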

#else // _ATOMIC_UTIL_USE_ASM_

// Win32 API fallback. Requires <windows.h> for the Interlocked* functions;
// note that this path operates on volatile long (LONG), the type those
// functions expect.
#include <windows.h>

// InterlockedIncrement/InterlockedDecrement return the new value.
inline long atomic_inc_return(volatile long *v) { return InterlockedIncrement(v); }
inline long atomic_dec_return(volatile long *v) { return InterlockedDecrement(v); }

// InterlockedExchangeAdd returns the value held before the addition,
// matching the inline-assembly atomic_add_return above.
inline long atomic_add_return(volatile long *vptr, const long additive)
{ return InterlockedExchangeAdd(vptr, additive); }

// InterlockedCompareExchange returns the previous value of *vptr.
inline long atomic_cmpexch_return(volatile long *vptr, const long new_val, const long comparand)
{ return InterlockedCompareExchange(vptr, new_val, comparand); }

// The void-style names map onto the *_return variants in this path.
#define atomic_add atomic_add_return
#define atomic_inc atomic_inc_return
#define atomic_dec atomic_dec_return
#define atomic_cmpexch atomic_cmpexch_return

#endif // _ATOMIC_UTIL_USE_ASM_
#endif // __ATOMIC_PRIMITIVES_W32_H_INCLUDED__