MySQL 5.6.14 Source Code Document
 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Groups Pages
generic-msvc.h
1 /* Copyright (c) 2006-2008 MySQL AB, 2009 Sun Microsystems, Inc.
2  Use is subject to license terms.
3 
4  This program is free software; you can redistribute it and/or modify
5  it under the terms of the GNU General Public License as published by
6  the Free Software Foundation; version 2 of the License.
7 
8  This program is distributed in the hope that it will be useful,
9  but WITHOUT ANY WARRANTY; without even the implied warranty of
10  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11  GNU General Public License for more details.
12 
13  You should have received a copy of the GNU General Public License
14  along with this program; if not, write to the Free Software
15  Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */
16 
17 #ifndef _atomic_h_cleanup_
/*
  NOTE(review): dual-pass header. On the first inclusion this branch defines
  the msvc-intrinsics atomic macros; _atomic_h_cleanup_ stores this header's
  own path so that it can be included a second time to take the
  "#else cleanup" branch at the bottom, which #undefs the helper macros.
  Presumably driven by my_atomic.h -- confirm against the including file.
*/
18 #define _atomic_h_cleanup_ "atomic/generic-msvc.h"
19 
20 /*
21  We don't implement anything specific for MY_ATOMIC_MODE_DUMMY, always use
22  intrinsics.
23  8 and 16-bit atomics are not implemented, but it can be done if necessary.
24 */
/* Only 32-bit, 64-bit and pointer-sized operations are provided below. */
25 #undef MY_ATOMIC_HAS_8_16
26 
27 #include <windows.h>
28 /*
 29  x86 compilers (both VS2003 and VS2005) never use intrinsics, but generate
30  function calls to kernel32 instead, even in the optimized build.
31  We force intrinsics as described in MSDN documentation for
32  _InterlockedCompareExchange.
33 */
34 #ifdef _M_IX86
/* 32-bit x86 build: force inline intrinsic expansion of the compare-exchange
   operations instead of letting the compiler emit kernel32 function calls. */
35 
/* VS2008 (_MSC_VER >= 1500) and later ship the intrinsic prototypes
   in <intrin.h>; older compilers need them declared by hand. */
36 #if (_MSC_VER >= 1500)
37 #include <intrin.h>
38 #else
39 C_MODE_START
40 /*Visual Studio 2003 and earlier do not have prototypes for atomic intrinsics*/
41 LONG _InterlockedCompareExchange (LONG volatile *Target, LONG Value, LONG Comp);
42 LONGLONG _InterlockedCompareExchange64 (LONGLONG volatile *Target,
43  LONGLONG Value, LONGLONG Comp);
44 C_MODE_END
45 
/* Tell the compiler to expand these as intrinsics rather than emit calls. */
46 #pragma intrinsic(_InterlockedCompareExchange)
47 #pragma intrinsic(_InterlockedCompareExchange64)
48 #endif
49 
/* Route the plain Win32 API names to the underscore-prefixed intrinsic
   forms, so the IL_COMP_EXCHG* macros below pick up the inline versions. */
50 #define InterlockedCompareExchange _InterlockedCompareExchange
51 #define InterlockedCompareExchange64 _InterlockedCompareExchange64
52 /*
53  No need to do something special for InterlockedCompareExchangePointer
54  as it is a #define to InterlockedCompareExchange. The same applies to
55  InterlockedExchangePointer.
56 */
57 #endif /*_M_IX86*/
58 
59 #define MY_ATOMIC_MODE "msvc-intrinsics"
60 /* Implement using CAS on WIN32 */
/*
  Size-suffixed CAS wrappers: map the generic IL_COMP_EXCHG<size> names onto
  the Win32 Interlocked compare-exchange primitives, casting the target
  pointer to the volatile type each API expects.
*/
61 #define IL_COMP_EXCHG32(X,Y,Z) \
62  InterlockedCompareExchange((volatile LONG *)(X),(Y),(Z))
63 #define IL_COMP_EXCHG64(X,Y,Z) \
64  InterlockedCompareExchange64((volatile LONGLONG *)(X), \
65  (LONGLONG)(Y),(LONGLONG)(Z))
66 #define IL_COMP_EXCHGptr InterlockedCompareExchangePointer
67 
/*
  Function body for my_atomic_cas<S>(): one hardware CAS attempt.
  On success ret becomes 1; on failure ret is 0 and *cmp is overwritten with
  the value actually found at *a (the standard CAS contract, as the last
  line shows). NOTE(review): relies on variables a, cmp, set and ret being
  declared by the generator macro that expands this body -- presumably in
  my_atomic.h; confirm there.
*/
68 #define make_atomic_cas_body(S) \
69  int ## S initial_cmp= *cmp; \
70  int ## S initial_a= IL_COMP_EXCHG ## S (a, set, initial_cmp); \
71  if (!(ret= (initial_a == initial_cmp))) *cmp= initial_a;
72 
73 #ifndef _M_IX86
74 /* Use full set of optimised functions on WIN64 */
/* NOTE(review): on 32-bit x86 these remain undefined, so the framework
   presumably falls back to CAS-loop implementations built from
   make_atomic_cas_body above -- confirm in the including header. */
75 #define IL_EXCHG_ADD32(X,Y) \
76  InterlockedExchangeAdd((volatile LONG *)(X),(Y))
77 #define IL_EXCHG_ADD64(X,Y) \
78  InterlockedExchangeAdd64((volatile LONGLONG *)(X),(LONGLONG)(Y))
79 #define IL_EXCHG32(X,Y) \
80  InterlockedExchange((volatile LONG *)(X),(Y))
81 #define IL_EXCHG64(X,Y) \
82  InterlockedExchange64((volatile LONGLONG *)(X),(LONGLONG)(Y))
83 #define IL_EXCHGptr InterlockedExchangePointer
84 
/*
  Bodies for the generated my_atomic_add/swap/load functions.
  add/swap store the operation's previous value back into v (the
  Interlocked* primitives return the value that was replaced).
  load has no direct primitive: a CAS that exchanges equal values
  (ret, ret) leaves *a unchanged while returning its current contents,
  yielding an atomic read. NOTE(review): a, v and ret are expected to be
  declared by the expanding generator macro, as with make_atomic_cas_body.
*/
85 #define make_atomic_add_body(S) \
86  v= IL_EXCHG_ADD ## S (a, v)
87 #define make_atomic_swap_body(S) \
88  v= IL_EXCHG ## S (a, v)
89 #define make_atomic_load_body(S) \
90  ret= 0; /* avoid compiler warning */ \
91  ret= IL_COMP_EXCHG ## S (a, ret, ret);
92 #endif
93 /*
94  my_yield_processor (equivalent of x86 PAUSE instruction) should be used
95  to improve performance on hyperthreaded CPUs. Intel recommends to use it in
96  spin loops also on non-HT machines to reduce power consumption (see e.g
97  http://softwarecommunity.intel.com/articles/eng/2004.htm)
98 
99  Running benchmarks for spinlocks implemented with InterlockedCompareExchange
100  and YieldProcessor shows that much better performance is achieved by calling
101  YieldProcessor in a loop - that is, yielding longer. On Intel boxes setting
102  loop count in the range 200-300 brought best results.
103  */
/* Pause-instruction iterations per backoff step; 200-300 measured best on
   Intel boxes (see the benchmark note above). The #ifndef guard makes the
   count overridable at build time. */
104 #ifndef YIELD_LOOPS
105 #define YIELD_LOOPS 200
106 #endif
107 
108 static __inline int my_yield_processor()
109 {
110  int i;
111  for(i=0; i<YIELD_LOOPS; i++)
112  {
113 #if (_MSC_VER <= 1310)
114  /* On older compilers YieldProcessor is not available, use inline assembly*/
115  __asm { rep nop }
116 #else
117  YieldProcessor();
118 #endif
119  }
120  return 1;
121 }
122 
/* Backoff hook invoked by lock-free spin loops; always evaluates to 1. */
123 #define LF_BACKOFF my_yield_processor()
124 #else /* cleanup */
/*
  Second pass: _atomic_h_cleanup_ was already defined, so undo the internal
  IL_* helper macros defined on the first pass.
  NOTE(review): LF_BACKOFF, YIELD_LOOPS, MY_ATOMIC_MODE and the
  InterlockedCompareExchange* redirections are not #undef'd here -- verify
  whether that is intentional (the make_atomic_*_body macros are presumably
  cleaned up by the including framework).
*/
125 
126 #undef IL_EXCHG_ADD32
127 #undef IL_EXCHG_ADD64
128 #undef IL_COMP_EXCHG32
129 #undef IL_COMP_EXCHG64
130 #undef IL_COMP_EXCHGptr
131 #undef IL_EXCHG32
132 #undef IL_EXCHG64
133 #undef IL_EXCHGptr
134 
135 #endif