MySQL 5.6.14 Source Code Document
 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Groups Pages
sync0rw.cc
Go to the documentation of this file.
1 /*****************************************************************************
2 
3 Copyright (c) 1995, 2011, Oracle and/or its affiliates. All Rights Reserved.
4 Copyright (c) 2008, Google Inc.
5 
6 Portions of this file contain modifications contributed and copyrighted by
7 Google, Inc. Those modifications are gratefully acknowledged and are described
8 briefly in the InnoDB documentation. The contributions by Google are
9 incorporated with their permission, and subject to the conditions contained in
10 the file COPYING.Google.
11 
12 This program is free software; you can redistribute it and/or modify it under
13 the terms of the GNU General Public License as published by the Free Software
14 Foundation; version 2 of the License.
15 
16 This program is distributed in the hope that it will be useful, but WITHOUT
17 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
18 FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
19 
20 You should have received a copy of the GNU General Public License along with
21 this program; if not, write to the Free Software Foundation, Inc.,
22 51 Franklin Street, Suite 500, Boston, MA 02110-1335 USA
23 
24 *****************************************************************************/
25 
26 /**************************************************/
33 #include "sync0rw.h"
34 #ifdef UNIV_NONINL
35 #include "sync0rw.ic"
36 #endif
37 
38 #include "os0thread.h"
39 #include "mem0mem.h"
40 #include "srv0srv.h"
41 #include "os0sync.h" /* for INNODB_RW_LOCKS_USE_ATOMICS */
42 #include "ha_prototypes.h"
43 
44 /*
45  IMPLEMENTATION OF THE RW_LOCK
46  =============================
47 The status of a rw_lock is held in lock_word. The initial value of lock_word is
48 X_LOCK_DECR. lock_word is decremented by 1 for each s-lock and by X_LOCK_DECR
49 for each x-lock. This describes the lock state for each value of lock_word:
50 
51 lock_word == X_LOCK_DECR: Unlocked.
52 0 < lock_word < X_LOCK_DECR: Read locked, no waiting writers.
53  (X_LOCK_DECR - lock_word) is the
54  number of readers that hold the lock.
55 lock_word == 0: Write locked
56 -X_LOCK_DECR < lock_word < 0: Read locked, with a waiting writer.
57  (-lock_word) is the number of readers
58  that hold the lock.
59 lock_word <= -X_LOCK_DECR: Recursively write locked. lock_word has been
60  decremented by X_LOCK_DECR for the first lock
61  and the first recursive lock, then by 1 for
62  each recursive lock thereafter.
63  So the number of locks is:
64  (lock_copy == 0) ? 1 : 2 - (lock_copy + X_LOCK_DECR)
65 
66 The lock_word is always read and updated atomically and consistently, so that
67 it always represents the state of the lock, and the state of the lock changes
68 with a single atomic operation. This lock_word holds all of the information
69 that a thread needs in order to determine if it is eligible to gain the lock
70 or if it must spin or sleep. The one exception to this is that writer_thread
71 must be verified before recursive write locks: to solve this scenario, we make
72 writer_thread readable by all threads, but only writeable by the x-lock holder.
73 
74 The other members of the lock obey the following rules to remain consistent:
75 
76 recursive: This and the writer_thread field together control the
77  behaviour of recursive x-locking.
78  lock->recursive must be FALSE in following states:
79  1) The writer_thread contains garbage i.e.: the
80  lock has just been initialized.
81  2) The lock is not x-held and there is no
82  x-waiter waiting on WAIT_EX event.
83  3) The lock is x-held or there is an x-waiter
84  waiting on WAIT_EX event but the 'pass' value
85  is non-zero.
86  lock->recursive is TRUE iff:
87  1) The lock is x-held or there is an x-waiter
88  waiting on WAIT_EX event and the 'pass' value
89  is zero.
90  This flag must be set after the writer_thread field
91  has been updated with a memory ordering barrier.
92  It is unset before the lock_word has been incremented.
93 writer_thread: Is used only in recursive x-locking. Can only be safely
94  read iff lock->recursive flag is TRUE.
95  This field is uninitialized at lock creation time and
96  is updated atomically when x-lock is acquired or when
97  move_ownership is called. A thread is only allowed to
98  set the value of this field to it's thread_id i.e.: a
99  thread cannot set writer_thread to some other thread's
100  id.
101 waiters: May be set to 1 anytime, but to avoid unnecessary wake-up
102  signals, it should only be set to 1 when there are threads
103  waiting on event. Must be 1 when a writer starts waiting to
104  ensure the current x-locking thread sends a wake-up signal
105  during unlock. May only be reset to 0 immediately before a
106  a wake-up signal is sent to event. On most platforms, a
107  memory barrier is required after waiters is set, and before
108  verifying lock_word is still held, to ensure some unlocker
109  really does see the flags new value.
110 event: Threads wait on event for read or writer lock when another
111  thread has an x-lock or an x-lock reservation (wait_ex). A
112  thread may only wait on event after performing the following
113  actions in order:
114  (1) Record the counter value of event (with os_event_reset).
115  (2) Set waiters to 1.
116  (3) Verify lock_word <= 0.
117  (1) must come before (2) to ensure signal is not missed.
118  (2) must come before (3) to ensure a signal is sent.
119  These restrictions force the above ordering.
120  Immediately before sending the wake-up signal, we should:
121  (1) Verify lock_word == X_LOCK_DECR (unlocked)
122  (2) Reset waiters to 0.
123 wait_ex_event: A thread may only wait on the wait_ex_event after it has
124  performed the following actions in order:
125  (1) Decrement lock_word by X_LOCK_DECR.
126  (2) Record counter value of wait_ex_event (os_event_reset,
127  called from sync_array_reserve_cell).
128  (3) Verify that lock_word < 0.
129  (1) must come first to ensures no other threads become reader
130  or next writer, and notifies unlocker that signal must be sent.
131  (2) must come before (3) to ensure the signal is not missed.
132  These restrictions force the above ordering.
133  Immediately before sending the wake-up signal, we should:
134  Verify lock_word == 0 (waiting thread holds x_lock)
135 */
136 
138 
139 /* The global list of rw-locks */
140 UNIV_INTERN rw_lock_list_t rw_lock_list;
141 UNIV_INTERN ib_mutex_t rw_lock_list_mutex;
142 
143 #ifdef UNIV_PFS_MUTEX
144 UNIV_INTERN mysql_pfs_key_t rw_lock_list_mutex_key;
145 UNIV_INTERN mysql_pfs_key_t rw_lock_mutex_key;
146 #endif /* UNIV_PFS_MUTEX */
147 
148 #ifdef UNIV_SYNC_DEBUG
149 /* The global mutex which protects debug info lists of all rw-locks.
150 To modify the debug info list of an rw-lock, this mutex has to be
151 acquired in addition to the mutex protecting the lock. */
152 
153 UNIV_INTERN ib_mutex_t rw_lock_debug_mutex;
154 
155 # ifdef UNIV_PFS_MUTEX
156 UNIV_INTERN mysql_pfs_key_t rw_lock_debug_mutex_key;
157 # endif
158 
159 /* If deadlock detection does not get immediately the mutex,
160 it may wait for this event */
161 UNIV_INTERN os_event_t rw_lock_debug_event;
162 /* This is set to TRUE, if there may be waiters for the event */
163 UNIV_INTERN ibool rw_lock_debug_waiters;
164 
165 /******************************************************************/
167 static
168 rw_lock_debug_t*
169 rw_lock_debug_create(void);
170 /*======================*/
171 /******************************************************************/
173 static
174 void
175 rw_lock_debug_free(
176 /*===============*/
177  rw_lock_debug_t* info);
178 
179 /******************************************************************/
182 static
183 rw_lock_debug_t*
184 rw_lock_debug_create(void)
185 /*======================*/
186 {
187  return((rw_lock_debug_t*) mem_alloc(sizeof(rw_lock_debug_t)));
188 }
189 
190 /******************************************************************/
192 static
193 void
194 rw_lock_debug_free(
195 /*===============*/
196  rw_lock_debug_t* info)
197 {
198  mem_free(info);
199 }
200 #endif /* UNIV_SYNC_DEBUG */
201 
202 /******************************************************************/
207 UNIV_INTERN
208 void
210 /*================*/
211  rw_lock_t* lock,
212 #ifdef UNIV_DEBUG
213 # ifdef UNIV_SYNC_DEBUG
214  ulint level,
215 # endif /* UNIV_SYNC_DEBUG */
216  const char* cmutex_name,
217 #endif /* UNIV_DEBUG */
218  const char* cfile_name,
219  ulint cline)
220 {
221  /* If this is the very first time a synchronization object is
222  created, then the following call initializes the sync system. */
223 
224 #ifndef INNODB_RW_LOCKS_USE_ATOMICS
225  mutex_create(rw_lock_mutex_key, rw_lock_get_mutex(lock),
226  SYNC_NO_ORDER_CHECK);
227 
228  lock->mutex.cfile_name = cfile_name;
229  lock->mutex.cline = cline;
230 
231  ut_d(lock->mutex.cmutex_name = cmutex_name);
232  ut_d(lock->mutex.ib_mutex_type = 1);
233 #else /* INNODB_RW_LOCKS_USE_ATOMICS */
234 # ifdef UNIV_DEBUG
235  UT_NOT_USED(cmutex_name);
236 # endif
237 #endif /* INNODB_RW_LOCKS_USE_ATOMICS */
238 
239  lock->lock_word = X_LOCK_DECR;
240  lock->waiters = 0;
241 
242  /* We set this value to signify that lock->writer_thread
243  contains garbage at initialization and cannot be used for
244  recursive x-locking. */
245  lock->recursive = FALSE;
246  /* Silence Valgrind when UNIV_DEBUG_VALGRIND is not enabled. */
247  memset((void*) &lock->writer_thread, 0, sizeof lock->writer_thread);
248  UNIV_MEM_INVALID(&lock->writer_thread, sizeof lock->writer_thread);
249 
250 #ifdef UNIV_SYNC_DEBUG
251  UT_LIST_INIT(lock->debug_list);
252 
253  lock->level = level;
254 #endif /* UNIV_SYNC_DEBUG */
255 
256  ut_d(lock->magic_n = RW_LOCK_MAGIC_N);
257 
258  lock->cfile_name = cfile_name;
259  lock->cline = (unsigned int) cline;
260 
261  lock->count_os_wait = 0;
262  lock->last_s_file_name = "not yet reserved";
263  lock->last_x_file_name = "not yet reserved";
264  lock->last_s_line = 0;
265  lock->last_x_line = 0;
266  lock->event = os_event_create();
267  lock->wait_ex_event = os_event_create();
268 
269  mutex_enter(&rw_lock_list_mutex);
270 
271  ut_ad(UT_LIST_GET_FIRST(rw_lock_list) == NULL
272  || UT_LIST_GET_FIRST(rw_lock_list)->magic_n == RW_LOCK_MAGIC_N);
273 
274  UT_LIST_ADD_FIRST(list, rw_lock_list, lock);
275 
276  mutex_exit(&rw_lock_list_mutex);
277 }
278 
279 /******************************************************************/
283 UNIV_INTERN
284 void
286 /*==============*/
287  rw_lock_t* lock)
288 {
289 #ifndef INNODB_RW_LOCKS_USE_ATOMICS
290  ib_mutex_t* mutex;
291 #endif /* !INNODB_RW_LOCKS_USE_ATOMICS */
292 
293  ut_ad(rw_lock_validate(lock));
294  ut_a(lock->lock_word == X_LOCK_DECR);
295 
296  mutex_enter(&rw_lock_list_mutex);
297 
298 #ifndef INNODB_RW_LOCKS_USE_ATOMICS
299  mutex = rw_lock_get_mutex(lock);
300 #endif /* !INNODB_RW_LOCKS_USE_ATOMICS */
301 
302  os_event_free(lock->event);
303 
305 
306  ut_ad(UT_LIST_GET_PREV(list, lock) == NULL
307  || UT_LIST_GET_PREV(list, lock)->magic_n == RW_LOCK_MAGIC_N);
308  ut_ad(UT_LIST_GET_NEXT(list, lock) == NULL
309  || UT_LIST_GET_NEXT(list, lock)->magic_n == RW_LOCK_MAGIC_N);
310 
311  UT_LIST_REMOVE(list, rw_lock_list, lock);
312 
313  mutex_exit(&rw_lock_list_mutex);
314 
315  ut_d(lock->magic_n = 0);
316 
317 #ifndef INNODB_RW_LOCKS_USE_ATOMICS
318  /* We have merely removed the rw_lock from the list, the memory
319  has not been freed. Therefore the pointer to mutex is valid. */
320  mutex_free(mutex);
321 #endif /* !INNODB_RW_LOCKS_USE_ATOMICS */
322 }
323 
#ifdef UNIV_DEBUG
/******************************************************************//**
Checks that the rw-lock has been initialized and that its state is
consistent: a valid magic number, a 0/1 waiters flag, and a lock_word
within the legal range described in the file header comment.
@return	TRUE */
UNIV_INTERN
ibool
rw_lock_validate(
/*=============*/
	rw_lock_t*	lock)	/*!< in: rw-lock */
{
	ulint	waiters;
	lint	lock_word;

	ut_ad(lock);

	/* Dirty reads: this is a debug consistency check only. */
	waiters = rw_lock_get_waiters(lock);
	lock_word = lock->lock_word;

	ut_ad(lock->magic_n == RW_LOCK_MAGIC_N);
	ut_ad(waiters == 0 || waiters == 1);
	ut_ad(lock_word > -(2 * X_LOCK_DECR));
	ut_ad(lock_word <= X_LOCK_DECR);

	return(TRUE);
}
#endif /* UNIV_DEBUG */
351 
352 /******************************************************************/
357 UNIV_INTERN
358 void
360 /*================*/
361  rw_lock_t* lock,
362  ulint pass,
364  const char* file_name,
365  ulint line)
366 {
367  ulint index; /* index of the reserved wait cell */
368  ulint i = 0; /* spin round count */
369  sync_array_t* sync_arr;
370  size_t counter_index;
371 
372  /* We reuse the thread id to index into the counter, cache
373  it here for efficiency. */
374 
375  counter_index = (size_t) os_thread_get_curr_id();
376 
377  ut_ad(rw_lock_validate(lock));
378 
379  rw_lock_stats.rw_s_spin_wait_count.add(counter_index, 1);
380 lock_loop:
381 
382  /* Spin waiting for the writer field to become free */
383  while (i < SYNC_SPIN_ROUNDS && lock->lock_word <= 0) {
384  if (srv_spin_wait_delay) {
385  ut_delay(ut_rnd_interval(0, srv_spin_wait_delay));
386  }
387 
388  i++;
389  }
390 
391  if (i == SYNC_SPIN_ROUNDS) {
392  os_thread_yield();
393  }
394 
395  /* We try once again to obtain the lock */
396  if (TRUE == rw_lock_s_lock_low(lock, pass, file_name, line)) {
397  rw_lock_stats.rw_s_spin_round_count.add(counter_index, i);
398 
399  return; /* Success */
400  } else {
401 
402  if (i < SYNC_SPIN_ROUNDS) {
403  goto lock_loop;
404  }
405 
406  rw_lock_stats.rw_s_spin_round_count.add(counter_index, i);
407 
408  sync_arr = sync_array_get();
409 
411  sync_arr, lock, RW_LOCK_SHARED,
412  file_name, line, &index);
413 
414  /* Set waiters before checking lock_word to ensure wake-up
415  signal is sent. This may lead to some unnecessary signals. */
416  rw_lock_set_waiter_flag(lock);
417 
418  if (TRUE == rw_lock_s_lock_low(lock, pass, file_name, line)) {
419  sync_array_free_cell(sync_arr, index);
420  return; /* Success */
421  }
422 
423  /* these stats may not be accurate */
424  lock->count_os_wait++;
425  rw_lock_stats.rw_s_os_wait_count.add(counter_index, 1);
426 
427  sync_array_wait_event(sync_arr, index);
428 
429  i = 0;
430  goto lock_loop;
431  }
432 }
433 
434 /******************************************************************/
442 UNIV_INTERN
443 void
445 /*==========================*/
446  rw_lock_t* lock)
448 {
449  ut_ad(rw_lock_is_locked(lock, RW_LOCK_EX));
450 
452 }
453 
454 /******************************************************************/
457 UNIV_INLINE
458 void
460 /*================*/
461  rw_lock_t* lock,
462 #ifdef UNIV_SYNC_DEBUG
463  ulint pass,
465 #endif
466  const char* file_name,
467  ulint line)
468 {
469  ulint index;
470  ulint i = 0;
471  sync_array_t* sync_arr;
472  size_t counter_index;
473 
474  /* We reuse the thread id to index into the counter, cache
475  it here for efficiency. */
476 
477  counter_index = (size_t) os_thread_get_curr_id();
478 
479  ut_ad(lock->lock_word <= 0);
480 
481  while (lock->lock_word < 0) {
482  if (srv_spin_wait_delay) {
483  ut_delay(ut_rnd_interval(0, srv_spin_wait_delay));
484  }
485  if(i < SYNC_SPIN_ROUNDS) {
486  i++;
487  continue;
488  }
489 
490  /* If there is still a reader, then go to sleep.*/
491  rw_lock_stats.rw_x_spin_round_count.add(counter_index, i);
492 
493  sync_arr = sync_array_get();
494 
496  sync_arr, lock, RW_LOCK_WAIT_EX,
497  file_name, line, &index);
498 
499  i = 0;
500 
501  /* Check lock_word to ensure wake-up isn't missed.*/
502  if (lock->lock_word < 0) {
503 
504  /* these stats may not be accurate */
505  lock->count_os_wait++;
506  rw_lock_stats.rw_x_os_wait_count.add(counter_index, 1);
507 
508  /* Add debug info as it is needed to detect possible
509  deadlock. We must add info for WAIT_EX thread for
510  deadlock detection to work properly. */
511 #ifdef UNIV_SYNC_DEBUG
512  rw_lock_add_debug_info(lock, pass, RW_LOCK_WAIT_EX,
513  file_name, line);
514 #endif
515 
516  sync_array_wait_event(sync_arr, index);
517 #ifdef UNIV_SYNC_DEBUG
518  rw_lock_remove_debug_info(
519  lock, pass, RW_LOCK_WAIT_EX);
520 #endif
521  /* It is possible to wake when lock_word < 0.
522  We must pass the while-loop check to proceed.*/
523  } else {
524  sync_array_free_cell(sync_arr, index);
525  }
526  }
527  rw_lock_stats.rw_x_spin_round_count.add(counter_index, i);
528 }
529 
530 /******************************************************************/
533 UNIV_INLINE
534 ibool
536 /*===============*/
537  rw_lock_t* lock,
538  ulint pass,
540  const char* file_name,
541  ulint line)
542 {
543  if (rw_lock_lock_word_decr(lock, X_LOCK_DECR)) {
544 
545  /* lock->recursive also tells us if the writer_thread
546  field is stale or active. As we are going to write
547  our own thread id in that field it must be that the
548  current writer_thread value is not active. */
549  ut_a(!lock->recursive);
550 
551  /* Decrement occurred: we are writer or next-writer. */
553  lock, pass ? FALSE : TRUE);
554 
555  rw_lock_x_lock_wait(lock,
556 #ifdef UNIV_SYNC_DEBUG
557  pass,
558 #endif
559  file_name, line);
560 
561  } else {
563 
564  /* Decrement failed: relock or failed lock */
565  if (!pass && lock->recursive
566  && os_thread_eq(lock->writer_thread, thread_id)) {
567  /* Relock */
568  if (lock->lock_word == 0) {
569  lock->lock_word -= X_LOCK_DECR;
570  } else {
571  --lock->lock_word;
572  }
573 
574  } else {
575  /* Another thread locked before us */
576  return(FALSE);
577  }
578  }
579 #ifdef UNIV_SYNC_DEBUG
580  rw_lock_add_debug_info(lock, pass, RW_LOCK_EX, file_name, line);
581 #endif
582  lock->last_x_file_name = file_name;
583  lock->last_x_line = (unsigned int) line;
584 
585  return(TRUE);
586 }
587 
588 /******************************************************************/
597 UNIV_INTERN
598 void
600 /*================*/
601  rw_lock_t* lock,
602  ulint pass,
604  const char* file_name,
605  ulint line)
606 {
607  ulint i;
608  ulint index;
609  sync_array_t* sync_arr;
610  ibool spinning = FALSE;
611  size_t counter_index;
612 
613  /* We reuse the thread id to index into the counter, cache
614  it here for efficiency. */
615 
616  counter_index = (size_t) os_thread_get_curr_id();
617 
618  ut_ad(rw_lock_validate(lock));
619 #ifdef UNIV_SYNC_DEBUG
620  ut_ad(!rw_lock_own(lock, RW_LOCK_SHARED));
621 #endif /* UNIV_SYNC_DEBUG */
622 
623  i = 0;
624 
625 lock_loop:
626 
627  if (rw_lock_x_lock_low(lock, pass, file_name, line)) {
628  rw_lock_stats.rw_x_spin_round_count.add(counter_index, i);
629 
630  return; /* Locking succeeded */
631 
632  } else {
633 
634  if (!spinning) {
635  spinning = TRUE;
636 
638  counter_index, 1);
639  }
640 
641  /* Spin waiting for the lock_word to become free */
642  while (i < SYNC_SPIN_ROUNDS
643  && lock->lock_word <= 0) {
644  if (srv_spin_wait_delay) {
646  srv_spin_wait_delay));
647  }
648 
649  i++;
650  }
651  if (i == SYNC_SPIN_ROUNDS) {
652  os_thread_yield();
653  } else {
654  goto lock_loop;
655  }
656  }
657 
658  rw_lock_stats.rw_x_spin_round_count.add(counter_index, i);
659 
660  sync_arr = sync_array_get();
661 
663  sync_arr, lock, RW_LOCK_EX, file_name, line, &index);
664 
665  /* Waiters must be set before checking lock_word, to ensure signal
666  is sent. This could lead to a few unnecessary wake-up signals. */
667  rw_lock_set_waiter_flag(lock);
668 
669  if (rw_lock_x_lock_low(lock, pass, file_name, line)) {
670  sync_array_free_cell(sync_arr, index);
671  return; /* Locking succeeded */
672  }
673 
674  /* these stats may not be accurate */
675  lock->count_os_wait++;
676  rw_lock_stats.rw_x_os_wait_count.add(counter_index, 1);
677 
678  sync_array_wait_event(sync_arr, index);
679 
680  i = 0;
681  goto lock_loop;
682 }
683 
684 #ifdef UNIV_SYNC_DEBUG
685 /******************************************************************/
691 UNIV_INTERN
692 void
693 rw_lock_debug_mutex_enter(void)
694 /*===========================*/
695 {
696 loop:
697  if (0 == mutex_enter_nowait(&rw_lock_debug_mutex)) {
698  return;
699  }
700 
701  os_event_reset(rw_lock_debug_event);
702 
703  rw_lock_debug_waiters = TRUE;
704 
705  if (0 == mutex_enter_nowait(&rw_lock_debug_mutex)) {
706  return;
707  }
708 
709  os_event_wait(rw_lock_debug_event);
710 
711  goto loop;
712 }
713 
714 /******************************************************************/
716 UNIV_INTERN
717 void
718 rw_lock_debug_mutex_exit(void)
719 /*==========================*/
720 {
721  mutex_exit(&rw_lock_debug_mutex);
722 
723  if (rw_lock_debug_waiters) {
724  rw_lock_debug_waiters = FALSE;
725  os_event_set(rw_lock_debug_event);
726  }
727 }
728 
729 /******************************************************************/
731 UNIV_INTERN
732 void
733 rw_lock_add_debug_info(
734 /*===================*/
735  rw_lock_t* lock,
736  ulint pass,
737  ulint lock_type,
738  const char* file_name,
739  ulint line)
740 {
741  rw_lock_debug_t* info;
742 
743  ut_ad(lock);
744  ut_ad(file_name);
745 
746  info = rw_lock_debug_create();
747 
748  rw_lock_debug_mutex_enter();
749 
750  info->file_name = file_name;
751  info->line = line;
752  info->lock_type = lock_type;
753  info->thread_id = os_thread_get_curr_id();
754  info->pass = pass;
755 
756  UT_LIST_ADD_FIRST(list, lock->debug_list, info);
757 
758  rw_lock_debug_mutex_exit();
759 
760  if ((pass == 0) && (lock_type != RW_LOCK_WAIT_EX)) {
761  sync_thread_add_level(lock, lock->level,
762  lock_type == RW_LOCK_EX
763  && lock->lock_word < 0);
764  }
765 }
766 
767 /******************************************************************/
769 UNIV_INTERN
770 void
771 rw_lock_remove_debug_info(
772 /*======================*/
773  rw_lock_t* lock,
774  ulint pass,
775  ulint lock_type)
776 {
777  rw_lock_debug_t* info;
778 
779  ut_ad(lock);
780 
781  if ((pass == 0) && (lock_type != RW_LOCK_WAIT_EX)) {
782  sync_thread_reset_level(lock);
783  }
784 
785  rw_lock_debug_mutex_enter();
786 
787  info = UT_LIST_GET_FIRST(lock->debug_list);
788 
789  while (info != NULL) {
790  if ((pass == info->pass)
791  && ((pass != 0)
792  || os_thread_eq(info->thread_id,
794  && (info->lock_type == lock_type)) {
795 
796  /* Found! */
797  UT_LIST_REMOVE(list, lock->debug_list, info);
798  rw_lock_debug_mutex_exit();
799 
800  rw_lock_debug_free(info);
801 
802  return;
803  }
804 
805  info = UT_LIST_GET_NEXT(list, info);
806  }
807 
808  ut_error;
809 }
810 #endif /* UNIV_SYNC_DEBUG */
811 
#ifdef UNIV_SYNC_DEBUG
/******************************************************************//**
Checks if the thread has locked the rw-lock in the specified mode, with
the pass value == 0.
@return	TRUE if locked */
UNIV_INTERN
ibool
rw_lock_own(
/*========*/
	rw_lock_t*	lock,		/*!< in: rw-lock */
	ulint		lock_type)	/*!< in: lock type: RW_LOCK_SHARED,
					RW_LOCK_EX */
{
	rw_lock_debug_t* info;

	ut_ad(lock);
	ut_ad(rw_lock_validate(lock));

	rw_lock_debug_mutex_enter();

	info = UT_LIST_GET_FIRST(lock->debug_list);

	while (info != NULL) {

		if (os_thread_eq(info->thread_id, os_thread_get_curr_id())
		    && (info->pass == 0)
		    && (info->lock_type == lock_type)) {

			rw_lock_debug_mutex_exit();
			/* Found! */

			return(TRUE);
		}

		info = UT_LIST_GET_NEXT(list, info);
	}
	rw_lock_debug_mutex_exit();

	return(FALSE);
}
#endif /* UNIV_SYNC_DEBUG */
853 
854 /******************************************************************/
857 UNIV_INTERN
858 ibool
860 /*==============*/
861  rw_lock_t* lock,
862  ulint lock_type)
864 {
865  ibool ret = FALSE;
866 
867  ut_ad(lock);
868  ut_ad(rw_lock_validate(lock));
869 
870  if (lock_type == RW_LOCK_SHARED) {
871  if (rw_lock_get_reader_count(lock) > 0) {
872  ret = TRUE;
873  }
874  } else if (lock_type == RW_LOCK_EX) {
875  if (rw_lock_get_writer(lock) == RW_LOCK_EX) {
876  ret = TRUE;
877  }
878  } else {
879  ut_error;
880  }
881 
882  return(ret);
883 }
884 
885 #ifdef UNIV_SYNC_DEBUG
886 /***************************************************************/
888 UNIV_INTERN
889 void
890 rw_lock_list_print_info(
891 /*====================*/
892  FILE* file)
893 {
894  rw_lock_t* lock;
895  ulint count = 0;
896  rw_lock_debug_t* info;
897 
898  mutex_enter(&rw_lock_list_mutex);
899 
900  fputs("-------------\n"
901  "RW-LATCH INFO\n"
902  "-------------\n", file);
903 
904  lock = UT_LIST_GET_FIRST(rw_lock_list);
905 
906  while (lock != NULL) {
907 
908  count++;
909 
910 #ifndef INNODB_RW_LOCKS_USE_ATOMICS
911  mutex_enter(&(lock->mutex));
912 #endif
913  if (lock->lock_word != X_LOCK_DECR) {
914 
915  fprintf(file, "RW-LOCK: %p ", (void*) lock);
916 
917  if (rw_lock_get_waiters(lock)) {
918  fputs(" Waiters for the lock exist\n", file);
919  } else {
920  putc('\n', file);
921  }
922 
923  rw_lock_debug_mutex_enter();
924  info = UT_LIST_GET_FIRST(lock->debug_list);
925  while (info != NULL) {
926  rw_lock_debug_print(file, info);
927  info = UT_LIST_GET_NEXT(list, info);
928  }
929  rw_lock_debug_mutex_exit();
930  }
931 #ifndef INNODB_RW_LOCKS_USE_ATOMICS
932  mutex_exit(&(lock->mutex));
933 #endif
934 
935  lock = UT_LIST_GET_NEXT(list, lock);
936  }
937 
938  fprintf(file, "Total number of rw-locks %ld\n", count);
939  mutex_exit(&rw_lock_list_mutex);
940 }
941 
942 /***************************************************************/
944 UNIV_INTERN
945 void
946 rw_lock_print(
947 /*==========*/
948  rw_lock_t* lock)
949 {
950  rw_lock_debug_t* info;
951 
952  fprintf(stderr,
953  "-------------\n"
954  "RW-LATCH INFO\n"
955  "RW-LATCH: %p ", (void*) lock);
956 
957 #ifndef INNODB_RW_LOCKS_USE_ATOMICS
958  /* We used to acquire lock->mutex here, but it would cause a
959  recursive call to sync_thread_add_level() if UNIV_SYNC_DEBUG
960  is defined. Since this function is only invoked from
961  sync_thread_levels_g(), let us choose the smaller evil:
962  performing dirty reads instead of causing bogus deadlocks or
963  assertion failures. */
964 #endif
965  if (lock->lock_word != X_LOCK_DECR) {
966 
967  if (rw_lock_get_waiters(lock)) {
968  fputs(" Waiters for the lock exist\n", stderr);
969  } else {
970  putc('\n', stderr);
971  }
972 
973  rw_lock_debug_mutex_enter();
974  info = UT_LIST_GET_FIRST(lock->debug_list);
975  while (info != NULL) {
976  rw_lock_debug_print(stderr, info);
977  info = UT_LIST_GET_NEXT(list, info);
978  }
979  rw_lock_debug_mutex_exit();
980  }
981 }
982 
983 /*********************************************************************/
985 UNIV_INTERN
986 void
987 rw_lock_debug_print(
988 /*================*/
989  FILE* f,
990  rw_lock_debug_t* info)
991 {
992  ulint rwt;
993 
994  rwt = info->lock_type;
995 
996  fprintf(f, "Locked: thread %lu file %s line %lu ",
997  (ulong) os_thread_pf(info->thread_id), info->file_name,
998  (ulong) info->line);
999  if (rwt == RW_LOCK_SHARED) {
1000  fputs("S-LOCK", f);
1001  } else if (rwt == RW_LOCK_EX) {
1002  fputs("X-LOCK", f);
1003  } else if (rwt == RW_LOCK_WAIT_EX) {
1004  fputs("WAIT X-LOCK", f);
1005  } else {
1006  ut_error;
1007  }
1008  if (info->pass != 0) {
1009  fprintf(f, " pass value %lu", (ulong) info->pass);
1010  }
1011  putc('\n', f);
1012 }
1013 
1014 /***************************************************************/
1018 UNIV_INTERN
1019 ulint
1020 rw_lock_n_locked(void)
1021 /*==================*/
1022 {
1023  rw_lock_t* lock;
1024  ulint count = 0;
1025 
1026  mutex_enter(&rw_lock_list_mutex);
1027 
1028  lock = UT_LIST_GET_FIRST(rw_lock_list);
1029 
1030  while (lock != NULL) {
1031 
1032  if (lock->lock_word != X_LOCK_DECR) {
1033  count++;
1034  }
1035 
1036  lock = UT_LIST_GET_NEXT(list, lock);
1037  }
1038 
1039  mutex_exit(&rw_lock_list_mutex);
1040 
1041  return(count);
1042 }
1043 #endif /* UNIV_SYNC_DEBUG */