MySQL 5.6.14 Source Code Document
sync0arr.cc
1 /*****************************************************************************
2 
3 Copyright (c) 1995, 2013, Oracle and/or its affiliates. All Rights Reserved.
4 Copyright (c) 2008, Google Inc.
5 
6 Portions of this file contain modifications contributed and copyrighted by
7 Google, Inc. Those modifications are gratefully acknowledged and are described
8 briefly in the InnoDB documentation. The contributions by Google are
9 incorporated with their permission, and subject to the conditions contained in
10 the file COPYING.Google.
11 
12 This program is free software; you can redistribute it and/or modify it under
13 the terms of the GNU General Public License as published by the Free Software
14 Foundation; version 2 of the License.
15 
16 This program is distributed in the hope that it will be useful, but WITHOUT
17 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
18 FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
19 
20 You should have received a copy of the GNU General Public License along with
21 this program; if not, write to the Free Software Foundation, Inc.,
22 51 Franklin Street, Suite 500, Boston, MA 02110-1335 USA
23 
24 *****************************************************************************/
25 
26 /**************************************************/
33 #include "sync0arr.h"
34 #ifdef UNIV_NONINL
35 #include "sync0arr.ic"
36 #endif
37 
38 #include "sync0sync.h"
39 #include "sync0rw.h"
40 #include "os0sync.h"
41 #include "os0file.h"
42 #include "lock0lock.h"
43 #include "srv0srv.h"
44 #include "ha_prototypes.h"
45 
46 /*
47  WAIT ARRAY
48  ==========
49 
50 The wait array consists of cells, each of which has an
51 operating system event object created for it. The threads
52 waiting for a mutex, for example, can reserve a cell
53 in the array and suspend themselves to wait for the event
54 to become signaled. When using the wait array, remember to make
55 sure that some thread holding the synchronization object
56 will eventually know that there is a waiter in the array and
57 signal the object, to prevent infinite wait.
58 Why did we choose to implement a wait array? First, to make
59 mutexes fast, we had to code our own implementation of them,
60 which only in uncommon cases resorts to using
61 slow operating system primitives. Then we had the choice of
62 assigning a unique OS event for each mutex, which would
63 be simpler, or using a global wait array. In some operating systems,
64 the global wait array solution is more efficient and flexible,
65 because we can do with a very small number of OS events,
66 say 200. In NT 3.51, allocating events seems to be a quadratic
67 algorithm, because 10 000 events are created fast, but
68 100 000 events take a couple of minutes to create.
69 
70 As of 5.0.30 the above-mentioned design was changed. Since the OS
71 can now handle millions of wait events efficiently, we no longer
72 have the concept of each cell of the wait array having its own event.
73 Instead, now the event that a thread wants to wait on is embedded
74 in the wait object (mutex or rw_lock). We still keep the global
75 wait array for the sake of diagnostics and also to avoid infinite
76 wait. The error_monitor thread scans the global wait array to signal
77 any waiting threads that have missed the signal. */
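/* A minimal sketch of the caller-side sequence described above, written
against the functions defined later in this file. It is illustration only
and not part of sync0arr.cc: retest_lock() is a placeholder for the
caller's own retest of the latch (for example the test-and-set retry in a
mutex spin-wait loop), and `object' and `type' stand for whatever latch is
being waited for (SYNC_MUTEX, RW_LOCK_EX, ...). */
#if 0	/* illustration only, never compiled */
static
void
sync_array_usage_sketch(
/*====================*/
	void*	object,	/* in: latch to wait for */
	ulint	type)	/* in: SYNC_MUTEX, RW_LOCK_EX, ... */
{
	sync_array_t*	sync_arr = sync_array_get();
	ulint		index;

	/* Reserve a cell; this resets the event embedded in the latch
	and records the signal count observed at reset time in the cell. */
	sync_array_reserve_cell(sync_arr, object, type,
				__FILE__, __LINE__, &index);

	/* Make sure the current holder will eventually signal the latch's
	event (e.g. by setting a waiters flag), then retest the latch; if
	it became free in the meantime the thread can skip the wait. */
	if (retest_lock(object)) {	/* placeholder retest */
		/* Acquired after all: give the cell back unused. */
		sync_array_free_cell(sync_arr, index);
	} else {
		/* Suspend on the latch's event; the cell is freed inside
		sync_array_wait_event() when the thread wakes up. */
		sync_array_wait_event(sync_arr, index);
	}
}
#endif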
78 
82 struct sync_cell_t {
83  void* wait_object; /*!< pointer to the object the thread is waiting for; if NULL the cell is free */
86  ib_mutex_t* old_wait_mutex; /*!< the latest wait mutex in cell */
87  rw_lock_t* old_wait_rw_lock; /*!< the latest wait rw-lock in cell */
90  ulint request_type; /*!< lock type requested on the object */
92  const char* file; /*!< in debug version file where requested */
94  ulint line; /*!< in debug version line where requested */
96  os_thread_id_t thread; /*!< thread id of this waiting thread */
98  ibool waiting; /*!< TRUE if the thread has already suspended itself on this cell */
101  ib_int64_t signal_count; /*!< signal count of the wait object, captured when the cell is reserved and passed to os_event_wait_low() so the wait is skipped if the event has been signalled since */
109  time_t reservation_time; /*!< time when the thread reserved the wait cell */
111 };
112 
113 /* NOTE: It is allowed for a thread to wait
114 for an event allocated for the array without owning the
115 protecting mutex (depending on the case: OS or database mutex), but
116 all changes (set or reset) to the state of the event must be made
117 while owning the mutex. */
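/* Waiting without owning the protecting mutex is safe here largely because
of the signal-count handshake recorded in sync_cell_t::signal_count:
os_event_reset() returns a generation number, and os_event_wait_low()
blocks only if the event has not been set since that generation, so a
signal that arrives between the reset and the wait is never lost. The class
below is a small standalone C++ model of that behaviour, built only from
standard library primitives; it is not InnoDB code and is meant purely to
make the protocol concrete. */

#include <condition_variable>
#include <cstdint>
#include <mutex>

class model_event {
public:
	/* Clear the "set" flag and return the generation observed at reset
	time; a later wait_low() uses this value as its reference point. */
	std::int64_t reset() {
		std::lock_guard<std::mutex> guard(m);
		is_set = false;
		return signal_count;
	}

	/* Signal the event: wake all waiters and start a new generation. */
	void set() {
		std::lock_guard<std::mutex> guard(m);
		if (!is_set) {
			is_set = true;
			++signal_count;
			cv.notify_all();
		}
	}

	/* Block only while the event is not set and nothing has been
	signalled since reset_count; a set() issued after the matching
	reset() but before this call makes wait_low() return at once. */
	void wait_low(std::int64_t reset_count) {
		std::unique_lock<std::mutex> guard(m);
		cv.wait(guard, [&] {
			return is_set || signal_count != reset_count;
		});
	}

private:
	std::mutex		m;
	std::condition_variable	cv;
	bool			is_set = false;
	std::int64_t		signal_count = 0;
};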
118 
120 struct sync_array_t {
121  ulint n_reserved; /*!< number of currently reserved cells in the wait array */
123  ulint n_cells; /*!< number of cells in the wait array */
125  sync_cell_t* array; /*!< pointer to the wait array */
129  os_ib_mutex_t os_mutex; /*!< OS mutex protecting the data structure */
135  ulint res_count; /*!< count of cell reservations since creation of the array */
137 };
138 
140 UNIV_INTERN ulong srv_sync_array_size = 32;
141 
143 static ulint sync_array_size;
144 
147 static sync_array_t** sync_wait_array;
148 
150 static ulint sg_count;
151 
152 #ifdef UNIV_SYNC_DEBUG
153 /******************************************************************/
157 static
158 ibool
159 sync_array_detect_deadlock(
160 /*=======================*/
161  sync_array_t* arr,
163  sync_cell_t* start,
164  sync_cell_t* cell,
165  ulint depth);
166 #endif /* UNIV_SYNC_DEBUG */
167 
168 /*****************************************************************/
171 static
172 sync_cell_t*
173 sync_array_get_nth_cell(
174 /*====================*/
175  sync_array_t* arr,
176  ulint n)
177 {
178  ut_a(arr);
179  ut_a(n < arr->n_cells);
180 
181  return(arr->array + n);
182 }
183 
184 /******************************************************************/
186 static
187 void
188 sync_array_enter(
189 /*=============*/
190  sync_array_t* arr)
191 {
192  os_mutex_enter(arr->os_mutex);
193 }
194 
195 /******************************************************************/
197 static
198 void
199 sync_array_exit(
200 /*============*/
201  sync_array_t* arr)
202 {
203  os_mutex_exit(arr->os_mutex);
204 }
205 
206 /*******************************************************************/
211 static
212 sync_array_t*
213 sync_array_create(
214 /*==============*/
215  ulint n_cells)
217 {
218  ulint sz;
219  sync_array_t* arr;
220 
221  ut_a(n_cells > 0);
222 
223  /* Allocate memory for the data structures */
224  arr = static_cast<sync_array_t*>(ut_malloc(sizeof(*arr)));
225  memset(arr, 0x0, sizeof(*arr));
226 
227  sz = sizeof(sync_cell_t) * n_cells;
228  arr->array = static_cast<sync_cell_t*>(ut_malloc(sz));
229  memset(arr->array, 0x0, sz);
230 
231  arr->n_cells = n_cells;
232 
233  /* Then create the mutex to protect the wait array complex */
234  arr->os_mutex = os_mutex_create();
235 
236  return(arr);
237 }
238 
239 /******************************************************************/
241 static
242 void
243 sync_array_free(
244 /*============*/
245  sync_array_t* arr)
246 {
247  ut_a(arr->n_reserved == 0);
248 
249  sync_array_validate(arr);
250 
251  /* Release the mutex protecting the wait array complex */
252 
253  os_mutex_free(arr->os_mutex);
254 
255  ut_free(arr->array);
256  ut_free(arr);
257 }
258 
259 /********************************************************************/
262 UNIV_INTERN
263 void
264 sync_array_validate(
265 /*================*/
266  sync_array_t* arr)
267 {
268  ulint i;
269  sync_cell_t* cell;
270  ulint count = 0;
271 
272  sync_array_enter(arr);
273 
274  for (i = 0; i < arr->n_cells; i++) {
275  cell = sync_array_get_nth_cell(arr, i);
276  if (cell->wait_object != NULL) {
277  count++;
278  }
279  }
280 
281  ut_a(count == arr->n_reserved);
282 
283  sync_array_exit(arr);
284 }
285 
286 /*******************************************************************/
288 static
289 os_event_t
290 sync_cell_get_event(
291 /*================*/
292  sync_cell_t* cell)
293 {
294  ulint type = cell->request_type;
295 
296  if (type == SYNC_MUTEX) {
297  return(((ib_mutex_t*) cell->wait_object)->event);
298  } else if (type == RW_LOCK_WAIT_EX) {
299  return(((rw_lock_t*) cell->wait_object)->wait_ex_event);
300  } else { /* RW_LOCK_SHARED and RW_LOCK_EX wait on the same event */
301  return(((rw_lock_t*) cell->wait_object)->event);
302  }
303 }
304 
305 /******************************************************************/
308 UNIV_INTERN
309 void
310 sync_array_reserve_cell(
311 /*====================*/
312  sync_array_t* arr,
313  void* object,
314  ulint type,
315  const char* file,
316  ulint line,
317  ulint* index)
318 {
319  sync_cell_t* cell;
320  os_event_t event;
321  ulint i;
322 
323  ut_a(object);
324  ut_a(index);
325 
326  sync_array_enter(arr);
327 
328  arr->res_count++;
329 
330  /* Reserve a new cell. */
331  for (i = 0; i < arr->n_cells; i++) {
332  cell = sync_array_get_nth_cell(arr, i);
333 
334  if (cell->wait_object == NULL) {
335 
336  cell->waiting = FALSE;
337  cell->wait_object = object;
338 
339  if (type == SYNC_MUTEX) {
340  cell->old_wait_mutex =
341  static_cast<ib_mutex_t*>(object);
342  } else {
343  cell->old_wait_rw_lock =
344  static_cast<rw_lock_t*>(object);
345  }
346 
347  cell->request_type = type;
348 
349  cell->file = file;
350  cell->line = line;
351 
352  arr->n_reserved++;
353 
354  *index = i;
355 
356  sync_array_exit(arr);
357 
358  /* Make sure the event is reset and also store
359  the value of signal_count at which the event
360  was reset. */
361  event = sync_cell_get_event(cell);
362  cell->signal_count = os_event_reset(event);
363 
364  cell->reservation_time = ut_time();
365 
366  cell->thread = os_thread_get_curr_id();
367 
368  return;
369  }
370  }
371 
372  ut_error; /* No free cell found */
373 
374  return;
375 }
376 
377 /******************************************************************/
382 UNIV_INTERN
383 void
384 sync_array_wait_event(
385 /*==================*/
386  sync_array_t* arr,
387  ulint index)
388 {
389  sync_cell_t* cell;
390  os_event_t event;
391 
392  ut_a(arr);
393 
394  sync_array_enter(arr);
395 
396  cell = sync_array_get_nth_cell(arr, index);
397 
398  ut_a(cell->wait_object);
399  ut_a(!cell->waiting);
400  ut_ad(os_thread_get_curr_id() == cell->thread);
401 
402  event = sync_cell_get_event(cell);
403  cell->waiting = TRUE;
404 
405 #ifdef UNIV_SYNC_DEBUG
406 
407  /* We use simple enter to the mutex below, because if
408  we cannot acquire it at once, mutex_enter would recursively
409  call sync_array routines, leading to trouble.
410  rw_lock_debug_mutex freezes the debug lists. */
411 
412  rw_lock_debug_mutex_enter();
413 
414  if (TRUE == sync_array_detect_deadlock(arr, cell, cell, 0)) {
415 
416  fputs("########################################\n", stderr);
417  ut_error;
418  }
419 
420  rw_lock_debug_mutex_exit();
421 #endif
422  sync_array_exit(arr);
423 
424  os_event_wait_low(event, cell->signal_count);
425 
426  sync_array_free_cell(arr, index);
427 }
428 
429 /******************************************************************/
431 static
432 void
433 sync_array_cell_print(
434 /*==================*/
435  FILE* file,
436  sync_cell_t* cell)
437 {
438  ib_mutex_t* mutex;
439  rw_lock_t* rwlock;
440  ulint type;
441  ulint writer;
442 
443  type = cell->request_type;
444 
445  fprintf(file,
446  "--Thread %lu has waited at %s line %lu"
447  " for %.2f seconds the semaphore:\n",
448  (ulong) os_thread_pf(cell->thread),
449  innobase_basename(cell->file), (ulong) cell->line,
450  difftime(time(NULL), cell->reservation_time));
451 
452  if (type == SYNC_MUTEX) {
453  /* We use old_wait_mutex in case the cell has already
454  been freed meanwhile */
455  mutex = cell->old_wait_mutex;
456 
457  fprintf(file,
458  "Mutex at %p created file %s line %lu, lock var %lu\n"
459 #ifdef UNIV_SYNC_DEBUG
460  "Last time reserved in file %s line %lu, "
461 #endif /* UNIV_SYNC_DEBUG */
462  "waiters flag %lu\n",
463  (void*) mutex, innobase_basename(mutex->cfile_name),
464  (ulong) mutex->cline,
465  (ulong) mutex->lock_word,
466 #ifdef UNIV_SYNC_DEBUG
467  mutex->file_name, (ulong) mutex->line,
468 #endif /* UNIV_SYNC_DEBUG */
469  (ulong) mutex->waiters);
470 
471  } else if (type == RW_LOCK_EX
472  || type == RW_LOCK_WAIT_EX
473  || type == RW_LOCK_SHARED) {
474 
475  fputs(type == RW_LOCK_EX ? "X-lock on"
476  : type == RW_LOCK_WAIT_EX ? "X-lock (wait_ex) on"
477  : "S-lock on", file);
478 
479  rwlock = cell->old_wait_rw_lock;
480 
481  fprintf(file,
482  " RW-latch at %p created in file %s line %lu\n",
483  (void*) rwlock, innobase_basename(rwlock->cfile_name),
484  (ulong) rwlock->cline);
485  writer = rw_lock_get_writer(rwlock);
486  if (writer != RW_LOCK_NOT_LOCKED) {
487  fprintf(file,
488  "a writer (thread id %lu) has"
489  " reserved it in mode %s",
490  (ulong) os_thread_pf(rwlock->writer_thread),
491  writer == RW_LOCK_EX
492  ? " exclusive\n"
493  : " wait exclusive\n");
494  }
495 
496  fprintf(file,
497  "number of readers %lu, waiters flag %lu, "
498  "lock_word: %lx\n"
499  "Last time read locked in file %s line %lu\n"
500  "Last time write locked in file %s line %lu\n",
501  (ulong) rw_lock_get_reader_count(rwlock),
502  (ulong) rwlock->waiters,
503  rwlock->lock_word,
504  rwlock->last_s_file_name,
505  (ulong) rwlock->last_s_line,
506  rwlock->last_x_file_name,
507  (ulong) rwlock->last_x_line);
508  } else {
509  ut_error;
510  }
511 
512  if (!cell->waiting) {
513  fputs("wait has ended\n", file);
514  }
515 }
516 
517 #ifdef UNIV_SYNC_DEBUG
518 /******************************************************************/
521 static
522 sync_cell_t*
523 sync_array_find_thread(
524 /*===================*/
525  sync_array_t* arr,
526  os_thread_id_t thread)
527 {
528  ulint i;
529  sync_cell_t* cell;
530 
531  for (i = 0; i < arr->n_cells; i++) {
532 
533  cell = sync_array_get_nth_cell(arr, i);
534 
535  if (cell->wait_object != NULL
536  && os_thread_eq(cell->thread, thread)) {
537 
538  return(cell); /* Found */
539  }
540  }
541 
542  return(NULL); /* Not found */
543 }
544 
545 /******************************************************************/
548 static
549 ibool
550 sync_array_deadlock_step(
551 /*=====================*/
552  sync_array_t* arr,
554  sync_cell_t* start,
556  os_thread_id_t thread,
557  ulint pass,
558  ulint depth)
559 {
560  sync_cell_t* new_cell;
561 
562  if (pass != 0) {
563  /* If pass != 0, then we do not know which threads are
564  responsible for releasing the lock, and no deadlock can
565  be detected. */
566 
567  return(FALSE);
568  }
569 
570  new_cell = sync_array_find_thread(arr, thread);
571 
572  if (new_cell == start) {
573  /* Deadlock */
574  fputs("########################################\n"
575  "DEADLOCK of threads detected!\n", stderr);
576 
577  return(TRUE);
578 
579  } else if (new_cell) {
580  return(sync_array_detect_deadlock(
581  arr, start, new_cell, depth + 1));
582  }
583  return(FALSE);
584 }
585 
586 /******************************************************************/
590 static
591 ibool
592 sync_array_detect_deadlock(
593 /*=======================*/
594  sync_array_t* arr,
596  sync_cell_t* start,
597  sync_cell_t* cell,
598  ulint depth)
599 {
600  ib_mutex_t* mutex;
601  rw_lock_t* lock;
602  os_thread_id_t thread;
603  ibool ret;
604  rw_lock_debug_t*debug;
605 
606  ut_a(arr);
607  ut_a(start);
608  ut_a(cell);
609  ut_ad(cell->wait_object);
610  ut_ad(os_thread_get_curr_id() == start->thread);
611  ut_ad(depth < 100);
612 
613  depth++;
614 
615  if (!cell->waiting) {
616 
617  return(FALSE); /* No deadlock here */
618  }
619 
620  if (cell->request_type == SYNC_MUTEX) {
621 
622  mutex = static_cast<ib_mutex_t*>(cell->wait_object);
623 
624  if (mutex_get_lock_word(mutex) != 0) {
625 
626  thread = mutex->thread_id;
627 
628  /* Note that mutex->thread_id above may be
629  also OS_THREAD_ID_UNDEFINED, because the
630  thread which held the mutex may not have
631  updated the value yet, or it has already
632  released the mutex: in this case no deadlock
633  can occur, as the wait array cannot contain
634  a thread with ID_UNDEFINED value. */
635 
636  ret = sync_array_deadlock_step(arr, start, thread, 0,
637  depth);
638  if (ret) {
639  fprintf(stderr,
640  "Mutex %p owned by thread %lu file %s line %lu\n",
641  mutex, (ulong) os_thread_pf(mutex->thread_id),
642  mutex->file_name, (ulong) mutex->line);
643  sync_array_cell_print(stderr, cell);
644 
645  return(TRUE);
646  }
647  }
648 
649  return(FALSE); /* No deadlock */
650 
651  } else if (cell->request_type == RW_LOCK_EX
652  || cell->request_type == RW_LOCK_WAIT_EX) {
653 
654  lock = static_cast<rw_lock_t*>(cell->wait_object);
655 
656  for (debug = UT_LIST_GET_FIRST(lock->debug_list);
657  debug != 0;
658  debug = UT_LIST_GET_NEXT(list, debug)) {
659 
660  thread = debug->thread_id;
661 
662  if (((debug->lock_type == RW_LOCK_EX)
663  && !os_thread_eq(thread, cell->thread))
664  || ((debug->lock_type == RW_LOCK_WAIT_EX)
665  && !os_thread_eq(thread, cell->thread))
666  || (debug->lock_type == RW_LOCK_SHARED)) {
667 
668  /* The (wait) x-lock request can block
669  indefinitely only if someone (possibly the cell
670  thread itself) holds an s-lock, or someone else
671  (not the cell thread) holds a (wait) x-lock,
672  and that holder is in turn blocked by the start thread */
673 
674  ret = sync_array_deadlock_step(
675  arr, start, thread, debug->pass,
676  depth);
677  if (ret) {
678 print:
679  fprintf(stderr, "rw-lock %p ",
680  (void*) lock);
681  sync_array_cell_print(stderr, cell);
682  rw_lock_debug_print(stderr, debug);
683  return(TRUE);
684  }
685  }
686  }
687 
688  return(FALSE);
689 
690  } else if (cell->request_type == RW_LOCK_SHARED) {
691 
692  lock = static_cast<rw_lock_t*>(cell->wait_object);
693 
694  for (debug = UT_LIST_GET_FIRST(lock->debug_list);
695  debug != 0;
696  debug = UT_LIST_GET_NEXT(list, debug)) {
697 
698  thread = debug->thread_id;
699 
700  if ((debug->lock_type == RW_LOCK_EX)
701  || (debug->lock_type == RW_LOCK_WAIT_EX)) {
702 
703  /* The s-lock request can block indefinitely
704  only if someone (possibly the cell thread
705  itself) holds a (wait) x-lock and is in turn
706  blocked by the start thread */
707 
708  ret = sync_array_deadlock_step(
709  arr, start, thread, debug->pass,
710  depth);
711  if (ret) {
712  goto print;
713  }
714  }
715  }
716 
717  return(FALSE);
718 
719  } else {
720  ut_error;
721  }
722 
723  return(TRUE); /* Execution never reaches this line: for compiler
724  fooling only */
725 }
726 #endif /* UNIV_SYNC_DEBUG */
727 
728 /******************************************************************/
730 static
731 ibool
732 sync_arr_cell_can_wake_up(
733 /*======================*/
734  sync_cell_t* cell)
735 {
736  ib_mutex_t* mutex;
737  rw_lock_t* lock;
738 
739  if (cell->request_type == SYNC_MUTEX) {
740 
741  mutex = static_cast<ib_mutex_t*>(cell->wait_object);
742 
743  if (mutex_get_lock_word(mutex) == 0) {
744 
745  return(TRUE);
746  }
747 
748  } else if (cell->request_type == RW_LOCK_EX) {
749 
750  lock = static_cast<rw_lock_t*>(cell->wait_object);
751 
752  if (lock->lock_word > 0) {
753  /* Either unlocked or only read locked. */
754 
755  return(TRUE);
756  }
757 
758  } else if (cell->request_type == RW_LOCK_WAIT_EX) {
759 
760  lock = static_cast<rw_lock_t*>(cell->wait_object);
761 
762  /* lock_word == 0 means all readers have left */
763  if (lock->lock_word == 0) {
764 
765  return(TRUE);
766  }
767  } else if (cell->request_type == RW_LOCK_SHARED) {
768  lock = static_cast<rw_lock_t*>(cell->wait_object);
769 
770  /* lock_word > 0 means no writer or reserved writer */
771  if (lock->lock_word > 0) {
772 
773  return(TRUE);
774  }
775  }
776 
777  return(FALSE);
778 }
779 
780 /******************************************************************/
783 UNIV_INTERN
784 void
785 sync_array_free_cell(
786 /*=================*/
787  sync_array_t* arr,
788  ulint index)
789 {
790  sync_cell_t* cell;
791 
792  sync_array_enter(arr);
793 
794  cell = sync_array_get_nth_cell(arr, index);
795 
796  ut_a(cell->wait_object != NULL);
797 
798  cell->waiting = FALSE;
799  cell->wait_object = NULL;
800  cell->signal_count = 0;
801 
802  ut_a(arr->n_reserved > 0);
803  arr->n_reserved--;
804 
805  sync_array_exit(arr);
806 }
807 
808 /**********************************************************************/
810 UNIV_INTERN
811 void
812 sync_array_object_signalled(void)
813 /*=============================*/
814 {
815 #ifdef HAVE_ATOMIC_BUILTINS
816  (void) os_atomic_increment_ulint(&sg_count, 1);
817 #else
818  ++sg_count;
819 #endif /* HAVE_ATOMIC_BUILTINS */
820 }
821 
822 /**********************************************************************/
830 static
831 void
832 sync_array_wake_threads_if_sema_free_low(
833 /*=====================================*/
834  sync_array_t* arr) /* in/out: wait array */
835 {
836  ulint i = 0;
837  ulint count;
838 
839  sync_array_enter(arr);
840 
841  for (count = 0; count < arr->n_reserved; ++i) {
842  sync_cell_t* cell;
843 
844  cell = sync_array_get_nth_cell(arr, i);
845 
846  if (cell->wait_object != NULL) {
847 
848  count++;
849 
850  if (sync_arr_cell_can_wake_up(cell)) {
851  os_event_t event;
852 
853  event = sync_cell_get_event(cell);
854 
855  os_event_set(event);
856  }
857  }
858  }
859 
860  sync_array_exit(arr);
861 }
862 
863 /**********************************************************************/
871 UNIV_INTERN
872 void
873 sync_array_wake_threads_if_sema_free(void)
874 /*====================================*/
875 {
876  ulint i;
877 
878  for (i = 0; i < sync_array_size; ++i) {
879 
880  sync_array_wake_threads_if_sema_free_low(
881  sync_wait_array[i]);
882  }
883 }
884 
885 /**********************************************************************/
888 static
889 ibool
890 sync_array_print_long_waits_low(
891 /*============================*/
892  sync_array_t* arr,
893  os_thread_id_t* waiter,
894  const void** sema,
895  ibool* noticed)
896 {
897  ulint i;
898  ulint fatal_timeout = srv_fatal_semaphore_wait_threshold;
899  ibool fatal = FALSE;
900  double longest_diff = 0;
901 
902  /* For huge tables, skip the check during CHECK TABLE etc... */
903  if (fatal_timeout > SRV_SEMAPHORE_WAIT_EXTENSION) {
904  return(FALSE);
905  }
906 
907 #ifdef UNIV_DEBUG_VALGRIND
908  /* Increase the timeouts if running under valgrind because it executes
909  extremely slowly. UNIV_DEBUG_VALGRIND does not necessarily mean that
910  we are running under valgrind but we have no better way to tell.
911  See Bug#58432 innodb.innodb_bug56143 fails under valgrind
912  for an example */
913 # define SYNC_ARRAY_TIMEOUT 2400
914  fatal_timeout *= 10;
915 #else
916 # define SYNC_ARRAY_TIMEOUT 240
917 #endif
918 
919  for (i = 0; i < arr->n_cells; i++) {
920 
921  double diff;
922  sync_cell_t* cell;
923  void* wait_object;
924 
925  cell = sync_array_get_nth_cell(arr, i);
926 
927  wait_object = cell->wait_object;
928 
929  if (wait_object == NULL || !cell->waiting) {
930 
931  continue;
932  }
933 
934  diff = difftime(time(NULL), cell->reservation_time);
935 
936  if (diff > SYNC_ARRAY_TIMEOUT) {
937  fputs("InnoDB: Warning: a long semaphore wait:\n",
938  stderr);
939  sync_array_cell_print(stderr, cell);
940  *noticed = TRUE;
941  }
942 
943  if (diff > fatal_timeout) {
944  fatal = TRUE;
945  }
946 
947  if (diff > longest_diff) {
948  longest_diff = diff;
949  *sema = wait_object;
950  *waiter = cell->thread;
951  }
952  }
953 
954 #undef SYNC_ARRAY_TIMEOUT
955 
956  return(fatal);
957 }
958 
959 /**********************************************************************/
962 UNIV_INTERN
963 ibool
964 sync_array_print_long_waits(
965 /*========================*/
966  os_thread_id_t* waiter,
967  const void** sema)
968 {
969  ulint i;
970  ibool fatal = FALSE;
971  ibool noticed = FALSE;
972 
973  for (i = 0; i < sync_array_size; ++i) {
974 
975  sync_array_t* arr = sync_wait_array[i];
976 
977  sync_array_enter(arr);
978 
979  if (sync_array_print_long_waits_low(
980  arr, waiter, sema, &noticed)) {
981 
982  fatal = TRUE;
983  }
984 
985  sync_array_exit(arr);
986  }
987 
988  if (noticed) {
989  ibool old_val;
990 
991  fprintf(stderr,
992  "InnoDB: ###### Starts InnoDB Monitor"
993  " for 30 secs to print diagnostic info:\n");
994 
995  old_val = srv_print_innodb_monitor;
996 
997  /* If some crucial semaphore is reserved, then also the InnoDB
998  Monitor can hang, and we do not get diagnostics. Since in
999  many cases an InnoDB hang is caused by a pwrite() or a pread()
1000  call hanging inside the operating system, let us print right
1001  now the counts of these pending calls. */
1002 
1003  fprintf(stderr,
1004  "InnoDB: Pending preads %lu, pwrites %lu\n",
1005  (ulong) os_file_n_pending_preads,
1006  (ulong) os_file_n_pending_pwrites);
1007 
1008  srv_print_innodb_monitor = TRUE;
1010 
1011  os_thread_sleep(30000000);
1012 
1013  srv_print_innodb_monitor = old_val;
1014  fprintf(stderr,
1015  "InnoDB: ###### Diagnostic info printed"
1016  " to the standard error stream\n");
1017  }
1018 
1019  return(fatal);
1020 }
1021 
1022 /**********************************************************************/
1024 static
1025 void
1026 sync_array_print_info_low(
1027 /*======================*/
1028  FILE* file,
1029  sync_array_t* arr)
1030 {
1031  ulint i;
1032  ulint count = 0;
1033 
1034  fprintf(file,
1035  "OS WAIT ARRAY INFO: reservation count %ld\n",
1036  (long) arr->res_count);
1037 
1038  for (i = 0; count < arr->n_reserved; ++i) {
1039  sync_cell_t* cell;
1040 
1041  cell = sync_array_get_nth_cell(arr, i);
1042 
1043  if (cell->wait_object != NULL) {
1044  count++;
1045  sync_array_cell_print(file, cell);
1046  }
1047  }
1048 }
1049 
1050 /**********************************************************************/
1052 static
1053 void
1054 sync_array_print_info(
1055 /*==================*/
1056  FILE* file,
1057  sync_array_t* arr)
1058 {
1059  sync_array_enter(arr);
1060 
1061  sync_array_print_info_low(file, arr);
1062 
1063  sync_array_exit(arr);
1064 }
1065 
1066 /**********************************************************************/
1068 UNIV_INTERN
1069 void
1070 sync_array_init(
1071 /*============*/
1072  ulint n_threads)
1074 {
1075  ulint i;
1076  ulint n_slots;
1077 
1078  ut_a(sync_wait_array == NULL);
1080  ut_a(n_threads > srv_sync_array_size);
1081 
1082  sync_array_size = srv_sync_array_size;
1083 
1084  /* We have to use ut_malloc() because the mutex infrastructure
1085  hasn't been initialised yet. It is required by mem_alloc() and
1086  the heap functions. */
1087 
1088  sync_wait_array = static_cast<sync_array_t**>(
1089  ut_malloc(sizeof(*sync_wait_array) * sync_array_size));
1090 
1091  n_slots = 1 + (n_threads - 1) / sync_array_size;
1092 
1093  for (i = 0; i < sync_array_size; ++i) {
1094 
1095  sync_wait_array[i] = sync_array_create(n_slots);
1096  }
1097 }
1098 
1099 /**********************************************************************/
1101 UNIV_INTERN
1102 void
1103 sync_array_close(void)
1104 /*==================*/
1105 {
1106  ulint i;
1107 
1108  for (i = 0; i < sync_array_size; ++i) {
1109  sync_array_free(sync_wait_array[i]);
1110  }
1111 
1112  ut_free(sync_wait_array);
1113  sync_wait_array = NULL;
1114 }
1115 
1116 /**********************************************************************/
1118 UNIV_INTERN
1119 void
1120 sync_array_print(
1121 /*=============*/
1122  FILE* file)
1123 {
1124  ulint i;
1125 
1126  for (i = 0; i < sync_array_size; ++i) {
1127  sync_array_print_info(file, sync_wait_array[i]);
1128  }
1129 
1130  fprintf(file,
1131  "OS WAIT ARRAY INFO: signal count %ld\n", (long) sg_count);
1132 
1133 }
1134 
1135 /**********************************************************************/
1137 UNIV_INTERN
1138 sync_array_t*
1139 sync_array_get(void)
1140 /*================*/
1141 {
1142  ulint i;
1143  static ulint count;
1144 
1145 #ifdef HAVE_ATOMIC_BUILTINS
1146  i = os_atomic_increment_ulint(&count, 1);
1147 #else
1148  i = count++;
1149 #endif /* HAVE_ATOMIC_BUILTINS */
1150 
1151  return(sync_wait_array[i % sync_array_size]);
1152 }