MySQL 5.6.14 Source Code Document
trx0i_s.cc
/*****************************************************************************

Copyright (c) 2007, 2012, Oracle and/or its affiliates. All Rights Reserved.

This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
Foundation; version 2 of the License.

This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.

You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Suite 500, Boston, MA 02110-1335 USA

*****************************************************************************/
/**************************************************//**
@file trx/trx0i_s.cc
INFORMATION SCHEMA innodb_trx, innodb_locks and
innodb_lock_waits tables fetch code.

The code below fetches information needed to fill those
3 dynamic tables and uploads it into a "transactions
table cache" for later retrieval.

Created July 17, 2007 Vasil Dimov
*******************************************************/

/* Found during the build of 5.5.3 on Linux 2.4 and early 2.6 kernels:
The includes "univ.i" -> "my_global.h" cause a different path
to be taken further down with pthread functions and types,
so they must come first.
From the symptoms, this is related to bug#46587 in the MySQL bug DB. */
#include "univ.i"

#include <mysql/plugin.h>

#include "buf0buf.h"
#include "dict0dict.h"
#include "ha0storage.h"
#include "ha_prototypes.h"
#include "hash0hash.h"
#include "lock0iter.h"
#include "lock0lock.h"
#include "mem0mem.h"
#include "page0page.h"
#include "rem0rec.h"
#include "row0row.h"
#include "srv0srv.h"
#include "sync0rw.h"
#include "sync0sync.h"
#include "sync0types.h"
#include "trx0i_s.h"
#include "trx0sys.h"
#include "trx0trx.h"
#include "ut0mem.h"
#include "ut0ut.h"

/** Initial number of rows in the table cache */
#define TABLE_CACHE_INITIAL_ROWSNUM 1024

/** @brief The maximum number of chunks to allocate for a table cache.

The rows of a table cache are stored in a set of memory chunks; see
table_cache_create_empty_row() for the growth policy. Starting at
TABLE_CACHE_INITIAL_ROWSNUM rows and growing by a factor of 1.5 per
chunk, 39 chunks are more than enough to cover any cache size allowed
by TRX_I_S_MEM_LIMIT. */
#define MEM_CHUNKS_IN_TABLE_CACHE 39

/** The following are testing auxiliary macros. Do not enable them in a
production environment. */
/* @{ */

#if 0
/** If defined then fold_lock() returns a new, unique fold value on
every call, so every row lands in a different hash cell and the
duplicate check never finds an existing row. */
#define TEST_LOCK_FOLD_ALWAYS_DIFFERENT
#endif

#if 0
/** If defined then locks_row_eq_lock() never reports a match, so the
search for duplicates always fails and every lock is inserted anew. */
#define TEST_NO_LOCKS_ROW_IS_EVER_EQUAL_TO_LOCK_T
#endif

#if 0
/** If defined then add_lock_to_cache() repeats its work many times for
each lock, to stress the duplicate detection code. */
#define TEST_ADD_EACH_LOCKS_ROW_MANY_TIMES
#endif

#if 0
/** If defined then the check whether a lock row is already present in
the cache is skipped before adding a new row. */
#define TEST_DO_NOT_CHECK_FOR_DUPLICATE_ROWS
#endif

#if 0
/** If defined then new lock rows are not inserted into the hash table
that is used for duplicate detection. */
#define TEST_DO_NOT_INSERT_INTO_THE_HASH_TABLE
#endif
/* @} */

/** Memory limit passed to ha_storage_put_memlim(): do not allocate
more string storage than this many bytes beyond what the cache has
already allocated. */
#define MAX_ALLOWED_FOR_STORAGE(cache) \
    (TRX_I_S_MEM_LIMIT \
     - (cache)->mem_allocd)

/** Memory limit used in table_cache_create_empty_row(): a new row
chunk must also leave room for what the string storage has already
consumed. */
#define MAX_ALLOWED_FOR_ALLOC(cache) \
    (TRX_I_S_MEM_LIMIT \
     - (cache)->mem_allocd \
     - ha_storage_get_size((cache)->storage))

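/* Worked example (not part of the original file; assumes the usual
TRX_I_S_MEM_LIMIT of 16 MiB): if the cache has already mem_alloc'd
4 MiB of row chunks and its ha_storage_t holds 1 MiB of strings, then
MAX_ALLOWED_FOR_STORAGE() permits up to 12 MiB more string data, while
MAX_ALLOWED_FOR_ALLOC() permits only 11 MiB for a new row chunk,
because chunk allocation must also account for the string storage. */
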
/** Memory for each of the INFORMATION SCHEMA tables is allocated in
separate chunks, which together are treated as one flat array of
rows. */
struct i_s_mem_chunk_t {
    ulint offset;      /*!< offset of this chunk, in number of rows */
    ulint rows_allocd; /*!< capacity of this chunk, in number of
                       rows */
    void* base;        /*!< start of the chunk's memory */
};

/** This represents one table's cache. */
struct i_s_table_cache_t {
    ulint rows_used;   /*!< number of rows in use */
    ulint rows_allocd; /*!< number of rows allocated */
    ulint row_size;    /*!< size of a single row, in bytes */
    i_s_mem_chunk_t chunks[MEM_CHUNKS_IN_TABLE_CACHE]; /*!< array of
                       memory chunks that store the rows */
};

/** This structure describes the intermediate buffer */
struct trx_i_s_cache_t {
    rw_lock_t rw_lock;          /*!< read-write lock protecting
                                the rest of this structure */
    ullint last_read;           /*!< last time the cache was read,
                                in microseconds since epoch */
    ib_mutex_t last_read_mutex; /*!< mutex protecting last_read */
    i_s_table_cache_t innodb_trx;        /*!< innodb_trx table */
    i_s_table_cache_t innodb_locks;      /*!< innodb_locks table */
    i_s_table_cache_t innodb_lock_waits; /*!< innodb_lock_waits table */
/** the hash table size is LOCKS_HASH_CELLS_NUM * sizeof(void*) bytes */
#define LOCKS_HASH_CELLS_NUM 10000
    hash_table_t* locks_hash;   /*!< hash table used to eliminate
                                duplicate entries in the innodb_locks
                                table */
/** Initial size of the cache storage */
#define CACHE_STORAGE_INITIAL_SIZE 1024
/** Number of hash cells in the cache storage */
#define CACHE_STORAGE_HASH_CELLS 2048
    ha_storage_t* storage;      /*!< storage for volatile strings that
                                may become unavailable once we release
                                lock_sys->mutex or trx_sys->mutex */
    ulint mem_allocd;           /*!< number of bytes allocated with
                                mem_alloc*() for the table caches */
    ibool is_truncated;         /*!< TRUE if the memory limit was hit
                                and thus the data in the cache is
                                truncated */
};

/** This is the intermediate buffer where data needed to fill the
INFORMATION SCHEMA tables is fetched and later retrieved by the C++
code in handler/i_s.cc. */
static trx_i_s_cache_t trx_i_s_cache_static;

/** Pointer to the above, used by the C++ code in handler/i_s.cc. */
UNIV_INTERN trx_i_s_cache_t* trx_i_s_cache = &trx_i_s_cache_static;

/* Keys to register the lock/mutex with performance schema */
#ifdef UNIV_PFS_RWLOCK
UNIV_INTERN mysql_pfs_key_t trx_i_s_cache_lock_key;
#endif /* UNIV_PFS_RWLOCK */

#ifdef UNIV_PFS_MUTEX
UNIV_INTERN mysql_pfs_key_t cache_last_read_mutex_key;
#endif /* UNIV_PFS_MUTEX */

/*******************************************************************//**
For a record lock that is in waiting state retrieves the only bit that
is set; for a table lock returns ULINT_UNDEFINED.
@return heap number of the record being waited on, or ULINT_UNDEFINED
for a table lock */
static
ulint
wait_lock_get_heap_no(
/*==================*/
    const lock_t* lock) /*!< in: lock */
{
    ulint ret;

    switch (lock_get_type(lock)) {
    case LOCK_REC:
        ret = lock_rec_find_set_bit(lock);
        ut_a(ret != ULINT_UNDEFINED);
        break;
    case LOCK_TABLE:
        ret = ULINT_UNDEFINED;
        break;
    default:
        ut_error;
    }

    return(ret);
}

/*******************************************************************//**
Initializes the members of a table cache. */
static
void
table_cache_init(
/*=============*/
    i_s_table_cache_t* table_cache, /*!< out: table cache */
    size_t row_size)                /*!< in: the size of a row */
{
    ulint i;

    table_cache->rows_used = 0;
    table_cache->rows_allocd = 0;
    table_cache->row_size = row_size;

    for (i = 0; i < MEM_CHUNKS_IN_TABLE_CACHE; i++) {

        /* the memory is actually allocated in
        table_cache_create_empty_row() */
        table_cache->chunks[i].base = NULL;
    }
}

/*******************************************************************//**
Frees a table cache. */
static
void
table_cache_free(
/*=============*/
    i_s_table_cache_t* table_cache) /*!< in/out: table cache */
{
    ulint i;

    for (i = 0; i < MEM_CHUNKS_IN_TABLE_CACHE; i++) {

        /* the memory is actually allocated in
        table_cache_create_empty_row() */
        if (table_cache->chunks[i].base) {
            mem_free(table_cache->chunks[i].base);
            table_cache->chunks[i].base = NULL;
        }
    }
}

/*******************************************************************//**
Returns an empty row from a table cache. The row is allocated if no more
empty rows are available. The number of used rows is incremented. If the
memory limit is hit then NULL is returned and nothing is allocated.
@return empty row, or NULL if out of memory */
static
void*
table_cache_create_empty_row(
/*=========================*/
    i_s_table_cache_t* table_cache, /*!< in/out: table cache */
    trx_i_s_cache_t* cache)         /*!< in/out: cache to record
                                    how many bytes are allocated */
{
    ulint i;
    void* row;

    ut_a(table_cache->rows_used <= table_cache->rows_allocd);

    if (table_cache->rows_used == table_cache->rows_allocd) {

        /* rows_used == rows_allocd means that a new chunk needs
        to be allocated: either there are no more empty rows in
        the last allocated chunk or nothing has been allocated
        yet (rows_used == rows_allocd == 0); */

        i_s_mem_chunk_t* chunk;
        ulint req_bytes;
        ulint got_bytes;
        ulint req_rows;
        ulint got_rows;

        /* find the first not allocated chunk */
        for (i = 0; i < MEM_CHUNKS_IN_TABLE_CACHE; i++) {

            if (table_cache->chunks[i].base == NULL) {

                break;
            }
        }

        /* i == MEM_CHUNKS_IN_TABLE_CACHE means that all chunks
        have been allocated :-X */
        ut_a(i < MEM_CHUNKS_IN_TABLE_CACHE);

        /* allocate the chunk we just found */

        if (i == 0) {

            /* first chunk, nothing is allocated yet */
            req_rows = TABLE_CACHE_INITIAL_ROWSNUM;
        } else {

            /* Memory is increased by the formula
            new = old + old / 2. We are trying not to be
            aggressive here (i.e. not using the common
            new = old * 2) because the allocated memory is not
            freed until InnoDB exit (it is reused). So it is
            better to allocate the memory in more steps and
            have less unused/wasted memory, than to use fewer
            allocation steps (which happen once in a lifetime)
            but end up with lots of unused/wasted memory. */
            req_rows = table_cache->rows_allocd / 2;
        }
        req_bytes = req_rows * table_cache->row_size;

        if (req_bytes > MAX_ALLOWED_FOR_ALLOC(cache)) {

            return(NULL);
        }

        chunk = &table_cache->chunks[i];

        chunk->base = mem_alloc2(req_bytes, &got_bytes);

        got_rows = got_bytes / table_cache->row_size;

        cache->mem_allocd += got_bytes;

#if 0
        printf("allocating chunk %d req bytes=%lu, got bytes=%lu, "
               "row size=%lu, "
               "req rows=%lu, got rows=%lu\n",
               i, req_bytes, got_bytes,
               table_cache->row_size,
               req_rows, got_rows);
#endif

        chunk->rows_allocd = got_rows;

        table_cache->rows_allocd += got_rows;

        /* adjust the offset of the next chunk */
        if (i < MEM_CHUNKS_IN_TABLE_CACHE - 1) {

            table_cache->chunks[i + 1].offset
                = chunk->offset + chunk->rows_allocd;
        }

        /* return the first empty row in the newly allocated
        chunk */
        row = chunk->base;
    } else {

        char* chunk_start;
        ulint offset;

        /* there is an empty row, no need to allocate new
        chunks */

        /* find the first chunk that contains allocated but
        empty/unused rows */
        for (i = 0; i < MEM_CHUNKS_IN_TABLE_CACHE; i++) {

            if (table_cache->chunks[i].offset
                + table_cache->chunks[i].rows_allocd
                > table_cache->rows_used) {

                break;
            }
        }

        /* i == MEM_CHUNKS_IN_TABLE_CACHE means that all chunks
        are full, but table_cache->rows_used !=
        table_cache->rows_allocd means exactly the opposite -
        that there are allocated but empty/unused rows :-X */
        ut_a(i < MEM_CHUNKS_IN_TABLE_CACHE);

        chunk_start = (char*) table_cache->chunks[i].base;
        offset = table_cache->rows_used
            - table_cache->chunks[i].offset;

        row = chunk_start + offset * table_cache->row_size;
    }

    table_cache->rows_used++;

    return(row);
}

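/* Illustration (not part of the original file): the new = old + old / 2
growth rule above produces chunk sizes 1024, 512, 768, 1152, 1728, ...
so the total capacity grows by a factor of 1.5 per chunk. A minimal
standalone sketch of the arithmetic: */
#if 0
#include <stdio.h>

int
main(void)
{
    unsigned long rows_allocd = 0; /* total rows allocated so far */
    int i;

    for (i = 0; i < 5; i++) {
        /* same growth rule as table_cache_create_empty_row() */
        unsigned long req_rows = (i == 0) ? 1024UL : rows_allocd / 2;

        rows_allocd += req_rows;
        printf("chunk %d: %lu rows, total %lu\n",
               i, req_rows, rows_allocd);
    }
    /* prints totals 1024, 1536, 2304, 3456, 5184 */

    return(0);
}
#endif
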
#ifdef UNIV_DEBUG
/*******************************************************************//**
Validates a row in the locks cache.
@return TRUE if valid */
static
ibool
i_s_locks_row_validate(
/*===================*/
    const i_s_locks_row_t* row) /*!< in: row to validate */
{
    ut_ad(row->lock_trx_id != 0);
    ut_ad(row->lock_mode != NULL);
    ut_ad(row->lock_type != NULL);
    ut_ad(row->lock_table != NULL);
    ut_ad(row->lock_table_id != 0);

    if (row->lock_space == ULINT_UNDEFINED) {
        /* table lock */
        ut_ad(!strcmp("TABLE", row->lock_type));
        ut_ad(row->lock_index == NULL);
        ut_ad(row->lock_data == NULL);
        ut_ad(row->lock_page == ULINT_UNDEFINED);
        ut_ad(row->lock_rec == ULINT_UNDEFINED);
    } else {
        /* record lock */
        ut_ad(!strcmp("RECORD", row->lock_type));
        ut_ad(row->lock_index != NULL);
        /* row->lock_data == NULL if buf_page_try_get() == NULL */
        ut_ad(row->lock_page != ULINT_UNDEFINED);
        ut_ad(row->lock_rec != ULINT_UNDEFINED);
    }

    return(TRUE);
}
#endif /* UNIV_DEBUG */

/*******************************************************************//**
Fills an i_s_trx_row_t object with data about a transaction.
If memory cannot be allocated then FALSE is returned.
@return FALSE if allocation fails */
static
ibool
fill_trx_row(
/*=========*/
    i_s_trx_row_t* row,                        /*!< out: result object
                                               that's filled */
    const trx_t* trx,                          /*!< in: transaction to
                                               get data from */
    const i_s_locks_row_t* requested_lock_row, /*!< in: pointer to the
                                               corresponding row in
                                               innodb_locks if trx is
                                               waiting, or NULL if it
                                               is not waiting */
    trx_i_s_cache_t* cache)                    /*!< in/out: cache into
                                               which to copy volatile
                                               strings */
{
    const char* stmt;
    size_t stmt_len;
    const char* s;

    ut_ad(lock_mutex_own());

    row->trx_id = trx->id;
    row->trx_started = (ib_time_t) trx->start_time;
    row->trx_state = trx_get_que_state_str(trx);
    row->requested_lock_row = requested_lock_row;
    ut_ad(requested_lock_row == NULL
          || i_s_locks_row_validate(requested_lock_row));

    if (trx->lock.wait_lock != NULL) {

        ut_a(requested_lock_row != NULL);
        row->trx_wait_started = (ib_time_t) trx->lock.wait_started;
    } else {
        ut_a(requested_lock_row == NULL);
        row->trx_wait_started = 0;
    }

    row->trx_weight = (ullint) TRX_WEIGHT(trx);

    if (trx->mysql_thd == NULL) {
        /* For internal transactions e.g., purge and transactions
        being recovered at startup there is no associated MySQL
        thread data structure. */
        row->trx_mysql_thread_id = 0;
        row->trx_query = NULL;
        goto thd_done;
    }

    row->trx_mysql_thread_id = thd_get_thread_id(trx->mysql_thd);

    stmt = innobase_get_stmt(trx->mysql_thd, &stmt_len);

    if (stmt != NULL) {
        char query[TRX_I_S_TRX_QUERY_MAX_LEN + 1];

        if (stmt_len > TRX_I_S_TRX_QUERY_MAX_LEN) {
            stmt_len = TRX_I_S_TRX_QUERY_MAX_LEN;
        }

        memcpy(query, stmt, stmt_len);
        query[stmt_len] = '\0';

        row->trx_query = static_cast<const char*>(
            ha_storage_put_memlim(
                cache->storage, query, stmt_len + 1,
                MAX_ALLOWED_FOR_STORAGE(cache)));

        /* memory could not be allocated */
        if (row->trx_query == NULL) {

            return(FALSE);
        }
    } else {

        row->trx_query = NULL;
    }

thd_done:
    s = trx->op_info;

    if (s != NULL && s[0] != '\0') {

        row->trx_operation_state = ha_storage_put_str_memlim(
            cache->storage, s, MAX_ALLOWED_FOR_STORAGE(cache));

        /* memory could not be allocated */
        if (row->trx_operation_state == NULL) {

            return(FALSE);
        }
    } else {

        row->trx_operation_state = NULL;
    }

    row->trx_tables_in_use = trx->n_mysql_tables_in_use;

    row->trx_tables_locked = trx->mysql_n_tables_locked;

    /* These are protected by both trx->mutex or lock_sys->mutex,
    or just lock_sys->mutex. For reading, it suffices to hold
    lock_sys->mutex. */

    row->trx_lock_structs = UT_LIST_GET_LEN(trx->lock.trx_locks);

    row->trx_lock_memory_bytes = mem_heap_get_size(trx->lock.lock_heap);

    row->trx_rows_locked = lock_number_of_rows_locked(&trx->lock);

    row->trx_rows_modified = trx->undo_no;

    row->trx_concurrency_tickets = trx->n_tickets_to_enter_innodb;

    switch (trx->isolation_level) {
    case TRX_ISO_READ_UNCOMMITTED:
        row->trx_isolation_level = "READ UNCOMMITTED";
        break;
    case TRX_ISO_READ_COMMITTED:
        row->trx_isolation_level = "READ COMMITTED";
        break;
    case TRX_ISO_REPEATABLE_READ:
        row->trx_isolation_level = "REPEATABLE READ";
        break;
    case TRX_ISO_SERIALIZABLE:
        row->trx_isolation_level = "SERIALIZABLE";
        break;
    /* Should not happen as TRX_ISO_REPEATABLE_READ is the default */
    default:
        row->trx_isolation_level = "UNKNOWN";
    }

    row->trx_unique_checks = (ibool) trx->check_unique_secondary;

    row->trx_foreign_key_checks = (ibool) trx->check_foreigns;

    s = trx->detailed_error;

    if (s != NULL && s[0] != '\0') {

        row->trx_foreign_key_error = ha_storage_put_str_memlim(
            cache->storage, s, MAX_ALLOWED_FOR_STORAGE(cache));

        /* memory could not be allocated */
        if (row->trx_foreign_key_error == NULL) {

            return(FALSE);
        }
    } else {
        row->trx_foreign_key_error = NULL;
    }

    row->trx_has_search_latch = (ibool) trx->has_search_latch;

    row->trx_search_latch_timeout = trx->search_latch_timeout;

    row->trx_is_read_only = trx->read_only;

    row->trx_is_autocommit_non_locking = trx_is_autocommit_non_locking(trx);

    return(TRUE);
}

/*******************************************************************//**
Formats the nth field of "rec" and appends it to "buf". The result is
always NUL-terminated.
@return number of bytes that were written to "buf" */
static
ulint
put_nth_field(
/*==========*/
    char* buf,                 /*!< out: buffer */
    ulint buf_size,            /*!< in: buffer size, in bytes */
    ulint n,                   /*!< in: number of field */
    const dict_index_t* index, /*!< in: index */
    const rec_t* rec,          /*!< in: record */
    const ulint* offsets)      /*!< in: record offsets, returned by
                               rec_get_offsets() */
{
    const byte* data;
    ulint data_len;
    dict_field_t* dict_field;
    ulint ret;

    ut_ad(rec_offs_validate(rec, NULL, offsets));

    if (buf_size == 0) {

        return(0);
    }

    ret = 0;

    if (n > 0) {
        /* we must append ", " before the actual data */

        if (buf_size < 3) {

            buf[0] = '\0';
            return(1);
        }

        memcpy(buf, ", ", 3);

        buf += 2;
        buf_size -= 2;
        ret += 2;
    }

    /* now buf_size >= 1 */

    data = rec_get_nth_field(rec, offsets, n, &data_len);

    dict_field = dict_index_get_nth_field(index, n);

    ret += row_raw_format((const char*) data, data_len,
                          dict_field, buf, buf_size);

    return(ret);
}

/*******************************************************************//**
Fills the "lock_data" member of an i_s_locks_row_t object.
If memory cannot be allocated then FALSE is returned.
@return FALSE if allocation fails */
static
ibool
fill_lock_data(
/*===========*/
    const char** lock_data, /*!< out: "lock_data" to fill */
    const lock_t* lock,     /*!< in: lock used to find the data */
    ulint heap_no,          /*!< in: rec num used to find the data */
    trx_i_s_cache_t* cache) /*!< in/out: cache where to store
                            volatile data */
{
    mtr_t mtr;

    const buf_block_t* block;
    const page_t* page;
    const rec_t* rec;

    ut_a(lock_get_type(lock) == LOCK_REC);

    mtr_start(&mtr);

    block = buf_page_try_get(lock_rec_get_space_id(lock),
                             lock_rec_get_page_no(lock),
                             &mtr);

    if (block == NULL) {

        *lock_data = NULL;

        mtr_commit(&mtr);

        return(TRUE);
    }

    page = (const page_t*) buf_block_get_frame(block);

    rec = page_find_rec_with_heap_no(page, heap_no);

    if (page_rec_is_infimum(rec)) {

        *lock_data = ha_storage_put_str_memlim(
            cache->storage, "infimum pseudo-record",
            MAX_ALLOWED_FOR_STORAGE(cache));
    } else if (page_rec_is_supremum(rec)) {

        *lock_data = ha_storage_put_str_memlim(
            cache->storage, "supremum pseudo-record",
            MAX_ALLOWED_FOR_STORAGE(cache));
    } else {

        const dict_index_t* index;
        ulint n_fields;
        mem_heap_t* heap;
        ulint offsets_onstack[REC_OFFS_NORMAL_SIZE];
        ulint* offsets;
        char buf[TRX_I_S_LOCK_DATA_MAX_LEN];
        ulint buf_used;
        ulint i;

        rec_offs_init(offsets_onstack);
        offsets = offsets_onstack;

        index = lock_rec_get_index(lock);

        n_fields = dict_index_get_n_unique(index);

        ut_a(n_fields > 0);

        heap = NULL;
        offsets = rec_get_offsets(rec, index, offsets, n_fields,
                                  &heap);

        /* format and store the data */

        buf_used = 0;
        for (i = 0; i < n_fields; i++) {

            buf_used += put_nth_field(
                buf + buf_used, sizeof(buf) - buf_used,
                i, index, rec, offsets) - 1;
        }

        *lock_data = (const char*) ha_storage_put_memlim(
            cache->storage, buf, buf_used + 1,
            MAX_ALLOWED_FOR_STORAGE(cache));

        if (UNIV_UNLIKELY(heap != NULL)) {

            /* this means that rec_get_offsets() has created a new
            heap and has stored offsets in it; check that this is
            really the case and free the heap */
            ut_a(offsets != offsets_onstack);
            mem_heap_free(heap);
        }
    }

    mtr_commit(&mtr);

    if (*lock_data == NULL) {

        return(FALSE);
    }

    return(TRUE);
}

/*******************************************************************//**
Fills an i_s_locks_row_t object.
If memory cannot be allocated then FALSE is returned.
@return FALSE if allocation fails */
static
ibool
fill_locks_row(
/*===========*/
    i_s_locks_row_t* row,   /*!< out: result object that's filled */
    const lock_t* lock,     /*!< in: lock to get data from */
    ulint heap_no,          /*!< in: lock's record number, or
                            ULINT_UNDEFINED if the lock is a table
                            lock */
    trx_i_s_cache_t* cache) /*!< in/out: cache into which to copy
                            volatile strings */
{
    row->lock_trx_id = lock_get_trx_id(lock);
    row->lock_mode = lock_get_mode_str(lock);
    row->lock_type = lock_get_type_str(lock);

    row->lock_table = ha_storage_put_str_memlim(
        cache->storage, lock_get_table_name(lock),
        MAX_ALLOWED_FOR_STORAGE(cache));

    /* memory could not be allocated */
    if (row->lock_table == NULL) {

        return(FALSE);
    }

    switch (lock_get_type(lock)) {
    case LOCK_REC:
        row->lock_index = ha_storage_put_str_memlim(
            cache->storage, lock_rec_get_index_name(lock),
            MAX_ALLOWED_FOR_STORAGE(cache));

        /* memory could not be allocated */
        if (row->lock_index == NULL) {

            return(FALSE);
        }

        row->lock_space = lock_rec_get_space_id(lock);
        row->lock_page = lock_rec_get_page_no(lock);
        row->lock_rec = heap_no;

        if (!fill_lock_data(&row->lock_data, lock, heap_no, cache)) {

            /* memory could not be allocated */
            return(FALSE);
        }

        break;
    case LOCK_TABLE:
        row->lock_index = NULL;

        row->lock_space = ULINT_UNDEFINED;
        row->lock_page = ULINT_UNDEFINED;
        row->lock_rec = ULINT_UNDEFINED;

        row->lock_data = NULL;

        break;
    default:
        ut_error;
    }

    row->lock_table_id = lock_get_table_id(lock);

    row->hash_chain.value = row;
    ut_ad(i_s_locks_row_validate(row));

    return(TRUE);
}

/*******************************************************************//**
Fills an i_s_lock_waits_row_t object. Returns its first argument.
@return result object that's filled */
static
i_s_lock_waits_row_t*
fill_lock_waits_row(
/*================*/
    i_s_lock_waits_row_t* row,                 /*!< out: result object
                                               that's filled */
    const i_s_locks_row_t* requested_lock_row, /*!< in: pointer to the
                                               relevant requested lock
                                               row in innodb_locks */
    const i_s_locks_row_t* blocking_lock_row)  /*!< in: pointer to the
                                               relevant blocking lock
                                               row in innodb_locks */
{
    ut_ad(i_s_locks_row_validate(requested_lock_row));
    ut_ad(i_s_locks_row_validate(blocking_lock_row));

    row->requested_lock_row = requested_lock_row;
    row->blocking_lock_row = blocking_lock_row;

    return(row);
}

/*******************************************************************//**
Calculates a hash fold for a lock. For a record lock the fold is
calculated from the 4 elements that uniquely identify it: transaction
id, space id, page number and record number. For a table lock the fold
is the table's id.
@return fold */
static
ulint
fold_lock(
/*======*/
    const lock_t* lock, /*!< in: lock object to fold */
    ulint heap_no)      /*!< in: lock's record number, or
                        ULINT_UNDEFINED if the lock is a table lock */
{
#ifdef TEST_LOCK_FOLD_ALWAYS_DIFFERENT
    static ulint fold = 0;

    return(fold++);
#else
    ulint ret;

    switch (lock_get_type(lock)) {
    case LOCK_REC:
        ut_a(heap_no != ULINT_UNDEFINED);

        ret = ut_fold_ulint_pair((ulint) lock_get_trx_id(lock),
                                 lock_rec_get_space_id(lock));

        ret = ut_fold_ulint_pair(ret,
                                 lock_rec_get_page_no(lock));

        ret = ut_fold_ulint_pair(ret, heap_no);

        break;
    case LOCK_TABLE:
        /* this check is actually not necessary for continuing
        correct operation, but something must have gone wrong if
        it fails. */
        ut_a(heap_no == ULINT_UNDEFINED);

        ret = (ulint) lock_get_table_id(lock);

        break;
    default:
        ut_error;
    }

    return(ret);
#endif
}

/*******************************************************************//**
Checks whether an i_s_locks_row_t object represents a given lock_t
object.
@return TRUE if they match */
static
ibool
locks_row_eq_lock(
/*==============*/
    const i_s_locks_row_t* row, /*!< in: innodb_locks row */
    const lock_t* lock,         /*!< in: lock object */
    ulint heap_no)              /*!< in: lock's record number, or
                                ULINT_UNDEFINED if the lock is a
                                table lock */
{
    ut_ad(i_s_locks_row_validate(row));
#ifdef TEST_NO_LOCKS_ROW_IS_EVER_EQUAL_TO_LOCK_T
    return(0);
#else
    switch (lock_get_type(lock)) {
    case LOCK_REC:
        ut_a(heap_no != ULINT_UNDEFINED);

        return(row->lock_trx_id == lock_get_trx_id(lock)
               && row->lock_space == lock_rec_get_space_id(lock)
               && row->lock_page == lock_rec_get_page_no(lock)
               && row->lock_rec == heap_no);

    case LOCK_TABLE:
        /* this check is actually not necessary for continuing
        correct operation, but something must have gone wrong if
        it fails. */
        ut_a(heap_no == ULINT_UNDEFINED);

        return(row->lock_trx_id == lock_get_trx_id(lock)
               && row->lock_table_id == lock_get_table_id(lock));

    default:
        ut_error;
        return(FALSE);
    }
#endif
}

/*******************************************************************//**
Searches for a row in the innodb_locks cache that has a specified id.
This happens in O(1) time since a hash table is used. Returns a pointer
to the row, or NULL if none is found.
@return row or NULL */
static
i_s_locks_row_t*
search_innodb_locks(
/*================*/
    trx_i_s_cache_t* cache, /*!< in: cache */
    const lock_t* lock,     /*!< in: lock to search for */
    ulint heap_no)          /*!< in: lock's record number, or
                            ULINT_UNDEFINED if the lock is a table
                            lock */
{
    i_s_hash_chain_t* hash_chain;

    HASH_SEARCH(
        /* hash_chain->"next" */
        next,
        /* the hash table */
        cache->locks_hash,
        /* fold */
        fold_lock(lock, heap_no),
        /* the type of the next variable */
        i_s_hash_chain_t*,
        /* auxiliary variable */
        hash_chain,
        /* assertion on every traversed item */
        ut_ad(i_s_locks_row_validate(hash_chain->value)),
        /* this determines if we have found the lock */
        locks_row_eq_lock(hash_chain->value, lock, heap_no));

    if (hash_chain == NULL) {

        return(NULL);
    }
    /* else */

    return(hash_chain->value);
}

/*******************************************************************//**
Adds a new element to the locks cache, enlarging it if necessary.
Returns a pointer to the added row. If the row is already present then
no row is added and a pointer to the existing row is returned. If the
row cannot be allocated then NULL is returned.
@return row */
static
i_s_locks_row_t*
add_lock_to_cache(
/*==============*/
    trx_i_s_cache_t* cache, /*!< in/out: cache */
    const lock_t* lock,     /*!< in: the element to add */
    ulint heap_no)          /*!< in: lock's record number, or
                            ULINT_UNDEFINED if the lock is a table
                            lock */
{
    i_s_locks_row_t* dst_row;

#ifdef TEST_ADD_EACH_LOCKS_ROW_MANY_TIMES
    ulint i;
    for (i = 0; i < 10000; i++) {
#endif
#ifndef TEST_DO_NOT_CHECK_FOR_DUPLICATE_ROWS
    /* quit if this lock is already present */
    dst_row = search_innodb_locks(cache, lock, heap_no);
    if (dst_row != NULL) {

        ut_ad(i_s_locks_row_validate(dst_row));
        return(dst_row);
    }
#endif

    dst_row = (i_s_locks_row_t*)
        table_cache_create_empty_row(&cache->innodb_locks, cache);

    /* memory could not be allocated */
    if (dst_row == NULL) {

        return(NULL);
    }

    if (!fill_locks_row(dst_row, lock, heap_no, cache)) {

        /* memory could not be allocated */
        cache->innodb_locks.rows_used--;
        return(NULL);
    }

#ifndef TEST_DO_NOT_INSERT_INTO_THE_HASH_TABLE
    HASH_INSERT(
        /* the type used in the hash chain */
        i_s_hash_chain_t,
        /* hash_chain->"next" */
        next,
        /* the hash table */
        cache->locks_hash,
        /* fold */
        fold_lock(lock, heap_no),
        /* add this data to the hash */
        &dst_row->hash_chain);
#endif
#ifdef TEST_ADD_EACH_LOCKS_ROW_MANY_TIMES
    } /* for()-loop */
#endif

    ut_ad(i_s_locks_row_validate(dst_row));
    return(dst_row);
}

/*******************************************************************//**
Adds a new pair of locks to the lock waits cache.
If memory cannot be allocated then FALSE is returned.
@return FALSE if allocation fails */
static
ibool
add_lock_wait_to_cache(
/*===================*/
    trx_i_s_cache_t* cache,                    /*!< in/out: cache */
    const i_s_locks_row_t* requested_lock_row, /*!< in: requested lock
                                               row */
    const i_s_locks_row_t* blocking_lock_row)  /*!< in: blocking lock
                                               row */
{
    i_s_lock_waits_row_t* dst_row;

    dst_row = (i_s_lock_waits_row_t*)
        table_cache_create_empty_row(&cache->innodb_lock_waits,
                                     cache);

    /* memory could not be allocated */
    if (dst_row == NULL) {

        return(FALSE);
    }

    fill_lock_waits_row(dst_row, requested_lock_row, blocking_lock_row);

    return(TRUE);
}

/*******************************************************************//**
Adds a transaction's relevant (important) locks to the cache.
If the transaction is waiting, then the wait lock is added to
innodb_locks and a pointer to the added row is returned in
requested_lock_row; otherwise requested_lock_row is set to NULL.
If rows cannot be allocated then FALSE is returned and the value of
requested_lock_row is undefined.
@return FALSE if allocation fails */
static
ibool
add_trx_relevant_locks_to_cache(
/*============================*/
    trx_i_s_cache_t* cache,               /*!< in/out: cache */
    const trx_t* trx,                     /*!< in: transaction */
    i_s_locks_row_t** requested_lock_row) /*!< out: pointer to the
                                          requested lock row, or NULL */
{
    ut_ad(lock_mutex_own());

    /* If transaction is waiting we add the wait lock and all locks
    from other transactions that are blocking the wait lock. */
    if (trx->lock.que_state == TRX_QUE_LOCK_WAIT) {

        const lock_t* curr_lock;
        ulint wait_lock_heap_no;
        i_s_locks_row_t* blocking_lock_row;
        lock_queue_iterator_t iter;

        ut_a(trx->lock.wait_lock != NULL);

        wait_lock_heap_no
            = wait_lock_get_heap_no(trx->lock.wait_lock);

        /* add the requested lock */
        *requested_lock_row
            = add_lock_to_cache(cache, trx->lock.wait_lock,
                                wait_lock_heap_no);

        /* memory could not be allocated */
        if (*requested_lock_row == NULL) {

            return(FALSE);
        }

        /* then iterate over the locks before the wait lock and
        add the ones that are blocking it */

        lock_queue_iterator_reset(&iter, trx->lock.wait_lock,
                                  ULINT_UNDEFINED);

        for (curr_lock = lock_queue_iterator_get_prev(&iter);
             curr_lock != NULL;
             curr_lock = lock_queue_iterator_get_prev(&iter)) {

            if (lock_has_to_wait(trx->lock.wait_lock,
                                 curr_lock)) {

                /* add the lock that is
                blocking trx->lock.wait_lock */
                blocking_lock_row
                    = add_lock_to_cache(
                        cache, curr_lock,
                        /* heap_no is the same
                        for the wait and waited
                        locks */
                        wait_lock_heap_no);

                /* memory could not be allocated */
                if (blocking_lock_row == NULL) {

                    return(FALSE);
                }

                /* add the relation between both locks
                to innodb_lock_waits */
                if (!add_lock_wait_to_cache(
                        cache, *requested_lock_row,
                        blocking_lock_row)) {

                    /* memory could not be allocated */
                    return(FALSE);
                }
            }
        }
    } else {

        *requested_lock_row = NULL;
    }

    return(TRUE);
}

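/* Worked example (not part of the original file): suppose transaction
A waits for a record lock that both B and C currently hold. The
function above then adds three rows to innodb_locks (A's requested
lock plus B's and C's blocking locks) and two rows to
innodb_lock_waits: (requested = A, blocking = B) and
(requested = A, blocking = C). */
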
/** The minimum time that a cache must not be updated after it has been
read for the last time, measured in microseconds. This ensures that
SELECTs which join several INFORMATION SCHEMA tables read the same
version of the cache. */
#define CACHE_MIN_IDLE_TIME_US 100000 /* 0.1 sec */

/*******************************************************************//**
Checks if the cache can safely be updated.
@return TRUE if it can be updated */
static
ibool
can_cache_be_updated(
/*=================*/
    trx_i_s_cache_t* cache) /*!< in: cache */
{
    ullint now;

    /* Here we read cache->last_read without acquiring its mutex
    because last_read is only updated when a shared rw lock on the
    whole cache is being held (see trx_i_s_cache_end_read()) and
    we are currently holding an exclusive rw lock on the cache.
    So it is not possible for last_read to be updated while we are
    reading it. */

#ifdef UNIV_SYNC_DEBUG
    ut_a(rw_lock_own(&cache->rw_lock, RW_LOCK_EX));
#endif

    now = ut_time_us(NULL);
    if (now - cache->last_read > CACHE_MIN_IDLE_TIME_US) {

        return(TRUE);
    }

    return(FALSE);
}

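/* Timing illustration (not part of the original file): two SELECTs
against the INFORMATION SCHEMA arriving within 0.1 s of each other see
the same snapshot, because the second one finds
now - last_read <= CACHE_MIN_IDLE_TIME_US and declines to refresh;
only after the cache has been idle for more than 0.1 s is it rebuilt
from trx_sys and the lock queues. */
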
/*******************************************************************//**
Declares a cache empty, preparing it to be filled up. Not all resources
are freed because they can be reused. */
static
void
trx_i_s_cache_clear(
/*================*/
    trx_i_s_cache_t* cache) /*!< out: cache to clear */
{
    cache->innodb_trx.rows_used = 0;
    cache->innodb_locks.rows_used = 0;
    cache->innodb_lock_waits.rows_used = 0;

    hash_table_clear(cache->locks_hash);

    ha_storage_empty(&cache->storage);
}

/*******************************************************************//**
Fetches the data needed to fill the 3 INFORMATION SCHEMA tables into
the table cache buffer. Cache must be locked for write. */
static
void
fetch_data_into_cache_low(
/*======================*/
    trx_i_s_cache_t* cache, /*!< in/out: cache */
    ibool only_ac_nl,       /*!< in: only select autocommit
                            non-locking transactions */
    trx_list_t* trx_list)   /*!< in: trx list */
{
    const trx_t* trx;

    ut_ad(trx_list == &trx_sys->rw_trx_list
          || trx_list == &trx_sys->ro_trx_list
          || trx_list == &trx_sys->mysql_trx_list);

    ut_ad(only_ac_nl == (trx_list == &trx_sys->mysql_trx_list));

    /* Iterate over the transaction list and add each one
    to innodb_trx's cache. We also add all locks that are relevant
    to each transaction into innodb_locks' and innodb_lock_waits'
    caches. */

    for (trx = UT_LIST_GET_FIRST(*trx_list);
         trx != NULL;
         trx =
         (trx_list == &trx_sys->mysql_trx_list
          ? UT_LIST_GET_NEXT(mysql_trx_list, trx)
          : UT_LIST_GET_NEXT(trx_list, trx))) {

        i_s_trx_row_t* trx_row;
        i_s_locks_row_t* requested_lock_row;

        if (trx->state == TRX_STATE_NOT_STARTED
            || (only_ac_nl && !trx_is_autocommit_non_locking(trx))) {

            continue;
        }

        assert_trx_nonlocking_or_in_list(trx);

        ut_ad(trx->in_ro_trx_list
              == (trx_list == &trx_sys->ro_trx_list));

        ut_ad(trx->in_rw_trx_list
              == (trx_list == &trx_sys->rw_trx_list));

        if (!add_trx_relevant_locks_to_cache(cache, trx,
                                             &requested_lock_row)) {

            cache->is_truncated = TRUE;
            return;
        }

        trx_row = (i_s_trx_row_t*)
            table_cache_create_empty_row(&cache->innodb_trx,
                                         cache);

        /* memory could not be allocated */
        if (trx_row == NULL) {

            cache->is_truncated = TRUE;
            return;
        }

        if (!fill_trx_row(trx_row, trx, requested_lock_row, cache)) {

            /* memory could not be allocated */
            cache->innodb_trx.rows_used--;
            cache->is_truncated = TRUE;
            return;
        }
    }
}

/*******************************************************************//**
Fetches the data needed to fill the 3 INFORMATION SCHEMA tables into
the table cache buffer. Cache must be locked for write. */
static
void
fetch_data_into_cache(
/*==================*/
    trx_i_s_cache_t* cache) /*!< in/out: cache */
{
    ut_ad(lock_mutex_own());
    ut_ad(mutex_own(&trx_sys->mutex));

    trx_i_s_cache_clear(cache);

    fetch_data_into_cache_low(cache, FALSE, &trx_sys->rw_trx_list);
    fetch_data_into_cache_low(cache, FALSE, &trx_sys->ro_trx_list);

    /* Only select autocommit non-locking selects because they can
    only be on the MySQL transaction list (TRUE). */
    fetch_data_into_cache_low(cache, TRUE, &trx_sys->mysql_trx_list);

    cache->is_truncated = FALSE;
}

/*******************************************************************//**
Update the transactions cache if it has not been read for some time.
Called from handler/i_s.cc.
@return 0 - fetched, 1 - not */
UNIV_INTERN
int
trx_i_s_possibly_fetch_data_into_cache(
/*===================================*/
    trx_i_s_cache_t* cache) /*!< in/out: cache */
{
    if (!can_cache_be_updated(cache)) {

        return(1);
    }

    /* We need to read trx_sys and record/table lock queues */

    lock_mutex_enter();

    mutex_enter(&trx_sys->mutex);

    fetch_data_into_cache(cache);

    mutex_exit(&trx_sys->mutex);

    lock_mutex_exit();

    return(0);
}

/*******************************************************************//**
Returns TRUE if the data in the cache is truncated due to the memory
limit posed by TRX_I_S_MEM_LIMIT.
@return TRUE if truncated */
UNIV_INTERN
ibool
trx_i_s_cache_is_truncated(
/*=======================*/
    trx_i_s_cache_t* cache) /*!< in: cache */
{
    return(cache->is_truncated);
}

/*******************************************************************//**
Initialize the INFORMATION SCHEMA transaction cache. */
UNIV_INTERN
void
trx_i_s_cache_init(
/*===============*/
    trx_i_s_cache_t* cache) /*!< out: cache to init */
{
    /* The latching is done in the following order:
    acquire trx_i_s_cache_t::rw_lock, X
    acquire lock mutex
    release lock mutex
    release trx_i_s_cache_t::rw_lock
    acquire trx_i_s_cache_t::rw_lock, S
    acquire trx_i_s_cache_t::last_read_mutex
    release trx_i_s_cache_t::last_read_mutex
    release trx_i_s_cache_t::rw_lock */

    rw_lock_create(trx_i_s_cache_lock_key, &cache->rw_lock,
                   SYNC_TRX_I_S_RWLOCK);

    cache->last_read = 0;

    mutex_create(cache_last_read_mutex_key,
                 &cache->last_read_mutex, SYNC_TRX_I_S_LAST_READ);

    table_cache_init(&cache->innodb_trx, sizeof(i_s_trx_row_t));
    table_cache_init(&cache->innodb_locks, sizeof(i_s_locks_row_t));
    table_cache_init(&cache->innodb_lock_waits,
                     sizeof(i_s_lock_waits_row_t));

    cache->locks_hash = hash_create(LOCKS_HASH_CELLS_NUM);

    cache->storage = ha_storage_create(CACHE_STORAGE_INITIAL_SIZE,
                                       CACHE_STORAGE_HASH_CELLS);

    cache->mem_allocd = 0;

    cache->is_truncated = FALSE;
}

/*******************************************************************//**
Free the INFORMATION SCHEMA transaction cache. */
UNIV_INTERN
void
trx_i_s_cache_free(
/*===============*/
    trx_i_s_cache_t* cache) /*!< in, own: cache to free */
{
    hash_table_free(cache->locks_hash);
    ha_storage_free(cache->storage);
    table_cache_free(&cache->innodb_trx);
    table_cache_free(&cache->innodb_locks);
    table_cache_free(&cache->innodb_lock_waits);
    memset(cache, 0, sizeof *cache);
}

/*******************************************************************//**
Issue a shared/read lock on the tables cache. */
UNIV_INTERN
void
trx_i_s_cache_start_read(
/*=====================*/
    trx_i_s_cache_t* cache) /*!< in: cache */
{
    rw_lock_s_lock(&cache->rw_lock);
}

/*******************************************************************//**
Release a shared/read lock on the tables cache. */
UNIV_INTERN
void
trx_i_s_cache_end_read(
/*===================*/
    trx_i_s_cache_t* cache) /*!< in: cache */
{
    ullint now;

#ifdef UNIV_SYNC_DEBUG
    ut_a(rw_lock_own(&cache->rw_lock, RW_LOCK_SHARED));
#endif

    /* update cache last read time */
    now = ut_time_us(NULL);
    mutex_enter(&cache->last_read_mutex);
    cache->last_read = now;
    mutex_exit(&cache->last_read_mutex);

    rw_lock_s_unlock(&cache->rw_lock);
}

/*******************************************************************//**
Issue an exclusive/write lock on the tables cache. */
UNIV_INTERN
void
trx_i_s_cache_start_write(
/*======================*/
    trx_i_s_cache_t* cache) /*!< in: cache */
{
    rw_lock_x_lock(&cache->rw_lock);
}

/*******************************************************************//**
Release an exclusive/write lock on the tables cache. */
UNIV_INTERN
void
trx_i_s_cache_end_write(
/*====================*/
    trx_i_s_cache_t* cache) /*!< in: cache */
{
#ifdef UNIV_SYNC_DEBUG
    ut_a(rw_lock_own(&cache->rw_lock, RW_LOCK_EX));
#endif

    rw_lock_x_unlock(&cache->rw_lock);
}

/*******************************************************************//**
Selects an INFORMATION SCHEMA table cache from the whole cache.
@return table cache */
static
i_s_table_cache_t*
cache_select_table(
/*===============*/
    trx_i_s_cache_t* cache, /*!< in: whole cache */
    enum i_s_table table)   /*!< in: which table */
{
    i_s_table_cache_t* table_cache;

#ifdef UNIV_SYNC_DEBUG
    ut_a(rw_lock_own(&cache->rw_lock, RW_LOCK_SHARED)
         || rw_lock_own(&cache->rw_lock, RW_LOCK_EX));
#endif

    switch (table) {
    case I_S_INNODB_TRX:
        table_cache = &cache->innodb_trx;
        break;
    case I_S_INNODB_LOCKS:
        table_cache = &cache->innodb_locks;
        break;
    case I_S_INNODB_LOCK_WAITS:
        table_cache = &cache->innodb_lock_waits;
        break;
    default:
        ut_error;
    }

    return(table_cache);
}

/*******************************************************************//**
Retrieves the number of used rows in the cache for a given
INFORMATION SCHEMA table.
@return number of rows */
UNIV_INTERN
ulint
trx_i_s_cache_get_rows_used(
/*========================*/
    trx_i_s_cache_t* cache, /*!< in: cache */
    enum i_s_table table)   /*!< in: which table */
{
    i_s_table_cache_t* table_cache;

    table_cache = cache_select_table(cache, table);

    return(table_cache->rows_used);
}

/*******************************************************************//**
Retrieves the nth row (zero-based) in the cache for a given
INFORMATION SCHEMA table.
@return row */
UNIV_INTERN
void*
trx_i_s_cache_get_nth_row(
/*======================*/
    trx_i_s_cache_t* cache, /*!< in: cache */
    enum i_s_table table,   /*!< in: which table */
    ulint n)                /*!< in: row number */
{
    i_s_table_cache_t* table_cache;
    ulint i;
    void* row;

    table_cache = cache_select_table(cache, table);

    ut_a(n < table_cache->rows_used);

    row = NULL;

    for (i = 0; i < MEM_CHUNKS_IN_TABLE_CACHE; i++) {

        if (table_cache->chunks[i].offset
            + table_cache->chunks[i].rows_allocd > n) {

            row = (char*) table_cache->chunks[i].base
                + (n - table_cache->chunks[i].offset)
                * table_cache->row_size;
            break;
        }
    }

    ut_a(row != NULL);

    return(row);
}

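/* Worked example (not part of the original file): with the default
growth policy the chunks hold 1024, 512, 768, ... rows at offsets 0,
1024, 1536, ... Requesting row n = 1200 therefore lands in chunk 1
(since 1024 + 512 > 1200) at local offset 1200 - 1024 = 176, i.e. at
address base + 176 * row_size. */
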
/*******************************************************************//**
Crafts a lock id string from an i_s_locks_row_t object. Returns its
second argument. This function aborts if there is not enough space in
lock_id. Be sure to provide at least TRX_I_S_LOCK_ID_MAX_LEN + 1 bytes
if you want to be 100% sure that it will not abort.
@return resulting lock id */
UNIV_INTERN
char*
trx_i_s_create_lock_id(
/*===================*/
    const i_s_locks_row_t* row, /*!< in: innodb_locks row */
    char* lock_id,              /*!< out: resulting lock_id */
    ulint lock_id_size)         /*!< in: size of the lock id buffer */
{
    int res_len;

    /* please adjust TRX_I_S_LOCK_ID_MAX_LEN if you change this */

    if (row->lock_space != ULINT_UNDEFINED) {
        /* record lock */
        res_len = ut_snprintf(lock_id, lock_id_size,
                              TRX_ID_FMT ":%lu:%lu:%lu",
                              row->lock_trx_id, row->lock_space,
                              row->lock_page, row->lock_rec);
    } else {
        /* table lock */
        res_len = ut_snprintf(lock_id, lock_id_size,
                              TRX_ID_FMT ":" UINT64PF,
                              row->lock_trx_id,
                              row->lock_table_id);
    }

    /* the typecast is safe because snprintf(3) never returns
    a negative result */
    ut_a(res_len >= 0);
    ut_a((ulint) res_len < lock_id_size);

    return(lock_id);
}
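
/* Usage sketch (not part of the original file; assumes a populated
i_s_locks_row_t* row):

    char lock_id[TRX_I_S_LOCK_ID_MAX_LEN + 1];

    trx_i_s_create_lock_id(row, lock_id, sizeof(lock_id));

For a record lock held by transaction 1234 on space 5, page 47, heap
number 3 this yields "1234:5:47:3" (trx_id:space:page:heap_no); for a
table lock on table id 567 it yields "1234:567" (trx_id:table_id). */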