MySQL 5.6.14 Source Code Document
btr0sea.cc
/*****************************************************************************

Copyright (c) 1996, 2012, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2008, Google Inc.

Portions of this file contain modifications contributed and copyrighted by
Google, Inc. Those modifications are gratefully acknowledged and are described
briefly in the InnoDB documentation. The contributions by Google are
incorporated with their permission, and subject to the conditions contained in
the file COPYING.Google.

This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
Foundation; version 2 of the License.

This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.

You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Suite 500, Boston, MA 02110-1335 USA

*****************************************************************************/
/********************************************************************//**
@file btr/btr0sea.cc
The index tree adaptive search

Created 2/17/1996 Heikki Tuuri
*************************************************************************/

#include "btr0sea.h"
#ifdef UNIV_NONINL
#include "btr0sea.ic"
#endif

#include "buf0buf.h"
#include "page0page.h"
#include "page0cur.h"
#include "btr0cur.h"
#include "btr0pcur.h"
#include "btr0btr.h"
#include "ha0ha.h"

/** Flag: has the search system been enabled?
Protected by btr_search_latch. */
UNIV_INTERN char		btr_search_enabled	= TRUE;

/** A dummy variable to fool the compiler */
UNIV_INTERN ulint		btr_search_this_is_zero = 0;

#ifdef UNIV_SEARCH_PERF_STAT
/** Number of successful adaptive hash index lookups */
UNIV_INTERN ulint		btr_search_n_succ	= 0;
/** Number of failed adaptive hash index lookups */
UNIV_INTERN ulint		btr_search_n_hash_fail	= 0;
#endif /* UNIV_SEARCH_PERF_STAT */

/** padding to prevent other memory update
hotspots from residing on the same memory
cache line as btr_search_latch */
UNIV_INTERN byte		btr_sea_pad1[64];

/** The latch protecting the adaptive search system: this latch protects the
(1) positions of records on those pages where a hash index has been built.
NOTE: It does not protect values of non-ordering fields within a record from
being updated in-place! We can use fact (1) to perform unique searches to
indexes. */

/* We will allocate the latch from dynamic memory to get it to the
same DRAM page as other hotspot semaphores */
UNIV_INTERN rw_lock_t*		btr_search_latch_temp;

/** padding to prevent other memory update hotspots from residing on
the same memory cache line */
UNIV_INTERN byte		btr_sea_pad2[64];

/** The adaptive hash index */
UNIV_INTERN btr_search_sys_t*	btr_search_sys;

#ifdef UNIV_PFS_RWLOCK
/* Key to register btr_search_sys with performance schema */
UNIV_INTERN mysql_pfs_key_t	btr_search_latch_key;
#endif /* UNIV_PFS_RWLOCK */

/** If the number of records on the page divided by this parameter
would have been successfully accessed using a hash index, the index
is then built on the page, assuming the global limit has been reached */
#define BTR_SEARCH_PAGE_BUILD_LIMIT	16

/** The global limit for consecutive potentially successful hash searches,
before hash index building is started */
#define BTR_SEARCH_BUILD_LIMIT		100

/********************************************************************//**
Builds a hash index on a page with the given parameters. If the page already
has a hash index with different parameters, the old hash index is removed.
If index is non-NULL, this function checks if n_fields and n_bytes are
sensible values, and does not build a hash index if not. */
static
void
btr_search_build_page_hash_index(
/*=============================*/
	dict_index_t*	index,	/*!< in: index for which to build, or NULL
				if not known */
	buf_block_t*	block,	/*!< in: index page, s- or x-latched */
	ulint		n_fields,/*!< in: hash this many full fields */
	ulint		n_bytes,/*!< in: hash this many bytes from the next
				field */
	ibool		left_side);/*!< in: hash for searches from left side? */
/*****************************************************************//**
This function should be called before reserving any btr search mutex, if
the intended operation might add nodes to the search system hash table.
Because of the latching order, once we have reserved the btr search system
latch, we cannot allocate a free frame from the buffer pool. Checks that
there is a free buffer frame allocated for hash table heap in the btr search
system. If not, allocates a free frame for the heap. This check makes it
probable that, when we have reserved the btr search system latch and we
need to allocate a new frame, it succeeds. However, the check will not
guarantee success. */
static
void
btr_search_check_free_space_in_heap(void)
/*=====================================*/
{
	hash_table_t*	table;
	mem_heap_t*	heap;

#ifdef UNIV_SYNC_DEBUG
	ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_SHARED));
	ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_EX));
#endif /* UNIV_SYNC_DEBUG */

	table = btr_search_sys->hash_index;

	heap = table->heap;

	/* Note that we peek the value of heap->free_block without reserving
	the latch: this is ok, because we will not guarantee that there will
	be enough free space in the hash table. */

	if (heap->free_block == NULL) {
		buf_block_t*	block = buf_block_alloc(NULL);

		rw_lock_x_lock(&btr_search_latch);

		if (heap->free_block == NULL) {
			heap->free_block = block;
		} else {
			buf_block_free(block);
		}

		rw_lock_x_unlock(&btr_search_latch);
	}
}

/*****************************************************************//**
Creates and initializes the adaptive search system at a database start. */
UNIV_INTERN
void
btr_search_sys_create(
/*==================*/
	ulint	hash_size)	/*!< in: hash index hash table size */
{
	/* We allocate the search latch from dynamic memory:
	see above at the global variable definition */

	btr_search_latch_temp = (rw_lock_t*) mem_alloc(sizeof(rw_lock_t));

	rw_lock_create(btr_search_latch_key, &btr_search_latch,
		       SYNC_SEARCH_SYS);

	btr_search_sys = (btr_search_sys_t*)
		mem_alloc(sizeof(btr_search_sys_t));

	btr_search_sys->hash_index = ha_create(hash_size, 0,
					MEM_HEAP_FOR_BTR_SEARCH, 0);
#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
	btr_search_sys->hash_index->adaptive = TRUE;
#endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
}

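/* Added note (not in the original file): the hash_size passed in at
startup is derived from the buffer pool size. A minimal sketch of the
kind of call made at server start, assuming buf_pool_get_curr_size()
as the sizing input: */
#ifdef notdefined
static
void
btr_search_sys_create_example(void)
{
	/* roughly one hash cell per 64 pointer-sized words of
	buffer pool memory */
	btr_search_sys_create(buf_pool_get_curr_size()
			      / sizeof(void*) / 64);
}
#endif
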
/*****************************************************************//**
Frees the adaptive search system at a database shutdown. */
UNIV_INTERN
void
btr_search_sys_free(void)
/*=====================*/
{
	rw_lock_free(&btr_search_latch);
	mem_free(btr_search_latch_temp);
	btr_search_latch_temp = NULL;
	mem_heap_free(btr_search_sys->hash_index->heap);
	hash_table_free(btr_search_sys->hash_index);
	mem_free(btr_search_sys);
	btr_search_sys = NULL;
}

/********************************************************************//**
Set index->ref_count = 0 on all indexes of a table. */
static
void
btr_search_disable_ref_count(
/*=========================*/
	dict_table_t*	table)	/*!< in/out: table */
{
	dict_index_t*	index;

	ut_ad(mutex_own(&dict_sys->mutex));
#ifdef UNIV_SYNC_DEBUG
	ut_ad(rw_lock_own(&btr_search_latch, RW_LOCK_EX));
#endif /* UNIV_SYNC_DEBUG */

	for (index = dict_table_get_first_index(table); index;
	     index = dict_table_get_next_index(index)) {

		index->search_info->ref_count = 0;
	}
}

/********************************************************************//**
Disable the adaptive hash search system and empty the index. */
UNIV_INTERN
void
btr_search_disable(void)
/*====================*/
{
	dict_table_t*	table;

	mutex_enter(&dict_sys->mutex);
	rw_lock_x_lock(&btr_search_latch);

	btr_search_enabled = FALSE;

	/* Clear the index->search_info->ref_count of every index in
	the data dictionary cache. */
	for (table = UT_LIST_GET_FIRST(dict_sys->table_LRU); table;
	     table = UT_LIST_GET_NEXT(table_LRU, table)) {

		btr_search_disable_ref_count(table);
	}

	for (table = UT_LIST_GET_FIRST(dict_sys->table_non_LRU); table;
	     table = UT_LIST_GET_NEXT(table_LRU, table)) {

		btr_search_disable_ref_count(table);
	}

	mutex_exit(&dict_sys->mutex);

	/* Set all block->index = NULL. */
	buf_pool_clear_hash_index();

	/* Clear the adaptive hash index. */
	hash_table_clear(btr_search_sys->hash_index);
	mem_heap_empty(btr_search_sys->hash_index->heap);

	rw_lock_x_unlock(&btr_search_latch);
}

/********************************************************************//**
Enable the adaptive hash search system. */
UNIV_INTERN
void
btr_search_enable(void)
/*====================*/
{
	rw_lock_x_lock(&btr_search_latch);

	btr_search_enabled = TRUE;

	rw_lock_x_unlock(&btr_search_latch);
}

/*****************************************************************//**
Creates and initializes a search info struct.
@return	own: search info struct */
UNIV_INTERN
btr_search_t*
btr_search_info_create(
/*===================*/
	mem_heap_t*	heap)	/*!< in: heap where created */
{
	btr_search_t*	info;

	info = (btr_search_t*) mem_heap_alloc(heap, sizeof(btr_search_t));

#ifdef UNIV_DEBUG
	info->magic_n = BTR_SEARCH_MAGIC_N;
#endif /* UNIV_DEBUG */

	info->ref_count = 0;
	info->root_guess = NULL;

	info->hash_analysis = 0;
	info->n_hash_potential = 0;

	info->last_hash_succ = FALSE;

#ifdef UNIV_SEARCH_PERF_STAT
	info->n_hash_succ = 0;
	info->n_hash_fail = 0;
	info->n_patt_succ = 0;
	info->n_searches = 0;
#endif /* UNIV_SEARCH_PERF_STAT */

	/* Set some sensible values */
	info->n_fields = 1;
	info->n_bytes = 0;

	info->left_side = TRUE;

	return(info);
}

/*****************************************************************//**
Returns the value of ref_count. The value is protected by
btr_search_latch.
@return	ref_count value. */
UNIV_INTERN
ulint
btr_search_info_get_ref_count(
/*==========================*/
	btr_search_t*	info)	/*!< in: search info. */
{
	ulint	ret;

	ut_ad(info);

#ifdef UNIV_SYNC_DEBUG
	ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_SHARED));
	ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_EX));
#endif /* UNIV_SYNC_DEBUG */

	rw_lock_s_lock(&btr_search_latch);
	ret = info->ref_count;
	rw_lock_s_unlock(&btr_search_latch);

	return(ret);
}

/*********************************************************************//**
Updates the search info of an index about hash successes. NOTE that info
is NOT protected by any semaphore, to save CPU time! Do not assume its
fields are consistent. */
static
void
btr_search_info_update_hash(
/*========================*/
	btr_search_t*	info,	/*!< in/out: search info */
	btr_cur_t*	cursor)	/*!< in: cursor which was just positioned */
{
	dict_index_t*	index;
	ulint		n_unique;
	int		cmp;

#ifdef UNIV_SYNC_DEBUG
	ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_SHARED));
	ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_EX));
#endif /* UNIV_SYNC_DEBUG */

	index = cursor->index;

	if (dict_index_is_ibuf(index)) {
		/* So many deletes are performed on an insert buffer tree
		that we do not consider a hash index useful on it: */

		return;
	}

	n_unique = dict_index_get_n_unique_in_tree(index);

	if (info->n_hash_potential == 0) {

		goto set_new_recomm;
	}

	/* Test if the search would have succeeded using the recommended
	hash prefix */

	if (info->n_fields >= n_unique && cursor->up_match >= n_unique) {
increment_potential:
		info->n_hash_potential++;

		return;
	}

	cmp = ut_pair_cmp(info->n_fields, info->n_bytes,
			  cursor->low_match, cursor->low_bytes);

	if (info->left_side ? cmp <= 0 : cmp > 0) {

		goto set_new_recomm;
	}

	cmp = ut_pair_cmp(info->n_fields, info->n_bytes,
			  cursor->up_match, cursor->up_bytes);

	if (info->left_side ? cmp <= 0 : cmp > 0) {

		goto increment_potential;
	}

set_new_recomm:
	/* We have to set a new recommendation; skip the hash analysis
	for a while to avoid unnecessary CPU time usage when there is no
	chance for success */

	info->hash_analysis = 0;

	cmp = ut_pair_cmp(cursor->up_match, cursor->up_bytes,
			  cursor->low_match, cursor->low_bytes);
	if (cmp == 0) {
		info->n_hash_potential = 0;

		/* For extra safety, we set some sensible values here */

		info->n_fields = 1;
		info->n_bytes = 0;

		info->left_side = TRUE;

	} else if (cmp > 0) {
		info->n_hash_potential = 1;

		if (cursor->up_match >= n_unique) {

			info->n_fields = n_unique;
			info->n_bytes = 0;

		} else if (cursor->low_match < cursor->up_match) {

			info->n_fields = cursor->low_match + 1;
			info->n_bytes = 0;
		} else {
			info->n_fields = cursor->low_match;
			info->n_bytes = cursor->low_bytes + 1;
		}

		info->left_side = TRUE;
	} else {
		info->n_hash_potential = 1;

		if (cursor->low_match >= n_unique) {

			info->n_fields = n_unique;
			info->n_bytes = 0;

		} else if (cursor->low_match > cursor->up_match) {

			info->n_fields = cursor->up_match + 1;
			info->n_bytes = 0;
		} else {
			info->n_fields = cursor->up_match;
			info->n_bytes = cursor->up_bytes + 1;
		}

		info->left_side = FALSE;
	}
}

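/* Worked example (added for illustration; not in the original file):
suppose n_unique = 5 and a search ends with cursor->low_match = 2 and
cursor->up_match = 3 (all byte counts 0). Then
ut_pair_cmp(up_match, up_bytes, low_match, low_bytes) > 0, so the code
above recommends n_fields = low_match + 1 = 3, n_bytes = 0 and
left_side = TRUE: hashing the first three fields is the shortest prefix
that still separates the records on the two sides of the cursor. */
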
/*********************************************************************//**
Updates the block search info on hash successes. NOTE that info and
block->n_hash_helps are NOT protected by any semaphore, to save CPU time!
Do not assume the fields are consistent.
@return	TRUE if building a (new) hash index on the block is recommended */
static
ibool
btr_search_update_block_hash_info(
/*==============================*/
	btr_search_t*	info,	/*!< in: search info */
	buf_block_t*	block,	/*!< in: buffer block */
	btr_cur_t*	cursor __attribute__((unused)))
				/*!< in: cursor */
{
#ifdef UNIV_SYNC_DEBUG
	ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_SHARED));
	ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_EX));
	ut_ad(rw_lock_own(&block->lock, RW_LOCK_SHARED)
	      || rw_lock_own(&block->lock, RW_LOCK_EX));
#endif /* UNIV_SYNC_DEBUG */
	ut_ad(cursor);

	info->last_hash_succ = FALSE;

	ut_a(buf_block_state_valid(block));
	ut_ad(info->magic_n == BTR_SEARCH_MAGIC_N);

	if ((block->n_hash_helps > 0)
	    && (info->n_hash_potential > 0)
	    && (block->n_fields == info->n_fields)
	    && (block->n_bytes == info->n_bytes)
	    && (block->left_side == info->left_side)) {

		if ((block->index)
		    && (block->curr_n_fields == info->n_fields)
		    && (block->curr_n_bytes == info->n_bytes)
		    && (block->curr_left_side == info->left_side)) {

			/* The search would presumably have succeeded using
			the hash index */

			info->last_hash_succ = TRUE;
		}

		block->n_hash_helps++;
	} else {
		block->n_hash_helps = 1;
		block->n_fields = info->n_fields;
		block->n_bytes = info->n_bytes;
		block->left_side = info->left_side;
	}

#ifdef UNIV_DEBUG
	if (cursor->index->table->does_not_fit_in_memory) {
		block->n_hash_helps = 0;
	}
#endif /* UNIV_DEBUG */

	if ((block->n_hash_helps > page_get_n_recs(block->frame)
	     / BTR_SEARCH_PAGE_BUILD_LIMIT)
	    && (info->n_hash_potential >= BTR_SEARCH_BUILD_LIMIT)) {

		if ((!block->index)
		    || (block->n_hash_helps
			> 2 * page_get_n_recs(block->frame))
		    || (block->n_fields != block->curr_n_fields)
		    || (block->n_bytes != block->curr_n_bytes)
		    || (block->left_side != block->curr_left_side)) {

			/* Build a new hash index on the page */

			return(TRUE);
		}
	}

	return(FALSE);
}

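/* Numeric illustration (added; not in the original file): with
BTR_SEARCH_PAGE_BUILD_LIMIT = 16 and BTR_SEARCH_BUILD_LIMIT = 100, a page
holding 320 user records becomes a candidate for a page hash index only
after more than 320 / 16 = 20 searches landing on this page would have
benefited from the same prefix, and only once at least 100 consecutive
searches on the whole index looked hash-friendly
(info->n_hash_potential >= 100). */
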
/*********************************************************************//**
Updates a hash node reference when it has been unsuccessfully used in a
search which could have succeeded with the used hash parameters. This can
happen because when building a hash index for a page, we do not check
what happens at page boundaries, and therefore there can be misleading
hash nodes. Also, collisions in the fold value can lead to misleading
references. This function lazily fixes these imperfections in the hash
index. */
static
void
btr_search_update_hash_ref(
/*=======================*/
	btr_search_t*	info,	/*!< in: search info */
	buf_block_t*	block,	/*!< in: buffer block where cursor
				positioned */
	btr_cur_t*	cursor)	/*!< in: cursor */
{
	dict_index_t*	index;
	ulint		fold;
	const rec_t*	rec;

	ut_ad(cursor->flag == BTR_CUR_HASH_FAIL);
#ifdef UNIV_SYNC_DEBUG
	ut_ad(rw_lock_own(&btr_search_latch, RW_LOCK_EX));
	ut_ad(rw_lock_own(&(block->lock), RW_LOCK_SHARED)
	      || rw_lock_own(&(block->lock), RW_LOCK_EX));
#endif /* UNIV_SYNC_DEBUG */
	ut_ad(page_align(btr_cur_get_rec(cursor))
	      == buf_block_get_frame(block));

	index = block->index;

	if (!index) {

		return;
	}

	ut_a(index == cursor->index);
	ut_a(!dict_index_is_ibuf(index));

	if ((info->n_hash_potential > 0)
	    && (block->curr_n_fields == info->n_fields)
	    && (block->curr_n_bytes == info->n_bytes)
	    && (block->curr_left_side == info->left_side)) {
		mem_heap_t*	heap		= NULL;
		ulint		offsets_[REC_OFFS_NORMAL_SIZE];
		rec_offs_init(offsets_);

		rec = btr_cur_get_rec(cursor);

		if (!page_rec_is_user_rec(rec)) {

			return;
		}

		fold = rec_fold(rec,
				rec_get_offsets(rec, index, offsets_,
						ULINT_UNDEFINED, &heap),
				block->curr_n_fields,
				block->curr_n_bytes, index->id);
		if (UNIV_LIKELY_NULL(heap)) {
			mem_heap_free(heap);
		}
#ifdef UNIV_SYNC_DEBUG
		ut_ad(rw_lock_own(&btr_search_latch, RW_LOCK_EX));
#endif /* UNIV_SYNC_DEBUG */

		ha_insert_for_fold(btr_search_sys->hash_index, fold,
				   block, rec);

		MONITOR_INC(MONITOR_ADAPTIVE_HASH_ROW_ADDED);
	}
}

/*********************************************************************//**
Updates the search info. */
UNIV_INTERN
void
btr_search_info_update_slow(
/*========================*/
	btr_search_t*	info,	/*!< in/out: search info */
	btr_cur_t*	cursor)	/*!< in: cursor which was just positioned */
{
	buf_block_t*	block;
	ibool		build_index;
	ulint*		params;
	ulint*		params2;

#ifdef UNIV_SYNC_DEBUG
	ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_SHARED));
	ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_EX));
#endif /* UNIV_SYNC_DEBUG */

	block = btr_cur_get_block(cursor);

	/* NOTE that the following two function calls do NOT protect
	info or block->n_fields etc. with any semaphore, to save CPU time!
	We cannot assume the fields are consistent when we return from
	those functions! */

	btr_search_info_update_hash(info, cursor);

	build_index = btr_search_update_block_hash_info(info, block, cursor);

	if (build_index || (cursor->flag == BTR_CUR_HASH_FAIL)) {

		btr_search_check_free_space_in_heap();
	}

	if (cursor->flag == BTR_CUR_HASH_FAIL) {
		/* Update the hash node reference, if appropriate */

#ifdef UNIV_SEARCH_PERF_STAT
		btr_search_n_hash_fail++;
#endif /* UNIV_SEARCH_PERF_STAT */

		rw_lock_x_lock(&btr_search_latch);

		btr_search_update_hash_ref(info, block, cursor);

		rw_lock_x_unlock(&btr_search_latch);
	}

	if (build_index) {
		/* Note that since we did not protect block->n_fields etc.
		with any semaphore, the values can be inconsistent. We have
		to check inside the function call that they make sense. We
		also malloc an array and store the values there to make sure
		the compiler does not let the function call parameters change
		inside the called function. It might be that the compiler
		would optimize the call just to pass pointers to block. */

		params = (ulint*) mem_alloc(3 * sizeof(ulint));
		params[0] = block->n_fields;
		params[1] = block->n_bytes;
		params[2] = block->left_side;

		/* Make sure the compiler cannot deduce the values and do
		optimizations */

		params2 = params + btr_search_this_is_zero;

		btr_search_build_page_hash_index(cursor->index,
						 block,
						 params2[0],
						 params2[1],
						 params2[2]);
		mem_free(params);
	}
}

/******************************************************************//**
Checks if a guessed position for a tree cursor is right. Note that if
mode is PAGE_CUR_LE, which is used in inserts, and the function returns
TRUE, then cursor->up_match and cursor->low_match both have sensible
values.
@return	TRUE if success */
static
ibool
btr_search_check_guess(
/*===================*/
	btr_cur_t*	cursor,	/*!< in: guessed cursor position */
	ibool		can_only_compare_to_cursor_rec,
				/*!< in: if we do not have a latch on the page
				of cursor, but only a latch on
				btr_search_latch, then ONLY the columns
				of the record UNDER the cursor are
				protected, not the next or previous record
				in the chain: we cannot look at the next or
				previous record to check our guess! */
	const dtuple_t*	tuple,	/*!< in: data tuple */
	ulint		mode,	/*!< in: PAGE_CUR_L, PAGE_CUR_LE, PAGE_CUR_G,
				or PAGE_CUR_GE */
	mtr_t*		mtr)	/*!< in: mtr */
{
	rec_t*		rec;
	ulint		n_unique;
	ulint		match;
	ulint		bytes;
	int		cmp;
	mem_heap_t*	heap		= NULL;
	ulint		offsets_[REC_OFFS_NORMAL_SIZE];
	ulint*		offsets		= offsets_;
	ibool		success		= FALSE;
	rec_offs_init(offsets_);

	n_unique = dict_index_get_n_unique_in_tree(cursor->index);

	rec = btr_cur_get_rec(cursor);

	ut_ad(page_rec_is_user_rec(rec));

	match = 0;
	bytes = 0;

	offsets = rec_get_offsets(rec, cursor->index, offsets,
				  n_unique, &heap);
	cmp = page_cmp_dtuple_rec_with_match(tuple, rec,
					     offsets, &match, &bytes);

	if (mode == PAGE_CUR_GE) {
		if (cmp == 1) {
			goto exit_func;
		}

		cursor->up_match = match;

		if (match >= n_unique) {
			success = TRUE;
			goto exit_func;
		}
	} else if (mode == PAGE_CUR_LE) {
		if (cmp == -1) {
			goto exit_func;
		}

		cursor->low_match = match;

	} else if (mode == PAGE_CUR_G) {
		if (cmp != -1) {
			goto exit_func;
		}
	} else if (mode == PAGE_CUR_L) {
		if (cmp != 1) {
			goto exit_func;
		}
	}

	if (can_only_compare_to_cursor_rec) {
		/* Since we could not determine if our guess is right just by
		looking at the record under the cursor, return FALSE */
		goto exit_func;
	}

	match = 0;
	bytes = 0;

	if ((mode == PAGE_CUR_G) || (mode == PAGE_CUR_GE)) {
		rec_t*	prev_rec;

		ut_ad(!page_rec_is_infimum(rec));

		prev_rec = page_rec_get_prev(rec);

		if (page_rec_is_infimum(prev_rec)) {
			success = btr_page_get_prev(page_align(prev_rec), mtr)
				== FIL_NULL;

			goto exit_func;
		}

		offsets = rec_get_offsets(prev_rec, cursor->index, offsets,
					  n_unique, &heap);
		cmp = page_cmp_dtuple_rec_with_match(tuple, prev_rec,
						     offsets, &match, &bytes);
		if (mode == PAGE_CUR_GE) {
			success = cmp == 1;
		} else {
			success = cmp != -1;
		}

		goto exit_func;
	} else {
		rec_t*	next_rec;

		ut_ad(!page_rec_is_supremum(rec));

		next_rec = page_rec_get_next(rec);

		if (page_rec_is_supremum(next_rec)) {
			if (btr_page_get_next(page_align(next_rec), mtr)
			    == FIL_NULL) {

				cursor->up_match = 0;
				success = TRUE;
			}

			goto exit_func;
		}

		offsets = rec_get_offsets(next_rec, cursor->index, offsets,
					  n_unique, &heap);
		cmp = page_cmp_dtuple_rec_with_match(tuple, next_rec,
						     offsets, &match, &bytes);
		if (mode == PAGE_CUR_LE) {
			success = cmp == -1;
			cursor->up_match = match;
		} else {
			success = cmp != 1;
		}
	}
exit_func:
	if (UNIV_LIKELY_NULL(heap)) {
		mem_heap_free(heap);
	}
	return(success);
}

/******************************************************************//**
Tries to guess the right search position based on the hash search info
of the index. Note that if mode is PAGE_CUR_LE, which is used in inserts,
and the function returns TRUE, then cursor->up_match and cursor->low_match
both have sensible values.
@return	TRUE if succeeded */
UNIV_INTERN
ibool
btr_search_guess_on_hash(
/*=====================*/
	dict_index_t*	index,		/*!< in: index */
	btr_search_t*	info,		/*!< in: index search info */
	const dtuple_t*	tuple,		/*!< in: logical record */
	ulint		mode,		/*!< in: PAGE_CUR_L, ... */
	ulint		latch_mode,	/*!< in: BTR_SEARCH_LEAF, ...;
					NOTE that only if has_search_latch
					is 0, we will have a latch set on
					the cursor page, else we assume
					the caller uses his search latch
					to protect the record! */
	btr_cur_t*	cursor,		/*!< out: tree cursor */
	ulint		has_search_latch,/*!< in: latch mode the caller
					currently has on btr_search_latch:
					RW_S_LATCH, RW_X_LATCH, or 0 */
	mtr_t*		mtr)		/*!< in: mtr */
{
	buf_pool_t*	buf_pool;
	buf_block_t*	block;
	const rec_t*	rec;
	ulint		fold;
	index_id_t	index_id;
#ifdef notdefined
	btr_cur_t	cursor2;
	btr_pcur_t	pcur;
#endif
	ut_ad(index && info && tuple && cursor && mtr);
	ut_ad(!dict_index_is_ibuf(index));
	ut_ad((latch_mode == BTR_SEARCH_LEAF)
	      || (latch_mode == BTR_MODIFY_LEAF));

	/* Note that, for efficiency, the struct info may not be protected by
	any latch here! */

	if (UNIV_UNLIKELY(info->n_hash_potential == 0)) {

		return(FALSE);
	}

	cursor->n_fields = info->n_fields;
	cursor->n_bytes = info->n_bytes;

	if (UNIV_UNLIKELY(dtuple_get_n_fields(tuple)
			  < cursor->n_fields + (cursor->n_bytes > 0))) {

		return(FALSE);
	}

	index_id = index->id;

#ifdef UNIV_SEARCH_PERF_STAT
	info->n_hash_succ++;
#endif
	fold = dtuple_fold(tuple, cursor->n_fields, cursor->n_bytes, index_id);

	cursor->fold = fold;
	cursor->flag = BTR_CUR_HASH;

	if (UNIV_LIKELY(!has_search_latch)) {
		rw_lock_s_lock(&btr_search_latch);

		if (UNIV_UNLIKELY(!btr_search_enabled)) {
			goto failure_unlock;
		}
	}

	ut_ad(rw_lock_get_writer(&btr_search_latch) != RW_LOCK_EX);
	ut_ad(rw_lock_get_reader_count(&btr_search_latch) > 0);

	rec = (rec_t*) ha_search_and_get_data(btr_search_sys->hash_index, fold);

	if (UNIV_UNLIKELY(!rec)) {
		goto failure_unlock;
	}

	block = buf_block_align(rec);

	if (UNIV_LIKELY(!has_search_latch)) {

		if (UNIV_UNLIKELY(
			    !buf_page_get_known_nowait(latch_mode, block,
						       BUF_MAKE_YOUNG,
						       __FILE__, __LINE__,
						       mtr))) {
			goto failure_unlock;
		}

		rw_lock_s_unlock(&btr_search_latch);

		buf_block_dbg_add_level(block, SYNC_TREE_NODE_FROM_HASH);
	}

	if (UNIV_UNLIKELY(buf_block_get_state(block) != BUF_BLOCK_FILE_PAGE)) {
		ut_ad(buf_block_get_state(block) == BUF_BLOCK_REMOVE_HASH);

		if (UNIV_LIKELY(!has_search_latch)) {

			btr_leaf_page_release(block, latch_mode, mtr);
		}

		goto failure;
	}

	ut_ad(page_rec_is_user_rec(rec));

	btr_cur_position(index, (rec_t*) rec, block, cursor);

	/* Check the validity of the guess within the page */

	/* If we only have the latch on btr_search_latch, not on the
	page, it only protects the columns of the record the cursor
	is positioned on. We cannot look at the next of the previous
	record to determine if our guess for the cursor position is
	right. */
	if (UNIV_UNLIKELY(index_id != btr_page_get_index_id(block->frame))
	    || !btr_search_check_guess(cursor,
				       has_search_latch,
				       tuple, mode, mtr)) {
		if (UNIV_LIKELY(!has_search_latch)) {
			btr_leaf_page_release(block, latch_mode, mtr);
		}

		goto failure;
	}

	if (UNIV_LIKELY(info->n_hash_potential < BTR_SEARCH_BUILD_LIMIT + 5)) {

		info->n_hash_potential++;
	}

#ifdef notdefined
	/* These lines of code can be used in a debug version to check
	the correctness of the searched cursor position: */

	info->last_hash_succ = FALSE;

	/* Currently, does not work if the following fails: */
	ut_ad(!has_search_latch);

	btr_leaf_page_release(block, latch_mode, mtr);

	btr_cur_search_to_nth_level(index, 0, tuple, mode, latch_mode,
				    &cursor2, 0, mtr);
	if (mode == PAGE_CUR_GE
	    && page_rec_is_supremum(btr_cur_get_rec(&cursor2))) {

		/* If mode is PAGE_CUR_GE, then the binary search
		in the index tree may actually take us to the supremum
		of the previous page */

		info->last_hash_succ = FALSE;

		btr_pcur_open_on_user_rec(index, tuple, mode, latch_mode,
					  &pcur, mtr);
		ut_ad(btr_pcur_get_rec(&pcur) == btr_cur_get_rec(cursor));
	} else {
		ut_ad(btr_cur_get_rec(&cursor2) == btr_cur_get_rec(cursor));
	}

	/* NOTE that it is theoretically possible that the above assertions
	fail if the page of the cursor gets removed from the buffer pool
	meanwhile! Thus it might not be a bug. */
#endif
	info->last_hash_succ = TRUE;

#ifdef UNIV_SEARCH_PERF_STAT
	btr_search_n_succ++;
#endif
	if (UNIV_LIKELY(!has_search_latch)
	    && buf_page_peek_if_too_old(&block->page)) {

		buf_page_make_young(&block->page);
	}

	/* Increment the page get statistics though we did not really
	fix the page: for user info only */
	buf_pool = buf_pool_from_bpage(&block->page);
	buf_pool->stat.n_page_gets++;

	return(TRUE);

	/*-------------------------------------------*/
failure_unlock:
	if (UNIV_LIKELY(!has_search_latch)) {
		rw_lock_s_unlock(&btr_search_latch);
	}
failure:
	cursor->flag = BTR_CUR_HASH_FAIL;

#ifdef UNIV_SEARCH_PERF_STAT
	info->n_hash_fail++;

	if (info->n_hash_succ > 0) {
		info->n_hash_succ--;
	}
#endif
	info->last_hash_succ = FALSE;

	return(FALSE);
}

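/* Caller sketch (added for illustration; a simplified, hypothetical
variant of how the tree search code in btr0cur.cc consults the adaptive
hash index -- the guard conditions at the real call site are more
elaborate): a search first tries the hash guess and falls back to a
normal B-tree descent when the guess fails. */
#ifdef notdefined
static
ibool
btr_search_try_guess_example(
	dict_index_t*	index,
	const dtuple_t*	tuple,
	btr_cur_t*	cursor,
	mtr_t*		mtr)
{
	btr_search_t*	info = btr_search_get_info(index);

	/* Only worth trying when the last hash lookup succeeded and
	nobody holds btr_search_latch in exclusive mode. */
	if (info->last_hash_succ
	    && rw_lock_get_writer(&btr_search_latch) == RW_LOCK_NOT_LOCKED
	    && btr_search_guess_on_hash(index, info, tuple, PAGE_CUR_GE,
					BTR_SEARCH_LEAF, cursor, 0, mtr)) {
		return(TRUE);	/* cursor positioned via the hash index */
	}

	return(FALSE);		/* caller does the full tree descent */
}
#endif
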
/********************************************************************//**
Drops a page hash index. */
UNIV_INTERN
void
btr_search_drop_page_hash_index(
/*============================*/
	buf_block_t*	block)	/*!< in: block containing index page,
				s- or x-latched, or an index page for
				which we know that
				block->buf_fix_count == 0 */
{
	hash_table_t*		table;
	ulint			n_fields;
	ulint			n_bytes;
	const page_t*		page;
	const rec_t*		rec;
	ulint			fold;
	ulint			prev_fold;
	index_id_t		index_id;
	ulint			n_cached;
	ulint			n_recs;
	ulint*			folds;
	ulint			i;
	mem_heap_t*		heap;
	const dict_index_t*	index;
	ulint*			offsets;
	btr_search_t*		info;

#ifdef UNIV_SYNC_DEBUG
	ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_SHARED));
	ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_EX));
#endif /* UNIV_SYNC_DEBUG */

	/* Do a dirty check on block->index, return if the block is
	not in the adaptive hash index. This is to avoid acquiring
	shared btr_search_latch for performance consideration. */
	if (!block->index) {
		return;
	}

retry:
	rw_lock_s_lock(&btr_search_latch);
	index = block->index;

	if (UNIV_LIKELY(!index)) {

		rw_lock_s_unlock(&btr_search_latch);

		return;
	}

	ut_a(!dict_index_is_ibuf(index));
#ifdef UNIV_DEBUG
	switch (dict_index_get_online_status(index)) {
	case ONLINE_INDEX_CREATION:
		/* The index is being created (bulk loaded). */
	case ONLINE_INDEX_COMPLETE:
		/* The index has been published. */
	case ONLINE_INDEX_ABORTED:
		/* Either the index creation was aborted due to an
		error observed by InnoDB (in which case there should
		not be any adaptive hash index entries), or it was
		completed and then flagged aborted in
		rollback_inplace_alter_table(). */
		break;
	case ONLINE_INDEX_ABORTED_DROPPED:
		/* The index should have been dropped from the tablespace
		already, and the adaptive hash index entries should have
		been dropped as well. */
		ut_error;
	}
#endif /* UNIV_DEBUG */

	table = btr_search_sys->hash_index;

#ifdef UNIV_SYNC_DEBUG
	ut_ad(rw_lock_own(&(block->lock), RW_LOCK_SHARED)
	      || rw_lock_own(&(block->lock), RW_LOCK_EX)
	      || block->page.buf_fix_count == 0
	      || buf_block_get_state(block) == BUF_BLOCK_REMOVE_HASH);
#endif /* UNIV_SYNC_DEBUG */

	n_fields = block->curr_n_fields;
	n_bytes = block->curr_n_bytes;

	/* NOTE: The fields of block must not be accessed after
	releasing btr_search_latch, as the index page might only
	be s-latched! */

	rw_lock_s_unlock(&btr_search_latch);

	ut_a(n_fields + n_bytes > 0);

	page = block->frame;
	n_recs = page_get_n_recs(page);

	/* Calculate and cache fold values into an array for fast deletion
	from the hash index */

	folds = (ulint*) mem_alloc(n_recs * sizeof(ulint));

	n_cached = 0;

	rec = page_get_infimum_rec(page);
	rec = page_rec_get_next_low(rec, page_is_comp(page));

	index_id = btr_page_get_index_id(page);

	ut_a(index_id == index->id);

	prev_fold = 0;

	heap = NULL;
	offsets = NULL;

	while (!page_rec_is_supremum(rec)) {
		offsets = rec_get_offsets(rec, index, offsets,
					  n_fields + (n_bytes > 0), &heap);
		ut_a(rec_offs_n_fields(offsets) == n_fields + (n_bytes > 0));
		fold = rec_fold(rec, offsets, n_fields, n_bytes, index_id);

		if (fold == prev_fold && prev_fold != 0) {

			goto next_rec;
		}

		/* Remove all hash nodes pointing to this page from the
		hash chain */

		folds[n_cached] = fold;
		n_cached++;
next_rec:
		rec = page_rec_get_next_low(rec, page_rec_is_comp(rec));
		prev_fold = fold;
	}

	if (UNIV_LIKELY_NULL(heap)) {
		mem_heap_free(heap);
	}

	rw_lock_x_lock(&btr_search_latch);

	if (UNIV_UNLIKELY(!block->index)) {
		/* Someone else has meanwhile dropped the hash index */

		goto cleanup;
	}

	ut_a(block->index == index);

	if (UNIV_UNLIKELY(block->curr_n_fields != n_fields)
	    || UNIV_UNLIKELY(block->curr_n_bytes != n_bytes)) {

		/* Someone else has meanwhile built a new hash index on the
		page, with different parameters */

		rw_lock_x_unlock(&btr_search_latch);

		mem_free(folds);
		goto retry;
	}

	for (i = 0; i < n_cached; i++) {

		ha_remove_all_nodes_to_page(table, folds[i], page);
	}

	info = btr_search_get_info(block->index);
	ut_a(info->ref_count > 0);
	info->ref_count--;

	block->index = NULL;

	MONITOR_INC(MONITOR_ADAPTIVE_HASH_PAGE_REMOVED);
	MONITOR_INC_VALUE(MONITOR_ADAPTIVE_HASH_ROW_REMOVED, n_cached);

cleanup:
#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
	if (UNIV_UNLIKELY(block->n_pointers)) {
		/* Corruption */
		ut_print_timestamp(stderr);
		fprintf(stderr,
			" InnoDB: Corruption of adaptive hash index."
			" After dropping\n"
			"InnoDB: the hash index to a page of %s,"
			" still %lu hash nodes remain.\n",
			index->name, (ulong) block->n_pointers);
		rw_lock_x_unlock(&btr_search_latch);

		ut_ad(btr_search_validate());
	} else {
		rw_lock_x_unlock(&btr_search_latch);
	}
#else /* UNIV_AHI_DEBUG || UNIV_DEBUG */
	rw_lock_x_unlock(&btr_search_latch);
#endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */

	mem_free(folds);
}

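/* Usage note (added; not in the original file): this function is reached
from two main paths: from the buffer pool when a block is evicted
(buf_LRU_block_remove_hashed_page() and friends remove the block from
buf_pool->page_hash first, then drop its adaptive hash entries here), and
from page-freeing paths via btr_search_drop_page_hash_when_freed() below. */
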
/********************************************************************//**
Drops a possible page hash index when a page is evicted from the buffer
pool or freed in a file segment. */
UNIV_INTERN
void
btr_search_drop_page_hash_when_freed(
/*=================================*/
	ulint	space,		/*!< in: space id */
	ulint	zip_size,	/*!< in: compressed page size in bytes
				or 0 for uncompressed pages */
	ulint	page_no)	/*!< in: page number */
{
	buf_block_t*	block;
	mtr_t		mtr;

	mtr_start(&mtr);

	/* If the caller has a latch on the page, then the caller must
	have a x-latch on the page and it must have already dropped
	the hash index for the page. Because of the x-latch that we
	are possibly holding, we cannot s-latch the page, but must
	(recursively) x-latch it, even though we are only reading. */

	block = buf_page_get_gen(space, zip_size, page_no, RW_X_LATCH, NULL,
				 BUF_PEEK_IF_IN_POOL, __FILE__, __LINE__,
				 &mtr);

	if (block && block->index) {

		buf_block_dbg_add_level(block, SYNC_TREE_NODE_FROM_HASH);

		btr_search_drop_page_hash_index(block);
	}

	mtr_commit(&mtr);
}

/********************************************************************//**
Builds a hash index on a page with the given parameters. If the page already
has a hash index with different parameters, the old hash index is removed.
If index is non-NULL, this function checks if n_fields and n_bytes are
sensible values, and does not build a hash index if not. */
static
void
btr_search_build_page_hash_index(
/*=============================*/
	dict_index_t*	index,	/*!< in: index for which to build */
	buf_block_t*	block,	/*!< in: index page, s- or x-latched */
	ulint		n_fields,/*!< in: hash this many full fields */
	ulint		n_bytes,/*!< in: hash this many bytes from the next
				field */
	ibool		left_side)/*!< in: hash for searches from left side? */
{
	hash_table_t*	table;
	page_t*		page;
	rec_t*		rec;
	rec_t*		next_rec;
	ulint		fold;
	ulint		next_fold;
	ulint		n_cached;
	ulint		n_recs;
	ulint*		folds;
	rec_t**		recs;
	ulint		i;
	mem_heap_t*	heap		= NULL;
	ulint		offsets_[REC_OFFS_NORMAL_SIZE];
	ulint*		offsets		= offsets_;
	rec_offs_init(offsets_);

	ut_ad(index);
	ut_a(!dict_index_is_ibuf(index));

#ifdef UNIV_SYNC_DEBUG
	ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_EX));
	ut_ad(rw_lock_own(&(block->lock), RW_LOCK_SHARED)
	      || rw_lock_own(&(block->lock), RW_LOCK_EX));
#endif /* UNIV_SYNC_DEBUG */

	rw_lock_s_lock(&btr_search_latch);

	if (!btr_search_enabled) {
		rw_lock_s_unlock(&btr_search_latch);
		return;
	}

	table = btr_search_sys->hash_index;
	page = buf_block_get_frame(block);

	if (block->index && ((block->curr_n_fields != n_fields)
			     || (block->curr_n_bytes != n_bytes)
			     || (block->curr_left_side != left_side))) {

		rw_lock_s_unlock(&btr_search_latch);

		btr_search_drop_page_hash_index(block);
	} else {
		rw_lock_s_unlock(&btr_search_latch);
	}

	n_recs = page_get_n_recs(page);

	if (n_recs == 0) {

		return;
	}

	/* Check that the values for hash index build are sensible */

	if (n_fields + n_bytes == 0) {

		return;
	}

	if (dict_index_get_n_unique_in_tree(index) < n_fields
	    || (dict_index_get_n_unique_in_tree(index) == n_fields
		&& n_bytes > 0)) {
		return;
	}

	/* Calculate and cache fold values and corresponding records into
	an array for fast insertion to the hash index */

	folds = (ulint*) mem_alloc(n_recs * sizeof(ulint));
	recs = (rec_t**) mem_alloc(n_recs * sizeof(rec_t*));

	n_cached = 0;

	ut_a(index->id == btr_page_get_index_id(page));

	rec = page_rec_get_next(page_get_infimum_rec(page));

	offsets = rec_get_offsets(rec, index, offsets,
				  n_fields + (n_bytes > 0), &heap);

	if (!page_rec_is_supremum(rec)) {
		ut_a(n_fields <= rec_offs_n_fields(offsets));

		if (n_bytes > 0) {
			ut_a(n_fields < rec_offs_n_fields(offsets));
		}
	}

	fold = rec_fold(rec, offsets, n_fields, n_bytes, index->id);

	if (left_side) {

		folds[n_cached] = fold;
		recs[n_cached] = rec;
		n_cached++;
	}

	for (;;) {
		next_rec = page_rec_get_next(rec);

		if (page_rec_is_supremum(next_rec)) {

			if (!left_side) {

				folds[n_cached] = fold;
				recs[n_cached] = rec;
				n_cached++;
			}

			break;
		}

		offsets = rec_get_offsets(next_rec, index, offsets,
					  n_fields + (n_bytes > 0), &heap);
		next_fold = rec_fold(next_rec, offsets, n_fields,
				     n_bytes, index->id);

		if (fold != next_fold) {
			/* Insert an entry into the hash index */

			if (left_side) {

				folds[n_cached] = next_fold;
				recs[n_cached] = next_rec;
				n_cached++;
			} else {
				folds[n_cached] = fold;
				recs[n_cached] = rec;
				n_cached++;
			}
		}

		rec = next_rec;
		fold = next_fold;
	}

	btr_search_check_free_space_in_heap();

	rw_lock_x_lock(&btr_search_latch);

	if (UNIV_UNLIKELY(!btr_search_enabled)) {
		goto exit_func;
	}

	if (block->index && ((block->curr_n_fields != n_fields)
			     || (block->curr_n_bytes != n_bytes)
			     || (block->curr_left_side != left_side))) {
		goto exit_func;
	}

	/* This counter is decremented every time we drop page
	hash index entries and is incremented here. Since we can
	rebuild hash index for a page that is already hashed, we
	have to take care not to increment the counter in that
	case. */
	if (!block->index) {
		index->search_info->ref_count++;
	}

	block->n_hash_helps = 0;

	block->curr_n_fields = n_fields;
	block->curr_n_bytes = n_bytes;
	block->curr_left_side = left_side;
	block->index = index;

	for (i = 0; i < n_cached; i++) {

		ha_insert_for_fold(table, folds[i], block, recs[i]);
	}

	MONITOR_INC(MONITOR_ADAPTIVE_HASH_PAGE_ADDED);
	MONITOR_INC_VALUE(MONITOR_ADAPTIVE_HASH_ROW_ADDED, n_cached);
exit_func:
	rw_lock_x_unlock(&btr_search_latch);

	mem_free(folds);
	mem_free(recs);
	if (UNIV_LIKELY_NULL(heap)) {
		mem_heap_free(heap);
	}
}

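/* Illustration (added; not in the original file): assume n_fields = 1,
n_bytes = 0 and a page whose first field takes the values
(5, 5, 7, 8, 8). The loop above caches one hash node per group of equal
fold values: with left_side == TRUE each fold points to the FIRST record
of its group (the leftmost 5 and 8), which suits searches that approach
a group of duplicates from the left; with left_side == FALSE each fold
points to the LAST record of its group, which suits searches coming from
the right. */
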
/********************************************************************//**
Moves or deletes hash entries for moved records. If new_block is already
hashed, then the hash index for block, if any, is dropped. If new_block is
not hashed, and block is hashed, then a new hash index is built to
new_block with the same parameters as block (this often happens when a
page is split). */
UNIV_INTERN
void
btr_search_move_or_delete_hash_entries(
/*===================================*/
	buf_block_t*	new_block,	/*!< in: records are copied
					to this page */
	buf_block_t*	block,		/*!< in: index page from which
					records were copied, and the
					copied records will be deleted
					from this page */
	dict_index_t*	index)		/*!< in: record descriptor */
{
	ulint	n_fields;
	ulint	n_bytes;
	ibool	left_side;

#ifdef UNIV_SYNC_DEBUG
	ut_ad(rw_lock_own(&(block->lock), RW_LOCK_EX));
	ut_ad(rw_lock_own(&(new_block->lock), RW_LOCK_EX));
#endif /* UNIV_SYNC_DEBUG */

	rw_lock_s_lock(&btr_search_latch);

	ut_a(!new_block->index || new_block->index == index);
	ut_a(!block->index || block->index == index);
	ut_a(!(new_block->index || block->index)
	     || !dict_index_is_ibuf(index));

	if (new_block->index) {

		rw_lock_s_unlock(&btr_search_latch);

		btr_search_drop_page_hash_index(block);

		return;
	}

	if (block->index) {

		n_fields = block->curr_n_fields;
		n_bytes = block->curr_n_bytes;
		left_side = block->curr_left_side;

		new_block->n_fields = block->curr_n_fields;
		new_block->n_bytes = block->curr_n_bytes;
		new_block->left_side = left_side;

		rw_lock_s_unlock(&btr_search_latch);

		ut_a(n_fields + n_bytes > 0);

		btr_search_build_page_hash_index(index, new_block, n_fields,
						 n_bytes, left_side);
		ut_ad(n_fields == block->curr_n_fields);
		ut_ad(n_bytes == block->curr_n_bytes);
		ut_ad(left_side == block->curr_left_side);
		return;
	}

	rw_lock_s_unlock(&btr_search_latch);
}

/********************************************************************//**
Updates the page hash index when a single record is deleted from a page. */
UNIV_INTERN
void
btr_search_update_hash_on_delete(
/*=============================*/
	btr_cur_t*	cursor)	/*!< in: cursor which was positioned on the
				record to delete using btr_cur_search_...,
				the record is not yet deleted */
{
	hash_table_t*	table;
	buf_block_t*	block;
	const rec_t*	rec;
	ulint		fold;
	dict_index_t*	index;
	ulint		offsets_[REC_OFFS_NORMAL_SIZE];
	mem_heap_t*	heap		= NULL;
	rec_offs_init(offsets_);

	block = btr_cur_get_block(cursor);

#ifdef UNIV_SYNC_DEBUG
	ut_ad(rw_lock_own(&(block->lock), RW_LOCK_EX));
#endif /* UNIV_SYNC_DEBUG */

	index = block->index;

	if (!index) {

		return;
	}

	ut_a(index == cursor->index);
	ut_a(block->curr_n_fields + block->curr_n_bytes > 0);
	ut_a(!dict_index_is_ibuf(index));

	table = btr_search_sys->hash_index;

	rec = btr_cur_get_rec(cursor);

	fold = rec_fold(rec, rec_get_offsets(rec, index, offsets_,
					     ULINT_UNDEFINED, &heap),
			block->curr_n_fields, block->curr_n_bytes, index->id);
	if (UNIV_LIKELY_NULL(heap)) {
		mem_heap_free(heap);
	}

	rw_lock_x_lock(&btr_search_latch);

	if (block->index) {
		ut_a(block->index == index);

		if (ha_search_and_delete_if_found(table, fold, rec)) {
			MONITOR_INC(MONITOR_ADAPTIVE_HASH_ROW_REMOVED);
		} else {
			MONITOR_INC(
				MONITOR_ADAPTIVE_HASH_ROW_REMOVE_NOT_FOUND);
		}
	}

	rw_lock_x_unlock(&btr_search_latch);
}

/********************************************************************//**
Updates the page hash index when a single record is inserted on a page. */
UNIV_INTERN
void
btr_search_update_hash_node_on_insert(
/*==================================*/
	btr_cur_t*	cursor)	/*!< in: cursor which was positioned to the
				place to insert using btr_cur_search_...,
				and the new record has been inserted next
				to the cursor */
{
	hash_table_t*	table;
	buf_block_t*	block;
	dict_index_t*	index;
	rec_t*		rec;

	rec = btr_cur_get_rec(cursor);

	block = btr_cur_get_block(cursor);

#ifdef UNIV_SYNC_DEBUG
	ut_ad(rw_lock_own(&(block->lock), RW_LOCK_EX));
#endif /* UNIV_SYNC_DEBUG */

	index = block->index;

	if (!index) {

		return;
	}

	ut_a(cursor->index == index);
	ut_a(!dict_index_is_ibuf(index));

	rw_lock_x_lock(&btr_search_latch);

	if (!block->index) {

		goto func_exit;
	}

	ut_a(block->index == index);

	if ((cursor->flag == BTR_CUR_HASH)
	    && (cursor->n_fields == block->curr_n_fields)
	    && (cursor->n_bytes == block->curr_n_bytes)
	    && !block->curr_left_side) {

		table = btr_search_sys->hash_index;

		if (ha_search_and_update_if_found(
			    table, cursor->fold, rec, block,
			    page_rec_get_next(rec))) {
			MONITOR_INC(MONITOR_ADAPTIVE_HASH_ROW_UPDATED);
		}

func_exit:
		rw_lock_x_unlock(&btr_search_latch);
	} else {
		rw_lock_x_unlock(&btr_search_latch);

		btr_search_update_hash_on_insert(cursor);
	}
}

/********************************************************************//**
Updates the page hash index when a single record is inserted on a page. */
UNIV_INTERN
void
btr_search_update_hash_on_insert(
/*=============================*/
	btr_cur_t*	cursor)	/*!< in: cursor which was positioned to the
				place to insert using btr_cur_search_...,
				and the new record has been inserted next
				to the cursor */
{
	hash_table_t*	table;
	buf_block_t*	block;
	dict_index_t*	index;
	const rec_t*	rec;
	const rec_t*	ins_rec;
	const rec_t*	next_rec;
	ulint		fold;
	ulint		ins_fold;
	ulint		next_fold = 0; /* remove warning (??? bug ???) */
	ulint		n_fields;
	ulint		n_bytes;
	ibool		left_side;
	ibool		locked		= FALSE;
	mem_heap_t*	heap		= NULL;
	ulint		offsets_[REC_OFFS_NORMAL_SIZE];
	ulint*		offsets		= offsets_;
	rec_offs_init(offsets_);

	block = btr_cur_get_block(cursor);

#ifdef UNIV_SYNC_DEBUG
	ut_ad(rw_lock_own(&(block->lock), RW_LOCK_EX));
#endif /* UNIV_SYNC_DEBUG */

	index = block->index;

	if (!index) {

		return;
	}

	btr_search_check_free_space_in_heap();

	table = btr_search_sys->hash_index;

	rec = btr_cur_get_rec(cursor);

	ut_a(index == cursor->index);
	ut_a(!dict_index_is_ibuf(index));

	n_fields = block->curr_n_fields;
	n_bytes = block->curr_n_bytes;
	left_side = block->curr_left_side;

	ins_rec = page_rec_get_next_const(rec);
	next_rec = page_rec_get_next_const(ins_rec);

	offsets = rec_get_offsets(ins_rec, index, offsets,
				  ULINT_UNDEFINED, &heap);
	ins_fold = rec_fold(ins_rec, offsets, n_fields, n_bytes, index->id);

	if (!page_rec_is_supremum(next_rec)) {
		offsets = rec_get_offsets(next_rec, index, offsets,
					  n_fields + (n_bytes > 0), &heap);
		next_fold = rec_fold(next_rec, offsets, n_fields,
				     n_bytes, index->id);
	}

	if (!page_rec_is_infimum(rec)) {
		offsets = rec_get_offsets(rec, index, offsets,
					  n_fields + (n_bytes > 0), &heap);
		fold = rec_fold(rec, offsets, n_fields, n_bytes, index->id);
	} else {
		if (left_side) {

			rw_lock_x_lock(&btr_search_latch);

			locked = TRUE;

			if (!btr_search_enabled) {
				goto function_exit;
			}

			ha_insert_for_fold(table, ins_fold, block, ins_rec);
		}

		goto check_next_rec;
	}

	if (fold != ins_fold) {

		if (!locked) {

			rw_lock_x_lock(&btr_search_latch);

			locked = TRUE;

			if (!btr_search_enabled) {
				goto function_exit;
			}
		}

		if (!left_side) {
			ha_insert_for_fold(table, fold, block, rec);
		} else {
			ha_insert_for_fold(table, ins_fold, block, ins_rec);
		}
	}

check_next_rec:
	if (page_rec_is_supremum(next_rec)) {

		if (!left_side) {

			if (!locked) {
				rw_lock_x_lock(&btr_search_latch);

				locked = TRUE;

				if (!btr_search_enabled) {
					goto function_exit;
				}
			}

			ha_insert_for_fold(table, ins_fold, block, ins_rec);
		}

		goto function_exit;
	}

	if (ins_fold != next_fold) {

		if (!locked) {

			rw_lock_x_lock(&btr_search_latch);

			locked = TRUE;

			if (!btr_search_enabled) {
				goto function_exit;
			}
		}

		if (!left_side) {

			ha_insert_for_fold(table, ins_fold, block, ins_rec);
			/*
			fputs("Hash insert for ", stderr);
			dict_index_name_print(stderr, index);
			fprintf(stderr, " fold %lu\n", ins_fold);
			*/
		} else {
			ha_insert_for_fold(table, next_fold, block, next_rec);
		}
	}

function_exit:
	if (UNIV_LIKELY_NULL(heap)) {
		mem_heap_free(heap);
	}
	if (locked) {
		rw_lock_x_unlock(&btr_search_latch);
	}
}

#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
/********************************************************************//**
Validates the search system.
@return	TRUE if ok */
UNIV_INTERN
ibool
btr_search_validate(void)
/*=====================*/
{
	ha_node_t*	node;
	ulint		n_page_dumps	= 0;
	ibool		ok		= TRUE;
	ulint		i;
	ulint		cell_count;
	mem_heap_t*	heap		= NULL;
	ulint		offsets_[REC_OFFS_NORMAL_SIZE];
	ulint*		offsets		= offsets_;

	/* How many cells to check before temporarily releasing
	btr_search_latch. */
	ulint		chunk_size = 10000;

	rec_offs_init(offsets_);

	rw_lock_x_lock(&btr_search_latch);
	buf_pool_mutex_enter_all();

	cell_count = hash_get_n_cells(btr_search_sys->hash_index);

	for (i = 0; i < cell_count; i++) {
		/* We release btr_search_latch every once in a while to
		give other queries a chance to run. */
		if ((i != 0) && ((i % chunk_size) == 0)) {
			buf_pool_mutex_exit_all();
			rw_lock_x_unlock(&btr_search_latch);
			os_thread_yield();
			rw_lock_x_lock(&btr_search_latch);
			buf_pool_mutex_enter_all();
		}

		node = (ha_node_t*)
			hash_get_nth_cell(btr_search_sys->hash_index, i)->node;

		for (; node != NULL; node = node->next) {
			const buf_block_t*	block
				= buf_block_align((byte*) node->data);
			const buf_block_t*	hash_block;
			buf_pool_t*		buf_pool;
			index_id_t		page_index_id;

			buf_pool = buf_pool_from_bpage((buf_page_t*) block);

			if (UNIV_LIKELY(buf_block_get_state(block)
					== BUF_BLOCK_FILE_PAGE)) {

				/* The space and offset are only valid
				for file blocks. It is possible that
				the block is being freed
				(BUF_BLOCK_REMOVE_HASH, see the
				assertion and the comment below) */
				hash_block = buf_block_hash_get(
					buf_pool,
					buf_block_get_space(block),
					buf_block_get_page_no(block));
			} else {
				hash_block = NULL;
			}

			if (hash_block) {
				ut_a(hash_block == block);
			} else {
				/* When a block is being freed,
				buf_LRU_search_and_free_block() first
				removes the block from
				buf_pool->page_hash by calling
				buf_LRU_block_remove_hashed_page().
				After that, it invokes
				btr_search_drop_page_hash_index() to
				remove the block from
				btr_search_sys->hash_index. */

				ut_a(buf_block_get_state(block)
				     == BUF_BLOCK_REMOVE_HASH);
			}

			ut_a(!dict_index_is_ibuf(block->index));

			page_index_id = btr_page_get_index_id(block->frame);

			offsets = rec_get_offsets(node->data,
						  block->index, offsets,
						  block->curr_n_fields
						  + (block->curr_n_bytes > 0),
						  &heap);

			if (!block->index || node->fold
			    != rec_fold(node->data,
					offsets,
					block->curr_n_fields,
					block->curr_n_bytes,
					page_index_id)) {
				const page_t*	page = block->frame;

				ok = FALSE;
				ut_print_timestamp(stderr);

				fprintf(stderr,
					"  InnoDB: Error in an adaptive hash"
					" index pointer to page %lu\n"
					"InnoDB: ptr mem address %p"
					" index id %llu,"
					" node fold %lu, rec fold %lu\n",
					(ulong) page_get_page_no(page),
					node->data,
					(ullint) page_index_id,
					(ulong) node->fold,
					(ulong) rec_fold(node->data,
							 offsets,
							 block->curr_n_fields,
							 block->curr_n_bytes,
							 page_index_id));

				fputs("InnoDB: Record ", stderr);
				rec_print_new(stderr, node->data, offsets);
				fprintf(stderr, "\nInnoDB: on that page."
					" Page mem address %p, is hashed %p,"
					" n fields %lu, n bytes %lu\n"
					"InnoDB: side %lu\n",
					(void*) page, (void*) block->index,
					(ulong) block->curr_n_fields,
					(ulong) block->curr_n_bytes,
					(ulong) block->curr_left_side);

				if (n_page_dumps < 20) {
					buf_page_print(
						page, 0,
						BUF_PAGE_PRINT_NO_CRASH);
					n_page_dumps++;
				}
			}
		}
	}

	for (i = 0; i < cell_count; i += chunk_size) {
		ulint end_index = ut_min(i + chunk_size - 1, cell_count - 1);

		/* We release btr_search_latch every once in a while to
		give other queries a chance to run. */
		if (i != 0) {
			buf_pool_mutex_exit_all();
			rw_lock_x_unlock(&btr_search_latch);
			os_thread_yield();
			rw_lock_x_lock(&btr_search_latch);
			buf_pool_mutex_enter_all();
		}

		if (!ha_validate(btr_search_sys->hash_index, i, end_index)) {
			ok = FALSE;
		}
	}

	buf_pool_mutex_exit_all();
	rw_lock_x_unlock(&btr_search_latch);
	if (UNIV_LIKELY_NULL(heap)) {
		mem_heap_free(heap);
	}

	return(ok);
}
#endif /* defined UNIV_AHI_DEBUG || defined UNIV_DEBUG */