MySQL 5.6.14 Source Code Document
 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Groups Pages
page0cur.cc
Go to the documentation of this file.
1 /*****************************************************************************
2 
3 Copyright (c) 1994, 2013, Oracle and/or its affiliates. All Rights Reserved.
4 Copyright (c) 2012, Facebook Inc.
5 
6 This program is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free Software
8 Foundation; version 2 of the License.
9 
10 This program is distributed in the hope that it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
12 FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
13 
14 You should have received a copy of the GNU General Public License along with
15 this program; if not, write to the Free Software Foundation, Inc.,
16 51 Franklin Street, Suite 500, Boston, MA 02110-1335 USA
17 
18 *****************************************************************************/
19 
20 /********************************************************************/
27 #include "page0cur.h"
28 #ifdef UNIV_NONINL
29 #include "page0cur.ic"
30 #endif
31 
32 #include "page0zip.h"
33 #include "btr0btr.h"
34 #include "mtr0log.h"
35 #include "log0recv.h"
36 #include "ut0ut.h"
37 #ifndef UNIV_HOTBACKUP
38 #include "rem0cmp.h"
39 
40 #ifdef PAGE_CUR_ADAPT
41 # ifdef UNIV_SEARCH_PERF_STAT
42 static ulint page_cur_short_succ = 0;
43 # endif /* UNIV_SEARCH_PERF_STAT */
44 
45 /*******************************************************************/
57 static
58 ib_uint64_t
59 page_cur_lcg_prng(void)
60 /*===================*/
61 {
62 #define LCG_a 1103515245
63 #define LCG_c 12345
64  static ib_uint64_t lcg_current = 0;
65  static ibool initialized = FALSE;
66 
67  if (!initialized) {
68  lcg_current = (ib_uint64_t) ut_time_us(NULL);
69  initialized = TRUE;
70  }
71 
72  /* no need to "% 2^64" explicitly because lcg_current is
73  64 bit and this will be done anyway */
74  lcg_current = LCG_a * lcg_current + LCG_c;
75 
76  return(lcg_current);
77 }
78 
79 /****************************************************************/
/****************************************************************/
/* Shortcut search for the common "insert to the right" pattern:
instead of a full binary search, probe the record pointed to by the
PAGE_LAST_INSERT header field and accept it if the search tuple sorts
at or after that record and strictly before its successor.  On success
positions *cursor on that record, updates the matched-fields/bytes
accumulators, and returns TRUE; otherwise returns FALSE and the caller
falls back to the normal search.
NOTE(review): the numeric prefix on each line is a leftover Doxygen
line number from extraction; original lines 90-102 (parameter doc
comments) and 128 (an assertion after ut_ad(rec)) were dropped with
the extraction — restore from the pristine source before compiling. */
82 UNIV_INLINE
83 ibool
84 page_cur_try_search_shortcut(
85 /*=========================*/
86  const buf_block_t* block,
87  const dict_index_t* index,
88  const dtuple_t* tuple,
89  ulint* iup_matched_fields,
92  ulint* iup_matched_bytes,
96  ulint* ilow_matched_fields,
99  ulint* ilow_matched_bytes,
103  page_cur_t* cursor)
104 {
105  const rec_t* rec;
106  const rec_t* next_rec;
107  ulint low_match;
108  ulint low_bytes;
109  ulint up_match;
110  ulint up_bytes;
111 #ifdef UNIV_SEARCH_DEBUG
112  page_cur_t cursor2;
113 #endif
114  ibool success = FALSE;
115  const page_t* page = buf_block_get_frame(block);
116  mem_heap_t* heap = NULL;
117  ulint offsets_[REC_OFFS_NORMAL_SIZE];
118  ulint* offsets = offsets_;
119  rec_offs_init(offsets_);
120 
121  ut_ad(dtuple_check_typed(tuple));
122 
/* Probe the record of the most recent insert on this page. */
123  rec = page_header_get_ptr(page, PAGE_LAST_INSERT);
124  offsets = rec_get_offsets(rec, index, offsets,
125  dtuple_get_n_fields(tuple), &heap);
126 
127  ut_ad(rec);
129 
/* Start both match counters from the smaller of the incoming
low/up matched prefixes: that prefix is known to match any record
between the two bounds. */
130  ut_pair_min(&low_match, &low_bytes,
131  *ilow_matched_fields, *ilow_matched_bytes,
132  *iup_matched_fields, *iup_matched_bytes);
133 
134  up_match = low_match;
135  up_bytes = low_bytes;
136 
/* The shortcut applies only if tuple >= rec ... */
137  if (page_cmp_dtuple_rec_with_match(tuple, rec, offsets,
138  &low_match, &low_bytes) < 0) {
139  goto exit_func;
140  }
141 
/* ... and tuple < successor of rec. */
142  next_rec = page_rec_get_next_const(rec);
143  offsets = rec_get_offsets(next_rec, index, offsets,
144  dtuple_get_n_fields(tuple), &heap);
145 
146  if (page_cmp_dtuple_rec_with_match(tuple, next_rec, offsets,
147  &up_match, &up_bytes) >= 0) {
148  goto exit_func;
149  }
150 
151  page_cur_position(rec, block, cursor);
152 
153 #ifdef UNIV_SEARCH_DEBUG
/* Cross-check the shortcut result against the full search. */
154  page_cur_search_with_match(block, index, tuple, PAGE_CUR_DBG,
155  iup_matched_fields,
156  iup_matched_bytes,
157  ilow_matched_fields,
158  ilow_matched_bytes,
159  &cursor2);
160  ut_a(cursor2.rec == cursor->rec);
161 
162  if (!page_rec_is_supremum(next_rec)) {
163 
164  ut_a(*iup_matched_fields == up_match);
165  ut_a(*iup_matched_bytes == up_bytes);
166  }
167 
168  ut_a(*ilow_matched_fields == low_match);
169  ut_a(*ilow_matched_bytes == low_bytes);
170 #endif
/* The up-side prefix is meaningful only against a user record,
not against the supremum. */
171  if (!page_rec_is_supremum(next_rec)) {
172 
173  *iup_matched_fields = up_match;
174  *iup_matched_bytes = up_bytes;
175  }
176 
177  *ilow_matched_fields = low_match;
178  *ilow_matched_bytes = low_bytes;
179 
180 #ifdef UNIV_SEARCH_PERF_STAT
181  page_cur_short_succ++;
182 #endif
183  success = TRUE;
184 exit_func:
/* rec_get_offsets() may have allocated a heap; release it. */
185  if (UNIV_LIKELY_NULL(heap)) {
186  mem_heap_free(heap);
187  }
188  return(success);
189 }
190 
191 #endif
192 
193 #ifdef PAGE_CUR_LE_OR_EXTENDS
194 /****************************************************************/
199 static
200 ibool
201 page_cur_rec_field_extends(
202 /*=======================*/
203  const dtuple_t* tuple,
204  const rec_t* rec,
205  const ulint* offsets,
206  ulint n)
207 {
208  const dtype_t* type;
209  const dfield_t* dfield;
210  const byte* rec_f;
211  ulint rec_f_len;
212 
213  ut_ad(rec_offs_validate(rec, NULL, offsets));
214  dfield = dtuple_get_nth_field(tuple, n);
215 
216  type = dfield_get_type(dfield);
217 
218  rec_f = rec_get_nth_field(rec, offsets, n, &rec_f_len);
219 
220  if (type->mtype == DATA_VARCHAR
221  || type->mtype == DATA_CHAR
222  || type->mtype == DATA_FIXBINARY
223  || type->mtype == DATA_BINARY
224  || type->mtype == DATA_BLOB
225  || type->mtype == DATA_VARMYSQL
226  || type->mtype == DATA_MYSQL) {
227 
228  if (dfield_get_len(dfield) != UNIV_SQL_NULL
229  && rec_f_len != UNIV_SQL_NULL
230  && rec_f_len >= dfield_get_len(dfield)
231  && !cmp_data_data_slow(type->mtype, type->prtype,
232  dfield_get_data(dfield),
233  dfield_get_len(dfield),
234  rec_f, dfield_get_len(dfield))) {
235 
236  return(TRUE);
237  }
238  }
239 
240  return(FALSE);
241 }
242 #endif /* PAGE_CUR_LE_OR_EXTENDS */
243 
244 /****************************************************************/
/****************************************************************/
/* Full cursor positioning on an index page.  Performs a binary search
over the page directory slots, then a linear search among the records
owned by the upper-limit slot, and finally positions *cursor according
to the search mode (PAGE_CUR_L/LE/G/GE).  The iup/ilow matched
fields/bytes parameters are both in and out: they carry known matched
prefixes from the caller (e.g. from the B-tree level above) and are
updated with the prefixes established on this page.
NOTE(review): the function-name line (original line 248, presumably
"page_cur_search_with_match(") and the continuation argument of the
two rec_get_offsets() calls (original lines 379 and 436, presumably
"dtuple_get_n_fields_cmp(tuple),") were dropped by the HTML
extraction — restore from the pristine source before compiling.
The numeric prefix on each line is a leftover Doxygen line number. */
246 UNIV_INTERN
247 void
249 /*=======================*/
250  const buf_block_t* block,
251  const dict_index_t* index,
252  const dtuple_t* tuple,
253  ulint mode,
256  ulint* iup_matched_fields,
259  ulint* iup_matched_bytes,
263  ulint* ilow_matched_fields,
266  ulint* ilow_matched_bytes,
270  page_cur_t* cursor)
271 {
272  ulint up;
273  ulint low;
274  ulint mid;
275  const page_t* page;
276  const page_dir_slot_t* slot;
277  const rec_t* up_rec;
278  const rec_t* low_rec;
279  const rec_t* mid_rec;
280  ulint up_matched_fields;
281  ulint up_matched_bytes;
282  ulint low_matched_fields;
283  ulint low_matched_bytes;
284  ulint cur_matched_fields;
285  ulint cur_matched_bytes;
286  int cmp;
287 #ifdef UNIV_SEARCH_DEBUG
288  int dbg_cmp;
289  ulint dbg_matched_fields;
290  ulint dbg_matched_bytes;
291 #endif
292 #ifdef UNIV_ZIP_DEBUG
293  const page_zip_des_t* page_zip = buf_block_get_page_zip(block);
294 #endif /* UNIV_ZIP_DEBUG */
295  mem_heap_t* heap = NULL;
296  ulint offsets_[REC_OFFS_NORMAL_SIZE];
297  ulint* offsets = offsets_;
298  rec_offs_init(offsets_);
299 
300  ut_ad(block && tuple && iup_matched_fields && iup_matched_bytes
301  && ilow_matched_fields && ilow_matched_bytes && cursor);
302  ut_ad(dtuple_validate(tuple));
303 #ifdef UNIV_DEBUG
304 # ifdef PAGE_CUR_DBG
305  if (mode != PAGE_CUR_DBG)
306 # endif /* PAGE_CUR_DBG */
307 # ifdef PAGE_CUR_LE_OR_EXTENDS
308  if (mode != PAGE_CUR_LE_OR_EXTENDS)
309 # endif /* PAGE_CUR_LE_OR_EXTENDS */
310  ut_ad(mode == PAGE_CUR_L || mode == PAGE_CUR_LE
311  || mode == PAGE_CUR_G || mode == PAGE_CUR_GE);
312 #endif /* UNIV_DEBUG */
313  page = buf_block_get_frame(block);
314 #ifdef UNIV_ZIP_DEBUG
315  ut_a(!page_zip || page_zip_validate(page_zip, page, index));
316 #endif /* UNIV_ZIP_DEBUG */
317 
318  page_check_dir(page);
319 
320 #ifdef PAGE_CUR_ADAPT
/* Fast path: if the page has seen a run of rightward inserts
(PAGE_N_DIRECTION > 3, PAGE_DIRECTION == PAGE_RIGHT), try the
last-insert shortcut before doing the full search. */
321  if (page_is_leaf(page)
322  && (mode == PAGE_CUR_LE)
323  && (page_header_get_field(page, PAGE_N_DIRECTION) > 3)
324  && (page_header_get_ptr(page, PAGE_LAST_INSERT))
325  && (page_header_get_field(page, PAGE_DIRECTION) == PAGE_RIGHT)) {
326 
327  if (page_cur_try_search_shortcut(
328  block, index, tuple,
329  iup_matched_fields, iup_matched_bytes,
330  ilow_matched_fields, ilow_matched_bytes,
331  cursor)) {
332  return;
333  }
334  }
335 # ifdef PAGE_CUR_DBG
336  if (mode == PAGE_CUR_DBG) {
337  mode = PAGE_CUR_LE;
338  }
339 # endif
340 #endif
341 
342  /* The following flag does not work for non-latin1 char sets because
343  cmp_full_field does not tell how many bytes matched */
344 #ifdef PAGE_CUR_LE_OR_EXTENDS
345  ut_a(mode != PAGE_CUR_LE_OR_EXTENDS);
346 #endif /* PAGE_CUR_LE_OR_EXTENDS */
347 
348  /* If mode PAGE_CUR_G is specified, we are trying to position the
349  cursor to answer a query of the form "tuple < X", where tuple is
350  the input parameter, and X denotes an arbitrary physical record on
351  the page. We want to position the cursor on the first X which
352  satisfies the condition. */
353 
354  up_matched_fields = *iup_matched_fields;
355  up_matched_bytes = *iup_matched_bytes;
356  low_matched_fields = *ilow_matched_fields;
357  low_matched_bytes = *ilow_matched_bytes;
358 
359  /* Perform binary search. First the search is done through the page
360  directory, after that as a linear search in the list of records
361  owned by the upper limit directory slot. */
362 
363  low = 0;
364  up = page_dir_get_n_slots(page) - 1;
365 
366  /* Perform binary search until the lower and upper limit directory
367  slots come to the distance 1 of each other */
368 
369  while (up - low > 1) {
370  mid = (low + up) / 2;
371  slot = page_dir_get_nth_slot(page, mid);
372  mid_rec = page_dir_slot_get_rec(slot);
373 
/* The prefix known to match the mid record is the smaller of
the prefixes matching the two current bounds. */
374  ut_pair_min(&cur_matched_fields, &cur_matched_bytes,
375  low_matched_fields, low_matched_bytes,
376  up_matched_fields, up_matched_bytes);
377 
378  offsets = rec_get_offsets(mid_rec, index, offsets,
380  &heap);
381 
382  cmp = cmp_dtuple_rec_with_match(tuple, mid_rec, offsets,
383  &cur_matched_fields,
384  &cur_matched_bytes);
385  if (UNIV_LIKELY(cmp > 0)) {
386 low_slot_match:
387  low = mid;
388  low_matched_fields = cur_matched_fields;
389  low_matched_bytes = cur_matched_bytes;
390 
391  } else if (UNIV_EXPECT(cmp, -1)) {
392 #ifdef PAGE_CUR_LE_OR_EXTENDS
393  if (mode == PAGE_CUR_LE_OR_EXTENDS
394  && page_cur_rec_field_extends(
395  tuple, mid_rec, offsets,
396  cur_matched_fields)) {
397 
398  goto low_slot_match;
399  }
400 #endif /* PAGE_CUR_LE_OR_EXTENDS */
401 up_slot_match:
402  up = mid;
403  up_matched_fields = cur_matched_fields;
404  up_matched_bytes = cur_matched_bytes;
405 
/* cmp == 0: on equality, G and LE move the low bound up,
L and GE move the up bound down. */
406  } else if (mode == PAGE_CUR_G || mode == PAGE_CUR_LE
407 #ifdef PAGE_CUR_LE_OR_EXTENDS
408  || mode == PAGE_CUR_LE_OR_EXTENDS
409 #endif /* PAGE_CUR_LE_OR_EXTENDS */
410  ) {
411 
412  goto low_slot_match;
413  } else {
414 
415  goto up_slot_match;
416  }
417  }
418 
419  slot = page_dir_get_nth_slot(page, low);
420  low_rec = page_dir_slot_get_rec(slot);
421  slot = page_dir_get_nth_slot(page, up);
422  up_rec = page_dir_slot_get_rec(slot);
423 
424  /* Perform linear search until the upper and lower records come to
425  distance 1 of each other. */
426 
427  while (page_rec_get_next_const(low_rec) != up_rec) {
428 
429  mid_rec = page_rec_get_next_const(low_rec);
430 
431  ut_pair_min(&cur_matched_fields, &cur_matched_bytes,
432  low_matched_fields, low_matched_bytes,
433  up_matched_fields, up_matched_bytes);
434 
435  offsets = rec_get_offsets(mid_rec, index, offsets,
437  &heap);
438 
439  cmp = cmp_dtuple_rec_with_match(tuple, mid_rec, offsets,
440  &cur_matched_fields,
441  &cur_matched_bytes);
442  if (UNIV_LIKELY(cmp > 0)) {
443 low_rec_match:
444  low_rec = mid_rec;
445  low_matched_fields = cur_matched_fields;
446  low_matched_bytes = cur_matched_bytes;
447 
448  } else if (UNIV_EXPECT(cmp, -1)) {
449 #ifdef PAGE_CUR_LE_OR_EXTENDS
450  if (mode == PAGE_CUR_LE_OR_EXTENDS
451  && page_cur_rec_field_extends(
452  tuple, mid_rec, offsets,
453  cur_matched_fields)) {
454 
455  goto low_rec_match;
456  }
457 #endif /* PAGE_CUR_LE_OR_EXTENDS */
458 up_rec_match:
459  up_rec = mid_rec;
460  up_matched_fields = cur_matched_fields;
461  up_matched_bytes = cur_matched_bytes;
462  } else if (mode == PAGE_CUR_G || mode == PAGE_CUR_LE
463 #ifdef PAGE_CUR_LE_OR_EXTENDS
464  || mode == PAGE_CUR_LE_OR_EXTENDS
465 #endif /* PAGE_CUR_LE_OR_EXTENDS */
466  ) {
467 
468  goto low_rec_match;
469  } else {
470 
471  goto up_rec_match;
472  }
473  }
474 
475 #ifdef UNIV_SEARCH_DEBUG
476 
477  /* Check that the lower and upper limit records have the
478  right alphabetical order compared to tuple. */
479  dbg_matched_fields = 0;
480  dbg_matched_bytes = 0;
481 
482  offsets = rec_get_offsets(low_rec, index, offsets,
483  ULINT_UNDEFINED, &heap);
484  dbg_cmp = page_cmp_dtuple_rec_with_match(tuple, low_rec, offsets,
485  &dbg_matched_fields,
486  &dbg_matched_bytes);
487  if (mode == PAGE_CUR_G) {
488  ut_a(dbg_cmp >= 0);
489  } else if (mode == PAGE_CUR_GE) {
490  ut_a(dbg_cmp == 1);
491  } else if (mode == PAGE_CUR_L) {
492  ut_a(dbg_cmp == 1);
493  } else if (mode == PAGE_CUR_LE) {
494  ut_a(dbg_cmp >= 0);
495  }
496 
497  if (!page_rec_is_infimum(low_rec)) {
498 
499  ut_a(low_matched_fields == dbg_matched_fields);
500  ut_a(low_matched_bytes == dbg_matched_bytes);
501  }
502 
503  dbg_matched_fields = 0;
504  dbg_matched_bytes = 0;
505 
506  offsets = rec_get_offsets(up_rec, index, offsets,
507  ULINT_UNDEFINED, &heap);
508  dbg_cmp = page_cmp_dtuple_rec_with_match(tuple, up_rec, offsets,
509  &dbg_matched_fields,
510  &dbg_matched_bytes);
511  if (mode == PAGE_CUR_G) {
512  ut_a(dbg_cmp == -1);
513  } else if (mode == PAGE_CUR_GE) {
514  ut_a(dbg_cmp <= 0);
515  } else if (mode == PAGE_CUR_L) {
516  ut_a(dbg_cmp <= 0);
517  } else if (mode == PAGE_CUR_LE) {
518  ut_a(dbg_cmp == -1);
519  }
520 
521  if (!page_rec_is_supremum(up_rec)) {
522 
523  ut_a(up_matched_fields == dbg_matched_fields);
524  ut_a(up_matched_bytes == dbg_matched_bytes);
525  }
526 #endif
/* G/GE modes (mode <= PAGE_CUR_GE) position on the upper record;
L/LE modes position on the lower record. */
527  if (mode <= PAGE_CUR_GE) {
528  page_cur_position(up_rec, block, cursor);
529  } else {
530  page_cur_position(low_rec, block, cursor);
531  }
532 
/* Report the matched prefixes back to the caller. */
533  *iup_matched_fields = up_matched_fields;
534  *iup_matched_bytes = up_matched_bytes;
535  *ilow_matched_fields = low_matched_fields;
536  *ilow_matched_bytes = low_matched_bytes;
537  if (UNIV_LIKELY_NULL(heap)) {
538  mem_heap_free(heap);
539  }
540 }
541 
542 /***********************************************************/
/***********************************************************/
/* Positions the cursor on a randomly chosen user record of the page.
If the page is empty, the cursor is left before the first record (on
the infimum).  Otherwise rnd is drawn uniformly from [0, n_recs-1] and
the cursor is advanced rnd+1 times from the infimum, landing on each
user record with equal probability.
NOTE(review): the function-name line (original line 547, presumably
"page_cur_open_on_rnd_user_rec(") was dropped by the HTML extraction;
the numeric prefix on each line is a leftover Doxygen line number. */
545 UNIV_INTERN
546 void
548 /*==========================*/
549  buf_block_t* block,
550  page_cur_t* cursor)
551 {
552  ulint rnd;
553  ulint n_recs = page_get_n_recs(buf_block_get_frame(block));
554 
555  page_cur_set_before_first(block, cursor);
556 
557  if (UNIV_UNLIKELY(n_recs == 0)) {
558 
559  return;
560  }
561 
562  rnd = (ulint) (page_cur_lcg_prng() % n_recs);
563 
/* do-while executes rnd+1 steps, so rnd == 0 still moves the
cursor off the infimum onto the first user record. */
564  do {
565  page_cur_move_to_next(cursor);
566  } while (rnd--);
567 }
568 
569 /***********************************************************/
/***********************************************************/
/* Writes the redo log record for a record insert.  To keep the log
compact, only the suffix of the inserted record that differs from the
record at the cursor position is logged, together with enough metadata
(mismatch index, origin offset, info bits) for recovery to rebuild the
full record from the cursor record.
NOTE(review): several original lines were dropped by the HTML
extraction: line 578 (parameter doc), line 640 (the second operand of
the condition at 638-639, presumably "- rec_offs_extra_size" /
"page_rec_get_base_extra_size"), line 674 (presumably the
"log_ptr = mlog_write_initial_log_record_fast(" call opening),
and line 717 (presumably "rec_get_info_and_status_bits(").  Restore
from the pristine source before compiling.  The numeric prefix on
each line is a leftover Doxygen line number. */
571 static
572 void
573 page_cur_insert_rec_write_log(
574 /*==========================*/
575  rec_t* insert_rec,
576  ulint rec_size,
577  rec_t* cursor_rec,
579  dict_index_t* index,
580  mtr_t* mtr)
581 {
582  ulint cur_rec_size;
583  ulint extra_size;
584  ulint cur_extra_size;
585  const byte* ins_ptr;
586  byte* log_ptr;
587  const byte* log_end;
588  ulint i;
589 
590  ut_a(rec_size < UNIV_PAGE_SIZE);
591  ut_ad(page_align(insert_rec) == page_align(cursor_rec));
592  ut_ad(!page_rec_is_comp(insert_rec)
593  == !dict_table_is_comp(index->table));
594 
/* Compute the sizes of both records; the heap is only needed
transiently for the offset arrays. */
595  {
596  mem_heap_t* heap = NULL;
597  ulint cur_offs_[REC_OFFS_NORMAL_SIZE];
598  ulint ins_offs_[REC_OFFS_NORMAL_SIZE];
599 
600  ulint* cur_offs;
601  ulint* ins_offs;
602 
603  rec_offs_init(cur_offs_);
604  rec_offs_init(ins_offs_);
605 
606  cur_offs = rec_get_offsets(cursor_rec, index, cur_offs_,
607  ULINT_UNDEFINED, &heap);
608  ins_offs = rec_get_offsets(insert_rec, index, ins_offs_,
609  ULINT_UNDEFINED, &heap);
610 
611  extra_size = rec_offs_extra_size(ins_offs);
612  cur_extra_size = rec_offs_extra_size(cur_offs);
613  ut_ad(rec_size == rec_offs_size(ins_offs));
614  cur_rec_size = rec_offs_size(cur_offs);
615 
616  if (UNIV_LIKELY_NULL(heap)) {
617  mem_heap_free(heap);
618  }
619  }
620 
621  ins_ptr = insert_rec - extra_size;
622 
623  i = 0;
624 
/* i becomes the length of the common byte prefix of the two
records (counted from the start of the extra/header bytes). */
625  if (cur_extra_size == extra_size) {
626  ulint min_rec_size = ut_min(cur_rec_size, rec_size);
627 
628  const byte* cur_ptr = cursor_rec - cur_extra_size;
629 
630  /* Find out the first byte in insert_rec which differs from
631  cursor_rec; skip the bytes in the record info */
632 
633  do {
634  if (*ins_ptr == *cur_ptr) {
635  i++;
636  ins_ptr++;
637  cur_ptr++;
638  } else if ((i < extra_size)
639  && (i >= extra_size
641  (insert_rec))) {
642  i = extra_size;
643  ins_ptr = insert_rec;
644  cur_ptr = cursor_rec;
645  } else {
646  break;
647  }
648  } while (i < min_rec_size);
649  }
650 
/* Write the log record header; short-insert mode omits the
index id and the cursor offset. */
651  if (mtr_get_log_mode(mtr) != MTR_LOG_SHORT_INSERTS) {
652 
653  if (page_rec_is_comp(insert_rec)) {
654  log_ptr = mlog_open_and_write_index(
655  mtr, insert_rec, index, MLOG_COMP_REC_INSERT,
656  2 + 5 + 1 + 5 + 5 + MLOG_BUF_MARGIN);
657  if (UNIV_UNLIKELY(!log_ptr)) {
658  /* Logging in mtr is switched off
659  during crash recovery: in that case
660  mlog_open returns NULL */
661  return;
662  }
663  } else {
664  log_ptr = mlog_open(mtr, 11
665  + 2 + 5 + 1 + 5 + 5
666  + MLOG_BUF_MARGIN);
667  if (UNIV_UNLIKELY(!log_ptr)) {
668  /* Logging in mtr is switched off
669  during crash recovery: in that case
670  mlog_open returns NULL */
671  return;
672  }
673 
675  insert_rec, MLOG_REC_INSERT, log_ptr, mtr);
676  }
677 
678  log_end = &log_ptr[2 + 5 + 1 + 5 + 5 + MLOG_BUF_MARGIN];
679  /* Write the cursor rec offset as a 2-byte ulint */
680  mach_write_to_2(log_ptr, page_offset(cursor_rec));
681  log_ptr += 2;
682  } else {
683  log_ptr = mlog_open(mtr, 5 + 1 + 5 + 5 + MLOG_BUF_MARGIN);
684  if (!log_ptr) {
685  /* Logging in mtr is switched off during crash
686  recovery: in that case mlog_open returns NULL */
687  return;
688  }
689  log_end = &log_ptr[5 + 1 + 5 + 5 + MLOG_BUF_MARGIN];
690  }
691 
/* Extra info is logged only when it cannot be copied from the
cursor record at recovery: differing info/status bits or sizes. */
692  if (page_rec_is_comp(insert_rec)) {
693  if (UNIV_UNLIKELY
694  (rec_get_info_and_status_bits(insert_rec, TRUE)
695  != rec_get_info_and_status_bits(cursor_rec, TRUE))) {
696 
697  goto need_extra_info;
698  }
699  } else {
700  if (UNIV_UNLIKELY
701  (rec_get_info_and_status_bits(insert_rec, FALSE)
702  != rec_get_info_and_status_bits(cursor_rec, FALSE))) {
703 
704  goto need_extra_info;
705  }
706  }
707 
708  if (extra_size != cur_extra_size || rec_size != cur_rec_size) {
709 need_extra_info:
710  /* Write the record end segment length
711  and the extra info storage flag */
712  log_ptr += mach_write_compressed(log_ptr,
713  2 * (rec_size - i) + 1);
714 
715  /* Write the info bits */
716  mach_write_to_1(log_ptr,
718  insert_rec,
719  page_rec_is_comp(insert_rec)));
720  log_ptr++;
721 
722  /* Write the record origin offset */
723  log_ptr += mach_write_compressed(log_ptr, extra_size);
724 
725  /* Write the mismatch index */
726  log_ptr += mach_write_compressed(log_ptr, i);
727 
728  ut_a(i < UNIV_PAGE_SIZE);
729  ut_a(extra_size < UNIV_PAGE_SIZE);
730  } else {
731  /* Write the record end segment length
732  and the extra info storage flag */
733  log_ptr += mach_write_compressed(log_ptr, 2 * (rec_size - i));
734  }
735 
736  /* Write to the log the inserted index record end segment which
737  differs from the cursor record */
738 
739  rec_size -= i;
740 
/* If the suffix fits in the opened log buffer, copy it there;
otherwise close the buffer and catenate the string separately. */
741  if (log_ptr + rec_size <= log_end) {
742  memcpy(log_ptr, ins_ptr, rec_size);
743  mlog_close(mtr, log_ptr + rec_size);
744  } else {
745  mlog_close(mtr, log_ptr);
746  ut_a(rec_size < UNIV_PAGE_SIZE);
747  mlog_catenate_string(mtr, ins_ptr, rec_size);
748  }
749 }
750 #else /* !UNIV_HOTBACKUP */
751 # define page_cur_insert_rec_write_log(ins_rec,size,cur,index,mtr) ((void) 0)
752 #endif /* !UNIV_HOTBACKUP */
753 
754 /***********************************************************/
/***********************************************************/
/* Parses a MLOG_REC_INSERT / MLOG_COMP_REC_INSERT redo log record
and, if block is non-NULL, reconstructs the inserted record from the
cursor record plus the logged suffix and applies the insert to the
page.  Returns the log pointer past the parsed record, or NULL if the
record is incomplete or corrupt.
NOTE(review): dropped by the HTML extraction: the function-name line
(original line 759, presumably "page_cur_parse_insert_rec(") and
original line 777, the local declaration "page_cur_t cursor;" that the
"&cursor" reference near the end (line 922) requires.  Restore from
the pristine source before compiling.  The numeric prefix on each
line is a leftover Doxygen line number. */
757 UNIV_INTERN
758 byte*
760 /*======================*/
761  ibool is_short,
762  byte* ptr,
763  byte* end_ptr,
764  buf_block_t* block,
765  dict_index_t* index,
766  mtr_t* mtr)
767 {
768  ulint origin_offset;
769  ulint end_seg_len;
770  ulint mismatch_index;
771  page_t* page;
772  rec_t* cursor_rec;
773  byte buf1[1024];
774  byte* buf;
775  byte* ptr2 = ptr;
776  ulint info_and_status_bits = 0; /* remove warning */
778  mem_heap_t* heap = NULL;
779  ulint offsets_[REC_OFFS_NORMAL_SIZE];
780  ulint* offsets = offsets_;
781  rec_offs_init(offsets_);
782 
783  page = block ? buf_block_get_frame(block) : NULL;
784 
/* Locate the cursor record: short inserts implicitly use the last
user record; otherwise the log carries the cursor page offset. */
785  if (is_short) {
786  cursor_rec = page_rec_get_prev(page_get_supremum_rec(page));
787  } else {
788  ulint offset;
789 
790  /* Read the cursor rec offset as a 2-byte ulint */
791 
792  if (UNIV_UNLIKELY(end_ptr < ptr + 2)) {
793 
794  return(NULL);
795  }
796 
797  offset = mach_read_from_2(ptr);
798  ptr += 2;
799 
800  cursor_rec = page + offset;
801 
802  if (UNIV_UNLIKELY(offset >= UNIV_PAGE_SIZE)) {
803 
804  recv_sys->found_corrupt_log = TRUE;
805 
806  return(NULL);
807  }
808  }
809 
/* end_seg_len encodes 2 * suffix-length, plus bit 0 as the
"extra info follows" flag (see the writer above). */
810  ptr = mach_parse_compressed(ptr, end_ptr, &end_seg_len);
811 
812  if (ptr == NULL) {
813 
814  return(NULL);
815  }
816 
817  if (UNIV_UNLIKELY(end_seg_len >= UNIV_PAGE_SIZE << 1)) {
818  recv_sys->found_corrupt_log = TRUE;
819 
820  return(NULL);
821  }
822 
823  if (end_seg_len & 0x1UL) {
824  /* Read the info bits */
825 
826  if (end_ptr < ptr + 1) {
827 
828  return(NULL);
829  }
830 
831  info_and_status_bits = mach_read_from_1(ptr);
832  ptr++;
833 
834  ptr = mach_parse_compressed(ptr, end_ptr, &origin_offset);
835 
836  if (ptr == NULL) {
837 
838  return(NULL);
839  }
840 
841  ut_a(origin_offset < UNIV_PAGE_SIZE);
842 
843  ptr = mach_parse_compressed(ptr, end_ptr, &mismatch_index);
844 
845  if (ptr == NULL) {
846 
847  return(NULL);
848  }
849 
850  ut_a(mismatch_index < UNIV_PAGE_SIZE);
851  }
852 
853  if (UNIV_UNLIKELY(end_ptr < ptr + (end_seg_len >> 1))) {
854 
855  return(NULL);
856  }
857 
/* Parse-only mode: no page to apply to, just skip the payload. */
858  if (!block) {
859 
860  return(ptr + (end_seg_len >> 1));
861  }
862 
863  ut_ad(!!page_is_comp(page) == dict_table_is_comp(index->table));
864  ut_ad(!buf_block_get_page_zip(block) || page_is_comp(page));
865 
866  /* Read from the log the inserted index record end segment which
867  differs from the cursor record */
868 
869  offsets = rec_get_offsets(cursor_rec, index, offsets,
870  ULINT_UNDEFINED, &heap);
871 
/* No extra info logged: the metadata is identical to the cursor
record's, so derive it from the cursor record. */
872  if (!(end_seg_len & 0x1UL)) {
873  info_and_status_bits = rec_get_info_and_status_bits(
874  cursor_rec, page_is_comp(page));
875  origin_offset = rec_offs_extra_size(offsets);
876  mismatch_index = rec_offs_size(offsets) - (end_seg_len >> 1);
877  }
878 
879  end_seg_len >>= 1;
880 
/* Use the stack buffer when the record fits; else heap-allocate. */
881  if (mismatch_index + end_seg_len < sizeof buf1) {
882  buf = buf1;
883  } else {
884  buf = static_cast<byte*>(
885  mem_alloc(mismatch_index + end_seg_len));
886  }
887 
888  /* Build the inserted record to buf */
889 
890  if (UNIV_UNLIKELY(mismatch_index >= UNIV_PAGE_SIZE)) {
891  fprintf(stderr,
892  "Is short %lu, info_and_status_bits %lu, offset %lu, "
893  "o_offset %lu\n"
894  "mismatch index %lu, end_seg_len %lu\n"
895  "parsed len %lu\n",
896  (ulong) is_short, (ulong) info_and_status_bits,
897  (ulong) page_offset(cursor_rec),
898  (ulong) origin_offset,
899  (ulong) mismatch_index, (ulong) end_seg_len,
900  (ulong) (ptr - ptr2));
901 
902  fputs("Dump of 300 bytes of log:\n", stderr);
903  ut_print_buf(stderr, ptr2, 300);
904  putc('\n', stderr);
905 
906  buf_page_print(page, 0, 0);
907 
908  ut_error;
909  }
910 
/* Record = common prefix copied from the cursor record + logged
differing suffix. */
911  ut_memcpy(buf, rec_get_start(cursor_rec, offsets), mismatch_index);
912  ut_memcpy(buf + mismatch_index, ptr, end_seg_len);
913 
914  if (page_is_comp(page)) {
915  rec_set_info_and_status_bits(buf + origin_offset,
916  info_and_status_bits);
917  } else {
918  rec_set_info_bits_old(buf + origin_offset,
919  info_and_status_bits);
920  }
921 
922  page_cur_position(cursor_rec, block, &cursor);
923 
924  offsets = rec_get_offsets(buf + origin_offset, index, offsets,
925  ULINT_UNDEFINED, &heap);
926  if (UNIV_UNLIKELY(!page_cur_rec_insert(&cursor,
927  buf + origin_offset,
928  index, offsets, mtr))) {
929  /* The redo log record should only have been written
930  after the write was successful. */
931  ut_error;
932  }
933 
934  if (buf != buf1) {
935 
936  mem_free(buf);
937  }
938 
939  if (UNIV_LIKELY_NULL(heap)) {
940  mem_heap_free(heap);
941  }
942 
943  return(ptr + end_seg_len);
944 }
945 
946 /***********************************************************/
/***********************************************************/
/* Low-level insert of a physical record next to current_rec on an
uncompressed page.  Allocates space (reusing the PAGE_FREE list head
when it is large enough, else the page heap), copies the record,
links it into the singly linked record list, maintains the page
header's insert-direction statistics and n_recs, updates the owning
directory slot (splitting it if it exceeds
PAGE_DIR_SLOT_MAX_N_OWNED), and writes the redo log record.
Returns a pointer to the inserted record, or NULL if space allocation
from the heap fails.
NOTE(review): dropped by the HTML extraction: the function-name line
(original line 953, presumably "page_cur_insert_rec_low("), parameter
doc lines, the first half of the assertion at original lines 976-978
(presumably "ut_ad(!!page_rec_is_comp(current_rec)"), the
"page_header_get_field(" openings at lines 1109/1119, and the
"page_dir_split_slot(" opening at line 1146.  Restore from the
pristine source before compiling.  The numeric prefix on each line is
a leftover Doxygen line number. */
951 UNIV_INTERN
952 rec_t*
954 /*====================*/
955  rec_t* current_rec,
957  dict_index_t* index,
958  const rec_t* rec,
959  ulint* offsets,
960  mtr_t* mtr)
961 {
962  byte* insert_buf;
963  ulint rec_size;
964  page_t* page;
965  rec_t* last_insert;
967  rec_t* free_rec;
969  rec_t* insert_rec;
970  ulint heap_no;
973  ut_ad(rec_offs_validate(rec, index, offsets));
974 
975  page = page_align(current_rec);
977  == (ibool) !!page_is_comp(page));
979  ut_ad(mach_read_from_8(page + PAGE_HEADER + PAGE_INDEX_ID)
980  == index->id || recv_recovery_is_on() || mtr->inside_ibuf);
981 
982  ut_ad(!page_rec_is_supremum(current_rec));
983 
984  /* 1. Get the size of the physical record in the page */
985  rec_size = rec_offs_size(offsets);
986 
987 #ifdef UNIV_DEBUG_VALGRIND
988  {
989  const void* rec_start
990  = rec - rec_offs_extra_size(offsets);
991  ulint extra_size
992  = rec_offs_extra_size(offsets)
993  - (rec_offs_comp(offsets)
994  ? REC_N_NEW_EXTRA_BYTES
995  : REC_N_OLD_EXTRA_BYTES);
996 
997  /* All data bytes of the record must be valid. */
998  UNIV_MEM_ASSERT_RW(rec, rec_offs_data_size(offsets));
999  /* The variable-length header must be valid. */
1000  UNIV_MEM_ASSERT_RW(rec_start, extra_size);
1001  }
1002 #endif /* UNIV_DEBUG_VALGRIND */
1003 
1004  /* 2. Try to find suitable space from page memory management */
1005 
1006  free_rec = page_header_get_ptr(page, PAGE_FREE);
1007  if (UNIV_LIKELY_NULL(free_rec)) {
1008  /* Try to allocate from the head of the free list. */
1009  ulint foffsets_[REC_OFFS_NORMAL_SIZE];
1010  ulint* foffsets = foffsets_;
1011  mem_heap_t* heap = NULL;
1012 
1013  rec_offs_init(foffsets_);
1014 
1015  foffsets = rec_get_offsets(
1016  free_rec, index, foffsets, ULINT_UNDEFINED, &heap);
/* The freed record's slot must be large enough; only the list
head is considered, no best-fit scan. */
1017  if (rec_offs_size(foffsets) < rec_size) {
1018  if (UNIV_LIKELY_NULL(heap)) {
1019  mem_heap_free(heap);
1020  }
1021 
1022  goto use_heap;
1023  }
1024 
1025  insert_buf = free_rec - rec_offs_extra_size(foffsets);
1026 
1027  if (page_is_comp(page)) {
1028  heap_no = rec_get_heap_no_new(free_rec);
1029  page_mem_alloc_free(page, NULL,
1030  rec_get_next_ptr(free_rec, TRUE),
1031  rec_size);
1032  } else {
1033  heap_no = rec_get_heap_no_old(free_rec);
1034  page_mem_alloc_free(page, NULL,
1035  rec_get_next_ptr(free_rec, FALSE),
1036  rec_size);
1037  }
1038 
1039  if (UNIV_LIKELY_NULL(heap)) {
1040  mem_heap_free(heap);
1041  }
1042  } else {
1043 use_heap:
1044  free_rec = NULL;
1045  insert_buf = page_mem_alloc_heap(page, NULL,
1046  rec_size, &heap_no);
1047 
1048  if (UNIV_UNLIKELY(insert_buf == NULL)) {
1049  return(NULL);
1050  }
1051  }
1052 
1053  /* 3. Create the record */
1054  insert_rec = rec_copy(insert_buf, rec, offsets);
1055  rec_offs_make_valid(insert_rec, index, offsets);
1056 
1057  /* 4. Insert the record in the linked list of records */
1058  ut_ad(current_rec != insert_rec);
1059 
1060  {
1061  /* next record after current before the insertion */
1062  rec_t* next_rec = page_rec_get_next(current_rec);
1063 #ifdef UNIV_DEBUG
1064  if (page_is_comp(page)) {
1065  ut_ad(rec_get_status(current_rec)
1066  <= REC_STATUS_INFIMUM);
1067  ut_ad(rec_get_status(insert_rec) < REC_STATUS_INFIMUM);
1068  ut_ad(rec_get_status(next_rec) != REC_STATUS_INFIMUM);
1069  }
1070 #endif
1071  page_rec_set_next(insert_rec, next_rec);
1072  page_rec_set_next(current_rec, insert_rec);
1073  }
1074 
1075  page_header_set_field(page, NULL, PAGE_N_RECS,
1076  1 + page_get_n_recs(page));
1077 
1078  /* 5. Set the n_owned field in the inserted record to zero,
1079  and set the heap_no field */
1080  if (page_is_comp(page)) {
1081  rec_set_n_owned_new(insert_rec, NULL, 0);
1082  rec_set_heap_no_new(insert_rec, heap_no);
1083  } else {
1084  rec_set_n_owned_old(insert_rec, 0);
1085  rec_set_heap_no_old(insert_rec, heap_no);
1086  }
1087 
1088  UNIV_MEM_ASSERT_RW(rec_get_start(insert_rec, offsets),
1089  rec_offs_size(offsets));
1090  /* 6. Update the last insertion info in page header */
1091 
1092  last_insert = page_header_get_ptr(page, PAGE_LAST_INSERT);
1093  ut_ad(!last_insert || !page_is_comp(page)
1094  || rec_get_node_ptr_flag(last_insert)
1095  == rec_get_node_ptr_flag(insert_rec));
1096 
/* PAGE_DIRECTION / PAGE_N_DIRECTION track runs of inserts to
the right or left of the previous insert point; they feed the
adaptive shortcut search above. */
1097  if (UNIV_UNLIKELY(last_insert == NULL)) {
1098  page_header_set_field(page, NULL, PAGE_DIRECTION,
1099  PAGE_NO_DIRECTION);
1100  page_header_set_field(page, NULL, PAGE_N_DIRECTION, 0);
1101 
1102  } else if ((last_insert == current_rec)
1103  && (page_header_get_field(page, PAGE_DIRECTION)
1104  != PAGE_LEFT)) {
1105 
1106  page_header_set_field(page, NULL, PAGE_DIRECTION,
1107  PAGE_RIGHT);
1108  page_header_set_field(page, NULL, PAGE_N_DIRECTION,
1110  page, PAGE_N_DIRECTION) + 1);
1111 
1112  } else if ((page_rec_get_next(insert_rec) == last_insert)
1113  && (page_header_get_field(page, PAGE_DIRECTION)
1114  != PAGE_RIGHT)) {
1115 
1116  page_header_set_field(page, NULL, PAGE_DIRECTION,
1117  PAGE_LEFT);
1118  page_header_set_field(page, NULL, PAGE_N_DIRECTION,
1120  page, PAGE_N_DIRECTION) + 1);
1121  } else {
1122  page_header_set_field(page, NULL, PAGE_DIRECTION,
1123  PAGE_NO_DIRECTION);
1124  page_header_set_field(page, NULL, PAGE_N_DIRECTION, 0);
1125  }
1126 
1127  page_header_set_ptr(page, NULL, PAGE_LAST_INSERT, insert_rec);
1128 
1129  /* 7. It remains to update the owner record. */
1130  {
1131  rec_t* owner_rec = page_rec_find_owner_rec(insert_rec);
1132  ulint n_owned;
1133  if (page_is_comp(page)) {
1134  n_owned = rec_get_n_owned_new(owner_rec);
1135  rec_set_n_owned_new(owner_rec, NULL, n_owned + 1);
1136  } else {
1137  n_owned = rec_get_n_owned_old(owner_rec);
1138  rec_set_n_owned_old(owner_rec, n_owned + 1);
1139  }
1140 
1141  /* 8. Now we have incremented the n_owned field of the owner
1142  record. If the number exceeds PAGE_DIR_SLOT_MAX_N_OWNED,
1143  we have to split the corresponding directory slot in two. */
1144 
1145  if (UNIV_UNLIKELY(n_owned == PAGE_DIR_SLOT_MAX_N_OWNED)) {
1147  page, NULL,
1148  page_dir_find_owner_slot(owner_rec));
1149  }
1150  }
1151 
1152  /* 9. Write log record of the insert */
1153  if (UNIV_LIKELY(mtr != NULL)) {
1154  page_cur_insert_rec_write_log(insert_rec, rec_size,
1155  current_rec, index, mtr);
1156  }
1157 
1158  btr_blob_dbg_add_rec(insert_rec, index, offsets, "insert");
1159 
1160  return(insert_rec);
1161 }
1162 
1163 /***********************************************************/
/* NOTE(review): the Doxygen extraction this file was recovered from
dropped several physical lines inside this function, including the
declaration-name line (content line 1177) and the opening lines of a
few wrapped calls/conditions (content lines 1237, 1325, 1342, 1567,
1577, 1600).  Judging from the compressed-page logic below, this is
page_cur_insert_rec_zip(): inserts a record next to the page cursor
on a COMPRESSED page and returns a pointer to the inserted record,
or NULL if the record does not fit -- TODO confirm against a
pristine copy of page0cur.cc and restore the missing lines before
compiling or editing. */
1175 UNIV_INTERN
1176 rec_t*
1178 /*====================*/
1179  page_cur_t* cursor,
1180  dict_index_t* index,
1181  const rec_t* rec,
1182  ulint* offsets,
1183  mtr_t* mtr)
1184 {
1185  byte* insert_buf;
1186  ulint rec_size;
1187  page_t* page;
1188  rec_t* last_insert;
1190  rec_t* free_rec;
1192  rec_t* insert_rec;
1193  ulint heap_no;
1195  page_zip_des_t* page_zip;
1196 
1197  page_zip = page_cur_get_page_zip(cursor);
1198  ut_ad(page_zip);
1199 
1200  ut_ad(rec_offs_validate(rec, index, offsets));
1201 
1202  page = page_cur_get_page(cursor);
1203  ut_ad(dict_table_is_comp(index->table));
1204  ut_ad(page_is_comp(page));
1206  ut_ad(mach_read_from_8(page + PAGE_HEADER + PAGE_INDEX_ID)
1207  == index->id || mtr->inside_ibuf || recv_recovery_is_on());
1208 
1209  ut_ad(!page_cur_is_after_last(cursor));
1210 #ifdef UNIV_ZIP_DEBUG
1211  ut_a(page_zip_validate(page_zip, page, index));
1212 #endif /* UNIV_ZIP_DEBUG */
1213 
1214  /* 1. Get the size of the physical record in the page */
1215  rec_size = rec_offs_size(offsets);
1216 
1217 #ifdef UNIV_DEBUG_VALGRIND
1218  {
1219  const void* rec_start
1220  = rec - rec_offs_extra_size(offsets);
1221  ulint extra_size
1222  = rec_offs_extra_size(offsets)
1223  - (rec_offs_comp(offsets)
1224  ? REC_N_NEW_EXTRA_BYTES
1225  : REC_N_OLD_EXTRA_BYTES);
1226 
1227  /* All data bytes of the record must be valid. */
1228  UNIV_MEM_ASSERT_RW(rec, rec_offs_data_size(offsets));
1229  /* The variable-length header must be valid. */
1230  UNIV_MEM_ASSERT_RW(rec_start, extra_size);
1231  }
1232 #endif /* UNIV_DEBUG_VALGRIND */
1233 
1234  const bool reorg_before_insert = page_has_garbage(page)
1235  && rec_size > page_get_max_insert_size(page, 1)
 /* NOTE(review): the middle line of this condition is missing
 here (extraction gap, content line 1236); presumably
 "&& rec_size <= page_get_max_insert_size_after_reorganize(" --
 TODO confirm. */
1237  page, 1);
1238 
1239  /* 2. Try to find suitable space from page memory management */
1240  if (!page_zip_available(page_zip, dict_index_is_clust(index),
1241  rec_size, 1)
1242  || reorg_before_insert) {
1243  /* The values can change dynamically. */
1244  bool log_compressed = page_zip_log_pages;
1245  ulint level = page_zip_level;
1246 #ifdef UNIV_DEBUG
1247  rec_t* cursor_rec = page_cur_get_rec(cursor);
1248 #endif /* UNIV_DEBUG */
1249 
1250  /* If we are not writing compressed page images, we
1251  must reorganize the page before attempting the
1252  insert. */
1253  if (recv_recovery_is_on()) {
1254  /* Insert into the uncompressed page only.
1255  The page reorganization or creation that we
1256  would attempt outside crash recovery would
1257  have been covered by a previous redo log record. */
1258  } else if (page_is_empty(page)) {
 /* NOTE(review): content line 1259 is missing here (extraction
 gap); likely a debug assertion on the cursor position --
 TODO confirm. */
1260 
1261  /* This is an empty page. Recreate it to
1262  get rid of the modification log. */
1263  page_create_zip(page_cur_get_block(cursor), index,
1264  page_header_get_field(page, PAGE_LEVEL),
1265  0, mtr);
1266  ut_ad(!page_header_get_ptr(page, PAGE_FREE));
1267 
1268  if (page_zip_available(
1269  page_zip, dict_index_is_clust(index),
1270  rec_size, 1)) {
1271  goto use_heap;
1272  }
1273 
1274  /* The cursor should remain on the page infimum. */
1275  return(NULL);
1276  } else if (!page_zip->m_nonempty && !page_has_garbage(page)) {
1277  /* The page has been freshly compressed, so
1278  reorganizing it will not help. */
1279  } else if (log_compressed && !reorg_before_insert) {
1280  /* Insert into uncompressed page only, and
1281  try page_zip_reorganize() afterwards. */
1282  } else if (btr_page_reorganize_low(
1283  recv_recovery_is_on(), level,
1284  cursor, index, mtr)) {
1285  ut_ad(!page_header_get_ptr(page, PAGE_FREE));
1286 
1287  if (page_zip_available(
1288  page_zip, dict_index_is_clust(index),
1289  rec_size, 1)) {
1290  /* After reorganizing, there is space
1291  available. */
1292  goto use_heap;
1293  }
1294  } else {
1295  ut_ad(cursor->rec == cursor_rec);
1296  return(NULL);
1297  }
1298 
1299  /* Try compressing the whole page afterwards. */
1300  insert_rec = page_cur_insert_rec_low(
1301  cursor->rec, index, rec, offsets, NULL);
1302 
1303  /* If recovery is on, this implies that the compression
1304  of the page was successful during runtime. Had that not
1305  been the case or had the redo logging of compressed
1306  pages been enabled during runtime then we'd have seen
1307  a MLOG_ZIP_PAGE_COMPRESS redo record. Therefore, we
1308  know that we don't need to reorganize the page. We,
1309  however, do need to recompress the page. That will
1310  happen when the next redo record is read which must
1311  be of type MLOG_ZIP_PAGE_COMPRESS_NO_DATA and it must
1312  contain a valid compression level value.
1313  This implies that during recovery from this point till
1314  the next redo is applied the uncompressed and
1315  compressed versions are not identical and
1316  page_zip_validate will fail but that is OK because
1317  we call page_zip_validate only after processing
1318  all changes to a page under a single mtr during
1319  recovery. */
1320  if (insert_rec == NULL) {
1321  /* Out of space.
1322  This should never occur during crash recovery,
1323  because the MLOG_COMP_REC_INSERT should only
1324  be logged after a successful operation. */
 /* NOTE(review): content line 1325 is missing here (extraction
 gap); likely an assertion that recovery is not active --
 TODO confirm. */
1326  } else if (recv_recovery_is_on()) {
1327  /* This should be followed by
1328  MLOG_ZIP_PAGE_COMPRESS_NO_DATA,
1329  which should succeed. */
1330  rec_offs_make_valid(insert_rec, index, offsets);
1331  } else {
1332  ulint pos = page_rec_get_n_recs_before(insert_rec);
1333  ut_ad(pos > 0);
1334 
1335  if (!log_compressed) {
1336  if (page_zip_compress(
1337  page_zip, page, index,
1338  level, NULL)) {
1339  page_cur_insert_rec_write_log(
1340  insert_rec, rec_size,
1341  cursor->rec, index, mtr);
 /* NOTE(review): the opening line of this call is missing
 (extraction gap, content line 1342); presumably
 "page_zip_compress_write_log_no_data(" -- TODO confirm. */
1343  level, page, index, mtr);
1344 
1345  rec_offs_make_valid(
1346  insert_rec, index, offsets);
1347  return(insert_rec);
1348  }
1349 
1350  ut_ad(cursor->rec
1351  == (pos > 1
1352  ? page_rec_get_nth(
1353  page, pos - 1)
1354  : page + PAGE_NEW_INFIMUM));
1355  } else {
1356  /* We are writing entire page images
1357  to the log. Reduce the redo log volume
1358  by reorganizing the page at the same time. */
1359  if (page_zip_reorganize(
1360  cursor->block, index, mtr)) {
1361  /* The page was reorganized:
1362  Seek to pos. */
1363  if (pos > 1) {
1364  cursor->rec = page_rec_get_nth(
1365  page, pos - 1);
1366  } else {
1367  cursor->rec = page
1368  + PAGE_NEW_INFIMUM;
1369  }
1370 
1371  insert_rec = page + rec_get_next_offs(
1372  cursor->rec, TRUE);
1373  rec_offs_make_valid(
1374  insert_rec, index, offsets);
1375  return(insert_rec);
1376  }
1377 
1378  /* Theoretically, we could try one
1379  last resort of btr_page_reorganize_low()
1380  followed by page_zip_available(), but
1381  that would be very unlikely to
1382  succeed. (If the full reorganized page
1383  failed to compress, why would it
1384  succeed to compress the page, plus log
1385  the insert of this record? */
1386  }
1387 
1388  /* Out of space: restore the page */
1389  btr_blob_dbg_remove(page, index, "insert_zip_fail");
1390  if (!page_zip_decompress(page_zip, page, FALSE)) {
1391  ut_error; /* Memory corrupted? */
1392  }
1393  ut_ad(page_validate(page, index));
1394  btr_blob_dbg_add(page, index, "insert_zip_fail");
1395  insert_rec = NULL;
1396  }
1397 
1398  return(insert_rec);
1399  }
1400 
1401  free_rec = page_header_get_ptr(page, PAGE_FREE);
1402  if (UNIV_LIKELY_NULL(free_rec)) {
1403  /* Try to allocate from the head of the free list. */
1404  lint extra_size_diff;
1405  ulint foffsets_[REC_OFFS_NORMAL_SIZE];
1406  ulint* foffsets = foffsets_;
1407  mem_heap_t* heap = NULL;
1408 
1409  rec_offs_init(foffsets_);
1410 
1411  foffsets = rec_get_offsets(free_rec, index, foffsets,
1412  ULINT_UNDEFINED, &heap);
1413  if (rec_offs_size(foffsets) < rec_size) {
1414 too_small:
1415  if (UNIV_LIKELY_NULL(heap)) {
1416  mem_heap_free(heap);
1417  }
1418 
1419  goto use_heap;
1420  }
1421 
1422  insert_buf = free_rec - rec_offs_extra_size(foffsets);
1423 
1424  /* On compressed pages, do not relocate records from
1425  the free list. If extra_size would grow, use the heap. */
1426  extra_size_diff
1427  = rec_offs_extra_size(offsets)
1428  - rec_offs_extra_size(foffsets);
1429 
1430  if (UNIV_UNLIKELY(extra_size_diff < 0)) {
1431  /* Add an offset to the extra_size. */
1432  if (rec_offs_size(foffsets)
1433  < rec_size - extra_size_diff) {
1434 
1435  goto too_small;
1436  }
1437 
1438  insert_buf -= extra_size_diff;
1439  } else if (UNIV_UNLIKELY(extra_size_diff)) {
1440  /* Do not allow extra_size to grow */
1441 
1442  goto too_small;
1443  }
1444 
1445  heap_no = rec_get_heap_no_new(free_rec);
1446  page_mem_alloc_free(page, page_zip,
1447  rec_get_next_ptr(free_rec, TRUE),
1448  rec_size);
1449 
1450  if (!page_is_leaf(page)) {
1451  /* Zero out the node pointer of free_rec,
1452  in case it will not be overwritten by
1453  insert_rec. */
1454 
1455  ut_ad(rec_size > REC_NODE_PTR_SIZE);
1456 
1457  if (rec_offs_extra_size(foffsets)
1458  + rec_offs_data_size(foffsets) > rec_size) {
1459 
1460  memset(rec_get_end(free_rec, foffsets)
1461  - REC_NODE_PTR_SIZE, 0,
1462  REC_NODE_PTR_SIZE);
1463  }
1464  } else if (dict_index_is_clust(index)) {
1465  /* Zero out the DB_TRX_ID and DB_ROLL_PTR
1466  columns of free_rec, in case it will not be
1467  overwritten by insert_rec. */
1468 
1469  ulint trx_id_col;
1470  ulint trx_id_offs;
1471  ulint len;
1472 
1473  trx_id_col = dict_index_get_sys_col_pos(index,
1474  DATA_TRX_ID);
1475  ut_ad(trx_id_col > 0);
1476  ut_ad(trx_id_col != ULINT_UNDEFINED);
1477 
1478  trx_id_offs = rec_get_nth_field_offs(foffsets,
1479  trx_id_col, &len);
1480  ut_ad(len == DATA_TRX_ID_LEN);
1481 
1482  if (DATA_TRX_ID_LEN + DATA_ROLL_PTR_LEN + trx_id_offs
1483  + rec_offs_extra_size(foffsets) > rec_size) {
1484  /* We will have to zero out the
1485  DB_TRX_ID and DB_ROLL_PTR, because
1486  they will not be fully overwritten by
1487  insert_rec. */
1488 
1489  memset(free_rec + trx_id_offs, 0,
1490  DATA_TRX_ID_LEN + DATA_ROLL_PTR_LEN);
1491  }
1492 
1493  ut_ad(free_rec + trx_id_offs + DATA_TRX_ID_LEN
1494  == rec_get_nth_field(free_rec, foffsets,
1495  trx_id_col + 1, &len));
1496  ut_ad(len == DATA_ROLL_PTR_LEN);
1497  }
1498 
1499  if (UNIV_LIKELY_NULL(heap)) {
1500  mem_heap_free(heap);
1501  }
1502  } else {
1503 use_heap:
1504  free_rec = NULL;
1505  insert_buf = page_mem_alloc_heap(page, page_zip,
1506  rec_size, &heap_no);
1507 
1508  if (UNIV_UNLIKELY(insert_buf == NULL)) {
1509  return(NULL);
1510  }
1511 
1512  page_zip_dir_add_slot(page_zip, dict_index_is_clust(index));
1513  }
1514 
1515  /* 3. Create the record */
1516  insert_rec = rec_copy(insert_buf, rec, offsets);
1517  rec_offs_make_valid(insert_rec, index, offsets);
1518 
1519  /* 4. Insert the record in the linked list of records */
1520  ut_ad(cursor->rec != insert_rec);
1521 
1522  {
1523  /* next record after current before the insertion */
1524  const rec_t* next_rec = page_rec_get_next_low(
1525  cursor->rec, TRUE);
1526  ut_ad(rec_get_status(cursor->rec)
1527  <= REC_STATUS_INFIMUM);
1528  ut_ad(rec_get_status(insert_rec) < REC_STATUS_INFIMUM);
1529  ut_ad(rec_get_status(next_rec) != REC_STATUS_INFIMUM);
1530 
1531  page_rec_set_next(insert_rec, next_rec);
1532  page_rec_set_next(cursor->rec, insert_rec);
1533  }
1534 
1535  page_header_set_field(page, page_zip, PAGE_N_RECS,
1536  1 + page_get_n_recs(page));
1537 
1538  /* 5. Set the n_owned field in the inserted record to zero,
1539  and set the heap_no field */
1540  rec_set_n_owned_new(insert_rec, NULL, 0);
1541  rec_set_heap_no_new(insert_rec, heap_no);
1542 
1543  UNIV_MEM_ASSERT_RW(rec_get_start(insert_rec, offsets),
1544  rec_offs_size(offsets));
1545 
1546  page_zip_dir_insert(page_zip, cursor->rec, free_rec, insert_rec);
1547 
1548  /* 6. Update the last insertion info in page header */
1549 
1550  last_insert = page_header_get_ptr(page, PAGE_LAST_INSERT);
1551  ut_ad(!last_insert
1552  || rec_get_node_ptr_flag(last_insert)
1553  == rec_get_node_ptr_flag(insert_rec));
1554 
1555  if (UNIV_UNLIKELY(last_insert == NULL)) {
1556  page_header_set_field(page, page_zip, PAGE_DIRECTION,
1557  PAGE_NO_DIRECTION);
1558  page_header_set_field(page, page_zip, PAGE_N_DIRECTION, 0);
1559 
1560  } else if ((last_insert == cursor->rec)
1561  && (page_header_get_field(page, PAGE_DIRECTION)
1562  != PAGE_LEFT)) {
1563 
1564  page_header_set_field(page, page_zip, PAGE_DIRECTION,
1565  PAGE_RIGHT);
1566  page_header_set_field(page, page_zip, PAGE_N_DIRECTION,
 /* NOTE(review): the opening line of this argument is missing
 (extraction gap, content line 1567); presumably
 "page_header_get_field(" -- TODO confirm. */
1568  page, PAGE_N_DIRECTION) + 1);
1569 
1570  } else if ((page_rec_get_next(insert_rec) == last_insert)
1571  && (page_header_get_field(page, PAGE_DIRECTION)
1572  != PAGE_RIGHT)) {
1573 
1574  page_header_set_field(page, page_zip, PAGE_DIRECTION,
1575  PAGE_LEFT);
1576  page_header_set_field(page, page_zip, PAGE_N_DIRECTION,
 /* NOTE(review): the opening line of this argument is missing
 (extraction gap, content line 1577); presumably
 "page_header_get_field(" -- TODO confirm. */
1578  page, PAGE_N_DIRECTION) + 1);
1579  } else {
1580  page_header_set_field(page, page_zip, PAGE_DIRECTION,
1581  PAGE_NO_DIRECTION);
1582  page_header_set_field(page, page_zip, PAGE_N_DIRECTION, 0);
1583  }
1584 
1585  page_header_set_ptr(page, page_zip, PAGE_LAST_INSERT, insert_rec);
1586 
1587  /* 7. It remains to update the owner record. */
1588  {
1589  rec_t* owner_rec = page_rec_find_owner_rec(insert_rec);
1590  ulint n_owned;
1591 
1592  n_owned = rec_get_n_owned_new(owner_rec);
1593  rec_set_n_owned_new(owner_rec, page_zip, n_owned + 1);
1594 
1595  /* 8. Now we have incremented the n_owned field of the owner
1596  record. If the number exceeds PAGE_DIR_SLOT_MAX_N_OWNED,
1597  we have to split the corresponding directory slot in two. */
1598 
1599  if (UNIV_UNLIKELY(n_owned == PAGE_DIR_SLOT_MAX_N_OWNED)) {
 /* NOTE(review): the opening line of this call is missing
 (extraction gap, content line 1600); presumably
 "page_dir_split_slot(" -- TODO confirm. */
1601  page, page_zip,
1602  page_dir_find_owner_slot(owner_rec));
1603  }
1604  }
1605 
1606  page_zip_write_rec(page_zip, insert_rec, index, offsets, 1);
1607 
1608  btr_blob_dbg_add_rec(insert_rec, index, offsets, "insert_zip_ok");
1609 
1610  /* 9. Write log record of the insert */
1611  if (UNIV_LIKELY(mtr != NULL)) {
1612  page_cur_insert_rec_write_log(insert_rec, rec_size,
1613  cursor->rec, index, mtr);
1614  }
1615 
1616  return(insert_rec);
1617 }
1618 
1619 #ifndef UNIV_HOTBACKUP
1620 /**********************************************************/
/* NOTE(review): the declaration-name line (content line 1626) and
the log-type/length argument lines (content lines 1638-1639) were
lost in extraction.  From the mlog_close(..., log_ptr + 4) call and
the 4-byte length the parse function below reads, this appears to be
page_copy_rec_list_to_created_page_write_log(): opens a redo record
for a "copy record list end to a new page" operation and returns a
pointer to the 4-byte length field to be filled in later (NULL when
mtr logging is disabled) -- TODO confirm. */
1624 UNIV_INLINE
1625 byte*
1627 /*=========================================*/
1628  page_t* page,
1629  dict_index_t* index,
1630  mtr_t* mtr)
1631 {
1632  byte* log_ptr;
1633 
1634  ut_ad(!!page_is_comp(page) == dict_table_is_comp(index->table));
1635 
1636  log_ptr = mlog_open_and_write_index(mtr, page, index,
1637  page_is_comp(page)
 /* NOTE(review): the remaining arguments (the compact/old-style
 MLOG type selection and the size 4) are missing here
 (extraction gap, content lines 1638-1639) -- TODO confirm. */
1640  if (UNIV_LIKELY(log_ptr != NULL)) {
 /* Reserve 4 bytes for the total log-data length, written by
 the caller once the individual inserts have been logged. */
1641  mlog_close(mtr, log_ptr + 4);
1642  }
1643 
1644  return(log_ptr);
1645 }
1646 #endif /* !UNIV_HOTBACKUP */
1647 
1648 /**********************************************************/
/* NOTE(review): the declaration-name line (content line 1653) was
lost in extraction; from the body this is the redo parser for the
"copy record list to created page" log record: it reads a 4-byte
payload length, replays the contained short-form insert records into
"block" (when a block is given), resets the last-insert/direction
header fields, and returns a pointer just past the consumed payload,
or NULL if the buffer "ptr..end_ptr" is incomplete -- TODO confirm
the exact name (presumably page_parse_copy_rec_list_to_created_page)
against a pristine page0cur.cc. */
1651 UNIV_INTERN
1652 byte*
1654 /*=====================================*/
1655  byte* ptr,
1656  byte* end_ptr,
1657  buf_block_t* block,
1658  dict_index_t* index,
1659  mtr_t* mtr)
1660 {
1661  byte* rec_end;
1662  ulint log_data_len;
1663  page_t* page;
1664  page_zip_des_t* page_zip;
1665 
 /* The record starts with a 4-byte total payload length. */
1666  if (ptr + 4 > end_ptr) {
1667 
1668  return(NULL);
1669  }
1670 
1671  log_data_len = mach_read_from_4(ptr);
1672  ptr += 4;
1673 
1674  rec_end = ptr + log_data_len;
1675 
1676  if (rec_end > end_ptr) {
1677 
1678  return(NULL);
1679  }
1680 
 /* With no block to apply to, just skip over the payload. */
1681  if (!block) {
1682 
1683  return(rec_end);
1684  }
1685 
 /* Replay each short-form insert record in the payload. */
1686  while (ptr < rec_end) {
1687  ptr = page_cur_parse_insert_rec(TRUE, ptr, end_ptr,
1688  block, index, mtr);
1689  }
1690 
1691  ut_a(ptr == rec_end);
1692 
1693  page = buf_block_get_frame(block);
1694  page_zip = buf_block_get_page_zip(block);
1695 
 /* Mirror the header resets done by the copy routine itself. */
1696  page_header_set_ptr(page, page_zip, PAGE_LAST_INSERT, NULL);
1697  page_header_set_field(page, page_zip, PAGE_DIRECTION,
1698  PAGE_NO_DIRECTION);
1699  page_header_set_field(page, page_zip, PAGE_N_DIRECTION, 0);
1700 
1701  return(rec_end);
1702 }
1703 
1704 #ifndef UNIV_HOTBACKUP
1705 /*************************************************************/
/* NOTE(review): the declaration-name line (content line 1715) was
lost in extraction.  From the btr_blob_dbg tag "copy_end" and the
body, this copies the record list starting at "rec" (up to the
supremum of rec's page) onto the freshly created page "new_page",
rebuilding the record chain, heap numbers and page directory, and
logging the operation; presumably
page_copy_rec_list_end_to_created_page() -- TODO confirm. */
1713 UNIV_INTERN
1714 void
1716 /*===================================*/
1717  page_t* new_page,
1718  rec_t* rec,
1719  dict_index_t* index,
1720  mtr_t* mtr)
1721 {
1722  page_dir_slot_t* slot = 0; /* remove warning */
1723  byte* heap_top;
1724  rec_t* insert_rec = 0; /* remove warning */
1725  rec_t* prev_rec;
1726  ulint count;
1727  ulint n_recs;
1728  ulint slot_index;
1729  ulint rec_size;
1730  ulint log_mode;
1731  byte* log_ptr;
1732  ulint log_data_len;
1733  mem_heap_t* heap = NULL;
1734  ulint offsets_[REC_OFFS_NORMAL_SIZE];
1735  ulint* offsets = offsets_;
1736  rec_offs_init(offsets_);
1737 
 /* The target page must still be empty (only infimum/supremum). */
1738  ut_ad(page_dir_get_n_heap(new_page) == PAGE_HEAP_NO_USER_LOW);
1739  ut_ad(page_align(rec) != new_page);
1740  ut_ad(page_rec_is_comp(rec) == page_is_comp(new_page));
1741 
1742  if (page_rec_is_infimum(rec)) {
1743 
1744  rec = page_rec_get_next(rec);
1745  }
1746 
1747  if (page_rec_is_supremum(rec)) {
1748 
1749  return;
1750  }
1751 
1752 #ifdef UNIV_DEBUG
1753  /* To pass the debug tests we have to set these dummy values
1754  in the debug version */
1755  page_dir_set_n_slots(new_page, NULL, UNIV_PAGE_SIZE / 2)
1756  page_header_set_ptr(new_page, NULL, PAGE_HEAP_TOP,
1757  new_page + UNIV_PAGE_SIZE - 1);
1758 #endif
1759 
 /* NOTE(review): the line assigning log_ptr is missing here
 (extraction gap, content line 1760); log_ptr is read below at
 the mach_write_to_4() call, so the missing line presumably is
 "log_ptr = page_copy_rec_list_to_created_page_write_log(new_page,"
 -- TODO confirm. */
1761  index, mtr);
1762 
1763  log_data_len = dyn_array_get_data_size(&(mtr->log));
1764 
1765  /* Individual inserts are logged in a shorter form */
1766 
1767  log_mode = mtr_set_log_mode(mtr, MTR_LOG_SHORT_INSERTS);
1768 
1769  prev_rec = page_get_infimum_rec(new_page);
1770  if (page_is_comp(new_page)) {
1771  heap_top = new_page + PAGE_NEW_SUPREMUM_END;
1772  } else {
1773  heap_top = new_page + PAGE_OLD_SUPREMUM_END;
1774  }
1775  count = 0;
1776  slot_index = 0;
1777  n_recs = 0;
1778 
 /* Copy each record, link it after prev_rec, assign its heap
 number, and open a new directory slot every
 (PAGE_DIR_SLOT_MAX_N_OWNED + 1) / 2 records. */
1779  do {
1780  offsets = rec_get_offsets(rec, index, offsets,
1781  ULINT_UNDEFINED, &heap);
1782  insert_rec = rec_copy(heap_top, rec, offsets);
1783 
1784  if (page_is_comp(new_page)) {
1785  rec_set_next_offs_new(prev_rec,
1786  page_offset(insert_rec));
1787 
1788  rec_set_n_owned_new(insert_rec, NULL, 0);
1789  rec_set_heap_no_new(insert_rec,
1790  PAGE_HEAP_NO_USER_LOW + n_recs);
1791  } else {
1792  rec_set_next_offs_old(prev_rec,
1793  page_offset(insert_rec));
1794 
1795  rec_set_n_owned_old(insert_rec, 0);
1796  rec_set_heap_no_old(insert_rec,
1797  PAGE_HEAP_NO_USER_LOW + n_recs);
1798  }
1799 
1800  count++;
1801  n_recs++;
1802 
1803  if (UNIV_UNLIKELY
1804  (count == (PAGE_DIR_SLOT_MAX_N_OWNED + 1) / 2)) {
1805 
1806  slot_index++;
1807 
1808  slot = page_dir_get_nth_slot(new_page, slot_index);
1809 
1810  page_dir_slot_set_rec(slot, insert_rec);
1811  page_dir_slot_set_n_owned(slot, NULL, count);
1812 
1813  count = 0;
1814  }
1815 
1816  rec_size = rec_offs_size(offsets);
1817 
1818  ut_ad(heap_top < new_page + UNIV_PAGE_SIZE);
1819 
1820  heap_top += rec_size;
1821 
1822  rec_offs_make_valid(insert_rec, index, offsets);
1823  btr_blob_dbg_add_rec(insert_rec, index, offsets, "copy_end");
1824 
1825  page_cur_insert_rec_write_log(insert_rec, rec_size, prev_rec,
1826  index, mtr);
1827  prev_rec = insert_rec;
1828  rec = page_rec_get_next(rec);
1829  } while (!page_rec_is_supremum(rec));
1830 
1831  if ((slot_index > 0) && (count + 1
1832  + (PAGE_DIR_SLOT_MAX_N_OWNED + 1) / 2
1833  <= PAGE_DIR_SLOT_MAX_N_OWNED)) {
1834  /* We can merge the two last dir slots. This operation is
1835  here to make this function imitate exactly the equivalent
1836  task made using page_cur_insert_rec, which we use in database
1837  recovery to reproduce the task performed by this function.
1838  To be able to check the correctness of recovery, it is good
1839  that it imitates exactly. */
1840 
1841  count += (PAGE_DIR_SLOT_MAX_N_OWNED + 1) / 2;
1842 
1843  page_dir_slot_set_n_owned(slot, NULL, 0);
1844 
1845  slot_index--;
1846  }
1847 
1848  if (UNIV_LIKELY_NULL(heap)) {
1849  mem_heap_free(heap);
1850  }
1851 
 /* Backpatch the total length of the short-form insert log data
 into the 4-byte field reserved by the write_log helper. */
1852  log_data_len = dyn_array_get_data_size(&(mtr->log)) - log_data_len;
1853 
1854  ut_a(log_data_len < 100 * UNIV_PAGE_SIZE);
1855 
1856  if (UNIV_LIKELY(log_ptr != NULL)) {
1857  mach_write_to_4(log_ptr, log_data_len);
1858  }
1859 
 /* Close the record chain: link the last copy to the supremum. */
1860  if (page_is_comp(new_page)) {
1861  rec_set_next_offs_new(insert_rec, PAGE_NEW_SUPREMUM);
1862  } else {
1863  rec_set_next_offs_old(insert_rec, PAGE_OLD_SUPREMUM);
1864  }
1865 
1866  slot = page_dir_get_nth_slot(new_page, 1 + slot_index);
1867 
1868  page_dir_slot_set_rec(slot, page_get_supremum_rec(new_page));
1869  page_dir_slot_set_n_owned(slot, NULL, count + 1);
1870 
1871  page_dir_set_n_slots(new_page, NULL, 2 + slot_index);
1872  page_header_set_ptr(new_page, NULL, PAGE_HEAP_TOP, heap_top);
1873  page_dir_set_n_heap(new_page, NULL, PAGE_HEAP_NO_USER_LOW + n_recs);
1874  page_header_set_field(new_page, NULL, PAGE_N_RECS, n_recs);
1875 
1876  page_header_set_ptr(new_page, NULL, PAGE_LAST_INSERT, NULL);
1877  page_header_set_field(new_page, NULL, PAGE_DIRECTION,
1878  PAGE_NO_DIRECTION);
1879  page_header_set_field(new_page, NULL, PAGE_N_DIRECTION, 0);
1880 
1881  /* Restore the log mode */
1882 
1883  mtr_set_log_mode(mtr, log_mode);
1884 }
1885 
1886 /***********************************************************/
/* NOTE(review): the declaration-name line (content line 1890) was
lost in extraction.  The hot-backup stub macro later in this file
(#define page_cur_delete_rec_write_log(rec,index,mtr) ((void) 0))
names this function: page_cur_delete_rec_write_log() -- writes the
redo log record for deleting "rec", i.e. the 2-byte page offset of
the record, preceded by the index information. */
1888 UNIV_INLINE
1889 void
1891 /*==========================*/
1892  rec_t* rec,
1893  const dict_index_t* index,
1894  mtr_t* mtr)
1895 {
1896  byte* log_ptr;
1897 
1898  ut_ad(!!page_rec_is_comp(rec) == dict_table_is_comp(index->table));
1899 
1900  log_ptr = mlog_open_and_write_index(mtr, rec, index,
1901  page_rec_is_comp(rec)
 /* NOTE(review): the compact-format alternative of this
 conditional is missing (extraction gap, content line 1902);
 presumably "? MLOG_COMP_REC_DELETE" -- TODO confirm. */
1903  : MLOG_REC_DELETE, 2);
1904 
1905  if (!log_ptr) {
1906  /* Logging in mtr is switched off during crash recovery:
1907  in that case mlog_open returns NULL */
1908  return;
1909  }
1910 
1911  /* Write the cursor rec offset as a 2-byte ulint */
1912  mach_write_to_2(log_ptr, page_offset(rec));
1913 
1914  mlog_close(mtr, log_ptr + 2);
1915 }
1916 #else /* !UNIV_HOTBACKUP */
/* In hot-backup builds there is no mtr redo logging: compile the
delete-log writer away to a no-op with the same call syntax. */
1917 # define page_cur_delete_rec_write_log(rec,index,mtr) ((void) 0)
1918 #endif /* !UNIV_HOTBACKUP */
1919 
1920 /***********************************************************/
/* NOTE(review): the declaration-name line (content line 1925) was
lost in extraction.  From the body this is the redo parser for a
record-delete log record: it reads a 2-byte record offset and, when
a block is supplied, positions a cursor on that record and deletes
it via page_cur_delete_rec(); returns the advanced parse pointer, or
NULL if the buffer is too short.  Presumably
page_cur_parse_delete_rec() -- TODO confirm. */
1923 UNIV_INTERN
1924 byte*
1926 /*======================*/
1927  byte* ptr,
1928  byte* end_ptr,
1929  buf_block_t* block,
1930  dict_index_t* index,
1931  mtr_t* mtr)
1932 {
1933  ulint offset;
 /* NOTE(review): content line 1934 is missing here (extraction
 gap); "cursor" is used below without a visible declaration, so
 the missing line is presumably "page_cur_t cursor;" --
 TODO confirm. */
1935 
1936  if (end_ptr < ptr + 2) {
1937 
1938  return(NULL);
1939  }
1940 
1941  /* Read the cursor rec offset as a 2-byte ulint */
1942  offset = mach_read_from_2(ptr);
1943  ptr += 2;
1944 
1945  ut_a(offset <= UNIV_PAGE_SIZE);
1946 
1947  if (block) {
1948  page_t* page = buf_block_get_frame(block);
1949  mem_heap_t* heap = NULL;
1950  ulint offsets_[REC_OFFS_NORMAL_SIZE];
1951  rec_t* rec = page + offset;
1952  rec_offs_init(offsets_);
1953 
1954  page_cur_position(rec, block, &cursor);
1955  ut_ad(!buf_block_get_page_zip(block) || page_is_comp(page));
1956 
1957  page_cur_delete_rec(&cursor, index,
1958  rec_get_offsets(rec, index, offsets_,
1959  ULINT_UNDEFINED, &heap),
1960  mtr);
1961  if (UNIV_LIKELY_NULL(heap)) {
1962  mem_heap_free(heap);
1963  }
1964  }
1965 
1966  return(ptr);
1967 }
1968 
1969 /***********************************************************/
/* NOTE(review): the declaration-name line (content line 1974) was
lost in extraction; the call site in the parse function above names
it: page_cur_delete_rec() -- deletes the record the page cursor is
positioned on, leaving the cursor on the next record.  A missing
content line (1979) between the "offsets" and "mtr" parameters was
presumably a trailing parameter comment -- TODO confirm. */
1972 UNIV_INTERN
1973 void
1975 /*================*/
1976  page_cur_t* cursor,
1977  const dict_index_t* index,
1978  const ulint* offsets,
1980  mtr_t* mtr)
1981 {
1982  page_dir_slot_t* cur_dir_slot;
1983  page_dir_slot_t* prev_slot;
1984  page_t* page;
1985  page_zip_des_t* page_zip;
1986  rec_t* current_rec;
1987  rec_t* prev_rec = NULL;
1988  rec_t* next_rec;
1989  ulint cur_slot_no;
1990  ulint cur_n_owned;
1991  rec_t* rec;
1992 
1993  page = page_cur_get_page(cursor);
1994  page_zip = page_cur_get_page_zip(cursor);
1995 
1996  /* page_zip_validate() will fail here when
1997  btr_cur_pessimistic_delete() invokes btr_set_min_rec_mark().
1998  Then, both "page_zip" and "page" would have the min-rec-mark
1999  set on the smallest user record, but "page" would additionally
2000  have it set on the smallest-but-one record. Because sloppy
2001  page_zip_validate_low() only ignores min-rec-flag differences
2002  in the smallest user record, it cannot be used here either. */
2003 
2004  current_rec = cursor->rec;
2005  ut_ad(rec_offs_validate(current_rec, index, offsets));
2006  ut_ad(!!page_is_comp(page) == dict_table_is_comp(index->table));
2008  ut_ad(mach_read_from_8(page + PAGE_HEADER + PAGE_INDEX_ID)
2009  == index->id || mtr->inside_ibuf || recv_recovery_is_on());
2010 
2011  /* The record must not be the supremum or infimum record. */
2012  ut_ad(page_rec_is_user_rec(current_rec));
2013 
2014  if (page_get_n_recs(page) == 1 && !recv_recovery_is_on()) {
2015  /* Empty the page, unless we are applying the redo log
2016  during crash recovery. During normal operation, the
2017  page_create_empty() gets logged as one of MLOG_PAGE_CREATE,
2018  MLOG_COMP_PAGE_CREATE, MLOG_ZIP_PAGE_COMPRESS. */
2019  ut_ad(page_is_leaf(page));
2020  /* Usually, this should be the root page,
2021  and the whole index tree should become empty.
2022  However, this could also be a call in
2023  btr_cur_pessimistic_update() to delete the only
2024  record in the page and to insert another one. */
2025  page_cur_move_to_next(cursor);
2026  ut_ad(page_cur_is_after_last(cursor));
2027  page_create_empty(page_cur_get_block(cursor),
2028  const_cast<dict_index_t*>(index), mtr);
2029  return;
2030  }
2031 
2032  /* Save to local variables some data associated with current_rec */
2033  cur_slot_no = page_dir_find_owner_slot(current_rec);
2034  ut_ad(cur_slot_no > 0);
2035  cur_dir_slot = page_dir_get_nth_slot(page, cur_slot_no);
2036  cur_n_owned = page_dir_slot_get_n_owned(cur_dir_slot);
2037 
2038  /* 0. Write the log record */
2039  if (mtr != 0) {
2040  page_cur_delete_rec_write_log(current_rec, index, mtr);
2041  }
2042 
2043  /* 1. Reset the last insert info in the page header and increment
2044  the modify clock for the frame */
2045 
2046  page_header_set_ptr(page, page_zip, PAGE_LAST_INSERT, NULL);
2047 
2048  /* The page gets invalid for optimistic searches: increment the
2049  frame modify clock only if there is an mini-transaction covering
2050  the change. During IMPORT we allocate local blocks that are not
2051  part of the buffer pool. */
2052 
2053  if (mtr != 0) {
2054  buf_block_modify_clock_inc(page_cur_get_block(cursor));
2055  }
2056 
2057  /* 2. Find the next and the previous record. Note that the cursor is
2058  left at the next record. */
2059 
2060  ut_ad(cur_slot_no > 0);
2061  prev_slot = page_dir_get_nth_slot(page, cur_slot_no - 1);
2062 
2063  rec = (rec_t*) page_dir_slot_get_rec(prev_slot);
2064 
2065  /* rec now points to the record of the previous directory slot. Look
2066  for the immediate predecessor of current_rec in a loop. */
2067 
2068  while(current_rec != rec) {
2069  prev_rec = rec;
2070  rec = page_rec_get_next(rec);
2071  }
2072 
2073  page_cur_move_to_next(cursor);
2074  next_rec = cursor->rec;
2075 
2076  /* 3. Remove the record from the linked list of records */
2077 
2078  page_rec_set_next(prev_rec, next_rec);
2079 
2080  /* 4. If the deleted record is pointed to by a dir slot, update the
2081  record pointer in slot. In the following if-clause we assume that
2082  prev_rec is owned by the same slot, i.e., PAGE_DIR_SLOT_MIN_N_OWNED
2083  >= 2. */
2084 
2085 #if PAGE_DIR_SLOT_MIN_N_OWNED < 2
2086 # error "PAGE_DIR_SLOT_MIN_N_OWNED < 2"
2087 #endif
2088  ut_ad(cur_n_owned > 1);
2089 
2090  if (current_rec == page_dir_slot_get_rec(cur_dir_slot)) {
2091  page_dir_slot_set_rec(cur_dir_slot, prev_rec);
2092  }
2093 
2094  /* 5. Update the number of owned records of the slot */
2095 
2096  page_dir_slot_set_n_owned(cur_dir_slot, page_zip, cur_n_owned - 1);
2097 
2098  /* 6. Free the memory occupied by the record */
2099  btr_blob_dbg_remove_rec(current_rec, const_cast<dict_index_t*>(index),
2100  offsets, "delete");
2101  page_mem_free(page, page_zip, current_rec, index, offsets);
2102 
2103  /* 7. Now we have decremented the number of owned records of the slot.
2104  If the number drops below PAGE_DIR_SLOT_MIN_N_OWNED, we balance the
2105  slots. */
2106 
2107  if (cur_n_owned <= PAGE_DIR_SLOT_MIN_N_OWNED) {
2108  page_dir_balance_slot(page, page_zip, cur_slot_no);
2109  }
2110 
2111 #ifdef UNIV_ZIP_DEBUG
2112  ut_a(!page_zip || page_zip_validate(page_zip, page, index));
2113 #endif /* UNIV_ZIP_DEBUG */
2114 }
2115 
2116 #ifdef UNIV_COMPILE_TEST_FUNCS
2117 
2118 /*******************************************************************/
2121 void
2122 test_page_cur_lcg_prng(
2123 /*===================*/
2124  int n)
2125 {
2126  int i;
2127  unsigned long long rnd;
2128 
2129  for (i = 0; i < n; i++) {
2130  rnd = page_cur_lcg_prng();
2131  printf("%llu\t%%2=%llu %%3=%llu %%5=%llu %%7=%llu %%11=%llu\n",
2132  rnd,
2133  rnd % 2,
2134  rnd % 3,
2135  rnd % 5,
2136  rnd % 7,
2137  rnd % 11);
2138  }
2139 }
2140 
2141 #endif /* UNIV_COMPILE_TEST_FUNCS */