MySQL 5.6.14 Source Code Document
trx0rec.cc
1 /*****************************************************************************
2 
3 Copyright (c) 1996, 2012, Oracle and/or its affiliates. All Rights Reserved.
4 
5 This program is free software; you can redistribute it and/or modify it under
6 the terms of the GNU General Public License as published by the Free Software
7 Foundation; version 2 of the License.
8 
9 This program is distributed in the hope that it will be useful, but WITHOUT
10 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
11 FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
12 
13 You should have received a copy of the GNU General Public License along with
14 this program; if not, write to the Free Software Foundation, Inc.,
15 51 Franklin Street, Suite 500, Boston, MA 02110-1335 USA
16 
17 *****************************************************************************/
18 
19 /**************************************************/
26 #include "trx0rec.h"
27 
28 #ifdef UNIV_NONINL
29 #include "trx0rec.ic"
30 #endif
31 
32 #include "fsp0fsp.h"
33 #include "mach0data.h"
34 #include "trx0undo.h"
35 #include "mtr0log.h"
36 #ifndef UNIV_HOTBACKUP
37 #include "dict0dict.h"
38 #include "ut0mem.h"
39 #include "read0read.h"
40 #include "row0ext.h"
41 #include "row0upd.h"
42 #include "que0que.h"
43 #include "trx0purge.h"
44 #include "trx0rseg.h"
45 #include "row0row.h"
46 
47 /*=========== UNDO LOG RECORD CREATION AND DECODING ====================*/
48 
49 /**********************************************************************/
52 UNIV_INLINE
53 void
54 trx_undof_page_add_undo_rec_log(
55 /*============================*/
56  page_t* undo_page,
57  ulint old_free,
58  ulint new_free,
59  mtr_t* mtr)
60 {
61  byte* log_ptr;
62  const byte* log_end;
63  ulint len;
64 
65  log_ptr = mlog_open(mtr, 11 + 13 + MLOG_BUF_MARGIN);
66 
67  if (log_ptr == NULL) {
68 
69  return;
70  }
71 
72  log_end = &log_ptr[11 + 13 + MLOG_BUF_MARGIN];
73  log_ptr = mlog_write_initial_log_record_fast(
74  undo_page, MLOG_UNDO_INSERT, log_ptr, mtr);
75  len = new_free - old_free - 4;
76 
77  mach_write_to_2(log_ptr, len);
78  log_ptr += 2;
79 
80  if (log_ptr + len <= log_end) {
81  memcpy(log_ptr, undo_page + old_free + 2, len);
82  mlog_close(mtr, log_ptr + len);
83  } else {
84  mlog_close(mtr, log_ptr);
85  mlog_catenate_string(mtr, undo_page + old_free + 2, len);
86  }
87 }
88 #endif /* !UNIV_HOTBACKUP */
89 
90 /***********************************************************/
93 UNIV_INTERN
94 byte*
95 trx_undo_parse_add_undo_rec(
96 /*========================*/
97  byte* ptr,
98  byte* end_ptr,
99  page_t* page)
100 {
101  ulint len;
102  byte* rec;
103  ulint first_free;
104 
105  if (end_ptr < ptr + 2) {
106 
107  return(NULL);
108  }
109 
110  len = mach_read_from_2(ptr);
111  ptr += 2;
112 
113  if (end_ptr < ptr + len) {
114 
115  return(NULL);
116  }
117 
118  if (page == NULL) {
119 
120  return(ptr + len);
121  }
122 
123  first_free = mach_read_from_2(page + TRX_UNDO_PAGE_HDR
124  + TRX_UNDO_PAGE_FREE);
125  rec = page + first_free;
126 
127  mach_write_to_2(rec, first_free + 4 + len);
128  mach_write_to_2(rec + 2 + len, first_free);
129 
130  mach_write_to_2(page + TRX_UNDO_PAGE_HDR + TRX_UNDO_PAGE_FREE,
131  first_free + 4 + len);
132  ut_memcpy(rec + 2, ptr, len);
133 
134  return(ptr + len);
135 }
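
/* Illustrative sketch, not part of the original 5.6.14 file: each undo
record placed on the page above is framed by two 2-byte offsets -- the first
two bytes of the record hold the offset of the next record, and the two
bytes written just after the body hold the offset where this record starts.
Walking forward through the records of a page therefore only needs
mach_read_from_2(): */
static ulint
example_undo_rec_next_offset(
/*=========================*/
 const page_t* undo_page, /* in: undo log page */
 ulint rec_offset) /* in: byte offset of an undo record on the page */
{
 /* returns the byte offset of the following record within the page */
 return(mach_read_from_2(undo_page + rec_offset));
}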
136 
137 #ifndef UNIV_HOTBACKUP
138 /**********************************************************************/
141 UNIV_INLINE
142 ulint
143 trx_undo_left(
144 /*==========*/
145  const page_t* page,
146  const byte* ptr)
147 {
148  /* The '- 10' is a safety margin, in case we have some small
149  calculation error below */
150 
151  return(UNIV_PAGE_SIZE - (ptr - page) - 10 - FIL_PAGE_DATA_END);
152 }
153 
154 /**********************************************************************/
159 static
160 ulint
161 trx_undo_page_set_next_prev_and_add(
162 /*================================*/
163  page_t* undo_page,
164  byte* ptr,
166  mtr_t* mtr)
167 {
168  ulint first_free;
169  ulint end_of_rec;
170  byte* ptr_to_first_free;
171  /* pointer within undo_page
172  that points to the next free
173  offset value within undo_page.*/
174 
175  ut_ad(ptr > undo_page);
176  ut_ad(ptr < undo_page + UNIV_PAGE_SIZE);
177 
178  if (UNIV_UNLIKELY(trx_undo_left(undo_page, ptr) < 2)) {
179 
180  return(0);
181  }
182 
183  ptr_to_first_free = undo_page + TRX_UNDO_PAGE_HDR + TRX_UNDO_PAGE_FREE;
184 
185  first_free = mach_read_from_2(ptr_to_first_free);
186 
187  /* Write offset of the previous undo log record */
188  mach_write_to_2(ptr, first_free);
189  ptr += 2;
190 
191  end_of_rec = ptr - undo_page;
192 
193  /* Write offset of the next undo log record */
194  mach_write_to_2(undo_page + first_free, end_of_rec);
195 
196  /* Update the offset to first free undo record */
197  mach_write_to_2(ptr_to_first_free, end_of_rec);
198 
199  /* Write this log entry to the UNDO log */
200  trx_undof_page_add_undo_rec_log(undo_page, first_free,
201  end_of_rec, mtr);
202 
203  return(first_free);
204 }
205 
206 /**********************************************************************/
209 static
210 ulint
211 trx_undo_page_report_insert(
212 /*========================*/
213  page_t* undo_page,
214  trx_t* trx,
215  dict_index_t* index,
216  const dtuple_t* clust_entry,
218  mtr_t* mtr)
219 {
220  ulint first_free;
221  byte* ptr;
222  ulint i;
223 
224  ut_ad(dict_index_is_clust(index));
225  ut_ad(mach_read_from_2(undo_page + TRX_UNDO_PAGE_HDR
226  + TRX_UNDO_PAGE_TYPE) == TRX_UNDO_INSERT);
227 
228  first_free = mach_read_from_2(undo_page + TRX_UNDO_PAGE_HDR
229  + TRX_UNDO_PAGE_FREE);
230  ptr = undo_page + first_free;
231 
232  ut_ad(first_free <= UNIV_PAGE_SIZE);
233 
234  if (trx_undo_left(undo_page, ptr) < 2 + 1 + 11 + 11) {
235 
236  /* Not enough space for writing the general parameters */
237 
238  return(0);
239  }
240 
241  /* Reserve 2 bytes for the pointer to the next undo log record */
242  ptr += 2;
243 
244  /* Store first some general parameters to the undo log */
245  *ptr++ = TRX_UNDO_INSERT_REC;
246  ptr += mach_ull_write_much_compressed(ptr, trx->undo_no);
247  ptr += mach_ull_write_much_compressed(ptr, index->table->id);
248  /*----------------------------------------*/
249  /* Store then the fields required to uniquely determine the record
250  to be inserted in the clustered index */
251 
252  for (i = 0; i < dict_index_get_n_unique(index); i++) {
253 
254  const dfield_t* field = dtuple_get_nth_field(clust_entry, i);
255  ulint flen = dfield_get_len(field);
256 
257  if (trx_undo_left(undo_page, ptr) < 5) {
258 
259  return(0);
260  }
261 
262  ptr += mach_write_compressed(ptr, flen);
263 
264  if (flen != UNIV_SQL_NULL) {
265  if (trx_undo_left(undo_page, ptr) < flen) {
266 
267  return(0);
268  }
269 
270  ut_memcpy(ptr, dfield_get_data(field), flen);
271  ptr += flen;
272  }
273  }
274 
275  return(trx_undo_page_set_next_prev_and_add(undo_page, ptr, mtr));
276 }
277 
278 /**********************************************************************/
281 UNIV_INTERN
282 byte*
283 trx_undo_rec_get_pars(
284 /*==================*/
285  trx_undo_rec_t* undo_rec,
286  ulint* type,
288  ulint* cmpl_info,
290  bool* updated_extern,
292  undo_no_t* undo_no,
293  table_id_t* table_id)
294 {
295  byte* ptr;
296  ulint type_cmpl;
297 
298  ptr = undo_rec + 2;
299 
300  type_cmpl = mach_read_from_1(ptr);
301  ptr++;
302 
303  *updated_extern = !!(type_cmpl & TRX_UNDO_UPD_EXTERN);
304  type_cmpl &= ~TRX_UNDO_UPD_EXTERN;
305 
306  *type = type_cmpl & (TRX_UNDO_CMPL_INFO_MULT - 1);
307  *cmpl_info = type_cmpl / TRX_UNDO_CMPL_INFO_MULT;
308 
309  *undo_no = mach_ull_read_much_compressed(ptr);
310  ptr += mach_ull_get_much_compressed_size(*undo_no);
311 
312  *table_id = mach_ull_read_much_compressed(ptr);
313  ptr += mach_ull_get_much_compressed_size(*table_id);
314 
315  return(ptr);
316 }
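
/* Illustrative sketch, not part of the original file: the single type_cmpl
byte parsed above packs the record type in its low bits, the compiler info
multiplied by TRX_UNDO_CMPL_INFO_MULT, and the TRX_UNDO_UPD_EXTERN flag when
an externally stored field was updated. The encoding side, mirroring the
decoding above and the code in trx_undo_page_report_modify() below, would
look like this: */
static ulint
example_pack_type_cmpl(
/*===================*/
 ulint type, /* in: TRX_UNDO_INSERT_REC, TRX_UNDO_UPD_EXIST_REC, ... */
 ulint cmpl_info, /* in: compiler info */
 ibool updated_extern) /* in: TRUE if an extern field was updated */
{
 ulint type_cmpl = type | (cmpl_info * TRX_UNDO_CMPL_INFO_MULT);

 if (updated_extern) {
 type_cmpl |= TRX_UNDO_UPD_EXTERN;
 }

 return(type_cmpl);
}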
317 
318 /**********************************************************************/
321 static
322 byte*
323 trx_undo_rec_get_col_val(
324 /*=====================*/
325  byte* ptr,
326  byte** field,
327  ulint* len,
328  ulint* orig_len)
330 {
331  *len = mach_read_compressed(ptr);
332  ptr += mach_get_compressed_size(*len);
333 
334  *orig_len = 0;
335 
336  switch (*len) {
337  case UNIV_SQL_NULL:
338  *field = NULL;
339  break;
340  case UNIV_EXTERN_STORAGE_FIELD:
341  *orig_len = mach_read_compressed(ptr);
342  ptr += mach_get_compressed_size(*orig_len);
343  *len = mach_read_compressed(ptr);
344  ptr += mach_get_compressed_size(*len);
345  *field = ptr;
346  ptr += *len;
347 
348  ut_ad(*orig_len >= BTR_EXTERN_FIELD_REF_SIZE);
349  ut_ad(*len > *orig_len);
350  /* @see dtuple_convert_big_rec() */
352 
353  /* we do not have access to index->table here
354  ut_ad(dict_table_get_format(index->table) >= UNIV_FORMAT_B
355  || *len >= col->max_prefix
356  + BTR_EXTERN_FIELD_REF_SIZE);
357  */
358 
359  *len += UNIV_EXTERN_STORAGE_FIELD;
360  break;
361  default:
362  *field = ptr;
363  if (*len >= UNIV_EXTERN_STORAGE_FIELD) {
364  ptr += *len - UNIV_EXTERN_STORAGE_FIELD;
365  } else {
366  ptr += *len;
367  }
368  }
369 
370  return(ptr);
371 }
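
/* Illustrative sketch, not part of the original file: the length returned
in *len above is overloaded. UNIV_SQL_NULL means SQL NULL, a value of at
least UNIV_EXTERN_STORAGE_FIELD means the column is externally stored and
only len - UNIV_EXTERN_STORAGE_FIELD bytes of it are present in the undo
record, and any smaller value is a plain inline length. A caller could
classify the value like this: */
static ulint
example_col_val_inline_len(
/*=======================*/
 ulint len) /* in: *len as returned by trx_undo_rec_get_col_val() */
{
 if (len == UNIV_SQL_NULL) {
 return(0); /* SQL NULL: no data bytes present */
 } else if (len >= UNIV_EXTERN_STORAGE_FIELD) {
 /* externally stored: only a prefix is in the undo record */
 return(len - UNIV_EXTERN_STORAGE_FIELD);
 }

 return(len); /* ordinary inline value */
}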
372 
373 /*******************************************************************/
376 UNIV_INTERN
377 byte*
378 trx_undo_rec_get_row_ref(
379 /*=====================*/
380  byte* ptr,
386  dict_index_t* index,
387  dtuple_t** ref,
388  mem_heap_t* heap)
390 {
391  ulint ref_len;
392  ulint i;
393 
394  ut_ad(index && ptr && ref && heap);
395  ut_a(dict_index_is_clust(index));
396 
397  ref_len = dict_index_get_n_unique(index);
398 
399  *ref = dtuple_create(heap, ref_len);
400 
401  dict_index_copy_types(*ref, index, ref_len);
402 
403  for (i = 0; i < ref_len; i++) {
404  dfield_t* dfield;
405  byte* field;
406  ulint len;
407  ulint orig_len;
408 
409  dfield = dtuple_get_nth_field(*ref, i);
410 
411  ptr = trx_undo_rec_get_col_val(ptr, &field, &len, &orig_len);
412 
413  dfield_set_data(dfield, field, len);
414  }
415 
416  return(ptr);
417 }
418 
419 /*******************************************************************/
422 UNIV_INTERN
423 byte*
424 trx_undo_rec_skip_row_ref(
425 /*======================*/
426  byte* ptr,
428  dict_index_t* index)
429 {
430  ulint ref_len;
431  ulint i;
432 
433  ut_ad(index && ptr);
434  ut_a(dict_index_is_clust(index));
435 
436  ref_len = dict_index_get_n_unique(index);
437 
438  for (i = 0; i < ref_len; i++) {
439  byte* field;
440  ulint len;
441  ulint orig_len;
442 
443  ptr = trx_undo_rec_get_col_val(ptr, &field, &len, &orig_len);
444  }
445 
446  return(ptr);
447 }
448 
449 /**********************************************************************/
453 static
454 byte*
455 trx_undo_page_fetch_ext(
456 /*====================*/
457  byte* ext_buf,
459  ulint prefix_len,
461  ulint zip_size,
463  const byte* field,
464  ulint* len)
466 {
467  /* Fetch the BLOB. */
468  ulint ext_len = btr_copy_externally_stored_field_prefix(
469  ext_buf, prefix_len, zip_size, field, *len);
470  /* BLOBs should always be nonempty. */
471  ut_a(ext_len);
472  /* Append the BLOB pointer to the prefix. */
473  memcpy(ext_buf + ext_len,
474  field + *len - BTR_EXTERN_FIELD_REF_SIZE,
475  BTR_EXTERN_FIELD_REF_SIZE);
476  *len = ext_len + BTR_EXTERN_FIELD_REF_SIZE;
477  return(ext_buf);
478 }
479 
480 /**********************************************************************/
483 static
484 byte*
485 trx_undo_page_report_modify_ext(
486 /*============================*/
487  byte* ptr,
489  byte* ext_buf,
493  ulint prefix_len,
495  ulint zip_size,
497  const byte** field,
499  ulint* len)
500 {
501  if (ext_buf) {
502  ut_a(prefix_len > 0);
503 
504  /* If an ordering column is externally stored, we will
505  have to store a longer prefix of the field. In this
506  case, write to the log a marker followed by the
507  original length and the real length of the field. */
508  ptr += mach_write_compressed(ptr, UNIV_EXTERN_STORAGE_FIELD);
509 
510  ptr += mach_write_compressed(ptr, *len);
511 
512  *field = trx_undo_page_fetch_ext(ext_buf, prefix_len, zip_size,
513  *field, len);
514 
515  ptr += mach_write_compressed(ptr, *len);
516  } else {
517  ptr += mach_write_compressed(ptr, UNIV_EXTERN_STORAGE_FIELD
518  + *len);
519  }
520 
521  return(ptr);
522 }
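
/* Informal note on the two encodings produced above: when ext_buf is given,
the column value is rewritten as the marker UNIV_EXTERN_STORAGE_FIELD, then
the original length, then the length of the fetched BLOB prefix plus field
reference; trx_undo_rec_get_col_val() recognizes this case by the explicit
marker. When ext_buf is NULL, only UNIV_EXTERN_STORAGE_FIELD + *len is
written as the length, which the reader detects by the value being at least
UNIV_EXTERN_STORAGE_FIELD. */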
523 
524 /**********************************************************************/
529 static
530 ulint
531 trx_undo_page_report_modify(
532 /*========================*/
533  page_t* undo_page,
534  trx_t* trx,
535  dict_index_t* index,
537  const rec_t* rec,
539  const ulint* offsets,
540  const upd_t* update,
543  ulint cmpl_info,
545  mtr_t* mtr)
546 {
547  dict_table_t* table;
548  ulint first_free;
549  byte* ptr;
550  const byte* field;
551  ulint flen;
552  ulint col_no;
553  ulint type_cmpl;
554  byte* type_cmpl_ptr;
555  ulint i;
556  trx_id_t trx_id;
557  ibool ignore_prefix = FALSE;
558  byte ext_buf[REC_VERSION_56_MAX_INDEX_COL_LEN
559  + BTR_EXTERN_FIELD_REF_SIZE];
560 
561  ut_a(dict_index_is_clust(index));
562  ut_ad(rec_offs_validate(rec, index, offsets));
563  ut_ad(mach_read_from_2(undo_page + TRX_UNDO_PAGE_HDR
564  + TRX_UNDO_PAGE_TYPE) == TRX_UNDO_UPDATE);
565  table = index->table;
566 
567  first_free = mach_read_from_2(undo_page + TRX_UNDO_PAGE_HDR
568  + TRX_UNDO_PAGE_FREE);
569  ptr = undo_page + first_free;
570 
571  ut_ad(first_free <= UNIV_PAGE_SIZE);
572 
573  if (trx_undo_left(undo_page, ptr) < 50) {
574 
575  /* NOTE: the value 50 must be big enough so that the general
576  fields written below fit on the undo log page */
577 
578  return(0);
579  }
580 
581  /* Reserve 2 bytes for the pointer to the next undo log record */
582  ptr += 2;
583 
584  /* Store first some general parameters to the undo log */
585 
586  if (!update) {
588  type_cmpl = TRX_UNDO_DEL_MARK_REC;
589  } else if (rec_get_deleted_flag(rec, dict_table_is_comp(table))) {
590  type_cmpl = TRX_UNDO_UPD_DEL_REC;
591  /* We are about to update a delete marked record.
592  We don't typically need the prefix in this case unless
593  the delete marking is done by the same transaction
594  (which we check below). */
595  ignore_prefix = TRUE;
596  } else {
597  type_cmpl = TRX_UNDO_UPD_EXIST_REC;
598  }
599 
600  type_cmpl |= cmpl_info * TRX_UNDO_CMPL_INFO_MULT;
601  type_cmpl_ptr = ptr;
602 
603  *ptr++ = (byte) type_cmpl;
604  ptr += mach_ull_write_much_compressed(ptr, trx->undo_no);
605 
606  ptr += mach_ull_write_much_compressed(ptr, table->id);
607 
608  /*----------------------------------------*/
609  /* Store the state of the info bits */
610 
611  *ptr++ = (byte) rec_get_info_bits(rec, dict_table_is_comp(table));
612 
613  /* Store the values of the system columns */
614  field = rec_get_nth_field(rec, offsets,
615  dict_index_get_sys_col_pos(
616  index, DATA_TRX_ID), &flen);
617  ut_ad(flen == DATA_TRX_ID_LEN);
618 
619  trx_id = trx_read_trx_id(field);
620 
621  /* If it is an update of a delete marked record, then we are
622  allowed to ignore blob prefixes if the delete marking was done
623  by some other trx as it must have committed by now for us to
624  allow an over-write. */
625  if (ignore_prefix) {
626  ignore_prefix = (trx_id != trx->id);
627  }
628  ptr += mach_ull_write_compressed(ptr, trx_id);
629 
630  field = rec_get_nth_field(rec, offsets,
631  dict_index_get_sys_col_pos(
632  index, DATA_ROLL_PTR), &flen);
633  ut_ad(flen == DATA_ROLL_PTR_LEN);
634 
635  ptr += mach_ull_write_compressed(ptr, trx_read_roll_ptr(field));
636 
637  /*----------------------------------------*/
638  /* Store then the fields required to uniquely determine the
639  record which will be modified in the clustered index */
640 
641  for (i = 0; i < dict_index_get_n_unique(index); i++) {
642 
643  field = rec_get_nth_field(rec, offsets, i, &flen);
644 
645  /* The ordering columns must not be stored externally. */
646  ut_ad(!rec_offs_nth_extern(offsets, i));
647  ut_ad(dict_index_get_nth_col(index, i)->ord_part);
648 
649  if (trx_undo_left(undo_page, ptr) < 5) {
650 
651  return(0);
652  }
653 
654  ptr += mach_write_compressed(ptr, flen);
655 
656  if (flen != UNIV_SQL_NULL) {
657  if (trx_undo_left(undo_page, ptr) < flen) {
658 
659  return(0);
660  }
661 
662  ut_memcpy(ptr, field, flen);
663  ptr += flen;
664  }
665  }
666 
667  /*----------------------------------------*/
668  /* Save to the undo log the old values of the columns to be updated. */
669 
670  if (update) {
671  if (trx_undo_left(undo_page, ptr) < 5) {
672 
673  return(0);
674  }
675 
676  ptr += mach_write_compressed(ptr, upd_get_n_fields(update));
677 
678  for (i = 0; i < upd_get_n_fields(update); i++) {
679 
680  ulint pos = upd_get_nth_field(update, i)->field_no;
681 
682  /* Write field number to undo log */
683  if (trx_undo_left(undo_page, ptr) < 5) {
684 
685  return(0);
686  }
687 
688  ptr += mach_write_compressed(ptr, pos);
689 
690  /* Save the old value of field */
691  field = rec_get_nth_field(rec, offsets, pos, &flen);
692 
693  if (trx_undo_left(undo_page, ptr) < 15) {
694 
695  return(0);
696  }
697 
698  if (rec_offs_nth_extern(offsets, pos)) {
699  const dict_col_t* col
700  = dict_index_get_nth_col(index, pos);
701  ulint prefix_len
702  = dict_max_field_len_store_undo(
703  table, col);
704 
705  ut_ad(prefix_len + BTR_EXTERN_FIELD_REF_SIZE
706  <= sizeof ext_buf);
707 
708  ptr = trx_undo_page_report_modify_ext(
709  ptr,
710  col->ord_part
711  && !ignore_prefix
712  && flen < REC_ANTELOPE_MAX_INDEX_COL_LEN
713  ? ext_buf : NULL, prefix_len,
714  dict_table_zip_size(table),
715  &field, &flen);
716 
717  /* Notify purge that it eventually has to
718  free the old externally stored field */
719 
720  trx->update_undo->del_marks = TRUE;
721 
722  *type_cmpl_ptr |= TRX_UNDO_UPD_EXTERN;
723  } else {
724  ptr += mach_write_compressed(ptr, flen);
725  }
726 
727  if (flen != UNIV_SQL_NULL) {
728  if (trx_undo_left(undo_page, ptr) < flen) {
729 
730  return(0);
731  }
732 
733  ut_memcpy(ptr, field, flen);
734  ptr += flen;
735  }
736  }
737  }
738 
739  /*----------------------------------------*/
740  /* In the case of a delete marking, and also in the case of an update
741  where any ordering field of any index changes, store the values of all
742  columns which occur as ordering fields in any index. This info is used
743  in the purge of old versions where we use it to build and search the
744  delete marked index records, to look if we can remove them from the
745  index tree. Note that starting from 4.0.14 also externally stored
746  fields can be ordering in some index. Starting from 5.2, we no longer
747  store REC_MAX_INDEX_COL_LEN first bytes to the undo log record,
748  but we can construct the column prefix fields in the index by
749  fetching the first page of the BLOB that is pointed to by the
750  clustered index. This works also in crash recovery, because all pages
751  (including BLOBs) are recovered before anything is rolled back. */
752 
753  if (!update || !(cmpl_info & UPD_NODE_NO_ORD_CHANGE)) {
754  byte* old_ptr = ptr;
755 
756  trx->update_undo->del_marks = TRUE;
757 
758  if (trx_undo_left(undo_page, ptr) < 5) {
759 
760  return(0);
761  }
762 
763  /* Reserve 2 bytes to write the number of bytes the stored
764  fields take in this undo record */
765 
766  ptr += 2;
767 
768  for (col_no = 0; col_no < dict_table_get_n_cols(table);
769  col_no++) {
770 
771  const dict_col_t* col
772  = dict_table_get_nth_col(table, col_no);
773 
774  if (col->ord_part) {
775  ulint pos;
776 
777  /* Write field number to undo log */
778  if (trx_undo_left(undo_page, ptr) < 5 + 15) {
779 
780  return(0);
781  }
782 
783  pos = dict_index_get_nth_col_pos(index,
784  col_no);
785  ptr += mach_write_compressed(ptr, pos);
786 
787  /* Save the old value of field */
788  field = rec_get_nth_field(rec, offsets, pos,
789  &flen);
790 
791  if (rec_offs_nth_extern(offsets, pos)) {
792  const dict_col_t* col =
793  dict_index_get_nth_col(
794  index, pos);
795  ulint prefix_len =
796  dict_max_field_len_store_undo(
797  table, col);
798 
799  ut_a(prefix_len < sizeof ext_buf);
800 
801  ptr = trx_undo_page_report_modify_ext(
802  ptr,
803  flen < REC_ANTELOPE_MAX_INDEX_COL_LEN
804  && !ignore_prefix
805  ? ext_buf : NULL, prefix_len,
806  dict_table_zip_size(table),
807  &field, &flen);
808  } else {
809  ptr += mach_write_compressed(
810  ptr, flen);
811  }
812 
813  if (flen != UNIV_SQL_NULL) {
814  if (trx_undo_left(undo_page, ptr)
815  < flen) {
816 
817  return(0);
818  }
819 
820  ut_memcpy(ptr, field, flen);
821  ptr += flen;
822  }
823  }
824  }
825 
826  mach_write_to_2(old_ptr, ptr - old_ptr);
827  }
828 
829  /*----------------------------------------*/
830  /* Write pointers to the previous and the next undo log records */
831  if (trx_undo_left(undo_page, ptr) < 2) {
832 
833  return(0);
834  }
835 
836  mach_write_to_2(ptr, first_free);
837  ptr += 2;
838  mach_write_to_2(undo_page + first_free, ptr - undo_page);
839 
840  mach_write_to_2(undo_page + TRX_UNDO_PAGE_HDR + TRX_UNDO_PAGE_FREE,
841  ptr - undo_page);
842 
843  /* Write to the REDO log about this change in the UNDO log */
844 
845  trx_undof_page_add_undo_rec_log(undo_page, first_free,
846  ptr - undo_page, mtr);
847  return(first_free);
848 }
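
/* Informal summary of the update/delete-mark undo record built above, in
the order the bytes appear on the undo page:
 2 bytes offset of the next undo log record
 1 byte type_cmpl (type, cmpl_info * TRX_UNDO_CMPL_INFO_MULT, and
 possibly TRX_UNDO_UPD_EXTERN)
 ... undo_no and table id, much-compressed
 1 byte info bits of rec
 ... DATA_TRX_ID and DATA_ROLL_PTR of rec, compressed
 ... the unique key fields, each as compressed length + data
 ... if update != NULL: number of updated fields, then for each field
 its field number and old value
 ... unless cmpl_info & UPD_NODE_NO_ORD_CHANGE: a 2-byte byte count
 followed by the old values of all ordering columns
 2 bytes offset of the start of this record */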
849 
850 /**********************************************************************/
854 UNIV_INTERN
855 byte*
856 trx_undo_update_rec_get_sys_cols(
857 /*=============================*/
858  byte* ptr,
861  trx_id_t* trx_id,
862  roll_ptr_t* roll_ptr,
863  ulint* info_bits)
864 {
865  /* Read the state of the info bits */
866  *info_bits = mach_read_from_1(ptr);
867  ptr += 1;
868 
869  /* Read the values of the system columns */
870 
871  *trx_id = mach_ull_read_compressed(ptr);
872  ptr += mach_ull_get_compressed_size(*trx_id);
873 
874  *roll_ptr = mach_ull_read_compressed(ptr);
875  ptr += mach_ull_get_compressed_size(*roll_ptr);
876 
877  return(ptr);
878 }
879 
880 /**********************************************************************/
883 UNIV_INLINE
884 byte*
885 trx_undo_update_rec_get_n_upd_fields(
886 /*=================================*/
887  byte* ptr,
888  ulint* n)
889 {
890  *n = mach_read_compressed(ptr);
891  ptr += mach_get_compressed_size(*n);
892 
893  return(ptr);
894 }
895 
896 /**********************************************************************/
899 UNIV_INLINE
900 byte*
901 trx_undo_update_rec_get_field_no(
902 /*=============================*/
903  byte* ptr,
904  ulint* field_no)
905 {
906  *field_no = mach_read_compressed(ptr);
907  ptr += mach_get_compressed_size(*field_no);
908 
909  return(ptr);
910 }
911 
912 /*******************************************************************/
916 UNIV_INTERN
917 byte*
918 trx_undo_update_rec_get_update(
919 /*===========================*/
920  byte* ptr,
926  dict_index_t* index,
927  ulint type,
932  trx_id_t trx_id,
933  roll_ptr_t roll_ptr,
934  ulint info_bits,
935  trx_t* trx,
936  mem_heap_t* heap,
938  upd_t** upd)
939 {
940  upd_field_t* upd_field;
941  upd_t* update;
942  ulint n_fields;
943  byte* buf;
944  ulint i;
945 
946  ut_a(dict_index_is_clust(index));
947 
948  if (type != TRX_UNDO_DEL_MARK_REC) {
949  ptr = trx_undo_update_rec_get_n_upd_fields(ptr, &n_fields);
950  } else {
951  n_fields = 0;
952  }
953 
954  update = upd_create(n_fields + 2, heap);
955 
956  update->info_bits = info_bits;
957 
958  /* Store first trx id and roll ptr to update vector */
959 
960  upd_field = upd_get_nth_field(update, n_fields);
961 
962  buf = static_cast<byte*>(mem_heap_alloc(heap, DATA_TRX_ID_LEN));
963 
964  trx_write_trx_id(buf, trx_id);
965 
966  upd_field_set_field_no(upd_field,
967  dict_index_get_sys_col_pos(index, DATA_TRX_ID),
968  index, trx);
969  dfield_set_data(&(upd_field->new_val), buf, DATA_TRX_ID_LEN);
970 
971  upd_field = upd_get_nth_field(update, n_fields + 1);
972 
973  buf = static_cast<byte*>(mem_heap_alloc(heap, DATA_ROLL_PTR_LEN));
974 
975  trx_write_roll_ptr(buf, roll_ptr);
976 
977  upd_field_set_field_no(
978  upd_field, dict_index_get_sys_col_pos(index, DATA_ROLL_PTR),
979  index, trx);
980  dfield_set_data(&(upd_field->new_val), buf, DATA_ROLL_PTR_LEN);
981 
982  /* Store then the updated ordinary columns to the update vector */
983 
984  for (i = 0; i < n_fields; i++) {
985 
986  byte* field;
987  ulint len;
988  ulint field_no;
989  ulint orig_len;
990 
991  ptr = trx_undo_update_rec_get_field_no(ptr, &field_no);
992 
993  if (field_no >= dict_index_get_n_fields(index)) {
994  fprintf(stderr,
995  "InnoDB: Error: trying to access"
996  " update undo rec field %lu in ",
997  (ulong) field_no);
998  dict_index_name_print(stderr, trx, index);
999  fprintf(stderr, "\n"
1000  "InnoDB: but index has only %lu fields\n"
1001  "InnoDB: Submit a detailed bug report"
1002  " to http://bugs.mysql.com\n"
1003  "InnoDB: Run also CHECK TABLE ",
1004  (ulong) dict_index_get_n_fields(index));
1005  ut_print_name(stderr, trx, TRUE, index->table_name);
1006  fprintf(stderr, "\n"
1007  "InnoDB: n_fields = %lu, i = %lu, ptr %p\n",
1008  (ulong) n_fields, (ulong) i, ptr);
1009  ut_ad(0);
1010  *upd = NULL;
1011  return(NULL);
1012  }
1013 
1014  upd_field = upd_get_nth_field(update, i);
1015 
1016  upd_field_set_field_no(upd_field, field_no, index, trx);
1017 
1018  ptr = trx_undo_rec_get_col_val(ptr, &field, &len, &orig_len);
1019 
1020  upd_field->orig_len = orig_len;
1021 
1022  if (len == UNIV_SQL_NULL) {
1023  dfield_set_null(&upd_field->new_val);
1024  } else if (len < UNIV_EXTERN_STORAGE_FIELD) {
1025  dfield_set_data(&upd_field->new_val, field, len);
1026  } else {
1027  len -= UNIV_EXTERN_STORAGE_FIELD;
1028 
1029  dfield_set_data(&upd_field->new_val, field, len);
1030  dfield_set_ext(&upd_field->new_val);
1031  }
1032  }
1033 
1034  *upd = update;
1035 
1036  return(ptr);
1037 }
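
/* Illustrative sketch, not part of the original file: a caller that only
needs to inspect the update vector built above can walk it with the upd_t
accessors already used in this file. Note that the vector also contains the
DATA_TRX_ID and DATA_ROLL_PTR fields appended above, which are never NULL. */
static ulint
example_count_non_null_new_values(
/*==============================*/
 upd_t* update) /* in: update vector from trx_undo_update_rec_get_update() */
{
 ulint n = upd_get_n_fields(update);
 ulint count = 0;
 ulint i;

 for (i = 0; i < n; i++) {
 upd_field_t* uf = upd_get_nth_field(update, i);

 if (dfield_get_len(&uf->new_val) != UNIV_SQL_NULL) {
 count++;
 }
 }

 return(count);
}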
1038 
1039 /*******************************************************************/
1044 UNIV_INTERN
1045 byte*
1046 trx_undo_rec_get_partial_row(
1047 /*=========================*/
1048  byte* ptr,
1055  dict_index_t* index,
1056  dtuple_t** row,
1057  ibool ignore_prefix,
1060  mem_heap_t* heap)
1062 {
1063  const byte* end_ptr;
1064  ulint row_len;
1065 
1066  ut_ad(index);
1067  ut_ad(ptr);
1068  ut_ad(row);
1069  ut_ad(heap);
1070  ut_ad(dict_index_is_clust(index));
1071 
1072  row_len = dict_table_get_n_cols(index->table);
1073 
1074  *row = dtuple_create(heap, row_len);
1075 
1076  /* Mark all columns in the row uninitialized, so that
1077  we can distinguish missing fields from fields that are SQL NULL. */
1078  for (ulint i = 0; i < row_len; i++) {
1079  dfield_get_type(dtuple_get_nth_field(*row, i))
1080  ->mtype = DATA_MISSING;
1081  }
1082 
1083  end_ptr = ptr + mach_read_from_2(ptr);
1084  ptr += 2;
1085 
1086  while (ptr != end_ptr) {
1087  dfield_t* dfield;
1088  byte* field;
1089  ulint field_no;
1090  const dict_col_t* col;
1091  ulint col_no;
1092  ulint len;
1093  ulint orig_len;
1094 
1095  ptr = trx_undo_update_rec_get_field_no(ptr, &field_no);
1096 
1097  col = dict_index_get_nth_col(index, field_no);
1098  col_no = dict_col_get_no(col);
1099 
1100  ptr = trx_undo_rec_get_col_val(ptr, &field, &len, &orig_len);
1101 
1102  dfield = dtuple_get_nth_field(*row, col_no);
1103  dict_col_copy_type(
1104  dict_table_get_nth_col(index->table, col_no),
1105  dfield_get_type(dfield));
1106  dfield_set_data(dfield, field, len);
1107 
1108  if (len != UNIV_SQL_NULL
1109  && len >= UNIV_EXTERN_STORAGE_FIELD) {
1110  dfield_set_len(dfield,
1111  len - UNIV_EXTERN_STORAGE_FIELD);
1112  dfield_set_ext(dfield);
1113  /* If the prefix of this column is indexed,
1114  ensure that enough prefix is stored in the
1115  undo log record. */
1116  if (!ignore_prefix && col->ord_part) {
1117  ut_a(dfield_get_len(dfield)
1118  >= BTR_EXTERN_FIELD_REF_SIZE);
1119  ut_a(dict_table_get_format(index->table)
1120  >= UNIV_FORMAT_B
1121  || dfield_get_len(dfield)
1122  >= REC_ANTELOPE_MAX_INDEX_COL_LEN
1123  + BTR_EXTERN_FIELD_REF_SIZE);
1124  }
1125  }
1126  }
1127 
1128  return(ptr);
1129 }
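
/* Illustrative sketch, not part of the original file: because the function
above marks columns that were not logged with DATA_MISSING, a caller can
tell a missing column apart from one that is SQL NULL: */
static ibool
example_partial_row_has_col(
/*========================*/
 dtuple_t* row, /* in: *row built by trx_undo_rec_get_partial_row() */
 ulint col_no) /* in: table column number */
{
 dfield_t* dfield = dtuple_get_nth_field(row, col_no);

 return(dfield_get_type(dfield)->mtype != DATA_MISSING);
}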
1130 #endif /* !UNIV_HOTBACKUP */
1131 
1132 /***********************************************************************/
1135 static __attribute__((nonnull))
1136 ibool
1137 trx_undo_erase_page_end(
1138 /*====================*/
1139  page_t* undo_page,
1140  mtr_t* mtr)
1141 {
1142  ulint first_free;
1143 
1144  first_free = mach_read_from_2(undo_page + TRX_UNDO_PAGE_HDR
1145  + TRX_UNDO_PAGE_FREE);
1146  memset(undo_page + first_free, 0xff,
1147  (UNIV_PAGE_SIZE - FIL_PAGE_DATA_END) - first_free);
1148 
1149  mlog_write_initial_log_record(undo_page, MLOG_UNDO_ERASE_END, mtr);
1150  return(first_free != TRX_UNDO_PAGE_HDR + TRX_UNDO_PAGE_HDR_SIZE);
1151 }
1152 
1153 /***********************************************************/
1156 UNIV_INTERN
1157 byte*
1158 trx_undo_parse_erase_page_end(
1159 /*==========================*/
1160  byte* ptr,
1161  byte* end_ptr __attribute__((unused)),
1162  page_t* page,
1163  mtr_t* mtr)
1164 {
1165  ut_ad(ptr && end_ptr);
1166 
1167  if (page == NULL) {
1168 
1169  return(ptr);
1170  }
1171 
1172  trx_undo_erase_page_end(page, mtr);
1173 
1174  return(ptr);
1175 }
1176 
1177 #ifndef UNIV_HOTBACKUP
1178 /***********************************************************************/
1184 UNIV_INTERN
1185 dberr_t
1186 trx_undo_report_row_operation(
1187 /*==========================*/
1188  ulint flags,
1190  ulint op_type,
1192  que_thr_t* thr,
1193  dict_index_t* index,
1194  const dtuple_t* clust_entry,
1197  const upd_t* update,
1199  ulint cmpl_info,
1201  const rec_t* rec,
1204  const ulint* offsets,
1205  roll_ptr_t* roll_ptr)
1209 {
1210  trx_t* trx;
1211  trx_undo_t* undo;
1212  ulint page_no;
1213  buf_block_t* undo_block;
1214  trx_rseg_t* rseg;
1215  mtr_t mtr;
1216  dberr_t err = DB_SUCCESS;
1217 #ifdef UNIV_DEBUG
1218  int loop_count = 0;
1219 #endif /* UNIV_DEBUG */
1220 
1222  ut_a(dict_index_is_clust(index));
1223  ut_ad(!rec || rec_offs_validate(rec, index, offsets));
1224 
1225  if (flags & BTR_NO_UNDO_LOG_FLAG) {
1226 
1227  *roll_ptr = 0;
1228 
1229  return(DB_SUCCESS);
1230  }
1231 
1232  ut_ad(thr);
1233  ut_ad((op_type != TRX_UNDO_INSERT_OP)
1234  || (clust_entry && !update && !rec));
1235 
1236  trx = thr_get_trx(thr);
1237 
1238  /* This table is visible only to the session that created it. */
1239  if (trx->read_only) {
1241  /* MySQL should block writes to non-temporary tables. */
1242  ut_a(DICT_TF2_FLAG_IS_SET(index->table, DICT_TF2_TEMPORARY));
1243  if (trx->rseg == 0) {
1244  trx_assign_rseg(trx);
1245  }
1246  }
1247 
1248  rseg = trx->rseg;
1249 
1250  mtr_start(&mtr);
1251  mutex_enter(&trx->undo_mutex);
1252 
1253  /* If the undo log is not assigned yet, assign one */
1254 
1255  switch (op_type) {
1256  case TRX_UNDO_INSERT_OP:
1257  undo = trx->insert_undo;
1258 
1259  if (undo == NULL) {
1260 
1261  err = trx_undo_assign_undo(trx, TRX_UNDO_INSERT);
1262  undo = trx->insert_undo;
1263 
1264  if (undo == NULL) {
1265  /* Did not succeed */
1266  ut_ad(err != DB_SUCCESS);
1267  goto err_exit;
1268  }
1269 
1270  ut_ad(err == DB_SUCCESS);
1271  }
1272  break;
1273  default:
1274  ut_ad(op_type == TRX_UNDO_MODIFY_OP);
1275 
1276  undo = trx->update_undo;
1277 
1278  if (undo == NULL) {
1279  err = trx_undo_assign_undo(trx, TRX_UNDO_UPDATE);
1280  undo = trx->update_undo;
1281 
1282  if (undo == NULL) {
1283  /* Did not succeed */
1284  ut_ad(err != DB_SUCCESS);
1285  goto err_exit;
1286  }
1287  }
1288 
1289  ut_ad(err == DB_SUCCESS);
1290  }
1291 
1292  page_no = undo->last_page_no;
1293  undo_block = buf_page_get_gen(
1294  undo->space, undo->zip_size, page_no, RW_X_LATCH,
1295  undo->guess_block, BUF_GET, __FILE__, __LINE__, &mtr);
1296  buf_block_dbg_add_level(undo_block, SYNC_TRX_UNDO_PAGE);
1297 
1298  do {
1299  page_t* undo_page;
1300  ulint offset;
1301 
1302  undo_page = buf_block_get_frame(undo_block);
1303  ut_ad(page_no == buf_block_get_page_no(undo_block));
1304 
1305  switch (op_type) {
1306  case TRX_UNDO_INSERT_OP:
1307  offset = trx_undo_page_report_insert(
1308  undo_page, trx, index, clust_entry, &mtr);
1309  break;
1310  default:
1311  ut_ad(op_type == TRX_UNDO_MODIFY_OP);
1312  offset = trx_undo_page_report_modify(
1313  undo_page, trx, index, rec, offsets, update,
1314  cmpl_info, &mtr);
1315  }
1316 
1317  if (UNIV_UNLIKELY(offset == 0)) {
1318  /* The record did not fit on the page. We erase the
1319  end segment of the undo log page and write a log
1320  record of it: this is to ensure that in the debug
1321  version the replicate page constructed using the log
1322  records stays identical to the original page */
1323 
1324  if (!trx_undo_erase_page_end(undo_page, &mtr)) {
1325  /* The record did not fit on an empty
1326  undo page. Discard the freshly allocated
1327  page and return an error. */
1328 
1329  /* When we remove a page from an undo
1330  log, this is analogous to a
1331  pessimistic insert in a B-tree, and we
1332  must reserve the counterpart of the
1333  tree latch, which is the rseg
1334  mutex. We must commit the mini-transaction
1335  first, because it may be holding lower-level
1336  latches, such as SYNC_FSP and SYNC_FSP_PAGE. */
1337 
1338  mtr_commit(&mtr);
1339  mtr_start(&mtr);
1340 
1341  mutex_enter(&rseg->mutex);
1342  trx_undo_free_last_page(trx, undo, &mtr);
1343  mutex_exit(&rseg->mutex);
1344 
1345  err = DB_UNDO_RECORD_TOO_BIG;
1346  goto err_exit;
1347  }
1348 
1349  mtr_commit(&mtr);
1350  } else {
1351  /* Success */
1352 
1353  mtr_commit(&mtr);
1354 
1355  undo->empty = FALSE;
1356  undo->top_page_no = page_no;
1357  undo->top_offset = offset;
1358  undo->top_undo_no = trx->undo_no;
1359  undo->guess_block = undo_block;
1360 
1361  trx->undo_no++;
1362 
1363  mutex_exit(&trx->undo_mutex);
1364 
1365  *roll_ptr = trx_undo_build_roll_ptr(
1366  op_type == TRX_UNDO_INSERT_OP,
1367  rseg->id, page_no, offset);
1368  return(DB_SUCCESS);
1369  }
1370 
1371  ut_ad(page_no == undo->last_page_no);
1372 
1373  /* We have to extend the undo log by one page */
1374 
1375  ut_ad(++loop_count < 2);
1376  mtr_start(&mtr);
1377 
1378  /* When we add a page to an undo log, this is analogous to
1379  a pessimistic insert in a B-tree, and we must reserve the
1380  counterpart of the tree latch, which is the rseg mutex. */
1381 
1382  mutex_enter(&rseg->mutex);
1383  undo_block = trx_undo_add_page(trx, undo, &mtr);
1384  mutex_exit(&rseg->mutex);
1385 
1386  page_no = undo->last_page_no;
1387  } while (undo_block != NULL);
1388 
1389  /* Did not succeed: out of space */
1390  err = DB_OUT_OF_FILE_SPACE;
1391 
1392 err_exit:
1393  mutex_exit(&trx->undo_mutex);
1394  mtr_commit(&mtr);
1395  return(err);
1396 }
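
/* Illustrative sketch, not part of the original file: the roll pointer
returned above packs the "is insert" flag, the rollback segment id, the undo
page number and the page offset of the record; trx_undo_decode_roll_ptr()
recovers them, as is also done in trx_undo_get_undo_rec_low() below: */
static void
example_print_roll_ptr(
/*===================*/
 roll_ptr_t roll_ptr) /* in: roll pointer from trx_undo_report_row_operation() */
{
 ibool is_insert;
 ulint rseg_id;
 ulint page_no;
 ulint offset;

 trx_undo_decode_roll_ptr(roll_ptr, &is_insert, &rseg_id,
 &page_no, &offset);

 fprintf(stderr, "roll_ptr: insert=%lu rseg=%lu page=%lu offset=%lu\n",
 (ulong) is_insert, (ulong) rseg_id,
 (ulong) page_no, (ulong) offset);
}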
1397 
1398 /*============== BUILDING PREVIOUS VERSION OF A RECORD ===============*/
1399 
1400 /******************************************************************/
1404 UNIV_INTERN
1405 trx_undo_rec_t*
1406 trx_undo_get_undo_rec_low(
1407 /*======================*/
1408  roll_ptr_t roll_ptr,
1409  mem_heap_t* heap)
1410 {
1411  trx_undo_rec_t* undo_rec;
1412  ulint rseg_id;
1413  ulint page_no;
1414  ulint offset;
1415  const page_t* undo_page;
1416  trx_rseg_t* rseg;
1417  ibool is_insert;
1418  mtr_t mtr;
1419 
1420  trx_undo_decode_roll_ptr(roll_ptr, &is_insert, &rseg_id, &page_no,
1421  &offset);
1422  rseg = trx_rseg_get_on_id(rseg_id);
1423 
1424  mtr_start(&mtr);
1425 
1426  undo_page = trx_undo_page_get_s_latched(rseg->space, rseg->zip_size,
1427  page_no, &mtr);
1428 
1429  undo_rec = trx_undo_rec_copy(undo_page + offset, heap);
1430 
1431  mtr_commit(&mtr);
1432 
1433  return(undo_rec);
1434 }
1435 
1436 /******************************************************************/
1444 static __attribute__((nonnull, warn_unused_result))
1445 bool
1446 trx_undo_get_undo_rec(
1447 /*==================*/
1448  roll_ptr_t roll_ptr,
1449  trx_id_t trx_id,
1452  trx_undo_rec_t**undo_rec,
1453  mem_heap_t* heap)
1454 {
1455  bool missing_history;
1456 
1457  rw_lock_s_lock(&purge_sys->latch);
1458  missing_history = read_view_sees_trx_id(purge_sys->view, trx_id);
1459 
1460  if (!missing_history) {
1461  *undo_rec = trx_undo_get_undo_rec_low(roll_ptr, heap);
1462  }
1463 
1464  rw_lock_s_unlock(&purge_sys->latch);
1465 
1466  return(missing_history);
1467 }
1468 
1469 #ifdef UNIV_DEBUG
1470 #define ATTRIB_USED_ONLY_IN_DEBUG
1471 #else /* UNIV_DEBUG */
1472 #define ATTRIB_USED_ONLY_IN_DEBUG __attribute__((unused))
1473 #endif /* UNIV_DEBUG */
1474 
1475 /*******************************************************************/
1482 UNIV_INTERN
1483 bool
1484 trx_undo_prev_version_build(
1485 /*========================*/
1486  const rec_t* index_rec ATTRIB_USED_ONLY_IN_DEBUG,
1489  mtr_t* index_mtr ATTRIB_USED_ONLY_IN_DEBUG,
1492  const rec_t* rec,
1493  dict_index_t* index,
1494  ulint* offsets,
1495  mem_heap_t* heap,
1497  rec_t** old_vers)
1502 {
1503  trx_undo_rec_t* undo_rec = NULL;
1504  dtuple_t* entry;
1505  trx_id_t rec_trx_id;
1506  ulint type;
1507  undo_no_t undo_no;
1508  table_id_t table_id;
1509  trx_id_t trx_id;
1510  roll_ptr_t roll_ptr;
1511  upd_t* update;
1512  byte* ptr;
1513  ulint info_bits;
1514  ulint cmpl_info;
1515  bool dummy_extern;
1516  byte* buf;
1517 #ifdef UNIV_SYNC_DEBUG
1518  ut_ad(!rw_lock_own(&purge_sys->latch, RW_LOCK_SHARED));
1519 #endif /* UNIV_SYNC_DEBUG */
1520  ut_ad(mtr_memo_contains_page(index_mtr, index_rec, MTR_MEMO_PAGE_S_FIX)
1521  || mtr_memo_contains_page(index_mtr, index_rec,
1522  MTR_MEMO_PAGE_X_FIX));
1523  ut_ad(rec_offs_validate(rec, index, offsets));
1524  ut_a(dict_index_is_clust(index));
1525 
1526  roll_ptr = row_get_rec_roll_ptr(rec, index, offsets);
1527 
1528  *old_vers = NULL;
1529 
1530  if (trx_undo_roll_ptr_is_insert(roll_ptr)) {
1531  /* The record rec is the first inserted version */
1532  return(true);
1533  }
1534 
1535  rec_trx_id = row_get_rec_trx_id(rec, index, offsets);
1536 
1537  if (trx_undo_get_undo_rec(roll_ptr, rec_trx_id, &undo_rec, heap)) {
1538  /* The undo record may already have been purged,
1539  during purge or semi-consistent read. */
1540  return(false);
1541  }
1542 
1543  ptr = trx_undo_rec_get_pars(undo_rec, &type, &cmpl_info,
1544  &dummy_extern, &undo_no, &table_id);
1545 
1546  if (table_id != index->table->id) {
1547  /* The table should have been rebuilt, but purge has
1548  not yet removed the undo log records for the
1549  now-dropped old table (table_id). */
1550  return(true);
1551  }
1552 
1553  ptr = trx_undo_update_rec_get_sys_cols(ptr, &trx_id, &roll_ptr,
1554  &info_bits);
1555 
1556  /* (a) If a clustered index record version is such that the
1557  trx id stamp in it is bigger than purge_sys->view, then the
1558  BLOBs in that version are known to exist (the purge has not
1559  progressed that far);
1560 
1561  (b) if the version is the first version such that trx id in it
1562  is less than purge_sys->view, and it is not delete-marked,
1563  then the BLOBs in that version are known to exist (the purge
1564  cannot have purged the BLOBs referenced by that version
1565  yet).
1566 
1567  This function does not fetch any BLOBs. The callers might, by
1568  possibly invoking row_ext_create() via row_build(). However,
1569  they should have all needed information in the *old_vers
1570  returned by this function. This is because *old_vers is based
1571  on the transaction undo log records. The function
1572  trx_undo_page_fetch_ext() will write BLOB prefixes to the
1573  transaction undo log that are at least as long as the longest
1574  possible column prefix in a secondary index. Thus, secondary
1575  index entries for *old_vers can be constructed without
1576  dereferencing any BLOB pointers. */
1577 
1578  ptr = trx_undo_rec_skip_row_ref(ptr, index);
1579 
1580  ptr = trx_undo_update_rec_get_update(ptr, index, type, trx_id,
1581  roll_ptr, info_bits,
1582  NULL, heap, &update);
1583  ut_a(ptr);
1584 
1585 # if defined UNIV_DEBUG || defined UNIV_BLOB_LIGHT_DEBUG
1586  ut_a(!rec_offs_any_null_extern(rec, offsets));
1587 # endif /* UNIV_DEBUG || UNIV_BLOB_LIGHT_DEBUG */
1588 
1589  if (row_upd_changes_field_size_or_external(index, offsets, update)) {
1590  ulint n_ext;
1591 
1592  /* We should confirm the existence of disowned external data,
1593  if the previous version record is delete marked. If the trx_id
1594  of the previous record is seen by purge view, we should treat
1595  it as missing history, because the disowned external data
1596  might be purged already.
1597 
1598  The inherited external data (BLOBs) can be freed (purged)
1599  after trx_id was committed, provided that no view was started
1600  before trx_id. If the purge view can see the committed
1601  delete-marked record by trx_id, no transactions need to access
1602  the BLOB. */
1603 
1604  /* the row_upd_changes_disowned_external(update) call could be
1605  omitted, but the synchronization on purge_sys->latch is likely
1606  more expensive. */
1607 
1608  if ((update->info_bits & REC_INFO_DELETED_FLAG)
1609  && row_upd_changes_disowned_external(update)) {
1610  bool missing_extern;
1611 
1612  rw_lock_s_lock(&purge_sys->latch);
1613  missing_extern = read_view_sees_trx_id(purge_sys->view,
1614  trx_id);
1615  rw_lock_s_unlock(&purge_sys->latch);
1616 
1617  if (missing_extern) {
1618  /* treat as a fresh insert, not to
1619  cause assertion error at the caller. */
1620  return(true);
1621  }
1622  }
1623 
1624  /* We have to set the appropriate extern storage bits in the
1625  old version of the record: the extern bits in rec for those
1626  fields that update does NOT update, as well as the bits for
1627  those fields that update updates to become externally stored
1628  fields. Store the info: */
1629 
1630  entry = row_rec_to_index_entry(
1631  rec, index, offsets, &n_ext, heap);
1632  n_ext += btr_push_update_extern_fields(entry, update, heap);
1633  /* The page containing the clustered index record
1634  corresponding to entry is latched in mtr. Thus the
1635  following call is safe. */
1636  row_upd_index_replace_new_col_vals(entry, index, update, heap);
1637 
1638  buf = static_cast<byte*>(
1639  mem_heap_alloc(
1640  heap,
1641  rec_get_converted_size(index, entry, n_ext)));
1642 
1643  *old_vers = rec_convert_dtuple_to_rec(buf, index,
1644  entry, n_ext);
1645  } else {
1646  buf = static_cast<byte*>(
1647  mem_heap_alloc(heap, rec_offs_size(offsets)));
1648 
1649  *old_vers = rec_copy(buf, rec, offsets);
1650  rec_offs_make_valid(*old_vers, index, offsets);
1651  row_upd_rec_in_place(*old_vers, index, offsets, update, NULL);
1652  }
1653 
1654  return(true);
1655 }
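
/* Informal summary of the function above: it returns false only when the
undo log record needed to build the previous version may already have been
purged; in every other case it returns true, and *old_vers is either the
rebuilt previous version or NULL when rec must be treated as the first
version (an insert, a rebuilt table, or missing disowned external data). */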
1656 #endif /* !UNIV_HOTBACKUP */