MySQL 5.6.14 Source Code Document
 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Groups Pages
trx0undo.cc
Go to the documentation of this file.
1 /*****************************************************************************
2 
3 Copyright (c) 1996, 2013, Oracle and/or its affiliates. All Rights Reserved.
4 
5 This program is free software; you can redistribute it and/or modify it under
6 the terms of the GNU General Public License as published by the Free Software
7 Foundation; version 2 of the License.
8 
9 This program is distributed in the hope that it will be useful, but WITHOUT
10 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
11 FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
12 
13 You should have received a copy of the GNU General Public License along with
14 this program; if not, write to the Free Software Foundation, Inc.,
15 51 Franklin Street, Suite 500, Boston, MA 02110-1335 USA
16 
17 *****************************************************************************/
18 
19 /**************************************************/
26 #include "trx0undo.h"
27 
28 #ifdef UNIV_NONINL
29 #include "trx0undo.ic"
30 #endif
31 
32 #include "fsp0fsp.h"
33 #ifndef UNIV_HOTBACKUP
34 #include "mach0data.h"
35 #include "mtr0log.h"
36 #include "trx0rseg.h"
37 #include "trx0trx.h"
38 #include "srv0srv.h"
39 #include "srv0start.h"
40 #include "trx0rec.h"
41 #include "trx0purge.h"
42 #include "srv0mon.h"
43 
44 /* How should the old versions in the history list be managed?
45  ----------------------------------------------------------
46 If each transaction is given a whole page for its update undo log, file
47 space consumption can be 10 times higher than necessary. Therefore,
48 partly filled update undo log pages should be reusable. But then there
49 is no way individual pages can be ordered so that the ordering agrees
50 with the serialization numbers of the transactions on the pages. Thus,
51 the history list must be formed of undo logs, not their header pages as
52 it was in the old implementation.
53  However, on a single header page the transactions are placed in
54 the order of their serialization numbers. As old versions are purged, we
55 may free the page when the last transaction on the page has been purged.
56  A problem is that the purge has to go through the transactions
57 in the serialization order. This means that we have to look through all
58 rollback segments for the one that has the smallest transaction number
59 in its history list.
60  When should we do a purge? A purge is necessary when space is
61 running out in any of the rollback segments. Then we may have to purge
62 also old versions which might be needed by some consistent read. How do
63 we trigger the start of a purge? When a transaction writes to an undo log,
64 it may notice that the space is running out. When a read view is closed,
65 it may make some history superfluous. The server can have a utility which
66 periodically checks if it can purge some history.
67  In a parallelized purge we have the problem that a query thread
68 can remove a delete marked clustered index record before another query
69 thread has processed an earlier version of the record, which cannot then
70 be done because the row cannot be constructed from the clustered index
71 record. To avoid this problem, we will store in the update and delete mark
72 undo record also the columns necessary to construct the secondary index
73 entries which are modified.
74  We can latch the stack of versions of a single clustered index record
75 by taking a latch on the clustered index page. As long as the latch is held,
76 no new versions can be added and no versions removed by undo. But, a purge
77 can still remove old versions from the bottom of the stack. */
78 
79 /* How to protect rollback segments, undo logs, and history lists with
80  -------------------------------------------------------------------
81 latches?
82 -------
83 The contention of the trx_sys_t::mutex should be minimized. When a transaction
84 does its first insert or modify in an index, an undo log is assigned for it.
85 Then we must have an x-latch to the rollback segment header.
86  When the transaction does more modifies or rolls back, the undo log is
87 protected with undo_mutex in the transaction.
88  When the transaction commits, its insert undo log is either reset and
89 cached for a fast reuse, or freed. In these cases we must have an x-latch on
90 the rollback segment page. The update undo log is put to the history list. If
91 it is not suitable for reuse, its slot in the rollback segment is reset. In
92 both cases, an x-latch must be acquired on the rollback segment.
93  The purge operation steps through the history list without modifying
94 it until a truncate operation occurs, which can remove undo logs from the end
95 of the list and release undo log segments. In stepping through the list,
96 s-latches on the undo log pages are enough, but in a truncate, x-latches must
97 be obtained on the rollback segment and individual pages. */
98 #endif /* !UNIV_HOTBACKUP */
99 
100 /********************************************************************/
102 static
103 void
104 trx_undo_page_init(
105 /*===============*/
106  page_t* undo_page,
107  ulint type,
108  mtr_t* mtr);
110 #ifndef UNIV_HOTBACKUP
111 /********************************************************************/
114 static
115 trx_undo_t*
116 trx_undo_mem_create(
117 /*================*/
118  trx_rseg_t* rseg,
119  ulint id,
120  ulint type,
122  trx_id_t trx_id,
124  const XID* xid,
125  ulint page_no,
126  ulint offset);
127 #endif /* !UNIV_HOTBACKUP */
128 /***************************************************************/
133 static
134 ulint
135 trx_undo_insert_header_reuse(
136 /*=========================*/
137  page_t* undo_page,
139  trx_id_t trx_id,
140  mtr_t* mtr);
141 /**********************************************************************/
144 static
145 void
146 trx_undo_discard_latest_update_undo(
147 /*================================*/
148  page_t* undo_page,
149  mtr_t* mtr);
151 #ifndef UNIV_HOTBACKUP
152 /***********************************************************************/
155 static
157 trx_undo_get_prev_rec_from_prev_page(
158 /*=================================*/
160  ulint page_no,
161  ulint offset,
162  bool shared,
163  mtr_t* mtr)
164 {
165  ulint space;
166  ulint zip_size;
167  ulint prev_page_no;
168  page_t* prev_page;
169  page_t* undo_page;
170 
171  undo_page = page_align(rec);
172 
173  prev_page_no = flst_get_prev_addr(undo_page + TRX_UNDO_PAGE_HDR
174  + TRX_UNDO_PAGE_NODE, mtr)
175  .page;
176 
177  if (prev_page_no == FIL_NULL) {
178 
179  return(NULL);
180  }
181 
182  space = page_get_space_id(undo_page);
183  zip_size = fil_space_get_zip_size(space);
184 
185  buf_block_t* block = buf_page_get(space, zip_size, prev_page_no,
186  shared ? RW_S_LATCH : RW_X_LATCH,
187  mtr);
188  buf_block_dbg_add_level(block, SYNC_TRX_UNDO_PAGE);
189 
190  prev_page = buf_block_get_frame(block);
191 
192  return(trx_undo_page_get_last_rec(prev_page, page_no, offset));
193 }
194 
195 /***********************************************************************/
198 UNIV_INTERN
201 /*==================*/
202  trx_undo_rec_t* rec,
203  ulint page_no,
204  ulint offset,
205  bool shared,
206  mtr_t* mtr)
207 {
208  trx_undo_rec_t* prev_rec;
209 
210  prev_rec = trx_undo_page_get_prev_rec(rec, page_no, offset);
211 
212  if (prev_rec) {
213 
214  return(prev_rec);
215  }
216 
217  /* We have to go to the previous undo log page to look for the
218  previous record */
219 
220  return(trx_undo_get_prev_rec_from_prev_page(rec, page_no, offset,
221  shared, mtr));
222 }
223 
224 /***********************************************************************/
227 static
229 trx_undo_get_next_rec_from_next_page(
230 /*=================================*/
231  ulint space,
232  ulint zip_size,
234  page_t* undo_page,
235  ulint page_no,
236  ulint offset,
237  ulint mode,
238  mtr_t* mtr)
239 {
240  trx_ulogf_t* log_hdr;
241  ulint next_page_no;
242  page_t* next_page;
243  ulint next;
244 
245  if (page_no == page_get_page_no(undo_page)) {
246 
247  log_hdr = undo_page + offset;
248  next = mach_read_from_2(log_hdr + TRX_UNDO_NEXT_LOG);
249 
250  if (next != 0) {
251 
252  return(NULL);
253  }
254  }
255 
256  next_page_no = flst_get_next_addr(undo_page + TRX_UNDO_PAGE_HDR
257  + TRX_UNDO_PAGE_NODE, mtr)
258  .page;
259  if (next_page_no == FIL_NULL) {
260 
261  return(NULL);
262  }
263 
264  if (mode == RW_S_LATCH) {
265  next_page = trx_undo_page_get_s_latched(space, zip_size,
266  next_page_no, mtr);
267  } else {
268  ut_ad(mode == RW_X_LATCH);
269  next_page = trx_undo_page_get(space, zip_size,
270  next_page_no, mtr);
271  }
272 
273  return(trx_undo_page_get_first_rec(next_page, page_no, offset));
274 }
275 
276 /***********************************************************************/
279 UNIV_INTERN
282 /*==================*/
283  trx_undo_rec_t* rec,
284  ulint page_no,
285  ulint offset,
286  mtr_t* mtr)
287 {
288  ulint space;
289  ulint zip_size;
290  trx_undo_rec_t* next_rec;
291 
292  next_rec = trx_undo_page_get_next_rec(rec, page_no, offset);
293 
294  if (next_rec) {
295  return(next_rec);
296  }
297 
298  space = page_get_space_id(page_align(rec));
299  zip_size = fil_space_get_zip_size(space);
300 
301  return(trx_undo_get_next_rec_from_next_page(space, zip_size,
302  page_align(rec),
303  page_no, offset,
304  RW_S_LATCH, mtr));
305 }
306 
307 /***********************************************************************/
310 UNIV_INTERN
313 /*===================*/
314  ulint space,
315  ulint zip_size,
317  ulint page_no,
318  ulint offset,
319  ulint mode,
320  mtr_t* mtr)
321 {
322  page_t* undo_page;
324 
325  if (mode == RW_S_LATCH) {
326  undo_page = trx_undo_page_get_s_latched(space, zip_size,
327  page_no, mtr);
328  } else {
329  undo_page = trx_undo_page_get(space, zip_size, page_no, mtr);
330  }
331 
332  rec = trx_undo_page_get_first_rec(undo_page, page_no, offset);
333 
334  if (rec) {
335  return(rec);
336  }
337 
338  return(trx_undo_get_next_rec_from_next_page(space, zip_size,
339  undo_page, page_no, offset,
340  mode, mtr));
341 }
342 
343 /*============== UNDO LOG FILE COPY CREATION AND FREEING ==================*/
344 
345 /**********************************************************************/
347 UNIV_INLINE
348 void
350 /*===================*/
351  page_t* undo_page,
352  ulint type,
353  mtr_t* mtr)
354 {
356 
358 }
359 #else /* !UNIV_HOTBACKUP */
360 # define trx_undo_page_init_log(undo_page,type,mtr) ((void) 0)
361 #endif /* !UNIV_HOTBACKUP */
362 
363 /***********************************************************/
366 UNIV_INTERN
367 byte*
369 /*=====================*/
370  byte* ptr,
371  byte* end_ptr,
372  page_t* page,
373  mtr_t* mtr)
374 {
375  ulint type;
376 
377  ptr = mach_parse_compressed(ptr, end_ptr, &type);
378 
379  if (ptr == NULL) {
380 
381  return(NULL);
382  }
383 
384  if (page) {
385  trx_undo_page_init(page, type, mtr);
386  }
387 
388  return(ptr);
389 }
390 
391 /********************************************************************/
393 static
394 void
395 trx_undo_page_init(
396 /*===============*/
397  page_t* undo_page,
398  ulint type,
399  mtr_t* mtr)
400 {
401  trx_upagef_t* page_hdr;
402 
403  page_hdr = undo_page + TRX_UNDO_PAGE_HDR;
404 
405  mach_write_to_2(page_hdr + TRX_UNDO_PAGE_TYPE, type);
406 
408  TRX_UNDO_PAGE_HDR + TRX_UNDO_PAGE_HDR_SIZE);
410  TRX_UNDO_PAGE_HDR + TRX_UNDO_PAGE_HDR_SIZE);
411 
413 
414  trx_undo_page_init_log(undo_page, type, mtr);
415 }
416 
417 #ifndef UNIV_HOTBACKUP
418 /***************************************************************/
422 static __attribute__((nonnull, warn_unused_result))
423 dberr_t
424 trx_undo_seg_create(
425 /*================*/
426  trx_rseg_t* rseg __attribute__((unused)),
429  ulint type,
431  ulint* id,
432  page_t** undo_page,
435  mtr_t* mtr)
436 {
437  ulint slot_no;
438  ulint space;
440  trx_upagef_t* page_hdr;
441  trx_usegf_t* seg_hdr;
442  ulint n_reserved;
443  ibool success;
444  dberr_t err = DB_SUCCESS;
445 
446  ut_ad(mtr && id && rseg_hdr);
447  ut_ad(mutex_own(&(rseg->mutex)));
448 
449  /* fputs(type == TRX_UNDO_INSERT
450  ? "Creating insert undo log segment\n"
451  : "Creating update undo log segment\n", stderr); */
452  slot_no = trx_rsegf_undo_find_free(rseg_hdr, mtr);
453 
454  if (slot_no == ULINT_UNDEFINED) {
455  ut_print_timestamp(stderr);
456  fprintf(stderr,
457  " InnoDB: Warning: cannot find a free slot for"
458  " an undo log. Do you have too\n"
459  "InnoDB: many active transactions"
460  " running concurrently?\n");
461 
463  }
464 
465  space = page_get_space_id(page_align(rseg_hdr));
466 
467  success = fsp_reserve_free_extents(&n_reserved, space, 2, FSP_UNDO,
468  mtr);
469  if (!success) {
470 
471  return(DB_OUT_OF_FILE_SPACE);
472  }
473 
474  /* Allocate a new file segment for the undo log */
475  block = fseg_create_general(space, 0,
477  + TRX_UNDO_FSEG_HEADER, TRUE, mtr);
478 
479  fil_space_release_free_extents(space, n_reserved);
480 
481  if (block == NULL) {
482  /* No space left */
483 
484  return(DB_OUT_OF_FILE_SPACE);
485  }
486 
487  buf_block_dbg_add_level(block, SYNC_TRX_UNDO_PAGE);
488 
489  *undo_page = buf_block_get_frame(block);
490 
491  page_hdr = *undo_page + TRX_UNDO_PAGE_HDR;
492  seg_hdr = *undo_page + TRX_UNDO_SEG_HDR;
493 
494  trx_undo_page_init(*undo_page, type, mtr);
495 
497  TRX_UNDO_SEG_HDR + TRX_UNDO_SEG_HDR_SIZE,
498  MLOG_2BYTES, mtr);
499 
500  mlog_write_ulint(seg_hdr + TRX_UNDO_LAST_LOG, 0, MLOG_2BYTES, mtr);
501 
502  flst_init(seg_hdr + TRX_UNDO_PAGE_LIST, mtr);
503 
505  page_hdr + TRX_UNDO_PAGE_NODE, mtr);
506 
507  trx_rsegf_set_nth_undo(rseg_hdr, slot_no,
508  page_get_page_no(*undo_page), mtr);
509  *id = slot_no;
510 
511  MONITOR_INC(MONITOR_NUM_UNDO_SLOT_USED);
512 
513  return(err);
514 }
515 
516 /**********************************************************************/
518 UNIV_INLINE
519 void
520 trx_undo_header_create_log(
521 /*=======================*/
522  const page_t* undo_page,
523  trx_id_t trx_id,
524  mtr_t* mtr)
525 {
527 
528  mlog_catenate_ull_compressed(mtr, trx_id);
529 }
530 #else /* !UNIV_HOTBACKUP */
531 # define trx_undo_header_create_log(undo_page,trx_id,mtr) ((void) 0)
532 #endif /* !UNIV_HOTBACKUP */
533 
534 /***************************************************************/
539 static
540 ulint
541 trx_undo_header_create(
542 /*===================*/
543  page_t* undo_page,
548  trx_id_t trx_id,
549  mtr_t* mtr)
550 {
551  trx_upagef_t* page_hdr;
552  trx_usegf_t* seg_hdr;
553  trx_ulogf_t* log_hdr;
554  trx_ulogf_t* prev_log_hdr;
555  ulint prev_log;
556  ulint free;
557  ulint new_free;
558 
559  ut_ad(mtr && undo_page);
560 
561  page_hdr = undo_page + TRX_UNDO_PAGE_HDR;
562  seg_hdr = undo_page + TRX_UNDO_SEG_HDR;
563 
564  free = mach_read_from_2(page_hdr + TRX_UNDO_PAGE_FREE);
565 
566  log_hdr = undo_page + free;
567 
568  new_free = free + TRX_UNDO_LOG_OLD_HDR_SIZE;
569 
570  ut_a(free + TRX_UNDO_LOG_XA_HDR_SIZE < UNIV_PAGE_SIZE - 100);
571 
572  mach_write_to_2(page_hdr + TRX_UNDO_PAGE_START, new_free);
573 
574  mach_write_to_2(page_hdr + TRX_UNDO_PAGE_FREE, new_free);
575 
576  mach_write_to_2(seg_hdr + TRX_UNDO_STATE, TRX_UNDO_ACTIVE);
577 
578  prev_log = mach_read_from_2(seg_hdr + TRX_UNDO_LAST_LOG);
579 
580  if (prev_log != 0) {
581  prev_log_hdr = undo_page + prev_log;
582 
583  mach_write_to_2(prev_log_hdr + TRX_UNDO_NEXT_LOG, free);
584  }
585 
586  mach_write_to_2(seg_hdr + TRX_UNDO_LAST_LOG, free);
587 
588  log_hdr = undo_page + free;
589 
590  mach_write_to_2(log_hdr + TRX_UNDO_DEL_MARKS, TRUE);
591 
592  mach_write_to_8(log_hdr + TRX_UNDO_TRX_ID, trx_id);
593  mach_write_to_2(log_hdr + TRX_UNDO_LOG_START, new_free);
594 
595  mach_write_to_1(log_hdr + TRX_UNDO_XID_EXISTS, FALSE);
596  mach_write_to_1(log_hdr + TRX_UNDO_DICT_TRANS, FALSE);
597 
598  mach_write_to_2(log_hdr + TRX_UNDO_NEXT_LOG, 0);
599  mach_write_to_2(log_hdr + TRX_UNDO_PREV_LOG, prev_log);
600 
601  /* Write the log record about the header creation */
602  trx_undo_header_create_log(undo_page, trx_id, mtr);
603 
604  return(free);
605 }
606 
607 #ifndef UNIV_HOTBACKUP
608 /********************************************************************/
610 static
611 void
612 trx_undo_write_xid(
613 /*===============*/
614  trx_ulogf_t* log_hdr,
615  const XID* xid,
616  mtr_t* mtr)
617 {
619  (ulint) xid->formatID, MLOG_4BYTES, mtr);
620 
622  (ulint) xid->gtrid_length, MLOG_4BYTES, mtr);
623 
625  (ulint) xid->bqual_length, MLOG_4BYTES, mtr);
626 
627  mlog_write_string(log_hdr + TRX_UNDO_XA_XID, (const byte*) xid->data,
628  XIDDATASIZE, mtr);
629 }
630 
631 /********************************************************************/
633 static
634 void
635 trx_undo_read_xid(
636 /*==============*/
637  trx_ulogf_t* log_hdr,
638  XID* xid)
639 {
640  xid->formatID = (long) mach_read_from_4(log_hdr + TRX_UNDO_XA_FORMAT);
641 
642  xid->gtrid_length
643  = (long) mach_read_from_4(log_hdr + TRX_UNDO_XA_TRID_LEN);
644  xid->bqual_length
645  = (long) mach_read_from_4(log_hdr + TRX_UNDO_XA_BQUAL_LEN);
646 
647  memcpy(xid->data, log_hdr + TRX_UNDO_XA_XID, XIDDATASIZE);
648 }
649 
650 /***************************************************************/
652 static
653 void
654 trx_undo_header_add_space_for_xid(
655 /*==============================*/
656  page_t* undo_page,
657  trx_ulogf_t* log_hdr,
658  mtr_t* mtr)
659 {
660  trx_upagef_t* page_hdr;
661  ulint free;
662  ulint new_free;
663 
664  page_hdr = undo_page + TRX_UNDO_PAGE_HDR;
665 
666  free = mach_read_from_2(page_hdr + TRX_UNDO_PAGE_FREE);
667 
668  /* free is now the end offset of the old style undo log header */
669 
670  ut_a(free == (ulint)(log_hdr - undo_page) + TRX_UNDO_LOG_OLD_HDR_SIZE);
671 
672  new_free = free + (TRX_UNDO_LOG_XA_HDR_SIZE
674 
675  /* Add space for a XID after the header, update the free offset
676  fields on the undo log page and in the undo log header */
677 
678  mlog_write_ulint(page_hdr + TRX_UNDO_PAGE_START, new_free,
679  MLOG_2BYTES, mtr);
680 
681  mlog_write_ulint(page_hdr + TRX_UNDO_PAGE_FREE, new_free,
682  MLOG_2BYTES, mtr);
683 
684  mlog_write_ulint(log_hdr + TRX_UNDO_LOG_START, new_free,
685  MLOG_2BYTES, mtr);
686 }
687 
688 /**********************************************************************/
690 UNIV_INLINE
691 void
692 trx_undo_insert_header_reuse_log(
693 /*=============================*/
694  const page_t* undo_page,
695  trx_id_t trx_id,
696  mtr_t* mtr)
697 {
699 
700  mlog_catenate_ull_compressed(mtr, trx_id);
701 }
702 #else /* !UNIV_HOTBACKUP */
703 # define trx_undo_insert_header_reuse_log(undo_page,trx_id,mtr) ((void) 0)
704 #endif /* !UNIV_HOTBACKUP */
705 
706 /***********************************************************/
709 UNIV_INTERN
710 byte*
712 /*=======================*/
713  ulint type,
714  byte* ptr,
715  byte* end_ptr,
716  page_t* page,
717  mtr_t* mtr)
718 {
719  trx_id_t trx_id;
720  /* Silence a GCC warning about possibly uninitialized variable
721  when mach_ull_parse_compressed() is not inlined. */
722  ut_d(trx_id = 0);
723  /* Declare the variable uninitialized in Valgrind, so that the
724  above initialization will not mask any bugs. */
725  UNIV_MEM_INVALID(&trx_id, sizeof trx_id);
726 
727  ptr = mach_ull_parse_compressed(ptr, end_ptr, &trx_id);
728 
729  if (ptr == NULL) {
730 
731  return(NULL);
732  }
733 
734  if (page) {
735  if (type == MLOG_UNDO_HDR_CREATE) {
736  trx_undo_header_create(page, trx_id, mtr);
737  } else {
738  ut_ad(type == MLOG_UNDO_HDR_REUSE);
739  trx_undo_insert_header_reuse(page, trx_id, mtr);
740  }
741  }
742 
743  return(ptr);
744 }
745 
746 /***************************************************************/
751 static
752 ulint
753 trx_undo_insert_header_reuse(
754 /*=========================*/
755  page_t* undo_page,
757  trx_id_t trx_id,
758  mtr_t* mtr)
759 {
760  trx_upagef_t* page_hdr;
761  trx_usegf_t* seg_hdr;
762  trx_ulogf_t* log_hdr;
763  ulint free;
764  ulint new_free;
765 
766  ut_ad(mtr && undo_page);
767 
768  page_hdr = undo_page + TRX_UNDO_PAGE_HDR;
769  seg_hdr = undo_page + TRX_UNDO_SEG_HDR;
770 
771  free = TRX_UNDO_SEG_HDR + TRX_UNDO_SEG_HDR_SIZE;
772 
773  ut_a(free + TRX_UNDO_LOG_XA_HDR_SIZE < UNIV_PAGE_SIZE - 100);
774 
775  log_hdr = undo_page + free;
776 
777  new_free = free + TRX_UNDO_LOG_OLD_HDR_SIZE;
778 
779  /* Insert undo data is not needed after commit: we may free all
780  the space on the page */
781 
782  ut_a(mach_read_from_2(undo_page + TRX_UNDO_PAGE_HDR
784  == TRX_UNDO_INSERT);
785 
786  mach_write_to_2(page_hdr + TRX_UNDO_PAGE_START, new_free);
787 
788  mach_write_to_2(page_hdr + TRX_UNDO_PAGE_FREE, new_free);
789 
790  mach_write_to_2(seg_hdr + TRX_UNDO_STATE, TRX_UNDO_ACTIVE);
791 
792  log_hdr = undo_page + free;
793 
794  mach_write_to_8(log_hdr + TRX_UNDO_TRX_ID, trx_id);
795  mach_write_to_2(log_hdr + TRX_UNDO_LOG_START, new_free);
796 
797  mach_write_to_1(log_hdr + TRX_UNDO_XID_EXISTS, FALSE);
798  mach_write_to_1(log_hdr + TRX_UNDO_DICT_TRANS, FALSE);
799 
800  /* Write the log record MLOG_UNDO_HDR_REUSE */
801  trx_undo_insert_header_reuse_log(undo_page, trx_id, mtr);
802 
803  return(free);
804 }
805 
806 #ifndef UNIV_HOTBACKUP
807 /**********************************************************************/
809 UNIV_INLINE
810 void
811 trx_undo_discard_latest_log(
812 /*========================*/
813  page_t* undo_page,
814  mtr_t* mtr)
815 {
817 }
818 #else /* !UNIV_HOTBACKUP */
819 # define trx_undo_discard_latest_log(undo_page, mtr) ((void) 0)
820 #endif /* !UNIV_HOTBACKUP */
821 
822 /***********************************************************/
825 UNIV_INTERN
826 byte*
828 /*==========================*/
829  byte* ptr,
830  byte* end_ptr __attribute__((unused)),
831  page_t* page,
832  mtr_t* mtr)
833 {
834  ut_ad(end_ptr);
835 
836  if (page) {
837  trx_undo_discard_latest_update_undo(page, mtr);
838  }
839 
840  return(ptr);
841 }
842 
843 /**********************************************************************/
846 static
847 void
848 trx_undo_discard_latest_update_undo(
849 /*================================*/
850  page_t* undo_page,
851  mtr_t* mtr)
852 {
853  trx_usegf_t* seg_hdr;
854  trx_upagef_t* page_hdr;
855  trx_ulogf_t* log_hdr;
856  trx_ulogf_t* prev_log_hdr;
857  ulint free;
858  ulint prev_hdr_offset;
859 
860  seg_hdr = undo_page + TRX_UNDO_SEG_HDR;
861  page_hdr = undo_page + TRX_UNDO_PAGE_HDR;
862 
863  free = mach_read_from_2(seg_hdr + TRX_UNDO_LAST_LOG);
864  log_hdr = undo_page + free;
865 
866  prev_hdr_offset = mach_read_from_2(log_hdr + TRX_UNDO_PREV_LOG);
867 
868  if (prev_hdr_offset != 0) {
869  prev_log_hdr = undo_page + prev_hdr_offset;
870 
872  mach_read_from_2(prev_log_hdr
873  + TRX_UNDO_LOG_START));
874  mach_write_to_2(prev_log_hdr + TRX_UNDO_NEXT_LOG, 0);
875  }
876 
877  mach_write_to_2(page_hdr + TRX_UNDO_PAGE_FREE, free);
878 
879  mach_write_to_2(seg_hdr + TRX_UNDO_STATE, TRX_UNDO_CACHED);
880  mach_write_to_2(seg_hdr + TRX_UNDO_LAST_LOG, prev_hdr_offset);
881 
882  trx_undo_discard_latest_log(undo_page, mtr);
883 }
884 
885 #ifndef UNIV_HOTBACKUP
886 /********************************************************************/
889 UNIV_INTERN
892 /*==============*/
893  trx_t* trx,
894  trx_undo_t* undo,
895  mtr_t* mtr)
898 {
899  page_t* header_page;
900  buf_block_t* new_block;
901  page_t* new_page;
902  trx_rseg_t* rseg;
903  ulint n_reserved;
904 
905  ut_ad(mutex_own(&(trx->undo_mutex)));
906  ut_ad(mutex_own(&(trx->rseg->mutex)));
907 
908  rseg = trx->rseg;
909 
910  if (rseg->curr_size == rseg->max_size) {
911 
912  return(NULL);
913  }
914 
915  header_page = trx_undo_page_get(undo->space, undo->zip_size,
916  undo->hdr_page_no, mtr);
917 
918  if (!fsp_reserve_free_extents(&n_reserved, undo->space, 1,
919  FSP_UNDO, mtr)) {
920 
921  return(NULL);
922  }
923 
924  new_block = fseg_alloc_free_page_general(
925  TRX_UNDO_SEG_HDR + TRX_UNDO_FSEG_HEADER
926  + header_page,
927  undo->top_page_no + 1, FSP_UP, TRUE, mtr, mtr);
928 
929  fil_space_release_free_extents(undo->space, n_reserved);
930 
931  if (new_block == NULL) {
932 
933  /* No space left */
934 
935  return(NULL);
936  }
937 
938  ut_ad(rw_lock_get_x_lock_count(&new_block->lock) == 1);
939  buf_block_dbg_add_level(new_block, SYNC_TRX_UNDO_PAGE);
940  undo->last_page_no = buf_block_get_page_no(new_block);
941 
942  new_page = buf_block_get_frame(new_block);
943 
944  trx_undo_page_init(new_page, undo->type, mtr);
945 
946  flst_add_last(header_page + TRX_UNDO_SEG_HDR + TRX_UNDO_PAGE_LIST,
947  new_page + TRX_UNDO_PAGE_HDR + TRX_UNDO_PAGE_NODE, mtr);
948  undo->size++;
949  rseg->curr_size++;
950 
951  return(new_block);
952 }
953 
954 /********************************************************************/
957 static
958 ulint
959 trx_undo_free_page(
960 /*===============*/
961  trx_rseg_t* rseg,
962  ibool in_history,
964  ulint space,
965  ulint hdr_page_no,
966  ulint page_no,
968  mtr_t* mtr)
971 {
972  page_t* header_page;
973  page_t* undo_page;
974  fil_addr_t last_addr;
975  trx_rsegf_t* rseg_header;
976  ulint hist_size;
977  ulint zip_size;
978 
979  ut_a(hdr_page_no != page_no);
980  ut_ad(mutex_own(&(rseg->mutex)));
981 
982  zip_size = rseg->zip_size;
983 
984  undo_page = trx_undo_page_get(space, zip_size, page_no, mtr);
985 
986  header_page = trx_undo_page_get(space, zip_size, hdr_page_no, mtr);
987 
988  flst_remove(header_page + TRX_UNDO_SEG_HDR + TRX_UNDO_PAGE_LIST,
989  undo_page + TRX_UNDO_PAGE_HDR + TRX_UNDO_PAGE_NODE, mtr);
990 
991  fseg_free_page(header_page + TRX_UNDO_SEG_HDR + TRX_UNDO_FSEG_HEADER,
992  space, page_no, mtr);
993 
994  last_addr = flst_get_last(header_page + TRX_UNDO_SEG_HDR
995  + TRX_UNDO_PAGE_LIST, mtr);
996  rseg->curr_size--;
997 
998  if (in_history) {
999  rseg_header = trx_rsegf_get(space, zip_size,
1000  rseg->page_no, mtr);
1001 
1002  hist_size = mtr_read_ulint(rseg_header + TRX_RSEG_HISTORY_SIZE,
1003  MLOG_4BYTES, mtr);
1004  ut_ad(hist_size > 0);
1005  mlog_write_ulint(rseg_header + TRX_RSEG_HISTORY_SIZE,
1006  hist_size - 1, MLOG_4BYTES, mtr);
1007  }
1008 
1009  return(last_addr.page);
1010 }
1011 
1012 /********************************************************************/
1015 UNIV_INTERN
1016 void
1018 /*==========================*/
1019 #ifdef UNIV_DEBUG
1020  const trx_t* trx,
1021 #endif /* UNIV_DEBUG */
1022  trx_undo_t* undo,
1023  mtr_t* mtr)
1026 {
1027  ut_ad(mutex_own(&trx->undo_mutex));
1028  ut_ad(undo->hdr_page_no != undo->last_page_no);
1029  ut_ad(undo->size > 0);
1030 
1031  undo->last_page_no = trx_undo_free_page(
1032  undo->rseg, FALSE, undo->space,
1033  undo->hdr_page_no, undo->last_page_no, mtr);
1034 
1035  undo->size--;
1036 }
1037 
1038 /********************************************************************/
1041 static
1042 void
1043 trx_undo_empty_header_page(
1044 /*=======================*/
1045  ulint space,
1046  ulint zip_size,
1048  ulint hdr_page_no,
1049  ulint hdr_offset,
1050  mtr_t* mtr)
1051 {
1052  page_t* header_page;
1053  trx_ulogf_t* log_hdr;
1054  ulint end;
1055 
1056  header_page = trx_undo_page_get(space, zip_size, hdr_page_no, mtr);
1057 
1058  log_hdr = header_page + hdr_offset;
1059 
1060  end = trx_undo_page_get_end(header_page, hdr_page_no, hdr_offset);
1061 
1062  mlog_write_ulint(log_hdr + TRX_UNDO_LOG_START, end, MLOG_2BYTES, mtr);
1063 }
1064 
1065 /***********************************************************************/
1068 UNIV_INTERN
1069 void
1071 /*=======================*/
1072 #ifdef UNIV_DEBUG
1073  const trx_t* trx,
1074 #endif /* UNIV_DEBUG */
1075  trx_undo_t* undo,
1076  undo_no_t limit)
1078 {
1079  page_t* undo_page;
1080  ulint last_page_no;
1082  trx_undo_rec_t* trunc_here;
1083  mtr_t mtr;
1084 
1085  ut_ad(mutex_own(&(trx->undo_mutex)));
1086  ut_ad(mutex_own(&(trx->rseg->mutex)));
1087 
1088  for (;;) {
1089  mtr_start(&mtr);
1090 
1091  trunc_here = NULL;
1092 
1093  last_page_no = undo->last_page_no;
1094 
1095  undo_page = trx_undo_page_get(undo->space, undo->zip_size,
1096  last_page_no, &mtr);
1097 
1098  rec = trx_undo_page_get_last_rec(undo_page, undo->hdr_page_no,
1099  undo->hdr_offset);
1100  while (rec) {
1101  if (trx_undo_rec_get_undo_no(rec) >= limit) {
1102  /* Truncate at least this record off, maybe
1103  more */
1104  trunc_here = rec;
1105  } else {
1106  goto function_exit;
1107  }
1108 
1109  rec = trx_undo_page_get_prev_rec(rec,
1110  undo->hdr_page_no,
1111  undo->hdr_offset);
1112  }
1113 
1114  if (last_page_no == undo->hdr_page_no) {
1115 
1116  goto function_exit;
1117  }
1118 
1119  ut_ad(last_page_no == undo->last_page_no);
1120  trx_undo_free_last_page(trx, undo, &mtr);
1121 
1122  mtr_commit(&mtr);
1123  }
1124 
1125 function_exit:
1126  if (trunc_here) {
1127  mlog_write_ulint(undo_page + TRX_UNDO_PAGE_HDR
1129  trunc_here - undo_page, MLOG_2BYTES, &mtr);
1130  }
1131 
1132  mtr_commit(&mtr);
1133 }
1134 
1135 /***********************************************************************/
1138 UNIV_INTERN
1139 void
1141 /*====================*/
1142  trx_rseg_t* rseg,
1143  ulint space,
1144  ulint hdr_page_no,
1145  ulint hdr_offset,
1146  undo_no_t limit)
1153 {
1154  page_t* undo_page;
1156  trx_undo_rec_t* last_rec;
1157  ulint page_no;
1158  mtr_t mtr;
1159 
1160  ut_ad(mutex_own(&(rseg->mutex)));
1161 
1162  if (!limit) {
1163 
1164  return;
1165  }
1166 loop:
1167  mtr_start(&mtr);
1168 
1169  rec = trx_undo_get_first_rec(space, rseg->zip_size,
1170  hdr_page_no, hdr_offset,
1171  RW_X_LATCH, &mtr);
1172  if (rec == NULL) {
1173  /* Already empty */
1174 
1175  mtr_commit(&mtr);
1176 
1177  return;
1178  }
1179 
1180  undo_page = page_align(rec);
1181 
1182  last_rec = trx_undo_page_get_last_rec(undo_page, hdr_page_no,
1183  hdr_offset);
1184  if (trx_undo_rec_get_undo_no(last_rec) >= limit) {
1185 
1186  mtr_commit(&mtr);
1187 
1188  return;
1189  }
1190 
1191  page_no = page_get_page_no(undo_page);
1192 
1193  if (page_no == hdr_page_no) {
1194  trx_undo_empty_header_page(space, rseg->zip_size,
1195  hdr_page_no, hdr_offset,
1196  &mtr);
1197  } else {
1198  trx_undo_free_page(rseg, TRUE, space, hdr_page_no,
1199  page_no, &mtr);
1200  }
1201 
1202  mtr_commit(&mtr);
1203 
1204  goto loop;
1205 }
1206 
1207 /**********************************************************************/
1209 static
1210 void
1211 trx_undo_seg_free(
1212 /*==============*/
1213  trx_undo_t* undo)
1214 {
1215  trx_rseg_t* rseg;
1216  fseg_header_t* file_seg;
1217  trx_rsegf_t* rseg_header;
1219  ibool finished;
1220  mtr_t mtr;
1221 
1222  rseg = undo->rseg;
1223 
1224  do {
1225 
1226  mtr_start(&mtr);
1227 
1228  mutex_enter(&(rseg->mutex));
1229 
1230  seg_header = trx_undo_page_get(undo->space, undo->zip_size,
1231  undo->hdr_page_no,
1232  &mtr) + TRX_UNDO_SEG_HDR;
1233 
1234  file_seg = seg_header + TRX_UNDO_FSEG_HEADER;
1235 
1236  finished = fseg_free_step(file_seg, &mtr);
1237 
1238  if (finished) {
1239  /* Update the rseg header */
1240  rseg_header = trx_rsegf_get(
1241  rseg->space, rseg->zip_size, rseg->page_no,
1242  &mtr);
1243  trx_rsegf_set_nth_undo(rseg_header, undo->id, FIL_NULL,
1244  &mtr);
1245 
1246  MONITOR_DEC(MONITOR_NUM_UNDO_SLOT_USED);
1247  }
1248 
1249  mutex_exit(&(rseg->mutex));
1250  mtr_commit(&mtr);
1251  } while (!finished);
1252 }
1253 
1254 /*========== UNDO LOG MEMORY COPY INITIALIZATION =====================*/
1255 
1256 /********************************************************************/
1261 static
1262 trx_undo_t*
1263 trx_undo_mem_create_at_db_start(
1264 /*============================*/
1265  trx_rseg_t* rseg,
1266  ulint id,
1267  ulint page_no,
1268  mtr_t* mtr)
1269 {
1270  page_t* undo_page;
1271  trx_upagef_t* page_header;
1273  trx_ulogf_t* undo_header;
1274  trx_undo_t* undo;
1275  ulint type;
1276  ulint state;
1277  trx_id_t trx_id;
1278  ulint offset;
1279  fil_addr_t last_addr;
1280  page_t* last_page;
1282  XID xid;
1283  ibool xid_exists = FALSE;
1284 
1285  if (id >= TRX_RSEG_N_SLOTS) {
1286  fprintf(stderr,
1287  "InnoDB: Error: undo->id is %lu\n", (ulong) id);
1288  ut_error;
1289  }
1290 
1291  undo_page = trx_undo_page_get(rseg->space, rseg->zip_size,
1292  page_no, mtr);
1293 
1294  page_header = undo_page + TRX_UNDO_PAGE_HDR;
1295 
1296  type = mtr_read_ulint(page_header + TRX_UNDO_PAGE_TYPE, MLOG_2BYTES,
1297  mtr);
1298  seg_header = undo_page + TRX_UNDO_SEG_HDR;
1299 
1300  state = mach_read_from_2(seg_header + TRX_UNDO_STATE);
1301 
1302  offset = mach_read_from_2(seg_header + TRX_UNDO_LAST_LOG);
1303 
1304  undo_header = undo_page + offset;
1305 
1306  trx_id = mach_read_from_8(undo_header + TRX_UNDO_TRX_ID);
1307 
1308  xid_exists = mtr_read_ulint(undo_header + TRX_UNDO_XID_EXISTS,
1309  MLOG_1BYTE, mtr);
1310 
1311  /* Read X/Open XA transaction identification if it exists, or
1312  set it to NULL. */
1313 
1314  memset(&xid, 0, sizeof(xid));
1315  xid.formatID = -1;
1316 
1317  if (xid_exists == TRUE) {
1318  trx_undo_read_xid(undo_header, &xid);
1319  }
1320 
1321  mutex_enter(&(rseg->mutex));
1322 
1323  undo = trx_undo_mem_create(rseg, id, type, trx_id, &xid,
1324  page_no, offset);
1325  mutex_exit(&(rseg->mutex));
1326 
1328  undo_header + TRX_UNDO_DICT_TRANS, MLOG_1BYTE, mtr);
1329 
1330  undo->table_id = mach_read_from_8(undo_header + TRX_UNDO_TABLE_ID);
1331  undo->state = state;
1332  undo->size = flst_get_len(seg_header + TRX_UNDO_PAGE_LIST, mtr);
1333 
1334  /* If the log segment is being freed, the page list is inconsistent! */
1335  if (state == TRX_UNDO_TO_FREE) {
1336 
1337  goto add_to_list;
1338  }
1339 
1340  last_addr = flst_get_last(seg_header + TRX_UNDO_PAGE_LIST, mtr);
1341 
1342  undo->last_page_no = last_addr.page;
1343  undo->top_page_no = last_addr.page;
1344 
1345  last_page = trx_undo_page_get(rseg->space, rseg->zip_size,
1346  undo->last_page_no, mtr);
1347 
1348  rec = trx_undo_page_get_last_rec(last_page, page_no, offset);
1349 
1350  if (rec == NULL) {
1351  undo->empty = TRUE;
1352  } else {
1353  undo->empty = FALSE;
1354  undo->top_offset = rec - last_page;
1356  }
1357 add_to_list:
1358  if (type == TRX_UNDO_INSERT) {
1359  if (state != TRX_UNDO_CACHED) {
1360  UT_LIST_ADD_LAST(undo_list, rseg->insert_undo_list,
1361  undo);
1362  } else {
1363  UT_LIST_ADD_LAST(undo_list, rseg->insert_undo_cached,
1364  undo);
1365  MONITOR_INC(MONITOR_NUM_UNDO_SLOT_CACHED);
1366  }
1367  } else {
1368  ut_ad(type == TRX_UNDO_UPDATE);
1369  if (state != TRX_UNDO_CACHED) {
1370  UT_LIST_ADD_LAST(undo_list, rseg->update_undo_list,
1371  undo);
1372  } else {
1373  UT_LIST_ADD_LAST(undo_list, rseg->update_undo_cached,
1374  undo);
1375  MONITOR_INC(MONITOR_NUM_UNDO_SLOT_CACHED);
1376  }
1377  }
1378 
1379  return(undo);
1380 }
1381 
1382 /********************************************************************/
1387 UNIV_INTERN
1388 ulint
1390 /*================*/
1391  trx_rseg_t* rseg)
1392 {
1393  ulint size = 0;
1394  trx_rsegf_t* rseg_header;
1395  ulint i;
1396  mtr_t mtr;
1397 
1398  UT_LIST_INIT(rseg->update_undo_list);
1399  UT_LIST_INIT(rseg->update_undo_cached);
1400  UT_LIST_INIT(rseg->insert_undo_list);
1401  UT_LIST_INIT(rseg->insert_undo_cached);
1402 
1403  mtr_start(&mtr);
1404 
1405  rseg_header = trx_rsegf_get_new(
1406  rseg->space, rseg->zip_size, rseg->page_no, &mtr);
1407 
1408  for (i = 0; i < TRX_RSEG_N_SLOTS; i++) {
1409  ulint page_no;
1410 
1411  page_no = trx_rsegf_get_nth_undo(rseg_header, i, &mtr);
1412 
1413  /* In forced recovery: try to avoid operations which look
1414  at database pages; undo logs are rapidly changing data, and
1415  the probability that they are in an inconsistent state is
1416  high */
1417 
1418  if (page_no != FIL_NULL
1420 
1421  trx_undo_t* undo;
1422 
1423  undo = trx_undo_mem_create_at_db_start(
1424  rseg, i, page_no, &mtr);
1425 
1426  size += undo->size;
1427 
1428  mtr_commit(&mtr);
1429 
1430  mtr_start(&mtr);
1431 
1432  rseg_header = trx_rsegf_get(
1433  rseg->space, rseg->zip_size, rseg->page_no,
1434  &mtr);
1435 
1436  /* Found a used slot */
1437  MONITOR_INC(MONITOR_NUM_UNDO_SLOT_USED);
1438  }
1439  }
1440 
1441  mtr_commit(&mtr);
1442 
1443  return(size);
1444 }
1445 
1446 /********************************************************************/
1449 static
1450 trx_undo_t*
1451 trx_undo_mem_create(
1452 /*================*/
1453  trx_rseg_t* rseg,
1454  ulint id,
1455  ulint type,
1457  trx_id_t trx_id,
1459  const XID* xid,
1460  ulint page_no,
1461  ulint offset)
1462 {
1463  trx_undo_t* undo;
1464 
1465  ut_ad(mutex_own(&(rseg->mutex)));
1466 
1467  if (id >= TRX_RSEG_N_SLOTS) {
1468  fprintf(stderr,
1469  "InnoDB: Error: undo->id is %lu\n", (ulong) id);
1470  ut_error;
1471  }
1472 
1473  undo = static_cast<trx_undo_t*>(mem_alloc(sizeof(*undo)));
1474 
1475  if (undo == NULL) {
1476 
1477  return(NULL);
1478  }
1479 
1480  undo->id = id;
1481  undo->type = type;
1482  undo->state = TRX_UNDO_ACTIVE;
1483  undo->del_marks = FALSE;
1484  undo->trx_id = trx_id;
1485  undo->xid = *xid;
1486 
1487  undo->dict_operation = FALSE;
1488 
1489  undo->rseg = rseg;
1490 
1491  undo->space = rseg->space;
1492  undo->zip_size = rseg->zip_size;
1493  undo->hdr_page_no = page_no;
1494  undo->hdr_offset = offset;
1495  undo->last_page_no = page_no;
1496  undo->size = 1;
1497 
1498  undo->empty = TRUE;
1499  undo->top_page_no = page_no;
1500  undo->guess_block = NULL;
1501 
1502  return(undo);
1503 }
1504 
1505 /********************************************************************/
1507 static
1508 void
1509 trx_undo_mem_init_for_reuse(
1510 /*========================*/
1511  trx_undo_t* undo,
1512  trx_id_t trx_id,
1514  const XID* xid,
1515  ulint offset)
1516 {
1517  ut_ad(mutex_own(&((undo->rseg)->mutex)));
1518 
1519  if (UNIV_UNLIKELY(undo->id >= TRX_RSEG_N_SLOTS)) {
1520  fprintf(stderr, "InnoDB: Error: undo->id is %lu\n",
1521  (ulong) undo->id);
1522 
1523  mem_analyze_corruption(undo);
1524  ut_error;
1525  }
1526 
1527  undo->state = TRX_UNDO_ACTIVE;
1528  undo->del_marks = FALSE;
1529  undo->trx_id = trx_id;
1530  undo->xid = *xid;
1531 
1532  undo->dict_operation = FALSE;
1533 
1534  undo->hdr_offset = offset;
1535  undo->empty = TRUE;
1536 }
1537 
1538 /********************************************************************/
1540 UNIV_INTERN
1541 void
1542 trx_undo_mem_free(
1543 /*==============*/
1544  trx_undo_t* undo)
1545 {
1546  if (undo->id >= TRX_RSEG_N_SLOTS) {
1547  fprintf(stderr,
1548  "InnoDB: Error: undo->id is %lu\n", (ulong) undo->id);
1549  ut_error;
1550  }
1551 
1552  mem_free(undo);
1553 }
1554 
1555 /**********************************************************************/
1560 static __attribute__((nonnull, warn_unused_result))
1561 dberr_t
1562 trx_undo_create(
1563 /*============*/
1564  trx_t* trx,
1565  trx_rseg_t* rseg,
1566  ulint type,
1568  trx_id_t trx_id,
1570  const XID* xid,
1571  trx_undo_t** undo,
1573  mtr_t* mtr)
1574 {
1575  trx_rsegf_t* rseg_header;
1576  ulint page_no;
1577  ulint offset;
1578  ulint id;
1579  page_t* undo_page;
1580  dberr_t err;
1581 
1582  ut_ad(mutex_own(&(rseg->mutex)));
1583 
1584  if (rseg->curr_size == rseg->max_size) {
1585 
1586  return(DB_OUT_OF_FILE_SPACE);
1587  }
1588 
1589  rseg->curr_size++;
1590 
1591  rseg_header = trx_rsegf_get(rseg->space, rseg->zip_size, rseg->page_no,
1592  mtr);
1593 
1594  err = trx_undo_seg_create(rseg, rseg_header, type, &id,
1595  &undo_page, mtr);
1596 
1597  if (err != DB_SUCCESS) {
1598  /* Did not succeed */
1599 
1600  rseg->curr_size--;
1601 
1602  return(err);
1603  }
1604 
1605  page_no = page_get_page_no(undo_page);
1606 
1607  offset = trx_undo_header_create(undo_page, trx_id, mtr);
1608 
1609  if (trx->support_xa) {
1610  trx_undo_header_add_space_for_xid(undo_page,
1611  undo_page + offset, mtr);
1612  }
1613 
1614  *undo = trx_undo_mem_create(rseg, id, type, trx_id, xid,
1615  page_no, offset);
1616  if (*undo == NULL) {
1617 
1618  err = DB_OUT_OF_MEMORY;
1619  }
1620 
1621  return(err);
1622 }
1623 
1624 /*================ UNDO LOG ASSIGNMENT AND CLEANUP =====================*/
1625 
1626 /********************************************************************/
1629 static
1630 trx_undo_t*
1631 trx_undo_reuse_cached(
1632 /*==================*/
1633  trx_t* trx,
1634  trx_rseg_t* rseg,
1635  ulint type,
1637  trx_id_t trx_id,
1639  const XID* xid,
1640  mtr_t* mtr)
1641 {
1642  trx_undo_t* undo;
1643  page_t* undo_page;
1644  ulint offset;
1645 
1646  ut_ad(mutex_own(&(rseg->mutex)));
1647 
1648  if (type == TRX_UNDO_INSERT) {
1649 
1650  undo = UT_LIST_GET_FIRST(rseg->insert_undo_cached);
1651  if (undo == NULL) {
1652 
1653  return(NULL);
1654  }
1655 
1656  UT_LIST_REMOVE(undo_list, rseg->insert_undo_cached, undo);
1657 
1658  MONITOR_DEC(MONITOR_NUM_UNDO_SLOT_CACHED);
1659  } else {
1660  ut_ad(type == TRX_UNDO_UPDATE);
1661 
1662  undo = UT_LIST_GET_FIRST(rseg->update_undo_cached);
1663  if (undo == NULL) {
1664 
1665  return(NULL);
1666  }
1667 
1668  UT_LIST_REMOVE(undo_list, rseg->update_undo_cached, undo);
1669 
1670  MONITOR_DEC(MONITOR_NUM_UNDO_SLOT_CACHED);
1671  }
1672 
1673  ut_ad(undo->size == 1);
1674 
1675  if (undo->id >= TRX_RSEG_N_SLOTS) {
1676  fprintf(stderr, "InnoDB: Error: undo->id is %lu\n",
1677  (ulong) undo->id);
1678  mem_analyze_corruption(undo);
1679  ut_error;
1680  }
1681 
1682  undo_page = trx_undo_page_get(undo->space, undo->zip_size,
1683  undo->hdr_page_no, mtr);
1684 
1685  if (type == TRX_UNDO_INSERT) {
1686  offset = trx_undo_insert_header_reuse(undo_page, trx_id, mtr);
1687 
1688  if (trx->support_xa) {
1689  trx_undo_header_add_space_for_xid(
1690  undo_page, undo_page + offset, mtr);
1691  }
1692  } else {
1693  ut_a(mach_read_from_2(undo_page + TRX_UNDO_PAGE_HDR
1695  == TRX_UNDO_UPDATE);
1696 
1697  offset = trx_undo_header_create(undo_page, trx_id, mtr);
1698 
1699  if (trx->support_xa) {
1700  trx_undo_header_add_space_for_xid(
1701  undo_page, undo_page + offset, mtr);
1702  }
1703  }
1704 
1705  trx_undo_mem_init_for_reuse(undo, trx_id, xid, offset);
1706 
1707  return(undo);
1708 }
1709 
1710 /**********************************************************************/
1713 static
1714 void
1715 trx_undo_mark_as_dict_operation(
1716 /*============================*/
1717  trx_t* trx,
1718  trx_undo_t* undo,
1719  mtr_t* mtr)
1720 {
1721  page_t* hdr_page;
1722 
1723  hdr_page = trx_undo_page_get(undo->space, undo->zip_size,
1724  undo->hdr_page_no, mtr);
1725 
1726  switch (trx_get_dict_operation(trx)) {
1727  case TRX_DICT_OP_NONE:
1728  ut_error;
1729  case TRX_DICT_OP_INDEX:
1730  /* Do not discard the table on recovery. */
1731  undo->table_id = 0;
1732  break;
1733  case TRX_DICT_OP_TABLE:
1734  undo->table_id = trx->table_id;
1735  break;
1736  }
1737 
1738  mlog_write_ulint(hdr_page + undo->hdr_offset
1740  TRUE, MLOG_1BYTE, mtr);
1741 
1742  mlog_write_ull(hdr_page + undo->hdr_offset + TRX_UNDO_TABLE_ID,
1743  undo->table_id, mtr);
1744 
1745  undo->dict_operation = TRUE;
1746 }
1747 
1748 /**********************************************************************/
1754 UNIV_INTERN
1755 dberr_t
1757 /*=================*/
1758  trx_t* trx,
1759  ulint type)
1760 {
1761  trx_rseg_t* rseg;
1762  trx_undo_t* undo;
1763  mtr_t mtr;
1764  dberr_t err = DB_SUCCESS;
1765 
1766  ut_ad(trx);
1767 
1768  if (trx->rseg == NULL) {
1769  return(DB_READ_ONLY);
1770  }
1771 
1772  rseg = trx->rseg;
1773 
1774  ut_ad(mutex_own(&(trx->undo_mutex)));
1775 
1776  mtr_start(&mtr);
1777 
1778  mutex_enter(&rseg->mutex);
1779 
1780  DBUG_EXECUTE_IF(
1781  "ib_create_table_fail_too_many_trx",
1783  goto func_exit;
1784  );
1785 
1786  undo = trx_undo_reuse_cached(trx, rseg, type, trx->id, &trx->xid,
1787  &mtr);
1788  if (undo == NULL) {
1789  err = trx_undo_create(trx, rseg, type, trx->id, &trx->xid,
1790  &undo, &mtr);
1791  if (err != DB_SUCCESS) {
1792 
1793  goto func_exit;
1794  }
1795  }
1796 
1797  if (type == TRX_UNDO_INSERT) {
1798  UT_LIST_ADD_FIRST(undo_list, rseg->insert_undo_list, undo);
1799  ut_ad(trx->insert_undo == NULL);
1800  trx->insert_undo = undo;
1801  } else {
1802  UT_LIST_ADD_FIRST(undo_list, rseg->update_undo_list, undo);
1803  ut_ad(trx->update_undo == NULL);
1804  trx->update_undo = undo;
1805  }
1806 
1808  trx_undo_mark_as_dict_operation(trx, undo, &mtr);
1809  }
1810 
1811 func_exit:
1812  mutex_exit(&(rseg->mutex));
1813  mtr_commit(&mtr);
1814 
1815  return(err);
1816 }
1817 
1818 /******************************************************************/
1821 UNIV_INTERN
1822 page_t*
1824 /*=========================*/
1825  trx_undo_t* undo,
1826  mtr_t* mtr)
1827 {
1828  trx_usegf_t* seg_hdr;
1829  trx_upagef_t* page_hdr;
1830  page_t* undo_page;
1831  ulint state;
1832 
1833  if (undo->id >= TRX_RSEG_N_SLOTS) {
1834  fprintf(stderr, "InnoDB: Error: undo->id is %lu\n",
1835  (ulong) undo->id);
1836  mem_analyze_corruption(undo);
1837  ut_error;
1838  }
1839 
1840  undo_page = trx_undo_page_get(undo->space, undo->zip_size,
1841  undo->hdr_page_no, mtr);
1842 
1843  seg_hdr = undo_page + TRX_UNDO_SEG_HDR;
1844  page_hdr = undo_page + TRX_UNDO_PAGE_HDR;
1845 
1846  if (undo->size == 1
1847  && mach_read_from_2(page_hdr + TRX_UNDO_PAGE_FREE)
1849 
1850  state = TRX_UNDO_CACHED;
1851 
1852  } else if (undo->type == TRX_UNDO_INSERT) {
1853 
1854  state = TRX_UNDO_TO_FREE;
1855  } else {
1856  state = TRX_UNDO_TO_PURGE;
1857  }
1858 
1859  undo->state = state;
1860 
1861  mlog_write_ulint(seg_hdr + TRX_UNDO_STATE, state, MLOG_2BYTES, mtr);
1862 
1863  return(undo_page);
1864 }
1865 
1866 /******************************************************************/
1869 UNIV_INTERN
1870 page_t*
1872 /*==========================*/
1873  trx_t* trx,
1874  trx_undo_t* undo,
1875  mtr_t* mtr)
1876 {
1877  trx_usegf_t* seg_hdr;
1878  trx_ulogf_t* undo_header;
1879  page_t* undo_page;
1880  ulint offset;
1881 
1882  ut_ad(trx && undo && mtr);
1883 
1884  if (undo->id >= TRX_RSEG_N_SLOTS) {
1885  fprintf(stderr, "InnoDB: Error: undo->id is %lu\n",
1886  (ulong) undo->id);
1887  mem_analyze_corruption(undo);
1888  ut_error;
1889  }
1890 
1891  undo_page = trx_undo_page_get(undo->space, undo->zip_size,
1892  undo->hdr_page_no, mtr);
1893 
1894  seg_hdr = undo_page + TRX_UNDO_SEG_HDR;
1895 
1896  /*------------------------------*/
1897  undo->state = TRX_UNDO_PREPARED;
1898  undo->xid = trx->xid;
1899  /*------------------------------*/
1900 
1901  mlog_write_ulint(seg_hdr + TRX_UNDO_STATE, undo->state,
1902  MLOG_2BYTES, mtr);
1903 
1904  offset = mach_read_from_2(seg_hdr + TRX_UNDO_LAST_LOG);
1905  undo_header = undo_page + offset;
1906 
1907  mlog_write_ulint(undo_header + TRX_UNDO_XID_EXISTS,
1908  TRUE, MLOG_1BYTE, mtr);
1909 
1910  trx_undo_write_xid(undo_header, &undo->xid, mtr);
1911 
1912  return(undo_page);
1913 }
1914 
1915 /**********************************************************************/
1919 UNIV_INTERN
1920 void
1922 /*====================*/
1923  trx_t* trx,
1924  page_t* undo_page,
1926  mtr_t* mtr)
1927 {
1928  trx_rseg_t* rseg;
1929  trx_undo_t* undo;
1930 
1931  undo = trx->update_undo;
1932  rseg = trx->rseg;
1933 
1934  ut_ad(mutex_own(&(rseg->mutex)));
1935 
1936  trx_purge_add_update_undo_to_history(trx, undo_page, mtr);
1937 
1938  UT_LIST_REMOVE(undo_list, rseg->update_undo_list, undo);
1939 
1940  trx->update_undo = NULL;
1941 
1942  if (undo->state == TRX_UNDO_CACHED) {
1943 
1944  UT_LIST_ADD_FIRST(undo_list, rseg->update_undo_cached, undo);
1945 
1946  MONITOR_INC(MONITOR_NUM_UNDO_SLOT_CACHED);
1947  } else {
1948  ut_ad(undo->state == TRX_UNDO_TO_PURGE);
1949 
1950  trx_undo_mem_free(undo);
1951  }
1952 }
1953 
1954 /******************************************************************/
1958 UNIV_INTERN
1959 void
1961 /*====================*/
1962  trx_t* trx)
1963 {
1964  trx_undo_t* undo;
1965  trx_rseg_t* rseg;
1966 
1967  undo = trx->insert_undo;
1968  ut_ad(undo);
1969 
1970  rseg = trx->rseg;
1971 
1972  mutex_enter(&(rseg->mutex));
1973 
1974  UT_LIST_REMOVE(undo_list, rseg->insert_undo_list, undo);
1975  trx->insert_undo = NULL;
1976 
1977  if (undo->state == TRX_UNDO_CACHED) {
1978 
1979  UT_LIST_ADD_FIRST(undo_list, rseg->insert_undo_cached, undo);
1980 
1981  MONITOR_INC(MONITOR_NUM_UNDO_SLOT_CACHED);
1982  } else {
1983  ut_ad(undo->state == TRX_UNDO_TO_FREE);
1984 
1985  /* Delete first the undo log segment in the file */
1986 
1987  mutex_exit(&(rseg->mutex));
1988 
1989  trx_undo_seg_free(undo);
1990 
1991  mutex_enter(&(rseg->mutex));
1992 
1993  ut_ad(rseg->curr_size > undo->size);
1994 
1995  rseg->curr_size -= undo->size;
1996 
1997  trx_undo_mem_free(undo);
1998  }
1999 
2000  mutex_exit(&(rseg->mutex));
2001 }
2002 
2003 /********************************************************************/
2005 UNIV_INTERN
2006 void
2008 /*===================*/
2009  trx_t* trx)
2010 {
2012 
2013  if (trx->update_undo) {
2014  ut_a(trx->update_undo->state == TRX_UNDO_PREPARED);
2015  UT_LIST_REMOVE(undo_list, trx->rseg->update_undo_list,
2016  trx->update_undo);
2017  trx_undo_mem_free(trx->update_undo);
2018  }
2019  if (trx->insert_undo) {
2020  ut_a(trx->insert_undo->state == TRX_UNDO_PREPARED);
2021  UT_LIST_REMOVE(undo_list, trx->rseg->insert_undo_list,
2022  trx->insert_undo);
2023  trx_undo_mem_free(trx->insert_undo);
2024  }
2025 }
2026 #endif /* !UNIV_HOTBACKUP */