MySQL 5.6.14 Source Code Document
 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Groups Pages
row0upd.cc
Go to the documentation of this file.
1 /*****************************************************************************
2 
3 Copyright (c) 1996, 2012, Oracle and/or its affiliates. All Rights Reserved.
4 
5 This program is free software; you can redistribute it and/or modify it under
6 the terms of the GNU General Public License as published by the Free Software
7 Foundation; version 2 of the License.
8 
9 This program is distributed in the hope that it will be useful, but WITHOUT
10 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
11 FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
12 
13 You should have received a copy of the GNU General Public License along with
14 this program; if not, write to the Free Software Foundation, Inc.,
15 51 Franklin Street, Suite 500, Boston, MA 02110-1335 USA
16 
17 *****************************************************************************/
18 
19 /**************************************************/
26 #include "row0upd.h"
27 
28 #ifdef UNIV_NONINL
29 #include "row0upd.ic"
30 #endif
31 
32 #include "ha_prototypes.h"
33 #include "dict0dict.h"
34 #include "trx0undo.h"
35 #include "rem0rec.h"
36 #ifndef UNIV_HOTBACKUP
37 #include "dict0boot.h"
38 #include "dict0crea.h"
39 #include "mach0data.h"
40 #include "btr0btr.h"
41 #include "btr0cur.h"
42 #include "que0que.h"
43 #include "row0ext.h"
44 #include "row0ins.h"
45 #include "row0log.h"
46 #include "row0row.h"
47 #include "row0sel.h"
48 #include "rem0cmp.h"
49 #include "lock0lock.h"
50 #include "log0log.h"
51 #include "pars0sym.h"
52 #include "eval0eval.h"
53 #include "buf0lru.h"
54 
55 
56 /* What kind of latch and lock can we assume when the control comes to
57  -------------------------------------------------------------------
58 an update node?
59 --------------
60 Efficiency of massive updates would require keeping an x-latch on a
61 clustered index page through many updates, and not setting an explicit
62 x-lock on clustered index records, as they anyway will get an implicit
63 x-lock when they are updated. A problem is that the read nodes in the
64 graph should know that they must keep the latch when passing the control
65 up to the update node, and not set any record lock on the record which
66 will be updated. Another problem occurs if the execution is stopped,
67 as the kernel switches to another query thread, or the transaction must
68 wait for a lock. Then we should be able to release the latch and, maybe,
69 acquire an explicit x-lock on the record.
70  Because this seems too complicated, we conclude that the less
71 efficient solution of releasing all the latches when the control is
72 transferred to another node, and acquiring explicit x-locks, is better. */
73 
74 /* How is a delete performed? If there is a delete without an
75 explicit cursor, i.e., a searched delete, there are at least
76 two different situations:
77 the implicit select cursor may run on (1) the clustered index or
78 on (2) a secondary index. The delete is performed by setting
79 the delete bit in the record and substituting the id of the
80 deleting transaction for the original trx id, and substituting a
81 new roll ptr for previous roll ptr. The old trx id and roll ptr
82 are saved in the undo log record. Thus, no physical changes occur
83 in the index tree structure at the time of the delete. Only
84 when the undo log is purged, the index records will be physically
85 deleted from the index trees.
86 
87 The query graph executing a searched delete would consist of
88 a delete node which has as a subtree a select subgraph.
89 The select subgraph should return a (persistent) cursor
90 in the clustered index, placed on page which is x-latched.
91 The delete node should look for all secondary index records for
92 this clustered index entry and mark them as deleted. When is
93 the x-latch freed? The most efficient way for performing a
94 searched delete is obviously to keep the x-latch for several
95 steps of query graph execution. */
96 
97 /*************************************************************************
98 IMPORTANT NOTE: Any operation that generates redo MUST check that there
99 is enough space in the redo log before for that operation. This is
100 done by calling log_free_check(). The reason for checking the
101 availability of the redo log space before the start of the operation is
102 that we MUST not hold any synchronization objects when performing the
103 check.
104 If you make a change in this module make sure that no codepath is
105 introduced where a call to log_free_check() is bypassed. */
106 
107 /***********************************************************/
112 static
113 ibool
114 row_upd_changes_first_fields_binary(
115 /*================================*/
116  dtuple_t* entry,
118  const upd_t* update,
119  ulint n);
122 /*********************************************************************/
131 static
132 ibool
133 row_upd_index_is_referenced(
134 /*========================*/
136  trx_t* trx)
137 {
138  dict_table_t* table = index->table;
139  dict_foreign_t* foreign;
140  ibool froze_data_dict = FALSE;
141  ibool is_referenced = FALSE;
142 
143  if (!UT_LIST_GET_FIRST(table->referenced_list)) {
144 
145  return(FALSE);
146  }
147 
148  if (trx->dict_operation_lock_mode == 0) {
149  row_mysql_freeze_data_dictionary(trx);
150  froze_data_dict = TRUE;
151  }
152 
153  foreign = UT_LIST_GET_FIRST(table->referenced_list);
154 
155  while (foreign) {
156  if (foreign->referenced_index == index) {
157 
158  is_referenced = TRUE;
159  goto func_exit;
160  }
161 
162  foreign = UT_LIST_GET_NEXT(referenced_list, foreign);
163  }
164 
165 func_exit:
166  if (froze_data_dict) {
168  }
169 
170  return(is_referenced);
171 }
172 
173 /*********************************************************************/
181 static __attribute__((nonnull, warn_unused_result))
182 dberr_t
183 row_upd_check_references_constraints(
184 /*=================================*/
185  upd_node_t* node,
186  btr_pcur_t* pcur,
188  dict_table_t* table,
189  dict_index_t* index,
190  ulint* offsets,
191  que_thr_t* thr,
192  mtr_t* mtr)
193 {
194  dict_foreign_t* foreign;
195  mem_heap_t* heap;
196  dtuple_t* entry;
197  trx_t* trx;
198  const rec_t* rec;
199  ulint n_ext;
200  dberr_t err;
201  ibool got_s_lock = FALSE;
202 
203  if (UT_LIST_GET_FIRST(table->referenced_list) == NULL) {
204 
205  return(DB_SUCCESS);
206  }
207 
208  trx = thr_get_trx(thr);
209 
210  rec = btr_pcur_get_rec(pcur);
211  ut_ad(rec_offs_validate(rec, index, offsets));
212 
213  heap = mem_heap_create(500);
214 
215  entry = row_rec_to_index_entry(rec, index, offsets, &n_ext, heap);
216 
217  mtr_commit(mtr);
218 
219  DEBUG_SYNC_C("foreign_constraint_check_for_update");
220 
221  mtr_start(mtr);
222 
223  if (trx->dict_operation_lock_mode == 0) {
224  got_s_lock = TRUE;
225 
226  row_mysql_freeze_data_dictionary(trx);
227  }
228 
229 run_again:
230  foreign = UT_LIST_GET_FIRST(table->referenced_list);
231 
232  while (foreign) {
233  /* Note that we may have an update which updates the index
234  record, but does NOT update the first fields which are
235  referenced in a foreign key constraint. Then the update does
236  NOT break the constraint. */
237 
238  if (foreign->referenced_index == index
239  && (node->is_delete
240  || row_upd_changes_first_fields_binary(
241  entry, index, node->update,
242  foreign->n_fields))) {
243  dict_table_t* foreign_table = foreign->foreign_table;
244 
245  dict_table_t* ref_table = NULL;
246 
247  if (foreign_table == NULL) {
248 
249  ref_table = dict_table_open_on_name(
250  foreign->foreign_table_name_lookup,
251  FALSE, FALSE, DICT_ERR_IGNORE_NONE);
252  }
253 
254  if (foreign_table) {
255  os_inc_counter(dict_sys->mutex,
256  foreign_table
258  }
259 
260  /* NOTE that if the thread ends up waiting for a lock
261  we will release dict_operation_lock temporarily!
262  But the counter on the table protects 'foreign' from
263  being dropped while the check is running. */
264 
266  FALSE, foreign, table, entry, thr);
267 
268  if (foreign_table) {
269  os_dec_counter(dict_sys->mutex,
270  foreign_table
272  }
273 
274  if (ref_table != NULL) {
275  dict_table_close(ref_table, FALSE, FALSE);
276  }
277 
278  /* Some table foreign key dropped, try again */
279  if (err == DB_DICT_CHANGED) {
280  goto run_again;
281  } else if (err != DB_SUCCESS) {
282  goto func_exit;
283  }
284  }
285 
286  foreign = UT_LIST_GET_NEXT(referenced_list, foreign);
287  }
288 
289  err = DB_SUCCESS;
290 
291 func_exit:
292  if (got_s_lock) {
294  }
295 
296  mem_heap_free(heap);
297 
298  DEBUG_SYNC_C("foreign_constraint_check_for_update_done");
299 
300  return(err);
301 }
302 
303 /*********************************************************************/
306 UNIV_INTERN
307 upd_node_t*
309 /*============*/
310  mem_heap_t* heap)
311 {
312  upd_node_t* node;
313 
314  node = static_cast<upd_node_t*>(
315  mem_heap_alloc(heap, sizeof(upd_node_t)));
316 
317  node->common.type = QUE_NODE_UPDATE;
318 
319  node->state = UPD_NODE_UPDATE_CLUSTERED;
320  node->in_mysql_interface = FALSE;
321 
322  node->row = NULL;
323  node->ext = NULL;
324  node->upd_row = NULL;
325  node->upd_ext = NULL;
326  node->index = NULL;
327  node->update = NULL;
328 
329  node->foreign = NULL;
330  node->cascade_heap = NULL;
331  node->cascade_node = NULL;
332 
333  node->select = NULL;
334 
335  node->heap = mem_heap_create(128);
336  node->magic_n = UPD_NODE_MAGIC_N;
337 
338  node->cmpl_info = 0;
339 
340  return(node);
341 }
342 #endif /* !UNIV_HOTBACKUP */
343 
344 /*********************************************************************/
347 UNIV_INTERN
348 void
350 /*===============================*/
351  rec_t* rec,
352  page_zip_des_t* page_zip,
353  const ulint* offsets,
354  ulint pos,
355  trx_id_t trx_id,
356  roll_ptr_t roll_ptr)
357 {
358  ut_ad(rec_offs_validate(rec, NULL, offsets));
359 
360  if (page_zip) {
362  page_zip, rec, offsets, pos, trx_id, roll_ptr);
363  } else {
364  byte* field;
365  ulint len;
366 
367  field = rec_get_nth_field(rec, offsets, pos, &len);
368  ut_ad(len == DATA_TRX_ID_LEN);
369 #if DATA_TRX_ID + 1 != DATA_ROLL_PTR
370 # error "DATA_TRX_ID + 1 != DATA_ROLL_PTR"
371 #endif
372  trx_write_trx_id(field, trx_id);
373  trx_write_roll_ptr(field + DATA_TRX_ID_LEN, roll_ptr);
374  }
375 }
376 
377 #ifndef UNIV_HOTBACKUP
378 /*********************************************************************/
380 UNIV_INTERN
381 void
383 /*==========================*/
384  dtuple_t* entry,
388  dict_index_t* index,
389  ulint type,
390  ib_uint64_t val)
391 {
392  dfield_t* dfield;
393  byte* field;
394  ulint pos;
395 
396  ut_ad(dict_index_is_clust(index));
397 
398  pos = dict_index_get_sys_col_pos(index, type);
399 
400  dfield = dtuple_get_nth_field(entry, pos);
401  field = static_cast<byte*>(dfield_get_data(dfield));
402 
403  if (type == DATA_TRX_ID) {
404  trx_write_trx_id(field, val);
405  } else {
406  ut_ad(type == DATA_ROLL_PTR);
407  trx_write_roll_ptr(field, val);
408  }
409 }
410 
411 /***********************************************************/
416 UNIV_INTERN
417 ibool
419 /*===================================*/
420  dict_index_t* index,
421  const ulint* offsets,
422  const upd_t* update)
423 {
424  const upd_field_t* upd_field;
425  const dfield_t* new_val;
426  ulint old_len;
427  ulint new_len;
428  ulint n_fields;
429  ulint i;
430 
431  ut_ad(rec_offs_validate(NULL, index, offsets));
432  n_fields = upd_get_n_fields(update);
433 
434  for (i = 0; i < n_fields; i++) {
435  upd_field = upd_get_nth_field(update, i);
436 
437  new_val = &(upd_field->new_val);
438  new_len = dfield_get_len(new_val);
439 
440  if (dfield_is_null(new_val) && !rec_offs_comp(offsets)) {
441  /* A bug fixed on Dec 31st, 2004: we looked at the
442  SQL NULL size from the wrong field! We may backport
443  this fix also to 4.0. The merge to 5.0 will be made
444  manually immediately after we commit this to 4.1. */
445 
446  new_len = dict_col_get_sql_null_size(
448  upd_field->field_no),
449  0);
450  }
451 
452  old_len = rec_offs_nth_size(offsets, upd_field->field_no);
453 
454  if (rec_offs_comp(offsets)
455  && rec_offs_nth_sql_null(offsets,
456  upd_field->field_no)) {
457  /* Note that in the compact table format, for a
458  variable length field, an SQL NULL will use zero
459  bytes in the offset array at the start of the physical
460  record, but a zero-length value (empty string) will
461  use one byte! Thus, we cannot use update-in-place
462  if we update an SQL NULL varchar to an empty string! */
463 
464  old_len = UNIV_SQL_NULL;
465  }
466 
467  if (dfield_is_ext(new_val) || old_len != new_len
468  || rec_offs_nth_extern(offsets, upd_field->field_no)) {
469 
470  return(TRUE);
471  }
472  }
473 
474  return(FALSE);
475 }
476 
477 /***********************************************************/
480 UNIV_INTERN
481 bool
483 /*==============================*/
484  const upd_t* update)
485 {
486  const upd_field_t* upd_field;
487  const dfield_t* new_val;
488  ulint new_len;
489  ulint n_fields;
490  ulint i;
491 
492  n_fields = upd_get_n_fields(update);
493 
494  for (i = 0; i < n_fields; i++) {
495  const byte* field_ref;
496 
497  upd_field = upd_get_nth_field(update, i);
498  new_val = &(upd_field->new_val);
499  new_len = dfield_get_len(new_val);
500 
501  if (!dfield_is_ext(new_val)) {
502  continue;
503  }
504 
505  ut_ad(new_len >= BTR_EXTERN_FIELD_REF_SIZE);
506 
507  field_ref = static_cast<const byte*>(dfield_get_data(new_val))
508  + new_len - BTR_EXTERN_FIELD_REF_SIZE;
509 
510  if (field_ref[BTR_EXTERN_LEN] & BTR_EXTERN_OWNER_FLAG) {
511  return(true);
512  }
513  }
514 
515  return(false);
516 }
517 #endif /* !UNIV_HOTBACKUP */
518 
519 /***********************************************************/
525 UNIV_INTERN
526 void
528 /*=================*/
529  rec_t* rec,
530  dict_index_t* index,
531  const ulint* offsets,
532  const upd_t* update,
533  page_zip_des_t* page_zip)
535 {
536  const upd_field_t* upd_field;
537  const dfield_t* new_val;
538  ulint n_fields;
539  ulint i;
540 
541  ut_ad(rec_offs_validate(rec, index, offsets));
542 
543  if (rec_offs_comp(offsets)) {
544  rec_set_info_bits_new(rec, update->info_bits);
545  } else {
546  rec_set_info_bits_old(rec, update->info_bits);
547  }
548 
549  n_fields = upd_get_n_fields(update);
550 
551  for (i = 0; i < n_fields; i++) {
552 #ifdef UNIV_BLOB_DEBUG
553  btr_blob_dbg_t b;
554  const byte* field_ref = NULL;
555 #endif /* UNIV_BLOB_DEBUG */
556 
557  upd_field = upd_get_nth_field(update, i);
558  new_val = &(upd_field->new_val);
559  ut_ad(!dfield_is_ext(new_val) ==
560  !rec_offs_nth_extern(offsets, upd_field->field_no));
561 #ifdef UNIV_BLOB_DEBUG
562  if (dfield_is_ext(new_val)) {
563  ulint len;
564  field_ref = rec_get_nth_field(rec, offsets, i, &len);
565  ut_a(len != UNIV_SQL_NULL);
567  field_ref += len - BTR_EXTERN_FIELD_REF_SIZE;
568 
569  b.ref_page_no = page_get_page_no(page_align(rec));
570  b.ref_heap_no = page_rec_get_heap_no(rec);
571  b.ref_field_no = i;
572  b.blob_page_no = mach_read_from_4(
573  field_ref + BTR_EXTERN_PAGE_NO);
574  ut_a(b.ref_field_no >= index->n_uniq);
575  btr_blob_dbg_rbt_delete(index, &b, "upd_in_place");
576  }
577 #endif /* UNIV_BLOB_DEBUG */
578 
579  rec_set_nth_field(rec, offsets, upd_field->field_no,
580  dfield_get_data(new_val),
581  dfield_get_len(new_val));
582 
583 #ifdef UNIV_BLOB_DEBUG
584  if (dfield_is_ext(new_val)) {
585  b.blob_page_no = mach_read_from_4(
586  field_ref + BTR_EXTERN_PAGE_NO);
587  b.always_owner = b.owner = !(field_ref[BTR_EXTERN_LEN]
589  b.del = rec_get_deleted_flag(
590  rec, rec_offs_comp(offsets));
591 
592  btr_blob_dbg_rbt_insert(index, &b, "upd_in_place");
593  }
594 #endif /* UNIV_BLOB_DEBUG */
595  }
596 
597  if (page_zip) {
598  page_zip_write_rec(page_zip, rec, index, offsets, 0);
599  }
600 }
601 
602 #ifndef UNIV_HOTBACKUP
603 /*********************************************************************/
607 UNIV_INTERN
608 byte*
610 /*==========================*/
611  dict_index_t* index,
612  trx_id_t trx_id,
613  roll_ptr_t roll_ptr,
614  byte* log_ptr,
616  mtr_t* mtr __attribute__((unused)))
617 {
618  ut_ad(dict_index_is_clust(index));
619  ut_ad(mtr);
620 
621  log_ptr += mach_write_compressed(log_ptr,
623  index, DATA_TRX_ID));
624 
625  trx_write_roll_ptr(log_ptr, roll_ptr);
626  log_ptr += DATA_ROLL_PTR_LEN;
627 
628  log_ptr += mach_ull_write_compressed(log_ptr, trx_id);
629 
630  return(log_ptr);
631 }
632 #endif /* !UNIV_HOTBACKUP */
633 
634 /*********************************************************************/
637 UNIV_INTERN
638 byte*
640 /*===================*/
641  byte* ptr,
642  byte* end_ptr,
643  ulint* pos,
644  trx_id_t* trx_id,
645  roll_ptr_t* roll_ptr)
646 {
647  ptr = mach_parse_compressed(ptr, end_ptr, pos);
648 
649  if (ptr == NULL) {
650 
651  return(NULL);
652  }
653 
654  if (end_ptr < ptr + DATA_ROLL_PTR_LEN) {
655 
656  return(NULL);
657  }
658 
659  *roll_ptr = trx_read_roll_ptr(ptr);
660  ptr += DATA_ROLL_PTR_LEN;
661 
662  ptr = mach_ull_parse_compressed(ptr, end_ptr, trx_id);
663 
664  return(ptr);
665 }
666 
667 #ifndef UNIV_HOTBACKUP
668 /***********************************************************/
670 UNIV_INTERN
671 void
673 /*====================*/
674  const upd_t* update,
675  byte* log_ptr,
679  mtr_t* mtr)
680 {
681  const upd_field_t* upd_field;
682  const dfield_t* new_val;
683  ulint len;
684  ulint n_fields;
685  byte* buf_end;
686  ulint i;
687 
688  n_fields = upd_get_n_fields(update);
689 
690  buf_end = log_ptr + MLOG_BUF_MARGIN;
691 
692  mach_write_to_1(log_ptr, update->info_bits);
693  log_ptr++;
694  log_ptr += mach_write_compressed(log_ptr, n_fields);
695 
696  for (i = 0; i < n_fields; i++) {
697 
698 #if MLOG_BUF_MARGIN <= 30
699 # error "MLOG_BUF_MARGIN <= 30"
700 #endif
701 
702  if (log_ptr + 30 > buf_end) {
703  mlog_close(mtr, log_ptr);
704 
705  log_ptr = mlog_open(mtr, MLOG_BUF_MARGIN);
706  buf_end = log_ptr + MLOG_BUF_MARGIN;
707  }
708 
709  upd_field = upd_get_nth_field(update, i);
710 
711  new_val = &(upd_field->new_val);
712 
713  len = dfield_get_len(new_val);
714 
715  log_ptr += mach_write_compressed(log_ptr, upd_field->field_no);
716  log_ptr += mach_write_compressed(log_ptr, len);
717 
718  if (len != UNIV_SQL_NULL) {
719  if (log_ptr + len < buf_end) {
720  memcpy(log_ptr, dfield_get_data(new_val), len);
721 
722  log_ptr += len;
723  } else {
724  mlog_close(mtr, log_ptr);
725 
727  mtr,
728  static_cast<byte*>(
729  dfield_get_data(new_val)),
730  len);
731 
732  log_ptr = mlog_open(mtr, MLOG_BUF_MARGIN);
733  buf_end = log_ptr + MLOG_BUF_MARGIN;
734  }
735  }
736  }
737 
738  mlog_close(mtr, log_ptr);
739 }
740 #endif /* !UNIV_HOTBACKUP */
741 
742 /*********************************************************************/
745 UNIV_INTERN
746 byte*
748 /*================*/
749  byte* ptr,
750  byte* end_ptr,
751  mem_heap_t* heap,
753  upd_t** update_out)
754 {
755  upd_t* update;
756  upd_field_t* upd_field;
757  dfield_t* new_val;
758  ulint len;
759  ulint n_fields;
760  ulint info_bits;
761  ulint i;
762 
763  if (end_ptr < ptr + 1) {
764 
765  return(NULL);
766  }
767 
768  info_bits = mach_read_from_1(ptr);
769  ptr++;
770  ptr = mach_parse_compressed(ptr, end_ptr, &n_fields);
771 
772  if (ptr == NULL) {
773 
774  return(NULL);
775  }
776 
777  update = upd_create(n_fields, heap);
778  update->info_bits = info_bits;
779 
780  for (i = 0; i < n_fields; i++) {
781  ulint field_no;
782  upd_field = upd_get_nth_field(update, i);
783  new_val = &(upd_field->new_val);
784 
785  ptr = mach_parse_compressed(ptr, end_ptr, &field_no);
786 
787  if (ptr == NULL) {
788 
789  return(NULL);
790  }
791 
792  upd_field->field_no = field_no;
793 
794  ptr = mach_parse_compressed(ptr, end_ptr, &len);
795 
796  if (ptr == NULL) {
797 
798  return(NULL);
799  }
800 
801  if (len != UNIV_SQL_NULL) {
802 
803  if (end_ptr < ptr + len) {
804 
805  return(NULL);
806  }
807 
808  dfield_set_data(new_val,
809  mem_heap_dup(heap, ptr, len), len);
810  ptr += len;
811  } else {
812  dfield_set_null(new_val);
813  }
814  }
815 
816  *update_out = update;
817 
818  return(ptr);
819 }
820 
821 #ifndef UNIV_HOTBACKUP
822 /***************************************************************/
827 UNIV_INTERN
828 upd_t*
830 /*====================================*/
831  const rec_t* rec,
832  dict_index_t* index,
833  const ulint* offsets,
834  const dtuple_t* entry,
835  mem_heap_t* heap)
836 {
837  upd_field_t* upd_field;
838  const dfield_t* dfield;
839  const byte* data;
840  ulint len;
841  upd_t* update;
842  ulint n_diff;
843  ulint i;
844 
845  /* This function is used only for a secondary index */
846  ut_a(!dict_index_is_clust(index));
847  ut_ad(rec_offs_validate(rec, index, offsets));
848  ut_ad(rec_offs_n_fields(offsets) == dtuple_get_n_fields(entry));
849  ut_ad(!rec_offs_any_extern(offsets));
850 
851  update = upd_create(dtuple_get_n_fields(entry), heap);
852 
853  n_diff = 0;
854 
855  for (i = 0; i < dtuple_get_n_fields(entry); i++) {
856 
857  data = rec_get_nth_field(rec, offsets, i, &len);
858 
859  dfield = dtuple_get_nth_field(entry, i);
860 
861  /* NOTE that it may be that len != dfield_get_len(dfield) if we
862  are updating in a character set and collation where strings of
863  different length can be equal in an alphabetical comparison,
864  and also in the case where we have a column prefix index
865  and the last characters in the index field are spaces; the
866  latter case probably caused the assertion failures reported at
867  row0upd.cc line 713 in versions 4.0.14 - 4.0.16. */
868 
869  /* NOTE: we compare the fields as binary strings!
870  (No collation) */
871 
872  if (!dfield_data_is_binary_equal(dfield, len, data)) {
873 
874  upd_field = upd_get_nth_field(update, n_diff);
875 
876  dfield_copy(&(upd_field->new_val), dfield);
877 
878  upd_field_set_field_no(upd_field, i, index, NULL);
879 
880  n_diff++;
881  }
882  }
883 
884  update->n_fields = n_diff;
885 
886  return(update);
887 }
888 
889 /***************************************************************/
895 UNIV_INTERN
896 const upd_t*
898 /*============================*/
899  dict_index_t* index,
900  const dtuple_t* entry,
901  const rec_t* rec,
902  const ulint* offsets,
903  bool no_sys,
905  trx_t* trx,
906  mem_heap_t* heap)
907 {
908  upd_field_t* upd_field;
909  const dfield_t* dfield;
910  const byte* data;
911  ulint len;
912  upd_t* update;
913  ulint n_diff;
914  ulint trx_id_pos;
915  ulint i;
916  ulint offsets_[REC_OFFS_NORMAL_SIZE];
917  rec_offs_init(offsets_);
918 
919  /* This function is used only for a clustered index */
920  ut_a(dict_index_is_clust(index));
921 
922  update = upd_create(dtuple_get_n_fields(entry), heap);
923 
924  n_diff = 0;
925 
926  trx_id_pos = dict_index_get_sys_col_pos(index, DATA_TRX_ID);
927  ut_ad(dict_index_get_sys_col_pos(index, DATA_ROLL_PTR)
928  == trx_id_pos + 1);
929 
930  if (!offsets) {
931  offsets = rec_get_offsets(rec, index, offsets_,
932  ULINT_UNDEFINED, &heap);
933  } else {
934  ut_ad(rec_offs_validate(rec, index, offsets));
935  }
936 
937  for (i = 0; i < dtuple_get_n_fields(entry); i++) {
938 
939  data = rec_get_nth_field(rec, offsets, i, &len);
940 
941  dfield = dtuple_get_nth_field(entry, i);
942 
943  /* NOTE: we compare the fields as binary strings!
944  (No collation) */
945 
946  if (no_sys && (i == trx_id_pos || i == trx_id_pos + 1)) {
947 
948  continue;
949  }
950 
951  if (!dfield_is_ext(dfield)
952  != !rec_offs_nth_extern(offsets, i)
953  || !dfield_data_is_binary_equal(dfield, len, data)) {
954 
955  upd_field = upd_get_nth_field(update, n_diff);
956 
957  dfield_copy(&(upd_field->new_val), dfield);
958 
959  upd_field_set_field_no(upd_field, i, index, trx);
960 
961  n_diff++;
962  }
963  }
964 
965  update->n_fields = n_diff;
966 
967  return(update);
968 }
969 
970 /***********************************************************/
975 static
976 byte*
977 row_upd_ext_fetch(
978 /*==============*/
979  const byte* data,
982  ulint local_len,
983  ulint zip_size,
986  ulint* len,
988  mem_heap_t* heap)
989 {
990  byte* buf = static_cast<byte*>(mem_heap_alloc(heap, *len));
991 
993  buf, *len, zip_size, data, local_len);
994 
995  /* We should never update records containing a half-deleted BLOB. */
996  ut_a(*len);
997 
998  return(buf);
999 }
1000 
1001 /***********************************************************/
1004 static
1005 void
1006 row_upd_index_replace_new_col_val(
1007 /*==============================*/
1008  dfield_t* dfield,
1010  const dict_field_t* field,
1011  const dict_col_t* col,
1012  const upd_field_t* uf,
1013  mem_heap_t* heap,
1015  ulint zip_size)
1017 {
1018  ulint len;
1019  const byte* data;
1020 
1021  dfield_copy_data(dfield, &uf->new_val);
1022 
1023  if (dfield_is_null(dfield)) {
1024  return;
1025  }
1026 
1027  len = dfield_get_len(dfield);
1028  data = static_cast<const byte*>(dfield_get_data(dfield));
1029 
1030  if (field->prefix_len > 0) {
1031  ibool fetch_ext = dfield_is_ext(dfield)
1032  && len < (ulint) field->prefix_len
1033  + BTR_EXTERN_FIELD_REF_SIZE;
1034 
1035  if (fetch_ext) {
1036  ulint l = len;
1037 
1038  len = field->prefix_len;
1039 
1040  data = row_upd_ext_fetch(data, l, zip_size,
1041  &len, heap);
1042  }
1043 
1045  col->mbminmaxlen,
1046  field->prefix_len, len,
1047  (const char*) data);
1048 
1049  dfield_set_data(dfield, data, len);
1050 
1051  if (!fetch_ext) {
1052  dfield_dup(dfield, heap);
1053  }
1054 
1055  return;
1056  }
1057 
1058  switch (uf->orig_len) {
1059  byte* buf;
1060  case BTR_EXTERN_FIELD_REF_SIZE:
1061  /* Restore the original locally stored
1062  part of the column. In the undo log,
1063  InnoDB writes a longer prefix of externally
1064  stored columns, so that column prefixes
1065  in secondary indexes can be reconstructed. */
1066  dfield_set_data(dfield,
1067  data + len - BTR_EXTERN_FIELD_REF_SIZE,
1068  BTR_EXTERN_FIELD_REF_SIZE);
1069  dfield_set_ext(dfield);
1070  /* fall through */
1071  case 0:
1072  dfield_dup(dfield, heap);
1073  break;
1074  default:
1075  /* Reconstruct the original locally
1076  stored part of the column. The data
1077  will have to be copied. */
1078  ut_a(uf->orig_len > BTR_EXTERN_FIELD_REF_SIZE);
1079  buf = static_cast<byte*>(mem_heap_alloc(heap, uf->orig_len));
1080 
1081  /* Copy the locally stored prefix. */
1082  memcpy(buf, data,
1083  uf->orig_len - BTR_EXTERN_FIELD_REF_SIZE);
1084 
1085  /* Copy the BLOB pointer. */
1086  memcpy(buf + uf->orig_len - BTR_EXTERN_FIELD_REF_SIZE,
1087  data + len - BTR_EXTERN_FIELD_REF_SIZE,
1088  BTR_EXTERN_FIELD_REF_SIZE);
1089 
1090  dfield_set_data(dfield, buf, uf->orig_len);
1091  dfield_set_ext(dfield);
1092  break;
1093  }
1094 }
1095 
1096 /***********************************************************/
1099 UNIV_INTERN
1100 void
1102 /*=========================================*/
1103  dtuple_t* entry,
1107  dict_index_t* index,
1109  const upd_t* update,
1112  ibool order_only,
1116  mem_heap_t* heap)
1118 {
1119  ulint i;
1120  ulint n_fields;
1121  const ulint zip_size = dict_table_zip_size(index->table);
1122 
1123  ut_ad(index);
1124 
1125  dtuple_set_info_bits(entry, update->info_bits);
1126 
1127  if (order_only) {
1128  n_fields = dict_index_get_n_unique(index);
1129  } else {
1130  n_fields = dict_index_get_n_fields(index);
1131  }
1132 
1133  for (i = 0; i < n_fields; i++) {
1134  const dict_field_t* field;
1135  const dict_col_t* col;
1136  const upd_field_t* uf;
1137 
1138  field = dict_index_get_nth_field(index, i);
1139  col = dict_field_get_col(field);
1140  uf = upd_get_field_by_field_no(update, i);
1141 
1142  if (uf) {
1143  row_upd_index_replace_new_col_val(
1144  dtuple_get_nth_field(entry, i),
1145  field, col, uf, heap, zip_size);
1146  }
1147  }
1148 }
1149 
1150 /***********************************************************/
1153 UNIV_INTERN
1154 void
1156 /*===============================*/
1157  dtuple_t* entry,
1161  dict_index_t* index,
1163  const upd_t* update,
1166  mem_heap_t* heap)
1168 {
1169  ulint i;
1170  const dict_index_t* clust_index
1171  = dict_table_get_first_index(index->table);
1172  const ulint zip_size
1173  = dict_table_zip_size(index->table);
1174 
1175  dtuple_set_info_bits(entry, update->info_bits);
1176 
1177  for (i = 0; i < dict_index_get_n_fields(index); i++) {
1178  const dict_field_t* field;
1179  const dict_col_t* col;
1180  const upd_field_t* uf;
1181 
1182  field = dict_index_get_nth_field(index, i);
1183  col = dict_field_get_col(field);
1185  update, dict_col_get_clust_pos(col, clust_index));
1186 
1187  if (uf) {
1188  row_upd_index_replace_new_col_val(
1189  dtuple_get_nth_field(entry, i),
1190  field, col, uf, heap, zip_size);
1191  }
1192  }
1193 }
1194 
1195 /***********************************************************/
1197 UNIV_INTERN
1198 void
1200 /*============*/
1201  dtuple_t* row,
1206  row_ext_t** ext,
1208  const dict_index_t* index,
1209  const upd_t* update,
1211  mem_heap_t* heap)
1212 {
1213  ulint col_no;
1214  ulint i;
1215  ulint n_cols;
1216  ulint n_ext_cols;
1217  ulint* ext_cols;
1218  const dict_table_t* table;
1219 
1220  ut_ad(row);
1221  ut_ad(ext);
1222  ut_ad(index);
1223  ut_ad(dict_index_is_clust(index));
1224  ut_ad(update);
1225  ut_ad(heap);
1226 
1227  n_cols = dtuple_get_n_fields(row);
1228  table = index->table;
1229  ut_ad(n_cols == dict_table_get_n_cols(table));
1230 
1231  ext_cols = static_cast<ulint*>(
1232  mem_heap_alloc(heap, n_cols * sizeof *ext_cols));
1233 
1234  n_ext_cols = 0;
1235 
1236  dtuple_set_info_bits(row, update->info_bits);
1237 
1238  for (col_no = 0; col_no < n_cols; col_no++) {
1239 
1240  const dict_col_t* col
1241  = dict_table_get_nth_col(table, col_no);
1242  const ulint clust_pos
1243  = dict_col_get_clust_pos(col, index);
1244  dfield_t* dfield;
1245 
1246  if (UNIV_UNLIKELY(clust_pos == ULINT_UNDEFINED)) {
1247 
1248  continue;
1249  }
1250 
1251  dfield = dtuple_get_nth_field(row, col_no);
1252 
1253  for (i = 0; i < upd_get_n_fields(update); i++) {
1254 
1255  const upd_field_t* upd_field
1256  = upd_get_nth_field(update, i);
1257 
1258  if (upd_field->field_no != clust_pos) {
1259 
1260  continue;
1261  }
1262 
1263  dfield_copy_data(dfield, &upd_field->new_val);
1264  break;
1265  }
1266 
1267  if (dfield_is_ext(dfield) && col->ord_part) {
1268  ext_cols[n_ext_cols++] = col_no;
1269  }
1270  }
1271 
1272  if (n_ext_cols) {
1273  *ext = row_ext_create(n_ext_cols, ext_cols, table->flags, row,
1274  heap);
1275  } else {
1276  *ext = NULL;
1277  }
1278 }
1279 
1280 /***********************************************************/
1287 UNIV_INTERN
1288 ibool
1290 /*==================================*/
1291  dict_index_t* index,
1292  const upd_t* update,
1295 #ifdef UNIV_DEBUG
1296  const que_thr_t*thr,
1297 #endif /* UNIV_DEBUG */
1298  const dtuple_t* row,
1302  const row_ext_t*ext)
1304 {
1305  ulint n_unique;
1306  ulint i;
1307  const dict_index_t* clust_index;
1308 
1309  ut_ad(index);
1310  ut_ad(update);
1311  ut_ad(thr);
1312  ut_ad(thr->graph);
1313  ut_ad(thr->graph->trx);
1314 
1315  n_unique = dict_index_get_n_unique(index);
1316 
1317  clust_index = dict_table_get_first_index(index->table);
1318 
1319  for (i = 0; i < n_unique; i++) {
1320 
1321  const dict_field_t* ind_field;
1322  const dict_col_t* col;
1323  ulint col_no;
1324  const upd_field_t* upd_field;
1325  const dfield_t* dfield;
1326  dfield_t dfield_ext;
1327  ulint dfield_len;
1328  const byte* buf;
1329 
1330  ind_field = dict_index_get_nth_field(index, i);
1331  col = dict_field_get_col(ind_field);
1332  col_no = dict_col_get_no(col);
1333 
1334  upd_field = upd_get_field_by_field_no(
1335  update, dict_col_get_clust_pos(col, clust_index));
1336 
1337  if (upd_field == NULL) {
1338  continue;
1339  }
1340 
1341  if (row == NULL) {
1342  ut_ad(ext == NULL);
1343  return(TRUE);
1344  }
1345 
1346  dfield = dtuple_get_nth_field(row, col_no);
1347 
1348  /* This treatment of column prefix indexes is loosely
1349  based on row_build_index_entry(). */
1350 
1351  if (UNIV_LIKELY(ind_field->prefix_len == 0)
1352  || dfield_is_null(dfield)) {
1353  /* do nothing special */
1354  } else if (ext) {
1355  /* Silence a compiler warning without
1356  silencing a Valgrind error. */
1357  dfield_len = 0;
1358  UNIV_MEM_INVALID(&dfield_len, sizeof dfield_len);
1359  /* See if the column is stored externally. */
1360  buf = row_ext_lookup(ext, col_no, &dfield_len);
1361 
1362  ut_ad(col->ord_part);
1363 
1364  if (UNIV_LIKELY_NULL(buf)) {
1365  if (UNIV_UNLIKELY(buf == field_ref_zero)) {
1366  /* The externally stored field
1367  was not written yet. This
1368  record should only be seen by
1369  recv_recovery_rollback_active(),
1370  when the server had crashed before
1371  storing the field. */
1372  ut_ad(thr->graph->trx->is_recovered);
1373  ut_ad(trx_is_recv(thr->graph->trx));
1374  return(TRUE);
1375  }
1376 
1377  goto copy_dfield;
1378  }
1379  } else if (dfield_is_ext(dfield)) {
1380  dfield_len = dfield_get_len(dfield);
1381  ut_a(dfield_len > BTR_EXTERN_FIELD_REF_SIZE);
1382  dfield_len -= BTR_EXTERN_FIELD_REF_SIZE;
1383  ut_a(dict_index_is_clust(index)
1384  || ind_field->prefix_len <= dfield_len);
1385 
1386  buf = static_cast<byte*>(dfield_get_data(dfield));
1387 copy_dfield:
1388  ut_a(dfield_len > 0);
1389  dfield_copy(&dfield_ext, dfield);
1390  dfield_set_data(&dfield_ext, buf, dfield_len);
1391  dfield = &dfield_ext;
1392  }
1393 
1395  dfield, &upd_field->new_val,
1396  ind_field->prefix_len)) {
1397 
1398  return(TRUE);
1399  }
1400  }
1401 
1402  return(FALSE);
1403 }
1404 
1405 /***********************************************************/
1410 UNIV_INTERN
1411 ibool
1413 /*========================================*/
1414  const dict_table_t* table,
1415  const upd_t* update)
1416 {
1417  upd_field_t* upd_field;
1419  ulint i;
1420 
1421  index = dict_table_get_first_index(table);
1422 
1423  for (i = 0; i < upd_get_n_fields(update); i++) {
1424 
1425  upd_field = upd_get_nth_field(update, i);
1426 
1427  if (dict_field_get_col(dict_index_get_nth_field(
1428  index, upd_field->field_no))
1429  ->ord_part) {
1430 
1431  return(TRUE);
1432  }
1433  }
1434 
1435  return(FALSE);
1436 }
1437 
1438 /***********************************************************/
1441 UNIV_INTERN
1442 bool
1444 /*===================*/
1445  dict_table_t* table,
1446  upd_field_t* upd_field)
1447 {
1448  ulint col_no;
1449  dict_index_t* clust_index;
1450  fts_t* fts = table->fts;
1451 
1452  clust_index = dict_table_get_first_index(table);
1453 
1454  /* Convert from index-specific column number to table-global
1455  column number. */
1456  col_no = dict_index_get_nth_col_no(clust_index, upd_field->field_no);
1457 
1458  return(col_no == fts->doc_col);
1459 }
1460 /***********************************************************/
1464 UNIV_INTERN
1465 ulint
1467 /*=======================*/
1468  dict_table_t* table,
1469  upd_field_t* upd_field)
1470 {
1471  ulint col_no;
1472  dict_index_t* clust_index;
1473  fts_t* fts = table->fts;
1474 
1475  clust_index = dict_table_get_first_index(table);
1476 
1477  /* Convert from index-specific column number to table-global
1478  column number. */
1479  col_no = dict_index_get_nth_col_no(clust_index, upd_field->field_no);
1480 
1481  return(dict_table_is_fts_column(fts->indexes, col_no));
1482 }
1483 
1484 /***********************************************************/
1489 static
1490 ibool
1491 row_upd_changes_first_fields_binary(
1492 /*================================*/
1493  dtuple_t* entry,
1494  dict_index_t* index,
1495  const upd_t* update,
1496  ulint n)
1497 {
1498  ulint n_upd_fields;
1499  ulint i, j;
1500  dict_index_t* clust_index;
1501 
1502  ut_ad(update && index);
1503  ut_ad(n <= dict_index_get_n_fields(index));
1504 
1505  n_upd_fields = upd_get_n_fields(update);
1506  clust_index = dict_table_get_first_index(index->table);
1507 
1508  for (i = 0; i < n; i++) {
1509 
1510  const dict_field_t* ind_field;
1511  const dict_col_t* col;
1512  ulint col_pos;
1513 
1514  ind_field = dict_index_get_nth_field(index, i);
1515  col = dict_field_get_col(ind_field);
1516  col_pos = dict_col_get_clust_pos(col, clust_index);
1517 
1518  ut_a(ind_field->prefix_len == 0);
1519 
1520  for (j = 0; j < n_upd_fields; j++) {
1521 
1522  upd_field_t* upd_field
1523  = upd_get_nth_field(update, j);
1524 
1525  if (col_pos == upd_field->field_no
1527  dtuple_get_nth_field(entry, i),
1528  &upd_field->new_val, 0)) {
1529 
1530  return(TRUE);
1531  }
1532  }
1533  }
1534 
1535  return(FALSE);
1536 }
1537 
1538 /*********************************************************************/
1540 UNIV_INLINE
1541 void
1542 row_upd_copy_columns(
1543 /*=================*/
1544  rec_t* rec,
1545  const ulint* offsets,
1546  sym_node_t* column)
1548 {
1549  byte* data;
1550  ulint len;
1551 
1552  while (column) {
1553  data = rec_get_nth_field(rec, offsets,
1554  column->field_nos[SYM_CLUST_FIELD_NO],
1555  &len);
1556  eval_node_copy_and_alloc_val(column, data, len);
1557 
1558  column = UT_LIST_GET_NEXT(col_var_list, column);
1559  }
1560 }
1561 
1562 /*********************************************************************/
1565 UNIV_INLINE
1566 void
1567 row_upd_eval_new_vals(
1568 /*==================*/
1569  upd_t* update)
1570 {
1571  que_node_t* exp;
1572  upd_field_t* upd_field;
1573  ulint n_fields;
1574  ulint i;
1575 
1576  n_fields = upd_get_n_fields(update);
1577 
1578  for (i = 0; i < n_fields; i++) {
1579  upd_field = upd_get_nth_field(update, i);
1580 
1581  exp = upd_field->exp;
1582 
1583  eval_exp(exp);
1584 
1585  dfield_copy_data(&(upd_field->new_val), que_node_get_val(exp));
1586  }
1587 }
1588 
/***********************************************************//**
Stores to the heap the row on which the node->pcur is positioned, and
builds the updated row (node->upd_row) unless this is a delete. */
static
void
row_upd_store_row(
/*==============*/
	upd_node_t*	node)	/*!< in: row update node */
{
	dict_index_t*	clust_index;
	rec_t*		rec;
	mem_heap_t*	heap		= NULL;
	row_ext_t**	ext;
	ulint		offsets_[REC_OFFS_NORMAL_SIZE];
	const ulint*	offsets;
	rec_offs_init(offsets_);

	ut_ad(node->pcur->latch_mode != BTR_NO_LATCHES);

	if (node->row != NULL) {
		/* A row was stored by a previous invocation: reclaim
		its memory before building the new copies. */
		mem_heap_empty(node->heap);
	}

	clust_index = dict_table_get_first_index(node->table);

	rec = btr_pcur_get_rec(node->pcur);

	offsets = rec_get_offsets(rec, clust_index, offsets_,
				  ULINT_UNDEFINED, &heap);

	if (dict_table_get_format(node->table) >= UNIV_FORMAT_B) {
		/* In DYNAMIC or COMPRESSED format, there is no prefix
		of externally stored columns in the clustered index
		record. Build a cache of column prefixes. */
		ext = &node->ext;
	} else {
		/* REDUNDANT and COMPACT formats store a local
		768-byte prefix of each externally stored column.
		No cache is needed. */
		ext = NULL;
		node->ext = NULL;
	}

	/* Copy the old row into node->heap; ext (if non-NULL) receives
	the cache of externally stored column prefixes. */
	node->row = row_build(ROW_COPY_DATA, clust_index, rec, offsets,
			      NULL, NULL, NULL, ext, node->heap);
	if (node->is_delete) {
		/* A delete does not need the updated version of the row. */
		node->upd_row = NULL;
		node->upd_ext = NULL;
	} else {
		/* Build the updated row by applying node->update to a
		copy of the old row. */
		node->upd_row = dtuple_copy(node->row, node->heap);
		row_upd_replace(node->upd_row, &node->upd_ext,
				clust_index, node->update, node->heap);
	}

	if (UNIV_LIKELY_NULL(heap)) {
		/* rec_get_offsets() allocated a heap for the offsets. */
		mem_heap_free(heap);
	}
}
1646 
1647 /***********************************************************/
1651 static __attribute__((nonnull, warn_unused_result))
1652 dberr_t
1653 row_upd_sec_index_entry(
1654 /*====================*/
1655  upd_node_t* node,
1656  que_thr_t* thr)
1657 {
1658  mtr_t mtr;
1659  const rec_t* rec;
1660  btr_pcur_t pcur;
1661  mem_heap_t* heap;
1662  dtuple_t* entry;
1664  btr_cur_t* btr_cur;
1665  ibool referenced;
1666  dberr_t err = DB_SUCCESS;
1667  trx_t* trx = thr_get_trx(thr);
1668  ulint mode;
1669  enum row_search_result search_result;
1670 
1671  ut_ad(trx->id);
1672 
1673  index = node->index;
1674 
1675  referenced = row_upd_index_is_referenced(index, trx);
1676 
1677  heap = mem_heap_create(1024);
1678 
1679  /* Build old index entry */
1680  entry = row_build_index_entry(node->row, node->ext, index, heap);
1681  ut_a(entry);
1682 
1683  log_free_check();
1684 
1685 #ifdef UNIV_DEBUG
1686  /* Work around Bug#14626800 ASSERTION FAILURE IN DEBUG_SYNC().
1687  Once it is fixed, remove the 'ifdef', 'if' and this comment. */
1688  if (!trx->ddl) {
1689  DEBUG_SYNC_C_IF_THD(trx->mysql_thd,
1690  "before_row_upd_sec_index_entry");
1691  }
1692 #endif /* UNIV_DEBUG */
1693 
1694  mtr_start(&mtr);
1695 
1696  if (*index->name == TEMP_INDEX_PREFIX) {
1697  /* The index->online_status may change if the
1698  index->name starts with TEMP_INDEX_PREFIX (meaning
1699  that the index is or was being created online). It is
1700  protected by index->lock. */
1701 
1702  mtr_s_lock(dict_index_get_lock(index), &mtr);
1703 
1704  switch (dict_index_get_online_status(index)) {
1705  case ONLINE_INDEX_COMPLETE:
1706  /* This is a normal index. Do not log anything.
1707  Perform the update on the index tree directly. */
1708  break;
1709  case ONLINE_INDEX_CREATION:
1710  /* Log a DELETE and optionally INSERT. */
1711  row_log_online_op(index, entry, 0);
1712 
1713  if (!node->is_delete) {
1714  mem_heap_empty(heap);
1715  entry = row_build_index_entry(
1716  node->upd_row, node->upd_ext,
1717  index, heap);
1718  ut_a(entry);
1719  row_log_online_op(index, entry, trx->id);
1720  }
1721  /* fall through */
1722  case ONLINE_INDEX_ABORTED:
1724  mtr_commit(&mtr);
1725  goto func_exit;
1726  }
1727 
1728  /* We can only buffer delete-mark operations if there
1729  are no foreign key constraints referring to the index. */
1730  mode = referenced
1733  | BTR_DELETE_MARK;
1734  } else {
1735  /* For secondary indexes,
1736  index->online_status==ONLINE_INDEX_CREATION unless
1737  index->name starts with TEMP_INDEX_PREFIX. */
1739 
1740  /* We can only buffer delete-mark operations if there
1741  are no foreign key constraints referring to the index. */
1742  mode = referenced
1743  ? BTR_MODIFY_LEAF
1745  }
1746 
1747  /* Set the query thread, so that ibuf_insert_low() will be
1748  able to invoke thd_get_trx(). */
1749  btr_pcur_get_btr_cur(&pcur)->thr = thr;
1750 
1751  search_result = row_search_index_entry(index, entry, mode,
1752  &pcur, &mtr);
1753 
1754  btr_cur = btr_pcur_get_btr_cur(&pcur);
1755 
1756  rec = btr_cur_get_rec(btr_cur);
1757 
1758  switch (search_result) {
1759  case ROW_NOT_DELETED_REF: /* should only occur for BTR_DELETE */
1760  ut_error;
1761  break;
1762  case ROW_BUFFERED:
1763  /* Entry was delete marked already. */
1764  break;
1765 
1766  case ROW_NOT_FOUND:
1767  if (*index->name == TEMP_INDEX_PREFIX) {
1768  /* When online CREATE INDEX copied the update
1769  that we already made to the clustered index,
1770  and completed the secondary index creation
1771  before we got here, the old secondary index
1772  record would not exist. The CREATE INDEX
1773  should be waiting for a MySQL meta-data lock
1774  upgrade at least until this UPDATE
1775  returns. After that point, the
1776  TEMP_INDEX_PREFIX would be dropped from the
1777  index name in commit_inplace_alter_table(). */
1778  break;
1779  }
1780 
1781  fputs("InnoDB: error in sec index entry update in\n"
1782  "InnoDB: ", stderr);
1783  dict_index_name_print(stderr, trx, index);
1784  fputs("\n"
1785  "InnoDB: tuple ", stderr);
1786  dtuple_print(stderr, entry);
1787  fputs("\n"
1788  "InnoDB: record ", stderr);
1789  rec_print(stderr, rec, index);
1790  putc('\n', stderr);
1791  trx_print(stderr, trx, 0);
1792  fputs("\n"
1793  "InnoDB: Submit a detailed bug report"
1794  " to http://bugs.mysql.com\n", stderr);
1795  ut_ad(0);
1796  break;
1797  case ROW_FOUND:
1798  /* Delete mark the old index record; it can already be
1799  delete marked if we return after a lock wait in
1800  row_ins_sec_index_entry() below */
1801  if (!rec_get_deleted_flag(
1802  rec, dict_table_is_comp(index->table))) {
1804  0, btr_cur, TRUE, thr, &mtr);
1805 
1806  if (err == DB_SUCCESS && referenced) {
1807 
1808  ulint* offsets;
1809 
1810  offsets = rec_get_offsets(
1811  rec, index, NULL, ULINT_UNDEFINED,
1812  &heap);
1813 
1814  /* NOTE that the following call loses
1815  the position of pcur ! */
1816  err = row_upd_check_references_constraints(
1817  node, &pcur, index->table,
1818  index, offsets, thr, &mtr);
1819  }
1820  }
1821  break;
1822  }
1823 
1824  btr_pcur_close(&pcur);
1825  mtr_commit(&mtr);
1826 
1827  if (node->is_delete || err != DB_SUCCESS) {
1828 
1829  goto func_exit;
1830  }
1831 
1832  mem_heap_empty(heap);
1833 
1834  /* Build a new index entry */
1835  entry = row_build_index_entry(node->upd_row, node->upd_ext,
1836  index, heap);
1837  ut_a(entry);
1838 
1839  /* Insert new index entry */
1840  err = row_ins_sec_index_entry(index, entry, thr);
1841 
1842 func_exit:
1843  mem_heap_free(heap);
1844 
1845  return(err);
1846 }
1847 
1848 /***********************************************************/
1853 static __attribute__((nonnull, warn_unused_result))
1854 dberr_t
1855 row_upd_sec_step(
1856 /*=============*/
1857  upd_node_t* node,
1858  que_thr_t* thr)
1859 {
1860  ut_ad((node->state == UPD_NODE_UPDATE_ALL_SEC)
1861  || (node->state == UPD_NODE_UPDATE_SOME_SEC));
1862  ut_ad(!dict_index_is_clust(node->index));
1863 
1864  if (node->state == UPD_NODE_UPDATE_ALL_SEC
1865  || row_upd_changes_ord_field_binary(node->index, node->update,
1866  thr, node->row, node->ext)) {
1867  return(row_upd_sec_index_entry(node, thr));
1868  }
1869 
1870  return(DB_SUCCESS);
1871 }
1872 
#ifdef UNIV_DEBUG
/* In debug builds, pass the old record and its offsets through to
row_upd_clust_rec_by_insert_inherit_func() so that it can perform extra
consistency checks; in release builds only the entry and the update
vector are passed. */
# define row_upd_clust_rec_by_insert_inherit(rec,offsets,entry,update) \
	row_upd_clust_rec_by_insert_inherit_func(rec,offsets,entry,update)
#else /* UNIV_DEBUG */
# define row_upd_clust_rec_by_insert_inherit(rec,offsets,entry,update) \
	row_upd_clust_rec_by_insert_inherit_func(entry,update)
#endif /* UNIV_DEBUG */
1880 /*******************************************************************/
1886 static __attribute__((warn_unused_result))
1887 ibool
1888 row_upd_clust_rec_by_insert_inherit_func(
1889 /*=====================================*/
1890 #ifdef UNIV_DEBUG
1891  const rec_t* rec,
1892  const ulint* offsets,
1893 #endif /* UNIV_DEBUG */
1894  dtuple_t* entry,
1896  const upd_t* update)
1897 {
1898  ibool inherit = FALSE;
1899  ulint i;
1900 
1901  ut_ad(!rec == !offsets);
1902  ut_ad(!rec || rec_offs_any_extern(offsets));
1903 
1904  for (i = 0; i < dtuple_get_n_fields(entry); i++) {
1905  dfield_t* dfield = dtuple_get_nth_field(entry, i);
1906  byte* data;
1907  ulint len;
1908 
1909  ut_ad(!offsets
1910  || !rec_offs_nth_extern(offsets, i)
1911  == !dfield_is_ext(dfield)
1912  || upd_get_field_by_field_no(update, i));
1913  if (!dfield_is_ext(dfield)
1914  || upd_get_field_by_field_no(update, i)) {
1915  continue;
1916  }
1917 
1918 #ifdef UNIV_DEBUG
1919  if (UNIV_LIKELY(rec != NULL)) {
1920  const byte* rec_data
1921  = rec_get_nth_field(rec, offsets, i, &len);
1922  ut_ad(len == dfield_get_len(dfield));
1923  ut_ad(len != UNIV_SQL_NULL);
1924  ut_ad(len >= BTR_EXTERN_FIELD_REF_SIZE);
1925 
1926  rec_data += len - BTR_EXTERN_FIELD_REF_SIZE;
1927 
1928  /* The pointer must not be zero. */
1929  ut_ad(memcmp(rec_data, field_ref_zero,
1930  BTR_EXTERN_FIELD_REF_SIZE));
1931  /* The BLOB must be owned. */
1932  ut_ad(!(rec_data[BTR_EXTERN_LEN]
1933  & BTR_EXTERN_OWNER_FLAG));
1934  }
1935 #endif /* UNIV_DEBUG */
1936 
1937  len = dfield_get_len(dfield);
1938  ut_a(len != UNIV_SQL_NULL);
1939  ut_a(len >= BTR_EXTERN_FIELD_REF_SIZE);
1940 
1941  data = static_cast<byte*>(dfield_get_data(dfield));
1942 
1943  data += len - BTR_EXTERN_FIELD_REF_SIZE;
1944  /* The pointer must not be zero. */
1945  ut_a(memcmp(data, field_ref_zero, BTR_EXTERN_FIELD_REF_SIZE));
1946  /* The BLOB must be owned. */
1947  ut_a(!(data[BTR_EXTERN_LEN] & BTR_EXTERN_OWNER_FLAG));
1948 
1950  /* The BTR_EXTERN_INHERITED_FLAG only matters in
1951  rollback. Purge will always free the extern fields of
1952  a delete-marked row. */
1953 
1954  inherit = TRUE;
1955  }
1956 
1957  return(inherit);
1958 }
1959 
1960 /***********************************************************/
1967 static __attribute__((nonnull, warn_unused_result))
1968 dberr_t
1969 row_upd_clust_rec_by_insert(
1970 /*========================*/
1971  upd_node_t* node,
1972  dict_index_t* index,
1973  que_thr_t* thr,
1974  ibool referenced,
1976  mtr_t* mtr)
1977 {
1978  mem_heap_t* heap;
1979  btr_pcur_t* pcur;
1980  btr_cur_t* btr_cur;
1981  trx_t* trx;
1983  dtuple_t* entry;
1984  dberr_t err;
1985  ibool change_ownership = FALSE;
1986  rec_t* rec;
1987  ulint* offsets = NULL;
1988 
1989  ut_ad(node);
1990  ut_ad(dict_index_is_clust(index));
1991 
1992  trx = thr_get_trx(thr);
1993  table = node->table;
1994  pcur = node->pcur;
1995  btr_cur = btr_pcur_get_btr_cur(pcur);
1996 
1997  heap = mem_heap_create(1000);
1998 
1999  entry = row_build_index_entry(node->upd_row, node->upd_ext,
2000  index, heap);
2001  ut_a(entry);
2002 
2003  row_upd_index_entry_sys_field(entry, index, DATA_TRX_ID, trx->id);
2004 
2005  switch (node->state) {
2006  default:
2007  ut_error;
2008  case UPD_NODE_INSERT_BLOB:
2009  /* A lock wait occurred in row_ins_clust_index_entry() in
2010  the previous invocation of this function. Mark the
2011  off-page columns in the entry inherited. */
2012 
2013  change_ownership = row_upd_clust_rec_by_insert_inherit(
2014  NULL, NULL, entry, node->update);
2015  ut_a(change_ownership);
2016  /* fall through */
2017  case UPD_NODE_INSERT_CLUSTERED:
2018  /* A lock wait occurred in row_ins_clust_index_entry() in
2019  the previous invocation of this function. */
2020  break;
2021  case UPD_NODE_UPDATE_CLUSTERED:
2022  /* This is the first invocation of the function where
2023  we update the primary key. Delete-mark the old record
2024  in the clustered index and prepare to insert a new entry. */
2025  rec = btr_cur_get_rec(btr_cur);
2026  offsets = rec_get_offsets(rec, index, NULL,
2027  ULINT_UNDEFINED, &heap);
2029 
2031  btr_cur_get_block(btr_cur), rec, index, offsets,
2032  thr, mtr);
2033  if (err != DB_SUCCESS) {
2034 err_exit:
2035  mtr_commit(mtr);
2036  mem_heap_free(heap);
2037  return(err);
2038  }
2039 
2040  /* If the the new row inherits externally stored
2041  fields (off-page columns a.k.a. BLOBs) from the
2042  delete-marked old record, mark them disowned by the
2043  old record and owned by the new entry. */
2044 
2045  if (rec_offs_any_extern(offsets)) {
2046  change_ownership = row_upd_clust_rec_by_insert_inherit(
2047  rec, offsets, entry, node->update);
2048 
2049  if (change_ownership) {
2050  btr_pcur_store_position(pcur, mtr);
2051  }
2052  }
2053 
2054  if (referenced) {
2055  /* NOTE that the following call loses
2056  the position of pcur ! */
2057 
2058  err = row_upd_check_references_constraints(
2059  node, pcur, table, index, offsets, thr, mtr);
2060 
2061  if (err != DB_SUCCESS) {
2062  goto err_exit;
2063  }
2064  }
2065  }
2066 
2067  mtr_commit(mtr);
2068 
2070  index, entry, thr,
2071  node->upd_ext ? node->upd_ext->n_ext : 0);
2072  node->state = change_ownership
2073  ? UPD_NODE_INSERT_BLOB
2074  : UPD_NODE_INSERT_CLUSTERED;
2075 
2076  if (err == DB_SUCCESS && change_ownership) {
2077  /* Mark the non-updated fields disowned by the old record. */
2078 
2079  /* NOTE: this transaction has an x-lock on the record
2080  and therefore other transactions cannot modify the
2081  record when we have no latch on the page. In addition,
2082  we assume that other query threads of the same
2083  transaction do not modify the record in the meantime.
2084  Therefore we can assert that the restoration of the
2085  cursor succeeds. */
2086 
2087  mtr_start(mtr);
2088 
2089  if (!btr_pcur_restore_position(BTR_MODIFY_LEAF, pcur, mtr)) {
2090  ut_error;
2091  }
2092 
2093  rec = btr_cur_get_rec(btr_cur);
2094  offsets = rec_get_offsets(rec, index, offsets,
2095  ULINT_UNDEFINED, &heap);
2097  ut_ad(rec_get_deleted_flag(rec, rec_offs_comp(offsets)));
2098 
2100  btr_cur_get_page_zip(btr_cur),
2101  rec, index, offsets, node->update, mtr);
2102 
2103  /* It is not necessary to call row_log_table for
2104  this, because during online table rebuild, purge will
2105  not free any BLOBs in the table, whether or not they
2106  are owned by the clustered index record. */
2107 
2108  mtr_commit(mtr);
2109  }
2110 
2111  mem_heap_free(heap);
2112 
2113  return(err);
2114 }
2115 
2116 /***********************************************************/
2121 static __attribute__((nonnull, warn_unused_result))
2122 dberr_t
2123 row_upd_clust_rec(
2124 /*==============*/
2125  upd_node_t* node,
2126  dict_index_t* index,
2127  ulint* offsets,
2130  que_thr_t* thr,
2131  mtr_t* mtr)
2132 {
2133  mem_heap_t* heap = NULL;
2134  big_rec_t* big_rec = NULL;
2135  btr_pcur_t* pcur;
2136  btr_cur_t* btr_cur;
2137  dberr_t err;
2138  const dtuple_t* rebuilt_old_pk = NULL;
2139 
2140  ut_ad(node);
2141  ut_ad(dict_index_is_clust(index));
2142 
2143  pcur = node->pcur;
2144  btr_cur = btr_pcur_get_btr_cur(pcur);
2145 
2146  ut_ad(btr_cur_get_index(btr_cur) == index);
2147  ut_ad(!rec_get_deleted_flag(btr_cur_get_rec(btr_cur),
2148  dict_table_is_comp(index->table)));
2149  ut_ad(rec_offs_validate(btr_cur_get_rec(btr_cur), index, offsets));
2150 
2151  if (dict_index_is_online_ddl(index)) {
2152  rebuilt_old_pk = row_log_table_get_pk(
2153  btr_cur_get_rec(btr_cur), index, offsets, &heap);
2154  }
2155 
2156  /* Try optimistic updating of the record, keeping changes within
2157  the page; we do not check locks because we assume the x-lock on the
2158  record to update */
2159 
2160  if (node->cmpl_info & UPD_NODE_NO_SIZE_CHANGE) {
2162  BTR_NO_LOCKING_FLAG, btr_cur,
2163  offsets, node->update,
2164  node->cmpl_info, thr, thr_get_trx(thr)->id, mtr);
2165  } else {
2167  BTR_NO_LOCKING_FLAG, btr_cur,
2168  &offsets, offsets_heap, node->update,
2169  node->cmpl_info, thr, thr_get_trx(thr)->id, mtr);
2170  }
2171 
2172  if (err == DB_SUCCESS && dict_index_is_online_ddl(index)) {
2173  row_log_table_update(btr_cur_get_rec(btr_cur),
2174  index, offsets, rebuilt_old_pk);
2175  }
2176 
2177  mtr_commit(mtr);
2178 
2179  if (UNIV_LIKELY(err == DB_SUCCESS)) {
2180 
2181  goto func_exit;
2182  }
2183 
2185 
2186  err = DB_LOCK_TABLE_FULL;
2187  goto func_exit;
2188  }
2189  /* We may have to modify the tree structure: do a pessimistic descent
2190  down the index tree */
2191 
2192  mtr_start(mtr);
2193 
2194  /* NOTE: this transaction has an s-lock or x-lock on the record and
2195  therefore other transactions cannot modify the record when we have no
2196  latch on the page. In addition, we assume that other query threads of
2197  the same transaction do not modify the record in the meantime.
2198  Therefore we can assert that the restoration of the cursor succeeds. */
2199 
2200  ut_a(btr_pcur_restore_position(BTR_MODIFY_TREE, pcur, mtr));
2201 
2202  ut_ad(!rec_get_deleted_flag(btr_pcur_get_rec(pcur),
2203  dict_table_is_comp(index->table)));
2204 
2205  if (!heap) {
2206  heap = mem_heap_create(1024);
2207  }
2208 
2211  &offsets, offsets_heap, heap, &big_rec,
2212  node->update, node->cmpl_info,
2213  thr, thr_get_trx(thr)->id, mtr);
2214  if (big_rec) {
2215  ut_a(err == DB_SUCCESS);
2216  /* Write out the externally stored
2217  columns while still x-latching
2218  index->lock and block->lock. Allocate
2219  pages for big_rec in the mtr that
2220  modified the B-tree, but be sure to skip
2221  any pages that were freed in mtr. We will
2222  write out the big_rec pages before
2223  committing the B-tree mini-transaction. If
2224  the system crashes so that crash recovery
2225  will not replay the mtr_commit(&mtr), the
2226  big_rec pages will be left orphaned until
2227  the pages are allocated for something else.
2228 
2229  TODO: If the allocation extends the tablespace, it
2230  will not be redo logged, in either mini-transaction.
2231  Tablespace extension should be redo-logged in the
2232  big_rec mini-transaction, so that recovery will not
2233  fail when the big_rec was written to the extended
2234  portion of the file, in case the file was somehow
2235  truncated in the crash. */
2236 
2237  DEBUG_SYNC_C("before_row_upd_extern");
2239  index, btr_cur_get_block(btr_cur),
2240  btr_cur_get_rec(btr_cur), offsets,
2241  big_rec, mtr, BTR_STORE_UPDATE);
2242  DEBUG_SYNC_C("after_row_upd_extern");
2243  /* If writing big_rec fails (for example, because of
2244  DB_OUT_OF_FILE_SPACE), the record will be corrupted.
2245  Even if we did not update any externally stored
2246  columns, our update could cause the record to grow so
2247  that a non-updated column was selected for external
2248  storage. This non-update would not have been written
2249  to the undo log, and thus the record cannot be rolled
2250  back.
2251 
2252  However, because we have not executed mtr_commit(mtr)
2253  yet, the update will not be replayed in crash
2254  recovery, and the following assertion failure will
2255  effectively "roll back" the operation. */
2256  ut_a(err == DB_SUCCESS);
2257  }
2258 
2259  if (err == DB_SUCCESS && dict_index_is_online_ddl(index)) {
2260  row_log_table_update(btr_cur_get_rec(btr_cur),
2261  index, offsets, rebuilt_old_pk);
2262  }
2263 
2264  mtr_commit(mtr);
2265 func_exit:
2266  if (heap) {
2267  mem_heap_free(heap);
2268  }
2269 
2270  if (big_rec) {
2271  dtuple_big_rec_free(big_rec);
2272  }
2273 
2274  return(err);
2275 }
2276 
2277 /***********************************************************/
2280 static __attribute__((nonnull, warn_unused_result))
2281 dberr_t
2282 row_upd_del_mark_clust_rec(
2283 /*=======================*/
2284  upd_node_t* node,
2285  dict_index_t* index,
2286  ulint* offsets,
2288  que_thr_t* thr,
2289  ibool referenced,
2292  mtr_t* mtr)
2293 {
2294  btr_pcur_t* pcur;
2295  btr_cur_t* btr_cur;
2296  dberr_t err;
2297 
2298  ut_ad(node);
2299  ut_ad(dict_index_is_clust(index));
2300  ut_ad(node->is_delete);
2301 
2302  pcur = node->pcur;
2303  btr_cur = btr_pcur_get_btr_cur(pcur);
2304 
2305  /* Store row because we have to build also the secondary index
2306  entries */
2307 
2308  row_upd_store_row(node);
2309 
2310  /* Mark the clustered index record deleted; we do not have to check
2311  locks, because we assume that we have an x-lock on the record */
2312 
2314  btr_cur_get_block(btr_cur), btr_cur_get_rec(btr_cur),
2315  index, offsets, thr, mtr);
2316  if (err == DB_SUCCESS && referenced) {
2317  /* NOTE that the following call loses the position of pcur ! */
2318 
2319  err = row_upd_check_references_constraints(
2320  node, pcur, index->table, index, offsets, thr, mtr);
2321  }
2322 
2323  mtr_commit(mtr);
2324 
2325  return(err);
2326 }
2327 
2328 /***********************************************************/
2332 static __attribute__((nonnull, warn_unused_result))
2333 dberr_t
2334 row_upd_clust_step(
2335 /*===============*/
2336  upd_node_t* node,
2337  que_thr_t* thr)
2338 {
2340  btr_pcur_t* pcur;
2341  ibool success;
2342  dberr_t err;
2343  mtr_t mtr;
2344  rec_t* rec;
2345  mem_heap_t* heap = NULL;
2346  ulint offsets_[REC_OFFS_NORMAL_SIZE];
2347  ulint* offsets;
2348  ibool referenced;
2349  rec_offs_init(offsets_);
2350 
2351  index = dict_table_get_first_index(node->table);
2352 
2353  referenced = row_upd_index_is_referenced(index, thr_get_trx(thr));
2354 
2355  pcur = node->pcur;
2356 
2357  /* We have to restore the cursor to its position */
2358 
2359  mtr_start(&mtr);
2360 
2361  /* If the restoration does not succeed, then the same
2362  transaction has deleted the record on which the cursor was,
2363  and that is an SQL error. If the restoration succeeds, it may
2364  still be that the same transaction has successively deleted
2365  and inserted a record with the same ordering fields, but in
2366  that case we know that the transaction has at least an
2367  implicit x-lock on the record. */
2368 
2369  ut_a(pcur->rel_pos == BTR_PCUR_ON);
2370 
2371  ulint mode;
2372 
2373 #ifdef UNIV_DEBUG
2374  /* Work around Bug#14626800 ASSERTION FAILURE IN DEBUG_SYNC().
2375  Once it is fixed, remove the 'ifdef', 'if' and this comment. */
2376  if (!thr_get_trx(thr)->ddl) {
2377  DEBUG_SYNC_C_IF_THD(
2378  thr_get_trx(thr)->mysql_thd,
2379  "innodb_row_upd_clust_step_enter");
2380  }
2381 #endif /* UNIV_DEBUG */
2382 
2383  if (dict_index_is_online_ddl(index)) {
2384  ut_ad(node->table->id != DICT_INDEXES_ID);
2386  mtr_s_lock(dict_index_get_lock(index), &mtr);
2387  } else {
2388  mode = BTR_MODIFY_LEAF;
2389  }
2390 
2391  success = btr_pcur_restore_position(mode, pcur, &mtr);
2392 
2393  if (!success) {
2394  err = DB_RECORD_NOT_FOUND;
2395 
2396  mtr_commit(&mtr);
2397 
2398  return(err);
2399  }
2400 
2401  /* If this is a row in SYS_INDEXES table of the data dictionary,
2402  then we have to free the file segments of the index tree associated
2403  with the index */
2404 
2405  if (node->is_delete && node->table->id == DICT_INDEXES_ID) {
2406 
2408 
2409  dict_drop_index_tree(btr_pcur_get_rec(pcur), &mtr);
2410 
2411  mtr_commit(&mtr);
2412 
2413  mtr_start(&mtr);
2414 
2415  success = btr_pcur_restore_position(BTR_MODIFY_LEAF, pcur,
2416  &mtr);
2417  if (!success) {
2418  err = DB_ERROR;
2419 
2420  mtr_commit(&mtr);
2421 
2422  return(err);
2423  }
2424  }
2425 
2426  rec = btr_pcur_get_rec(pcur);
2427  offsets = rec_get_offsets(rec, index, offsets_,
2428  ULINT_UNDEFINED, &heap);
2429 
2430  if (!node->has_clust_rec_x_lock) {
2432  0, btr_pcur_get_block(pcur),
2433  rec, index, offsets, thr);
2434  if (err != DB_SUCCESS) {
2435  mtr_commit(&mtr);
2436  goto exit_func;
2437  }
2438  }
2439 
2440  ut_ad(lock_trx_has_rec_x_lock(thr_get_trx(thr), index->table,
2441  btr_pcur_get_block(pcur),
2442  page_rec_get_heap_no(rec)));
2443 
2444  /* NOTE: the following function calls will also commit mtr */
2445 
2446  if (node->is_delete) {
2447  err = row_upd_del_mark_clust_rec(
2448  node, index, offsets, thr, referenced, &mtr);
2449 
2450  if (err == DB_SUCCESS) {
2451  node->state = UPD_NODE_UPDATE_ALL_SEC;
2452  node->index = dict_table_get_next_index(index);
2453  }
2454 
2455  goto exit_func;
2456  }
2457 
2458  /* If the update is made for MySQL, we already have the update vector
2459  ready, else we have to do some evaluation: */
2460 
2461  if (UNIV_UNLIKELY(!node->in_mysql_interface)) {
2462  /* Copy the necessary columns from clust_rec and calculate the
2463  new values to set */
2464  row_upd_copy_columns(rec, offsets,
2465  UT_LIST_GET_FIRST(node->columns));
2466  row_upd_eval_new_vals(node->update);
2467  }
2468 
2469  if (node->cmpl_info & UPD_NODE_NO_ORD_CHANGE) {
2470 
2471  err = row_upd_clust_rec(
2472  node, index, offsets, &heap, thr, &mtr);
2473  goto exit_func;
2474  }
2475 
2476  row_upd_store_row(node);
2477 
2478  if (row_upd_changes_ord_field_binary(index, node->update, thr,
2479  node->row, node->ext)) {
2480 
2481  /* Update causes an ordering field (ordering fields within
2482  the B-tree) of the clustered index record to change: perform
2483  the update by delete marking and inserting.
2484 
2485  TODO! What to do to the 'Halloween problem', where an update
2486  moves the record forward in index so that it is again
2487  updated when the cursor arrives there? Solution: the
2488  read operation must check the undo record undo number when
2489  choosing records to update. MySQL solves now the problem
2490  externally! */
2491 
2492  err = row_upd_clust_rec_by_insert(
2493  node, index, thr, referenced, &mtr);
2494 
2495  if (err != DB_SUCCESS) {
2496 
2497  goto exit_func;
2498  }
2499 
2500  node->state = UPD_NODE_UPDATE_ALL_SEC;
2501  } else {
2502  err = row_upd_clust_rec(
2503  node, index, offsets, &heap, thr, &mtr);
2504 
2505  if (err != DB_SUCCESS) {
2506 
2507  goto exit_func;
2508  }
2509 
2510  node->state = UPD_NODE_UPDATE_SOME_SEC;
2511  }
2512 
2513  node->index = dict_table_get_next_index(index);
2514 
2515 exit_func:
2516  if (heap) {
2517  mem_heap_free(heap);
2518  }
2519  return(err);
2520 }
2521 
2522 /***********************************************************/
2528 static __attribute__((nonnull, warn_unused_result))
2529 dberr_t
2530 row_upd(
2531 /*====*/
2532  upd_node_t* node,
2533  que_thr_t* thr)
2534 {
2535  dberr_t err = DB_SUCCESS;
2536 
2537  ut_ad(node && thr);
2538 
2539  if (UNIV_LIKELY(node->in_mysql_interface)) {
2540 
2541  /* We do not get the cmpl_info value from the MySQL
2542  interpreter: we must calculate it on the fly: */
2543 
2544  if (node->is_delete
2546  node->table, node->update)) {
2547  node->cmpl_info = 0;
2548  } else {
2549  node->cmpl_info = UPD_NODE_NO_ORD_CHANGE;
2550  }
2551  }
2552 
2553  switch (node->state) {
2554  case UPD_NODE_UPDATE_CLUSTERED:
2555  case UPD_NODE_INSERT_CLUSTERED:
2556  case UPD_NODE_INSERT_BLOB:
2557  log_free_check();
2558  err = row_upd_clust_step(node, thr);
2559 
2560  if (err != DB_SUCCESS) {
2561 
2562  return(err);
2563  }
2564  }
2565 
2566  if (node->index == NULL
2567  || (!node->is_delete
2568  && (node->cmpl_info & UPD_NODE_NO_ORD_CHANGE))) {
2569 
2570  return(DB_SUCCESS);
2571  }
2572 
2573 #ifdef UNIV_DEBUG
2574  /* Work around Bug#14626800 ASSERTION FAILURE IN DEBUG_SYNC().
2575  Once it is fixed, remove the 'ifdef', 'if' and this comment. */
2576  if (!thr_get_trx(thr)->ddl) {
2577  DEBUG_SYNC_C_IF_THD(thr_get_trx(thr)->mysql_thd,
2578  "after_row_upd_clust");
2579  }
2580 #endif /* UNIV_DEBUG */
2581 
2582  DBUG_EXECUTE_IF("row_upd_skip_sec", node->index = NULL;);
2583 
2584  do {
2585  /* Skip corrupted index */
2586  dict_table_skip_corrupt_index(node->index);
2587 
2588  if (!node->index) {
2589  break;
2590  }
2591 
2592  if (node->index->type != DICT_FTS) {
2593  err = row_upd_sec_step(node, thr);
2594 
2595  if (err != DB_SUCCESS) {
2596 
2597  return(err);
2598  }
2599  }
2600 
2601  node->index = dict_table_get_next_index(node->index);
2602  } while (node->index != NULL);
2603 
2604  ut_ad(err == DB_SUCCESS);
2605 
2606  /* Do some cleanup */
2607 
2608  if (node->row != NULL) {
2609  node->row = NULL;
2610  node->ext = NULL;
2611  node->upd_row = NULL;
2612  node->upd_ext = NULL;
2613  mem_heap_empty(node->heap);
2614  }
2615 
2616  node->state = UPD_NODE_UPDATE_CLUSTERED;
2617 
2618  return(err);
2619 }
2620 
2621 /***********************************************************/
2625 UNIV_INTERN
2626 que_thr_t*
2627 row_upd_step(
2628 /*=========*/
2629  que_thr_t* thr)
2630 {
2631  upd_node_t* node;
2632  sel_node_t* sel_node;
2633  que_node_t* parent;
2634  dberr_t err = DB_SUCCESS;
2635  trx_t* trx;
2636 
2637  ut_ad(thr);
2638 
2639  trx = thr_get_trx(thr);
2640 
2641  trx_start_if_not_started_xa(trx);
2642 
2643  node = static_cast<upd_node_t*>(thr->run_node);
2644 
2645  sel_node = node->select;
2646 
2647  parent = que_node_get_parent(node);
2648 
2649  ut_ad(que_node_get_type(node) == QUE_NODE_UPDATE);
2650 
2651  if (thr->prev_node == parent) {
2652  node->state = UPD_NODE_SET_IX_LOCK;
2653  }
2654 
2655  if (node->state == UPD_NODE_SET_IX_LOCK) {
2656 
2657  if (!node->has_clust_rec_x_lock) {
2658  /* It may be that the current session has not yet
2659  started its transaction, or it has been committed: */
2660 
2661  err = lock_table(0, node->table, LOCK_IX, thr);
2662 
2663  if (err != DB_SUCCESS) {
2664 
2665  goto error_handling;
2666  }
2667  }
2668 
2669  node->state = UPD_NODE_UPDATE_CLUSTERED;
2670 
2671  if (node->searched_update) {
2672  /* Reset the cursor */
2673  sel_node->state = SEL_NODE_OPEN;
2674 
2675  /* Fetch a row to update */
2676 
2677  thr->run_node = sel_node;
2678 
2679  return(thr);
2680  }
2681  }
2682 
2683  /* sel_node is NULL if we are in the MySQL interface */
2684 
2685  if (sel_node && (sel_node->state != SEL_NODE_FETCH)) {
2686 
2687  if (!node->searched_update) {
2688  /* An explicit cursor should be positioned on a row
2689  to update */
2690 
2691  ut_error;
2692 
2693  err = DB_ERROR;
2694 
2695  goto error_handling;
2696  }
2697 
2698  ut_ad(sel_node->state == SEL_NODE_NO_MORE_ROWS);
2699 
2700  /* No more rows to update, or the select node performed the
2701  updates directly in-place */
2702 
2703  thr->run_node = parent;
2704 
2705  return(thr);
2706  }
2707 
2708  /* DO THE CHECKS OF THE CONSISTENCY CONSTRAINTS HERE */
2709 
2710  err = row_upd(node, thr);
2711 
2712 error_handling:
2713  trx->error_state = err;
2714 
2715  if (err != DB_SUCCESS) {
2716  return(NULL);
2717  }
2718 
2719  /* DO THE TRIGGER ACTIONS HERE */
2720 
2721  if (node->searched_update) {
2722  /* Fetch next row to update */
2723 
2724  thr->run_node = sel_node;
2725  } else {
2726  /* It was an explicit cursor update */
2727 
2728  thr->run_node = parent;
2729  }
2730 
2731  node->state = UPD_NODE_UPDATE_CLUSTERED;
2732 
2733  return(thr);
2734 }
2735 #endif /* !UNIV_HOTBACKUP */