MySQL 5.6.14 Source Code Document
 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Groups Pages
sql_update.cc
1 /* Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
2 
3  This program is free software; you can redistribute it and/or modify
4  it under the terms of the GNU General Public License as published by
5  the Free Software Foundation; version 2 of the License.
6 
7  This program is distributed in the hope that it will be useful,
8  but WITHOUT ANY WARRANTY; without even the implied warranty of
9  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10  GNU General Public License for more details.
11 
12  You should have received a copy of the GNU General Public License
13  along with this program; if not, write to the Free Software Foundation,
14  51 Franklin Street, Suite 500, Boston, MA 02110-1335 USA */
15 
16 
17 /*
18  Single table and multi table updates of tables.
19  Multi-table updates were introduced by Sinisa & Monty
20 */
21 
22 #include "my_global.h" /* NO_EMBEDDED_ACCESS_CHECKS */
23 #include "sql_priv.h"
24 #include "unireg.h" // REQUIRED: for other includes
25 #include "sql_update.h"
26 #include "sql_cache.h" // query_cache_*
27 #include "sql_base.h" // close_tables_for_reopen
28 #include "sql_parse.h" // cleanup_items
29 #include "sql_partition.h" // partition_key_modified
30 #include "sql_select.h"
31 #include "sql_view.h" // check_key_in_view
32 #include "sp_head.h"
33 #include "sql_trigger.h"
34 #include "probes_mysql.h"
35 #include "debug_sync.h"
36 #include "key.h" // is_key_used
37 #include "sql_acl.h" // *_ACL, check_grant
38 #include "records.h" // init_read_record,
39  // end_read_record
40 #include "filesort.h" // filesort
41 #include "opt_explain.h"
42 #include "sql_derived.h" // mysql_derived_prepare,
43  // mysql_handle_derived,
44  // mysql_derived_filling
45 #include "opt_trace.h" // Opt_trace_object
46 #include "sql_tmp_table.h" // tmp tables
47 #include "sql_optimizer.h" // remove_eq_conds
48 #include "sql_resolver.h" // setup_order, fix_inner_refs
49 
54 bool records_are_comparable(const TABLE *table) {
55  return ((table->file->ha_table_flags() & HA_PARTIAL_COLUMN_READ) == 0) ||
56  bitmap_is_subset(table->write_set, table->read_set);
57 }
58 
59 
/**
  Check whether the row has changed, i.e. whether the "after" image
  (table->record[0]) differs from the "before" image (table->record[1]).

  Only columns in table->write_set are considered when the engine may have
  performed a partial-column read; otherwise, after a fast path for
  fixed-size rows, NULL flags and all written columns are compared.
  The caller must guarantee records_are_comparable(table) (asserted below),
  and must have saved the pre-update row into record[1] beforehand
  (see store_record() in mysql_update()).

  @param table  table whose two record buffers are compared

  @return TRUE if the two row images differ, FALSE if they are identical.
*/
bool compare_records(const TABLE *table)
{
  DBUG_ASSERT(records_are_comparable(table));

  if ((table->file->ha_table_flags() & HA_PARTIAL_COLUMN_READ) != 0)
  {
    /*
      Storage engine may not have read all columns of the record. Fields
      (including NULL bits) not in the write_set may not have been read and
      can therefore not be compared.
    */
    for (Field **ptr= table->field ; *ptr != NULL; ptr++)
    {
      Field *field= *ptr;
      if (bitmap_is_set(table->write_set, field->field_index))
      {
        if (field->real_maybe_null())
        {
          uchar null_byte_index= field->null_offset();

          /* Compare this field's NULL bit in the two row images. */
          if (((table->record[0][null_byte_index]) & field->null_bit) !=
              ((table->record[1][null_byte_index]) & field->null_bit))
            return TRUE;
        }
        /*
          rec_buff_length is the distance from record[0] to record[1],
          so this compares the field's value in the two buffers.
        */
        if (field->cmp_binary_offset(table->s->rec_buff_length))
          return TRUE;
      }
    }
    return FALSE;
  }

  /*
    The storage engine has read all columns, so it's safe to compare all bits
    including those not in the write_set. This is cheaper than the
    field-by-field comparison done above.
  */
  if (table->s->blob_fields + table->s->varchar_fields == 0)
    // Fixed-size record: do bitwise comparison of the records
    return cmp_record(table,record[1]);
  /* Compare null bits */
  if (memcmp(table->null_flags,
             table->null_flags+table->s->rec_buff_length,
             table->s->null_bytes))
    return TRUE;                                // Diff in NULL value
  /* Compare updated fields */
  for (Field **ptr= table->field ; *ptr ; ptr++)
  {
    if (bitmap_is_set(table->write_set, (*ptr)->field_index) &&
        (*ptr)->cmp_binary_offset(table->s->rec_buff_length))
      return TRUE;
  }
  return FALSE;
}
126 
127 
128 /*
129  check that all fields are real fields
130 
131  SYNOPSIS
132  check_fields()
133  thd thread handler
134  items Items for check
135 
136  RETURN
137  TRUE Items can't be used in UPDATE
138  FALSE Items are OK
139 */
140 
141 static bool check_fields(THD *thd, List<Item> &items)
142 {
143  List_iterator<Item> it(items);
144  Item *item;
145  Item_field *field;
146 
147  while ((item= it++))
148  {
149  if (!(field= item->field_for_view_update()))
150  {
151  /* item has name, because it comes from VIEW SELECT list */
152  my_error(ER_NONUPDATEABLE_COLUMN, MYF(0), item->item_name.ptr());
153  return TRUE;
154  }
155  /*
156  we make temporary copy of Item_field, to avoid influence of changing
157  result_field on Item_ref which refer on this field
158  */
159  thd->change_item_tree(it.ref(), new Item_field(thd, field));
160  }
161  return FALSE;
162 }
163 
164 
174 static bool check_constant_expressions(List<Item> &values)
175 {
176  Item *value;
177  List_iterator_fast<Item> v(values);
178  DBUG_ENTER("check_constant_expressions");
179 
180  while ((value= v++))
181  {
182  if (!value->const_item())
183  {
184  DBUG_PRINT("exit", ("expression is not constant"));
185  DBUG_RETURN(false);
186  }
187  }
188  DBUG_PRINT("exit", ("expression is constant"));
189  DBUG_RETURN(true);
190 }
191 
192 
/*
  Process usual (single-table) UPDATE

  SYNOPSIS
    mysql_update()
    thd                 thread handler
    table_list          table (or updatable view) to update
    fields              fields for update
    values              values of fields for update
    conds               WHERE clause expression
    order_num           number of elements in ORDER BY clause
    order               ORDER BY clause list
    limit               limit clause
    handle_duplicates   how to handle duplicates
    ignore              TRUE for UPDATE IGNORE
    found_return        OUT: number of rows matching the WHERE clause
    updated_return      OUT: number of rows actually changed

  RETURN
    0  - OK
    2  - privilege check and opening table passed, but we need to convert to
         multi-update because of view substitution
    1  - error
*/

int mysql_update(THD *thd,
                 TABLE_LIST *table_list,
                 List<Item> &fields,
                 List<Item> &values,
                 Item *conds,
                 uint order_num, ORDER *order,
                 ha_rows limit,
                 enum enum_duplicates handle_duplicates, bool ignore,
                 ha_rows *found_return, ha_rows *updated_return)
{
  bool          using_limit= limit != HA_POS_ERROR;
  bool          safe_update= test(thd->variables.option_bits & OPTION_SAFE_UPDATES);
  bool          used_key_is_modified= FALSE, transactional_table, will_batch;
  int           res;
  int           error= 1;
  int           loc_error;
  uint          used_index, dup_key_found;
  bool          need_sort= TRUE;
  bool          reverse= FALSE;
  bool          using_filesort;
  bool          read_removal= false;
#ifndef NO_EMBEDDED_ACCESS_CHECKS
  uint          want_privilege;
#endif
  ha_rows       updated, found;
  key_map       old_covering_keys;
  TABLE         *table;
  SQL_SELECT    *select= NULL;
  READ_RECORD   info;
  SELECT_LEX    *select_lex= &thd->lex->select_lex;
  ulonglong     id;
  List<Item> all_fields;
  THD::killed_state killed_status= THD::NOT_KILLED;
  COPY_INFO update(COPY_INFO::UPDATE_OPERATION, &fields, &values);

  DBUG_ENTER("mysql_update");

  if (open_normal_and_derived_tables(thd, table_list, 0))
    DBUG_RETURN(1);

  if (table_list->multitable_view)
  {
    DBUG_ASSERT(table_list->view != 0);
    DBUG_PRINT("info", ("Switch to multi-update"));
    /* convert to multiupdate */
    DBUG_RETURN(2);
  }

  THD_STAGE_INFO(thd, stage_init);
  table= table_list->table;

  if (!table_list->updatable)
  {
    my_error(ER_NON_UPDATABLE_TABLE, MYF(0), table_list->alias, "UPDATE");
    DBUG_RETURN(1);
  }

  /* Calculate "table->covering_keys" based on the WHERE */
  table->covering_keys= table->s->keys_in_use;
  table->quick_keys.clear_all();
  table->possible_quick_keys.clear_all();

#ifndef NO_EMBEDDED_ACCESS_CHECKS
  /* Force privilege re-checking for views after they have been opened. */
  want_privilege= (table_list->view ? UPDATE_ACL :
                   table_list->grant.want_privilege);
#endif
  if (mysql_prepare_update(thd, table_list, &conds, order_num, order))
    DBUG_RETURN(1);

  old_covering_keys= table->covering_keys;      // Keys used in WHERE
  /* Check the fields we are going to modify */
#ifndef NO_EMBEDDED_ACCESS_CHECKS
  table_list->grant.want_privilege= table->grant.want_privilege= want_privilege;
  table_list->register_want_access(want_privilege);
#endif
  if (setup_fields_with_no_wrap(thd, Ref_ptr_array(),
                                fields, MARK_COLUMNS_WRITE, 0, 0))
    DBUG_RETURN(1);                     /* purecov: inspected */
  if (table_list->view && check_fields(thd, fields))
  {
    DBUG_RETURN(1);
  }
  if (!table_list->updatable || check_key_in_view(thd, table_list))
  {
    my_error(ER_NON_UPDATABLE_TABLE, MYF(0), table_list->alias, "UPDATE");
    DBUG_RETURN(1);
  }

  if (update.add_function_default_columns(table, table->write_set))
    DBUG_RETURN(1);

#ifndef NO_EMBEDDED_ACCESS_CHECKS
  /* Check values */
  table_list->grant.want_privilege= table->grant.want_privilege=
    (SELECT_ACL & ~table->grant.privilege);
#endif
  if (setup_fields(thd, Ref_ptr_array(), values, MARK_COLUMNS_READ, 0, 0))
  {
    free_underlaid_joins(thd, select_lex);
    DBUG_RETURN(1);                             /* purecov: inspected */
  }

  if (select_lex->inner_refs_list.elements &&
    fix_inner_refs(thd, all_fields, select_lex, select_lex->ref_pointer_array))
    DBUG_RETURN(1);

  if ((table->file->ha_table_flags() & HA_PARTIAL_COLUMN_READ) != 0 &&
      update.function_defaults_apply(table))
    /*
      A column is to be set to its ON UPDATE function default only if other
      columns of the row are changing. To know this, we must be able to
      compare the "before" and "after" value of those columns
      (i.e. records_are_comparable() must be true below). Thus, we must read
      those columns:
    */
    bitmap_union(table->read_set, table->write_set);

  // Don't count on usage of 'only index' when calculating which key to use
  table->covering_keys.clear_all();

  /*
    This must be done before partitioning pruning, since prune_partitions()
    uses the table->write_set to determine may prune locks too.
  */
  if (table->triggers)
    table->triggers->mark_fields_used(TRG_EVENT_UPDATE);

#ifdef WITH_PARTITION_STORAGE_ENGINE
  if (table->part_info)
  {
    if (prune_partitions(thd, table, conds))
      DBUG_RETURN(1);
    if (table->all_partitions_pruned_away)
    {
      /* No matching records */
      if (thd->lex->describe)
      {
        error= explain_no_table(thd,
                                "No matching rows after partition pruning");
        goto exit_without_my_ok;
      }
      my_ok(thd);                               // No matching records
      DBUG_RETURN(0);
    }
  }
#endif
  if (lock_tables(thd, table_list, thd->lex->table_count, 0))
    DBUG_RETURN(1);

  // Must be done after lock_tables()
  if (conds)
  {
    COND_EQUAL *cond_equal= NULL;
    Item::cond_result result;
    if (table_list->check_option)
    {
      /*
        If this UPDATE is on a view with CHECK OPTION, Item_fields
        must not be replaced by constants. The reason is that when
        'conds' is optimized, 'check_option' is also optimized (it is
        part of 'conds'). Const replacement is fine for 'conds'
        because it is evaluated on a read row, but 'check_option' is
        evaluated on a row with updated fields and needs those updated
        values to be correct.

        Example:
        CREATE VIEW v1 ... WHERE fld < 2 WITH CHECK_OPTION
        UPDATE v1 SET fld=4 WHERE fld=1

        check_option is "(fld < 2)"
        conds is "(fld < 2) and (fld = 1)"

        optimize_cond() would propagate fld=1 to the first argument of
        the AND to create "(1 < 2) AND (fld = 1)". After this,
        check_option would be "(1 < 2)". But for check_option to work
        it must be evaluated with the *updated* value of fld: 4.
        Otherwise it will evaluate to true even when it should be
        false, which is the case for the UPDATE statement above.

        Thus, if there is a check_option, we do only the "safe" parts
        of optimize_cond(): Item_row -> Item_func_eq conversion (to
        enable range access) and removal of always true/always false
        predicates.

        An alternative to restricting this optimization of 'conds' in
        the presense of check_option: the Item-tree of 'check_option'
        could be cloned before optimizing 'conds' and thereby avoid
        const replacement. However, at the moment there is no such
        thing as Item::clone().
      */
      conds= build_equal_items(thd, conds, NULL, false,
                               select_lex->join_list, &cond_equal);
      conds= remove_eq_conds(thd, conds, &result);
    }
    else
      conds= optimize_cond(thd, conds, &cond_equal, select_lex->join_list,
                           true, &result);

    if (result == Item::COND_FALSE)
    {
      limit= 0;                                 // Impossible WHERE
      if (thd->lex->describe)
      {
        error= explain_no_table(thd, "Impossible WHERE");
        goto exit_without_my_ok;
      }
    }
    if (conds)
    {
      conds= substitute_for_best_equal_field(conds, cond_equal, 0);
      conds->update_used_tables();
    }
  }

#ifdef WITH_PARTITION_STORAGE_ENGINE
  /*
    Also try a second time after locking, to prune when subqueries and
    stored programs can be evaluated.
  */
  if (table->part_info)
  {
    if (prune_partitions(thd, table, conds))
      DBUG_RETURN(1);
    if (table->all_partitions_pruned_away)
    {
      /* No matching records */
      if (thd->lex->describe)
      {
        error= explain_no_table(thd,
                                "No matching rows after partition pruning");
        goto exit_without_my_ok;
      }
      my_ok(thd);                               // No matching records
      DBUG_RETURN(0);
    }
  }
#endif
  /* Update the table->file->stats.records number */
  table->file->info(HA_STATUS_VARIABLE | HA_STATUS_NO_LOCK);

  select= make_select(table, 0, 0, conds, 0, &error);

  { // Enter scope for optimizer trace wrapper
    Opt_trace_object wrapper(&thd->opt_trace);
    wrapper.add_utf8_table(table);

    if (error || !limit ||
        (select && select->check_quick(thd, safe_update, limit)))
    {
      if (thd->lex->describe && !error && !thd->is_error())
      {
        error= explain_no_table(thd, "Impossible WHERE");
        goto exit_without_my_ok;
      }
      delete select;
      free_underlaid_joins(thd, select_lex);
      /*
        There was an error or the error was already sent by
        the quick select evaluation.
        TODO: Add error code output parameter to Item::val_xxx() methods.
        Currently they rely on the user checking DA for
        errors when unwinding the stack after calling Item::val_xxx().
      */
      if (error || thd->is_error())
      {
        DBUG_RETURN(1);                         // Error in where
      }

      /* WHERE matched nothing: report "0 rows matched / 0 changed". */
      char buff[MYSQL_ERRMSG_SIZE];
      my_snprintf(buff, sizeof(buff), ER(ER_UPDATE_INFO), 0, 0,
                  (ulong) thd->get_stmt_da()->current_statement_warn_count());
      my_ok(thd, 0, 0, buff);

      DBUG_PRINT("info",("0 records updated"));
      DBUG_RETURN(0);
    }
  } // Ends scope for optimizer trace wrapper

  /* If running in safe sql mode, don't allow updates without keys */
  if (table->quick_keys.is_clear_all())
  {
    thd->server_status|=SERVER_QUERY_NO_INDEX_USED;
    if (safe_update && !using_limit)
    {
      my_message(ER_UPDATE_WITHOUT_KEY_IN_SAFE_MODE,
                 ER(ER_UPDATE_WITHOUT_KEY_IN_SAFE_MODE), MYF(0));
      goto exit_without_my_ok;
    }
  }
  init_ftfuncs(thd, select_lex, 1);

  table->update_const_key_parts(conds);
  order= simple_remove_const(order, conds);

  used_index= get_index_for_order(order, table, select, limit,
                                  &need_sort, &reverse);
  if (need_sort)
  { // Assign table scan index to check below for modified key fields:
    used_index= table->file->key_used_on_scan;
  }
  if (used_index != MAX_KEY)
  { // Check if we are modifying a key that we are used to search with:
    used_key_is_modified= is_key_used(table, used_index, table->write_set);
  }
  else if (select && select->quick)
  {
    /*
      select->quick != NULL and used_index == MAX_KEY happens for index
      merge and should be handled in a different way.
    */
    used_key_is_modified= (!select->quick->unique_key_range() &&
                           select->quick->is_keys_used(table->write_set));
  }

#ifdef WITH_PARTITION_STORAGE_ENGINE
  used_key_is_modified|= partition_key_modified(table, table->write_set);
#endif

  using_filesort= order && (need_sort||used_key_is_modified);
  if (thd->lex->describe)
  {
    const bool using_tmp_table= !using_filesort &&
                                (used_key_is_modified || order);
    error= explain_single_table_modification(thd, table, select, used_index,
                                             limit, using_tmp_table,
                                             using_filesort,
                                             true,
                                             used_key_is_modified);
    goto exit_without_my_ok;
  }

  if (used_key_is_modified || order)
  {
    /*
      We can't update table directly;  We must first search after all
      matching rows before updating the table!
    */

    if (used_index < MAX_KEY && old_covering_keys.is_set(used_index))
      table->set_keyread(true);

    /* note: We avoid sorting if we sort on the used index */
    if (using_filesort)
    {
      /*
        Doing an ORDER BY;  Let filesort find and sort the rows we are going
        to update
        NOTE: filesort will call table->prepare_for_position()
      */
      ha_rows examined_rows;
      ha_rows found_rows;
      Filesort fsort(order, limit, select);

      table->sort.io_cache = (IO_CACHE *) my_malloc(sizeof(IO_CACHE),
                                                    MYF(MY_FAE | MY_ZEROFILL));
      if ((table->sort.found_records= filesort(thd, table, &fsort, true,
                                               &examined_rows, &found_rows))
          == HA_POS_ERROR)
      {
        goto exit_without_my_ok;
      }
      thd->inc_examined_row_count(examined_rows);
      /*
        Filesort has already found and selected the rows we want to update,
        so we don't need the where clause
      */
      delete select;
      select= 0;
    }
    else
    {
      /*
        We are doing a search on a key that is updated. In this case
        we go trough the matching rows, save a pointer to them and
        update these in a separate loop based on the pointer.
      */
      table->prepare_for_position();

      IO_CACHE tempfile;
      if (open_cached_file(&tempfile, mysql_tmpdir,TEMP_PREFIX,
                           DISK_BUFFER_SIZE, MYF(MY_WME)))
        goto exit_without_my_ok;

      /* If quick select is used, initialize it before retrieving rows. */
      if (select && select->quick && (error= select->quick->reset()))
      {
        close_cached_file(&tempfile);
        table->file->print_error(error, MYF(0));
        goto exit_without_my_ok;
      }
      table->file->try_semi_consistent_read(1);

      /*
        When we get here, we have one of the following options:
        A. used_index == MAX_KEY
        This means we should use full table scan, and start it with
        init_read_record call
        B. used_index != MAX_KEY
        B.1 quick select is used, start the scan with init_read_record
        B.2 quick select is not used, this is full index scan (with LIMIT)
        Full index scan must be started with init_read_record_idx
      */

      if (used_index == MAX_KEY || (select && select->quick))
        error= init_read_record(&info, thd, table, select, 0, 1, FALSE);
      else
        error= init_read_record_idx(&info, thd, table, 1, used_index, reverse);

      if (error)
      {
        close_cached_file(&tempfile);
        goto exit_without_my_ok;
      }

      THD_STAGE_INFO(thd, stage_searching_rows_for_update);
      ha_rows tmp_limit= limit;

      /* Phase 1: collect row pointers of all matching rows into tempfile. */
      while (!(error=info.read_record(&info)) && !thd->killed)
      {
        thd->inc_examined_row_count(1);
        bool skip_record= FALSE;
        if (select && select->skip_record(thd, &skip_record))
        {
          error= 1;
          /*
            Don't try unlocking the row if skip_record reported an error since
            in this case the transaction might have been rolled back already.
          */
          break;
        }
        if (!skip_record)
        {
          if (table->file->was_semi_consistent_read())
            continue;  /* repeat the read of the same row if it still exists */

          table->file->position(table->record[0]);
          if (my_b_write(&tempfile,table->file->ref,
                         table->file->ref_length))
          {
            error=1; /* purecov: inspected */
            break; /* purecov: inspected */
          }
          if (!--limit && using_limit)
          {
            error= -1;
            break;
          }
        }
        else
          table->file->unlock_row();
      }
      if (thd->killed && !error)
        error= 1;                               // Aborted
      limit= tmp_limit;
      table->file->try_semi_consistent_read(0);
      end_read_record(&info);

      /* Change select to use tempfile */
      if (select)
      {
        select->set_quick(NULL);
        if (select->free_cond)
          delete select->cond;
        select->cond= NULL;
      }
      else
      {
        select= new SQL_SELECT;
        select->head=table;
      }
      if (reinit_io_cache(&tempfile,READ_CACHE,0L,0,0))
        error=1; /* purecov: inspected */
      select->file=tempfile;                    // Read row ptrs from this file
      if (error >= 0)
        goto exit_without_my_ok;
    }
    if (used_index < MAX_KEY && old_covering_keys.is_set(used_index))
      table->set_keyread(false);
  }

  if (ignore)
    table->file->extra(HA_EXTRA_IGNORE_DUP_KEY);

  if (select && select->quick && (error= select->quick->reset()))
  {
    table->file->print_error(error, MYF(0));
    goto exit_without_my_ok;
  }
  table->file->try_semi_consistent_read(1);
  if ((error= init_read_record(&info, thd, table, select, 0, 1, FALSE)))
    goto exit_without_my_ok;

  updated= found= 0;
  /*
    Generate an error (in TRADITIONAL mode) or warning
    when trying to set a NOT NULL field to NULL.
  */
  thd->count_cuted_fields= CHECK_FIELD_WARN;
  thd->cuted_fields=0L;
  THD_STAGE_INFO(thd, stage_updating);

  transactional_table= table->file->has_transactions();
  thd->abort_on_warning= (!ignore && thd->is_strict_mode());

  if (table->triggers &&
      table->triggers->has_triggers(TRG_EVENT_UPDATE,
                                    TRG_ACTION_AFTER))
  {
    /*
      The table has AFTER UPDATE triggers that might access to subject
      table and therefore might need update to be done immediately.
      So we turn-off the batching.
    */
    (void) table->file->extra(HA_EXTRA_UPDATE_CANNOT_BATCH);
    will_batch= FALSE;
  }
  else
    will_batch= !table->file->start_bulk_update();

  if ((table->file->ha_table_flags() & HA_READ_BEFORE_WRITE_REMOVAL) &&
      !ignore && !using_limit &&
      select && select->quick && select->quick->index != MAX_KEY &&
      check_constant_expressions(values))
    read_removal= table->check_read_removal(select->quick->index);

  /* Main update loop: read each (matching) row and update it in place. */
  while (!(error=info.read_record(&info)) && !thd->killed)
  {
    thd->inc_examined_row_count(1);
    bool skip_record;
    if (!select || (!select->skip_record(thd, &skip_record) && !skip_record))
    {
      if (table->file->was_semi_consistent_read())
        continue;  /* repeat the read of the same row if it still exists */

      store_record(table,record[1]);
      if (fill_record_n_invoke_before_triggers(thd, fields, values, 0,
                                               table->triggers,
                                               TRG_EVENT_UPDATE))
        break; /* purecov: inspected */

      found++;

      if (!records_are_comparable(table) || compare_records(table))
      {
        if ((res= table_list->view_check_option(thd, ignore)) !=
            VIEW_CHECK_OK)
        {
          found--;
          if (res == VIEW_CHECK_SKIP)
            continue;
          else if (res == VIEW_CHECK_ERROR)
          {
            error= 1;
            break;
          }
        }

        /*
          In order to keep MySQL legacy behavior, we do this update *after*
          the CHECK OPTION test. Proper behavior is probably to throw an
          error, though.
        */
        update.set_function_defaults(table);

        if (will_batch)
        {
          /*
            Typically a batched handler can execute the batched jobs when:
            1) When specifically told to do so
            2) When it is not a good idea to batch anymore
            3) When it is necessary to send batch for other reasons
            (One such reason is when READ's must be performed)

            1) is covered by exec_bulk_update calls.
            2) and 3) is handled by the bulk_update_row method.

            bulk_update_row can execute the updates including the one
            defined in the bulk_update_row or not including the row
            in the call. This is up to the handler implementation and can
            vary from call to call.

            The dup_key_found reports the number of duplicate keys found
            in those updates actually executed. It only reports those if
            the extra call with HA_EXTRA_IGNORE_DUP_KEY have been issued.
            If this hasn't been issued it returns an error code and can
            ignore this number. Thus any handler that implements batching
            for UPDATE IGNORE must also handle this extra call properly.

            If a duplicate key is found on the record included in this
            call then it should be included in the count of dup_key_found
            and error should be set to 0 (only if these errors are ignored).
          */
          error= table->file->ha_bulk_update_row(table->record[1],
                                                 table->record[0],
                                                 &dup_key_found);
          limit+= dup_key_found;
          updated-= dup_key_found;
        }
        else
        {
          /* Non-batched update */
          error= table->file->ha_update_row(table->record[1],
                                            table->record[0]);
        }
        if (!error || error == HA_ERR_RECORD_IS_THE_SAME)
        {
          if (error != HA_ERR_RECORD_IS_THE_SAME)
            updated++;
          else
            error= 0;  // Identical row counts as found but not updated.
        }
        else if (!ignore ||
                 table->file->is_fatal_error(error, HA_CHECK_DUP_KEY))
        {
          /*
            If (ignore && error is ignorable) we don't have to
            do anything; otherwise...
          */
          myf flags= 0;

          if (table->file->is_fatal_error(error, HA_CHECK_DUP_KEY))
            flags|= ME_FATALERROR; /* Other handler errors are fatal */

          table->file->print_error(error,MYF(flags));
          error= 1;
          break;
        }
      }

      if (table->triggers &&
          table->triggers->process_triggers(thd, TRG_EVENT_UPDATE,
                                            TRG_ACTION_AFTER, TRUE))
      {
        error= 1;
        break;
      }

      if (!--limit && using_limit)
      {
        /*
          We have reached end-of-file in most common situations where no
          batching has occurred and if batching was supposed to occur but
          no updates were made and finally when the batch execution was
          performed without error and without finding any duplicate keys.
          If the batched updates were performed with errors we need to
          check and if no error but duplicate key's found we need to
          continue since those are not counted for in limit.
        */
        if (will_batch &&
            ((error= table->file->exec_bulk_update(&dup_key_found)) ||
             dup_key_found))
        {
          if (error)
          {
            /* purecov: begin inspected */
            /*
              The handler should not report error of duplicate keys if they
              are ignored. This is a requirement on batching handlers.
            */
            table->file->print_error(error,MYF(0));
            error= 1;
            break;
            /* purecov: end */
          }
          /*
            Either an error was found and we are ignoring errors or there
            were duplicate keys found. In both cases we need to correct
            the counters and continue the loop.
          */
          limit= dup_key_found; //limit is 0 when we get here so need to +
          updated-= dup_key_found;
        }
        else
        {
          error= -1;                            // Simulate end of file
          break;
        }
      }
    }
    /*
      Don't try unlocking the row if skip_record reported an error since in
      this case the transaction might have been rolled back already.
    */
    else if (!thd->is_error())
      table->file->unlock_row();
    else
    {
      error= 1;
      break;
    }
    thd->get_stmt_da()->inc_current_row_for_warning();
    if (thd->is_error())
    {
      error= 1;
      break;
    }
  }
  table->auto_increment_field_not_null= FALSE;
  dup_key_found= 0;
  /*
    Caching the killed status to pass as the arg to query event constuctor;
    The cached value can not change whereas the killed status can
    (externally) since this point and change of the latter won't affect
    binlogging.
    It's assumed that if an error was set in combination with an effective
    killed status then the error is due to killing.
  */
  killed_status= thd->killed; // get the status of the volatile
  // simulated killing after the loop must be ineffective for binlogging
  DBUG_EXECUTE_IF("simulate_kill_bug27571",
                  {
                    thd->killed= THD::KILL_QUERY;
                  };);
  error= (killed_status == THD::NOT_KILLED)?  error : 1;

  if (error &&
      will_batch &&
      (loc_error= table->file->exec_bulk_update(&dup_key_found)))
    /*
      An error has occurred when a batched update was performed and returned
      an error indication. It cannot be an allowed duplicate key error since
      we require the batching handler to treat this as a normal behavior.

      Otherwise we simply remove the number of duplicate keys records found
      in the batched update.
    */
  {
    /* purecov: begin inspected */
    table->file->print_error(loc_error,MYF(ME_FATALERROR));
    error= 1;
    /* purecov: end */
  }
  else
    updated-= dup_key_found;
  if (will_batch)
    table->file->end_bulk_update();
  table->file->try_semi_consistent_read(0);

  if (read_removal)
  {
    /* Only handler knows how many records really was written */
    updated= table->file->end_read_removal();
    if (!records_are_comparable(table))
      found= updated;
  }

  if (!transactional_table && updated > 0)
    thd->transaction.stmt.mark_modified_non_trans_table();

  end_read_record(&info);
  delete select;
  THD_STAGE_INFO(thd, stage_end);
  (void) table->file->extra(HA_EXTRA_NO_IGNORE_DUP_KEY);

  /*
    Invalidate the table in the query cache if something changed.
    This must be before binlog writing and ha_autocommit_...
  */
  if (updated)
  {
    query_cache_invalidate3(thd, table_list, 1);
  }

  /*
    error < 0 means really no error at all: we processed all rows until the
    last one without error. error > 0 means an error (e.g. unique key
    violation and no IGNORE or REPLACE). error == 0 is also an error (if
    preparing the record or invoking before triggers fails). See
    ha_autocommit_or_rollback(error>=0) and DBUG_RETURN(error>=0) below.
    Sometimes we want to binlog even if we updated no rows, in case user used
    it to be sure master and slave are in same state.
  */
  if ((error < 0) || thd->transaction.stmt.cannot_safely_rollback())
  {
    if (mysql_bin_log.is_open())
    {
      int errcode= 0;
      if (error < 0)
        thd->clear_error();
      else
        errcode= query_error_code(thd, killed_status == THD::NOT_KILLED);

      if (thd->binlog_query(THD::ROW_QUERY_TYPE,
                            thd->query(), thd->query_length(),
                            transactional_table, FALSE, FALSE, errcode))
      {
        error=1;                                // Rollback update
      }
    }
  }
  DBUG_ASSERT(transactional_table || !updated ||
              thd->transaction.stmt.cannot_safely_rollback());
  free_underlaid_joins(thd, select_lex);

  /* If LAST_INSERT_ID(X) was used, report X */
  id= thd->arg_of_last_insert_id_function ?
    thd->first_successful_insert_id_in_prev_stmt : 0;

  if (error < 0)
  {
    char buff[MYSQL_ERRMSG_SIZE];
    my_snprintf(buff, sizeof(buff), ER(ER_UPDATE_INFO), (ulong) found,
                (ulong) updated,
                (ulong) thd->get_stmt_da()->current_statement_warn_count());
    my_ok(thd, (thd->client_capabilities & CLIENT_FOUND_ROWS) ? found : updated,
          id, buff);
    DBUG_PRINT("info",("%ld records updated", (long) updated));
  }
  thd->count_cuted_fields= CHECK_FIELD_IGNORE;  /* calc cuted fields */
  thd->abort_on_warning= 0;
  *found_return= found;
  *updated_return= updated;
  DBUG_RETURN((error >= 0 || thd->is_error()) ? 1 : 0);

exit_without_my_ok:
  /* Error/EXPLAIN path: release resources without sending an OK packet. */
  delete select;
  free_underlaid_joins(thd, select_lex);
  table->set_keyread(FALSE);
  thd->abort_on_warning= 0;
  DBUG_RETURN(error);
}
1037 
1038 /*
1039  Prepare items in UPDATE statement
1040 
1041  SYNOPSIS
1042  mysql_prepare_update()
1043  thd - thread handler
1044  table_list - global/local table list
1045  conds - conditions
1046  order_num - number of ORDER BY list entries
1047  order - ORDER BY clause list
1048 
1049  RETURN VALUE
1050  FALSE OK
1051  TRUE error
1052 */
1053 bool mysql_prepare_update(THD *thd, TABLE_LIST *table_list,
1054  Item **conds, uint order_num, ORDER *order)
1055 {
1056  Item *fake_conds= 0;
1057 #ifndef NO_EMBEDDED_ACCESS_CHECKS
1058  TABLE *table= table_list->table;
1059 #endif
1060  List<Item> all_fields;
1061  SELECT_LEX *select_lex= &thd->lex->select_lex;
1062  DBUG_ENTER("mysql_prepare_update");
1063 
1064 #ifndef NO_EMBEDDED_ACCESS_CHECKS
1065  table_list->grant.want_privilege= table->grant.want_privilege=
1066  (SELECT_ACL & ~table->grant.privilege);
1067  table_list->register_want_access(SELECT_ACL);
1068 #endif
1069 
1070  thd->lex->allow_sum_func= 0;
1071 
1072  if (setup_tables_and_check_access(thd, &select_lex->context,
1073  &select_lex->top_join_list,
1074  table_list,
1075  &select_lex->leaf_tables,
1076  FALSE, UPDATE_ACL, SELECT_ACL) ||
1077  setup_conds(thd, table_list, select_lex->leaf_tables, conds) ||
1078  select_lex->setup_ref_array(thd, order_num) ||
1079  setup_order(thd, select_lex->ref_pointer_array,
1080  table_list, all_fields, all_fields, order) ||
1081  setup_ftfuncs(select_lex))
1082  DBUG_RETURN(TRUE);
1083 
1084  /* Check that we are not using table that we are updating in a sub select */
1085  {
1086  TABLE_LIST *duplicate;
1087  if ((duplicate= unique_table(thd, table_list, table_list->next_global, 0)))
1088  {
1089  update_non_unique_table_error(table_list, "UPDATE", duplicate);
1090  DBUG_RETURN(TRUE);
1091  }
1092  }
1093  select_lex->fix_prepare_information(thd, conds, &fake_conds);
1094  DBUG_RETURN(FALSE);
1095 }
1096 
1097 
1098 /***************************************************************************
1099  Update multiple tables from join
1100 ***************************************************************************/
1101 
1102 /*
1103  Get table map for list of Item_field
1104 */
1105 
1106 static table_map get_table_map(List<Item> *items)
1107 {
1108  List_iterator_fast<Item> item_it(*items);
1109  Item_field *item;
1110  table_map map= 0;
1111 
1112  while ((item= (Item_field *) item_it++))
1113  map|= item->used_tables();
1114  DBUG_PRINT("info", ("table_map: 0x%08lx", (long) map));
1115  return map;
1116 }
1117 
1147 static
1148 bool unsafe_key_update(TABLE_LIST *leaves, table_map tables_for_update)
1149 {
1150  TABLE_LIST *tl= leaves;
1151 
1152  for (tl= leaves; tl ; tl= tl->next_leaf)
1153  {
1154  if (tl->table->map & tables_for_update)
1155  {
1156  TABLE *table1= tl->table;
1157  bool primkey_clustered= (table1->file->primary_key_is_clustered() &&
1158  table1->s->primary_key != MAX_KEY);
1159 
1160  bool table_partitioned= false;
1161 #ifdef WITH_PARTITION_STORAGE_ENGINE
1162  table_partitioned= (table1->part_info != NULL);
1163 #endif
1164 
1165  if (!table_partitioned && !primkey_clustered)
1166  continue;
1167 
1168  for (TABLE_LIST* tl2= tl->next_leaf; tl2 ; tl2= tl2->next_leaf)
1169  {
1170  /*
1171  Look at "next" tables only since all previous tables have
1172  already been checked
1173  */
1174  TABLE *table2= tl2->table;
1175  if (table2->map & tables_for_update && table1->s == table2->s)
1176  {
1177 #ifdef WITH_PARTITION_STORAGE_ENGINE
1178  // A table is updated through two aliases
1179  if (table_partitioned &&
1180  (partition_key_modified(table1, table1->write_set) ||
1181  partition_key_modified(table2, table2->write_set)))
1182  {
1183  // Partitioned key is updated
1184  my_error(ER_MULTI_UPDATE_KEY_CONFLICT, MYF(0),
1185  tl->belong_to_view ? tl->belong_to_view->alias
1186  : tl->alias,
1187  tl2->belong_to_view ? tl2->belong_to_view->alias
1188  : tl2->alias);
1189  return true;
1190  }
1191 #endif
1192 
1193  if (primkey_clustered)
1194  {
1195  // The primary key can cover multiple columns
1196  KEY key_info= table1->key_info[table1->s->primary_key];
1197  KEY_PART_INFO *key_part= key_info.key_part;
1198  KEY_PART_INFO *key_part_end= key_part +
1199  key_info.user_defined_key_parts;
1200 
1201  for (;key_part != key_part_end; ++key_part)
1202  {
1203  if (bitmap_is_set(table1->write_set, key_part->fieldnr-1) ||
1204  bitmap_is_set(table2->write_set, key_part->fieldnr-1))
1205  {
1206  // Clustered primary key is updated
1207  my_error(ER_MULTI_UPDATE_KEY_CONFLICT, MYF(0),
1208  tl->belong_to_view ? tl->belong_to_view->alias
1209  : tl->alias,
1210  tl2->belong_to_view ? tl2->belong_to_view->alias
1211  : tl2->alias);
1212  return true;
1213  }
1214  }
1215  }
1216  }
1217  }
1218  }
1219  }
1220  return false;
1221 }
1222 
1223 
1251 static bool multi_update_check_table_access(THD *thd, TABLE_LIST *table,
1252  table_map tables_for_update,
1253  bool *updated_arg)
1254 {
1255  if (table->view)
1256  {
1257  bool updated= false;
1258  /*
1259  If it is a mergeable view then we need to check privileges on its
1260  underlying tables being merged (including views). We also need to
1261  check if any of them is updated in order to find if this view is
1262  updated.
1263  If it is a non-mergeable view then it can't be updated.
1264  */
1265  DBUG_ASSERT(table->merge_underlying_list ||
1266  (!table->updatable &&
1267  !(table->table->map & tables_for_update)));
1268 
1269  for (TABLE_LIST *tbl= table->merge_underlying_list; tbl;
1270  tbl= tbl->next_local)
1271  {
1272  if (multi_update_check_table_access(thd, tbl, tables_for_update, &updated))
1273  return true;
1274  }
1275  if (check_table_access(thd, updated ? UPDATE_ACL: SELECT_ACL, table,
1276  FALSE, 1, FALSE))
1277  return true;
1278  *updated_arg|= updated;
1279  /* We only need SELECT privilege for columns in the values list. */
1280  table->grant.want_privilege= SELECT_ACL & ~table->grant.privilege;
1281  }
1282  else
1283  {
1284  /* Must be a base or derived table. */
1285  const bool updated= table->table->map & tables_for_update;
1286  if (check_table_access(thd, updated ? UPDATE_ACL : SELECT_ACL, table,
1287  FALSE, 1, FALSE))
1288  return true;
1289  *updated_arg|= updated;
1290  /* We only need SELECT privilege for columns in the values list. */
1291  if (!table->derived)
1292  {
1293  table->grant.want_privilege= SELECT_ACL & ~table->grant.privilege;
1294  table->table->grant.want_privilege= SELECT_ACL & ~table->table->grant.privilege;
1295  }
1296  }
1297  return false;
1298 }
1299 
1300 
1301 /*
1302  make update specific preparation and checks after opening tables
1303 
1304  SYNOPSIS
1305  mysql_multi_update_prepare()
1306  thd thread handler
1307 
1308  RETURN
1309  FALSE OK
1310  TRUE Error
1311 */
1312 
int mysql_multi_update_prepare(THD *thd)
{
  LEX *lex= thd->lex;
  TABLE_LIST *table_list= lex->query_tables;
  TABLE_LIST *tl, *leaves;
  List<Item> *fields= &lex->select_lex.item_list;  // columns of the SET list
  table_map tables_for_update;
  bool update_view= 0;          // set if any updated object is a view
  const bool using_lock_tables= thd->locked_tables_mode != LTM_NONE;
  bool original_multiupdate= (thd->lex->sql_command == SQLCOM_UPDATE_MULTI);
  DBUG_ENTER("mysql_multi_update_prepare");

  /* following need for prepared statements, to run next time multi-update */
  thd->lex->sql_command= SQLCOM_UPDATE_MULTI;

  /*
    Open tables and create derived ones, but do not lock and fill them yet.

    During prepare phase acquire only S metadata locks instead of SW locks to
    keep prepare of multi-UPDATE compatible with concurrent LOCK TABLES WRITE
    and global read lock.
  */
  if (original_multiupdate &&
      open_normal_and_derived_tables(thd, table_list,
                                     (thd->stmt_arena->is_stmt_prepare() ?
                                      MYSQL_OPEN_FORCE_SHARED_MDL : 0)))
    DBUG_RETURN(TRUE);
  /*
    setup_tables() need for VIEWs. JOIN::prepare() will call setup_tables()
    second time, but this call will do nothing (there are check for second
    call in setup_tables()).
  */

  if (setup_tables(thd, &lex->select_lex.context,
                   &lex->select_lex.top_join_list,
                   table_list, &lex->select_lex.leaf_tables,
                   FALSE))
    DBUG_RETURN(TRUE);

  /* Resolve the SET columns; marks them for write in write_set. */
  if (setup_fields_with_no_wrap(thd, Ref_ptr_array(),
                                *fields, MARK_COLUMNS_WRITE, 0, 0))
    DBUG_RETURN(TRUE);

  /*
    Setting tl->updating= false for view as it is correctly set
    for tables below
  */
  for (tl= table_list; tl ; tl= tl->next_local)
  {
    if (tl->view)
    {
      update_view= 1;
      tl->updating= false;
    }
  }

  /* Views need an extra check that no updated column is read-only. */
  if (update_view && check_fields(thd, *fields))
  {
    DBUG_RETURN(TRUE);
  }

  thd->table_map_for_update= tables_for_update= get_table_map(fields);

  leaves= lex->select_lex.leaf_tables;

  /* Reject updates of partition/clustered keys via multiple aliases. */
  if (unsafe_key_update(leaves, tables_for_update))
    DBUG_RETURN(true);

  /*
    Setup timestamp handling and locking mode
  */
  for (tl= leaves; tl; tl= tl->next_leaf)
  {
    TABLE *table= tl->table;

    /* if table will be updated then check that it is unique */
    if (table->map & tables_for_update)
    {
      if (!tl->updatable || check_key_in_view(thd, tl))
      {
        my_error(ER_NON_UPDATABLE_TABLE, MYF(0), tl->alias, "UPDATE");
        DBUG_RETURN(TRUE);
      }

      DBUG_PRINT("info",("setting table `%s` for update", tl->alias));
      /*
        If table will be updated we should not downgrade lock for it and
        leave it as is.
      */
    }
    else
    {
      DBUG_PRINT("info",("setting table `%s` for read-only", tl->alias));
      /*
        If we are using the binary log, we need TL_READ_NO_INSERT to get
        correct order of statements. Otherwise, we use a TL_READ lock to
        improve performance.
        We don't downgrade metadata lock from SW to SR in this case as
        there is no guarantee that the same ticket is not used by
        another table instance used by this statement which is going to
        be write-locked (for example, trigger to be invoked might try
        to update this table).
        Last argument routine_modifies_data for read_lock_type_for_table()
        is ignored, as prelocking placeholder will never be set here.
      */
      DBUG_ASSERT(tl->prelocking_placeholder == false);
      tl->lock_type= read_lock_type_for_table(thd, lex, tl, true);
      tl->updating= 0;
      /* Update TABLE::lock_type accordingly. */
      if (!tl->placeholder() && !using_lock_tables)
        tl->table->reginfo.lock_type= tl->lock_type;
    }
  }

  /*
    Check access privileges for tables being updated or read.
    Note that unlike in the above loop we need to iterate here not only
    through all leaf tables but also through all view hierarchy.
  */
  for (tl= table_list; tl; tl= tl->next_local)
  {
    bool not_used= false;
    if (multi_update_check_table_access(thd, tl, tables_for_update, &not_used))
      DBUG_RETURN(TRUE);
  }

  /* check single table update for view compound from several tables */
  for (tl= table_list; tl; tl= tl->next_local)
  {
    if (tl->effective_algorithm == VIEW_ALGORITHM_MERGE)
    {
      TABLE_LIST *for_update= 0;
      if (tl->check_single_table(&for_update, tables_for_update, tl))
      {
        my_error(ER_VIEW_MULTIUPDATE, MYF(0),
                 tl->view_db.str, tl->view_name.str);
        /* NOTE(review): returns -1 while other failures return TRUE (1);
           callers appear to treat any non-zero value as error. */
        DBUG_RETURN(-1);
      }
    }
  }

  /* @todo: downgrade the metadata locks here. */

  /*
    Check that we are not using table that we are updating, but we should
    skip all tables of UPDATE SELECT itself
  */
  lex->select_lex.exclude_from_table_unique_test= TRUE;
  for (tl= leaves; tl; tl= tl->next_leaf)
  {
    /* Only write-locked tables can conflict with themselves. */
    if (tl->lock_type != TL_READ &&
        tl->lock_type != TL_READ_NO_INSERT)
    {
      TABLE_LIST *duplicate;
      if ((duplicate= unique_table(thd, tl, table_list, 0)))
      {
        update_non_unique_table_error(table_list, "UPDATE", duplicate);
        DBUG_RETURN(TRUE);
      }
    }
  }
  /*
    Set exclude_from_table_unique_test value back to FALSE. It is needed for
    further check in multi_update::prepare whether to use record cache.
  */
  lex->select_lex.exclude_from_table_unique_test= FALSE;
  DBUG_RETURN (FALSE);
}
1481 
1482 
1483 /*
1484  Setup multi-update handling and call SELECT to do the join
1485 */
1486 
1487 bool mysql_multi_update(THD *thd,
1488  TABLE_LIST *table_list,
1489  List<Item> *fields,
1490  List<Item> *values,
1491  Item *conds,
1492  ulonglong options,
1493  enum enum_duplicates handle_duplicates,
1494  bool ignore,
1495  SELECT_LEX_UNIT *unit,
1496  SELECT_LEX *select_lex,
1497  multi_update **result)
1498 {
1499  bool res;
1500  DBUG_ENTER("mysql_multi_update");
1501 
1502  if (!(*result= new multi_update(table_list,
1503  thd->lex->select_lex.leaf_tables,
1504  fields, values,
1505  handle_duplicates, ignore)))
1506  {
1507  DBUG_RETURN(TRUE);
1508  }
1509 
1510  thd->abort_on_warning= (!ignore && thd->is_strict_mode());
1511 
1512  if (thd->lex->describe)
1513  res= explain_multi_table_modification(thd, *result);
1514  else
1515  {
1516  List<Item> total_list;
1517 
1518  res= mysql_select(thd,
1519  table_list, select_lex->with_wild,
1520  total_list,
1521  conds, (SQL_I_List<ORDER> *) NULL,
1522  (SQL_I_List<ORDER> *)NULL, (Item *) NULL,
1523  options | SELECT_NO_JOIN_CACHE | SELECT_NO_UNLOCK |
1525  *result, unit, select_lex);
1526 
1527  DBUG_PRINT("info",("res: %d report_error: %d",res, (int) thd->is_error()));
1528  res|= thd->is_error();
1529  if (unlikely(res))
1530  {
1531  /* If we had a another error reported earlier then this will be ignored */
1532  (*result)->send_error(ER_UNKNOWN_ERROR, ER(ER_UNKNOWN_ERROR));
1533  (*result)->abort_result_set();
1534  }
1535  }
1536  thd->abort_on_warning= 0;
1537  DBUG_RETURN(res);
1538 }
1539 
1540 
/*
  Construct the result sink for a multi-table UPDATE.
  Only stores the argument lists and resets counters/flags; all real work
  happens in prepare() and initialize_tables().
*/
multi_update::multi_update(TABLE_LIST *table_list,
                           TABLE_LIST *leaves_list,
                           List<Item> *field_list, List<Item> *value_list,
                           enum enum_duplicates handle_duplicates_arg,
                           bool ignore_arg)
  :all_tables(table_list), leaves(leaves_list), update_tables(0),
   tmp_tables(0), updated(0), found(0), fields(field_list),
   values(value_list), table_count(0), copy_field(0),
   handle_duplicates(handle_duplicates_arg), do_update(1), trans_safe(1),
   transactional_tables(0), ignore(ignore_arg), error_handled(0),
   update_operations(NULL)
{}
1553 
1554 
1555 /*
1556  Connect fields with tables and create list of tables that are updated
1557 */
1558 
/*
  Connect SET columns/values with their tables and build the list of tables
  that will actually be updated. Allocates per-updated-table bookkeeping
  (fields_for_table, values_for_table, tmp_table_param, update_operations).

  @return 0 on success, 1 on error.
*/
int multi_update::prepare(List<Item> &not_used_values,
                          SELECT_LEX_UNIT *lex_unit)
{
  TABLE_LIST *table_ref;
  SQL_I_List<TABLE_LIST> update;
  table_map tables_to_update;
  Item_field *item;
  List_iterator_fast<Item> field_it(*fields);
  List_iterator_fast<Item> value_it(*values);
  uint i, max_fields;
  uint leaf_table_count= 0;
  DBUG_ENTER("multi_update::prepare");

  /* Count (don't reject) truncated values while evaluating SET. */
  thd->count_cuted_fields= CHECK_FIELD_WARN;
  thd->cuted_fields=0L;
  THD_STAGE_INFO(thd, stage_updating_main_table);

  tables_to_update= get_table_map(fields);

  if (!tables_to_update)
  {
    my_message(ER_NO_TABLES_USED, ER(ER_NO_TABLES_USED), MYF(0));
    DBUG_RETURN(1);
  }

  /*
    We gather the set of columns read during evaluation of SET expression in
    TABLE::tmp_set by pointing TABLE::read_set to it and then restore it after
    setup_fields().
  */
  for (table_ref= leaves; table_ref; table_ref= table_ref->next_leaf)
  {
    TABLE *table= table_ref->table;
    if (tables_to_update & table->map)
    {
      DBUG_ASSERT(table->read_set == &table->def_read_set);
      table->read_set= &table->tmp_set;
      bitmap_clear_all(table->read_set);
    }
  }

  /*
    We have to check values after setup_tables to get covering_keys right in
    reference tables
  */

  int error= setup_fields(thd, Ref_ptr_array(),
                          *values, MARK_COLUMNS_READ, 0, 0);

  /* Restore read_set; keep the SET-expression reads merged into it. */
  for (table_ref= leaves; table_ref; table_ref= table_ref->next_leaf)
  {
    TABLE *table= table_ref->table;
    if (tables_to_update & table->map)
    {
      table->read_set= &table->def_read_set;
      bitmap_union(table->read_set, &table->tmp_set);
    }
  }

  if (error)
    DBUG_RETURN(1);

  /*
    Save tables being updated in update_tables
    update_table->shared is position for table
    Don't use key read on tables that are updated
  */

  update.empty();
  for (table_ref= leaves; table_ref; table_ref= table_ref->next_leaf)
  {
    /* TODO: add support of view of join support */
    TABLE *table=table_ref->table;
    leaf_table_count++;
    if (tables_to_update & table->map)
    {
      /* Copy the TABLE_LIST node so the update list has its own chain. */
      TABLE_LIST *tl= (TABLE_LIST*) thd->memdup(table_ref,
                                                sizeof(*tl));
      if (!tl)
        DBUG_RETURN(1);
      update.link_in_list(tl, &tl->next_local);
      tl->shared= table_count++;       // index into the per-table arrays
      table->no_keyread=1;
      table->covering_keys.clear_all();
      table->pos_in_table_list= tl;
      if (table->triggers &&
          table->triggers->has_triggers(TRG_EVENT_UPDATE,
                                        TRG_ACTION_AFTER))
      {
        /*
          The table has AFTER UPDATE triggers that might access to subject
          table and therefore might need update to be done immediately.
          So we turn-off the batching.
        */
        (void) table->file->extra(HA_EXTRA_UPDATE_CANNOT_BATCH);
      }
    }
  }


  table_count=  update.elements;
  update_tables= update.first;

  /* Per-updated-table arrays, indexed by TABLE_LIST::shared. */
  tmp_tables = (TABLE**) thd->calloc(sizeof(TABLE *) * table_count);
  tmp_table_param = (TMP_TABLE_PARAM*) thd->calloc(sizeof(TMP_TABLE_PARAM) *
                                                   table_count);
  fields_for_table= (List_item **) thd->alloc(sizeof(List_item *) *
                                              table_count);
  values_for_table= (List_item **) thd->alloc(sizeof(List_item *) *
                                              table_count);

  DBUG_ASSERT(update_operations == NULL);
  update_operations= (COPY_INFO**) thd->calloc(sizeof(COPY_INFO*) *
                                               table_count);

  /* thd->alloc/calloc report OOM through is_fatal_error, not NULL checks. */
  if (thd->is_fatal_error)
    DBUG_RETURN(1);
  for (i=0 ; i < table_count ; i++)
  {
    fields_for_table[i]= new List_item;
    values_for_table[i]= new List_item;
  }
  if (thd->is_fatal_error)
    DBUG_RETURN(1);

  /* Split fields into fields_for_table[] and values_by_table[] */

  while ((item= (Item_field *) field_it++))
  {
    Item *value= value_it++;
    uint offset= item->field->table->pos_in_table_list->shared;
    fields_for_table[offset]->push_back(item);
    values_for_table[offset]->push_back(value);
  }
  if (thd->is_fatal_error)
    DBUG_RETURN(1);

  /* Allocate copy fields */
  max_fields=0;
  for (i=0 ; i < table_count ; i++)
    set_if_bigger(max_fields, fields_for_table[i]->elements + leaf_table_count);
  copy_field= new Copy_field[max_fields];


  /* Build a COPY_INFO (handles ON UPDATE defaults) per updated table. */
  for (TABLE_LIST *ref= leaves; ref != NULL; ref= ref->next_leaf)
  {
    TABLE *table= ref->table;
    if (tables_to_update & table->map)
    {
      const uint position= table->pos_in_table_list->shared;
      List<Item> *cols= fields_for_table[position];
      List<Item> *vals= values_for_table[position];
      COPY_INFO *update=
        new (thd->mem_root) COPY_INFO(COPY_INFO::UPDATE_OPERATION, cols, vals);
      if (update == NULL ||
          update->add_function_default_columns(table, table->write_set))
        DBUG_RETURN(1);

      update_operations[position]= update;

      if ((table->file->ha_table_flags() & HA_PARTIAL_COLUMN_READ) != 0 &&
          update->function_defaults_apply(table))
      {
        /*
          A column is to be set to its ON UPDATE function default only if
          other columns of the row are changing. To know this, we must be able
          to compare the "before" and "after" value of those columns. Thus, we
          must read those columns:
        */
        bitmap_union(table->read_set, table->write_set);
      }
      /* All needed columns must be marked before prune_partitions(). */
      if (table->triggers)
        table->triggers->mark_fields_used(TRG_EVENT_UPDATE);
    }
  }

  DBUG_RETURN(thd->is_fatal_error != 0);
}
1738 
1739 
1740 /*
1741  Check if table is safe to update on fly
1742 
1743  SYNOPSIS
1744  safe_update_on_fly()
1745  thd Thread handler
1746  join_tab How table is used in join
1747  all_tables List of tables
1748 
1749  NOTES
1750  We can update the first table in join on the fly if we know that
1751  a row in this table will never be read twice. This is true under
1752  the following conditions:
1753 
1754  - No column is both written to and read in SET expressions.
1755 
1756  - We are doing a table scan and the data is in a separate file (MyISAM) or
1757  if we don't update a clustered key.
1758 
1759  - We are doing a range scan and we don't update the scan key or
1760  the primary key for a clustered table handler.
1761 
1762  - Table is not joined to itself.
1763 
1764  This function gets information about fields to be updated from
1765  the TABLE::write_set bitmap.
1766 
1767  WARNING
1768  This code is a bit dependent of how make_join_readinfo() works.
1769 
1770  The field table->tmp_set is used for keeping track of which fields are
1771  read during evaluation of the SET expression. See multi_update::prepare.
1772 
1773  RETURN
1774  0 Not safe to update
1775  1 Safe to update
1776 */
1777 
1778 static bool safe_update_on_fly(THD *thd, JOIN_TAB *join_tab,
1779  TABLE_LIST *table_ref, TABLE_LIST *all_tables)
1780 {
1781  TABLE *table= join_tab->table;
1782  if (unique_table(thd, table_ref, all_tables, 0))
1783  return 0;
1784  switch (join_tab->type) {
1785  case JT_SYSTEM:
1786  case JT_CONST:
1787  case JT_EQ_REF:
1788  return TRUE; // At most one matching row
1789  case JT_REF:
1790  case JT_REF_OR_NULL:
1791  return !is_key_used(table, join_tab->ref.key, table->write_set);
1792  case JT_ALL:
1793  if (bitmap_is_overlapping(&table->tmp_set, table->write_set))
1794  return FALSE;
1795  /* If range search on index */
1796  if (join_tab->quick)
1797  return !join_tab->quick->is_keys_used(table->write_set);
1798  /* If scanning in clustered key */
1799  if ((table->file->ha_table_flags() & HA_PRIMARY_KEY_IN_READ_INDEX) &&
1800  table->s->primary_key < MAX_KEY)
1801  return !is_key_used(table, table->s->primary_key, table->write_set);
1802  return TRUE;
1803  default:
1804  break; // Avoid compler warning
1805  }
1806  return FALSE;
1807 
1808 }
1809 
1810 
1811 /*
1812  Initialize table for multi table
1813 
1814  IMPLEMENTATION
1815  - Update first table in join on the fly, if possible
1816  - Create temporary tables to store changed values for all other tables
1817  that are updated (and main_table if the above doesn't hold).
1818 */
1819 
/*
  Initialize tables for the multi-table UPDATE.

  - Update the first table in the join on the fly if that is safe.
  - For every other updated table, create a temporary table that records
    the rowid of each row to change (plus rowids of CHECK OPTION tables
    for updatable views) and the new column values; these are replayed in
    do_updates().

  @return false (0) on success, true (1) on error.
*/
bool
multi_update::initialize_tables(JOIN *join)
{
  TABLE_LIST *table_ref;
  DBUG_ENTER("initialize_tables");

  /* With SQL_SAFE_UPDATES, refuse a full join outright. */
  if ((thd->variables.option_bits & OPTION_SAFE_UPDATES) && error_if_full_join(join))
    DBUG_RETURN(1);
  main_table=join->join_tab->table;
  table_to_update= 0;

  /* Any update has at least one pair (field, value) */
  DBUG_ASSERT(fields->elements);
  /*
    Only one table may be modified by UPDATE of an updatable view.
    For an updatable view first_table_for_update indicates this
    table.
    For a regular multi-update it refers to some updated table.
  */
  TABLE *first_table_for_update= ((Item_field *) fields->head())->field->table;

  /* Create a temporary table for keys to all tables, except main table */
  for (table_ref= update_tables; table_ref; table_ref= table_ref->next_local)
  {
    TABLE *table=table_ref->table;
    uint cnt= table_ref->shared;
    List<Item> temp_fields;
    ORDER group;
    TMP_TABLE_PARAM *tmp_param;

    if (ignore)
      table->file->extra(HA_EXTRA_IGNORE_DUP_KEY);
    if (table == main_table)            // First table in join
    {
      if (safe_update_on_fly(thd, join->join_tab, table_ref, all_tables))
      {
        table_to_update= table;         // Update table on the fly
        continue;
      }
    }

    /*
      enable uncacheable flag if we update a view with check option
      and check option has a subselect, otherwise, the check option
      can be evaluated after the subselect was freed as independent
      (See full_local in JOIN::join_free()).
    */
    if (table_ref->check_option && !join->select_lex->uncacheable)
    {
      SELECT_LEX_UNIT *tmp_unit;
      SELECT_LEX *sl;
      for (tmp_unit= join->select_lex->first_inner_unit();
           tmp_unit;
           tmp_unit= tmp_unit->next_unit())
      {
        for (sl= tmp_unit->first_select(); sl; sl= sl->next_select())
        {
          if (sl->master_unit()->item)
          {
            join->select_lex->uncacheable|= UNCACHEABLE_CHECKOPTION;
            goto loop_end;
          }
        }
      }
    }
loop_end:

    if (table == first_table_for_update && table_ref->check_option)
    {
      /*
        Collect tables referenced by the CHECK OPTION condition but not
        updated themselves; their rowids must also be stored.
      */
      table_map unupdated_tables= table_ref->check_option->used_tables() &
                                  ~first_table_for_update->map;
      for (TABLE_LIST *tbl_ref =leaves;
           unupdated_tables && tbl_ref;
           tbl_ref= tbl_ref->next_leaf)
      {
        if (unupdated_tables & tbl_ref->table->map)
          unupdated_tables&= ~tbl_ref->table->map;
        else
          continue;
        if (unupdated_check_opt_tables.push_back(tbl_ref->table))
          DBUG_RETURN(1);
      }
    }

    tmp_param= tmp_table_param+cnt;

    /*
      Create a temporary table to store all fields that are changed for this
      table. The first field in the temporary table is a pointer to the
      original row so that we can find and update it. For the updatable
      VIEW a few following fields are rowids of tables used in the CHECK
      OPTION condition.
    */

    List_iterator_fast<TABLE> tbl_it(unupdated_check_opt_tables);
    TABLE *tbl= table;
    do
    {
      /*
        Signal each table (including tables referenced by WITH CHECK OPTION
        clause) for which we will store row position in the temporary table
        that we need a position to be read first.
      */
      tbl->prepare_for_position();

      /* One rowid column (handler ref bytes) per involved table. */
      Field_string *field= new Field_string(tbl->file->ref_length, 0,
                                            tbl->alias, &my_charset_bin);
      if (!field)
        DBUG_RETURN(1);
      field->init(tbl);
      /*
        The field will be converted to varstring when creating tmp table if
        table to be updated was created by mysql 4.1. Deny this.
      */
      field->can_alter_field_type= 0;
      Item_field *ifield= new Item_field((Field *) field);
      if (!ifield)
        DBUG_RETURN(1);
      ifield->maybe_null= 0;
      if (temp_fields.push_back(ifield))
        DBUG_RETURN(1);
    } while ((tbl= tbl_it++));

    temp_fields.concat(fields_for_table[cnt]);

    /* Make an unique key over the first field to avoid duplicated updates */
    memset(&group, 0, sizeof(group));
    group.direction= ORDER::ORDER_ASC;
    group.item= (Item**) temp_fields.head_ref();

    tmp_param->quick_group=1;
    tmp_param->field_count=temp_fields.elements;
    tmp_param->group_parts=1;
    tmp_param->group_length= table->file->ref_length;
    /* small table, ignore SQL_BIG_TABLES */
    my_bool save_big_tables= thd->variables.big_tables;
    thd->variables.big_tables= FALSE;
    tmp_tables[cnt]=create_tmp_table(thd, tmp_param, temp_fields,
                                     (ORDER*) &group, 0, 0,
                                     TMP_TABLE_ALL_COLUMNS, HA_POS_ERROR, "");
    thd->variables.big_tables= save_big_tables;
    if (!tmp_tables[cnt])
      DBUG_RETURN(1);
    tmp_tables[cnt]->file->extra(HA_EXTRA_WRITE_CACHE);
  }
  DBUG_RETURN(0);
}
1969 
1970 
1971 multi_update::~multi_update()
1972 {
1973  TABLE_LIST *table;
1974  for (table= update_tables ; table; table= table->next_local)
1975  {
1976  table->table->no_keyread= table->table->no_cache= 0;
1977  if (ignore)
1978  table->table->file->extra(HA_EXTRA_NO_IGNORE_DUP_KEY);
1979  }
1980 
1981  if (tmp_tables)
1982  {
1983  for (uint cnt = 0; cnt < table_count; cnt++)
1984  {
1985  if (tmp_tables[cnt])
1986  {
1987  free_tmp_table(thd, tmp_tables[cnt]);
1988  tmp_table_param[cnt].cleanup();
1989  }
1990  }
1991  }
1992  if (copy_field)
1993  delete [] copy_field;
1994  thd->count_cuted_fields= CHECK_FIELD_IGNORE; // Restore this setting
1995  DBUG_ASSERT(trans_safe || !updated ||
1996  thd->transaction.stmt.cannot_safely_rollback());
1997 
1998  if (update_operations != NULL)
1999  for (uint i= 0; i < table_count; i++)
2000  delete update_operations[i];
2001 }
2002 
2003 
2004 bool multi_update::send_data(List<Item> &not_used_values)
2005 {
2006  TABLE_LIST *cur_table;
2007  DBUG_ENTER("multi_update::send_data");
2008 
2009  for (cur_table= update_tables; cur_table; cur_table= cur_table->next_local)
2010  {
2011  TABLE *table= cur_table->table;
2012  uint offset= cur_table->shared;
2013  /*
2014  Check if we are using outer join and we didn't find the row
2015  or if we have already updated this row in the previous call to this
2016  function.
2017 
2018  The same row may be presented here several times in a join of type
2019  UPDATE t1 FROM t1,t2 SET t1.a=t2.a
2020 
2021  In this case we will do the update for the first found row combination.
2022  The join algorithm guarantees that we will not find the a row in
2023  t1 several times.
2024  */
2025  if (table->status & (STATUS_NULL_ROW | STATUS_UPDATED))
2026  continue;
2027 
2028  if (table == table_to_update)
2029  {
2030  table->status|= STATUS_UPDATED;
2031  store_record(table,record[1]);
2032  if (fill_record_n_invoke_before_triggers(thd,
2033  *fields_for_table[offset],
2034  *values_for_table[offset],
2035  false, // ignore_errors
2036  table->triggers,
2037  TRG_EVENT_UPDATE))
2038  DBUG_RETURN(1);
2039 
2040  /*
2041  Reset the table->auto_increment_field_not_null as it is valid for
2042  only one row.
2043  */
2044  table->auto_increment_field_not_null= FALSE;
2045  found++;
2046  if (!records_are_comparable(table) || compare_records(table))
2047  {
2048  update_operations[offset]->set_function_defaults(table);
2049 
2050  int error;
2051  if ((error= cur_table->view_check_option(thd, ignore)) !=
2052  VIEW_CHECK_OK)
2053  {
2054  found--;
2055  if (error == VIEW_CHECK_SKIP)
2056  continue;
2057  else if (error == VIEW_CHECK_ERROR)
2058  DBUG_RETURN(1);
2059  }
2060  if (!updated++)
2061  {
2062  /*
2063  Inform the main table that we are going to update the table even
2064  while we may be scanning it. This will flush the read cache
2065  if it's used.
2066  */
2067  main_table->file->extra(HA_EXTRA_PREPARE_FOR_UPDATE);
2068  }
2069  if ((error=table->file->ha_update_row(table->record[1],
2070  table->record[0])) &&
2071  error != HA_ERR_RECORD_IS_THE_SAME)
2072  {
2073  updated--;
2074  if (!ignore ||
2075  table->file->is_fatal_error(error, HA_CHECK_DUP_KEY))
2076  {
2077  /*
2078  If (ignore && error == is ignorable) we don't have to
2079  do anything; otherwise...
2080  */
2081  myf flags= 0;
2082 
2083  if (table->file->is_fatal_error(error, HA_CHECK_DUP_KEY))
2084  flags|= ME_FATALERROR; /* Other handler errors are fatal */
2085 
2086  table->file->print_error(error,MYF(flags));
2087  DBUG_RETURN(1);
2088  }
2089  }
2090  else
2091  {
2092  if (error == HA_ERR_RECORD_IS_THE_SAME)
2093  {
2094  error= 0;
2095  updated--;
2096  }
2097  /* non-transactional or transactional table got modified */
2098  /* either multi_update class' flag is raised in its branch */
2099  if (table->file->has_transactions())
2100  transactional_tables= TRUE;
2101  else
2102  {
2103  trans_safe= FALSE;
2104  thd->transaction.stmt.mark_modified_non_trans_table();
2105  }
2106  }
2107  }
2108  if (table->triggers &&
2109  table->triggers->process_triggers(thd, TRG_EVENT_UPDATE,
2110  TRG_ACTION_AFTER, TRUE))
2111  DBUG_RETURN(1);
2112  }
2113  else
2114  {
2115  int error;
2116  TABLE *tmp_table= tmp_tables[offset];
2117  /*
2118  For updatable VIEW store rowid of the updated table and
2119  rowids of tables used in the CHECK OPTION condition.
2120  */
2121  uint field_num= 0;
2122  List_iterator_fast<TABLE> tbl_it(unupdated_check_opt_tables);
2123  TABLE *tbl= table;
2124  do
2125  {
2126  tbl->file->position(tbl->record[0]);
2127  memcpy((char*) tmp_table->field[field_num]->ptr,
2128  (char*) tbl->file->ref, tbl->file->ref_length);
2129  /*
2130  For outer joins a rowid field may have no NOT_NULL_FLAG,
2131  so we have to reset NULL bit for this field.
2132  (set_notnull() resets NULL bit only if available).
2133  */
2134  tmp_table->field[field_num]->set_notnull();
2135  field_num++;
2136  } while ((tbl= tbl_it++));
2137 
2138  /* Store regular updated fields in the row. */
2139  fill_record(thd,
2140  tmp_table->field + 1 + unupdated_check_opt_tables.elements,
2141  *values_for_table[offset], 1, NULL);
2142 
2143  /* Write row, ignoring duplicated updates to a row */
2144  error= tmp_table->file->ha_write_row(tmp_table->record[0]);
2145  if (error != HA_ERR_FOUND_DUPP_KEY && error != HA_ERR_FOUND_DUPP_UNIQUE)
2146  {
2147  if (error &&
2148  create_myisam_from_heap(thd, tmp_table,
2149  tmp_table_param[offset].start_recinfo,
2150  &tmp_table_param[offset].recinfo,
2151  error, TRUE, NULL))
2152  {
2153  do_update= 0;
2154  DBUG_RETURN(1); // Not a table_is_full error
2155  }
2156  found++;
2157  }
2158  }
2159  }
2160  DBUG_RETURN(0);
2161 }
2162 
2163 
/*
  Report an error that occurred during the multi-table UPDATE.

  Only pushes the error into the diagnostics area via my_error();
  any cleanup of partially-applied updates is performed separately
  in multi_update::abort_result_set().

  @param errcode  error code to report
  @param err      message text, supplied as the single format
                  argument for the error's format string
*/
void multi_update::send_error(uint errcode,const char *err)
{
  /* First send error what ever it is ... */
  my_error(errcode, MYF(0), err);
}
2169 
2170 
/*
  Abort the multi-table UPDATE after an error.

  Decides, based on what has already been modified, whether to simply
  return (rollback will undo everything), to push the remaining buffered
  updates anyway (non-transactional tables cannot be rolled back), and
  whether the statement must still be written to the binary log.
*/
void multi_update::abort_result_set()
{
  /* the error was handled or nothing deleted and no side effects return */
  if (error_handled ||
      (!thd->transaction.stmt.cannot_safely_rollback() && !updated))
    return;

  /* Something already updated so we have to invalidate cache */
  if (updated)
    query_cache_invalidate3(thd, update_tables, 1);
  /*
    If all tables that has been updated are trans safe then just do rollback.
    If not attempt to do remaining updates.
  */

  if (! trans_safe)
  {
    DBUG_ASSERT(thd->transaction.stmt.cannot_safely_rollback());
    if (do_update && table_count > 1)
    {
      /* Add warning here */
      /*
         todo/fixme: do_update() is never called with the arg 1.
         should it change the signature to become argless?
      */
      /*
        A non-transactional table was already modified: finish applying
        the buffered updates to the remaining tables, since a rollback
        cannot undo what is already on disk. The return value is ignored
        deliberately -- we are already in an error path.
      */
      (void) do_updates();
    }
  }
  if (thd->transaction.stmt.cannot_safely_rollback())
  {
    /*
      The query has to binlog because there's a modified non-transactional table
      either from the query's list or via a stored routine: bug#13270,23333
    */
    if (mysql_bin_log.is_open())
    {
      /*
        THD::killed status might not have been set ON at time of an error
        got caught and if happens later the killed error is written
        into repl event.
      */
      int errcode= query_error_code(thd, thd->killed == THD::NOT_KILLED);
      /* the error of binary logging is ignored */
      (void)thd->binlog_query(THD::ROW_QUERY_TYPE,
                              thd->query(), thd->query_length(),
                              transactional_tables, FALSE, FALSE, errcode);
    }
  }
  /*
    Invariant: if only transactional tables were touched, or nothing was
    updated, the statement is still safely rollback-able; otherwise the
    "cannot safely rollback" flag must have been raised above.
  */
  DBUG_ASSERT(trans_safe || !updated || thd->transaction.stmt.cannot_safely_rollback());
}
2221 
2222 
/*
  Apply the buffered updates for all tables except the first one.

  During the join, rows for every table other than table_to_update were
  stored in per-table temporary tables (rowid columns first, then the
  new field values). This function replays them: for each target table
  it scans its temporary table, repositions the target table (and any
  tables needed for CHECK OPTION) via ha_rnd_pos() on the saved rowids,
  copies the new values over, fires BEFORE/AFTER UPDATE triggers and
  performs the actual ha_update_row() calls.

  @return 0 on success, 1 on error (error already reported, except on
          the kill path -- see the NOTE at the kill check below).
*/
int multi_update::do_updates()
{
  TABLE_LIST *cur_table;
  int local_error= 0;
  ha_rows org_updated;
  TABLE *table, *tmp_table;
  List_iterator_fast<TABLE> check_opt_it(unupdated_check_opt_tables);
  DBUG_ENTER("multi_update::do_updates");

  do_update= 0;					// Don't retry this function
  if (!found)
    DBUG_RETURN(0);
  for (cur_table= update_tables; cur_table; cur_table= cur_table->next_local)
  {
    uint offset= cur_table->shared;

    table = cur_table->table;
    if (table == table_to_update)
      continue;					// Already updated
    org_updated= updated;  // Per-table baseline for the trans-safety check
    tmp_table= tmp_tables[cur_table->shared];
    tmp_table->file->extra(HA_EXTRA_CACHE);	// Change to read cache
    if ((local_error= table->file->ha_rnd_init(0)))
      goto err;
    table->file->extra(HA_EXTRA_NO_CACHE);

    /* Open scans on the extra tables referenced by the CHECK OPTION. */
    check_opt_it.rewind();
    while(TABLE *tbl= check_opt_it++)
    {
      if (tbl->file->ha_rnd_init(1))
        goto err;
      tbl->file->extra(HA_EXTRA_CACHE);
    }

    /*
      Setup copy functions to copy fields from temporary table
    */
    List_iterator_fast<Item> field_it(*fields_for_table[offset]);
    Field **field= tmp_table->field +
                   1 + unupdated_check_opt_tables.elements; // Skip row pointers
    Copy_field *copy_field_ptr= copy_field, *copy_field_end;
    for ( ; *field ; field++)
    {
      Item_field *item= (Item_field* ) field_it++;
      (copy_field_ptr++)->set(item->field, *field, 0);
    }
    copy_field_end=copy_field_ptr;

    if ((local_error = tmp_table->file->ha_rnd_init(1)))
      goto err;

    for (;;)
    {
      /*
        NOTE(review): on kill with trans_safe we jump to 'err' while
        local_error is still 0, so print_error(0, ...) is reached with a
        non-error code -- looks dubious; confirm intended behavior.
      */
      if (thd->killed && trans_safe)
        goto err;
      if ((local_error=tmp_table->file->ha_rnd_next(tmp_table->record[0])))
      {
        if (local_error == HA_ERR_END_OF_FILE)
          break;
        if (local_error == HA_ERR_RECORD_DELETED)
          continue;				// May happen on dup key
        goto err;
      }

      /* call ha_rnd_pos() using rowids from temporary table */
      check_opt_it.rewind();
      TABLE *tbl= table;
      uint field_num= 0;
      /*
        The first rowid column repositions the updated table itself;
        the following ones reposition the CHECK OPTION tables.
      */
      do
      {
        if((local_error=
              tbl->file->ha_rnd_pos(tbl->record[0],
                                    (uchar *) tmp_table->field[field_num]->ptr)))
          goto err;
        field_num++;
      } while((tbl= check_opt_it++));

      table->status|= STATUS_UPDATED;
      store_record(table,record[1]);  // Keep "before" image in record[1]

      /* Copy data from temporary table to current table */
      for (copy_field_ptr=copy_field;
           copy_field_ptr != copy_field_end;
           copy_field_ptr++)
        (*copy_field_ptr->do_copy)(copy_field_ptr);

      if (table->triggers &&
          table->triggers->process_triggers(thd, TRG_EVENT_UPDATE,
                                            TRG_ACTION_BEFORE, TRUE))
        goto err2;  // Trigger already reported the error; skip print_error

      /* Skip the engine call entirely if the row would not change. */
      if (!records_are_comparable(table) || compare_records(table))
      {
        update_operations[offset]->set_function_defaults(table);
        int error;
        if ((error= cur_table->view_check_option(thd, ignore)) !=
            VIEW_CHECK_OK)
        {
          if (error == VIEW_CHECK_SKIP)
            continue;
          else if (error == VIEW_CHECK_ERROR)
            goto err;
        }
        local_error= table->file->ha_update_row(table->record[1],
                                                table->record[0]);
        if (!local_error)
          updated++;
        else if (local_error == HA_ERR_RECORD_IS_THE_SAME)
          local_error= 0;  // Identical row: not an error, not counted
        else if (!ignore ||
                 table->file->is_fatal_error(local_error, HA_CHECK_DUP_KEY))
          goto err;
        else
          local_error= 0;  // IGNORE mode: suppress non-fatal errors
      }

      if (table->triggers &&
          table->triggers->process_triggers(thd, TRG_EVENT_UPDATE,
                                            TRG_ACTION_AFTER, TRUE))
        goto err2;
    }

    /* Record whether this table's changes are rollback-safe. */
    if (updated != org_updated)
    {
      if (table->file->has_transactions())
        transactional_tables= TRUE;
      else
      {
        trans_safe= FALSE;				// Can't do safe rollback
        thd->transaction.stmt.mark_modified_non_trans_table();
      }
    }
    (void) table->file->ha_rnd_end();
    (void) tmp_table->file->ha_rnd_end();
    check_opt_it.rewind();
    while (TABLE *tbl= check_opt_it++)
        tbl->file->ha_rnd_end();

  }
  DBUG_RETURN(0);

err:
  {
    table->file->print_error(local_error,MYF(ME_FATALERROR));
  }

err2:
  /* 'err' falls through here intentionally: shared cursor cleanup. */
  if (table->file->inited)
    (void) table->file->ha_rnd_end();
  if (tmp_table->file->inited)
    (void) tmp_table->file->ha_rnd_end();
  check_opt_it.rewind();
  while (TABLE *tbl= check_opt_it++)
  {
    if (tbl->file->inited)
      (void) tbl->file->ha_rnd_end();
  }

  /* Even on error, account for rows already written to this table. */
  if (updated != org_updated)
  {
    if (table->file->has_transactions())
      transactional_tables= TRUE;
    else
    {
      trans_safe= FALSE;
      thd->transaction.stmt.mark_modified_non_trans_table();
    }
  }
  DBUG_RETURN(1);
}
2393 
2394 
/* Returns: true (error) or false (success). */
2396 
2397 bool multi_update::send_eof()
2398 {
2399  char buff[STRING_BUFFER_USUAL_SIZE];
2400  ulonglong id;
2401  THD::killed_state killed_status= THD::NOT_KILLED;
2402  DBUG_ENTER("multi_update::send_eof");
2403  THD_STAGE_INFO(thd, stage_updating_reference_tables);
2404 
2405  /*
2406  Does updates for the last n - 1 tables, returns 0 if ok;
2407  error takes into account killed status gained in do_updates()
2408  */
2409  int local_error= thd->is_error();
2410  if (!local_error)
2411  local_error = (table_count) ? do_updates() : 0;
2412  /*
2413  if local_error is not set ON until after do_updates() then
2414  later carried out killing should not affect binlogging.
2415  */
2416  killed_status= (local_error == 0)? THD::NOT_KILLED : thd->killed;
2417  THD_STAGE_INFO(thd, stage_end);
2418 
2419  /* We must invalidate the query cache before binlog writing and
2420  ha_autocommit_... */
2421 
2422  if (updated)
2423  {
2424  query_cache_invalidate3(thd, update_tables, 1);
2425  }
2426  /*
2427  Write the SQL statement to the binlog if we updated
2428  rows and we succeeded or if we updated some non
2429  transactional tables.
2430 
2431  The query has to binlog because there's a modified non-transactional table
2432  either from the query's list or via a stored routine: bug#13270,23333
2433  */
2434 
2435  if (local_error == 0 || thd->transaction.stmt.cannot_safely_rollback())
2436  {
2437  if (mysql_bin_log.is_open())
2438  {
2439  int errcode= 0;
2440  if (local_error == 0)
2441  thd->clear_error();
2442  else
2443  errcode= query_error_code(thd, killed_status == THD::NOT_KILLED);
2444  if (thd->binlog_query(THD::ROW_QUERY_TYPE,
2445  thd->query(), thd->query_length(),
2446  transactional_tables, FALSE, FALSE, errcode))
2447  {
2448  local_error= 1; // Rollback update
2449  }
2450  }
2451  }
2452  DBUG_ASSERT(trans_safe || !updated ||
2453  thd->transaction.stmt.cannot_safely_rollback());
2454 
2455  if (local_error != 0)
2456  error_handled= TRUE; // to force early leave from ::send_error()
2457 
2458  if (local_error > 0) // if the above log write did not fail ...
2459  {
2460  /* Safety: If we haven't got an error before (can happen in do_updates) */
2461  my_message(ER_UNKNOWN_ERROR, "An error occured in multi-table update",
2462  MYF(0));
2463  DBUG_RETURN(TRUE);
2464  }
2465 
2466  id= thd->arg_of_last_insert_id_function ?
2467  thd->first_successful_insert_id_in_prev_stmt : 0;
2468  my_snprintf(buff, sizeof(buff), ER(ER_UPDATE_INFO),
2469  (ulong) found, (ulong) updated, (ulong) thd->cuted_fields);
2470  ::my_ok(thd, (thd->client_capabilities & CLIENT_FOUND_ROWS) ? found : updated,
2471  id, buff);
2472  DBUG_RETURN(FALSE);
2473 }