MySQL 5.6.14 Source Code Document
sql_executor.cc
1 /* Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
2 
3  This program is free software; you can redistribute it and/or modify
4  it under the terms of the GNU General Public License as published by
5  the Free Software Foundation; version 2 of the License.
6 
7  This program is distributed in the hope that it will be useful,
8  but WITHOUT ANY WARRANTY; without even the implied warranty of
9  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10  GNU General Public License for more details.
11 
12  You should have received a copy of the GNU General Public License
13  along with this program; if not, write to the Free Software
14  Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */
15 
27 #include "sql_select.h"
28 #include "sql_executor.h"
29 #include "sql_optimizer.h"
30 #include "sql_join_buffer.h"
31 #include "opt_trace.h"
32 #include "sql_test.h"
33 #include "sql_base.h"
34 #include "key.h"
35 #include "sql_derived.h"
36 #include "sql_show.h"
37 #include "filesort.h"
38 #include "sql_tmp_table.h"
39 #include "records.h" // rr_sequential
40 #include "opt_explain_format.h" // Explain_format_flags
41 
42 #include <algorithm>
43 using std::max;
44 using std::min;
45 
46 static void return_zero_rows(JOIN *join, List<Item> &fields);
47 static void save_const_null_info(JOIN *join, table_map *save_nullinfo);
48 static void restore_const_null_info(JOIN *join, table_map save_nullinfo);
49 static int do_select(JOIN *join);
50 
51 static enum_nested_loop_state
52 evaluate_join_record(JOIN *join, JOIN_TAB *join_tab);
53 static enum_nested_loop_state
54 evaluate_null_complemented_join_record(JOIN *join, JOIN_TAB *join_tab);
55 static enum_nested_loop_state
56 end_send(JOIN *join, JOIN_TAB *join_tab, bool end_of_records);
57 static enum_nested_loop_state
58 end_write(JOIN *join, JOIN_TAB *join_tab, bool end_of_records);
59 static enum_nested_loop_state
60 end_update(JOIN *join, JOIN_TAB *join_tab, bool end_of_records);
61 static enum_nested_loop_state
62 end_unique_update(JOIN *join, JOIN_TAB *join_tab, bool end_of_records);
63 static void copy_sum_funcs(Item_sum **func_ptr, Item_sum **end_ptr);
64 
65 static int join_read_system(JOIN_TAB *tab);
66 static int join_read_const(JOIN_TAB *tab);
67 static int join_read_key(JOIN_TAB *tab);
68 static int join_read_always_key(JOIN_TAB *tab);
69 static int join_no_more_records(READ_RECORD *info);
70 static int join_read_next(READ_RECORD *info);
71 static int test_if_quick_select(JOIN_TAB *tab);
72 static int join_read_next_same(READ_RECORD *info);
73 static int join_read_prev(READ_RECORD *info);
74 static int join_ft_read_first(JOIN_TAB *tab);
75 static int join_ft_read_next(READ_RECORD *info);
76 static int join_read_always_key_or_null(JOIN_TAB *tab);
77 static int join_read_next_same_or_null(READ_RECORD *info);
78 static int create_sort_index(THD *thd, JOIN *join, JOIN_TAB *tab);
79 static bool remove_dup_with_compare(THD *thd, TABLE *entry, Field **field,
80  ulong offset,Item *having);
81 static bool remove_dup_with_hash_index(THD *thd,TABLE *table,
82  uint field_count, Field **first_field,
83  ulong key_length,Item *having);
84 static int join_read_linked_first(JOIN_TAB *tab);
85 static int join_read_linked_next(READ_RECORD *info);
86 static int do_sj_reset(SJ_TMP_TABLE *sj_tbl);
87 static bool cmp_buffer_with_ref(THD *thd, TABLE *table, TABLE_REF *tab_ref);
88 
96 void
97 JOIN::exec()
98 {
99  Opt_trace_context * const trace= &thd->opt_trace;
100  Opt_trace_object trace_wrapper(trace);
101  Opt_trace_object trace_exec(trace, "join_execution");
102  trace_exec.add_select_number(select_lex->select_number);
103  Opt_trace_array trace_steps(trace, "steps");
104  List<Item> *columns_list= &fields_list;
105  DBUG_ENTER("JOIN::exec");
106 
107  DBUG_ASSERT(!tables || thd->lex->is_query_tables_locked());
108  DBUG_ASSERT(!(select_options & SELECT_DESCRIBE));
109 
110  THD_STAGE_INFO(thd, stage_executing);
111 
112  // Ignore errors of execution if option IGNORE present
113  if (thd->lex->ignore)
114  thd->lex->current_select->no_error= true;
115 
116  if (prepare_result(&columns_list))
117  DBUG_VOID_RETURN;
118 
119  if (!tables_list && (tables || !select_lex->with_sum_func))
120  { // Only test of functions
121  /*
122  We have to test for 'conds' here as the WHERE may not be constant
123  even if we don't have any tables for prepared statements or if
124  conds uses something like 'rand()'.
125 
126  Don't evaluate the having clause here. return_zero_rows() should
127  be called only for cases where there are no matching rows after
128  evaluating all conditions except the HAVING clause.
129  */
130  if (select_lex->cond_value != Item::COND_FALSE &&
131  (!conds || conds->val_int()))
132  {
133  if (result->send_result_set_metadata(*columns_list,
134  Protocol::SEND_NUM_ROWS |
135  Protocol::SEND_EOF))
136  {
137  DBUG_VOID_RETURN;
138  }
139 
140  /*
141  If the HAVING clause is either impossible or always true, then
142  JOIN::having is set to NULL by optimize_cond.
143  In this case JOIN::exec must check for JOIN::having_value, in the
144  same way it checks for JOIN::cond_value.
145  */
146  if (((select_lex->having_value != Item::COND_FALSE) &&
147  (!having || having->val_int()))
148  && do_send_rows && result->send_data(fields_list))
149  error= 1;
150  else
151  {
152  error= (int) result->send_eof();
153  send_records= ((select_options & OPTION_FOUND_ROWS) ? 1 :
154  thd->get_sent_row_count());
155  }
156  /* Query block (without union) always returns 0 or 1 row */
157  thd->limit_found_rows= send_records;
158  thd->set_examined_row_count(0);
159  }
160  else
161  {
162  return_zero_rows(this, *columns_list);
163  }
164  DBUG_VOID_RETURN;
165  }
166 
167  if (zero_result_cause)
168  {
169  return_zero_rows(this, *columns_list);
170  DBUG_VOID_RETURN;
171  }
172 
173  /*
174  Initialize examined rows here because the values from all join parts
175  must be accumulated in examined_row_count. Hence every join
176  iteration must count from zero.
177  */
178  examined_rows= 0;
179 
180  /* XXX: When can we have here thd->is_error() not zero? */
181  if (thd->is_error())
182  {
183  error= thd->is_error();
184  DBUG_VOID_RETURN;
185  }
186 
187  THD_STAGE_INFO(thd, stage_sending_data);
188  DBUG_PRINT("info", ("%s", thd->proc_info));
189  result->send_result_set_metadata(*fields,
190  Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF);
191  error= do_select(this);
192  /* Accumulate the counts from all join iterations of all join parts. */
193  thd->inc_examined_row_count(examined_rows);
194  DBUG_PRINT("counts", ("thd->examined_row_count: %lu",
195  (ulong) thd->get_examined_row_count()));
196 
197  DBUG_VOID_RETURN;
198 }
199 
200 
201 bool
202 JOIN::create_intermediate_table(JOIN_TAB *tab, List<Item> *tmp_table_fields,
203  ORDER_with_src &tmp_table_group,
204  bool save_sum_fields)
205 {
206  DBUG_ENTER("JOIN::create_intermediate_table");
207  THD_STAGE_INFO(thd, stage_creating_tmp_table);
208 
209  /*
210  Pushing LIMIT to the temporary table creation is not applicable
211  when there is ORDER BY or GROUP BY or there is no GROUP BY, but
212  there are aggregate functions, because in all these cases we need
213  all result rows.
214  */
215  ha_rows tmp_rows_limit= ((order == NULL || skip_sort_order) &&
216  !tmp_table_group &&
217  !select_lex->with_sum_func) ?
218  m_select_limit : HA_POS_ERROR;
219 
220  tab->tmp_table_param= new TMP_TABLE_PARAM(tmp_table_param);
221  tab->tmp_table_param->skip_create_table= true;
222  TABLE* table= create_tmp_table(thd, tab->tmp_table_param, *tmp_table_fields,
223  tmp_table_group, select_distinct && !group_list,
224  save_sum_fields, select_options, tmp_rows_limit,
225  "");
226  if (!table)
227  DBUG_RETURN(true);
228  tmp_table_param.using_outer_summary_function=
229  tab->tmp_table_param->using_outer_summary_function;
230  tab->join= this;
231  DBUG_ASSERT(tab > tab->join->join_tab);
232  (tab - 1)->next_select= sub_select_op;
233  tab->op= new (thd->mem_root) QEP_tmp_table(tab);
234  if (!tab->op)
235  goto err;
236  tab->table= table;
237  table->reginfo.join_tab= tab;
238 
239  if (table->group)
240  {
241  explain_flags.set(tmp_table_group.src, ESP_USING_TMPTABLE);
242  }
243  if (table->distinct || select_distinct)
244  {
245  explain_flags.set(ESC_DISTINCT, ESP_USING_TMPTABLE);
246  }
247  if ((!group_list && !order && !select_distinct) ||
248  (select_options & (SELECT_BIG_RESULT | OPTION_BUFFER_RESULT)))
249  {
250  explain_flags.set(ESC_BUFFER_RESULT, ESP_USING_TMPTABLE);
251  }
252  /* if group or order on first table, sort first */
253  if (group_list && simple_group)
254  {
255  DBUG_PRINT("info",("Sorting for group"));
256  THD_STAGE_INFO(thd, stage_sorting_for_group);
257 
258  if (ordered_index_usage != ordered_index_group_by &&
259  (join_tab + const_tables)->type != JT_CONST && // Don't sort 1 row
260  add_sorting_to_table(join_tab + const_tables, &group_list))
261  goto err;
262 
263  if (alloc_group_fields(this, group_list))
264  goto err;
265  if (make_sum_func_list(all_fields, fields_list, true))
266  goto err;
267  if (prepare_sum_aggregators(sum_funcs,
268  !join_tab->is_using_agg_loose_index_scan()))
269  goto err;
270  if (setup_sum_funcs(thd, sum_funcs))
271  goto err;
272  group_list= NULL;
273  }
274  else
275  {
276  if (make_sum_func_list(all_fields, fields_list, false))
277  goto err;
278  if (prepare_sum_aggregators(sum_funcs,
279  !join_tab->is_using_agg_loose_index_scan()))
280  goto err;
281  if (setup_sum_funcs(thd, sum_funcs))
282  goto err;
283 
284  if (!group_list && !table->distinct && order && simple_order)
285  {
286  DBUG_PRINT("info",("Sorting for order"));
287  THD_STAGE_INFO(thd, stage_sorting_for_order);
288 
289  if (ordered_index_usage != ordered_index_order_by &&
290  add_sorting_to_table(join_tab + const_tables, &order))
291  goto err;
292  order= NULL;
293  }
294  }
295  DBUG_RETURN(false);
296 
297 err:
298  if (table != NULL)
299  free_tmp_table(thd, table);
300  DBUG_RETURN(true);
301 }
302 
303 
323 int JOIN::rollup_send_data(uint idx)
324 {
325  uint i;
326  for (i= send_group_parts ; i-- > idx ; )
327  {
328  /* Get reference pointers to sum functions in place */
329  copy_ref_ptr_array(ref_ptrs, rollup.ref_pointer_arrays[i]);
330  if ((!having || having->val_int()))
331  {
332  if (send_records < unit->select_limit_cnt && do_send_rows &&
333  result->send_data(rollup.fields[i]))
334  return 1;
335  send_records++;
336  }
337  }
338  /* Restore ref_pointer_array */
339  set_items_ref_array(current_ref_ptrs);
340  return 0;
341 }
342 
343 
364 int JOIN::rollup_write_data(uint idx, TABLE *table_arg)
365 {
366  uint i;
367  for (i= send_group_parts ; i-- > idx ; )
368  {
369  /* Get reference pointers to sum functions in place */
370  copy_ref_ptr_array(ref_ptrs, rollup.ref_pointer_arrays[i]);
371  if ((!having || having->val_int()))
372  {
373  int write_error;
374  Item *item;
375  List_iterator_fast<Item> it(rollup.fields[i]);
376  while ((item= it++))
377  {
378  if (item->type() == Item::NULL_ITEM && item->is_result_field())
379  item->save_in_result_field(1);
380  }
381  copy_sum_funcs(sum_funcs_end[i+1], sum_funcs_end[i]);
382  if ((write_error= table_arg->file->ha_write_row(table_arg->record[0])))
383  {
384  if (create_myisam_from_heap(thd, table_arg,
385  tmp_table_param.start_recinfo,
386  &tmp_table_param.recinfo,
387  write_error, FALSE, NULL))
388  return 1;
389  }
390  }
391  }
392  /* Restore ref_pointer_array */
393  set_items_ref_array(current_ref_ptrs);
394  return 0;
395 }
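
The two rollup functions above walk the ROLLUP levels from the most detailed grouping down to level idx, switching the ref pointer array to the pre-built field list of each level and sending (or writing) one super-aggregate row per level. As a rough standalone illustration of what GROUP BY a, b WITH ROLLUP produces (plain C++, illustrative names only, not the server's Item/ref-array machinery), three input rows yield the per-(a,b) rows plus one (a, NULL) row per value of a and one grand-total (NULL, NULL) row:

// Standalone sketch of the super-aggregate rows added by WITH ROLLUP.
#include <cstdio>
#include <map>
#include <vector>

int main()
{
  struct Row { int a; int b; long val; };
  std::vector<Row> rows= {{1, 1, 10}, {1, 2, 20}, {2, 1, 5}};

  std::map<std::pair<int, int>, long> by_ab;  // level 2: GROUP BY a, b
  std::map<int, long> by_a;                   // level 1: (a, NULL)
  long total= 0;                              // level 0: (NULL, NULL)

  for (const Row &r : rows)
  {
    by_ab[{r.a, r.b}]+= r.val;                // the regular grouped row
    by_a[r.a]+= r.val;                        // rolled up once
    total+= r.val;                            // rolled up twice
  }

  for (const auto &g : by_ab)
    std::printf("a=%d    b=%d    SUM=%ld\n", g.first.first, g.first.second, g.second);
  for (const auto &g : by_a)
    std::printf("a=%d    b=NULL SUM=%ld\n", g.first, g.second);
  std::printf("a=NULL b=NULL SUM=%ld\n", total);
  return 0;
}
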
396 
397 
398 void
399 JOIN::optimize_distinct()
400 {
401  for (JOIN_TAB *last_join_tab= join_tab + primary_tables - 1; ;)
402  {
403  if (select_lex->select_list_tables & last_join_tab->table->map)
404  break;
405  last_join_tab->not_used_in_distinct= true;
406  if (last_join_tab == join_tab)
407  break;
408  --last_join_tab;
409  }
410 
411  /* Optimize "select distinct b from t1 order by key_part_1 limit #" */
412  if (order && skip_sort_order)
413  {
414  /* Should already have been optimized away */
415  DBUG_ASSERT(ordered_index_usage == ordered_index_order_by);
416  if (ordered_index_usage == ordered_index_order_by)
417  {
418  order= NULL;
419  }
420  }
421 }
422 
423 bool prepare_sum_aggregators(Item_sum **func_ptr, bool need_distinct)
424 {
425  Item_sum *func;
426  DBUG_ENTER("prepare_sum_aggregators");
427  while ((func= *(func_ptr++)))
428  {
429  if (func->set_aggregator(need_distinct && func->has_with_distinct() ?
430  Aggregator::DISTINCT_AGGREGATOR :
431  Aggregator::SIMPLE_AGGREGATOR))
432  DBUG_RETURN(TRUE);
433  }
434  DBUG_RETURN(FALSE);
435 }
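
prepare_sum_aggregators() above gives each aggregate item either a DISTINCT-aware aggregator or a simple one, depending on whether the function was written with DISTINCT and whether the chosen access path (e.g. a loose index scan for aggregates) already delivers distinct input. A minimal standalone sketch of that distinction, with illustrative types rather than the server's Aggregator hierarchy:

// SimpleSum adds every value; DistinctSum de-duplicates its input first.
#include <cstdio>
#include <set>
#include <vector>

struct SimpleSum
{
  long total= 0;
  void add(long v) { total+= v; }
};

struct DistinctSum
{
  long total= 0;
  std::set<long> seen;
  void add(long v) { if (seen.insert(v).second) total+= v; }
};

int main()
{
  std::vector<long> col= {3, 3, 5};
  SimpleSum s;
  DistinctSum d;
  for (long v : col) { s.add(v); d.add(v); }
  std::printf("SUM(col)=%ld  SUM(DISTINCT col)=%ld\n", s.total, d.total); // 11 and 8
  return 0;
}
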
436 
437 
438 /******************************************************************************
439  Code for calculating functions
440 ******************************************************************************/
441 
442 
455 bool setup_sum_funcs(THD *thd, Item_sum **func_ptr)
456 {
457  Item_sum *func;
458  DBUG_ENTER("setup_sum_funcs");
459  while ((func= *(func_ptr++)))
460  {
461  if (func->aggregator_setup(thd))
462  DBUG_RETURN(TRUE);
463  }
464  DBUG_RETURN(FALSE);
465 }
466 
467 
468 static void
469 init_tmptable_sum_functions(Item_sum **func_ptr)
470 {
471  Item_sum *func;
472  while ((func= *(func_ptr++)))
473  func->reset_field();
474 }
475 
476 
479 static void
480 update_tmptable_sum_func(Item_sum **func_ptr,
481  TABLE *tmp_table __attribute__((unused)))
482 {
483  Item_sum *func;
484  while ((func= *(func_ptr++)))
485  func->update_field();
486 }
487 
488 
491 static void
492 copy_sum_funcs(Item_sum **func_ptr, Item_sum **end_ptr)
493 {
494  for (; func_ptr != end_ptr ; func_ptr++)
495  (void) (*func_ptr)->save_in_result_field(1);
496  return;
497 }
498 
499 
500 static bool
501 init_sum_functions(Item_sum **func_ptr, Item_sum **end_ptr)
502 {
503  for (; func_ptr != end_ptr ;func_ptr++)
504  {
505  if ((*func_ptr)->reset_and_add())
506  return 1;
507  }
508  /* If rollup, calculate the upper sum levels */
509  for ( ; *func_ptr ; func_ptr++)
510  {
511  if ((*func_ptr)->aggregator_add())
512  return 1;
513  }
514  return 0;
515 }
516 
517 
518 static bool
519 update_sum_func(Item_sum **func_ptr)
520 {
521  Item_sum *func;
522  for (; (func= (Item_sum*) *func_ptr) ; func_ptr++)
523  if (func->aggregator_add())
524  return 1;
525  return 0;
526 }
527 
544 bool
545 copy_funcs(Item **func_ptr, const THD *thd)
546 {
547  Item *func;
548  for (; (func = *func_ptr) ; func_ptr++)
549  {
550  func->save_in_result_field(1);
551  /*
552  Need to check the THD error state because Item::val_xxx() don't
553  return error code, but can generate errors
554  TODO: change it for a real status check when Item::val_xxx()
555  are extended to return status code.
556  */
557  if (thd->is_error())
558  return TRUE;
559  }
560  return FALSE;
561 }
562 
563 /*
564  end_select-compatible function that writes the record into a sjm temptable
565 
566  SYNOPSIS
567  end_sj_materialize()
568  join The join
569  join_tab Last join table
570  end_of_records FALSE <=> This call is made to pass another record
571  combination
572  TRUE <=> EOF (no action)
573 
574  DESCRIPTION
575  This function is used by semi-join materialization to capture the subquery's
576  resultset and write it into the temptable (that is, materialize it).
577 
578  NOTE
579  This function is used only for semi-join materialization. Non-semijoin
580  materialization uses a different mechanism.
581 
582  RETURN
583  NESTED_LOOP_OK
584  NESTED_LOOP_ERROR
585 */
586 
587 static enum_nested_loop_state
588 end_sj_materialize(JOIN *join, JOIN_TAB *join_tab, bool end_of_records)
589 {
590  int error;
591  THD *thd= join->thd;
592  Semijoin_mat_exec *sjm= join_tab[-1].sj_mat_exec;
593  DBUG_ENTER("end_sj_materialize");
594  if (!end_of_records)
595  {
596  TABLE *table= sjm->table;
597 
598  List_iterator<Item> it(sjm->sj_nest->nested_join->sj_inner_exprs);
599  Item *item;
600  while ((item= it++))
601  {
602  if (item->is_null())
603  DBUG_RETURN(NESTED_LOOP_OK);
604  }
605  fill_record(thd, table->field, sjm->sj_nest->nested_join->sj_inner_exprs,
606  1, NULL);
607  if (thd->is_error())
608  DBUG_RETURN(NESTED_LOOP_ERROR); /* purecov: inspected */
609  if ((error= table->file->ha_write_row(table->record[0])))
610  {
611  /* create_myisam_from_heap will generate error if needed */
612  if (table->file->is_fatal_error(error, HA_CHECK_DUP) &&
613  create_myisam_from_heap(thd, table,
614  sjm->table_param.start_recinfo,
615  &sjm->table_param.recinfo, error,
616  TRUE, NULL))
617  DBUG_RETURN(NESTED_LOOP_ERROR); /* purecov: inspected */
618  }
619  }
620  DBUG_RETURN(NESTED_LOOP_OK);
621 }
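
For a query such as SELECT * FROM t1 WHERE t1.a IN (SELECT b FROM t2), end_sj_materialize() above fills the semi-join temporary table with the inner-expression values, relying on the table's unique key to weed out duplicates and skipping rows in which any inner expression is NULL (a NULL value can never make the IN predicate true). A rough standalone sketch of that behaviour, with std::set standing in for the temporary table and purely illustrative names:

// Materialize the distinct, non-NULL subquery values, then probe them.
#include <cstdio>
#include <optional>
#include <set>
#include <vector>

int main()
{
  std::vector<std::optional<int>> t2_b= {1, 2, std::nullopt, 2};
  std::vector<int> t1_a= {2, 3};

  std::set<int> materialized;        // the unique key weeds out duplicates
  for (const auto &b : t2_b)
    if (b.has_value())               // NULL inner values are skipped
      materialized.insert(*b);       // duplicate insert is simply a no-op

  for (int a : t1_a)
    if (materialized.count(a))
      std::printf("t1.a=%d has a semi-join match\n", a);
  return 0;
}
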
622 
623 
624 
625 
639 static void update_const_equal_items(Item *cond, JOIN_TAB *tab)
640 {
641  if (!(cond->used_tables() & tab->table->map))
642  return;
643 
644  if (cond->type() == Item::COND_ITEM)
645  {
646  List<Item> *cond_list= ((Item_cond*) cond)->argument_list();
647  List_iterator_fast<Item> li(*cond_list);
648  Item *item;
649  while ((item= li++))
650  update_const_equal_items(item, tab);
651  }
652  else if (cond->type() == Item::FUNC_ITEM &&
653  ((Item_cond*) cond)->functype() == Item_func::MULT_EQUAL_FUNC)
654  {
655  Item_equal *item_equal= (Item_equal *) cond;
656  bool contained_const= item_equal->get_const() != NULL;
657  item_equal->update_const();
658  if (!contained_const && item_equal->get_const())
659  {
660  /* Update keys for range analysis */
661  Item_equal_iterator it(*item_equal);
662  Item_field *item_field;
663  while ((item_field= it++))
664  {
665  Field *field= item_field->field;
666  JOIN_TAB *stat= field->table->reginfo.join_tab;
667  key_map possible_keys= field->key_start;
668  possible_keys.intersect(field->table->keys_in_use_for_query);
669  stat[0].const_keys.merge(possible_keys);
670  stat[0].keys.merge(possible_keys);
671 
672  /*
673  For each field in the multiple equality (for which we know that it
674  is a constant) we have to find its corresponding key part, and set
675  that key part in const_key_parts.
676  */
677  if (!possible_keys.is_clear_all())
678  {
679  TABLE *tab= field->table;
680  Key_use *use;
681  for (use= stat->keyuse; use && use->table == tab; use++)
682  if (possible_keys.is_set(use->key) &&
683  tab->key_info[use->key].key_part[use->keypart].field ==
684  field)
685  tab->const_key_parts[use->key]|= use->keypart_map;
686  }
687  }
688  }
689  }
690 }
691 
707 static void
708 return_zero_rows(JOIN *join, List<Item> &fields)
709 {
710  DBUG_ENTER("return_zero_rows");
711 
712  join->join_free();
713 
714  if (!(join->result->send_result_set_metadata(fields,
715  Protocol::SEND_NUM_ROWS |
716  Protocol::SEND_EOF)))
717  {
718  bool send_error= FALSE;
719  if (join->send_row_on_empty_set())
720  {
721  // Mark tables as containing only NULL values
722  for (TABLE_LIST *table= join->select_lex->leaf_tables; table;
723  table= table->next_leaf)
724  mark_as_null_row(table->table);
725 
726  // Calculate aggregate functions for no rows
727 
728  /*
729  Must notify all fields that there are no rows (not only those
730  that will be returned) because join->having may refer to
731  fields that are not part of the result columns.
732  */
733  List_iterator_fast<Item> it(join->all_fields);
734  Item *item;
735  while ((item= it++))
736  item->no_rows_in_result();
737 
738  if (!join->having || join->having->val_int())
739  send_error= join->result->send_data(fields);
740  }
741  if (!send_error)
742  join->result->send_eof(); // Should be safe
743  }
744  /* Update results for FOUND_ROWS */
745  join->thd->set_examined_row_count(0);
746  join->thd->limit_found_rows= 0;
747  DBUG_VOID_RETURN;
748 }
749 
750 
761 void setup_tmptable_write_func(JOIN_TAB *tab)
762 {
763  JOIN *join= tab->join;
764  TABLE *table= tab->table;
765  QEP_tmp_table *op= (QEP_tmp_table *)tab->op;
766  TMP_TABLE_PARAM *tmp_tbl= tab->tmp_table_param;
767 
768  DBUG_ASSERT(table && op);
769 
770  if (table->group && tmp_tbl->sum_func_count &&
771  !tmp_tbl->precomputed_group_by)
772  {
773  /*
774  Note for MyISAM tmp tables: if uniques is true keys won't be
775  created.
776  */
777  if (table->s->keys && !table->s->uniques)
778  {
779  DBUG_PRINT("info",("Using end_update"));
780  op->set_write_func(end_update);
781  }
782  else
783  {
784  DBUG_PRINT("info",("Using end_unique_update"));
785  op->set_write_func(end_unique_update);
786  }
787  }
788  else if (join->sort_and_group && !tmp_tbl->precomputed_group_by)
789  {
790  DBUG_PRINT("info",("Using end_write_group"));
791  op->set_write_func(end_write_group);
792  }
793  else
794  {
795  DBUG_PRINT("info",("Using end_write"));
796  op->set_write_func(end_write);
797  if (tmp_tbl->precomputed_group_by)
798  {
799  /*
800  A preceding call to create_tmp_table in the case when loose
801  index scan is used guarantees that
802  TMP_TABLE_PARAM::items_to_copy has enough space for the group
803  by functions. It is OK here to use memcpy since we copy
804  Item_sum pointers into an array of Item pointers.
805  */
806  memcpy(tmp_tbl->items_to_copy + tmp_tbl->func_count,
807  join->sum_funcs,
808  sizeof(Item*)*tmp_tbl->sum_func_count);
809  tmp_tbl->items_to_copy[tmp_tbl->func_count+tmp_tbl->sum_func_count]= 0;
810  }
811  }
812 }
813 
814 
827 Next_select_func setup_end_select_func(JOIN *join, JOIN_TAB *tab)
828 {
829  TMP_TABLE_PARAM *tmp_tbl= tab ? tab->tmp_table_param : &join->tmp_table_param;
830 
831  /*
832  Choose method for presenting result to user. Use end_send_group
833  if the query requires grouping (has a GROUP BY clause and/or one or
834  more aggregate functions). Use end_send if the query should not
835  be grouped.
836  */
837  if (join->sort_and_group && !tmp_tbl->precomputed_group_by)
838  {
839  DBUG_PRINT("info",("Using end_send_group"));
840  return end_send_group;
841  }
842  DBUG_PRINT("info",("Using end_send"));
843  return end_send;
844 }
845 
846 
858 static int
859 do_select(JOIN *join)
860 {
861  int rc= 0;
862  enum_nested_loop_state error= NESTED_LOOP_OK;
863  DBUG_ENTER("do_select");
864 
865  join->send_records=0;
866  if (join->plan_is_const() && !join->need_tmp)
867  {
868  Next_select_func end_select= setup_end_select_func(join, NULL);
869  /*
870  HAVING will be checked after processing aggregate functions,
871  but WHERE should be checked here (we have already read the tables)
872 
873  @todo: consider calling end_select instead of duplicating code
874  */
875  if (!join->conds || join->conds->val_int())
876  {
877  // HAVING will be checked by end_select
878  error= (*end_select)(join, 0, 0);
879  if (error >= NESTED_LOOP_OK)
880  error= (*end_select)(join, 0, 1);
881 
882  /*
883  If we don't go through evaluate_join_record(), do the counting
884  here. join->send_records is increased on success in end_send(),
885  so we don't touch it here.
886  */
887  join->examined_rows++;
888  DBUG_ASSERT(join->examined_rows <= 1);
889  }
890  else if (join->send_row_on_empty_set())
891  {
892  table_map save_nullinfo= 0;
893  /*
894  If this is a subquery, we need to save and later restore
895  the const table NULL info before clearing the tables
896  because the following executions of the subquery do not
897  reevaluate constant fields. @see save_const_null_info
898  and restore_const_null_info
899  */
900  if (join->select_lex->master_unit()->item && join->const_tables)
901  save_const_null_info(join, &save_nullinfo);
902 
903  // Calculate aggregate functions for no rows
904  List_iterator_fast<Item> it(*join->fields);
905  Item *item;
906  while ((item= it++))
907  item->no_rows_in_result();
908 
909  // Mark tables as containing only NULL values
910  join->clear();
911 
912  if (!join->having || join->having->val_int())
913  rc= join->result->send_data(*join->fields);
914 
915  if (save_nullinfo)
916  restore_const_null_info(join, save_nullinfo);
917  }
918  /*
919  An error can happen when evaluating the conds
920  (the join condition and piece of where clause
921  relevant to this join table).
922  */
923  if (join->thd->is_error())
924  error= NESTED_LOOP_ERROR;
925  }
926  else
927  {
928  JOIN_TAB *join_tab= join->join_tab + join->const_tables;
929  DBUG_ASSERT(join->primary_tables);
930  error= join->first_select(join,join_tab,0);
931  if (error >= NESTED_LOOP_OK)
932  error= join->first_select(join,join_tab,1);
933  }
934 
935  join->thd->limit_found_rows= join->send_records;
936  /*
937  For "order by with limit", we cannot rely on send_records, but need
938  to use the rowcount read originally into the join_tab applying the
939  filesort. There cannot be any post-filtering conditions, nor any
940  following join_tabs in this case, so this rowcount properly represents
941  the correct number of qualifying rows.
942  */
943  if (join->order)
944  {
945  // Save # of found records prior to cleanup
946  JOIN_TAB *sort_tab;
947  JOIN_TAB *join_tab= join->join_tab;
948  uint const_tables= join->const_tables;
949 
950  // Take record count from first non constant table or from last tmp table
951  if (join->tmp_tables > 0)
952  sort_tab= join_tab + join->primary_tables + join->tmp_tables - 1;
953  else
954  {
955  DBUG_ASSERT(!join->plan_is_const());
956  sort_tab= join_tab + const_tables;
957  }
958  if (sort_tab->filesort &&
959  join->select_options & OPTION_FOUND_ROWS &&
960  sort_tab->filesort->sortorder &&
961  sort_tab->filesort->limit != HA_POS_ERROR)
962  {
963  join->thd->limit_found_rows= sort_tab->records;
964  }
965  }
966 
967  {
968  /*
969  The following will unlock all cursors if the command wasn't an
970  update command
971  */
972  join->join_free(); // Unlock all cursors
973  }
974  if (error == NESTED_LOOP_OK)
975  {
976  /*
977  Sic: this branch works even if rc != 0, e.g. when
978  send_data above returns an error.
979  */
980  if (join->result->send_eof())
981  rc= 1; // Don't send error
982  DBUG_PRINT("info",("%ld records output", (long) join->send_records));
983  }
984  else
985  rc= -1;
986 #ifndef DBUG_OFF
987  if (rc)
988  {
989  DBUG_PRINT("error",("Error: do_select() failed"));
990  }
991 #endif
992  rc= join->thd->is_error() ? -1 : rc;
993  DBUG_RETURN(rc);
994 }
995 
996 
1030 enum_nested_loop_state
1031 sub_select_op(JOIN *join, JOIN_TAB *join_tab, bool end_of_records)
1032 {
1033  enum_nested_loop_state rc;
1034  QEP_operation *op= join_tab->op;
1035 
1036  /* This function cannot be called if join_tab has no associated operation */
1037  DBUG_ASSERT(op != NULL);
1038 
1039  DBUG_ENTER("sub_select_op");
1040 
1041  if (join->thd->killed)
1042  {
1043  /* The user has aborted the execution of the query */
1044  join->thd->send_kill_message();
1045  DBUG_RETURN(NESTED_LOOP_KILLED);
1046  }
1047 
1048  if (end_of_records)
1049  {
1050  rc= op->end_send();
1051  if (rc >= NESTED_LOOP_OK)
1052  rc= sub_select(join, join_tab, end_of_records);
1053  DBUG_RETURN(rc);
1054  }
1055  if (join_tab->prepare_scan())
1056  DBUG_RETURN(NESTED_LOOP_ERROR);
1057 
1058  /*
1059  setup_join_buffering() disables join buffering if QS_DYNAMIC_RANGE is
1060  enabled.
1061  */
1062  DBUG_ASSERT(join_tab->use_quick != QS_DYNAMIC_RANGE);
1063 
1064  rc= op->put_record();
1065 
1066  DBUG_RETURN(rc);
1067 }
1068 
1069 
1197 enum_nested_loop_state
1198 sub_select(JOIN *join,JOIN_TAB *join_tab,bool end_of_records)
1199 {
1200  DBUG_ENTER("sub_select");
1201 
1202  join_tab->table->null_row=0;
1203  if (end_of_records)
1204  {
1205  enum_nested_loop_state nls=
1206  (*join_tab->next_select)(join,join_tab+1,end_of_records);
1207  DBUG_RETURN(nls);
1208  }
1209  READ_RECORD *info= &join_tab->read_record;
1210 
1211  if (join_tab->prepare_scan())
1212  DBUG_RETURN(NESTED_LOOP_ERROR);
1213 
1214  if (join_tab->starts_weedout())
1215  {
1216  do_sj_reset(join_tab->flush_weedout_table);
1217  }
1218 
1219  join->return_tab= join_tab;
1220  join_tab->not_null_compl= true;
1221  join_tab->found_match= false;
1222 
1223  if (join_tab->last_inner)
1224  {
1225  /* join_tab is the first inner table for an outer join operation. */
1226 
1227  /* Set initial state of guard variables for this table.*/
1228  join_tab->found=0;
1229 
1230  /* Set first_unmatched for the last inner table of this group */
1231  join_tab->last_inner->first_unmatched= join_tab;
1232  }
1233  if (join_tab->do_firstmatch() || join_tab->do_loosescan())
1234  {
1235  /*
1236  join_tab is the first table of a LooseScan range, or has a "jump"
1237  address in a FirstMatch range.
1238  Reset the matching for this round of execution.
1239  */
1240  join_tab->match_tab->found_match= false;
1241  }
1242 
1243  join->thd->get_stmt_da()->reset_current_row_for_warning();
1244 
1245  enum_nested_loop_state rc= NESTED_LOOP_OK;
1246  bool in_first_read= true;
1247  while (rc == NESTED_LOOP_OK && join->return_tab >= join_tab)
1248  {
1249  int error;
1250  if (in_first_read)
1251  {
1252  in_first_read= false;
1253  error= (*join_tab->read_first_record)(join_tab);
1254  }
1255  else
1256  error= info->read_record(info);
1257 
1258  DBUG_EXECUTE_IF("bug13822652_1", join->thd->killed= THD::KILL_QUERY;);
1259 
1260  if (error > 0 || (join->thd->is_error())) // Fatal error
1261  rc= NESTED_LOOP_ERROR;
1262  else if (error < 0)
1263  break;
1264  else if (join->thd->killed) // Aborted by user
1265  {
1266  join->thd->send_kill_message();
1267  rc= NESTED_LOOP_KILLED;
1268  }
1269  else
1270  {
1271  if (join_tab->keep_current_rowid)
1272  join_tab->table->file->position(join_tab->table->record[0]);
1273  rc= evaluate_join_record(join, join_tab);
1274  }
1275  }
1276 
1277  if (rc == NESTED_LOOP_OK && join_tab->last_inner && !join_tab->found)
1278  rc= evaluate_null_complemented_join_record(join, join_tab);
1279 
1280  DBUG_RETURN(rc);
1281 }
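
sub_select() above is the core of the nested-loop executor: it scans its own table (one first read, then repeated next reads) and, for every row that passes the attached condition, calls the next table's select function, recursing until the last table passes the completed row combination onward. Below is a deliberately simplified standalone model of that control flow (hypothetical types, no JOIN_TAB/READ_RECORD, and none of the outer-join or semi-join handling):

// Each level scans its table; rows passing the level's condition are
// extended by recursing into the next level; the last level "sends" the row.
#include <cstdio>
#include <vector>

using Table= std::vector<int>;

static void sub_select_sketch(const std::vector<Table> &tables,
                              std::vector<int> &current, size_t level)
{
  if (level == tables.size())                      // past the last table: emit
  {
    std::printf("row:");
    for (int v : current) std::printf(" %d", v);
    std::printf("\n");
    return;
  }
  for (int v : tables[level])                      // first read, then next reads
  {
    if (v % 2 == 0)                                // pushed-down condition for this level
      continue;
    current.push_back(v);
    sub_select_sketch(tables, current, level + 1); // the next_select step
    current.pop_back();
  }
}

int main()
{
  std::vector<Table> tables= {{1, 2, 3}, {5, 6, 7}};
  std::vector<int> current;
  sub_select_sketch(tables, current, 0);           // joins the odd values of both tables
  return 0;
}
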
1282 
1283 
1295 bool JOIN_TAB::prepare_scan()
1296 {
1297  // Check whether materialization is required.
1298  if (!materialize_table || materialized)
1299  return false;
1300 
1301  // Materialize table prior to reading it
1302  if ((*materialize_table)(this))
1303  return true;
1304 
1305  materialized= true;
1306 
1307  // Bind to the rowid buffer managed by the TABLE object.
1308  if (copy_current_rowid)
1309  copy_current_rowid->bind_buffer(table->file->ref);
1310 
1311  return false;
1312 }
1313 
1314 
1334 int do_sj_dups_weedout(THD *thd, SJ_TMP_TABLE *sjtbl)
1335 {
1336  int error;
1337  SJ_TMP_TABLE::TAB *tab= sjtbl->tabs;
1338  SJ_TMP_TABLE::TAB *tab_end= sjtbl->tabs_end;
1339 
1340  DBUG_ENTER("do_sj_dups_weedout");
1341 
1342  if (sjtbl->is_confluent)
1343  {
1344  if (sjtbl->have_confluent_row)
1345  DBUG_RETURN(1);
1346  else
1347  {
1348  sjtbl->have_confluent_row= TRUE;
1349  DBUG_RETURN(0);
1350  }
1351  }
1352 
1353  uchar *ptr= sjtbl->tmp_table->record[0] + 1;
1354  // Put the rowids tuple into table->record[0]:
1355  // 1. Store the length
1356  if (((Field_varstring*)(sjtbl->tmp_table->field[0]))->length_bytes == 1)
1357  {
1358  *ptr= (uchar)(sjtbl->rowid_len + sjtbl->null_bytes);
1359  ptr++;
1360  }
1361  else
1362  {
1363  int2store(ptr, sjtbl->rowid_len + sjtbl->null_bytes);
1364  ptr += 2;
1365  }
1366 
1367  // 2. Zero the null bytes
1368  uchar *const nulls_ptr= ptr;
1369  if (sjtbl->null_bytes)
1370  {
1371  memset(ptr, 0, sjtbl->null_bytes);
1372  ptr += sjtbl->null_bytes;
1373  }
1374 
1375  // 3. Put the rowids
1376  for (uint i=0; tab != tab_end; tab++, i++)
1377  {
1378  handler *h= tab->join_tab->table->file;
1379  if (tab->join_tab->table->maybe_null && tab->join_tab->table->null_row)
1380  {
1381  /* It's a NULL-complemented row */
1382  *(nulls_ptr + tab->null_byte) |= tab->null_bit;
1383  memset(ptr + tab->rowid_offset, 0, h->ref_length);
1384  }
1385  else
1386  {
1387  /* Copy the rowid value */
1388  memcpy(ptr + tab->rowid_offset, h->ref, h->ref_length);
1389  }
1390  }
1391 
1392  error= sjtbl->tmp_table->file->ha_write_row(sjtbl->tmp_table->record[0]);
1393  if (error)
1394  {
1395  /* If this is a duplicate error, return immediately */
1396  if (!sjtbl->tmp_table->file->is_fatal_error(error, HA_CHECK_DUP))
1397  DBUG_RETURN(1);
1398  /*
1399  Other error than duplicate error: Attempt to create a temporary table.
1400  */
1401  bool is_duplicate;
1402  if (create_myisam_from_heap(thd, sjtbl->tmp_table,
1403  sjtbl->start_recinfo, &sjtbl->recinfo,
1404  error, TRUE, &is_duplicate))
1405  DBUG_RETURN(-1);
1406  DBUG_RETURN(is_duplicate ? 1 : 0);
1407  }
1408  DBUG_RETURN(0);
1409 }
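
do_sj_dups_weedout() above packs the rowids of every table covered by the weedout, preceded by a length field and a block of null bits for NULL-complemented rows, into a single varstring column of the weedout temporary table; a duplicate-key error on the insert means the same row combination was produced before and must be discarded. A small standalone sketch of the same idea, with std::set standing in for the unique key and purely illustrative names (it also ignores the "confluent" zero-length special case handled above):

// Pack the rowids (or a null bit for a NULL-complemented row) into one
// byte tuple; a failed set insert signals a duplicate combination.
#include <cstdint>
#include <cstdio>
#include <cstring>
#include <optional>
#include <set>
#include <vector>

static bool is_duplicate(std::set<std::vector<uint8_t>> &seen,
                         const std::vector<std::optional<uint32_t>> &rowids)
{
  std::vector<uint8_t> tuple(1 + rowids.size() * sizeof(uint32_t), 0);
  for (size_t i= 0; i < rowids.size(); i++)
  {
    if (!rowids[i].has_value())
      tuple[0]|= uint8_t(1u << i);                 // null bit: NULL-complemented row
    else
      std::memcpy(&tuple[1 + i * sizeof(uint32_t)], &*rowids[i], sizeof(uint32_t));
  }
  return !seen.insert(tuple).second;               // insert fails <=> seen before
}

int main()
{
  std::set<std::vector<uint8_t>> seen;
  std::printf("%d\n", is_duplicate(seen, {10u, 20u}));          // 0: first time
  std::printf("%d\n", is_duplicate(seen, {10u, 20u}));          // 1: duplicate, weed out
  std::printf("%d\n", is_duplicate(seen, {10u, std::nullopt})); // 0: different tuple
  return 0;
}
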
1410 
1411 
1416 static int do_sj_reset(SJ_TMP_TABLE *sj_tbl)
1417 {
1418  DBUG_ENTER("do_sj_reset");
1419  if (sj_tbl->tmp_table)
1420  {
1421  int rc= sj_tbl->tmp_table->file->ha_delete_all_rows();
1422  DBUG_RETURN(rc);
1423  }
1424  sj_tbl->have_confluent_row= FALSE;
1425  DBUG_RETURN(0);
1426 }
1427 
1443 static enum_nested_loop_state
1444 evaluate_join_record(JOIN *join, JOIN_TAB *join_tab)
1445 {
1446  bool not_used_in_distinct=join_tab->not_used_in_distinct;
1447  ha_rows found_records=join->found_records;
1448  Item *condition= join_tab->condition();
1449  bool found= TRUE;
1450  DBUG_ENTER("evaluate_join_record");
1451  DBUG_PRINT("enter",
1452  ("join: %p join_tab index: %d table: %s cond: %p",
1453  join, static_cast<int>(join_tab - join_tab->join->join_tab),
1454  join_tab->table->alias, condition));
1455 
1456  if (condition)
1457  {
1458  found= test(condition->val_int());
1459 
1460  if (join->thd->killed)
1461  {
1462  join->thd->send_kill_message();
1463  DBUG_RETURN(NESTED_LOOP_KILLED);
1464  }
1465 
1466  /* check for errors evaluating the condition */
1467  if (join->thd->is_error())
1468  DBUG_RETURN(NESTED_LOOP_ERROR);
1469  }
1470  if (found)
1471  {
1472  /*
1473  There is no condition on this join_tab or the attached pushed down
1474  condition is true => a match is found.
1475  */
1476  while (join_tab->first_unmatched && found)
1477  {
1478  /*
1479  The while condition is always false if join_tab is not
1480  the last inner join table of an outer join operation.
1481  */
1482  JOIN_TAB *first_unmatched= join_tab->first_unmatched;
1483  /*
1484  Mark that a match for current outer table is found.
1485  This activates the pushed-down conditional predicates attached
1486  to all inner tables of the outer join.
1487  */
1488  first_unmatched->found= 1;
1489  for (JOIN_TAB *tab= first_unmatched; tab <= join_tab; tab++)
1490  {
1491  /* Check all predicates that have just been activated. */
1492  /*
1493  Actually all predicates non-guarded by first_unmatched->found
1494  will be re-evaluated again. It could be fixed, but, probably,
1495  it's not worth doing now.
1496  */
1497  /*
1498  not_exists_optimize has been created from a
1499  condition containing 'is_null'. This 'is_null'
1500  predicate is still present on any 'tab' with
1501  'not_exists_optimize'. Furthermore, the usual rules
1502  for condition guards also applies for
1503  'not_exists_optimize' -> When 'is_null==false' we
1504  know all cond. guards are open and we can apply
1505  the 'not_exists_optimize'.
1506  */
1507  DBUG_ASSERT(!(tab->table->reginfo.not_exists_optimize &&
1508  !tab->condition()));
1509 
1510  if (tab->condition() && !tab->condition()->val_int())
1511  {
1512  /* The condition attached to table tab is false */
1513 
1514  if (tab->table->reginfo.not_exists_optimize)
1515  {
1516  /*
1517  When not_exists_optimizer is set and a matching row is found, the
1518  outer row should be excluded from the result set: no need to
1519  explore this record, thus we don't call the next_select.
1520  And, no need to explore other following records of 'tab', so we
1521  set join_tab->return_tab.
1522  As we set join_tab->found above, evaluate_join_record() at the
1523  upper level will not yield a NULL-complemented record.
1524  */
1525  join->return_tab= join_tab - 1;
1526  DBUG_RETURN(NESTED_LOOP_OK);
1527  }
1528 
1529  if (tab == join_tab)
1530  found= 0;
1531  else
1532  {
1533  /*
1534  Set a return point if rejected predicate is attached
1535  not to the last table of the current nest level.
1536  */
1537  join->return_tab= tab;
1538  DBUG_RETURN(NESTED_LOOP_OK);
1539  }
1540  }
1541  }
1542  /*
1543  Check whether join_tab is not the last inner table
1544  for another embedding outer join.
1545  */
1546  if ((first_unmatched= first_unmatched->first_upper) &&
1547  first_unmatched->last_inner != join_tab)
1548  first_unmatched= 0;
1549  join_tab->first_unmatched= first_unmatched;
1550  }
1551 
1552  JOIN_TAB *return_tab= join->return_tab;
1553 
1554  if (join_tab->finishes_weedout() && found)
1555  {
1556  int res= do_sj_dups_weedout(join->thd, join_tab->check_weed_out_table);
1557  if (res == -1)
1558  DBUG_RETURN(NESTED_LOOP_ERROR);
1559  else if (res == 1)
1560  found= FALSE;
1561  }
1562  else if (join_tab->do_loosescan() && join_tab->match_tab->found_match)
1563  {
1564  /* Loosescan algorithm requires 'sorted' retrieval of keys. */
1565  DBUG_ASSERT(join_tab->use_order());
1566  /*
1567  Previous row combination for duplicate-generating range,
1568  generated a match. Compare keys of this row and previous row
1569  to determine if this is a duplicate that should be skipped.
1570  */
1571  if (key_cmp(join_tab->table->key_info[join_tab->index].key_part,
1572  join_tab->loosescan_buf, join_tab->loosescan_key_len))
1573  /*
1574  Keys do not match.
1575  Reset found_match for last table of duplicate-generating range,
1576  to avoid comparing keys until a new match has been found.
1577  */
1578  join_tab->match_tab->found_match= false;
1579  else
1580  found= false;
1581  }
1582 
1583  join_tab->found_match= true;
1584 
1585  /*
1586  It was not just a return to lower loop level when one
1587  of the newly activated predicates is evaluated as false
1588  (See above join->return_tab= tab).
1589  */
1590  join->examined_rows++;
1591  DBUG_PRINT("counts", ("evaluate_join_record join->examined_rows++: %lu",
1592  (ulong) join->examined_rows));
1593 
1594  if (found)
1595  {
1596  enum enum_nested_loop_state rc;
1597  /* A match from join_tab is found for the current partial join. */
1598  rc= (*join_tab->next_select)(join, join_tab+1, 0);
1599  join->thd->get_stmt_da()->inc_current_row_for_warning();
1600  if (rc != NESTED_LOOP_OK)
1601  DBUG_RETURN(rc);
1602 
1603  if (join_tab->do_loosescan() && join_tab->match_tab->found_match)
1604  {
1605  /*
1606  A match was found for a duplicate-generating range of a semijoin.
1607  Copy key to be able to determine whether subsequent rows
1608  will give duplicates that should be skipped.
1609  */
1610  KEY *key= join_tab->table->key_info + join_tab->index;
1611  key_copy(join_tab->loosescan_buf, join_tab->read_record.record, key,
1612  join_tab->loosescan_key_len);
1613  }
1614  else if (join_tab->do_firstmatch() && join_tab->match_tab->found_match)
1615  {
1616  /*
1617  We should return to join_tab->firstmatch_return after we have
1618  enumerated all the suffixes for current prefix row combination
1619  */
1620  set_if_smaller(return_tab, join_tab->firstmatch_return);
1621  }
1622 
1623  /*
1624  Test if this was a SELECT DISTINCT query on a table that
1625  was not in the field list; In this case we can abort if
1626  we found a row, as no new rows can be added to the result.
1627  */
1628  if (not_used_in_distinct && found_records != join->found_records)
1629  set_if_smaller(return_tab, join_tab - 1);
1630 
1631  set_if_smaller(join->return_tab, return_tab);
1632  }
1633  else
1634  {
1635  join->thd->get_stmt_da()->inc_current_row_for_warning();
1636  if (join_tab->not_null_compl)
1637  {
1638  /* a NULL-complemented row is not in a table so cannot be locked */
1639  join_tab->read_record.unlock_row(join_tab);
1640  }
1641  }
1642  }
1643  else
1644  {
1645  /*
1646  The condition pushed down to the table join_tab rejects all rows
1647  with the beginning coinciding with the current partial join.
1648  */
1649  join->examined_rows++;
1650  join->thd->get_stmt_da()->inc_current_row_for_warning();
1651  if (join_tab->not_null_compl)
1652  join_tab->read_record.unlock_row(join_tab);
1653  }
1654  DBUG_RETURN(NESTED_LOOP_OK);
1655 }
1656 
1657 
1666 static enum_nested_loop_state
1667 evaluate_null_complemented_join_record(JOIN *join, JOIN_TAB *join_tab)
1668 {
1669  /*
1670  The table join_tab is the first inner table of an outer join operation
1671  and no matches have been found for the current outer row.
1672  */
1673  JOIN_TAB *first_inner_tab= join_tab;
1674  JOIN_TAB *last_inner_tab= join_tab->last_inner;
1675 
1676  DBUG_ENTER("evaluate_null_complemented_join_record");
1677 
1678  for ( ; join_tab <= last_inner_tab ; join_tab++)
1679  {
1680  // Make sure that the rowid buffer is bound, duplicates weedout needs it
1681  if (join_tab->copy_current_rowid &&
1682  !join_tab->copy_current_rowid->buffer_is_bound())
1683  join_tab->copy_current_rowid->bind_buffer(join_tab->table->file->ref);
1684 
1685  /* Change the values of guard predicate variables. */
1686  join_tab->found= 1;
1687  join_tab->not_null_compl= 0;
1688  /* The outer row is complemented by nulls for each inner tables */
1689  restore_record(join_tab->table,s->default_values); // Make empty record
1690  mark_as_null_row(join_tab->table); // For group by without error
1691  if (join_tab->starts_weedout() && join_tab > first_inner_tab)
1692  {
1693  // sub_select() has not performed a reset for this table.
1694  do_sj_reset(join_tab->flush_weedout_table);
1695  }
1696  /* Check all attached conditions for inner table rows. */
1697  if (join_tab->condition() && !join_tab->condition()->val_int())
1698  DBUG_RETURN(NESTED_LOOP_OK);
1699  }
1700  join_tab= last_inner_tab;
1701  /*
1702  From the point of view of the rest of execution, this record matches
1703  (it has been built and satisfies conditions, no need to do more evaluation
1704  on it). See similar code in evaluate_join_record().
1705  */
1706  JOIN_TAB *first_unmatched= join_tab->first_unmatched->first_upper;
1707  if (first_unmatched != NULL &&
1708  first_unmatched->last_inner != join_tab)
1709  first_unmatched= NULL;
1710  join_tab->first_unmatched= first_unmatched;
1711  /*
1712  The row complemented by nulls satisfies all conditions
1713  attached to inner tables.
1714  Finish evaluation of record and send it to be joined with
1715  remaining tables.
1716  Note that evaluate_join_record will re-evaluate the condition attached
1717  to the last inner table of the current outer join. This is not deemed to
1718  have a significant performance impact.
1719  */
1720  const enum_nested_loop_state rc= evaluate_join_record(join, join_tab);
1721  DBUG_RETURN(rc);
1722 }
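
evaluate_null_complemented_join_record() above is what makes LEFT JOIN return unmatched outer rows: when no inner row matched the current outer row, the inner tables are filled with an empty, NULL-marked record and the combination is passed on as if it had matched (subject to the conditions attached to the inner tables). A tiny standalone model of the visible effect (plain C++, not the server's record buffers):

// Emit a NULL-complemented row for outer rows without an inner match.
#include <cstdio>
#include <utility>
#include <vector>

int main()
{
  std::vector<int> outer= {1, 2, 3};
  std::vector<std::pair<int, int>> inner= {{1, 100}, {1, 101}, {3, 300}};

  for (int o : outer)
  {
    bool found= false;                     // plays the role of join_tab->found
    for (const auto &i : inner)
      if (i.first == o)
      {
        found= true;
        std::printf("%d joins %d\n", o, i.second);
      }
    if (!found)                            // no match: NULL-complement the inner side
      std::printf("%d joins NULL\n", o);
  }
  return 0;
}
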
1723 
1724 
1725 /*****************************************************************************
1726  The different ways to read a record
1727  Returns -1 if row was not found, 0 if row was found and 1 on errors
1728 *****************************************************************************/
1729 
1732 int report_handler_error(TABLE *table, int error)
1733 {
1734  if (error == HA_ERR_END_OF_FILE || error == HA_ERR_KEY_NOT_FOUND)
1735  {
1736  table->status= STATUS_GARBAGE;
1737  return -1; // key not found; ok
1738  }
1739  /*
1740  Do not spam the error log with these temporary errors:
1741  LOCK_DEADLOCK LOCK_WAIT_TIMEOUT TABLE_DEF_CHANGED
1742  Also skip printing to error log if the current thread has been killed.
1743  */
1744  if (error != HA_ERR_LOCK_DEADLOCK &&
1745  error != HA_ERR_LOCK_WAIT_TIMEOUT &&
1746  error != HA_ERR_TABLE_DEF_CHANGED &&
1747  !table->in_use->killed)
1748  sql_print_error("Got error %d when reading table '%s'",
1749  error, table->s->path.str);
1750  table->file->print_error(error,MYF(0));
1751  return 1;
1752 }
1753 
1754 
1755 int safe_index_read(JOIN_TAB *tab)
1756 {
1757  int error;
1758  TABLE *table= tab->table;
1759  if ((error=table->file->ha_index_read_map(table->record[0],
1760  tab->ref.key_buff,
1761  make_prev_keypart_map(tab->ref.key_parts),
1762  HA_READ_KEY_EXACT)))
1763  return report_handler_error(table, error);
1764  return 0;
1765 }
1766 
1767 
1768 static int
1769 test_if_quick_select(JOIN_TAB *tab)
1770 {
1771  tab->select->set_quick(NULL);
1772  return tab->select->test_quick_select(tab->join->thd,
1773  tab->keys,
1774  0, // empty table map
1775  HA_POS_ERROR,
1776  false, // don't force quick range
1777  ORDER::ORDER_NOT_RELEVANT);
1778 }
1779 
1780 
1790 int
1791 join_read_const_table(JOIN_TAB *tab, POSITION *pos)
1792 {
1793  int error;
1794  DBUG_ENTER("join_read_const_table");
1795  TABLE *table=tab->table;
1796  table->const_table=1;
1797  table->null_row=0;
1798  table->status= STATUS_GARBAGE | STATUS_NOT_FOUND;
1799 
1800  if (table->reginfo.lock_type >= TL_WRITE_ALLOW_WRITE)
1801  {
1802  const enum_sql_command sql_command= tab->join->thd->lex->sql_command;
1803  if (sql_command == SQLCOM_UPDATE_MULTI ||
1804  sql_command == SQLCOM_DELETE_MULTI)
1805  {
1806  /*
1807  In a multi-UPDATE, if we represent "depends on" with "->", we have:
1808  "what columns to read (read_set)" ->
1809  "whether table will be updated on-the-fly or with tmp table" ->
1810  "whether to-be-updated columns are used by access path"
1811  "access path to table (range, ref, scan...)" ->
1812  "query execution plan" ->
1813  "what tables are const" ->
1814  "reading const tables" ->
1815  "what columns to read (read_set)".
1816  To break this loop, we always read all columns of a constant table if
1817  it is going to be updated.
1818  Another case is in multi-UPDATE and multi-DELETE, when the table has a
1819  trigger: bits of columns needed by the trigger are turned on in
1820  result->initialize_tables(), which has not yet been called when we do
1821  the reading now, so we must read all columns.
1822  */
1823  bitmap_set_all(table->read_set);
1824  table->file->column_bitmaps_signal();
1825  }
1826  }
1827 
1828  if (tab->type == JT_SYSTEM)
1829  {
1830  if ((error=join_read_system(tab)))
1831  { // Info for DESCRIBE
1832  tab->info= ET_CONST_ROW_NOT_FOUND;
1833  /* Mark for EXPLAIN that the row was not found */
1834  pos->records_read=0.0;
1835  pos->ref_depend_map= 0;
1836  if (!table->pos_in_table_list->outer_join || error > 0)
1837  DBUG_RETURN(error);
1838  }
1839  }
1840  else
1841  {
1842  if (!table->key_read && table->covering_keys.is_set(tab->ref.key) &&
1843  !table->no_keyread &&
1844  (int) table->reginfo.lock_type <= (int) TL_READ_HIGH_PRIORITY)
1845  {
1846  table->set_keyread(TRUE);
1847  tab->index= tab->ref.key;
1848  }
1849  error=join_read_const(tab);
1850  table->set_keyread(FALSE);
1851  if (error)
1852  {
1853  tab->info= ET_UNIQUE_ROW_NOT_FOUND;
1854  /* Mark for EXPLAIN that the row was not found */
1855  pos->records_read=0.0;
1856  pos->ref_depend_map= 0;
1857  if (!table->pos_in_table_list->outer_join || error > 0)
1858  DBUG_RETURN(error);
1859  }
1860  }
1861 
1862  if (*tab->on_expr_ref && !table->null_row)
1863  {
1864  // We cannot handle outer-joined tables with expensive join conditions here:
1865  DBUG_ASSERT(!(*tab->on_expr_ref)->is_expensive());
1866  if ((table->null_row= test((*tab->on_expr_ref)->val_int() == 0)))
1867  mark_as_null_row(table);
1868  }
1869  if (!table->null_row)
1870  table->maybe_null=0;
1871 
1872  /* Check appearance of new constant items in Item_equal objects */
1873  JOIN *join= tab->join;
1874  if (join->conds)
1875  update_const_equal_items(join->conds, tab);
1876  TABLE_LIST *tbl;
1877  for (tbl= join->select_lex->leaf_tables; tbl; tbl= tbl->next_leaf)
1878  {
1879  TABLE_LIST *embedded;
1880  TABLE_LIST *embedding= tbl;
1881  do
1882  {
1883  embedded= embedding;
1884  if (embedded->join_cond())
1885  update_const_equal_items(embedded->join_cond(), tab);
1886  embedding= embedded->embedding;
1887  }
1888  while (embedding &&
1889  embedding->nested_join->join_list.head() == embedded);
1890  }
1891 
1892  DBUG_RETURN(0);
1893 }
1894 
1895 
1906 static int
1907 join_read_system(JOIN_TAB *tab)
1908 {
1909  TABLE *table= tab->table;
1910  int error;
1911  if (table->status & STATUS_GARBAGE) // If first read
1912  {
1913  if ((error=table->file->read_first_row(table->record[0],
1914  table->s->primary_key)))
1915  {
1916  if (error != HA_ERR_END_OF_FILE)
1917  return report_handler_error(table, error);
1918  mark_as_null_row(tab->table);
1919  empty_record(table); // Make empty record
1920  return -1;
1921  }
1922  store_record(table,record[1]);
1923  }
1924  else if (!table->status) // Only happens with left join
1925  restore_record(table,record[1]); // restore old record
1926  table->null_row=0;
1927  return table->status ? -1 : 0;
1928 }
1929 
1930 
1942 static int
1943 join_read_const(JOIN_TAB *tab)
1944 {
1945  int error;
1946  TABLE *table= tab->table;
1947  DBUG_ENTER("join_read_const");
1948 
1949 
1950  if (table->status & STATUS_GARBAGE) // If first read
1951  {
1952  table->status= 0;
1953  if (cp_buffer_from_ref(tab->join->thd, table, &tab->ref))
1954  error=HA_ERR_KEY_NOT_FOUND;
1955  else
1956  {
1957  error=table->file->ha_index_read_idx_map(table->record[0],tab->ref.key,
1958  (uchar*) tab->ref.key_buff,
1959  make_prev_keypart_map(tab->ref.key_parts),
1960  HA_READ_KEY_EXACT);
1961  }
1962  if (error)
1963  {
1964  table->status= STATUS_NOT_FOUND;
1965  mark_as_null_row(tab->table);
1966  empty_record(table);
1967  if (error != HA_ERR_KEY_NOT_FOUND && error != HA_ERR_END_OF_FILE)
1968  {
1969  const int ret= report_handler_error(table, error);
1970  DBUG_RETURN(ret);
1971  }
1972  DBUG_RETURN(-1);
1973  }
1974  store_record(table,record[1]);
1975  }
1976  else if (!(table->status & ~STATUS_NULL_ROW)) // Only happens with left join
1977  {
1978  table->status=0;
1979  restore_record(table,record[1]); // restore old record
1980  }
1981  table->null_row=0;
1982  DBUG_RETURN(table->status ? -1 : 0);
1983 }
1984 
1985 
2001 static int
2002 join_read_key(JOIN_TAB *tab)
2003 {
2004  TABLE *const table= tab->table;
2005  TABLE_REF *table_ref= &tab->ref;
2006  int error;
2007 
2008  if (!table->file->inited)
2009  {
2010  DBUG_ASSERT(!tab->use_order()); //Don't expect sort req. for single row.
2011  if ((error= table->file->ha_index_init(table_ref->key, tab->use_order())))
2012  {
2013  (void) report_handler_error(table, error);
2014  return 1;
2015  }
2016  }
2017 
2018  /*
2019  We needn't do "Late NULLs Filtering" because eq_ref is restricted to
2020  indices on NOT NULL columns (see create_ref_for_key()).
2021  */
2022  if (cmp_buffer_with_ref(tab->join->thd, table, table_ref) ||
2023  (table->status & (STATUS_GARBAGE | STATUS_NULL_ROW)))
2024  {
2025  if (table_ref->key_err)
2026  {
2027  table->status=STATUS_NOT_FOUND;
2028  return -1;
2029  }
2030  /*
2031  Moving away from the current record. Unlock the row
2032  in the handler if it did not match the partial WHERE.
2033  */
2034  if (table_ref->has_record && table_ref->use_count == 0)
2035  {
2036  table->file->unlock_row();
2037  table_ref->has_record= FALSE;
2038  }
2039  error= table->file->ha_index_read_map(table->record[0],
2040  table_ref->key_buff,
2041  make_prev_keypart_map(table_ref->key_parts),
2042  HA_READ_KEY_EXACT);
2043  if (error && error != HA_ERR_KEY_NOT_FOUND && error != HA_ERR_END_OF_FILE)
2044  return report_handler_error(table, error);
2045 
2046  if (! error)
2047  {
2048  table_ref->has_record= TRUE;
2049  table_ref->use_count= 1;
2050  }
2051  }
2052  else if (table->status == 0)
2053  {
2054  DBUG_ASSERT(table_ref->has_record);
2055  table_ref->use_count++;
2056  }
2057  table->null_row=0;
2058  return table->status ? -1 : 0;
2059 }
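
join_read_key() above exploits the fact that, for eq_ref access, the same key value often repeats for consecutive rows of the preceding tables: when cmp_buffer_with_ref() reports that the key buffer is unchanged, the row already sitting in the record buffer is reused instead of probing the index again. A standalone sketch of that caching idea, in which std::map stands in for the unique index and the cache fields only loosely mirror TABLE_REF::has_record and friends:

// Re-probe the unique index only when the key value actually changes.
#include <cstdio>
#include <map>
#include <optional>

struct EqRefCache
{
  std::optional<int> last_key;    // key value used for the previous probe
  std::optional<long> last_row;   // row found then (empty means "not found")

  std::optional<long> read(const std::map<int, long> &index, int key)
  {
    if (last_key && *last_key == key)     // same key as last time: reuse the row
      return last_row;
    last_key= key;
    last_row.reset();
    auto it= index.find(key);
    if (it != index.end())
      last_row= it->second;
    return last_row;
  }
};

int main()
{
  std::map<int, long> index= {{1, 10L}, {2, 20L}};
  EqRefCache cache;
  for (int key : {1, 1, 2, 3})            // the second lookup of key 1 hits the cache
    std::printf("key=%d -> %s\n", key, cache.read(index, key) ? "found" : "not found");
  return 0;
}
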
2060 
2070 void
2071 join_read_key_unlock_row(st_join_table *tab)
2072 {
2073  DBUG_ASSERT(tab->ref.use_count);
2074  if (tab->ref.use_count)
2075  tab->ref.use_count--;
2076 }
2077 
2102 static int
2103 join_read_linked_first(JOIN_TAB *tab)
2104 {
2105  int error;
2106  TABLE *table= tab->table;
2107  DBUG_ENTER("join_read_linked_first");
2108 
2109  DBUG_ASSERT(!tab->use_order()); // Pushed child can't be sorted
2110  if (!table->file->inited &&
2111  (error= table->file->ha_index_init(tab->ref.key, tab->use_order())))
2112  {
2113  (void) report_handler_error(table, error);
2114  DBUG_RETURN(error);
2115  }
2116 
2117  /* Perform "Late NULLs Filtering" (see internals manual for explanations) */
2118  if (tab->ref.impossible_null_ref())
2119  {
2120  DBUG_PRINT("info", ("join_read_linked_first null_rejected"));
2121  DBUG_RETURN(-1);
2122  }
2123 
2124  if (cp_buffer_from_ref(tab->join->thd, table, &tab->ref))
2125  {
2126  table->status=STATUS_NOT_FOUND;
2127  DBUG_RETURN(-1);
2128  }
2129 
2130  // 'read' itself is a NOOP:
2131  // handler::index_read_pushed() only unpacks the prefetched row and sets 'status'
2132  error=table->file->index_read_pushed(table->record[0],
2133  tab->ref.key_buff,
2134  make_prev_keypart_map(tab->ref.key_parts));
2135  if (unlikely(error && error != HA_ERR_KEY_NOT_FOUND && error != HA_ERR_END_OF_FILE))
2136  DBUG_RETURN(report_handler_error(table, error));
2137 
2138  table->null_row=0;
2139  int rc= table->status ? -1 : 0;
2140  DBUG_RETURN(rc);
2141 }
2142 
2143 static int
2144 join_read_linked_next(READ_RECORD *info)
2145 {
2146  TABLE *table= info->table;
2147  DBUG_ENTER("join_read_linked_next");
2148 
2149  int error=table->file->index_next_pushed(table->record[0]);
2150  if (error)
2151  {
2152  if (unlikely(error != HA_ERR_END_OF_FILE))
2153  DBUG_RETURN(report_handler_error(table, error));
2154  table->status= STATUS_GARBAGE;
2155  DBUG_RETURN(-1);
2156  }
2157  DBUG_RETURN(error);
2158 }
2159 
2160 /*
2161  ref access method implementation: "read_first" function
2162 
2163  SYNOPSIS
2164  join_read_always_key()
2165  tab JOIN_TAB of the accessed table
2166 
2167  DESCRIPTION
2168  This is the "read_first" function for the "ref" access method.
2169 
2170  The function must leave the index initialized when it returns.
2171  ref_or_null access implementation depends on that.
2172 
2173  RETURN
2174  0 - Ok
2175  -1 - Row not found
2176  1 - Error
2177 */
2178 
2179 static int
2180 join_read_always_key(JOIN_TAB *tab)
2181 {
2182  int error;
2183  TABLE *table= tab->table;
2184 
2185  /* Initialize the index first */
2186  if (!table->file->inited &&
2187  (error= table->file->ha_index_init(tab->ref.key, tab->use_order())))
2188  {
2189  (void) report_handler_error(table, error);
2190  return 1;
2191  }
2192 
2193  /* Perform "Late NULLs Filtering" (see internals manual for explanations) */
2194  TABLE_REF *ref= &tab->ref;
2195  if (ref->impossible_null_ref())
2196  {
2197  DBUG_PRINT("info", ("join_read_always_key null_rejected"));
2198  return -1;
2199  }
2200 
2201  if (cp_buffer_from_ref(tab->join->thd, table, ref))
2202  return -1;
2203  if ((error= table->file->ha_index_read_map(table->record[0],
2204  tab->ref.key_buff,
2205  make_prev_keypart_map(tab->ref.key_parts),
2206  HA_READ_KEY_EXACT)))
2207  {
2208  if (error != HA_ERR_KEY_NOT_FOUND && error != HA_ERR_END_OF_FILE)
2209  return report_handler_error(table, error);
2210  return -1; /* purecov: inspected */
2211  }
2212  return 0;
2213 }
2214 
2215 
2221 int
2222 join_read_last_key(JOIN_TAB *tab)
2223 {
2224  int error;
2225  TABLE *table= tab->table;
2226 
2227  if (!table->file->inited &&
2228  (error= table->file->ha_index_init(tab->ref.key, tab->use_order())))
2229  {
2230  (void) report_handler_error(table, error);
2231  return 1;
2232  }
2233  if (cp_buffer_from_ref(tab->join->thd, table, &tab->ref))
2234  return -1;
2235  if ((error=table->file->ha_index_read_last_map(table->record[0],
2236  tab->ref.key_buff,
2237  make_prev_keypart_map(tab->ref.key_parts))))
2238  {
2239  if (error != HA_ERR_KEY_NOT_FOUND && error != HA_ERR_END_OF_FILE)
2240  return report_handler_error(table, error);
2241  return -1; /* purecov: inspected */
2242  }
2243  return 0;
2244 }
2245 
2246 
2247  /* ARGSUSED */
2248 static int
2249 join_no_more_records(READ_RECORD *info __attribute__((unused)))
2250 {
2251  return -1;
2252 }
2253 
2254 
2255 static int
2256 join_read_next_same(READ_RECORD *info)
2257 {
2258  int error;
2259  TABLE *table= info->table;
2260  JOIN_TAB *tab=table->reginfo.join_tab;
2261 
2262  if ((error= table->file->ha_index_next_same(table->record[0],
2263  tab->ref.key_buff,
2264  tab->ref.key_length)))
2265  {
2266  if (error != HA_ERR_END_OF_FILE)
2267  return report_handler_error(table, error);
2268  table->status= STATUS_GARBAGE;
2269  return -1;
2270  }
2271  return 0;
2272 }
2273 
2274 
2275 int
2276 join_read_prev_same(READ_RECORD *info)
2277 {
2278  int error;
2279  TABLE *table= info->table;
2280  JOIN_TAB *tab=table->reginfo.join_tab;
2281 
2282  /*
2283  Using ha_index_prev() for reading records from the table can cause
2284  performance issues if used in combination with ICP. The ICP code
2285  in the storage engine does not know when to stop reading from the
2286  index and a call to ha_index_prev() might cause the storage engine
2287  to read to the beginning of the index if no qualifying record is
2288  found.
2289  */
2290  DBUG_ASSERT(table->file->pushed_idx_cond == NULL);
2291 
2292  if ((error= table->file->ha_index_prev(table->record[0])))
2293  return report_handler_error(table, error);
2294  if (key_cmp_if_same(table, tab->ref.key_buff, tab->ref.key,
2295  tab->ref.key_length))
2296  {
2297  table->status=STATUS_NOT_FOUND;
2298  error= -1;
2299  }
2300  return error;
2301 }
2302 
2303 
2304 int
2305 join_init_quick_read_record(JOIN_TAB *tab)
2306 {
2307  /*
2308  This is for QS_DYNAMIC_RANGE, i.e., "Range checked for each
2309  record". The trace for the range analysis below this point will
2310  be printed with different ranges for every record to the left of
2311  this table in the join.
2312  */
2313 
2314 #ifdef OPTIMIZER_TRACE
2315  Opt_trace_context * const trace= &tab->join->thd->opt_trace;
2316  const bool disable_trace=
2317  tab->select->traced_before &&
2318  !trace->feature_enabled(Opt_trace_context::DYNAMIC_RANGE);
2319  Opt_trace_disable_I_S disable_trace_wrapper(trace, disable_trace);
2320 
2321  tab->select->traced_before= true;
2322 
2323  Opt_trace_object wrapper(trace);
2324  Opt_trace_object trace_table(trace, "rows_estimation_per_outer_row");
2325  trace_table.add_utf8_table(tab->table);
2326 #endif
2327 
2328  /*
2329  If this join tab was read through a QUICK for the last record
2330  combination from earlier tables, test_if_quick_select() will
2331  delete that quick and effectively close the index. Otherwise, we
2332  need to close the index before the next join iteration starts
2333  because the handler object might be reused by a different access
2334  strategy.
2335  */
2336  if ((!tab->select || !tab->select->quick) &&
2337  (tab->table->file->inited != handler::NONE))
2338  tab->table->file->ha_index_or_rnd_end();
2339 
2340  if (test_if_quick_select(tab) == -1)
2341  return -1; /* No possible records */
2342  return join_init_read_record(tab);
2343 }
2344 
2345 
2346 int read_first_record_seq(JOIN_TAB *tab)
2347 {
2348  if (tab->read_record.table->file->ha_rnd_init(1))
2349  return 1;
2350  return (*tab->read_record.read_record)(&tab->read_record);
2351 }
2352 
2353 
2373 int join_init_read_record(JOIN_TAB *tab)
2374 {
2375  int error;
2376 
2377  if (tab->distinct && tab->remove_duplicates()) // Remove duplicates.
2378  return 1;
2379  if (tab->filesort && tab->sort_table()) // Sort table.
2380  return 1;
2381 
2382  if (tab->select && tab->select->quick && (error= tab->select->quick->reset()))
2383  {
2384  /* Ensures error status is propagated back to the client */
2385  report_handler_error(tab->table, error);
2386  return 1;
2387  }
2388  if (init_read_record(&tab->read_record, tab->join->thd, tab->table,
2389  tab->select, 1, 1, FALSE))
2390  return 1;
2391 
2392  return (*tab->read_record.read_record)(&tab->read_record);
2393 }
2394 
2395 /*
2396  This helper function materializes a derived table/view and then calls the
2397  read_first_record function to set up access to the materialized table.
2398 */
2399 
2400 int
2401 join_materialize_derived(JOIN_TAB *tab)
2402 {
2403  TABLE_LIST *derived= tab->table->pos_in_table_list;
2404  DBUG_ASSERT(derived->uses_materialization() && !tab->materialized);
2405 
2406  if (derived->materializable_is_const()) // Has been materialized by optimizer
2407  return NESTED_LOOP_OK;
2408 
2409  bool res= mysql_handle_single_derived(tab->table->in_use->lex,
2410  derived, &mysql_derived_materialize);
2411  if (!tab->table->in_use->lex->describe)
2412  mysql_handle_single_derived(tab->table->in_use->lex,
2413  derived, &mysql_derived_cleanup);
2414  return res ? NESTED_LOOP_ERROR : NESTED_LOOP_OK;
2415 }
2416 
2417 
2418 
2419 /*
2420  Helper function for materialization of a semi-joined subquery.
2421 
2422  @param tab JOIN_TAB referencing a materialized semi-join table
2423 
2424  @return Nested loop state
2425 */
2426 
2427 int
2428 join_materialize_semijoin(JOIN_TAB *tab)
2429 {
2430  DBUG_ENTER("join_materialize_semijoin");
2431 
2432  Semijoin_mat_exec *const sjm= tab->sj_mat_exec;
2433 
2434  JOIN_TAB *const first= tab->join->join_tab + sjm->inner_table_index;
2435  JOIN_TAB *const last= first + (sjm->table_count - 1);
2436  /*
2437  Set up the end_sj_materialize function after the last inner table,
2438  so that generated rows are inserted into the materialized table.
2439  */
2440  last->next_select= end_sj_materialize;
2441  last->sj_mat_exec= sjm; // TODO: This violates comment for sj_mat_exec!
2442 
2443  int rc;
2444  if ((rc= sub_select(tab->join, first, false)) < 0)
2445  DBUG_RETURN(rc);
2446  if ((rc= sub_select(tab->join, first, true)) < 0)
2447  DBUG_RETURN(rc);
2448 
2449  last->next_select= NULL;
2450  last->sj_mat_exec= NULL;
2451 
2452  DBUG_RETURN(NESTED_LOOP_OK);
2453 }
2454 
2455 
2461 bool
2462 JOIN_TAB::use_order() const
2463 {
2464  /*
2465  No need to require sorted access for single row reads
2466  being performed by const- or EQ_REF-accessed tables.
2467  */
2468  if (type == JT_EQ_REF ||
2469  type == JT_CONST ||
2470  type == JT_SYSTEM)
2471  return false;
2472 
2473  /*
2474  First non-const table requires sorted results
2475  if ORDER or GROUP BY use ordered index.
2476  */
2477  if (this == &join->join_tab[join->const_tables] &&
2478  join->ordered_index_usage != JOIN::ordered_index_void)
2479  return true;
2480 
2481  /*
2482  LooseScan strategy for semijoin requires sorted
2483  results even if final result is not to be sorted.
2484  */
2485  if (position->sj_strategy == SJ_OPT_LOOSE_SCAN)
2486  return true;
2487 
2488  /* Fall through: Results don't have to be sorted */
2489  return false;
2490 }
2491 
2492 /*
2493  Helper function for sorting a table with filesort.
2494 */
2495 
2496 bool
2497 JOIN_TAB::sort_table()
2498 {
2499  int rc;
2500  DBUG_PRINT("info",("Sorting for index"));
2501  THD_STAGE_INFO(join->thd, stage_creating_sort_index);
2502  DBUG_ASSERT(join->ordered_index_usage != (filesort->order == join->order ?
2503  JOIN::ordered_index_order_by :
2504  JOIN::ordered_index_group_by));
2505  rc= create_sort_index(join->thd, join, this);
2506  return (rc != 0);
2507 }
2508 
2509 
2510 int
2511 join_read_first(JOIN_TAB *tab)
2512 {
2513  int error;
2514  TABLE *table=tab->table;
2515  if (table->covering_keys.is_set(tab->index) && !table->no_keyread)
2516  table->set_keyread(TRUE);
2517  tab->table->status=0;
2518  tab->read_record.table=table;
2519  tab->read_record.index=tab->index;
2520  tab->read_record.record=table->record[0];
2521  tab->read_record.read_record=join_read_next;
2522 
2523  if (!table->file->inited &&
2524  (error= table->file->ha_index_init(tab->index, tab->use_order())))
2525  {
2526  (void) report_handler_error(table, error);
2527  return 1;
2528  }
2529  if ((error= tab->table->file->ha_index_first(tab->table->record[0])))
2530  {
2531  if (error != HA_ERR_KEY_NOT_FOUND && error != HA_ERR_END_OF_FILE)
2532  report_handler_error(table, error);
2533  return -1;
2534  }
2535  return 0;
2536 }
2537 
2538 
2539 static int
2540 join_read_next(READ_RECORD *info)
2541 {
2542  int error;
2543  if ((error= info->table->file->ha_index_next(info->record)))
2544  return report_handler_error(info->table, error);
2545  return 0;
2546 }
2547 
2548 
2549 int
2550 join_read_last(JOIN_TAB *tab)
2551 {
2552  TABLE *table=tab->table;
2553  int error;
2554  if (table->covering_keys.is_set(tab->index) && !table->no_keyread)
2555  table->set_keyread(TRUE);
2556  tab->table->status=0;
2557  tab->read_record.read_record=join_read_prev;
2558  tab->read_record.table=table;
2559  tab->read_record.index=tab->index;
2560  tab->read_record.record=table->record[0];
2561  if (!table->file->inited &&
2562  (error= table->file->ha_index_init(tab->index, tab->use_order())))
2563  {
2564  (void) report_handler_error(table, error);
2565  return 1;
2566  }
2567  if ((error= tab->table->file->ha_index_last(tab->table->record[0])))
2568  return report_handler_error(table, error);
2569  return 0;
2570 }
2571 
2572 
2573 static int
2574 join_read_prev(READ_RECORD *info)
2575 {
2576  int error;
2577  if ((error= info->table->file->ha_index_prev(info->record)))
2578  return report_handler_error(info->table, error);
2579  return 0;
2580 }
2581 
2582 
2583 static int
2584 join_ft_read_first(JOIN_TAB *tab)
2585 {
2586  int error;
2587  TABLE *table= tab->table;
2588 
2589  if (!table->file->inited &&
2590  (error= table->file->ha_index_init(tab->ref.key, tab->use_order())))
2591  {
2592  (void) report_handler_error(table, error);
2593  return 1;
2594  }
2595  table->file->ft_init();
2596 
2597  if ((error= table->file->ft_read(table->record[0])))
2598  return report_handler_error(table, error);
2599  return 0;
2600 }
2601 
2602 static int
2603 join_ft_read_next(READ_RECORD *info)
2604 {
2605  int error;
2606  if ((error= info->table->file->ft_read(info->table->record[0])))
2607  return report_handler_error(info->table, error);
2608  return 0;
2609 }
2610 
2611 
2616 static int
2617 join_read_always_key_or_null(JOIN_TAB *tab)
2618 {
2619  int res;
2620 
2621  /* First read according to key which is NOT NULL */
2622  *tab->ref.null_ref_key= 0; // Clear null byte
2623  if ((res= join_read_always_key(tab)) >= 0)
2624  return res;
2625 
2626  /* Then read key with null value */
2627  *tab->ref.null_ref_key= 1; // Set null byte
2628  return safe_index_read(tab);
2629 }
2630 
2631 
2632 static int
2633 join_read_next_same_or_null(READ_RECORD *info)
2634 {
2635  int error;
2636  if ((error= join_read_next_same(info)) >= 0)
2637  return error;
2638  JOIN_TAB *tab= info->table->reginfo.join_tab;
2639 
2640  /* Test if we have already done a read after null key */
2641  if (*tab->ref.null_ref_key)
2642  return -1; // All keys read
2643  *tab->ref.null_ref_key= 1; // Set null byte
2644  return safe_index_read(tab); // then read null keys
2645 }
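ref_or_null access serves conditions of the form "key = const OR key IS NULL" by running the same ref lookup twice: first with the null byte cleared (key = const), and only after exhausting those rows with the null byte set (key IS NULL), as the two functions above do. A toy version of that two-pass scan over an in-memory "index"; the container and names are invented for the example.

  #include <cstdio>
  #include <optional>
  #include <vector>

  int main() {
    // Index entries: std::nullopt plays the role of a NULL key value.
    std::vector<std::optional<int>> keys = {5, std::nullopt, 7, 5, std::nullopt};
    const int lookup = 5;

    // Pass 1: null byte cleared -> rows matching key = 5.
    for (size_t i = 0; i < keys.size(); ++i)
      if (keys[i] && *keys[i] == lookup)
        std::printf("row %zu: key = %d\n", i, lookup);

    // Pass 2: null byte set -> rows matching key IS NULL.
    for (size_t i = 0; i < keys.size(); ++i)
      if (!keys[i])
        std::printf("row %zu: key IS NULL\n", i);
    return 0;
  }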
2646 
2647 
2656 void
2657 pick_table_access_method(JOIN_TAB *tab)
2658 {
2659  // Must have an associated table
2660  if (!tab->table)
2661  return;
2665  uint pushed_joins= tab->table->file->number_of_pushed_joins();
2666  if (pushed_joins > 0)
2667  {
2668  if (tab->table->file->root_of_pushed_join() != tab->table)
2669  {
2670  /*
2671  Is child of a pushed join operation:
2672  Replace access functions with its linked counterpart.
2673  ... Which is effectively a NOOP as the row is already fetched
2674  together with the root of the linked operation.
2675  */
2676  DBUG_ASSERT(tab->type != JT_REF_OR_NULL);
2677  tab->read_first_record= join_read_linked_first;
2678  tab->read_record.read_record= join_read_linked_next;
2679  tab->read_record.unlock_row= rr_unlock_row;
2680  return;
2681  }
2682  }
2683 
2688  else if (tab->read_first_record != NULL)
2689  return;
2690 
2691  // Fall through to set default access functions:
2692  switch (tab->type)
2693  {
2694  case JT_REF:
2695  tab->read_first_record= join_read_always_key;
2696  tab->read_record.read_record= join_read_next_same;
2697  tab->read_record.unlock_row= rr_unlock_row;
2698  break;
2699 
2700  case JT_REF_OR_NULL:
2701  tab->read_first_record= join_read_always_key_or_null;
2702  tab->read_record.read_record= join_read_next_same_or_null;
2703  tab->read_record.unlock_row= rr_unlock_row;
2704  break;
2705 
2706  case JT_CONST:
2707  tab->read_first_record= join_read_const;
2708  tab->read_record.read_record= join_no_more_records;
2709  tab->read_record.unlock_row= rr_unlock_row;
2710  break;
2711 
2712  case JT_EQ_REF:
2713  tab->read_first_record= join_read_key;
2714  tab->read_record.read_record= join_no_more_records;
2715  tab->read_record.unlock_row= join_read_key_unlock_row;
2716  break;
2717 
2718  case JT_FT:
2719  tab->read_first_record= join_ft_read_first;
2720  tab->read_record.read_record= join_ft_read_next;
2721  tab->read_record.unlock_row= rr_unlock_row;
2722  break;
2723 
2724  case JT_SYSTEM:
2725  tab->read_first_record= join_read_system;
2726  tab->read_record.read_record= join_no_more_records;
2727  tab->read_record.unlock_row= rr_unlock_row;
2728  break;
2729 
2730  default:
2731  tab->read_record.unlock_row= rr_unlock_row;
2732  break;
2733  }
2734 }
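The switch above simply installs a pair of function pointers ("read first row" / "read next row") chosen by the access type; the nested-loop driver then calls them without knowing which access method sits behind them. A self-contained sketch of that dispatch pattern, with hypothetical types in place of the real JOIN_TAB/READ_RECORD:

  #include <cstdio>

  enum AccessType { REF, EQ_REF, FULL_SCAN };

  struct Tab;
  typedef int (*ReadFunc)(Tab*);

  struct Tab {
    AccessType type;
    ReadFunc read_first;
    ReadFunc read_next;
    int remaining;                           // toy state: matching rows left
  };

  static int ref_first(Tab *t) {
    if (t->remaining <= 0) return -1;        // no matching row
    --t->remaining;
    std::puts("ref: index lookup");
    return 0;
  }
  static int ref_next(Tab *t) {
    if (t->remaining <= 0) return -1;        // no more rows with the same key
    --t->remaining;
    std::puts("ref: next row, same key");
    return 0;
  }
  static int eq_ref_first(Tab *) { std::puts("eq_ref: at most one row"); return 0; }
  static int no_more(Tab *)      { return -1; }   // like join_no_more_records

  static void pick_access(Tab *t) {
    switch (t->type) {
    case REF:    t->read_first = ref_first;    t->read_next = ref_next; break;
    case EQ_REF: t->read_first = eq_ref_first; t->read_next = no_more;  break;
    default:     t->read_first = eq_ref_first; t->read_next = no_more;  break;
    }
  }

  int main() {
    Tab t = {REF, nullptr, nullptr, 3};
    pick_access(&t);
    // The driver never inspects t.type again; it just follows the pointers.
    for (int rc = t.read_first(&t); rc == 0; rc = t.read_next(&t))
      ;
    return 0;
  }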
2735 
2736 
2737 /*****************************************************************************
2738  DESCRIPTION
2739  Functions that end one nested loop iteration. Different functions
2740  are used to support the GROUP BY clause and to redirect records
2741  to a table (e.g. in case of SELECT into a temporary table) or to the
2742  network client.
2743  See the enum_nested_loop_state enumeration for the description of return
2744  values.
2745 *****************************************************************************/
2746 
2747 /* ARGSUSED */
2748 static enum_nested_loop_state
2749 end_send(JOIN *join, JOIN_TAB *join_tab, bool end_of_records)
2750 {
2751  DBUG_ENTER("end_send");
2752  /*
2753  When all tables are const, this function is called with join_tab == NULL.
2754  This function shouldn't be called for the first join_tab, as it needs
2755  to get fields from the previous tab.
2756  */
2757  DBUG_ASSERT(join_tab == NULL || join_tab != join->join_tab);
2758  //TODO pass fields via argument
2759  List<Item> *fields= join_tab ? (join_tab-1)->fields : join->fields;
2760 
2761  if (!end_of_records)
2762  {
2763  int error;
2764  if (join->tables &&
2765  join->join_tab->is_using_loose_index_scan())
2766  {
2767  /* Copy non-aggregated fields when loose index scan is used. */
2768  copy_fields(&join->tmp_table_param);
2769  }
2770  // Use JOIN's HAVING for the case of tableless SELECT.
2771  if (join->having && join->having->val_int() == 0)
2772  DBUG_RETURN(NESTED_LOOP_OK); // Didn't match having
2773  error=0;
2774  if (join->do_send_rows)
2775  error=join->result->send_data(*fields);
2776  if (error)
2777  DBUG_RETURN(NESTED_LOOP_ERROR); /* purecov: inspected */
2778 
2779  ++join->send_records;
2780  if (join->send_records >= join->unit->select_limit_cnt &&
2781  !join->do_send_rows)
2782  {
2783  /*
2784  If we have used a priority queue for optimizing ORDER BY with LIMIT,
2785  stop here: there are no more records to consume.
2786  When this optimization is used, end_send is called on the next
2787  join_tab.
2788  */
2789  if (join->order &&
2790  join->select_options & OPTION_FOUND_ROWS &&
2791  join_tab > join->join_tab &&
2792  (join_tab - 1)->filesort && (join_tab - 1)->filesort->using_pq)
2793  {
2794  DBUG_PRINT("info", ("filesort NESTED_LOOP_QUERY_LIMIT"));
2795  DBUG_RETURN(NESTED_LOOP_QUERY_LIMIT);
2796  }
2797  }
2798  if (join->send_records >= join->unit->select_limit_cnt &&
2799  join->do_send_rows)
2800  {
2801  if (join->select_options & OPTION_FOUND_ROWS)
2802  {
2803  JOIN_TAB *jt=join->join_tab;
2804  if ((join->primary_tables == 1) &&
2805  !join->sort_and_group &&
2806  !join->send_group_parts &&
2807  !join->having &&
2808  !jt->condition() &&
2809  !(jt->select && jt->select->quick) &&
2810  (jt->table->file->ha_table_flags() & HA_STATS_RECORDS_IS_EXACT) &&
2811  (jt->ref.key < 0))
2812  {
2813  /* Join over all rows in table; Return number of found rows */
2814  TABLE *table=jt->table;
2815 
2816  if (table->sort.record_pointers ||
2817  (table->sort.io_cache && my_b_inited(table->sort.io_cache)))
2818  {
2819  /* Using filesort */
2820  join->send_records= table->sort.found_records;
2821  }
2822  else
2823  {
2824  table->file->info(HA_STATUS_VARIABLE);
2825  join->send_records= table->file->stats.records;
2826  }
2827  }
2828  else
2829  {
2830  join->do_send_rows= 0;
2831  if (join->unit->fake_select_lex)
2832  join->unit->fake_select_lex->select_limit= 0;
2833  DBUG_RETURN(NESTED_LOOP_OK);
2834  }
2835  }
2836  DBUG_RETURN(NESTED_LOOP_QUERY_LIMIT); // Abort nicely
2837  }
2838  else if (join->send_records >= join->fetch_limit)
2839  {
2840  /*
2841  There is a server side cursor and all rows for
2842  this fetch request are sent.
2843  */
2844  DBUG_RETURN(NESTED_LOOP_CURSOR_LIMIT);
2845  }
2846  }
2847  DBUG_RETURN(NESTED_LOOP_OK);
2848 }
2849 
2850 
2851  /* ARGSUSED */
2852 enum_nested_loop_state
2853 end_send_group(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
2854  bool end_of_records)
2855 {
2856  int idx= -1;
2857  enum_nested_loop_state ok_code= NESTED_LOOP_OK;
2858  List<Item> *fields= join_tab ? (join_tab-1)->fields : join->fields;
2859  DBUG_ENTER("end_send_group");
2860 
2861 
2862  if (!join->items3.is_null() && !join->set_group_rpa)
2863  {
2864  join->set_group_rpa= true;
2865  join->set_items_ref_array(join->items3);
2866  }
2867 
2868  if (!join->first_record || end_of_records ||
2869  (idx=test_if_item_cache_changed(join->group_fields)) >= 0)
2870  {
2871  if (!join->group_sent &&
2872  (join->first_record ||
2873  (end_of_records && !join->group && !join->group_optimized_away)))
2874  {
2875  if (idx < (int) join->send_group_parts)
2876  {
2877  int error=0;
2878  {
2879  table_map save_nullinfo= 0;
2880  if (!join->first_record)
2881  {
2882  /*
2883  If this is a subquery, we need to save and later restore
2884  the const table NULL info before clearing the tables
2885  because the following executions of the subquery do not
2886  reevaluate constant fields. @see save_const_null_info
2887  and restore_const_null_info
2888  */
2889  if (join->select_lex->master_unit()->item && join->const_tables)
2890  save_const_null_info(join, &save_nullinfo);
2891 
2892  // Calculate aggregate functions for no rows
2893  List_iterator_fast<Item> it(*fields);
2894  Item *item;
2895 
2896  while ((item= it++))
2897  item->no_rows_in_result();
2898 
2899  // Mark tables as containing only NULL values
2900  join->clear();
2901  }
2902  if (join->having && join->having->val_int() == 0)
2903  error= -1; // Didn't satisfy having
2904  else
2905  {
2906  if (join->do_send_rows)
2907  error=join->result->send_data(*fields) ? 1 : 0;
2908  join->send_records++;
2909  join->group_sent= true;
2910  }
2911  if (join->rollup.state != ROLLUP::STATE_NONE && error <= 0)
2912  {
2913  if (join->rollup_send_data((uint) (idx+1)))
2914  error= 1;
2915  }
2916  if (save_nullinfo)
2917  restore_const_null_info(join, save_nullinfo);
2918 
2919  }
2920  if (error > 0)
2921  DBUG_RETURN(NESTED_LOOP_ERROR); /* purecov: inspected */
2922  if (end_of_records)
2923  DBUG_RETURN(NESTED_LOOP_OK);
2924  if (join->send_records >= join->unit->select_limit_cnt &&
2925  join->do_send_rows)
2926  {
2927  if (!(join->select_options & OPTION_FOUND_ROWS))
2928  DBUG_RETURN(NESTED_LOOP_QUERY_LIMIT); // Abort nicely
2929  join->do_send_rows=0;
2930  join->unit->select_limit_cnt = HA_POS_ERROR;
2931  }
2932  else if (join->send_records >= join->fetch_limit)
2933  {
2934  /*
2935  There is a server side cursor and all rows
2936  for this fetch request are sent.
2937  */
2938  /*
2939  To prevent code duplication: when finished with the group, reset
2940  the group functions and copy_fields, then fall through. Bug #11904
2941  */
2942  ok_code= NESTED_LOOP_CURSOR_LIMIT;
2943  }
2944  }
2945  }
2946  else
2947  {
2948  if (end_of_records)
2949  DBUG_RETURN(NESTED_LOOP_OK);
2950  join->first_record=1;
2951  (void)(test_if_item_cache_changed(join->group_fields));
2952  }
2953  if (idx < (int) join->send_group_parts)
2954  {
2955  /*
2956  This branch is also executed for cursors which have reached their
2957  fetch limit - hence ok_code.
2958  */
2959  copy_fields(&join->tmp_table_param);
2960  if (init_sum_functions(join->sum_funcs, join->sum_funcs_end[idx+1]))
2961  DBUG_RETURN(NESTED_LOOP_ERROR);
2962  join->group_sent= false;
2963  DBUG_RETURN(ok_code);
2964  }
2965  }
2966  if (update_sum_func(join->sum_funcs))
2967  DBUG_RETURN(NESTED_LOOP_ERROR);
2968  DBUG_RETURN(NESTED_LOOP_OK);
2969 }
2970 
2971 
2972  /* ARGSUSED */
2973 static enum_nested_loop_state
2974 end_write(JOIN *join, JOIN_TAB *join_tab, bool end_of_records)
2975 {
2976  TABLE *const table= join_tab->table;
2977  DBUG_ENTER("end_write");
2978 
2979  if (join->thd->killed) // Aborted by user
2980  {
2981  join->thd->send_kill_message();
2982  DBUG_RETURN(NESTED_LOOP_KILLED); /* purecov: inspected */
2983  }
2984  if (!end_of_records)
2985  {
2986  copy_fields(join_tab->tmp_table_param);
2987  if (copy_funcs(join_tab->tmp_table_param->items_to_copy, join->thd))
2988  DBUG_RETURN(NESTED_LOOP_ERROR); /* purecov: inspected */
2989 
2990  if (!join_tab->having || join_tab->having->val_int())
2991  {
2992  int error;
2993  join->found_records++;
2994  if ((error=table->file->ha_write_row(table->record[0])))
2995  {
2996  if (!table->file->is_fatal_error(error, HA_CHECK_DUP))
2997  goto end;
2998  if (create_myisam_from_heap(join->thd, table,
2999  join_tab->tmp_table_param->start_recinfo,
3000  &join_tab->tmp_table_param->recinfo,
3001  error, TRUE, NULL))
3002  DBUG_RETURN(NESTED_LOOP_ERROR); // Not a table_is_full error
3003  table->s->uniques=0; // To ensure rows are the same
3004  }
3005  if (++join_tab->send_records >=
3006  join_tab->tmp_table_param->end_write_records &&
3007  join->do_send_rows)
3008  {
3009  if (!(join->select_options & OPTION_FOUND_ROWS))
3010  DBUG_RETURN(NESTED_LOOP_QUERY_LIMIT);
3011  join->do_send_rows=0;
3012  join->unit->select_limit_cnt = HA_POS_ERROR;
3013  DBUG_RETURN(NESTED_LOOP_OK);
3014  }
3015  }
3016  }
3017 end:
3018  DBUG_RETURN(NESTED_LOOP_OK);
3019 }
3020 
3021 /* ARGSUSED */
3024 static enum_nested_loop_state
3025 end_update(JOIN *join, JOIN_TAB *join_tab, bool end_of_records)
3026 {
3027  TABLE *const table= join_tab->table;
3028  ORDER *group;
3029  int error;
3030  DBUG_ENTER("end_update");
3031 
3032  if (end_of_records)
3033  DBUG_RETURN(NESTED_LOOP_OK);
3034  if (join->thd->killed) // Aborted by user
3035  {
3036  join->thd->send_kill_message();
3037  DBUG_RETURN(NESTED_LOOP_KILLED); /* purecov: inspected */
3038  }
3039 
3040  join->found_records++;
3041  copy_fields(join_tab->tmp_table_param); // Groups are copied twice.
3042  /* Make a key of group index */
3043  for (group=table->group ; group ; group=group->next)
3044  {
3045  Item *item= *group->item;
3046  item->save_org_in_field(group->field);
3047  /* Store in the used key if the field was 0 */
3048  if (item->maybe_null)
3049  group->buff[-1]= (char) group->field->is_null();
3050  }
3051  if (!table->file->ha_index_read_map(table->record[1],
3052  join_tab->tmp_table_param->group_buff,
3053  HA_WHOLE_KEY,
3054  HA_READ_KEY_EXACT))
3055  { /* Update old record */
3056  restore_record(table,record[1]);
3057  update_tmptable_sum_func(join->sum_funcs,table);
3058  if ((error=table->file->ha_update_row(table->record[1],
3059  table->record[0])))
3060  {
3061  table->file->print_error(error,MYF(0)); /* purecov: inspected */
3062  DBUG_RETURN(NESTED_LOOP_ERROR); /* purecov: inspected */
3063  }
3064  DBUG_RETURN(NESTED_LOOP_OK);
3065  }
3066 
3067  /*
3068  Copy null bits from group key to table
3069  We can't copy all data as the key may have a different format
3070  than the row data (for example, with VARCHAR keys)
3071  */
3072  KEY_PART_INFO *key_part;
3073  for (group=table->group,key_part=table->key_info[0].key_part;
3074  group ;
3075  group=group->next,key_part++)
3076  {
3077  if (key_part->null_bit)
3078  memcpy(table->record[0]+key_part->offset, group->buff, 1);
3079  }
3080  init_tmptable_sum_functions(join->sum_funcs);
3081  if (copy_funcs(join_tab->tmp_table_param->items_to_copy, join->thd))
3082  DBUG_RETURN(NESTED_LOOP_ERROR); /* purecov: inspected */
3083  if ((error=table->file->ha_write_row(table->record[0])))
3084  {
3085  if (create_myisam_from_heap(join->thd, table,
3086  join_tab->tmp_table_param->start_recinfo,
3087  &join_tab->tmp_table_param->recinfo,
3088  error, FALSE, NULL))
3089  DBUG_RETURN(NESTED_LOOP_ERROR); // Not a table_is_full error
3090  /* Change method to update rows */
3091  if ((error= table->file->ha_index_init(0, 0)))
3092  {
3093  table->file->print_error(error, MYF(0));
3094  DBUG_RETURN(NESTED_LOOP_ERROR);
3095  }
3096  ((QEP_tmp_table*)join_tab->op)->set_write_func(end_unique_update);
3097  }
3098  join_tab->send_records++;
3099  DBUG_RETURN(NESTED_LOOP_OK);
3100 }
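end_update() implements GROUP BY through a temporary table keyed on the group columns: each incoming row either updates the aggregate columns of its existing group row (index lookup on the group key, then ha_update_row) or inserts a new group row (ha_write_row). The same upsert-style aggregation, illustrated with a std::map instead of a handler; purely an illustration, not the server's data structures.

  #include <cstdio>
  #include <map>
  #include <string>
  #include <vector>

  int main() {
    // Incoming joined rows: (group key, value to aggregate).
    std::vector<std::pair<std::string, int>> rows =
        {{"a", 3}, {"b", 1}, {"a", 4}, {"b", 2}};

    // Temporary "table" keyed on the GROUP BY column: holds SUM() and COUNT().
    std::map<std::string, std::pair<long, long>> groups;

    for (const auto &r : rows) {
      auto it = groups.find(r.first);
      if (it == groups.end())
        groups.emplace(r.first, std::make_pair(long(r.second), 1L)); // new group row
      else {
        it->second.first  += r.second;                               // update SUM
        it->second.second += 1;                                      // update COUNT
      }
    }
    for (const auto &g : groups)
      std::printf("%s sum=%ld count=%ld\n", g.first.c_str(),
                  g.second.first, g.second.second);
    return 0;
  }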
3101 
3102 
3105 static enum_nested_loop_state
3106 end_unique_update(JOIN *join, JOIN_TAB *join_tab, bool end_of_records)
3107 {
3108  TABLE *table= join_tab->table;
3109  int error;
3110  DBUG_ENTER("end_unique_update");
3111 
3112  if (end_of_records)
3113  DBUG_RETURN(NESTED_LOOP_OK);
3114  if (join->thd->killed) // Aborted by user
3115  {
3116  join->thd->send_kill_message();
3117  DBUG_RETURN(NESTED_LOOP_KILLED); /* purecov: inspected */
3118  }
3119 
3120  init_tmptable_sum_functions(join->sum_funcs);
3121  copy_fields(join_tab->tmp_table_param); // Groups are copied twice.
3122  if (copy_funcs(join_tab->tmp_table_param->items_to_copy, join->thd))
3123  DBUG_RETURN(NESTED_LOOP_ERROR); /* purecov: inspected */
3124 
3125  if (!(error=table->file->ha_write_row(table->record[0])))
3126  join_tab->send_records++; // New group
3127  else
3128  {
3129  if ((int) table->file->get_dup_key(error) < 0)
3130  {
3131  table->file->print_error(error,MYF(0)); /* purecov: inspected */
3132  DBUG_RETURN(NESTED_LOOP_ERROR); /* purecov: inspected */
3133  }
3134  if (table->file->ha_rnd_pos(table->record[1], table->file->dup_ref))
3135  {
3136  table->file->print_error(error,MYF(0)); /* purecov: inspected */
3137  DBUG_RETURN(NESTED_LOOP_ERROR); /* purecov: inspected */
3138  }
3139  restore_record(table,record[1]);
3140  update_tmptable_sum_func(join->sum_funcs,table);
3141  if ((error=table->file->ha_update_row(table->record[1],
3142  table->record[0])))
3143  {
3144  table->file->print_error(error,MYF(0)); /* purecov: inspected */
3145  DBUG_RETURN(NESTED_LOOP_ERROR); /* purecov: inspected */
3146  }
3147  }
3148  DBUG_RETURN(NESTED_LOOP_OK);
3149 }
3150 
3151 
3152  /* ARGSUSED */
3153 enum_nested_loop_state
3154 end_write_group(JOIN *join, JOIN_TAB *join_tab, bool end_of_records)
3155 {
3156  TABLE *table= join_tab->table;
3157  int idx= -1;
3158  DBUG_ENTER("end_write_group");
3159 
3160  if (join->thd->killed)
3161  { // Aborted by user
3162  join->thd->send_kill_message();
3163  DBUG_RETURN(NESTED_LOOP_KILLED); /* purecov: inspected */
3164  }
3165  if (!join->first_record || end_of_records ||
3166  (idx=test_if_item_cache_changed(join->group_fields)) >= 0)
3167  {
3168  if (join->first_record || (end_of_records && !join->group))
3169  {
3170  int send_group_parts= join->send_group_parts;
3171  if (idx < send_group_parts)
3172  {
3173  table_map save_nullinfo= 0;
3174  if (!join->first_record)
3175  {
3176  // Dead code or we need a test case for this branch
3177  DBUG_ASSERT(false);
3178  /*
3179  If this is a subquery, we need to save and later restore
3180  the const table NULL info before clearing the tables
3181  because the following executions of the subquery do not
3182  reevaluate constant fields. @see save_const_null_info
3183  and restore_const_null_info
3184  */
3185  if (join->select_lex->master_unit()->item && join->const_tables)
3186  save_const_null_info(join, &save_nullinfo);
3187 
3188  // Calculate aggregate functions for no rows
3189  List_iterator_fast<Item> it(*(join_tab-1)->fields);
3190  Item *item;
3191  while ((item= it++))
3192  item->no_rows_in_result();
3193 
3194  // Mark tables as containing only NULL values
3195  join->clear();
3196  }
3197  copy_sum_funcs(join->sum_funcs,
3198  join->sum_funcs_end[send_group_parts]);
3199  if (!join_tab->having || join_tab->having->val_int())
3200  {
3201  int error= table->file->ha_write_row(table->record[0]);
3202  if (error &&
3203  create_myisam_from_heap(join->thd, table,
3204  join_tab->tmp_table_param->start_recinfo,
3205  &join_tab->tmp_table_param->recinfo,
3206  error, FALSE, NULL))
3207  DBUG_RETURN(NESTED_LOOP_ERROR);
3208  }
3209  if (join->rollup.state != ROLLUP::STATE_NONE)
3210  {
3211  if (join->rollup_write_data((uint) (idx+1), table))
3212  DBUG_RETURN(NESTED_LOOP_ERROR);
3213  }
3214  if (save_nullinfo)
3215  restore_const_null_info(join, save_nullinfo);
3216 
3217  if (end_of_records)
3218  DBUG_RETURN(NESTED_LOOP_OK);
3219  }
3220  }
3221  else
3222  {
3223  if (end_of_records)
3224  DBUG_RETURN(NESTED_LOOP_OK);
3225  join->first_record=1;
3226  (void)(test_if_item_cache_changed(join->group_fields));
3227  }
3228  if (idx < (int) join->send_group_parts)
3229  {
3230  copy_fields(join_tab->tmp_table_param);
3231  if (copy_funcs(join_tab->tmp_table_param->items_to_copy, join->thd))
3232  DBUG_RETURN(NESTED_LOOP_ERROR);
3233  if (init_sum_functions(join->sum_funcs, join->sum_funcs_end[idx+1]))
3234  DBUG_RETURN(NESTED_LOOP_ERROR);
3235  DBUG_RETURN(NESTED_LOOP_OK);
3236  }
3237  }
3238  if (update_sum_func(join->sum_funcs))
3239  DBUG_RETURN(NESTED_LOOP_ERROR);
3240  DBUG_RETURN(NESTED_LOOP_OK);
3241 }
3242 
3243 
3244 /*
3245  If not selecting by the given key, create an index describing how records should be read
3246 
3247  SYNOPSIS
3248  create_sort_index()
3249  thd Thread handler
3250  join Join with table to sort
3251  order How table should be sorted
3252  filesort_limit Max number of rows that needs to be sorted
3253  select_limit Max number of rows in final output
3254  Used to decide if we should use index or not
3255  IMPLEMENTATION
3256  - If there is an index that can be used, the first non-const join_tab in
3257  'join' is modified to use this index.
3258  - If no index, create with filesort() an index file that can be used to
3259  retrieve rows in order (should be done with 'read_record').
3260  The sorted data is stored in tab->table and will be freed when calling
3261  free_io_cache(tab->table).
3262 
3263  RETURN VALUES
3264  0 ok
3265  -1 Some fatal error
3266  1 No records
3267 */
3268 
3269 static int
3270 create_sort_index(THD *thd, JOIN *join, JOIN_TAB *tab)
3271 {
3272  ha_rows examined_rows;
3273  ha_rows found_rows;
3274  ha_rows filesort_retval= HA_POS_ERROR;
3275  TABLE *table;
3276  SQL_SELECT *select;
3277  Filesort *fsort= tab->filesort;
3278  DBUG_ENTER("create_sort_index");
3279 
3280  // One row, no need to sort. make_tmp_tables_info should already handle this.
3281  DBUG_ASSERT(!join->plan_is_const() && fsort);
3282  table= tab->table;
3283  select= fsort->select;
3284 
3285  table->sort.io_cache=(IO_CACHE*) my_malloc(sizeof(IO_CACHE),
3286  MYF(MY_WME | MY_ZEROFILL));
3287  table->status=0; // May be wrong if quick_select
3288 
3289  // If table has a range, move it to select
3290  if (select && tab->ref.key >= 0)
3291  {
3292  if (!select->quick)
3293  {
3294  if (tab->quick)
3295  {
3296  select->quick= tab->quick;
3297  tab->quick= NULL;
3298  /*
3299  We can only use 'Only index' if the quick key is the same as ref_key;
3300  with index_merge, 'Only index' cannot be used
3301  */
3302  if (((uint) tab->ref.key != select->quick->index))
3303  table->set_keyread(FALSE);
3304  }
3305  else
3306  {
3307  /*
3308  We have a ref on a const; Change this to a range that filesort
3309  can use.
3310  For impossible ranges (like when doing a lookup on NULL on a NOT NULL
3311  field), quick will contain an empty record set.
3312  */
3313  if (!(select->quick= (tab->type == JT_FT ?
3314  get_ft_select(thd, table, tab->ref.key) :
3315  get_quick_select_for_ref(thd, table, &tab->ref,
3316  tab->found_records))))
3317  goto err;
3318  }
3319  fsort->own_select= true;
3320  }
3321  else
3322  {
3323  DBUG_ASSERT(tab->type == JT_REF || tab->type == JT_EQ_REF);
3324  // Update ref value
3325  if ((cp_buffer_from_ref(thd, table, &tab->ref) && thd->is_fatal_error))
3326  goto err; // out of memory
3327  }
3328  }
3329 
3330  /* Fill schema tables with data before filesort if it's necessary */
3331  if ((join->select_lex->options & OPTION_SCHEMA_TABLE) &&
3332  get_schema_tables_result(join, PROCESSED_BY_CREATE_SORT_INDEX))
3333  goto err;
3334 
3335  if (table->s->tmp_table)
3336  table->file->info(HA_STATUS_VARIABLE); // Get record count
3337  filesort_retval= filesort(thd, table, fsort, tab->keep_current_rowid,
3338  &examined_rows, &found_rows);
3339  table->sort.found_records= filesort_retval;
3340  tab->records= found_rows; // For SQL_CALC_ROWS
3341  tab->join->examined_rows+=examined_rows;
3342  table->set_keyread(FALSE); // Restore if we used indexes
3343  if (tab->type == JT_FT)
3344  table->file->ft_end();
3345  else
3346  table->file->ha_index_or_rnd_end();
3347  DBUG_RETURN(filesort_retval == HA_POS_ERROR);
3348 err:
3349  DBUG_RETURN(-1);
3350 }
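create_sort_index() ultimately leaves the table with a filesort result: a sequence of row references ordered by the sort key, which the later read_record calls follow instead of the base table order. A rough standalone illustration of that two-phase idea (collect keys plus row positions, sort the small index, then fetch rows in key order); all names here are invented for the example.

  #include <algorithm>
  #include <cstdio>
  #include <string>
  #include <vector>

  struct Row { int sort_key; std::string payload; };

  int main() {
    // The "table": rows in arbitrary (insertion) order.
    std::vector<Row> table = {{30, "c"}, {10, "a"}, {20, "b"}};

    // Phase 1: scan once, remembering (sort key, row position) pairs.
    std::vector<std::pair<int, size_t>> sort_index;
    for (size_t pos = 0; pos < table.size(); ++pos)
      sort_index.push_back({table[pos].sort_key, pos});

    // Phase 2: sort the small index, not the rows themselves.
    std::sort(sort_index.begin(), sort_index.end());

    // Later reads follow the sorted index, like read_record after filesort.
    for (const auto &entry : sort_index)
      std::printf("%d %s\n", table[entry.second].sort_key,
                  table[entry.second].payload.c_str());
    return 0;
  }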
3351 
3352 
3353 /*****************************************************************************
3354  Remove duplicates from tmp table
3355  This should be recoded to add a unique index to the table and remove
3356  duplicates
3357  The table is a locked, single-thread table
3358  fields is the number of fields to check (from the end)
3359 *****************************************************************************/
3360 
3361 static bool compare_record(TABLE *table, Field **ptr)
3362 {
3363  for (; *ptr ; ptr++)
3364  {
3365  if ((*ptr)->cmp_offset(table->s->rec_buff_length))
3366  return 1;
3367  }
3368  return 0;
3369 }
3370 
3371 static bool copy_blobs(Field **ptr)
3372 {
3373  for (; *ptr ; ptr++)
3374  {
3375  if ((*ptr)->flags & BLOB_FLAG)
3376  if (((Field_blob *) (*ptr))->copy())
3377  return 1; // Error
3378  }
3379  return 0;
3380 }
3381 
3382 static void free_blobs(Field **ptr)
3383 {
3384  for (; *ptr ; ptr++)
3385  {
3386  if ((*ptr)->flags & BLOB_FLAG)
3387  ((Field_blob *) (*ptr))->free();
3388  }
3389 }
3390 
3391 
3392 bool
3393 JOIN_TAB::remove_duplicates()
3394 {
3395  bool error;
3396  ulong reclength,offset;
3397  uint field_count;
3398  List<Item> *fields= (this-1)->fields;
3399  DBUG_ENTER("remove_duplicates");
3400 
3401  DBUG_ASSERT(join->tmp_tables > 0 && table->s->tmp_table != NO_TMP_TABLE);
3402  THD_STAGE_INFO(join->thd, stage_removing_duplicates);
3403 
3404  table->reginfo.lock_type=TL_WRITE;
3405 
3406  /* Calculate how many saved fields there are in the list */
3407  field_count=0;
3408  List_iterator<Item> it(*fields);
3409  Item *item;
3410  while ((item=it++))
3411  {
3412  if (item->get_tmp_table_field() && ! item->const_item())
3413  field_count++;
3414  }
3415 
3416  if (!field_count && !(join->select_options & OPTION_FOUND_ROWS) && !having)
3417  { // only const items with no OPTION_FOUND_ROWS
3418  join->unit->select_limit_cnt= 1; // Only send first row
3419  DBUG_RETURN(false);
3420  }
3421  Field **first_field= table->field+ table->s->fields - field_count;
3422  offset= (field_count ?
3423  table->field[table->s->fields - field_count]->
3424  offset(table->record[0]) : 0);
3425  reclength= table->s->reclength-offset;
3426 
3427  free_io_cache(table); // Safety
3428  table->file->info(HA_STATUS_VARIABLE);
3429  if (table->s->db_type() == heap_hton ||
3430  (!table->s->blob_fields &&
3431  ((ALIGN_SIZE(reclength) + HASH_OVERHEAD) * table->file->stats.records <
3432  join->thd->variables.sortbuff_size)))
3433  error=remove_dup_with_hash_index(join->thd, table,
3434  field_count, first_field,
3435  reclength, having);
3436  else
3437  error=remove_dup_with_compare(join->thd, table, first_field, offset,
3438  having);
3439 
3440  free_blobs(first_field);
3441  DBUG_RETURN(error);
3442 }
3443 
3444 
3445 static bool remove_dup_with_compare(THD *thd, TABLE *table, Field **first_field,
3446  ulong offset, Item *having)
3447 {
3448  handler *file=table->file;
3449  char *org_record,*new_record;
3450  uchar *record;
3451  int error;
3452  ulong reclength= table->s->reclength-offset;
3453  DBUG_ENTER("remove_dup_with_compare");
3454 
3455  org_record=(char*) (record=table->record[0])+offset;
3456  new_record=(char*) table->record[1]+offset;
3457 
3458  if ((error= file->ha_rnd_init(1)))
3459  goto err;
3460  error=file->ha_rnd_next(record);
3461  for (;;)
3462  {
3463  if (thd->killed)
3464  {
3465  thd->send_kill_message();
3466  error=0;
3467  goto err;
3468  }
3469  if (error)
3470  {
3471  if (error == HA_ERR_RECORD_DELETED)
3472  {
3473  error= file->ha_rnd_next(record);
3474  continue;
3475  }
3476  if (error == HA_ERR_END_OF_FILE)
3477  break;
3478  goto err;
3479  }
3480  if (having && !having->val_int())
3481  {
3482  if ((error=file->ha_delete_row(record)))
3483  goto err;
3484  error=file->ha_rnd_next(record);
3485  continue;
3486  }
3487  if (copy_blobs(first_field))
3488  {
3489  my_message(ER_OUTOFMEMORY, ER(ER_OUTOFMEMORY), MYF(ME_FATALERROR));
3490  error=0;
3491  goto err;
3492  }
3493  memcpy(new_record,org_record,reclength);
3494 
3495  /* Read through rest of file and mark duplicated rows deleted */
3496  bool found=0;
3497  for (;;)
3498  {
3499  if ((error=file->ha_rnd_next(record)))
3500  {
3501  if (error == HA_ERR_RECORD_DELETED)
3502  continue;
3503  if (error == HA_ERR_END_OF_FILE)
3504  break;
3505  goto err;
3506  }
3507  if (compare_record(table, first_field) == 0)
3508  {
3509  if ((error=file->ha_delete_row(record)))
3510  goto err;
3511  }
3512  else if (!found)
3513  {
3514  found=1;
3515  file->position(record); // Remember position
3516  }
3517  }
3518  if (!found)
3519  break; // End of file
3520  /* Restart search on next row */
3521  error=file->restart_rnd_next(record,file->ref);
3522  }
3523 
3524  file->extra(HA_EXTRA_NO_CACHE);
3525  DBUG_RETURN(false);
3526 err:
3527  file->extra(HA_EXTRA_NO_CACHE);
3528  if (file->inited)
3529  (void) file->ha_rnd_end();
3530  if (error)
3531  file->print_error(error,MYF(0));
3532  DBUG_RETURN(true);
3533 }
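remove_dup_with_compare() is the quadratic fallback: for each remaining row it rescans the rest of the file and deletes every row whose compared fields match. A compact in-memory sketch of the same scan-and-delete loop, using a plain std::vector instead of a handler; illustrative only.

  #include <cstdio>
  #include <vector>

  int main() {
    std::vector<int> rows = {1, 2, 1, 3, 2, 1};

    // Outer loop: each surviving row becomes the "current" record.
    for (size_t i = 0; i < rows.size(); ++i) {
      // Inner loop: delete later rows equal to the current one, mirroring
      // the "read rest of file, ha_delete_row on match" pass.
      for (size_t j = i + 1; j < rows.size(); ) {
        if (rows[j] == rows[i])
          rows.erase(rows.begin() + j);
        else
          ++j;
      }
    }
    for (int r : rows) std::printf("%d ", r);   // prints: 1 2 3
    std::printf("\n");
    return 0;
  }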
3534 
3535 
3543 static bool remove_dup_with_hash_index(THD *thd, TABLE *table,
3544  uint field_count,
3545  Field **first_field,
3546  ulong key_length,
3547  Item *having)
3548 {
3549  uchar *key_buffer, *key_pos, *record=table->record[0];
3550  int error;
3551  handler *file= table->file;
3552  ulong extra_length= ALIGN_SIZE(key_length)-key_length;
3553  uint *field_lengths,*field_length;
3554  HASH hash;
3555  DBUG_ENTER("remove_dup_with_hash_index");
3556 
3557  if (!my_multi_malloc(MYF(MY_WME),
3558  &key_buffer,
3559  (uint) ((key_length + extra_length) *
3560  (long) file->stats.records),
3561  &field_lengths,
3562  (uint) (field_count*sizeof(*field_lengths)),
3563  NullS))
3564  DBUG_RETURN(true);
3565 
3566  {
3567  Field **ptr;
3568  ulong total_length= 0;
3569  for (ptr= first_field, field_length=field_lengths ; *ptr ; ptr++)
3570  {
3571  uint length= (*ptr)->sort_length();
3572  (*field_length++)= length;
3573  total_length+= length;
3574  }
3575  DBUG_PRINT("info",("field_count: %u key_length: %lu total_length: %lu",
3576  field_count, key_length, total_length));
3577  DBUG_ASSERT(total_length <= key_length);
3578  key_length= total_length;
3579  extra_length= ALIGN_SIZE(key_length)-key_length;
3580  }
3581 
3582  if (my_hash_init(&hash, &my_charset_bin, (uint) file->stats.records, 0,
3583  key_length, (my_hash_get_key) 0, 0, 0))
3584  {
3585  my_free(key_buffer);
3586  DBUG_RETURN(true);
3587  }
3588 
3589  if ((error= file->ha_rnd_init(1)))
3590  goto err;
3591  key_pos=key_buffer;
3592  for (;;)
3593  {
3594  uchar *org_key_pos;
3595  if (thd->killed)
3596  {
3597  thd->send_kill_message();
3598  error=0;
3599  goto err;
3600  }
3601  if ((error=file->ha_rnd_next(record)))
3602  {
3603  if (error == HA_ERR_RECORD_DELETED)
3604  continue;
3605  if (error == HA_ERR_END_OF_FILE)
3606  break;
3607  goto err;
3608  }
3609  if (having && !having->val_int())
3610  {
3611  if ((error=file->ha_delete_row(record)))
3612  goto err;
3613  continue;
3614  }
3615 
3616  /* copy fields to key buffer */
3617  org_key_pos= key_pos;
3618  field_length=field_lengths;
3619  for (Field **ptr= first_field ; *ptr ; ptr++)
3620  {
3621  (*ptr)->make_sort_key(key_pos,*field_length);
3622  key_pos+= *field_length++;
3623  }
3624  /* Check if it exists before */
3625  if (my_hash_search(&hash, org_key_pos, key_length))
3626  {
3627  /* Duplicate found; remove the row */
3628  if ((error=file->ha_delete_row(record)))
3629  goto err;
3630  }
3631  else
3632  {
3633  if (my_hash_insert(&hash, org_key_pos))
3634  goto err;
3635  }
3636  key_pos+=extra_length;
3637  }
3638  my_free(key_buffer);
3639  my_hash_free(&hash);
3640  file->extra(HA_EXTRA_NO_CACHE);
3641  (void) file->ha_rnd_end();
3642  DBUG_RETURN(false);
3643 
3644 err:
3645  my_free(key_buffer);
3646  my_hash_free(&hash);
3647  file->extra(HA_EXTRA_NO_CACHE);
3648  if (file->inited)
3649  (void) file->ha_rnd_end();
3650  if (error)
3651  file->print_error(error,MYF(0));
3652  DBUG_RETURN(true);
3653 }
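remove_dup_with_hash_index() trades memory for a single pass: each row's checked fields are packed into a fixed-size key and looked up in an in-memory hash, and rows whose key was already seen are deleted. The same single-pass idea with standard containers; the packing function below is a stand-in for make_sort_key() filling key_buffer, not the real API.

  #include <cstdio>
  #include <string>
  #include <unordered_set>
  #include <vector>

  struct Row { std::string a; int b; };

  // Pack the compared fields into one hashable key.
  static std::string make_key(const Row &r) {
    return r.a + '\0' + std::to_string(r.b);
  }

  int main() {
    std::vector<Row> rows = {{"x", 1}, {"y", 2}, {"x", 1}, {"x", 2}};
    std::unordered_set<std::string> seen;

    for (size_t i = 0; i < rows.size(); ) {
      if (!seen.insert(make_key(rows[i])).second)
        rows.erase(rows.begin() + i);   // duplicate key: delete the row
      else
        ++i;                            // first occurrence: keep it
    }
    for (const Row &r : rows) std::printf("%s %d\n", r.a.c_str(), r.b);
    return 0;
  }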
3654 
3655 
3656 /*
3657  eq_ref: Create the lookup key and check if it is the same as the saved key
3658 
3659  SYNOPSIS
3660  cmp_buffer_with_ref()
3661  tab Join tab of the accessed table
3662  table The table to read. This is usually tab->table, except for
3663  semi-join when we might need to make a lookup in a temptable
3664  instead.
3665  tab_ref The structure with methods to collect index lookup tuple.
3666  This is usually table->ref, except for the case of when we're
3667  doing lookup into semi-join materialization table.
3668 
3669  DESCRIPTION
3670  Used by eq_ref access method: create the index lookup key and check if
3671  we've used this key at the previous lookup (if yes, we don't need to repeat
3672  the lookup - the record has already been fetched)
3673 
3674  RETURN
3675  TRUE No cached record for the key, or failed to create the key (due to
3676  out-of-domain error)
3677  FALSE The created key is the same as the previous one (and the record
3678  is already in table->record)
3679 */
3680 
3681 static bool
3682 cmp_buffer_with_ref(THD *thd, TABLE *table, TABLE_REF *tab_ref)
3683 {
3684  bool no_prev_key;
3685  if (!tab_ref->disable_cache)
3686  {
3687  if (!(no_prev_key= tab_ref->key_err))
3688  {
3689  /* Previous access found a row. Copy its key */
3690  memcpy(tab_ref->key_buff2, tab_ref->key_buff, tab_ref->key_length);
3691  }
3692  }
3693  else
3694  no_prev_key= TRUE;
3695  if ((tab_ref->key_err= cp_buffer_from_ref(thd, table, tab_ref)) ||
3696  no_prev_key)
3697  return 1;
3698  return memcmp(tab_ref->key_buff2, tab_ref->key_buff, tab_ref->key_length)
3699  != 0;
3700 }
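The eq_ref cache above avoids repeating an index dive when consecutive outer rows produce the same lookup key: the freshly built key is memcmp'd against the one used last time, and only a difference forces a new handler read. A small self-contained model of that check, with simplified fixed-size keys rather than the real TABLE_REF:

  #include <cstdio>
  #include <cstring>

  struct RefCache {
    char key[16];
    char prev_key[16];
    bool prev_valid;     // like !key_err: the previous lookup found a row
  };

  // Returns true when a real lookup is needed, false when the row fetched
  // for the previous, identical key can be reused.
  static bool need_lookup(RefCache *ref, const char *outer_value) {
    if (ref->prev_valid)
      std::memcpy(ref->prev_key, ref->key, sizeof(ref->key));      // save old key
    std::snprintf(ref->key, sizeof(ref->key), "%s", outer_value);  // build new key
    bool same = ref->prev_valid &&
                std::memcmp(ref->prev_key, ref->key, sizeof(ref->key)) == 0;
    ref->prev_valid = true;
    return !same;
  }

  int main() {
    RefCache ref = {};
    const char *outer_rows[] = {"42", "42", "7"};
    for (const char *v : outer_rows)
      std::printf("outer=%s -> %s\n", v,
                  need_lookup(&ref, v) ? "index lookup" : "reuse cached row");
    return 0;
  }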
3701 
3702 
3703 bool
3704 cp_buffer_from_ref(THD *thd, TABLE *table, TABLE_REF *ref)
3705 {
3706  enum enum_check_fields save_count_cuted_fields= thd->count_cuted_fields;
3707  thd->count_cuted_fields= CHECK_FIELD_IGNORE;
3708  my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->write_set);
3709  bool result= 0;
3710 
3711  for (uint part_no= 0; part_no < ref->key_parts; part_no++)
3712  {
3713  store_key *s_key= ref->key_copy[part_no];
3714  if (!s_key)
3715  continue;
3716 
3717  if (s_key->copy() & 1)
3718  {
3719  result= 1;
3720  break;
3721  }
3722  }
3723  thd->count_cuted_fields= save_count_cuted_fields;
3724  dbug_tmp_restore_column_map(table->write_set, old_map);
3725  return result;
3726 }
3727 
3728 
3742 bool
3743 make_group_fields(JOIN *main_join, JOIN *curr_join)
3744 {
3745  if (main_join->group_fields_cache.elements)
3746  {
3747  curr_join->group_fields= main_join->group_fields_cache;
3748  curr_join->sort_and_group= 1;
3749  }
3750  else
3751  {
3752  if (alloc_group_fields(curr_join, curr_join->group_list))
3753  return (1);
3754  main_join->group_fields_cache= curr_join->group_fields;
3755  }
3756  return (0);
3757 }
3758 
3759 
3766 bool
3767 alloc_group_fields(JOIN *join, ORDER *group)
3768 {
3769  if (group)
3770  {
3771  for (; group ; group=group->next)
3772  {
3773  Cached_item *tmp=new_Cached_item(join->thd, *group->item, FALSE);
3774  if (!tmp || join->group_fields.push_front(tmp))
3775  return TRUE;
3776  }
3777  }
3778  join->sort_and_group=1; /* Mark for do_select */
3779  return FALSE;
3780 }
3781 
3782 
3783 /*
3784  Test if a single-row cache of items changed, and update the cache.
3785 
3786  @details Test if a list of items that typically represents a result
3787  row has changed. If the value of some item changed, update the cached
3788  value for this item.
3789 
3790  @param list list of <item, cached_value> pairs stored as Cached_item.
3791 
3792  @return -1 if no item changed
3793  @return index of the first item that changed
3794 */
3795 
3796 int test_if_item_cache_changed(List<Cached_item> &list)
3797 {
3798  DBUG_ENTER("test_if_item_cache_changed");
3799  List_iterator<Cached_item> li(list);
3800  int idx= -1,i;
3801  Cached_item *buff;
3802 
3803  for (i=(int) list.elements-1 ; (buff=li++) ; i--)
3804  {
3805  if (buff->cmp())
3806  idx=i;
3807  }
3808  DBUG_PRINT("info", ("idx: %d", idx));
3809  DBUG_RETURN(idx);
3810 }
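This is how group boundaries are detected: each group expression keeps a cached copy of its last value, cmp() refreshes the cache while reporting a change, and the caller learns whether the new row still belongs to the current group. A self-contained sketch of the same cache-and-compare loop, returning the leftmost changed column (or -1 if nothing changed); the types are invented stand-ins for Cached_item.

  #include <cstdio>
  #include <string>
  #include <vector>

  struct CachedValue {
    std::string cached;
    bool first = true;
    // Reports (while updating) whether the current value differs from the cache.
    bool cmp(const std::string &current) {
      bool changed = first || current != cached;
      cached = current;
      first = false;
      return changed;
    }
  };

  static int group_changed(std::vector<CachedValue> &cache,
                           const std::vector<std::string> &row) {
    int idx = -1;
    // Update every cache slot, remembering the leftmost column that changed.
    for (size_t i = 0; i < cache.size(); ++i)
      if (cache[i].cmp(row[i]) && idx == -1)
        idx = static_cast<int>(i);
    return idx;
  }

  int main() {
    std::vector<CachedValue> cache(2);      // as if: GROUP BY col1, col2
    std::vector<std::vector<std::string>> rows =
        {{"a", "1"}, {"a", "1"}, {"a", "2"}, {"b", "2"}};
    for (const auto &row : rows)
      std::printf("changed idx: %d\n", group_changed(cache, row));
    return 0;
  }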
3811 
3812 
3842 bool
3843 setup_copy_fields(THD *thd, TMP_TABLE_PARAM *param,
3844  Ref_ptr_array ref_pointer_array,
3845  List<Item> &res_selected_fields, List<Item> &res_all_fields,
3846  uint elements, List<Item> &all_fields)
3847 {
3848  Item *pos;
3849  List_iterator_fast<Item> li(all_fields);
3850  Copy_field *copy= NULL;
3851  Copy_field *copy_start __attribute__((unused));
3852  res_selected_fields.empty();
3853  res_all_fields.empty();
3854  List_iterator_fast<Item> itr(res_all_fields);
3855  List<Item> extra_funcs;
3856  uint i, border= all_fields.elements - elements;
3857  DBUG_ENTER("setup_copy_fields");
3858 
3859  if (param->field_count &&
3860  !(copy=param->copy_field= new Copy_field[param->field_count]))
3861  goto err2;
3862 
3863  param->copy_funcs.empty();
3864  copy_start= copy;
3865  for (i= 0; (pos= li++); i++)
3866  {
3867  Field *field;
3868  uchar *tmp;
3869  Item *real_pos= pos->real_item();
3870  /*
3871  Aggregate functions can be substituted for fields (by e.g. temp tables).
3872  We need to filter those substituted fields out.
3873  */
3874  if (real_pos->type() == Item::FIELD_ITEM &&
3875  !(real_pos != pos &&
3876  ((Item_ref *)pos)->ref_type() == Item_ref::AGGREGATE_REF))
3877  {
3878  Item_field *item;
3879  if (!(item= new Item_field(thd, ((Item_field*) real_pos))))
3880  goto err;
3881  if (pos->type() == Item::REF_ITEM)
3882  {
3883  /* preserve the names of the ref when dereferencing */
3884  Item_ref *ref= (Item_ref *) pos;
3885  item->db_name= ref->db_name;
3886  item->table_name= ref->table_name;
3887  item->item_name= ref->item_name;
3888  }
3889  pos= item;
3890  if (item->field->flags & BLOB_FLAG)
3891  {
3892  if (!(pos= Item_copy::create(pos)))
3893  goto err;
3894  /*
3895  Item_copy_string::copy for function can call
3896  Item_copy_string::val_int for blob via Item_ref.
3897  But if Item_copy_string::copy for the blob isn't called first,
3898  its value will be wrong,
3899  so let's insert Item_copy_string for blobs at the beginning of
3900  copy_funcs
3901  (to see full test case look at having.test, BUG #4358)
3902  */
3903  if (param->copy_funcs.push_front(pos))
3904  goto err;
3905  }
3906  else
3907  {
3908  /*
3909  set up save buffer and change result_field to point at
3910  saved value
3911  */
3912  field= item->field;
3913  item->result_field=field->new_field(thd->mem_root,field->table, 1);
3914  /*
3915  We need to allocate one extra byte for null handling and
3916  another extra byte to not get warnings from purify in
3917  Field_string::val_int
3918  */
3919  if (!(tmp= (uchar*) sql_alloc(field->pack_length()+2)))
3920  goto err;
3921  if (copy)
3922  {
3923  DBUG_ASSERT (param->field_count > (uint) (copy - copy_start));
3924  copy->set(tmp, item->result_field);
3925  item->result_field->move_field(copy->to_ptr,copy->to_null_ptr,1);
3926 #ifdef HAVE_purify
3927  copy->to_ptr[copy->from_length]= 0;
3928 #endif
3929  copy++;
3930  }
3931  }
3932  }
3933  else if ((real_pos->type() == Item::FUNC_ITEM ||
3934  real_pos->type() == Item::SUBSELECT_ITEM ||
3935  real_pos->type() == Item::CACHE_ITEM ||
3936  real_pos->type() == Item::COND_ITEM) &&
3937  !real_pos->with_sum_func)
3938  { // Save for send fields
3939  pos= real_pos;
3940  /* TODO:
3941  In most cases this result will be sent to the user.
3942  This should be changed to use copy_int or copy_real depending
3943  on how the value is to be used: In some cases this may be an
3944  argument in a group function, like: IF(ISNULL(col),0,COUNT(*))
3945  */
3946  if (!(pos= Item_copy::create(pos)))
3947  goto err;
3948  if (i < border) // HAVING, ORDER and GROUP BY
3949  {
3950  if (extra_funcs.push_back(pos))
3951  goto err;
3952  }
3953  else if (param->copy_funcs.push_back(pos))
3954  goto err;
3955  }
3956  res_all_fields.push_back(pos);
3957  ref_pointer_array[((i < border)? all_fields.elements-i-1 : i-border)]=
3958  pos;
3959  }
3960  param->copy_field_end= copy;
3961 
3962  for (i= 0; i < border; i++)
3963  itr++;
3964  itr.sublist(res_selected_fields, elements);
3965  /*
3966  Put elements from HAVING, ORDER BY and GROUP BY last to ensure that any
3967  reference used in these will resolve to an item that is already calculated
3968  */
3969  param->copy_funcs.concat(&extra_funcs);
3970 
3971  DBUG_RETURN(0);
3972 
3973  err:
3974  if (copy)
3975  delete [] param->copy_field; // This is never 0
3976  param->copy_field=0;
3977 err2:
3978  DBUG_RETURN(TRUE);
3979 }
3980 
3981 
3989 void
3990 copy_fields(TMP_TABLE_PARAM *param)
3991 {
3992  Copy_field *ptr=param->copy_field;
3993  Copy_field *end=param->copy_field_end;
3994 
3995  DBUG_ASSERT((ptr != NULL && end >= ptr) || (ptr == NULL && end == NULL));
3996 
3997  for (; ptr < end; ptr++)
3998  (*ptr->do_copy)(ptr);
3999 
4000  List_iterator_fast<Item> it(param->copy_funcs);
4001  Item_copy *item;
4002  while ((item = (Item_copy*) it++))
4003  item->copy();
4004 }
4005 
4006 
4024 bool
4025 change_to_use_tmp_fields(THD *thd, Ref_ptr_array ref_pointer_array,
4026  List<Item> &res_selected_fields,
4027  List<Item> &res_all_fields,
4028  uint elements, List<Item> &all_fields)
4029 {
4030  List_iterator_fast<Item> it(all_fields);
4031  Item *item_field,*item;
4032  DBUG_ENTER("change_to_use_tmp_fields");
4033 
4034  res_selected_fields.empty();
4035  res_all_fields.empty();
4036 
4037  uint border= all_fields.elements - elements;
4038  for (uint i= 0; (item= it++); i++)
4039  {
4040  Field *field;
4041  if (item->with_sum_func && item->type() != Item::SUM_FUNC_ITEM)
4042  item_field= item;
4043  else if (item->type() == Item::FIELD_ITEM)
4044  item_field= item->get_tmp_table_item(thd);
4045  else if (item->type() == Item::FUNC_ITEM &&
4046  ((Item_func*)item)->functype() == Item_func::SUSERVAR_FUNC)
4047  {
4048  field= item->get_tmp_table_field();
4049  if (field != NULL)
4050  {
4051  /*
4052  Replace "@:=<expression>" with "@:=<tmp table column>". Otherwise, we
4053  would re-evaluate <expression>, and if expression were a subquery, this
4054  would access already-unlocked tables.
4055  */
4056  Item_func_set_user_var* suv=
4057  new Item_func_set_user_var(thd, (Item_func_set_user_var*) item);
4058  Item_field *new_field= new Item_field(field);
4059  if (!suv || !new_field)
4060  DBUG_RETURN(true); // Fatal error
4061  List<Item> list;
4062  list.push_back(new_field);
4063  suv->set_arguments(list);
4064  item_field= suv;
4065  }
4066  else
4067  item_field= item;
4068  }
4069  else if ((field= item->get_tmp_table_field()))
4070  {
4071  if (item->type() == Item::SUM_FUNC_ITEM && field->table->group)
4072  item_field= ((Item_sum*) item)->result_item(field);
4073  else
4074  item_field= (Item*) new Item_field(field);
4075  if (!item_field)
4076  DBUG_RETURN(true); // Fatal error
4077 
4078  if (item->real_item()->type() != Item::FIELD_ITEM)
4079  field->orig_table= 0;
4080  item_field->item_name= item->item_name;
4081  if (item->type() == Item::REF_ITEM)
4082  {
4083  Item_field *ifield= (Item_field *) item_field;
4084  Item_ref *iref= (Item_ref *) item;
4085  ifield->table_name= iref->table_name;
4086  ifield->db_name= iref->db_name;
4087  }
4088 #ifndef DBUG_OFF
4089  if (!item_field->item_name.is_set())
4090  {
4091  char buff[256];
4092  String str(buff,sizeof(buff),&my_charset_bin);
4093  str.length(0);
4094  item->print(&str, QT_ORDINARY);
4095  item_field->item_name.copy(str.ptr(), str.length());
4096  }
4097 #endif
4098  }
4099  else
4100  item_field= item;
4101 
4102  res_all_fields.push_back(item_field);
4103  ref_pointer_array[((i < border)? all_fields.elements-i-1 : i-border)]=
4104  item_field;
4105  }
4106 
4107  List_iterator_fast<Item> itr(res_all_fields);
4108  for (uint i= 0; i < border; i++)
4109  itr++;
4110  itr.sublist(res_selected_fields, elements);
4111  DBUG_RETURN(false);
4112 }
4113 
4114 
4132 bool
4133 change_refs_to_tmp_fields(THD *thd, Ref_ptr_array ref_pointer_array,
4134  List<Item> &res_selected_fields,
4135  List<Item> &res_all_fields, uint elements,
4136  List<Item> &all_fields)
4137 {
4138  List_iterator_fast<Item> it(all_fields);
4139  Item *item, *new_item;
4140  res_selected_fields.empty();
4141  res_all_fields.empty();
4142 
4143  uint i, border= all_fields.elements - elements;
4144  for (i= 0; (item= it++); i++)
4145  {
4146  res_all_fields.push_back(new_item= item->get_tmp_table_item(thd));
4147  ref_pointer_array[((i < border)? all_fields.elements-i-1 : i-border)]=
4148  new_item;
4149  }
4150 
4151  List_iterator_fast<Item> itr(res_all_fields);
4152  for (i= 0; i < border; i++)
4153  itr++;
4154  itr.sublist(res_selected_fields, elements);
4155 
4156  return thd->is_fatal_error;
4157 }
4158 
4159 
4177 static void save_const_null_info(JOIN *join, table_map *save_nullinfo)
4178 {
4179  DBUG_ASSERT(join->const_tables);
4180 
4181  for (uint tableno= 0; tableno < join->const_tables; tableno++)
4182  {
4183  TABLE *tbl= (join->join_tab+tableno)->table;
4184  /*
4185  tbl->status and tbl->null_row must be in sync: either both set
4186  or none set. Otherwise, an additional table_map parameter is
4187  needed for save/restore_const_null_info() to handle these separately
4188  */
4189  DBUG_ASSERT(tbl->null_row ? (tbl->status & STATUS_NULL_ROW) :
4190  !(tbl->status & STATUS_NULL_ROW));
4191 
4192  if (!tbl->null_row)
4193  *save_nullinfo|= tbl->map;
4194  }
4195 }
4196 
4213 static void restore_const_null_info(JOIN *join, table_map save_nullinfo)
4214 {
4215  DBUG_ASSERT(join->const_tables && save_nullinfo);
4216 
4217  for (uint tableno= 0; tableno < join->const_tables; tableno++)
4218  {
4219  TABLE *tbl= (join->join_tab+tableno)->table;
4220  if ((save_nullinfo & tbl->map))
4221  {
4222  /*
4223  The table had null_row=false when save_const_null_info was called;
4224  join->clear() has since set null_row and STATUS_NULL_ROW, so reset both
4225  */
4226  tbl->null_row= false;
4227  tbl->status&= ~STATUS_NULL_ROW;
4228  }
4229  }
4230 }
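The save/restore pair above records one bit per const table in a table_map so that join->clear() (which marks every table as a NULL row) can be undone for the tables that were not NULL-complemented beforehand. The bookkeeping is just a bitmask; a minimal standalone version of the pattern, with invented types:

  #include <cstdint>
  #include <cstdio>
  #include <vector>

  struct ConstTable { uint64_t map; bool null_row; };

  int main() {
    std::vector<ConstTable> tables = {{1ULL << 0, false}, {1ULL << 1, true}};

    // Save: remember which tables currently are NOT null rows.
    uint64_t save_nullinfo = 0;
    for (const ConstTable &t : tables)
      if (!t.null_row) save_nullinfo |= t.map;

    // The "join->clear()" step: mark everything as a NULL row.
    for (ConstTable &t : tables) t.null_row = true;

    // Restore: only tables recorded in the mask go back to null_row=false.
    for (ConstTable &t : tables)
      if (save_nullinfo & t.map) t.null_row = false;

    for (const ConstTable &t : tables)
      std::printf("map=%llu null_row=%d\n",
                  (unsigned long long) t.map, (int) t.null_row);
    return 0;
  }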
4231 
4232 
4233 /****************************************************************************
4234  QEP_tmp_table implementation
4235 ****************************************************************************/
4236 
4247 bool
4248 QEP_tmp_table::prepare_tmp_table()
4249 {
4250  TABLE *table= join_tab->table;
4251  JOIN *join= join_tab->join;
4252  int rc= 0;
4253 
4254  if (!join_tab->table->is_created())
4255  {
4256  if (instantiate_tmp_table(table, join_tab->tmp_table_param->keyinfo,
4257  join_tab->tmp_table_param->start_recinfo,
4258  &join_tab->tmp_table_param->recinfo,
4259  join->select_options,
4260  join->thd->variables.big_tables,
4261  &join->thd->opt_trace))
4262  return true;
4263  (void) table->file->extra(HA_EXTRA_WRITE_CACHE);
4264  empty_record(table);
4265  }
4266  /* If it wasn't already, start index scan for grouping using table index. */
4267  if (!table->file->inited && table->group &&
4268  join_tab->tmp_table_param->sum_func_count && table->s->keys)
4269  rc= table->file->ha_index_init(0, 0);
4270  else
4271  rc= table->file->ha_rnd_init(0);
4272  if (rc)
4273  {
4274  table->file->print_error(rc, MYF(0));
4275  return true;
4276  }
4277  return false;
4278 }
4279 
4280 
4289 enum_nested_loop_state
4290 QEP_tmp_table::put_record(bool end_of_records)
4291 {
4292  // Lazy tmp table creation/initialization
4293  if (!join_tab->table->file->inited)
4294  prepare_tmp_table();
4295  enum_nested_loop_state rc= (*write_func)(join_tab->join, join_tab,
4296  end_of_records);
4297  return rc;
4298 }
4299 
4300 
4307 enum_nested_loop_state
4308 QEP_tmp_table::end_send()
4309 {
4310  enum_nested_loop_state rc= NESTED_LOOP_OK;
4311  TABLE *table= join_tab->table;
4312  JOIN *join= join_tab->join;
4313 
4314  // All records were stored, send them further
4315  int tmp, new_errno= 0;
4316 
4317  if ((rc= put_record(true)) < NESTED_LOOP_OK)
4318  return rc;
4319 
4320  if ((tmp= table->file->extra(HA_EXTRA_NO_CACHE)))
4321  {
4322  DBUG_PRINT("error",("extra(HA_EXTRA_NO_CACHE) failed"));
4323  new_errno= tmp;
4324  }
4325  if ((tmp= table->file->ha_index_or_rnd_end()))
4326  {
4327  DBUG_PRINT("error",("ha_index_or_rnd_end() failed"));
4328  new_errno= tmp;
4329  }
4330  if (new_errno)
4331  {
4332  table->file->print_error(new_errno,MYF(0));
4333  return NESTED_LOOP_ERROR;
4334  }
4335  // Update ref array
4336  join_tab->join->set_items_ref_array(*join_tab->ref_array);
4337  table->reginfo.lock_type= TL_UNLOCK;
4338 
4339  bool in_first_read= true;
4340  while (rc == NESTED_LOOP_OK)
4341  {
4342  int error;
4343  if (in_first_read)
4344  {
4345  in_first_read= false;
4346  error= join_init_read_record(join_tab);
4347  }
4348  else
4349  error= join_tab->read_record.read_record(&join_tab->read_record);
4350 
4351  if (error > 0 || (join->thd->is_error())) // Fatal error
4352  rc= NESTED_LOOP_ERROR;
4353  else if (error < 0)
4354  break;
4355  else if (join->thd->killed) // Aborted by user
4356  {
4357  join->thd->send_kill_message();
4358  rc= NESTED_LOOP_KILLED;
4359  }
4360  else
4361  rc= evaluate_join_record(join, join_tab);
4362  }
4363 
4364  // Finish rnd scan after sending records
4365  if (join_tab->table->file->inited)
4366  join_tab->table->file->ha_rnd_end();
4367 
4368  return rc;
4369 }
4370 
4371