MySQL 5.6.14 Source Code Document
 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Groups Pages
sql_join_buffer.h
1 #ifndef SQL_JOIN_CACHE_INCLUDED
2 #define SQL_JOIN_CACHE_INCLUDED
3 
4 #include "sql_executor.h"
5 
6 /* Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
7 
8  This program is free software; you can redistribute it and/or modify
9  it under the terms of the GNU General Public License as published by
10  the Free Software Foundation; version 2 of the License.
11 
12  This program is distributed in the hope that it will be useful,
13  but WITHOUT ANY WARRANTY; without even the implied warranty of
14  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15  GNU General Public License for more details.
16 
17  You should have received a copy of the GNU General Public License
18  along with this program; if not, write to the Free Software
19  Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */
20 
23 /*
24  Categories of data fields of variable length written into join cache buffers.
25  The value of any of these fields is written into cache together with the
26  prepended length of the value.
27 */
28 #define CACHE_BLOB 1 /* blob field */
29 #define CACHE_STRIPPED 2 /* field stripped of trailing spaces */
30 #define CACHE_VARSTR1 3 /* short string value (length takes 1 byte) */
31 #define CACHE_VARSTR2 4 /* long string value (length takes 2 bytes) */
32 
33 /*
34  The CACHE_FIELD structure used to describe fields of records that
35  are written into a join cache buffer from record buffers and backward.
36 */
typedef struct st_cache_field {
  uchar *str;   /* buffer from/to which the field is moved */
  uint length;  /* maximal number of bytes stored for the field */
  /*
    Field object for the moved field
    (0 - for a flag field, see JOIN_CACHE::create_flag_fields).
  */
  Field *field;
  uint type;    /* category of the field (CACHE_BLOB, CACHE_STRIPPED, ...) */
  /*
    The number of the record offset value for the field in the sequence
    of offsets placed after the last field of the record. These
    offset values are used to access fields referred to from other caches.
    If the value is 0 then no offset for the field is saved in the
    trailing sequence of offsets.
  */
  uint referenced_field_no;
  /* The remaining structure fields are used as containers for temp values */
  uint blob_length; /* length of the blob data to be moved */
  uint offset;      /* field offset to be stored in the cache buffer */
  /*
    Bind 'buffer' to this field and propagate the binding down the
    next_copy_rowid chain.
    NOTE(review): next_copy_rowid is declared in a portion of this struct
    not visible here -- presumably a CACHE_FIELD* link; confirm against
    the full source.
  */
  void bind_buffer(uchar *buffer)
  {
    if (next_copy_rowid != NULL)
      next_copy_rowid->bind_buffer(buffer);
    str= buffer;
  }
  /* True when a buffer has been bound to this field */
  bool buffer_is_bound() const { return str != NULL; }
} CACHE_FIELD;
68 
69 
70 /*
71  JOIN_CACHE is the base class to support the implementations of both
72  Blocked-Based Nested Loops (BNL) Join Algorithm and Batched Key Access (BKA)
73  Join Algorithm. The first algorithm is supported by the derived class
74  JOIN_CACHE_BNL, while the second algorithm is supported by the derived
75  class JOIN_CACHE_BKA.
76  These two algorithms have a lot in common. Both algorithms first
77  accumulate the records of the left join operand in a join buffer and
78  then search for matching rows of the second operand for all accumulated
79  records.
80  For the first algorithm this strategy saves on logical I/O operations:
81  the entire set of records from the join buffer requires only one look-through
82  the records provided by the second operand.
83  For the second algorithm the accumulation of records allows to optimize
84  fetching rows of the second operand from disk for some engines (MyISAM,
85  InnoDB), or to minimize the number of round-trips between the Server and
86  the engine nodes (NDB Cluster).
87 */
88 
90 {
91 
92 private:
93 
94  /* Size of the offset of a record from the cache */
95  uint size_of_rec_ofs;
96  /* Size of the length of a record in the cache */
97  uint size_of_rec_len;
98  /* Size of the offset of a field within a record in the cache */
99  uint size_of_fld_ofs;
100 
101 protected:
102 
103  /* 3 functions below actually do not use the hidden parameter 'this' */
104 
105  /* Calculate the number of bytes used to store an offset value */
106  uint offset_size(uint len)
107  { return (len < 256 ? 1 : len < 256*256 ? 2 : 4); }
108 
  /* Get the offset value that takes ofs_sz bytes at the position ptr */
  ulong get_offset(uint ofs_sz, uchar *ptr)
  {
    switch (ofs_sz) {
    case 1: return uint(*ptr);        /* single byte, value as-is */
    case 2: return uint2korr(ptr);    /* 2-byte little-endian read */
    case 4: return uint4korr(ptr);    /* 4-byte little-endian read */
    }
    return 0;  /* only 1, 2 and 4 are ever produced by offset_size() */
  }
119 
  /* Set the offset value ofs that takes ofs_sz bytes at the position ptr */
  void store_offset(uint ofs_sz, uchar *ptr, ulong ofs)
  {
    switch (ofs_sz) {
    case 1: *ptr= (uchar) ofs; return;
    case 2: int2store(ptr, (uint16) ofs); return;
    case 4: int4store(ptr, (uint32) ofs); return;
    }
    /* sizes other than 1, 2, 4 are silently ignored (never produced) */
  }
129 
130  /*
131  The total maximal length of the fields stored for a record in the cache.
132  For blob fields only the sizes of the blob lengths are taken into account.
133  */
134  uint length;
135 
136  /*
137  Representation of the executed multi-way join through which all needed
138  context can be accessed.
139  */
140  JOIN *join;
141 
142  /*
143  Cardinality of the range of join tables whose fields can be put into the
144  cache. (A table from the range not necessarily contributes to the cache.)
145  */
146  uint tables;
147 
148  /*
149  The total number of flag and data fields that can appear in a record
150  written into the cache. Fields with null values are always skipped
151  to save space.
152  */
153  uint fields;
154 
155  /*
156  The total number of flag fields in a record put into the cache. They are
157  used for table null bitmaps, table null row flags, and an optional match
158  flag. Flag fields go before other fields in a cache record with the match
159  flag field placed always at the very beginning of the record.
160  */
161  uint flag_fields;
162 
163  /* The total number of blob fields that are written into the cache */
164  uint blobs;
165 
166  /*
167  The total number of fields referenced from field descriptors for other join
168  caches. These fields are used to construct key values to access matching
169  rows with index lookups. Currently the fields can be referenced only from
170  descriptors for bka caches. However they may belong to a cache of any type.
171  */
172  uint referenced_fields;
173 
174  /*
175  The current number of already created data field descriptors.
176  This number can be useful for implementations of the init methods.
177  */
178  uint data_field_count;
179 
180  /*
181  The current number of already created pointers to the data field
182  descriptors. This number can be useful for implementations of
183  the init methods.
184  */
185  uint data_field_ptr_count;
186  /*
187  Array of the descriptors of fields containing 'fields' elements.
188  These are all fields that are stored for a record in the cache.
189  */
190  CACHE_FIELD *field_descr;
191 
192  /*
193  Array of pointers to the blob descriptors that contains 'blobs' elements.
194  */
195  CACHE_FIELD **blob_ptr;
196 
197  /*
198  This flag indicates that records written into the join buffer contain
199  a match flag field. The flag must be set by the init method.
200  */
201  bool with_match_flag;
202  /*
203  This flag indicates that any record is prepended with the length of the
204  record which allows us to skip the record or part of it without reading.
205  */
206  bool with_length;
207 
208  /*
209  The maximal number of bytes used for a record representation in
210  the cache excluding the space for blob data.
211  For future derived classes this representation may contains some
212  redundant info such as a key value associated with the record.
213  */
214  uint pack_length;
215  /*
216  The value of pack_length incremented by the total size of all
217  pointers of a record in the cache to the blob data.
218  */
219  uint pack_length_with_blob_ptrs;
220 
221  /* Pointer to the beginning of the join buffer */
222  uchar *buff;
223  /*
224  Size of the entire memory allocated for the join buffer.
225  Part of this memory may be reserved for the auxiliary buffer.
226  */
227  ulong buff_size;
228  /* Size of the auxiliary buffer. */
229  ulong aux_buff_size;
230 
231  /* The number of records put into the join buffer */
232  uint records;
233 
234  /*
235  Pointer to the current position in the join buffer.
236  This member is used both when writing to buffer and
237  when reading from it.
238  */
239  uchar *pos;
240  /*
241  Pointer to the first free position in the join buffer,
242  right after the last record into it.
243  */
244  uchar *end_pos;
245 
246  /*
247  Pointer to the beginning of first field of the current read/write record
248  from the join buffer. The value is adjusted by the get_record/put_record
249  functions.
250  */
251  uchar *curr_rec_pos;
252  /*
253  Pointer to the beginning of first field of the last record
254  from the join buffer.
255  */
256  uchar *last_rec_pos;
257 
258  /*
259  Flag is set if the blob data for the last record in the join buffer
260  is in record buffers rather than in the join cache.
261  */
262  bool last_rec_blob_data_is_in_rec_buff;
263 
264  /*
265  Pointer to the position to the current record link.
266  Record links are used only with linked caches. Record links allow to set
267  connections between parts of one join record that are stored in different
268  join buffers.
269  In the simplest case a record link is just a pointer to the beginning of
270  the record stored in the buffer.
271  In a more general case a link could be a reference to an array of pointers
272  to records in the buffer. */
273  uchar *curr_rec_link;
274 
277 
278  void calc_record_fields();
279  int alloc_fields(uint external_fields);
280  void create_flag_fields();
281  void create_remaining_fields(bool all_read_fields);
282  void set_constants();
283  bool alloc_buffer();
284 
  /* Accessors for the byte widths of stored record offsets, record
     lengths and field offsets */
  uint get_size_of_rec_offset() { return size_of_rec_ofs; }
  uint get_size_of_rec_length() { return size_of_rec_len; }
  uint get_size_of_fld_offset() { return size_of_fld_ofs; }
288 
  /*
    Get the position of the record referenced by the offset stored in
    the size_of_rec_ofs bytes immediately preceding ptr.
  */
  uchar *get_rec_ref(uchar *ptr)
  {
    return buff+get_offset(size_of_rec_ofs, ptr-size_of_rec_ofs);
  }
  /* Read a stored record length from position ptr */
  ulong get_rec_length(uchar *ptr)
  {
    return (ulong) get_offset(size_of_rec_len, ptr);
  }
  /* Read a stored field offset from position ptr */
  ulong get_fld_offset(uchar *ptr)
  {
    return (ulong) get_offset(size_of_fld_ofs, ptr);
  }
301 
  /* Store, right before ptr, the offset of the record ref from buff */
  void store_rec_ref(uchar *ptr, uchar* ref)
  {
    store_offset(size_of_rec_ofs, ptr-size_of_rec_ofs, (ulong) (ref-buff));
  }

  /* Store a record length len at position ptr */
  void store_rec_length(uchar *ptr, ulong len)
  {
    store_offset(size_of_rec_len, ptr, len);
  }
  /* Store a field offset ofs at position ptr */
  void store_fld_offset(uchar *ptr, ulong ofs)
  {
    store_offset(size_of_fld_ofs, ptr, ofs);
  }
315 
316  /* Write record fields and their required offsets into the join buffer */
317  uint write_record_data(uchar *link, bool *is_full);
318 
  /*
    This method must determine for how much the auxiliary buffer should be
    incremented when a new record is added to the join buffer.
    If no auxiliary buffer is needed the function should return 0.
  */
  virtual uint aux_buffer_incr() { return 0; }

  /* Shall return the minimal required size of the auxiliary buffer
     (0 - no auxiliary buffer is needed) */
  virtual uint aux_buffer_min_size() const { return 0; }
331 
332  /* Shall calculate how much space is remaining in the join buffer */
333  virtual ulong rem_space()
334  {
335  return std::max<ulong>(buff_size-(end_pos-buff)-aux_buff_size, 0UL);
336  }
337 
338  /* Shall skip record from the join buffer if its match flag is on */
339  virtual bool skip_record_if_match();
340 
341  /* Read some flag and data fields of a record from the join buffer */
343 
344  /* Read some flag fields of a record from the join buffer */
345  void read_some_flag_fields();
346 
347  /* Read all flag fields of the record which is at position rec_ptr */
348  void read_all_flag_fields_by_pos(uchar *rec_ptr);
349 
350  /* Read a data record field from the join buffer */
351  uint read_record_field(CACHE_FIELD *copy, bool last_record);
352 
353  /* Read a referenced field from the join buffer */
354  bool read_referenced_field(CACHE_FIELD *copy, uchar *rec_ptr, uint *len);
355 
  /*
    True if rec_ptr points to the record whose blob data stay in
    record buffers rather than in the join buffer (this can only be
    the last record written, see last_rec_blob_data_is_in_rec_buff).
  */
  bool blob_data_is_in_rec_buff(uchar *rec_ptr)
  {
    return rec_ptr == last_rec_pos && last_rec_blob_data_is_in_rec_buff;
  }
364 
365  /* Find matches from the next table for records from the join buffer */
366  virtual enum_nested_loop_state join_matching_records(bool skip_last)=0;
367 
368  /* Add null complements for unmatched outer records from buffer */
369  virtual enum_nested_loop_state join_null_complements(bool skip_last);
370 
371  /* Restore the fields of the last record from the join buffer */
372  virtual void restore_last_record();
373 
374  /*Set match flag for a record in join buffer if it has not been set yet */
375  bool set_match_flag_if_none(JOIN_TAB *first_inner, uchar *rec_ptr);
376 
377  enum_nested_loop_state generate_full_extensions(uchar *rec_ptr);
378 
379  /* Check matching to a partial join record from the join buffer */
380  virtual bool check_match(uchar *rec_ptr);
381 
384  {
385  return (t->last_sj_inner_tab == t &&
386  t->get_sj_strategy() == SJ_OPT_FIRST_MATCH) ||
387  (t->first_inner && t->first_inner->last_inner == t &&
388  t->table->reginfo.not_exists_optimize);
389  }
390 
391  /*
392  This function shall add a record into the join buffer and return TRUE
393  if it has been decided that it should be the last record in the buffer.
394  */
395  virtual bool put_record_in_cache();
396 
397 public:
398  /* Pointer to the previous join cache if there is any */
399  JOIN_CACHE *prev_cache;
400  /* Pointer to the next join cache if there is any */
401  JOIN_CACHE *next_cache;
402 
403  /* Shall initialize the join cache structure */
404  virtual int init()=0;
405 
406  /* The function shall return TRUE only for BKA caches */
407  virtual bool is_key_access() { return FALSE; }
408 
409  /* Shall reset the join buffer for reading/writing */
410  virtual void reset_cache(bool for_writing);
411 
  /* Add a record into join buffer and call join_records() if it's full */
  virtual enum_nested_loop_state put_record()
  {
    /* put_record_in_cache() returns TRUE when this record should be the
       last one in the buffer: flush by joining the buffered records now */
    if (put_record_in_cache())
      return join_records(false);
    return NESTED_LOOP_OK;
  }
419  /*
420  This function shall read the next record into the join buffer and return
421  TRUE if there is no more next records.
422  */
423  virtual bool get_record();
424 
425  /*
426  This function shall read the record at the position rec_ptr
427  in the join buffer
428  */
429  virtual void get_record_by_pos(uchar *rec_ptr);
430 
431  /* Shall return the value of the match flag for the positioned record */
432  virtual bool get_match_flag_by_pos(uchar *rec_ptr);
433 
  /* Shall return the position of the current record */
  virtual uchar *get_curr_rec() { return curr_rec_pos; }

  /* Shall set the current record link */
  virtual void set_curr_rec_link(uchar *link) { curr_rec_link= link; }

  /* Shall return the current record link; when no explicit link has been
     set, the current record's own position serves as its link */
  virtual uchar *get_curr_rec_link()
  {
    return (curr_rec_link ? curr_rec_link : get_curr_rec());
  }
445 
  /* Join records from the join buffer with records from the next join table */
  enum_nested_loop_state end_send() { return join_records(false); };
  enum_nested_loop_state join_records(bool skip_last);

  /* Identify this QEP operation as a join-buffering (cache) operation */
  enum_op_type type() { return OT_CACHE; }
451 
  virtual ~JOIN_CACHE() {}
  /* Release the join buffer and detach this cache from its neighbours */
  void free()
  {
    /*
      JOIN_CACHE doesn't support unlinking cache chain. This code is needed
      only by set_join_cache_denial().
    */
    /*
      If there is a previous/next cache linked to this cache through the
      (next|prev)_cache pointer: remove the link.
    */
    if (prev_cache)
      prev_cache->next_cache= NULL;
    if (next_cache)
      next_cache->prev_cache= NULL;

    my_free(buff);
    buff= NULL;  /* guard against double free / stale pointer use */
  }
471 
473  enum {ALG_NONE= 0, ALG_BNL= 1, ALG_BKA= 2, ALG_BKA_UNIQUE= 4};
474 
475  friend class JOIN_CACHE_BNL;
476  friend class JOIN_CACHE_BKA;
477  friend class JOIN_CACHE_BKA_UNIQUE;
478 };
479 
481 {
482 
483 protected:
484 
485  /* Using BNL find matches from the next table for records from join buffer */
486  enum_nested_loop_state join_matching_records(bool skip_last);
487 
488 public:
489 
490  /*
491  This constructor creates an unlinked BNL join cache. The cache is to be
492  used to join table 'tab' to the result of joining the previous tables
493  specified by the 'j' parameter.
494  */
495  JOIN_CACHE_BNL(JOIN *j, JOIN_TAB *tab)
496  {
497  join= j;
498  join_tab= tab;
499  prev_cache= next_cache= 0;
500  }
501 
502  /*
503  This constructor creates a linked BNL join cache. The cache is to be
504  used to join table 'tab' to the result of joining the previous tables
505  specified by the 'j' parameter. The parameter 'prev' specifies the previous
506  cache object to which this cache is linked.
507  */
508  JOIN_CACHE_BNL(JOIN *j, JOIN_TAB *tab, JOIN_CACHE *prev)
509  {
510  join= j;
511  join_tab= tab;
512  prev_cache= prev;
513  next_cache= 0;
514  if (prev)
515  prev->next_cache= this;
516  }
517 
518  /* Initialize the BNL cache */
519  int init();
520 
521 };
522 
524 {
525 protected:
526 
527  /* Flag to to be passed to the MRR interface */
528  uint mrr_mode;
529 
530  /* MRR buffer assotiated with this join cache */
531  HANDLER_BUFFER mrr_buff;
532 
  /* Shall initialize the MRR buffer to occupy the free tail of the join
     buffer (everything after the last written record) */
  virtual void init_mrr_buff()
  {
    mrr_buff.buffer= end_pos;
    mrr_buff.buffer_end= buff+buff_size;
  }
539 
540  /*
541  The number of the cache fields that are used in building keys to access
542  the table join_tab
543  */
544  uint local_key_arg_fields;
545  /*
546  The total number of the fields in the previous caches that are used
547  in building keys t access the table join_tab
548  */
549  uint external_key_arg_fields;
550 
551  /*
552  This flag indicates that the key values will be read directly from the join
553  buffer. It will save us building key values in the key buffer.
554  */
555  bool use_emb_key;
556  /* The length of an embedded key value */
557  uint emb_key_length;
558 
559  /* Check the possibility to read the access keys directly from join buffer */
560  bool check_emb_key_usage();
561 
563  uint aux_buffer_incr();
564 
566  uint aux_buffer_min_size() const;
567 
568  /* Using BKA find matches from the next table for records from join buffer */
569  enum_nested_loop_state join_matching_records(bool skip_last);
570 
571  /* Prepare to search for records that match records from the join buffer */
572  bool init_join_matching_records(RANGE_SEQ_IF *seq_funcs, uint ranges);
573 
574 public:
575 
576  /*
577  This constructor creates an unlinked BKA join cache. The cache is to be
578  used to join table 'tab' to the result of joining the previous tables
579  specified by the 'j' parameter.
580  The MRR mode initially is set to 'flags'.
581  */
582  JOIN_CACHE_BKA(JOIN *j, JOIN_TAB *tab, uint flags)
583  {
584  join= j;
585  join_tab= tab;
586  prev_cache= next_cache= 0;
587  mrr_mode= flags;
588  }
589 
590  /*
591  This constructor creates a linked BKA join cache. The cache is to be
592  used to join table 'tab' to the result of joining the previous tables
593  specified by the 'j' parameter. The parameter 'prev' specifies the cache
594  object to which this cache is linked.
595  The MRR mode initially is set to 'flags'.
596  */
597  JOIN_CACHE_BKA(JOIN *j, JOIN_TAB *tab, uint flags, JOIN_CACHE* prev)
598  {
599  join= j;
600  join_tab= tab;
601  prev_cache= prev;
602  next_cache= 0;
603  if (prev)
604  prev->next_cache= this;
605  mrr_mode= flags;
606  }
607 
608  /* Initialize the BKA cache */
609  int init();
610 
611  bool is_key_access() { return TRUE; }
612 
613  /* Shall get the key built over the next record from the join buffer */
614  virtual uint get_next_key(uchar **key);
615 
616  /* Check if the record combination matches the index condition */
617  bool skip_index_tuple(range_seq_t rseq, char *range_info);
618 };
619 
620 /*
621  The class JOIN_CACHE_BKA_UNIQUE supports the variant of the BKA join algorithm
622  that submits only distinct keys to the MRR interface. The records in the join
623  buffer of a cache of this class that have the same access key are linked into
624  a chain attached to a key entry structure that either itself contains the key
625  value, or, in the case when the keys are embedded, refers to its occurance in
626  one of the records from the chain.
627  To build the chains with the same keys a hash table is employed. It is placed
628  at the very end of the join buffer. The array of hash entries is allocated
629  first at the very bottom of the join buffer, then go key entries. A hash entry
630  contains a header of the list of the key entries with the same hash value.
631  Each key entry is a structure of the following type:
632  struct st_join_cache_key_entry {
633  union {
634  uchar[] value;
635  cache_ref *value_ref; // offset from the beginning of the buffer
636  } hash_table_key;
637  key_ref next_key; // offset backward from the beginning of hash table
638  cache_ref *last_rec // offset from the beginning of the buffer
639  }
640  The references linking the records in a chain are always placed at the very
641  beginning of the record info stored in the join buffer. The records are
642  linked in a circular list. A new record is always added to the end of this
643  list. When a key is passed to the MRR interface it can be passed either with
644  an association link containing a reference to the header of the record chain
645  attached to the corresponding key entry in the hash table, or without any
646  association link. When the next record is returned by a call to the MRR
647  function multi_range_read_next without any association (because if was not
648  passed together with the key) then the key value is extracted from the
649  returned record and searched for it in the hash table. If there is any records
650  with such key the chain of them will be yielded as the result of this search.
651 
652  The following picture represents a typical layout for the info stored in the
653  join buffer of a join cache object of the JOIN_CACHE_BKA_UNIQUE class.
654 
655  buff
656  V
657  +----------------------------------------------------------------------------+
658  | |[*]record_1_1| |
659  | ^ | |
660  | | +--------------------------------------------------+ |
661  | | |[*]record_2_1| | |
662  | | ^ | V |
663  | | | +------------------+ |[*]record_1_2| |
664  | | +--------------------+-+ | |
665  |+--+ +---------------------+ | | +-------------+ |
666  || | | V | | |
667  |||[*]record_3_1| |[*]record_1_3| |[*]record_2_2| | |
668  ||^ ^ ^ | |
669  ||+----------+ | | | |
670  ||^ | |<---------------------------+-------------------+ |
671  |++ | | ... mrr | buffer ... ... | | |
672  | | | | |
673  | +-----+--------+ | +-----|-------+ |
674  | V | | | V | | |
675  ||key_3|[/]|[*]| | | |key_2|[/]|[*]| | |
676  | +-+---|-----------------------+ | |
677  | V | | | | |
678  | |key_1|[*]|[*]| | | ... |[*]| ... |[*]| ... | |
679  +----------------------------------------------------------------------------+
680  ^ ^ ^
681  | i-th entry j-th entry
682  hash table
683 
684  i-th hash entry:
685  circular record chain for key_1:
686  record_1_1
687  record_1_2
688  record_1_3 (points to record_1_1)
689  circular record chain for key_3:
690  record_3_1 (points to itself)
691 
692  j-th hash entry:
693  circular record chain for key_2:
694  record_2_1
695  record_2_2 (points to record_2_1)
696 
697 */
698 
700 {
701 
702 private:
703 
704  /* Size of the offset of a key entry in the hash table */
705  uint size_of_key_ofs;
706 
707  /*
708  Length of a key value.
709  It is assumed that all key values have the same length.
710  */
711  uint key_length;
712  /*
713  Length of the key entry in the hash table.
714  A key entry either contains the key value, or it contains a reference
715  to the key value if use_emb_key flag is set for the cache.
716  */
717  uint key_entry_length;
718 
719  /* The beginning of the hash table in the join buffer */
720  uchar *hash_table;
721  /* Number of hash entries in the hash table */
722  uint hash_entries;
723 
724  /* Number of key entries in the hash table (number of distinct keys) */
725  uint key_entries;
726 
727  /* The position of the last key entry in the hash table */
728  uchar *last_key_entry;
729 
730  /* The position of the currently retrieved key entry in the hash table */
731  uchar *curr_key_entry;
732 
733  /*
734  The offset of the record fields from the beginning of the record
735  representation. The record representation starts with a reference to
736  the next record in the key record chain followed by the length of
737  the trailing record data followed by a reference to the record segment
738  in the previous cache, if any, followed by the record fields.
739  */
740  uint rec_fields_offset;
741  /* The offset of the data fields from the beginning of the record fields */
742  uint data_fields_offset;
743 
744  uint get_hash_idx(uchar* key, uint key_len);
745 
746  void cleanup_hash_table();
747 
748 protected:
749 
750  uint get_size_of_key_offset() { return size_of_key_ofs; }
751 
752  /*
753  Get the position of the next_key_ptr field pointed to by
754  a linking reference stored at the position key_ref_ptr.
755  This reference is actually the offset backward from the
756  beginning of hash table.
757  */
758  uchar *get_next_key_ref(uchar *key_ref_ptr)
759  {
760  return hash_table-get_offset(size_of_key_ofs, key_ref_ptr);
761  }
762 
763  /*
764  Store the linking reference to the next_key_ptr field at
765  the position key_ref_ptr. The position of the next_key_ptr
766  field is pointed to by ref. The stored reference is actually
767  the offset backward from the beginning of the hash table.
768  */
769  void store_next_key_ref(uchar *key_ref_ptr, uchar *ref)
770  {
771  store_offset(size_of_key_ofs, key_ref_ptr, (ulong) (hash_table-ref));
772  }
773 
774  /*
775  Check whether the reference to the next_key_ptr field at the position
776  key_ref_ptr contains a nil value.
777  */
778  bool is_null_key_ref(uchar *key_ref_ptr)
779  {
780  ulong nil= 0;
781  return memcmp(key_ref_ptr, &nil, size_of_key_ofs ) == 0;
782  }
783 
  /*
    Set the reference to the next_key_ptr field at the position
    key_ref_ptr equal to nil (a zero offset terminates a key chain).
  */
  void store_null_key_ref(uchar *key_ref_ptr)
  {
    ulong nil= 0;
    store_offset(size_of_key_ofs, key_ref_ptr, nil);
  }
793 
  /* Resolve a record reference stored at ref_ptr (an offset from the
     beginning of the join buffer) into a buffer position */
  uchar *get_next_rec_ref(uchar *ref_ptr)
  {
    return buff+get_offset(get_size_of_rec_offset(), ref_ptr);
  }

  /* Store at ref_ptr a reference to the record at position ref, encoded
     as an offset from the beginning of the join buffer */
  void store_next_rec_ref(uchar *ref_ptr, uchar *ref)
  {
    store_offset(get_size_of_rec_offset(), ref_ptr, (ulong) (ref-buff));
  }
803 
804  /*
805  Get the position of the embedded key value for the current
806  record pointed to by get_curr_rec().
807  */
808  uchar *get_curr_emb_key()
809  {
810  return get_curr_rec()+data_fields_offset;
811  }
812 
813  /*
814  Get the position of the embedded key value pointed to by a reference
815  stored at ref_ptr. The stored reference is actually the offset from
816  the beginning of the join buffer.
817  */
818  uchar *get_emb_key(uchar *ref_ptr)
819  {
820  return buff+get_offset(get_size_of_rec_offset(), ref_ptr);
821  }
822 
823  /*
824  Store the reference to an embedded key at the position key_ref_ptr.
825  The position of the embedded key is pointed to by ref. The stored
826  reference is actually the offset from the beginning of the join buffer.
827  */
828  void store_emb_key_ref(uchar *ref_ptr, uchar *ref)
829  {
830  store_offset(get_size_of_rec_offset(), ref_ptr, (ulong) (ref-buff));
831  }
832 
833  /*
834  Calculate how much space in the buffer would not be occupied by
835  records, key entries and additional memory for the MMR buffer.
836  */
837  ulong rem_space()
838  {
839  return std::max<ulong>(last_key_entry-end_pos-aux_buff_size, 0UL);
840  }
841 
  /*
    Initialize the MRR buffer allocating some space within the join buffer.
    The entire space between the last record put into the join buffer and the
    last key entry added to the hash table is used for the MRR buffer.
  */
  void init_mrr_buff()
  {
    mrr_buff.buffer= end_pos;
    mrr_buff.buffer_end= last_key_entry;
  }
852 
853  /* Skip record from JOIN_CACHE_BKA_UNIQUE buffer if its match flag is on */
854  bool skip_record_if_match();
855 
856  /* Using BKA_UNIQUE find matches for records from join buffer */
857  enum_nested_loop_state join_matching_records(bool skip_last);
858 
859  /* Search for a key in the hash table of the join buffer */
860  bool key_search(uchar *key, uint key_len, uchar **key_ref_ptr);
861 
862  virtual bool check_match(uchar *rec_ptr);
863 
864  /* Add a record into the JOIN_CACHE_BKA_UNIQUE buffer */
865  bool put_record_in_cache();
866 
867 public:
868 
  /*
    This constructor creates an unlinked BKA_UNIQUE join cache. The cache is
    to be used to join table 'tab' to the result of joining the previous tables
    specified by the 'j' parameter.
    The MRR mode initially is set to 'flags'. All work is delegated to the
    JOIN_CACHE_BKA base constructor.
  */
  JOIN_CACHE_BKA_UNIQUE(JOIN *j, JOIN_TAB *tab, uint flags)
    :JOIN_CACHE_BKA(j, tab, flags) {}
877 
  /*
    This constructor creates a linked BKA_UNIQUE join cache. The cache is
    to be used to join table 'tab' to the result of joining the previous tables
    specified by the 'j' parameter. The parameter 'prev' specifies the cache
    object to which this cache is linked.
    The MRR mode initially is set to 'flags'. All work is delegated to the
    JOIN_CACHE_BKA base constructor.
  */
  JOIN_CACHE_BKA_UNIQUE(JOIN *j, JOIN_TAB *tab, uint flags, JOIN_CACHE* prev)
    :JOIN_CACHE_BKA(j, tab, flags, prev) {}
887 
888  /* Initialize the BKA_UNIQUE cache */
889  int init();
890 
891  /* Reset the JOIN_CACHE_BKA_UNIQUE buffer for reading/writing */
892  void reset_cache(bool for_writing);
893 
894  /* Read the next record from the JOIN_CACHE_BKA_UNIQUE buffer */
895  bool get_record();
896 
897  /*
898  Shall check whether all records in a key chain have
899  their match flags set on
900  */
901  virtual bool check_all_match_flags_for_key(uchar *key_chain_ptr);
902 
903  uint get_next_key(uchar **key);
904 
  /* Get the head of the record chain attached to the current key entry
     (the chain reference occupies the last rec-offset-sized bytes of
     the key entry) */
  uchar *get_curr_key_chain()
  {
    return get_next_rec_ref(curr_key_entry+key_entry_length-
                            get_size_of_rec_offset());
  }
911 
912  /* Check if the record combination matches the index condition */
913  bool skip_index_tuple(range_seq_t rseq, char *range_info);
914 };
915 
916 
917 #endif /* SQL_JOIN_CACHE_INCLUDED */