MySQL 5.6.14 Source Code Document
 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Groups Pages
opt_trace.cc
Go to the documentation of this file.
1 /* Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
2 
3  This program is free software; you can redistribute it and/or modify
4  it under the terms of the GNU General Public License as published by
5  the Free Software Foundation; version 2 of the License.
6 
7  This program is distributed in the hope that it will be useful,
8  but WITHOUT ANY WARRANTY; without even the implied warranty of
9  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10  GNU General Public License for more details.
11 
12  You should have received a copy of the GNU General Public License
13  along with this program; if not, write to the Free Software
14  Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */
15 
21 #include "opt_trace.h"
22 #include "mysqld.h" // system_charset_info
23 #include "item.h" // Item
24 #include "sql_string.h" // String
25 #include "m_string.h" // _dig_vec_lower
26 
27 #ifdef OPTIMIZER_TRACE
28 
// gcc.gnu.org/bugzilla/show_bug.cgi?id=29365
namespace random_name_to_avoid_gcc_bug_29365 {

/**
  A wrapper around String which stores the trace (or the traced query)
  and enforces a memory ceiling: once the underlying String's allocated
  length has reached 'allowed_mem_size', further appends are dropped and
  only accounted for in 'missing_bytes'.
*/
class Buffer
{
private:
  size_t allowed_mem_size;   ///< memory the buffer may allocate; 0 until set
  size_t missing_bytes;      ///< bytes dropped because the ceiling was hit
  String string_buf;
public:
  Buffer() : allowed_mem_size(0), missing_bytes(0) {}

  uint32 alloced_length() const { return string_buf.alloced_length(); }
  uint32 length() const { return string_buf.length(); }
  void prealloc();           ///< pro-actively grow the buffer (see definition)
  char *c_ptr_safe() { return string_buf.c_ptr_safe(); }
  const char *ptr() const { return string_buf.ptr(); }

  const CHARSET_INFO *charset() const { return string_buf.charset(); }
  void set_charset(const CHARSET_INFO *charset)
  { string_buf.set_charset(charset); }

  /**
    Appends 'length' bytes of 'str'; if the memory ceiling has been
    reached, drops them and adds 'length' to 'missing_bytes' instead.
  */
  void append(const char *str, size_t length);
  void append(const char *str) { return append(str, strlen(str)); }
  /**
    Like append(), but escapes characters as needed to produce a valid
    JSON string value (control characters, '"' and '\').
  */
  void append_escaped(const char *str, size_t length);
  void append(char chr);

  size_t get_allowed_mem_size() const { return allowed_mem_size; }
  size_t get_missing_bytes() const { return missing_bytes; }

  void set_allowed_mem_size(size_t a) { allowed_mem_size= a; }
};


} // namespace
79 
80 
81 using random_name_to_avoid_gcc_bug_29365::Buffer;
82 
83 
/**
  The trace of one statement: the JSON text under construction
  (trace_buffer), the original query text (query_buffer), and the state
  needed while structures are opened/closed and key/value pairs added.
*/
class Opt_trace_stmt
{
public:
  /** Creates a trace owned by context 'ctx_arg'; trace text is UTF8. */
  Opt_trace_stmt(Opt_trace_context *ctx_arg);

  /** Ends the trace: 0-terminates the buffer, sends it to DBUG. */
  void end();

  /** @returns whether end() has been called on this trace */
  bool has_ended() const { return ended; }

  /** Sets the memory ceiling of trace_buffer. */
  void set_allowed_mem_size(size_t size);

  /** Stores the query text; should be called once per statement. */
  void set_query(const char* query, size_t length,
                 const CHARSET_INFO *charset);

  /* Below, functions for filling the statement's trace */

  /**
    Opens a nested object/array: emits key (if any) and opening bracket,
    pushes the previous innermost structure, makes 'ots' the current one.
    @returns true on error (out of memory while pushing on the stack)
  */
  bool open_struct(const char *key, Opt_trace_struct *ots,
                   bool wants_disable_I_S, char opening_bracket);
  /**
    Closes the innermost structure: pops the stack, emits the closing
    bracket and, if enabled, an end marker comment with the key.
  */
  void close_struct(const char *saved_key, bool has_disabled_I_S,
                    char closing_bracket);

  /** Emits a separating comma if needed, then goes to the next line. */
  void separator();
  /** Ends the line and indents the next one per the nesting depth. */
  void next_line();

  /**
    Adds a key/value pair to the trace text.
    @param quotes  whether to surround the value with double quotes
    @param escape  whether the value needs JSON string escaping
  */
  void add(const char *key, const char *val, size_t val_length,
           bool quotes, bool escape);

  /* Below, functions to request information from this instance */

  /** Fills 'info' with pointers/lengths of trace and query text. */
  void fill_info(Opt_trace_info *info) const;

  /** @returns a pointer to the last 'size' bytes (at most) of the trace */
  const char *trace_buffer_tail(size_t size);

  /** @returns total memory allocated by the trace and query buffers */
  size_t alloced_length() const
  { return trace_buffer.alloced_length() + query_buffer.alloced_length(); }

  void assert_current_struct(const Opt_trace_struct *s) const
  { DBUG_ASSERT(current_struct == s); }

  /** Marks the trace as denied to the user (privilege problem). */
  void missing_privilege();

  bool support_I_S() const { return I_S_disabled == 0; }

  /** Temporarily disables I_S output (counter-based, may nest). */
  void disable_I_S() { ++I_S_disabled; }

  /** Undoes one disable_I_S() call. */
  void restore_I_S() { --I_S_disabled; }

  /**
    Generates a substitute key ("unknown_key_N") when a key is required
    but missing; keeps the output syntactically valid JSON.
  */
  const char *make_unknown_key();

private:

  bool ended;                ///< true if end() has been called

  /// Level of nested I_S disabling; output goes to I_S only when 0.
  int I_S_disabled;

  bool missing_priv;         ///< whether user lacks the privilege to see trace

  Opt_trace_context *ctx;    ///< non-owning pointer to the owning context
  Opt_trace_struct *current_struct;  ///< innermost open structure, or NULL

  /// Enclosing structures of current_struct, outermost first.
  Dynamic_array<Opt_trace_struct *> stack_of_current_structs;

  Buffer trace_buffer;       ///< the JSON trace text
  Buffer query_buffer;       ///< the original query text

  /// Counter used by make_unknown_key() to generate distinct keys.
  uint unknown_key_count;
  /// Buffer for make_unknown_key(): "unknown_key_" + digits + NUL fit in 24.
  char unknown_key[24];
};
238 
239 
240 // implementation of class Opt_trace_struct
241 
namespace {
/// JSON bracket for the start of a structure: '{' for an object
/// (key-requiring), '[' for an array.
inline char opening_bracket(bool requires_key)
{
  return requires_key ? '{' : '[';
}
/// JSON bracket matching opening_bracket() for the end of a structure.
inline char closing_bracket(bool requires_key)
{
  return requires_key ? '}' : ']';
}
} // namespace
254 
255 
256 void Opt_trace_struct::do_construct(Opt_trace_context *ctx,
257  bool requires_key_arg,
258  const char *key,
260 {
261  saved_key= key;
262  requires_key= requires_key_arg;
263 
264  DBUG_PRINT("opt", ("%s: starting struct", key));
265  stmt= ctx->get_current_stmt_in_gen();
266 #ifndef DBUG_OFF
267  previous_key[0]= 0;
268 #endif
269  has_disabled_I_S= !ctx->feature_enabled(feature);
270  empty= true;
271  if (likely(!stmt->open_struct(key, this, has_disabled_I_S,
272  opening_bracket(requires_key))))
273  started= true;
274 }
275 
276 
/**
  Real destructor body: closes the structure in the statement's trace.
  Must be called only if do_construct() succeeded ('started' is true).
*/
void Opt_trace_struct::do_destruct()
{
  DBUG_PRINT("opt", ("%s: ending struct", saved_key));
  DBUG_ASSERT(started);
  stmt->close_struct(saved_key, has_disabled_I_S,
                     closing_bracket(requires_key));
  started= false;   // prevent double-closing
}
285 
286 
/**
  Adds a string value (always double-quoted in the JSON output).
  @param key        key, or NULL if inside an array
  @param val        value bytes (need not be 0-terminated)
  @param val_length length of 'val'
  @param escape     whether 'val' may contain characters needing JSON
                    escaping
  @returns *this, for call chaining
*/
Opt_trace_struct& Opt_trace_struct::do_add(const char *key, const char *val,
                                           size_t val_length,
                                           bool escape)
{
  DBUG_ASSERT(started);
  DBUG_PRINT("opt", ("%s: \"%.*s\"", key, (int)val_length, val));
  stmt->add(key, val, val_length, true, escape);
  return *this;
}
303 
namespace {
/// JSON text of a boolean value, indexed by the bool itself.
LEX_CSTRING bool_as_text[]= { { STRING_WITH_LEN("false") },
                              { STRING_WITH_LEN("true") } };
}
309 
/// Adds a boolean value, printed as unquoted JSON true/false.
Opt_trace_struct& Opt_trace_struct::do_add(const char *key, bool val)
{
  DBUG_ASSERT(started);
  DBUG_PRINT("opt", ("%s: %d", key, (int)val));
  const LEX_CSTRING *text= &bool_as_text[val];
  // Not quoted, no escaping: "true"/"false" are plain JSON literals.
  stmt->add(key, text->str, text->length, false, false);
  return *this;
}
318 
319 
/// Adds a signed 64-bit integer value, printed as an unquoted number.
Opt_trace_struct& Opt_trace_struct::do_add(const char *key, longlong val)
{
  DBUG_ASSERT(started);
  char buf[22]; // 22 is enough for digits of a 64-bit int
  llstr(val, buf);
  DBUG_PRINT("opt", ("%s: %s", key, buf));
  stmt->add(key, buf, strlen(buf), false, false);
  return *this;
}
329 
330 
/// Adds an unsigned 64-bit integer value, printed as an unquoted number.
Opt_trace_struct& Opt_trace_struct::do_add(const char *key, ulonglong val)
{
  DBUG_ASSERT(started);
  char buf[22]; // enough for the digits of a 64-bit unsigned int
  ullstr(val, buf);
  DBUG_PRINT("opt", ("%s: %s", key, buf));
  stmt->add(key, buf, strlen(buf), false, false);
  return *this;
}
340 
341 
/// Adds a floating-point value, printed with "%g" as an unquoted number.
Opt_trace_struct& Opt_trace_struct::do_add(const char *key, double val)
{
  DBUG_ASSERT(started);
  char buf[32]; // 32 is enough for digits of a double
  my_snprintf(buf, sizeof(buf), "%g", val);
  DBUG_PRINT("opt", ("%s: %s", key, buf));
  stmt->add(key, buf, strlen(buf), false, false);
  return *this;
}
351 
352 
/// Adds a JSON null value (unquoted literal "null").
Opt_trace_struct& Opt_trace_struct::do_add_null(const char *key)
{
  DBUG_ASSERT(started);
  DBUG_PRINT("opt", ("%s: null", key));
  stmt->add(key, STRING_WITH_LEN("null"), false, false);
  return *this;
}
360 
361 
/**
  Adds an expression (Item): prints it into a temporary String and adds
  the text as an escaped JSON string; a NULL item becomes JSON null.
*/
Opt_trace_struct& Opt_trace_struct::do_add(const char *key, Item *item)
{
  char buff[256];                         // stack space for short expressions
  String str(buff,(uint32) sizeof(buff), system_charset_info);
  str.length(0);
  if (item != NULL)
  {
    // QT_TO_SYSTEM_CHARSET because trace must be in UTF8
    item->print(&str, enum_query_type(QT_TO_SYSTEM_CHARSET |
                                      QT_SHOW_SELECT_NUMBER |
                                      QT_NO_DEFAULT_DB));
    /* needs escaping */
    return do_add(key, str.ptr(), str.length(), true);
  }
  else
    return do_add_null(key);
}
379 
380 
/**
  Adds an unquoted hexadecimal value, formatted as "0x" followed by the
  significant bytes of 'val' in lowercase hex (at least one byte).
*/
Opt_trace_struct& Opt_trace_struct::do_add_hex(const char *key, uint64 val)
{
  DBUG_ASSERT(started);
  // 2 bytes for "0x" + at most 16 hex digits: exactly fits a 64-bit value.
  char buf[2 + 16], *p_end= buf + sizeof(buf) - 1, *p= p_end;
  // Fill 'buf' from the end backwards, two hex digits (one byte of
  // 'val') per iteration, stopping after the most significant non-zero byte.
  for ( ; ; )
  {
    *p--= _dig_vec_lower[val & 15];
    *p--= _dig_vec_lower[(val & 240) >> 4];
    val>>= 8;
    if (val == 0)
      break;
  }
  *p--= 'x';
  *p= '0';
  const int len= p_end + 1 - p;           // length of the "0x..." string
  DBUG_PRINT("opt", ("%s: %.*s", key, len, p));
  stmt->add(check_key(key), p, len, false, false);
  return *this;
}
400 
401 
/**
  Adds the printed form of the table's TABLE_LIST under the key "table"
  (escaped, as the name may contain special characters). Does nothing if
  the table has no pos_in_table_list.
*/
Opt_trace_struct& Opt_trace_struct::do_add_utf8_table(const TABLE *tab)
{
  TABLE_LIST * const tl= tab->pos_in_table_list;
  if (tl != NULL)
  {
    StringBuffer<32> str;
    // QT_TO_SYSTEM_CHARSET: the trace must be in UTF8.
    tl->print(tab->in_use, &str, enum_query_type(QT_TO_SYSTEM_CHARSET |
                                                 QT_SHOW_SELECT_NUMBER |
                                                 QT_NO_DEFAULT_DB |
                                                 QT_DERIVED_TABLE_ONLY_ALIAS));
    return do_add("table", str.ptr(), str.length(), true);
  }
  return *this;
}
416 
417 
/**
  Validates 'key' against this structure's type: a JSON object requires
  a key for each member, a JSON array forbids keys. On mismatch the key
  is fixed (dropped, or synthesized via make_unknown_key()) so the
  produced JSON stays syntactically correct.
  @returns the key to actually use (possibly NULL, possibly synthetic)
*/
const char *Opt_trace_struct::check_key(const char *key)
{
  DBUG_ASSERT(started);
  // User should always add to the innermost open object, not outside.
  stmt->assert_current_struct(this);
  bool has_key= key != NULL;
  if (unlikely(has_key != requires_key))
  {
    // fix the key to produce correct JSON syntax:
    key= has_key ? NULL : stmt->make_unknown_key();
    has_key= !has_key;
  }
  if (has_key)
  {
#ifndef DBUG_OFF
    /*
      Check that we're not having two identical consecutive keys in one
      object; though the real restriction should not have 'consecutive'.
    */
    DBUG_ASSERT(strncmp(previous_key, key, sizeof(previous_key) - 1) != 0);
    strncpy(previous_key, key, sizeof(previous_key) - 1);
    previous_key[sizeof(previous_key) - 1]= 0;
#endif
  }
  return key;
}
444 
445 
446 // Implementation of Opt_trace_stmt class
447 
/// Creates a statement trace owned by 'ctx_arg'; buffers start empty.
Opt_trace_stmt::Opt_trace_stmt(Opt_trace_context *ctx_arg) :
  ended(false), I_S_disabled(0), missing_priv(false), ctx(ctx_arg),
  current_struct(NULL), unknown_key_count(0)
{
  // Trace is always in UTF8. This is the only charset which JSON accepts.
  trace_buffer.set_charset(system_charset_info);
  DBUG_ASSERT(system_charset_info == &my_charset_utf8_general_ci);
}
456 
457 
/**
  Ends the trace: all structures must already be closed. 0-terminates
  the trace text, dumps it to the DBUG trace, and releases the I_S
  disabling that missing_privilege() may have installed on the context.
*/
void Opt_trace_stmt::end()
{
  DBUG_ASSERT(stack_of_current_structs.elements() == 0);
  DBUG_ASSERT(I_S_disabled >= 0);
  ended= true;
  /*
    Because allocation is done in big chunks, buffer->Ptr[str_length]
    may be uninitialized while buffer->Ptr[allocated length] is 0, so we
    must use c_ptr_safe() as we want a 0-terminated string (which is easier
    to manipulate in a debugger, or to compare in unit tests with
    EXPECT_STREQ).
    c_ptr_safe() may realloc an empty String from 0 bytes to 8 bytes,
    when it adds the closing \0.
  */
  trace_buffer.c_ptr_safe();
  // Send the full nice trace to DBUG.
  DBUG_EXECUTE("opt",
               {
                 const char *trace= trace_buffer.c_ptr_safe();
                 DBUG_LOCK_FILE;
                 fputs("Complete optimizer trace:", DBUG_FILE);
                 fputs(trace, DBUG_FILE);
                 fputs("\n", DBUG_FILE);
                 DBUG_UNLOCK_FILE;
               }
               );
  // missing_privilege() disabled I_S on the context; undo it now.
  if (unlikely(missing_priv))
    ctx->restore_I_S();
}
487 
488 
/// Sets the memory ceiling of the trace buffer (not of the query buffer).
void Opt_trace_stmt::set_allowed_mem_size(size_t size)
{
  trace_buffer.set_allowed_mem_size(size);
}
493 
494 
/**
  Stores the statement's original query text. The query buffer borrows
  its memory allowance from the trace buffer: whatever the query
  allocates is subtracted from what the trace may allocate.
*/
void Opt_trace_stmt::set_query(const char *query, size_t length,
                               const CHARSET_INFO *charset)
{
  // Should be called only once per statement.
  DBUG_ASSERT(query_buffer.ptr() == NULL);
  query_buffer.set_charset(charset);
  if (!support_I_S())
  {
    /*
      Query won't be read, don't waste resources storing it. Still we have set
      the charset, which is necessary.
    */
    return;
  }
  // We are taking a bit of space from 'trace_buffer'.
  size_t available=
    (trace_buffer.alloced_length() >= trace_buffer.get_allowed_mem_size()) ?
    0 : (trace_buffer.get_allowed_mem_size() - trace_buffer.alloced_length());
  query_buffer.set_allowed_mem_size(available);
  // No need to escape query, this is not for JSON.
  query_buffer.append(query, length);
  // Space which query took is taken out of the trace:
  const size_t new_allowed_mem_size=
    (query_buffer.alloced_length() >= trace_buffer.get_allowed_mem_size()) ?
    0 : (trace_buffer.get_allowed_mem_size() - query_buffer.alloced_length());
  trace_buffer.set_allowed_mem_size(new_allowed_mem_size);
}
522 
523 
/**
  Opens a new nested structure: emits the key (if any) and opening
  bracket, pushes the previously innermost structure on the stack and
  makes 'ots' the innermost one.
  @param key               key of the structure in its parent, or NULL
  @param ots               the structure being opened
  @param wants_disable_I_S whether tracing should be disabled for this
                           structure and its children (feature disabled)
  @param opening_bracket   '{' or '[' depending on the structure's type
  @returns true on out-of-memory error (could not push on the stack)
*/
bool Opt_trace_stmt::open_struct(const char *key, Opt_trace_struct *ots,
                                 bool wants_disable_I_S,
                                 char opening_bracket)
{
  if (support_I_S())
  {
    if (wants_disable_I_S)
    {

      /*
        User requested no tracing for this structure's feature. We are
        entering a disabled portion; put an ellipsis "..." to alert the user.
        Disabling applies to all the structure's children.
        It is possible that inside this struct, a new statement is created
        (range optimizer can evaluate stored functions...): its tracing is
        disabled too.
        When the structure is destroyed, the initial setting is restored.
      */
      if (current_struct != NULL)
      {
        if (key != NULL)
          current_struct->add_alnum(key, "...");
        else
          current_struct->add_alnum("...");
      }
    }
    else
    {
      trace_buffer.prealloc();
      add(key, &opening_bracket, 1, false, false);
    }
  }
  if (wants_disable_I_S)
    ctx->disable_I_S_for_this_and_children();
  {
    DBUG_EXECUTE_IF("opt_trace_oom_in_open_struct",
                    DBUG_SET("+d,simulate_out_of_memory"););
    const bool rc= stack_of_current_structs.append(current_struct);
    /*
      If the append() above didn't trigger reallocation, we need to turn the
      symbol off by ourselves, or it could make an unrelated allocation
      fail.
    */
    DBUG_EXECUTE_IF("opt_trace_oom_in_open_struct",
                    DBUG_SET("-d,simulate_out_of_memory"););
    if (unlikely(rc))
      return true;
  }
  current_struct= ots;
  return false;
}
575 
576 
/**
  Closes the innermost structure: restores the enclosing one from the
  stack, emits the closing bracket and, when enabled, an end marker
  comment ("/* key *&#47;") to ease reading of deeply nested traces.
*/
void Opt_trace_stmt::close_struct(const char *saved_key,
                                  bool has_disabled_I_S,
                                  char closing_bracket)
{
  /*
    The structure which was innermost before open_struct() was pushed on
    the stack there (it may be NULL for a top-level structure), so this
    back() is safe.
  */
  current_struct= *(stack_of_current_structs.back());
  stack_of_current_structs.pop();
  if (support_I_S())
  {
    next_line();
    trace_buffer.append(closing_bracket);
    if (ctx->get_end_marker() && saved_key != NULL)
    {
      trace_buffer.append(STRING_WITH_LEN(" /* "));
      trace_buffer.append(saved_key);
      trace_buffer.append(STRING_WITH_LEN(" */"));
    }
  }
  // Undo the disabling which open_struct() did for this structure.
  if (has_disabled_I_S)
    ctx->restore_I_S();
}
601 
602 
/**
  Called before adding anything to the trace: emits the comma separating
  it from the previous sibling (if any) and moves to a new line.
*/
void Opt_trace_stmt::separator()
{
  DBUG_ASSERT(support_I_S());
  // Put a comma first, if we have already written an object at this level.
  if (current_struct != NULL)
  {
    // set_not_empty() returns the previous 'empty' flag of the struct.
    if (!current_struct->set_not_empty())
      trace_buffer.append(',');
    next_line();
  }
}
614 
615 
namespace {
/**
  Pool of spaces used by next_line() to indent trace lines (2 spaces per
  nesting level, appended in chunks of at most sizeof(my_spaces)-1).
  NOTE(review): in this copy of the file the three space-filled string
  literals had been collapsed to single spaces by whitespace mangling;
  they are restored here (3 x 64 spaces) as in the upstream source.
*/
const char my_spaces[] =
  "                                                                "
  "                                                                "
  "                                                                "
  ;
}
623 
624 
/**
  Ends the current trace line and indents the next one by two spaces per
  nesting level; a no-op when the one_line setting is on.
*/
void Opt_trace_stmt::next_line()
{
  if (ctx->get_one_line())
    return;
  trace_buffer.append('\n');

  uint to_be_printed= 2 * stack_of_current_structs.elements();
  const size_t spaces_len= sizeof(my_spaces) - 1;
  // Emit the indentation in chunks no larger than the spaces pool.
  while (to_be_printed > spaces_len)
  {
    trace_buffer.append(my_spaces, spaces_len);
    to_be_printed-= spaces_len;
  }
  trace_buffer.append(my_spaces, to_be_printed);
}
640 
641 
/**
  Generates a substitute key "unknown_key_N" with a fresh N; used when a
  key is required but missing. The 24-byte member buffer fits
  "unknown_key_" (12 chars) plus the digits of a uint plus the NUL.
  @returns pointer to the member buffer (overwritten by the next call)
*/
const char *Opt_trace_stmt::make_unknown_key()
{
  my_snprintf(unknown_key, sizeof(unknown_key),
              "unknown_key_%u", ++unknown_key_count);
  return unknown_key;
}
648 
649 
/**
  Low-level append of one key/value pair (or bare value) to the trace.
  @param key     key, or NULL; validated/fixed via check_key() when a
                 structure is open
  @param val     value bytes
  @param quotes  whether to wrap the value in double quotes
  @param escape  whether the value needs JSON string escaping
*/
void Opt_trace_stmt::add(const char *key, const char *val, size_t val_length,
                         bool quotes, bool escape)
{
  if (!support_I_S())
    return;
  separator();
  if (current_struct != NULL)
    key= current_struct->check_key(key);
  if (key != NULL)
  {
    trace_buffer.append('"');
    trace_buffer.append(key);
    trace_buffer.append(STRING_WITH_LEN("\": "));
  }
  if (quotes)
    trace_buffer.append('"');
  /*
    Objects' keys use "normal" characters (A-Za-z0-9_), no escaping
    needed. Same for numeric/bool values. Only string values may need
    escaping.
  */
  if (escape)
    trace_buffer.append_escaped(val, val_length);
  else
    trace_buffer.append(val, val_length);
  if (quotes)
    trace_buffer.append('"');
}
678 
679 
/**
  Exposes this trace for reading (for the OPTIMIZER_TRACE table): fills
  'info' with pointers/lengths into the internal buffers, or with empty
  values if the user lacks the privilege to see the trace.
*/
void Opt_trace_stmt::fill_info(Opt_trace_info *info) const
{
  // Note: assignment, not comparison — the flag is copied and tested.
  if (unlikely(info->missing_priv= missing_priv))
  {
    // Hide everything from an unprivileged reader.
    info->trace_ptr= info->query_ptr= "";
    info->trace_length= info->query_length= 0;
    info->query_charset= &my_charset_bin;
    info->missing_bytes= 0;
  }
  else
  {
    info->trace_ptr= trace_buffer.ptr();
    info->trace_length= trace_buffer.length();
    info->query_ptr= query_buffer.ptr();
    info->query_length= query_buffer.length();
    info->query_charset= query_buffer.charset();
    info->missing_bytes= trace_buffer.get_missing_bytes() +
      query_buffer.get_missing_bytes();
  }
}
700 
701 
702 const char *Opt_trace_stmt::trace_buffer_tail(size_t size)
703 {
704  size_t buffer_len= trace_buffer.length();
705  const char *ptr= trace_buffer.c_ptr_safe();
706  if (buffer_len > size)
707  ptr+= buffer_len - size;
708  return ptr;
709 }
710 
711 
/**
  Marks the trace as not viewable by the current user and disables
  tracing of substatements; idempotent (only the first call acts).
  The matching restore_I_S() happens in end().
*/
void Opt_trace_stmt::missing_privilege()
{
  if (!missing_priv)
  {
    DBUG_PRINT("opt", ("trace denied"));
    // This mark will make the trace appear empty in OPTIMIZER_TRACE table.
    missing_priv= true;
    // And all substatements will not be traced.
    ctx->disable_I_S_for_this_and_children();
  }
}
723 
724 
725 // Implementation of class Buffer
726 
727 namespace random_name_to_avoid_gcc_bug_29365 {
728 
/**
  Appends 'length' bytes of 'str', escaping as needed for a JSON string
  value. If the memory ceiling has already been reached, the bytes are
  dropped and counted in 'missing_bytes' instead.
*/
void Buffer::append_escaped(const char *str, size_t length)
{
  if (alloced_length() >= allowed_mem_size)
  {
    missing_bytes+= length;
    return;
  }
  const char *pstr, *pstr_end;
  char buf[128]; // Temporary output buffer.
  char *pbuf= buf;
  for (pstr= str, pstr_end= (str + length) ; pstr < pstr_end ; pstr++)
  {
    char esc;
    const char c= *pstr;
    /*
      JSON syntax says that control characters must be escaped. Experience
      confirms that this means ASCII 0->31 and " and \ . A few of
      them are accepted with a short escaping syntax (using \ : like \n)
      but for most of them, only \uXXXX works, where XXXX is a
      hexadecimal value for the code point.
      Rules also mention escaping / , but Python's and Perl's json modules
      do not require it, and somewhere on Internet someone said JSON
      allows escaping of / but does not require it.

      Because UTF8 has the same characters in range 0-127 as ASCII does, and
      other UTF8 characters don't contain 0-127 bytes, if we see a byte
      equal to 0 it is really the UTF8 u0000 character (a.k.a. ASCII NUL)
      and not a part of a longer character; if we see a newline, same,
      etc. That wouldn't necessarily be true with another character set.
    */
    switch (c)
    {
      // Don't use \u when possible for common chars, \ is easier to read:
    case '\\': esc= '\\'; break;
    case '"' : esc= '\"'; break;
    case '\n': esc= 'n' ; break;
    case '\r': esc= 'r' ; break;
    case '\t': esc= 't' ; break;
    default  : esc= 0   ; break;
    }
    if (esc != 0)                         // Escaping with backslash.
    {
      *pbuf++= '\\';
      *pbuf++= esc;
    }
    else
    {
      uint ascii_code= (uint)c;
      if (ascii_code < 32)                // Escaping with \u
      {
        // Emit \u00XY where XY is the two-hex-digit code point.
        *pbuf++= '\\';
        *pbuf++= 'u';
        *pbuf++= '0';
        *pbuf++= '0';
        if (ascii_code < 16)
        {
          *pbuf++= '0';
        }
        else
        {
          *pbuf++= '1';
          ascii_code-= 16;
        }
        *pbuf++= _dig_vec_lower[ascii_code];
      }
      else
        *pbuf++= c;                // Normal character, no escaping needed.
    }
    /*
      To fit a next character, we need at most 6 bytes (happens when using
      \uXXXX syntax) before the buffer's end:
    */
    if (pbuf > buf + (sizeof(buf) - 6))
    {
      // Possibly no room in 'buf' for next char, so flush buf.
      string_buf.append(buf, static_cast<uint32>(pbuf - buf));
      pbuf= buf;                          // back to buf's start
    }
  }
  // Flush any chars left in 'buf'.
  string_buf.append(buf, static_cast<uint32>(pbuf - buf));
}
811 
812 
/**
  Appends 'length' raw bytes of 'str' (no escaping); if the memory
  ceiling has already been reached, drops them and accounts for them in
  'missing_bytes'. The DBUG symbols simulate OOM inside the append for
  testing.
*/
void Buffer::append(const char *str, size_t length)
{
  if (alloced_length() >= allowed_mem_size)
  {
    missing_bytes+= length;
    return;
  }
  DBUG_EXECUTE_IF("opt_trace_oom_in_buffers",
                  DBUG_SET("+d,simulate_out_of_memory"););
  string_buf.append(str, static_cast<uint32>(length));
  DBUG_EXECUTE_IF("opt_trace_oom_in_buffers",
                  DBUG_SET("-d,simulate_out_of_memory"););
}
826 
827 
/// Appends one character, subject to the same memory ceiling as append().
void Buffer::append(char chr)
{
  if (alloced_length() >= allowed_mem_size)
  {
    missing_bytes++;
    return;
  }
  // No need for escaping chr, given how this function is used.
  string_buf.append(chr);
}
838 
839 
/**
  Pro-actively grows the buffer when little free space is left, to avoid
  many small reallocations during subsequent appends; never grows past
  the allowed memory size (minus a safety margin), never shrinks.
*/
void Buffer::prealloc()
{
  const size_t alloced= alloced_length();
  const size_t first_increment= 1024;
  if ((alloced - length()) < (first_increment / 3))
  {
    /*
      Support for I_S will produce long strings, and there is little free
      space left in the allocated buffer, so it looks like
      realloc is soon unavoidable; so let's get many bytes at a time.
      Note that if this re-allocation fails, or any String::append(), we
      will get a weird trace; either truncated if the server stops, or maybe
      with a hole if there is later memory again for the trace's
      continuation. The statement will fail anyway due to my_error(), in the
      server.
      We jump from 0 to first_increment and then multiply by 1.5. Unlike
      addition of a constant length, multiplying is expected to give amortized
      constant reallocation time; 1.5 is a commonly seen factor in the
      litterature.
    */
    size_t new_size= (alloced == 0) ? first_increment : (alloced * 15 / 10);
    size_t max_size= allowed_mem_size;
    /*
      Determine a safety margin:
      (A) String::realloc() adds at most ALIGN_SIZE(1) bytes to requested
      length, so we need to decrement max_size by this amount, to be sure that
      we don't allocate more than max_size
      (B) We need to stay at least one byte under that max_size, or the next
      append() would trigger up-front truncation, which is potentially wrong
      for a "pre-emptive allocation" as we do here.
    */
    const size_t safety_margin= ALIGN_SIZE(1) /* (A) */ + 1 /* (B) */;
    if (max_size >= safety_margin)
    {
      max_size-= safety_margin;
      if (new_size > max_size)       // Don't pre-allocate more than the limit.
        new_size= max_size;
      if (new_size >= alloced)       // Never shrink string.
        string_buf.realloc(static_cast<uint32>(new_size));
    }
  }
}
882 
883 } // namespace
884 
885 
886 // Implementation of Opt_trace_context class
887 
/// Names of the optimizer_trace flags; NullS-terminated array.
const char *Opt_trace_context::flag_names[]=
{
  "enabled", "one_line", "default", NullS
};
892 
/// Names of the traceable optimizer features; NullS-terminated array.
const char *Opt_trace_context::feature_names[]=
{
  "greedy_search", "range_optimizer", "dynamic_range",
  "repeated_subselect", "default", NullS
};
898 
900 Opt_trace_context::default_features=
901  Opt_trace_context::feature_value(Opt_trace_context::GREEDY_SEARCH |
902  Opt_trace_context::RANGE_OPTIMIZER |
903  Opt_trace_context::DYNAMIC_RANGE |
904  Opt_trace_context::REPEATED_SUBSELECT);
905 
906 
/**
  Destroys the context: purges all remaining (ended) traces, then frees
  the implementation object. pimpl is NULL when the connection never
  enabled tracing, hence the unlikely().
*/
Opt_trace_context::~Opt_trace_context()
{
  if (unlikely(pimpl != NULL))
  {
    /* There may well be some few ended traces left: */
    purge_stmts(true);
    /* All should have moved to 'del' list: */
    DBUG_ASSERT(pimpl->all_stmts_for_I_S.elements() == 0);
    /* All of 'del' list should have been deleted: */
    DBUG_ASSERT(pimpl->all_stmts_to_del.elements() == 0);
    delete pimpl;
  }
}
920 
921 
/**
  Allocates a T with new(std::nothrow); on failure calls my_error() so
  callers only need to test the returned pointer for NULL.
*/
template<class T> T * new_nothrow_w_my_error()
{
  T * const t= new (std::nothrow) T();
  if (unlikely(t == NULL))
    my_error(ER_OUTOFMEMORY, MYF(ME_FATALERROR),
             static_cast<int>(sizeof(T)));
  return t;
}
/// Same as above, for a single-argument constructor.
template<class T, class Arg> T * new_nothrow_w_my_error(Arg a)
{
  T * const t= new (std::nothrow) T(a);
  if (unlikely(t == NULL))
    my_error(ER_OUTOFMEMORY, MYF(ME_FATALERROR),
             static_cast<int>(sizeof(T)));
  return t;
}
938 
939 
940 bool Opt_trace_context::start(bool support_I_S_arg,
941  bool support_dbug_or_missing_priv,
942  bool end_marker_arg, bool one_line_arg,
943  long offset_arg, long limit_arg,
944  ulong max_mem_size_arg, ulonglong features_arg)
945 {
946  DBUG_ENTER("Opt_trace_context::start");
947 
948  if (I_S_disabled != 0)
949  {
950  DBUG_PRINT("opt", ("opt_trace is already disabled"));
951  support_I_S_arg= false;
952  }
953 
954  /*
955  Decide on optimizations possible to realize the requested support.
956  If I_S or debug output is requested, we need to create an Opt_trace_stmt.
957  Same if we should support calls to Opt_trace_context::missing_privilege(),
958  because that function requires an Opt_trace_stmt.
959  */
960  if (!support_I_S_arg && !support_dbug_or_missing_priv)
961  {
962  // The statement will not do tracing.
963  if (likely(pimpl == NULL) || pimpl->current_stmt_in_gen == NULL)
964  {
965  /*
966  This should be the most commonly taken branch in a release binary,
967  when the connection rarely has optimizer tracing runtime-enabled.
968  It's thus important that it's optimized: we can short-cut the creation
969  and starting of Opt_trace_stmt, unlike in the next "else" branch.
970  */
971  DBUG_RETURN(false);
972  }
973  /*
974  If we come here, there is a parent statement which has a trace.
975  Imagine that we don't create a trace for the child statement
976  here. Then trace structures of the child will be accidentally attached
977  to the parent's trace (as it is still 'current_stmt_in_gen', which
978  constructors of Opt_trace_struct will use); thus the child's trace
979  will be visible (as a chunk of the parent's trace). That would be
980  incorrect. To avoid this, we create a trace for the child but with I_S
981  output disabled; this changes 'current_stmt_in_gen', thus this child's
982  trace structures will be attached to the child's trace and thus not be
983  visible.
984  */
985  }
986 
987  DBUG_EXECUTE_IF("no_new_opt_trace_stmt", DBUG_ASSERT(0););
988 
989  if (pimpl == NULL &&
990  ((pimpl= new_nothrow_w_my_error<Opt_trace_context_impl>()) == NULL))
991  DBUG_RETURN(true);
992 
993  /*
994  If tracing is disabled by some caller, then don't change settings (offset
995  etc). Doing otherwise would surely bring a problem.
996  */
997  if (I_S_disabled == 0)
998  {
999  /*
1000  Here we allow a stored routine's sub-statement to enable/disable
1001  tracing, or change settings. Thus in a stored routine's body, there can
1002  be some 'SET OPTIMIZER_TRACE="enabled=[on|off]"' to trace only certain
1003  sub-statements.
1004  */
1005  pimpl->end_marker= end_marker_arg;
1006  pimpl->one_line= one_line_arg;
1007  pimpl->offset= offset_arg;
1008  pimpl->limit= limit_arg;
1009  pimpl->max_mem_size= max_mem_size_arg;
1010  // MISC always on
1011  pimpl->features= Opt_trace_context::feature_value(features_arg |
1012  Opt_trace_context::MISC);
1013  }
1014  if (support_I_S_arg && pimpl->offset >= 0)
1015  {
1016  /* If outside the offset/limit window, no need to support I_S */
1017  if (pimpl->since_offset_0 < pimpl->offset)
1018  {
1019  DBUG_PRINT("opt", ("disabled: since_offset_0(%ld) < offset(%ld)",
1020  pimpl->since_offset_0, pimpl->offset));
1021  support_I_S_arg= false;
1022  }
1023  else if (pimpl->since_offset_0 >= (pimpl->offset + pimpl->limit))
1024  {
1025  DBUG_PRINT("opt", ("disabled: since_offset_0(%ld) >="
1026  " offset(%ld) + limit(%ld)",
1027  pimpl->since_offset_0, pimpl->offset, pimpl->limit));
1028  support_I_S_arg= false;
1029  }
1030  pimpl->since_offset_0++;
1031  }
1032  {
1033  /*
1034  We don't allocate it in THD's MEM_ROOT as it must survive until a next
1035  statement (SELECT) reads the trace.
1036  */
1037  Opt_trace_stmt *stmt= new_nothrow_w_my_error<Opt_trace_stmt>(this);
1038 
1039  DBUG_PRINT("opt",("new stmt %p support_I_S %d", stmt, support_I_S_arg));
1040 
1041  if (unlikely(stmt == NULL ||
1042  pimpl->stack_of_current_stmts
1043  .append(pimpl->current_stmt_in_gen)))
1044  goto err; // append() above called my_error()
1045 
1046  /*
1047  If sending only to DBUG, don't show to the user.
1048  Same if tracing was temporarily disabled at higher layers with
1049  Opt_trace_disable_I_S.
1050  So we just link it to the 'del' list for purging when ended.
1051  */
1053  if (support_I_S_arg)
1054  list= &pimpl->all_stmts_for_I_S;
1055  else
1056  {
1057  stmt->disable_I_S(); // no need to fill a not-shown JSON trace
1058  list= &pimpl->all_stmts_to_del;
1059  }
1060 
1061  if (unlikely(list->append(stmt)))
1062  goto err;
1063 
1064  pimpl->current_stmt_in_gen= stmt;
1065 
1066  // As we just added one trace, maybe the previous ones are unneeded now
1067  purge_stmts(false);
1068  // This purge may have freed space, compute max allowed size:
1069  stmt->set_allowed_mem_size(allowed_mem_size_for_current_stmt());
1070  DBUG_RETURN(false);
1071 err:
1072  delete stmt;
1073  DBUG_ASSERT(0);
1074  DBUG_RETURN(true);
1075  }
1076 }
1077 
1078 
/**
  Ends the current statement's trace (if any): pops the parent trace
  back as current, recomputes the parent's memory allowance, and purges
  traces which are no longer needed.
*/
void Opt_trace_context::end()
{
  DBUG_ASSERT(I_S_disabled >= 0);
  // pimpl is NULL when start() short-cut and created no trace.
  if (likely(pimpl == NULL))
    return;
  if (pimpl->current_stmt_in_gen != NULL)
  {
    pimpl->current_stmt_in_gen->end();
    /*
      pimpl was constructed with current_stmt_in_gen=NULL which was pushed in
      'start()'. So this NULL is in the array, back() is safe.
    */
    Opt_trace_stmt * const parent= *(pimpl->stack_of_current_stmts.back());
    pimpl->stack_of_current_stmts.pop();
    pimpl->current_stmt_in_gen= parent;
    if (parent != NULL)
    {
      /*
        Parent regains control, now it needs to be told that its child has
        used space, and thus parent's allowance has shrunk.
      */
      parent->set_allowed_mem_size(allowed_mem_size_for_current_stmt());
    }
    /*
      Purge again. Indeed when we are here, compared to the previous start()
      we have one more ended trace, so can potentially free more. Consider
      offset=-1 and:
         top_stmt, started
           sub_stmt, starts: can't free top_stmt as it is not ended yet
           sub_stmt, ends: won't free sub_stmt (as user will want to see it),
             can't free top_stmt as not ended yet
         top_stmt, continued
         top_stmt, ends: free top_stmt as it's not last and is ended, keep
           only sub_stmt.
      Still the purge is done in ::start() too, as an optimization, for this
      case:
         sub_stmt, started
         sub_stmt, ended
         sub_stmt, starts: can free above sub_stmt, will save memory compared
           to free-ing it only when the new sub_stmt ends.
    */
    purge_stmts(false);
  }
  else
    DBUG_ASSERT(pimpl->stack_of_current_stmts.elements() == 0);
}
1125 
1126 
1127 bool Opt_trace_context::support_I_S() const
1128 {
1129  return (pimpl != NULL) && (pimpl->current_stmt_in_gen != NULL) &&
1130  pimpl->current_stmt_in_gen->support_I_S();
1131 }
1132 
1133 
/**
  Removes traces which are no longer wanted, in two phases:
  1) traces falling outside the offset/limit window (or all traces when
     @c purge_all is true) are moved from the I_S-visible list
     (all_stmts_for_I_S) to a "to be freed" list (all_stmts_to_del);
  2) traces on the "to be freed" list which have ended are delete'd.
  Phase 2 is deferred for unfinished traces; see the long comment below
  for why freeing an unfinished trace would be a use-after-free.

  @param purge_all  if true, every trace is marked for deletion; if
                    false, a negative @c offset means "keep only the
                    newest -offset traces" and older ones are purged
                    (the offset>=0 case is handled in start(), not here).
*/
void Opt_trace_context::purge_stmts(bool purge_all)
{
  DBUG_ENTER("Opt_trace_context::purge_stmts");
  if (!purge_all && pimpl->offset >= 0)
  {
    /* This case is managed in @c Opt_trace_context::start() */
    DBUG_VOID_RETURN;
  }
  long idx;
  /*
    Start from the newest traces (array's end), scroll back in time. This
    direction is necessary, as we may delete elements from the array (assume
    purge_all=true and array has 2 elements and we traverse starting from
    index 0: cell 0 is deleted, making cell 1 become cell 0; index is
    incremented to 1, which is past the array's end, so break out of the loop:
    cell 0 (old cell 1) was not deleted, wrong).
  */
  for (idx= (pimpl->all_stmts_for_I_S.elements() - 1) ; idx >= 0 ; idx--)
  {
    if (!purge_all &&
        ((pimpl->all_stmts_for_I_S.elements() + pimpl->offset) <= idx))
    {
      /* OFFSET mandates that this trace should be kept; move to previous */
    }
    else
    {
      /*
        Remember to free it (as in @c free()) when possible. For now, make it
        invisible in OPTIMIZER_TRACE table.
      */
      DBUG_EXECUTE_IF("opt_trace_oom_in_purge",
                      DBUG_SET("+d,simulate_out_of_memory"););
      // append first, del second: the trace is never owned by zero lists.
      if (likely(!pimpl->all_stmts_to_del
                 .append(pimpl->all_stmts_for_I_S.at(idx))))
        pimpl->all_stmts_for_I_S.del(idx);
      else
      {
        /*
          OOM. Cannot purge. Which at worse should only break the
          offset/limit feature (the trace will accidentally still show up in
          the OPTIMIZER_TRACE table). append() above has called my_error().
        */
      }
      DBUG_EXECUTE_IF("opt_trace_oom_in_purge",
                      DBUG_SET("-d,simulate_out_of_memory"););
    }
  }
  /* Examine list of "to be freed" traces and free what can be */
  for (idx= (pimpl->all_stmts_to_del.elements() - 1) ; idx >= 0 ; idx--)
  {
    Opt_trace_stmt *stmt= pimpl->all_stmts_to_del.at(idx);
#ifndef DBUG_OFF
    bool skip_del= false;
    DBUG_EXECUTE_IF("opt_trace_oom_in_purge", skip_del= true;);
#else
    const bool skip_del= false;
#endif
    if (!stmt->has_ended() || skip_del)
    {
      /*
        This trace is not finished, freeing it now would lead to use of
        freed memory if a structure is later added to it. This would be
        possible: assume OFFSET=-1 and we have
        CALL statement starts executing
          create its trace (call it "trace #1")
          add structure to trace #1
          add structure to trace #1
          First sub-statement executing
            create its trace (call it "trace #2")
            from then on, trace #1 is not needed, free() it
            add structure to trace #2
            add structure to trace #2
          First sub-statement ends
          add structure to trace #1 - oops, adding to a free()d trace!
        So if a trace is not finished, we will wait until it is and
        re-consider it then (which is why this function is called in @c
        Opt_trace_stmt::end() too).

        In unit testing, to simulate OOM, we let the list grow so
        that it consumes its pre-allocated cells and finally requires a
        (failing) allocation.
      */
    }
    else
    {
      pimpl->all_stmts_to_del.del(idx);
      delete stmt;
    }
  }
  DBUG_VOID_RETURN;
}
1225 
1226 
1227 size_t Opt_trace_context::allowed_mem_size_for_current_stmt() const
1228 {
1229  size_t mem_size= 0;
1230  int idx;
1231  for (idx= (pimpl->all_stmts_for_I_S.elements() - 1) ; idx >= 0 ; idx--)
1232  {
1233  const Opt_trace_stmt *stmt= pimpl->all_stmts_for_I_S.at(idx);
1234  mem_size+= stmt->alloced_length();
1235  }
1236  // Even to-be-deleted traces use memory, so consider them in sum
1237  for (idx= (pimpl->all_stmts_to_del.elements() - 1) ; idx >= 0 ; idx--)
1238  {
1239  const Opt_trace_stmt *stmt= pimpl->all_stmts_to_del.at(idx);
1240  mem_size+= stmt->alloced_length();
1241  }
1242  /* The current statement is in exactly one of the two lists above */
1243  mem_size-= pimpl->current_stmt_in_gen->alloced_length();
1244  size_t rc= (mem_size <= pimpl->max_mem_size) ?
1245  (pimpl->max_mem_size - mem_size) : 0;
1246  DBUG_PRINT("opt", ("rc %llu max_mem_size %llu",
1247  (ulonglong)rc, (ulonglong)pimpl->max_mem_size));
1248  return rc;
1249 }
1250 
1251 
/**
  Records the text of the query being traced, forwarding to the
  statement trace currently in generation.
  NOTE(review): no NULL check on current_stmt_in_gen here — presumably
  callers only invoke this while a trace is active; confirm against callers.

  @param query    query text
  @param length   query text's length in bytes
  @param charset  character set of the query text
*/
void Opt_trace_context::set_query(const char *query, size_t length,
                                  const CHARSET_INFO *charset)
{
  pimpl->current_stmt_in_gen->set_query(query, length, charset);
}
1257 
1258 
1259 void Opt_trace_context::reset()
1260 {
1261  if (pimpl == NULL)
1262  return;
1263  purge_stmts(true);
1264  pimpl->since_offset_0= 0;
1265 }
1266 
1267 
1268 void Opt_trace_context::
1269 Opt_trace_context_impl::disable_I_S_for_this_and_children()
1270 {
1271  if (current_stmt_in_gen != NULL)
1272  current_stmt_in_gen->disable_I_S();
1273 }
1274 
1275 
1276 void Opt_trace_context::Opt_trace_context_impl::restore_I_S()
1277 {
1278  if (current_stmt_in_gen != NULL)
1279  current_stmt_in_gen->restore_I_S();
1280 }
1281 
1282 
/**
  Marks the current statement's trace as hitting a missing privilege,
  which disables its I_S output (see Opt_trace_stmt::missing_privilege()).
*/
void Opt_trace_context::missing_privilege()
{
  /*
    By storing the 'missing_priv' mark in Opt_trace_stmt instead of in
    Opt_trace_context we get automatic re-enabling of I_S when the stmt ends,
    Opt_trace_stmt::missing_priv being the "memory" of where I_S has been
    disabled.
    Storing in Opt_trace_context would require an external memory (probably a
    RAII object), which would not be possible in
    TABLE_LIST::prepare_security(), where I_S must be disabled even after the
    end of that function - so RAII would not work.

    Which is why this function needs an existing current_stmt_in_gen.
  */
  pimpl->current_stmt_in_gen->missing_privilege();
}
1299 
1300 
1301 const Opt_trace_stmt
1302 *Opt_trace_context::get_next_stmt_for_I_S(long *got_so_far) const
1303 {
1304  const Opt_trace_stmt *p;
1305  if ((pimpl == NULL) ||
1306  (*got_so_far >= pimpl->limit) ||
1307  (*got_so_far >= pimpl->all_stmts_for_I_S.elements()))
1308  p= NULL;
1309  else
1310  {
1311  p= pimpl->all_stmts_for_I_S.at(*got_so_far);
1312  DBUG_ASSERT(p != NULL);
1313  (*got_so_far)++;
1314  }
1315  return p;
1316 }
1317 
1318 
1319 // Implementation of class Opt_trace_iterator
1320 
/**
  Builds an iterator over the traces stored in @c ctx_arg and positions
  it on the first available trace (cursor is NULL if there is none).

  @param ctx_arg  trace context to iterate over
*/
Opt_trace_iterator::Opt_trace_iterator(Opt_trace_context *ctx_arg) :
  ctx(ctx_arg), row_count(0)
{
  next();
}
1326 
/**
  Advances to the next trace; sets cursor to NULL when the context's
  offset/limit window is exhausted (see get_next_stmt_for_I_S()).
*/
void Opt_trace_iterator::next()
{
  cursor= ctx->get_next_stmt_for_I_S(&row_count);
}
1331 
1332 
/**
  Copies the current trace's display data into @p info.
  NOTE(review): cursor is dereferenced unchecked — presumably callers
  test at_end() before calling; confirm against I_S table code.

  @param[out] info  filled with the current trace's information
*/
void Opt_trace_iterator::get_value(Opt_trace_info *info) const
{
  cursor->fill_info(info);
}
1337 
1338 #endif // OPTIMIZER_TRACE