MySQL 5.6.14 Source Code Document
mdl.cc
1 /* Copyright (c) 2007, 2013, Oracle and/or its affiliates. All rights reserved.
2 
3  This program is free software; you can redistribute it and/or modify
4  it under the terms of the GNU General Public License as published by
5  the Free Software Foundation; version 2 of the License.
6 
7  This program is distributed in the hope that it will be useful,
8  but WITHOUT ANY WARRANTY; without even the implied warranty of
9  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10  GNU General Public License for more details.
11 
12  You should have received a copy of the GNU General Public License
13  along with this program; if not, write to the Free Software Foundation,
14  51 Franklin Street, Suite 500, Boston, MA 02110-1335 USA */
15 
16 
17 #include "mdl.h"
18 #include "debug_sync.h"
19 #include "sql_array.h"
20 #include <hash.h>
21 #include <mysqld_error.h>
22 #include <mysql/plugin.h>
23 #include <mysql/service_thd_wait.h>
24 #include <mysql/psi/mysql_stage.h>
25 
26 #ifdef HAVE_PSI_INTERFACE
27 static PSI_mutex_key key_MDL_map_mutex;
28 static PSI_mutex_key key_MDL_wait_LOCK_wait_status;
29 
30 static PSI_mutex_info all_mdl_mutexes[]=
31 {
32  { &key_MDL_map_mutex, "MDL_map::mutex", 0},
33  { &key_MDL_wait_LOCK_wait_status, "MDL_wait::LOCK_wait_status", 0}
34 };
35 
36 static PSI_rwlock_key key_MDL_lock_rwlock;
37 static PSI_rwlock_key key_MDL_context_LOCK_waiting_for;
38 
39 static PSI_rwlock_info all_mdl_rwlocks[]=
40 {
41  { &key_MDL_lock_rwlock, "MDL_lock::rwlock", 0},
42  { &key_MDL_context_LOCK_waiting_for, "MDL_context::LOCK_waiting_for", 0}
43 };
44 
45 static PSI_cond_key key_MDL_wait_COND_wait_status;
46 
47 static PSI_cond_info all_mdl_conds[]=
48 {
49  { &key_MDL_wait_COND_wait_status, "MDL_wait::COND_wait_status", 0}
50 };
51 
56 static void init_mdl_psi_keys(void)
57 {
58  int count;
59 
60  count= array_elements(all_mdl_mutexes);
61  mysql_mutex_register("sql", all_mdl_mutexes, count);
62 
63  count= array_elements(all_mdl_rwlocks);
64  mysql_rwlock_register("sql", all_mdl_rwlocks, count);
65 
66  count= array_elements(all_mdl_conds);
67  mysql_cond_register("sql", all_mdl_conds, count);
68 
69  MDL_key::init_psi_keys();
70 }
71 #endif /* HAVE_PSI_INTERFACE */
72 
73 
79 PSI_stage_info MDL_key::m_namespace_to_wait_state_name[NAMESPACE_END]=
80 {
81  {0, "Waiting for global read lock", 0},
82  {0, "Waiting for schema metadata lock", 0},
83  {0, "Waiting for table metadata lock", 0},
84  {0, "Waiting for stored function metadata lock", 0},
85  {0, "Waiting for stored procedure metadata lock", 0},
86  {0, "Waiting for trigger metadata lock", 0},
87  {0, "Waiting for event metadata lock", 0},
88  {0, "Waiting for commit lock", 0}
89 };
90 
91 #ifdef HAVE_PSI_INTERFACE
92 void MDL_key::init_psi_keys()
93 {
94  int i;
95  int count;
96  PSI_stage_info *info __attribute__((unused));
97 
98  count= array_elements(MDL_key::m_namespace_to_wait_state_name);
99  for (i= 0; i<count; i++)
100  {
101  /* mysql_stage_register wants an array of pointers, registering 1 by 1. */
102  info= & MDL_key::m_namespace_to_wait_state_name[i];
103  mysql_stage_register("sql", &info, 1);
104  }
105 }
106 #endif
107 
108 static bool mdl_initialized= 0;
109 
110 
111 class MDL_object_lock;
112 class MDL_object_lock_cache_adapter;
113 
114 
121 class MDL_map_partition
122 {
123 public:
126  inline MDL_lock *find_or_insert(const MDL_key *mdl_key,
127  my_hash_value_type hash_value);
128  inline void remove(MDL_lock *lock);
129  my_hash_value_type get_key_hash(const MDL_key *mdl_key) const
130  {
131  return my_calc_hash(&m_locks, mdl_key->ptr(), mdl_key->length());
132  }
133 private:
134  bool move_from_hash_to_lock_mutex(MDL_lock *lock);
136  HASH m_locks;
137  /* Protects access to m_locks hash. */
138  mysql_mutex_t m_mutex;
154  typedef I_P_List<MDL_object_lock, MDL_object_lock_cache_adapter,
155  I_P_List_counter>
156  Lock_cache;
157  Lock_cache m_unused_locks_cache;
158 };
159 
160 
164 ulong mdl_locks_hash_partitions;
165 
172 class MDL_map
173 {
174 public:
175  void init();
176  void destroy();
177  MDL_lock *find_or_insert(const MDL_key *key);
178  void remove(MDL_lock *lock);
179 private:
180  Dynamic_array<MDL_map_partition *> m_partitions;
183  MDL_lock *m_global_lock;
185  MDL_lock *m_commit_lock;
186 };
187 
188 
194 class Deadlock_detection_visitor: public MDL_wait_for_graph_visitor
195 {
196 public:
197  Deadlock_detection_visitor(MDL_context *start_node_arg)
198  : m_start_node(start_node_arg),
199  m_victim(NULL),
200  m_current_search_depth(0),
201  m_found_deadlock(FALSE)
202  {}
203  virtual bool enter_node(MDL_context *node);
204  virtual void leave_node(MDL_context *node);
205 
206  virtual bool inspect_edge(MDL_context *dest);
207 
208  MDL_context *get_victim() const { return m_victim; }
209 private:
214  void opt_change_victim_to(MDL_context *new_victim);
215 private:
220  MDL_context *m_start_node;
222  MDL_context *m_victim;
229  uint m_current_search_depth;
231  bool m_found_deadlock;
245  static const uint MAX_SEARCH_DEPTH= 32;
246 };
247 
248 
261 bool Deadlock_detection_visitor::enter_node(MDL_context *node)
262 {
263  m_found_deadlock= ++m_current_search_depth >= MAX_SEARCH_DEPTH;
264  if (m_found_deadlock)
265  {
266  DBUG_ASSERT(! m_victim);
267  opt_change_victim_to(node);
268  }
269  return m_found_deadlock;
270 }
271 
272 
281 void Deadlock_detection_visitor::leave_node(MDL_context *node)
282 {
283  --m_current_search_depth;
284  if (m_found_deadlock)
285  opt_change_victim_to(node);
286 }
287 
288 
296 bool Deadlock_detection_visitor::inspect_edge(MDL_context *node)
297 {
298  m_found_deadlock= node == m_start_node;
299  return m_found_deadlock;
300 }
301 
302 
311 void
312 Deadlock_detection_visitor::opt_change_victim_to(MDL_context *new_victim)
313 {
314  if (m_victim == NULL ||
315  m_victim->get_deadlock_weight() >= new_victim->get_deadlock_weight())
316  {
317  /* Swap victims, unlock the old one. */
318  MDL_context *tmp= m_victim;
319  m_victim= new_victim;
320  m_victim->lock_deadlock_victim();
321  if (tmp)
322  tmp->unlock_deadlock_victim();
323  }
324 }
325 
326 
332 #define MDL_BIT(A) static_cast<MDL_lock::bitmap_t>(1U << A)
333 
345 class MDL_lock
346 {
347 public:
348  typedef unsigned short bitmap_t;
349 
350  class Ticket_list
351  {
352  public:
353  typedef I_P_List<MDL_ticket,
354  I_P_List_adapter<MDL_ticket,
355  &MDL_ticket::next_in_lock,
356  &MDL_ticket::prev_in_lock>,
357  I_P_List_null_counter,
358  I_P_List_fast_push_back<MDL_ticket> >
359  List;
360  operator const List &() const { return m_list; }
361  Ticket_list() :m_bitmap(0) {}
362 
363  void add_ticket(MDL_ticket *ticket);
364  void remove_ticket(MDL_ticket *ticket);
365  bool is_empty() const { return m_list.is_empty(); }
366  bitmap_t bitmap() const { return m_bitmap; }
367  private:
368  void clear_bit_if_not_in_list(enum_mdl_type type);
369  private:
371  List m_list;
373  bitmap_t m_bitmap;
374  };
375 
376  typedef Ticket_list::List::Iterator Ticket_iterator;
377 
378 public:
379  /** The key of the object (data) being protected. */
380  MDL_key key;
381  /** Read-write lock protecting this lock context. */
382  mysql_prlock_t m_rwlock;
415 
416  bool is_empty() const
417  {
418  return (m_granted.is_empty() && m_waiting.is_empty());
419  }
420 
421  virtual const bitmap_t *incompatible_granted_types_bitmap() const = 0;
422  virtual const bitmap_t *incompatible_waiting_types_bitmap() const = 0;
423 
424  bool has_pending_conflicting_lock(enum_mdl_type type);
425 
426  bool can_grant_lock(enum_mdl_type type, MDL_context *requestor_ctx,
427  bool ignore_lock_priority) const;
428 
429  inline static MDL_lock *create(const MDL_key *key,
430  MDL_map_partition *map_part);
431 
432  void reschedule_waiters();
433 
434  void remove_ticket(Ticket_list MDL_lock::*queue, MDL_ticket *ticket);
435 
436  bool visit_subgraph(MDL_ticket *waiting_ticket,
437  MDL_wait_for_graph_visitor *gvisitor);
438 
439  virtual bool needs_notification(const MDL_ticket *ticket) const = 0;
440  virtual void notify_conflicting_locks(MDL_context *ctx) = 0;
441 
442  virtual bitmap_t hog_lock_types_bitmap() const = 0;
443 
444  /** List of granted tickets for this lock. */
445  Ticket_list m_granted;
446  /** Tickets for contexts waiting to acquire a lock. */
447  Ticket_list m_waiting;
448 
449  /**
450  Number of times high priority lock requests have been granted in a row
451  while lower priority lock requests were waiting.
452  */
453  ulong m_hog_lock_count;
454 
455 public:
456 
457  MDL_lock(const MDL_key *key_arg, MDL_map_partition *map_part)
458  : key(key_arg),
459  m_hog_lock_count(0),
460  m_ref_usage(0),
461  m_ref_release(0),
462  m_is_destroyed(FALSE),
463  m_version(0),
464  m_map_part(map_part)
465  {
466  mysql_prlock_init(key_MDL_lock_rwlock, &m_rwlock);
467  }
468 
469  virtual ~MDL_lock()
470  {
472  }
473  inline static void destroy(MDL_lock *lock);
474 public:
496  uint m_ref_usage;
497  uint m_ref_release;
498  bool m_is_destroyed;
514  ulonglong m_version;
517  /** Partition of MDL_map collection where the lock is stored. */
518  MDL_map_partition *m_map_part;
519 };
520 
521 
528 class MDL_scoped_lock : public MDL_lock
529 {
530 public:
531  MDL_scoped_lock(const MDL_key *key_arg, MDL_map_partition *map_part)
532  : MDL_lock(key_arg, map_part)
533  { }
534 
535  virtual const bitmap_t *incompatible_granted_types_bitmap() const
536  {
537  return m_granted_incompatible;
538  }
539  virtual const bitmap_t *incompatible_waiting_types_bitmap() const
540  {
541  return m_waiting_incompatible;
542  }
543  virtual bool needs_notification(const MDL_ticket *ticket) const
544  {
545  return (ticket->get_type() == MDL_SHARED);
546  }
547  virtual void notify_conflicting_locks(MDL_context *ctx);
548 
549  /*
550  In scoped locks, only an IX lock request can starve because of X/S locks.
551  But that is a practically very rare case, so just return 0 from this function.
552  */
553  virtual bitmap_t hog_lock_types_bitmap() const
554  {
555  return 0;
556  }
557 
558 private:
559  static const bitmap_t m_granted_incompatible[MDL_TYPE_END];
560  static const bitmap_t m_waiting_incompatible[MDL_TYPE_END];
561 };
562 
563 
569 class MDL_object_lock : public MDL_lock
570 {
571 public:
572  MDL_object_lock(const MDL_key *key_arg, MDL_map_partition *map_part)
573  : MDL_lock(key_arg, map_part)
574  { }
575 
580  void reset(const MDL_key *new_key)
581  {
582  /* We need to change only object's key. */
583  key.mdl_key_init(new_key);
584  /* m_granted and m_waiting should be already in the empty/initial state. */
585  DBUG_ASSERT(is_empty());
586  /* Object should not be marked as destroyed. */
587  DBUG_ASSERT(! m_is_destroyed);
588  /*
589  Values of the rest of the fields should be preserved between old and
590  new versions of the object. E.g., m_version and m_ref_usage/release
591  should be kept intact to properly handle possible remaining references
592  to the old version of the object.
593  */
594  }
595 
596  virtual const bitmap_t *incompatible_granted_types_bitmap() const
597  {
598  return m_granted_incompatible;
599  }
600  virtual const bitmap_t *incompatible_waiting_types_bitmap() const
601  {
602  return m_waiting_incompatible;
603  }
604  virtual bool needs_notification(const MDL_ticket *ticket) const
605  {
606  return (ticket->get_type() >= MDL_SHARED_NO_WRITE);
607  }
608  virtual void notify_conflicting_locks(MDL_context *ctx);
609 
610  /*
611  To prevent starvation, these lock types are granted only
612  max_write_lock_count times in a row while other lock types are
613  waiting.
614  */
615  virtual bitmap_t hog_lock_types_bitmap() const
616  {
617  return (MDL_BIT(MDL_SHARED_NO_WRITE) |
618  MDL_BIT(MDL_SHARED_NO_READ_WRITE) |
619  MDL_BIT(MDL_EXCLUSIVE));
620  }
621 
622 private:
623  static const bitmap_t m_granted_incompatible[MDL_TYPE_END];
624  static const bitmap_t m_waiting_incompatible[MDL_TYPE_END];
625 
626 public:
628  MDL_object_lock *next_in_cache, **prev_in_cache;
629 };
630 
631 
635 class MDL_object_lock_cache_adapter :
636  public I_P_List_adapter<MDL_object_lock, &MDL_object_lock::next_in_cache,
637  &MDL_object_lock::prev_in_cache>
638 {
639 };
640 
641 
642 static MDL_map mdl_locks;
646 ulong mdl_locks_cache_size;
647 
648 
649 extern "C"
650 {
651 static uchar *
652 mdl_locks_key(const uchar *record, size_t *length,
653  my_bool not_used __attribute__((unused)))
654 {
655  MDL_lock *lock=(MDL_lock*) record;
656  *length= lock->key.length();
657  return (uchar*) lock->key.ptr();
658 }
659 } /* extern "C" */
660 
661 
673 void mdl_init()
674 {
675  DBUG_ASSERT(! mdl_initialized);
676  mdl_initialized= TRUE;
677 
678 #ifdef HAVE_PSI_INTERFACE
679  init_mdl_psi_keys();
680 #endif
681 
682  mdl_locks.init();
683 }
684 
685 
693 void mdl_destroy()
694 {
695  if (mdl_initialized)
696  {
697  mdl_initialized= FALSE;
698  mdl_locks.destroy();
699  }
700 }
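/*
  A minimal usage sketch of the subsystem lifecycle: mdl_init() is called
  once before any MDL_context is used and mdl_destroy() once on shutdown.
  Since mdl_destroy() checks mdl_initialized, calling it when initialization
  never happened is harmless.
*/
static void mdl_example_subsystem_lifecycle()
{
  mdl_init();        /* once, early during server start-up */
  /* ... sessions create MDL_context objects and acquire locks ... */
  mdl_destroy();     /* once, during shutdown */
}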
701 
702 
705 void MDL_map::init()
706 {
707  MDL_key global_lock_key(MDL_key::GLOBAL, "", "");
708  MDL_key commit_lock_key(MDL_key::COMMIT, "", "");
709 
710  m_global_lock= MDL_lock::create(&global_lock_key, NULL);
711  m_commit_lock= MDL_lock::create(&commit_lock_key, NULL);
712 
713  for (uint i= 0; i < mdl_locks_hash_partitions; i++)
714  {
715  MDL_map_partition *part= new (std::nothrow) MDL_map_partition();
716  m_partitions.append(part);
717  }
718 }
719 
720 
723 MDL_map_partition::MDL_map_partition()
724 {
725  mysql_mutex_init(key_MDL_map_mutex, &m_mutex, NULL);
726  my_hash_init(&m_locks, &my_charset_bin, 16 /* FIXME */, 0, 0,
727  mdl_locks_key, 0, 0);
728 };
729 
730 
736 void MDL_map::destroy()
737 {
738  MDL_lock::destroy(m_global_lock);
739  MDL_lock::destroy(m_commit_lock);
740 
741  while (m_partitions.elements() > 0)
742  {
743  MDL_map_partition *part= m_partitions.pop();
744  delete part;
745  }
746 }
747 
748 
754 MDL_map_partition::~MDL_map_partition()
755 {
756  DBUG_ASSERT(!m_locks.records);
757  mysql_mutex_destroy(&m_mutex);
758  my_hash_free(&m_locks);
759 
760  MDL_object_lock *lock;
761  while ((lock= m_unused_locks_cache.pop_front()))
762  MDL_lock::destroy(lock);
763 }
764 
765 
775 MDL_lock* MDL_map::find_or_insert(const MDL_key *mdl_key)
776 {
777  MDL_lock *lock;
778 
779  if (mdl_key->mdl_namespace() == MDL_key::GLOBAL ||
780  mdl_key->mdl_namespace() == MDL_key::COMMIT)
781  {
782  /*
783  Avoid locking any m_mutex when lock for GLOBAL or COMMIT namespace is
784  requested. Return pointer to pre-allocated MDL_lock instance instead.
785  Such an optimization allows us to save one mutex lock/unlock for any
786  statement changing data.
787 
788  It works since these namespaces contain only one element so keys
789  for them look like '<namespace-id>\0\0'.
790  */
791  DBUG_ASSERT(mdl_key->length() == 3);
792 
793  lock= (mdl_key->mdl_namespace() == MDL_key::GLOBAL) ? m_global_lock :
794  m_commit_lock;
795 
796  mysql_prlock_wrlock(&lock->m_rwlock);
797 
798  return lock;
799  }
800 
801  my_hash_value_type hash_value= m_partitions.at(0)->get_key_hash(mdl_key);
802  uint part_id= hash_value % mdl_locks_hash_partitions;
803  MDL_map_partition *part= m_partitions.at(part_id);
804 
805  return part->find_or_insert(mdl_key, hash_value);
806 }
807 
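/*
  A minimal sketch of how a key is routed to one of the
  mdl_locks_hash_partitions partitions: the hash value is computed once in
  MDL_map::find_or_insert() above and then passed down to
  MDL_map_partition::find_or_insert(), so each lookup hashes the key only
  once.
*/
static inline uint mdl_example_partition_id(my_hash_value_type hash_value)
{
  /* Same arithmetic as in MDL_map::find_or_insert() above. */
  return (uint) (hash_value % mdl_locks_hash_partitions);
}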
808 
818 MDL_lock* MDL_map_partition::find_or_insert(const MDL_key *mdl_key,
819  my_hash_value_type hash_value)
820 {
821  MDL_lock *lock;
822 
823 retry:
824  mysql_mutex_lock(&m_mutex);
825  if (!(lock= (MDL_lock*) my_hash_search_using_hash_value(&m_locks,
826  hash_value,
827  mdl_key->ptr(),
828  mdl_key->length())))
829  {
830  MDL_object_lock *unused_lock= NULL;
831 
832  /*
833  No lock object found so we need to create a new one
834  or reuse an existing unused object.
835  */
836  if (mdl_key->mdl_namespace() != MDL_key::SCHEMA &&
837  m_unused_locks_cache.elements())
838  {
839  /*
840  We need a MDL_object_lock type of object and the unused objects
841  cache has some. Get the first object from the cache and set a new
842  key for it.
843  */
844  DBUG_ASSERT(mdl_key->mdl_namespace() != MDL_key::GLOBAL &&
845  mdl_key->mdl_namespace() != MDL_key::COMMIT);
846 
847  unused_lock= m_unused_locks_cache.pop_front();
848  unused_lock->reset(mdl_key);
849 
850  lock= unused_lock;
851  }
852  else
853  {
854  lock= MDL_lock::create(mdl_key, this);
855  }
856 
857  if (!lock || my_hash_insert(&m_locks, (uchar*)lock))
858  {
859  if (unused_lock)
860  {
861  /*
862  Note that we can't easily destroy an object from cache here as it
863  still might be referenced by other threads. So we simply put it
864  back into the cache.
865  */
866  m_unused_locks_cache.push_front(unused_lock);
867  }
868  else
869  {
870  MDL_lock::destroy(lock);
871  }
872  mysql_mutex_unlock(&m_mutex);
873  return NULL;
874  }
875  }
876 
877  if (move_from_hash_to_lock_mutex(lock))
878  goto retry;
879 
880  return lock;
881 }
882 
883 
894 bool MDL_map_partition::move_from_hash_to_lock_mutex(MDL_lock *lock)
895 {
896  ulonglong version;
897 
898  DBUG_ASSERT(! lock->m_is_destroyed);
899  mysql_mutex_assert_owner(&m_mutex);
900 
901  /*
902  We increment m_ref_usage which is a reference counter protected by
903  MDL_map_partition::m_mutex under the condition it is present in the hash
904  and m_is_destroyed is FALSE.
905  */
906  lock->m_ref_usage++;
907  /* Read value of the version counter under protection of m_mutex lock. */
908  version= lock->m_version;
909  mysql_mutex_unlock(&m_mutex);
910 
911  mysql_prlock_wrlock(&lock->m_rwlock);
912  lock->m_ref_release++;
913 
914  if (unlikely(lock->m_version != version))
915  {
916  /*
917  If the current value of version differs from one that was read while
918  we held m_mutex mutex, this MDL_lock object was moved to the unused
919  objects list or destroyed while we held no locks.
920  We should retry our search. But first we should destroy the MDL_lock
921  object if necessary.
922  */
923  if (unlikely(lock->m_is_destroyed))
924  {
925  /*
926  Object was released while we held no locks; we need to
927  release it if no one else holds references to it, while our own
928  reference count ensured that the object as such hasn't had
929  its memory released yet. We can also safely compare
930  m_ref_usage and m_ref_release since the object is no longer
931  present in the hash (or unused objects list) so no one will
932  be able to find it and increment m_ref_usage anymore.
933  */
934  uint ref_usage= lock->m_ref_usage;
935  uint ref_release= lock->m_ref_release;
936  mysql_prlock_unlock(&lock->m_rwlock);
937  if (ref_usage == ref_release)
938  MDL_lock::destroy(lock);
939  }
940  else
941  {
942  /*
943  Object was not destroyed but its version has changed.
944  This means that it was moved to the unused objects list
945  (and even might be already re-used). So now it might
946  correspond to a different key, therefore we should simply
947  retry our search.
948  */
949  mysql_prlock_unlock(&lock->m_rwlock);
950  }
951  return TRUE;
952  }
953  return FALSE;
954 }
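/*
  A condensed view of the hand-over protocol implemented by
  MDL_map_partition::find_or_insert() and move_from_hash_to_lock_mutex()
  above (the partition mutex is held on entry):

    lock->m_ref_usage++;                  pin the object under m_mutex
    version= lock->m_version;             remember the version under m_mutex
    mysql_mutex_unlock(&m_mutex);         moment with no locks held
    mysql_prlock_wrlock(&lock->m_rwlock);
    lock->m_ref_release++;                un-pin under m_rwlock
    if (lock->m_version != version)       object was reused or destroyed;
      retry the hash lookup               destroy it first if we hold the
                                          last reference and m_is_destroyed
                                          is set

  The version check is what makes it safe to release the partition mutex
  before MDL_lock::m_rwlock has been acquired.
*/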
955 
956 
963 void MDL_map::remove(MDL_lock *lock)
964 {
965  if (lock->key.mdl_namespace() == MDL_key::GLOBAL ||
966  lock->key.mdl_namespace() == MDL_key::COMMIT)
967  {
968  /*
969  Never destroy pre-allocated MDL_lock objects for GLOBAL and
970  COMMIT namespaces.
971  */
972  mysql_prlock_unlock(&lock->m_rwlock);
973  return;
974  }
975 
976  lock->m_map_part->remove(lock);
977 }
978 
979 
986 void MDL_map_partition::remove(MDL_lock *lock)
987 {
988  mysql_mutex_lock(&m_mutex);
989  my_hash_delete(&m_locks, (uchar*) lock);
990  /*
991  To let threads holding references to the MDL_lock object know that it was
992  moved to the list of unused objects or destroyed, we increment the version
993  counter under protection of both MDL_map_partition::m_mutex and
994  MDL_lock::m_rwlock locks. This allows us to read the version value while
995  having either one of those locks.
996  */
997  lock->m_version++;
998 
999  if ((lock->key.mdl_namespace() != MDL_key::SCHEMA) &&
1000  (m_unused_locks_cache.elements() <
1001  mdl_locks_cache_size/mdl_locks_hash_partitions))
1002  {
1003  /*
1004  This is an object of MDL_object_lock type and the cache of unused
1005  objects has not reached its maximum size yet. So instead of destroying
1006  object we move it to the list of unused objects to allow its later
1007  re-use with possibly different key. Any threads holding references to
1008  this object (owning MDL_map_partition::m_mutex or MDL_lock::m_rwlock)
1009  will notice this thanks to the fact that we have changed the
1010  MDL_lock::m_version counter.
1011  */
1012  DBUG_ASSERT(lock->key.mdl_namespace() != MDL_key::GLOBAL &&
1013  lock->key.mdl_namespace() != MDL_key::COMMIT);
1014 
1015  m_unused_locks_cache.push_front((MDL_object_lock*)lock);
1016  mysql_mutex_unlock(&m_mutex);
1017  mysql_prlock_unlock(&lock->m_rwlock);
1018  }
1019  else
1020  {
1021  /*
1022  Destroy the MDL_lock object, but only if no one still holds a
1023  reference to it; any thread that does still hold a reference
1024  has the responsibility to release it.
1025 
1026  Setting of m_is_destroyed to TRUE while holding _both_
1027  MDL_map_partition::m_mutex and MDL_lock::m_rwlock mutexes transfers
1028  the protection of m_ref_usage from MDL_map_partition::m_mutex to
1029  MDL_lock::m_rwlock while removal of the object from the hash
1030  (and cache of unused objects) makes it read-only. Therefore
1031  whoever acquires MDL_lock::m_rwlock next will see the most up
1032  to date version of m_ref_usage.
1033 
1034  This means that when m_is_destroyed is TRUE and we hold the
1035  MDL_lock::m_rwlock we can safely read the m_ref_usage
1036  member.
1037  */
1038  uint ref_usage, ref_release;
1039 
1040  lock->m_is_destroyed= TRUE;
1041  ref_usage= lock->m_ref_usage;
1042  ref_release= lock->m_ref_release;
1043  mysql_mutex_unlock(&m_mutex);
1044  mysql_prlock_unlock(&lock->m_rwlock);
1045  if (ref_usage == ref_release)
1046  MDL_lock::destroy(lock);
1047  }
1048 }
1049 
1050 
1057 MDL_context::MDL_context()
1058  :
1059  m_owner(NULL),
1060  m_needs_thr_lock_abort(FALSE),
1061  m_waiting_for(NULL)
1062 {
1063  mysql_prlock_init(key_MDL_context_LOCK_waiting_for, &m_LOCK_waiting_for);
1064 }
1065 
1066 
1079 void MDL_context::destroy()
1080 {
1081  DBUG_ASSERT(m_tickets[MDL_STATEMENT].is_empty());
1082  DBUG_ASSERT(m_tickets[MDL_TRANSACTION].is_empty());
1083  DBUG_ASSERT(m_tickets[MDL_EXPLICIT].is_empty());
1084 
1085  mysql_prlock_destroy(&m_LOCK_waiting_for);
1086 }
1087 
1088 
1109 void MDL_request::init(MDL_key::enum_mdl_namespace mdl_namespace,
1110  const char *db_arg,
1111  const char *name_arg,
1112  enum_mdl_type mdl_type_arg,
1113  enum_mdl_duration mdl_duration_arg)
1114 {
1115  key.mdl_key_init(mdl_namespace, db_arg, name_arg);
1116  type= mdl_type_arg;
1117  duration= mdl_duration_arg;
1118  ticket= NULL;
1119 }
1120 
1121 
1131 void MDL_request::init(const MDL_key *key_arg,
1132  enum_mdl_type mdl_type_arg,
1133  enum_mdl_duration mdl_duration_arg)
1134 {
1135  key.mdl_key_init(key_arg);
1136  type= mdl_type_arg;
1137  duration= mdl_duration_arg;
1138  ticket= NULL;
1139 }
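/*
  A minimal usage sketch for MDL_request::init(); the TABLE namespace and
  the "db"/"t1" names below are illustrative placeholders.
*/
static void mdl_example_init_table_request(MDL_request *request)
{
  request->init(MDL_key::TABLE, "db", "t1", MDL_SHARED_READ,
                MDL_TRANSACTION);
  /* request->ticket stays NULL until the lock is actually acquired. */
}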
1140 
1141 
1148 inline MDL_lock *MDL_lock::create(const MDL_key *mdl_key,
1149  MDL_map_partition *map_part)
1150 {
1151  switch (mdl_key->mdl_namespace())
1152  {
1153  case MDL_key::GLOBAL:
1154  case MDL_key::SCHEMA:
1155  case MDL_key::COMMIT:
1156  return new (std::nothrow) MDL_scoped_lock(mdl_key, map_part);
1157  default:
1158  return new (std::nothrow) MDL_object_lock(mdl_key, map_part);
1159  }
1160 }
1161 
1162 
1163 void MDL_lock::destroy(MDL_lock *lock)
1164 {
1165  delete lock;
1166 }
1167 
1168 
1177 MDL_ticket *MDL_ticket::create(MDL_context *ctx_arg, enum_mdl_type type_arg
1178 #ifndef DBUG_OFF
1179  , enum_mdl_duration duration_arg
1180 #endif
1181  )
1182 {
1183  return new (std::nothrow)
1184  MDL_ticket(ctx_arg, type_arg
1185 #ifndef DBUG_OFF
1186  , duration_arg
1187 #endif
1188  );
1189 }
1190 
1191 
1192 void MDL_ticket::destroy(MDL_ticket *ticket)
1193 {
1194  delete ticket;
1195 }
1196 
1197 
1205 uint MDL_ticket::get_deadlock_weight() const
1206 {
1207  return (m_lock->key.mdl_namespace() == MDL_key::GLOBAL ||
1208  m_type >= MDL_SHARED_UPGRADABLE ?
1209  DEADLOCK_WEIGHT_DDL : DEADLOCK_WEIGHT_DML);
1210 }
1211 
1212 
1215 MDL_wait::MDL_wait()
1216  :m_wait_status(EMPTY)
1217 {
1218  mysql_mutex_init(key_MDL_wait_LOCK_wait_status, &m_LOCK_wait_status, NULL);
1219  mysql_cond_init(key_MDL_wait_COND_wait_status, &m_COND_wait_status, NULL);
1220 }
1221 
1222 
1225 MDL_wait::~MDL_wait()
1226 {
1227  mysql_mutex_destroy(&m_LOCK_wait_status);
1228  mysql_cond_destroy(&m_COND_wait_status);
1229 }
1230 
1231 
1237 bool MDL_wait::set_status(enum_wait_status status_arg)
1238 {
1239  bool was_occupied= TRUE;
1240  mysql_mutex_lock(&m_LOCK_wait_status);
1241  if (m_wait_status == EMPTY)
1242  {
1243  was_occupied= FALSE;
1244  m_wait_status= status_arg;
1245  mysql_cond_signal(&m_COND_wait_status);
1246  }
1247  mysql_mutex_unlock(&m_LOCK_wait_status);
1248  return was_occupied;
1249 }
1250 
1251 
1254 MDL_wait::enum_wait_status MDL_wait::get_status()
1255 {
1256  enum_wait_status result;
1257  mysql_mutex_lock(&m_LOCK_wait_status);
1258  result= m_wait_status;
1259  mysql_mutex_unlock(&m_LOCK_wait_status);
1260  return result;
1261 }
1262 
1263 
1266 void MDL_wait::reset_status()
1267 {
1268  mysql_mutex_lock(&m_LOCK_wait_status);
1269  m_wait_status= EMPTY;
1270  mysql_mutex_unlock(&m_LOCK_wait_status);
1271 }
1272 
1273 
1288 MDL_wait::enum_wait_status
1289 MDL_wait::timed_wait(MDL_context_owner *owner, struct timespec *abs_timeout,
1290  bool set_status_on_timeout,
1291  const PSI_stage_info *wait_state_name)
1292 {
1293  PSI_stage_info old_stage;
1294  enum_wait_status result;
1295  int wait_result= 0;
1296 
1297  mysql_mutex_lock(&m_LOCK_wait_status);
1298 
1299  owner->ENTER_COND(&m_COND_wait_status, &m_LOCK_wait_status,
1300  wait_state_name, & old_stage);
1301  thd_wait_begin(NULL, THD_WAIT_META_DATA_LOCK);
1302  while (!m_wait_status && !owner->is_killed() &&
1303  wait_result != ETIMEDOUT && wait_result != ETIME)
1304  {
1305  wait_result= mysql_cond_timedwait(&m_COND_wait_status, &m_LOCK_wait_status,
1306  abs_timeout);
1307  }
1308  thd_wait_end(NULL);
1309 
1310  if (m_wait_status == EMPTY)
1311  {
1312  /*
1313  Wait has ended not due to a status being set from another
1314  thread but due to this connection/statement being killed or a
1315  time out.
1316  To avoid races, which may occur if another thread sets
1317  GRANTED status before the code which calls this method
1318  processes the abort/timeout, we assign the status under
1319  protection of the m_LOCK_wait_status, within the critical
1320  section. An exception is when set_status_on_timeout is
1321  false, which means that the caller intends to restart the
1322  wait.
1323  */
1324  if (owner->is_killed())
1325  m_wait_status= KILLED;
1326  else if (set_status_on_timeout)
1327  m_wait_status= TIMEOUT;
1328  }
1329  result= m_wait_status;
1330 
1331  owner->EXIT_COND(& old_stage);
1332 
1333  return result;
1334 }
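/*
  A condensed view of the wait-slot protocol built on MDL_wait: the waiter
  resets the slot before publishing its ticket and then sleeps in
  timed_wait(); whoever grants the lock (or picks the waiter as a deadlock
  victim) fills the slot exactly once with set_status().

    waiter                                 granter / deadlock detector
    ------                                 ---------------------------
    m_wait.reset_status();
    add ticket to MDL_lock::m_waiting;
    status= m_wait.timed_wait(...);        m_wait.set_status(GRANTED);
    if (status != MDL_wait::GRANTED)         ... or set_status(VICTIM);
      remove the ticket and report an error;

  set_status() returns TRUE when the slot was already occupied, which
  MDL_lock::reschedule_waiters() uses to keep such a request in the
  waiting queue.
*/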
1335 
1336 
1346 void MDL_lock::Ticket_list::clear_bit_if_not_in_list(enum_mdl_type type)
1347 {
1348  MDL_lock::Ticket_iterator it(m_list);
1349  const MDL_ticket *ticket;
1350 
1351  while ((ticket= it++))
1352  if (ticket->get_type() == type)
1353  return;
1354  m_bitmap&= ~ MDL_BIT(type);
1355 }
1356 
1357 
1363 void MDL_lock::Ticket_list::add_ticket(MDL_ticket *ticket)
1364 {
1365  /*
1366  Ticket being added to the list must have MDL_ticket::m_lock set,
1367  since for such tickets methods accessing this member might be
1368  called by other threads.
1369  */
1370  DBUG_ASSERT(ticket->get_lock());
1371  /*
1372  Add ticket to the *back* of the queue to ensure fairness
1373  among requests with the same priority.
1374  */
1375  m_list.push_back(ticket);
1376  m_bitmap|= MDL_BIT(ticket->get_type());
1377 }
1378 
1379 
1385 void MDL_lock::Ticket_list::remove_ticket(MDL_ticket *ticket)
1386 {
1387  m_list.remove(ticket);
1388  /*
1389  Check if waiting queue has another ticket with the same type as
1390  one which was removed. If there is no such ticket, i.e. we have
1391  removed the last ticket of a particular type, then we need to update
1392  the bitmap of waiting ticket types.
1393  Note that in the most common case, i.e. when a shared lock is removed
1394  from the waiting queue, we are likely to find a ticket of the same
1395  type early without performing full iteration through the list.
1396  So this method should not be too expensive.
1397  */
1398  clear_bit_if_not_in_list(ticket->get_type());
1399 }
1400 
1401 
1413 void MDL_lock::reschedule_waiters()
1414 {
1415  MDL_lock::Ticket_iterator it(m_waiting);
1416  MDL_ticket *ticket;
1417  bool skip_high_priority= false;
1418  bitmap_t hog_lock_types= hog_lock_types_bitmap();
1419 
1420  if (m_hog_lock_count >= max_write_lock_count)
1421  {
1422  /*
1423  If the number of successively granted high-prio, strong locks has exceeded
1424  max_write_lock_count, give way to low-prio, weak locks to avoid their
1425  starvation.
1426  */
1427 
1428  if ((m_waiting.bitmap() & ~hog_lock_types) != 0)
1429  {
1430  /*
1431  Even though normally when m_hog_lock_count is non-0 there is
1432  some pending low-prio lock, we still can encounter situation
1433  when m_hog_lock_count is non-0 and there are no pending low-prio
1434  locks. This, for example, can happen when a ticket for pending
1435  low-prio lock was removed from waiters list due to timeout,
1436  and reschedule_waiters() is called after that to update the
1437  waiters queue. m_hog_lock_count will be reset to 0 at the
1438  end of this call in such case.
1439 
1440  Note that it is not an issue if we fail to wake up any pending
1441  waiters for weak locks in the loop below. This would mean that
1442  all of them are either killed, timed out or chosen as a victim
1443  by deadlock resolver, but have not managed to remove ticket
1444  from the waiters list yet. After tickets will be removed from
1445  the waiters queue there will be another call to
1446  reschedule_waiters() with pending bitmap updated to reflect new
1447  state of waiters queue.
1448  */
1449  skip_high_priority= true;
1450  }
1451  }
1452 
1453  /*
1454  Find the first (and hence the oldest) waiting request which
1455  can be satisfied (taking into account priority). Grant lock to it.
1456  Repeat the process for the remainder of waiters.
1457  Note we don't need to re-start iteration from the head of the
1458  list after satisfying the first suitable request as in our case
1459  all compatible types of requests have the same priority.
1460 
1461  TODO/FIXME: We should:
1462  - Either switch to scheduling without priorities
1463  which will allow us to stop iteration through the
1464  list of waiters once we find the first ticket
1465  which can't be satisfied
1466  - Or implement some check using bitmaps which will
1467  allow us to stop iteration in cases when, e.g., we
1468  grant SNRW lock and there are no pending S or
1469  SH locks.
1470  */
1471  while ((ticket= it++))
1472  {
1473  /*
1474  Skip high-prio, strong locks if earlier we have decided to give way to
1475  low-prio, weaker locks.
1476  */
1477  if (skip_high_priority &&
1478  ((MDL_BIT(ticket->get_type()) & hog_lock_types) != 0))
1479  continue;
1480 
1481  if (can_grant_lock(ticket->get_type(), ticket->get_ctx(),
1482  skip_high_priority))
1483  {
1484  if (! ticket->get_ctx()->m_wait.set_status(MDL_wait::GRANTED))
1485  {
1486  /*
1487  Satisfy the found request by updating lock structures.
1488  It is OK to do so even after waking up the waiter since any
1489  session which tries to get any information about the state of
1490  this lock has to acquire MDL_lock::m_rwlock first and thus,
1491  when manages to do so, already sees an updated state of the
1492  MDL_lock object.
1493  */
1494  m_waiting.remove_ticket(ticket);
1495  m_granted.add_ticket(ticket);
1496 
1497  /*
1498  Increase counter of successively granted high-priority strong locks,
1499  if we have granted one.
1500  */
1501  if ((MDL_BIT(ticket->get_type()) & hog_lock_types) != 0)
1502  m_hog_lock_count++;
1503  }
1504  /*
1505  If we could not update the wait slot of the waiter,
1506  it can be due to the fact that its connection/statement was
1507  killed or has timed out (i.e. the slot is not empty).
1508  Since in all such cases the waiter assumes that the lock has
1509  not been granted, we should keep the request in the waiting
1510  queue and look for another request to reschedule.
1511  */
1512  }
1513  }
1514 
1515  if ((m_waiting.bitmap() & ~hog_lock_types) == 0)
1516  {
1517  /*
1518  Reset number of successively granted high-prio, strong locks
1519  if there are no pending low-prio, weak locks.
1520  This ensures:
1521  - That m_hog_lock_count is correctly reset after strong lock
1522  is released and weak locks are granted (or there are no
1523  other lock requests).
1524  - That situation when SNW lock is granted along with some SR
1525  locks, but SW locks are still blocked are handled correctly.
1526  - That m_hog_lock_count is zero in most cases when there are no pending
1527  weak locks (see the comment at the start of this method for an example
1528  of an exception). This allows us to save on checks at the start of this method.
1529  */
1530  m_hog_lock_count= 0;
1531  }
1532 }
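/*
  A worked example of the anti-starvation rule above: for object locks the
  "hog" types are SNW, SNRW and X. If such locks keep being granted while,
  say, an SR request is waiting, m_hog_lock_count grows by one per grant;
  once it reaches max_write_lock_count, skip_high_priority makes
  reschedule_waiters() bypass further hog requests and grant the waiting SR
  request first. The counter is reset to 0 as soon as no weak waiters
  remain.
*/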
1533 
1534 
1584 const MDL_lock::bitmap_t MDL_scoped_lock::m_granted_incompatible[MDL_TYPE_END] =
1585 {
1586  MDL_BIT(MDL_EXCLUSIVE) | MDL_BIT(MDL_SHARED),
1587  MDL_BIT(MDL_EXCLUSIVE) | MDL_BIT(MDL_INTENTION_EXCLUSIVE), 0, 0, 0, 0, 0, 0,
1588  MDL_BIT(MDL_EXCLUSIVE) | MDL_BIT(MDL_SHARED) | MDL_BIT(MDL_INTENTION_EXCLUSIVE)
1589 };
1590 
1591 const MDL_lock::bitmap_t MDL_scoped_lock::m_waiting_incompatible[MDL_TYPE_END] =
1592 {
1593  MDL_BIT(MDL_EXCLUSIVE) | MDL_BIT(MDL_SHARED),
1594  MDL_BIT(MDL_EXCLUSIVE), 0, 0, 0, 0, 0, 0, 0
1595 };
1596 
1597 
1652 const MDL_lock::bitmap_t
1653 MDL_object_lock::m_granted_incompatible[MDL_TYPE_END] =
1654 {
1655  0,
1656  MDL_BIT(MDL_EXCLUSIVE),
1657  MDL_BIT(MDL_EXCLUSIVE),
1658  MDL_BIT(MDL_EXCLUSIVE) | MDL_BIT(MDL_SHARED_NO_READ_WRITE),
1659  MDL_BIT(MDL_EXCLUSIVE) | MDL_BIT(MDL_SHARED_NO_READ_WRITE) |
1660  MDL_BIT(MDL_SHARED_NO_WRITE),
1661  MDL_BIT(MDL_EXCLUSIVE) | MDL_BIT(MDL_SHARED_NO_READ_WRITE) |
1662  MDL_BIT(MDL_SHARED_NO_WRITE) | MDL_BIT(MDL_SHARED_UPGRADABLE),
1663  MDL_BIT(MDL_EXCLUSIVE) | MDL_BIT(MDL_SHARED_NO_READ_WRITE) |
1664  MDL_BIT(MDL_SHARED_NO_WRITE) | MDL_BIT(MDL_SHARED_UPGRADABLE) |
1665  MDL_BIT(MDL_SHARED_WRITE),
1666  MDL_BIT(MDL_EXCLUSIVE) | MDL_BIT(MDL_SHARED_NO_READ_WRITE) |
1667  MDL_BIT(MDL_SHARED_NO_WRITE) | MDL_BIT(MDL_SHARED_UPGRADABLE) |
1668  MDL_BIT(MDL_SHARED_WRITE) | MDL_BIT(MDL_SHARED_READ),
1669  MDL_BIT(MDL_EXCLUSIVE) | MDL_BIT(MDL_SHARED_NO_READ_WRITE) |
1670  MDL_BIT(MDL_SHARED_NO_WRITE) | MDL_BIT(MDL_SHARED_UPGRADABLE) |
1671  MDL_BIT(MDL_SHARED_WRITE) | MDL_BIT(MDL_SHARED_READ) |
1672  MDL_BIT(MDL_SHARED_HIGH_PRIO) | MDL_BIT(MDL_SHARED)
1673 };
1674 
1675 
1676 const MDL_lock::bitmap_t
1677 MDL_object_lock::m_waiting_incompatible[MDL_TYPE_END] =
1678 {
1679  0,
1680  MDL_BIT(MDL_EXCLUSIVE),
1681  0,
1682  MDL_BIT(MDL_EXCLUSIVE) | MDL_BIT(MDL_SHARED_NO_READ_WRITE),
1683  MDL_BIT(MDL_EXCLUSIVE) | MDL_BIT(MDL_SHARED_NO_READ_WRITE) |
1684  MDL_BIT(MDL_SHARED_NO_WRITE),
1685  MDL_BIT(MDL_EXCLUSIVE),
1686  MDL_BIT(MDL_EXCLUSIVE),
1687  MDL_BIT(MDL_EXCLUSIVE),
1688  0
1689 };
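/*
  How the two matrices above are read: the requested lock type selects a
  row, and the row is a bitmap of the types that conflict with that request.
  MDL_lock::can_grant_lock() then evaluates, roughly:

    blocked_by_granted= incompatible_granted_types_bitmap()[type] &
                        m_granted.bitmap();
    blocked_by_waiting= incompatible_waiting_types_bitmap()[type] &
                        m_waiting.bitmap();

  For example, an MDL_SHARED_WRITE request on a table conflicts with granted
  X, SNRW and SNW locks, but coexists with granted SR and SW locks.
*/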
1690 
1691 
1708 bool
1709 MDL_lock::can_grant_lock(enum_mdl_type type_arg,
1710  MDL_context *requestor_ctx,
1711  bool ignore_lock_priority) const
1712 {
1713  bool can_grant= FALSE;
1714  bitmap_t waiting_incompat_map= incompatible_waiting_types_bitmap()[type_arg];
1715  bitmap_t granted_incompat_map= incompatible_granted_types_bitmap()[type_arg];
1716 
1717  /*
1718  New lock request can be satisfied iff:
1719  - There are no incompatible types of satisfied requests
1720  in other contexts
1721  - There are no waiting requests which have higher priority
1722  than this request, unless lock priority is ignored.
1723  */
1724  if (ignore_lock_priority || !(m_waiting.bitmap() & waiting_incompat_map))
1725  {
1726  if (! (m_granted.bitmap() & granted_incompat_map))
1727  can_grant= TRUE;
1728  else
1729  {
1730  Ticket_iterator it(m_granted);
1731  MDL_ticket *ticket;
1732 
1733  /* Check that the incompatible lock belongs to some other context. */
1734  while ((ticket= it++))
1735  {
1736  if (ticket->get_ctx() != requestor_ctx &&
1737  ticket->is_incompatible_when_granted(type_arg))
1738  break;
1739  }
1740  if (ticket == NULL) /* Incompatible locks are our own. */
1741  can_grant= TRUE;
1742  }
1743  }
1744  return can_grant;
1745 }
1746 
1747 
1750 void MDL_lock::remove_ticket(Ticket_list MDL_lock::*list, MDL_ticket *ticket)
1751 {
1752  mysql_prlock_wrlock(&m_rwlock);
1753  (this->*list).remove_ticket(ticket);
1754  if (is_empty())
1755  mdl_locks.remove(this);
1756  else
1757  {
1758  /*
1759  There can be some contexts waiting to acquire a lock
1760  which now might be able to do it. Grant the lock to
1761  them and wake them up!
1762 
1763  We always try to reschedule locks, since there is no easy way
1764  (i.e. by looking at the bitmaps) to find out whether it is
1765  required or not.
1766  In a general case, even when the queue's bitmap is not changed
1767  after removal of the ticket, there is a chance that some request
1768  can be satisfied (due to the fact that a granted request
1769  reflected in the bitmap might belong to the same context as a
1770  pending request).
1771  */
1772  reschedule_waiters();
1773  mysql_prlock_unlock(&m_rwlock);
1774  }
1775 }
1776 
1777 
1787 bool MDL_lock::has_pending_conflicting_lock(enum_mdl_type type)
1788 {
1789  bool result;
1790 
1792 
1793  mysql_prlock_rdlock(&m_rwlock);
1794  result= (m_waiting.bitmap() & incompatible_granted_types_bitmap()[type]);
1795  mysql_prlock_unlock(&m_rwlock);
1796  return result;
1797 }
1798 
1799 
1800 MDL_wait_for_graph_visitor::~MDL_wait_for_graph_visitor()
1801 {
1802 }
1803 
1804 
1805 MDL_wait_for_subgraph::~MDL_wait_for_subgraph()
1806 {
1807 }
1808 
1818 bool MDL_ticket::has_stronger_or_equal_type(enum_mdl_type type) const
1819 {
1820  const MDL_lock::bitmap_t *
1821  granted_incompat_map= m_lock->incompatible_granted_types_bitmap();
1822 
1823  return ! (granted_incompat_map[type] & ~(granted_incompat_map[m_type]));
1824 }
1825 
1826 
1827 bool MDL_ticket::is_incompatible_when_granted(enum_mdl_type type) const
1828 {
1829  return (MDL_BIT(m_type) &
1830  m_lock->incompatible_granted_types_bitmap()[type]);
1831 }
1832 
1833 
1834 bool MDL_ticket::is_incompatible_when_waiting(enum_mdl_type type) const
1835 {
1836  return (MDL_BIT(m_type) &
1837  m_lock->incompatible_waiting_types_bitmap()[type]);
1838 }
1839 
1840 
1856 MDL_ticket *
1857 MDL_context::find_ticket(MDL_request *mdl_request,
1858  enum_mdl_duration *result_duration)
1859 {
1860  MDL_ticket *ticket;
1861  int i;
1862 
1863  for (i= 0; i < MDL_DURATION_END; i++)
1864  {
1865  enum_mdl_duration duration= (enum_mdl_duration)((mdl_request->duration+i) %
1866  MDL_DURATION_END);
1867  Ticket_iterator it(m_tickets[duration]);
1868 
1869  while ((ticket= it++))
1870  {
1871  if (mdl_request->key.is_equal(&ticket->m_lock->key) &&
1872  ticket->has_stronger_or_equal_type(mdl_request->type))
1873  {
1874  *result_duration= duration;
1875  return ticket;
1876  }
1877  }
1878  }
1879  return NULL;
1880 }
1881 
1882 
1909 bool
1910 MDL_context::try_acquire_lock(MDL_request *mdl_request)
1911 {
1912  MDL_ticket *ticket;
1913 
1914  if (try_acquire_lock_impl(mdl_request, &ticket))
1915  return TRUE;
1916 
1917  if (! mdl_request->ticket)
1918  {
1919  /*
1920  Our attempt to acquire lock without waiting has failed.
1921  Let us release resources which were acquired in the process.
1922  We can't get here if we allocated a new lock object so there
1923  is no need to release it.
1924  */
1925  DBUG_ASSERT(! ticket->m_lock->is_empty());
1926  mysql_prlock_unlock(&ticket->m_lock->m_rwlock);
1927  MDL_ticket::destroy(ticket);
1928  }
1929 
1930  return FALSE;
1931 }
1932 
1933 
1950 bool
1951 MDL_context::try_acquire_lock_impl(MDL_request *mdl_request,
1952  MDL_ticket **out_ticket)
1953 {
1954  MDL_lock *lock;
1955  MDL_key *key= &mdl_request->key;
1956  MDL_ticket *ticket;
1957  enum_mdl_duration found_duration;
1958 
1959  DBUG_ASSERT(mdl_request->type != MDL_EXCLUSIVE ||
1960  is_lock_owner(MDL_key::GLOBAL, "", "", MDL_INTENTION_EXCLUSIVE));
1961  DBUG_ASSERT(mdl_request->ticket == NULL);
1962 
1963  /* Don't take chances in production. */
1964  mdl_request->ticket= NULL;
1966 
1967  /*
1968  Check whether the context already holds a shared lock on the object,
1969  and if so, grant the request.
1970  */
1971  if ((ticket= find_ticket(mdl_request, &found_duration)))
1972  {
1973  DBUG_ASSERT(ticket->m_lock);
1974  DBUG_ASSERT(ticket->has_stronger_or_equal_type(mdl_request->type));
1975  /*
1976  If the request is for a transactional lock, and we found
1977  a transactional lock, just reuse the found ticket.
1978 
1979  It's possible that we found a transactional lock,
1980  but the request is for a HANDLER lock. In that case HANDLER
1981  code will clone the ticket (see below why it's needed).
1982 
1983  If the request is for a transactional lock, and we found
1984  a HANDLER lock, create a copy, to make sure that when user
1985  does HANDLER CLOSE, the transactional lock is not released.
1986 
1987  If the request is for a handler lock, and we found a
1988  HANDLER lock, also do the clone. HANDLER CLOSE for one alias
1989  should not release the lock on the table HANDLER opened through
1990  a different alias.
1991  */
1992  mdl_request->ticket= ticket;
1993  if ((found_duration != mdl_request->duration ||
1994  mdl_request->duration == MDL_EXPLICIT) &&
1995  clone_ticket(mdl_request))
1996  {
1997  /* Clone failed. */
1998  mdl_request->ticket= NULL;
1999  return TRUE;
2000  }
2001  return FALSE;
2002  }
2003 
2004  if (!(ticket= MDL_ticket::create(this, mdl_request->type
2005 #ifndef DBUG_OFF
2006  , mdl_request->duration
2007 #endif
2008  )))
2009  return TRUE;
2010 
2011  /* The below call implicitly locks MDL_lock::m_rwlock on success. */
2012  if (!(lock= mdl_locks.find_or_insert(key)))
2013  {
2014  MDL_ticket::destroy(ticket);
2015  return TRUE;
2016  }
2017 
2018  ticket->m_lock= lock;
2019 
2020  if (lock->can_grant_lock(mdl_request->type, this, false))
2021  {
2022  lock->m_granted.add_ticket(ticket);
2023 
2024  mysql_prlock_unlock(&lock->m_rwlock);
2025 
2026  m_tickets[mdl_request->duration].push_front(ticket);
2027 
2028  mdl_request->ticket= ticket;
2029  }
2030  else
2031  *out_ticket= ticket;
2032 
2033  return FALSE;
2034 }
2035 
2036 
2050 bool
2051 MDL_context::clone_ticket(MDL_request *mdl_request)
2052 {
2053  MDL_ticket *ticket;
2054 
2056  /*
2057  By submitting mdl_request->type to MDL_ticket::create()
2058  we effectively downgrade the cloned lock to the level of
2059  the request.
2060  */
2061  if (!(ticket= MDL_ticket::create(this, mdl_request->type
2062 #ifndef DBUG_OFF
2063  , mdl_request->duration
2064 #endif
2065  )))
2066  return TRUE;
2067 
2068  /* clone() is not supposed to be used to get a stronger lock. */
2069  DBUG_ASSERT(mdl_request->ticket->has_stronger_or_equal_type(ticket->m_type));
2070 
2071  ticket->m_lock= mdl_request->ticket->m_lock;
2072  mdl_request->ticket= ticket;
2073 
2074  mysql_prlock_wrlock(&ticket->m_lock->m_rwlock);
2075  ticket->m_lock->m_granted.add_ticket(ticket);
2076  mysql_prlock_unlock(&ticket->m_lock->m_rwlock);
2077 
2078  m_tickets[mdl_request->duration].push_front(ticket);
2079 
2080  return FALSE;
2081 }
2082 
2083 
2091 void MDL_scoped_lock::notify_conflicting_locks(MDL_context *ctx)
2092 {
2093  Ticket_iterator it(m_granted);
2094  MDL_ticket *conflicting_ticket;
2095 
2096  while ((conflicting_ticket= it++))
2097  {
2098  /* Only try to abort locks on which we back off. */
2099  if (conflicting_ticket->get_ctx() != ctx &&
2100  conflicting_ticket->get_type() < MDL_SHARED_UPGRADABLE)
2101 
2102  {
2103  MDL_context *conflicting_ctx= conflicting_ticket->get_ctx();
2104 
2105  /*
2106  If thread which holds conflicting lock is waiting on table-level
2107  lock or some other non-MDL resource we might need to wake it up
2108  by calling code outside of MDL.
2109  */
2110  ctx->get_owner()->
2111  notify_shared_lock(conflicting_ctx->get_owner(),
2112  conflicting_ctx->get_needs_thr_lock_abort());
2113  }
2114  }
2115 }
2116 
2117 
2124 void MDL_object_lock::notify_conflicting_locks(MDL_context *ctx)
2125 {
2126  Ticket_iterator it(m_granted);
2127  MDL_ticket *conflicting_ticket;
2128 
2129  while ((conflicting_ticket= it++))
2130  {
2131  if (conflicting_ticket->get_ctx() != ctx &&
2132  conflicting_ticket->get_type() == MDL_INTENTION_EXCLUSIVE)
2133 
2134  {
2135  MDL_context *conflicting_ctx= conflicting_ticket->get_ctx();
2136 
2137  /*
2138  Thread which holds global IX lock can be a handler thread for
2139  insert delayed. We need to kill such threads in order to get
2140  global shared lock. We do this by calling code outside of MDL.
2141  */
2142  ctx->get_owner()->
2143  notify_shared_lock(conflicting_ctx->get_owner(),
2144  conflicting_ctx->get_needs_thr_lock_abort());
2145  }
2146  }
2147 }
2148 
2149 
2162 bool
2163 MDL_context::acquire_lock(MDL_request *mdl_request, ulong lock_wait_timeout)
2164 {
2165  MDL_lock *lock;
2166  MDL_ticket *ticket;
2167  struct timespec abs_timeout;
2168  MDL_wait::enum_wait_status wait_status;
2169  /* Do some work outside the critical section. */
2170  set_timespec(abs_timeout, lock_wait_timeout);
2171 
2172  if (try_acquire_lock_impl(mdl_request, &ticket))
2173  return TRUE;
2174 
2175  if (mdl_request->ticket)
2176  {
2177  /*
2178  We have managed to acquire lock without waiting.
2179  MDL_lock, MDL_context and MDL_request were updated
2180  accordingly, so we can simply return success.
2181  */
2182  return FALSE;
2183  }
2184 
2185  /*
2186  Our attempt to acquire lock without waiting has failed.
2187  As a result of this attempt we got MDL_ticket with m_lock
2188  member pointing to the corresponding MDL_lock object which
2189  has MDL_lock::m_rwlock write-locked.
2190  */
2191  lock= ticket->m_lock;
2192 
2193  lock->m_waiting.add_ticket(ticket);
2194 
2195  /*
2196  Once we added a pending ticket to the waiting queue,
2197  we must ensure that our wait slot is empty, so
2198  that our lock request can be scheduled. Do that in the
2199  critical section formed by the acquired write lock on MDL_lock.
2200  */
2201  m_wait.reset_status();
2202 
2203  if (lock->needs_notification(ticket))
2204  lock->notify_conflicting_locks(this);
2205 
2206  mysql_prlock_unlock(&lock->m_rwlock);
2207 
2208  will_wait_for(ticket);
2209 
2210  /* There is a shared or exclusive lock on the object. */
2211  DEBUG_SYNC(get_thd(), "mdl_acquire_lock_wait");
2212 
2213  find_deadlock();
2214 
2215  if (lock->needs_notification(ticket))
2216  {
2217  struct timespec abs_shortwait;
2218  set_timespec(abs_shortwait, 1);
2219  wait_status= MDL_wait::EMPTY;
2220 
2221  while (cmp_timespec(abs_shortwait, abs_timeout) <= 0)
2222  {
2223  /* abs_timeout is far away. Wait a short while and notify locks. */
2224  wait_status= m_wait.timed_wait(m_owner, &abs_shortwait, FALSE,
2225  mdl_request->key.get_wait_state_name());
2226 
2227  if (wait_status != MDL_wait::EMPTY)
2228  break;
2229 
2230  mysql_prlock_wrlock(&lock->m_rwlock);
2231  lock->notify_conflicting_locks(this);
2232  mysql_prlock_unlock(&lock->m_rwlock);
2233  set_timespec(abs_shortwait, 1);
2234  }
2235  if (wait_status == MDL_wait::EMPTY)
2236  wait_status= m_wait.timed_wait(m_owner, &abs_timeout, TRUE,
2237  mdl_request->key.get_wait_state_name());
2238  }
2239  else
2240  wait_status= m_wait.timed_wait(m_owner, &abs_timeout, TRUE,
2241  mdl_request->key.get_wait_state_name());
2242 
2243  done_waiting_for();
2244 
2245  if (wait_status != MDL_wait::GRANTED)
2246  {
2247  lock->remove_ticket(&MDL_lock::m_waiting, ticket);
2248  MDL_ticket::destroy(ticket);
2249  switch (wait_status)
2250  {
2251  case MDL_wait::VICTIM:
2252  my_error(ER_LOCK_DEADLOCK, MYF(0));
2253  break;
2254  case MDL_wait::TIMEOUT:
2255  my_error(ER_LOCK_WAIT_TIMEOUT, MYF(0));
2256  break;
2257  case MDL_wait::KILLED:
2258  break;
2259  default:
2260  DBUG_ASSERT(0);
2261  break;
2262  }
2263  return TRUE;
2264  }
2265 
2266  /*
2267  We have been granted our request.
2268  State of MDL_lock object is already being appropriately updated by a
2269  concurrent thread (@sa MDL_lock:reschedule_waiters()).
2270  So all we need to do is to update MDL_context and MDL_request objects.
2271  */
2272  DBUG_ASSERT(wait_status == MDL_wait::GRANTED);
2273 
2274  m_tickets[mdl_request->duration].push_front(ticket);
2275 
2276  mdl_request->ticket= ticket;
2277 
2278  return FALSE;
2279 }
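/*
  A minimal usage sketch for acquire_lock(); the table name and the
  10-second timeout are illustrative placeholders. On deadlock or timeout,
  my_error() has already been called by acquire_lock().
*/
static bool mdl_example_lock_table_for_read(MDL_context *ctx)
{
  MDL_request request;
  request.init(MDL_key::TABLE, "db", "t1", MDL_SHARED_READ, MDL_TRANSACTION);
  if (ctx->acquire_lock(&request, 10))
    return TRUE;                      /* deadlock, timeout or kill */
  /* request.ticket now points to the granted ticket. */
  return FALSE;
}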
2280 
2281 
2282 extern "C" int mdl_request_ptr_cmp(const void* ptr1, const void* ptr2)
2283 {
2284  MDL_request *req1= *(MDL_request**)ptr1;
2285  MDL_request *req2= *(MDL_request**)ptr2;
2286  return req1->key.cmp(&req2->key);
2287 }
2288 
2289 
2310 bool MDL_context::acquire_locks(MDL_request_list *mdl_requests,
2311  ulong lock_wait_timeout)
2312 {
2313  MDL_request_list::Iterator it(*mdl_requests);
2314  MDL_request **sort_buf, **p_req;
2315  MDL_savepoint mdl_svp= mdl_savepoint();
2316  ssize_t req_count= static_cast<ssize_t>(mdl_requests->elements());
2317 
2318  if (req_count == 0)
2319  return FALSE;
2320 
2321  /* Sort requests according to MDL_key. */
2322  if (! (sort_buf= (MDL_request **)my_malloc(req_count *
2323  sizeof(MDL_request*),
2324  MYF(MY_WME))))
2325  return TRUE;
2326 
2327  for (p_req= sort_buf; p_req < sort_buf + req_count; p_req++)
2328  *p_req= it++;
2329 
2330  my_qsort(sort_buf, req_count, sizeof(MDL_request*),
2331  mdl_request_ptr_cmp);
2332 
2333  for (p_req= sort_buf; p_req < sort_buf + req_count; p_req++)
2334  {
2335  if (acquire_lock(*p_req, lock_wait_timeout))
2336  goto err;
2337  }
2338  my_free(sort_buf);
2339  return FALSE;
2340 
2341 err:
2342  /*
2343  Release locks we have managed to acquire so far.
2344  Use rollback_to_savepoint() since there may be duplicate
2345  requests that got assigned the same ticket.
2346  */
2347  rollback_to_savepoint(mdl_svp);
2348  /* Reset lock requests back to their initial state. */
2349  for (req_count= p_req - sort_buf, p_req= sort_buf;
2350  p_req < sort_buf + req_count; p_req++)
2351  {
2352  (*p_req)->ticket= NULL;
2353  }
2354  my_free(sort_buf);
2355  return TRUE;
2356 }
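/*
  A minimal usage sketch for acquire_locks(); because the requests are
  sorted by MDL_key before acquisition, sessions acquiring the same set of
  objects through this call take them in the same order, which avoids
  deadlocks between them. The 10-second timeout is an illustrative
  placeholder.
*/
static bool mdl_example_lock_several_tables(MDL_context *ctx,
                                            MDL_request *r1,
                                            MDL_request *r2)
{
  MDL_request_list requests;
  requests.push_front(r1);
  requests.push_front(r2);
  /* Either every request gets a ticket or none of them does. */
  return ctx->acquire_locks(&requests, 10);
}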
2357 
2358 
2384 bool
2385 MDL_context::upgrade_shared_lock(MDL_ticket *mdl_ticket,
2386  enum_mdl_type new_type,
2387  ulong lock_wait_timeout)
2388 {
2389  MDL_request mdl_xlock_request;
2390  MDL_savepoint mdl_svp= mdl_savepoint();
2391  bool is_new_ticket;
2392 
2393  DBUG_ENTER("MDL_context::upgrade_shared_lock");
2394  DEBUG_SYNC(get_thd(), "mdl_upgrade_lock");
2395 
2396  /*
2397  Do nothing if already upgraded. Used when we FLUSH TABLE under
2398  LOCK TABLES and a table is listed twice in LOCK TABLES list.
2399  */
2400  if (mdl_ticket->has_stronger_or_equal_type(new_type))
2401  DBUG_RETURN(FALSE);
2402 
2403  mdl_xlock_request.init(&mdl_ticket->m_lock->key, new_type,
2404  MDL_TRANSACTION);
2405 
2406  if (acquire_lock(&mdl_xlock_request, lock_wait_timeout))
2407  DBUG_RETURN(TRUE);
2408 
2409  is_new_ticket= ! has_lock(mdl_svp, mdl_xlock_request.ticket);
2410 
2411  /* Merge the acquired and the original lock. @todo: move to a method. */
2412  mysql_prlock_wrlock(&mdl_ticket->m_lock->m_rwlock);
2413  if (is_new_ticket)
2414  mdl_ticket->m_lock->m_granted.remove_ticket(mdl_xlock_request.ticket);
2415  /*
2416  Set the new type of lock in the ticket. To update state of
2417  MDL_lock object correctly we need to temporarily exclude
2418  ticket from the granted queue and then include it back.
2419  */
2420  mdl_ticket->m_lock->m_granted.remove_ticket(mdl_ticket);
2421  mdl_ticket->m_type= new_type;
2422  mdl_ticket->m_lock->m_granted.add_ticket(mdl_ticket);
2423 
2424  mysql_prlock_unlock(&mdl_ticket->m_lock->m_rwlock);
2425 
2426  if (is_new_ticket)
2427  {
2428  m_tickets[MDL_TRANSACTION].remove(mdl_xlock_request.ticket);
2429  MDL_ticket::destroy(mdl_xlock_request.ticket);
2430  }
2431 
2432  DBUG_RETURN(FALSE);
2433 }
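/*
  A minimal usage sketch for upgrade_shared_lock(), mirroring what
  ALTER TABLE-style statements do: work under a shared lock first, then
  upgrade the same ticket to X only for the final phase. The call is a
  no-op if the ticket is already strong enough.
*/
static bool mdl_example_upgrade_to_exclusive(MDL_context *ctx,
                                             MDL_ticket *table_ticket,
                                             ulong timeout)
{
  /* Waits until all other readers and writers of the object are gone. */
  return ctx->upgrade_shared_lock(table_ticket, MDL_EXCLUSIVE, timeout);
}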
2434 
2435 
2445 bool MDL_lock::visit_subgraph(MDL_ticket *waiting_ticket,
2446  MDL_wait_for_graph_visitor *gvisitor)
2447 {
2448  MDL_ticket *ticket;
2449  MDL_context *src_ctx= waiting_ticket->get_ctx();
2450  bool result= TRUE;
2451 
2452  mysql_prlock_rdlock(&m_rwlock);
2453 
2454  /* Must be initialized after taking a read lock. */
2455  Ticket_iterator granted_it(m_granted);
2456  Ticket_iterator waiting_it(m_waiting);
2457 
2458  /*
2459  MDL_lock's waiting and granted queues and MDL_context::m_waiting_for
2460  member are updated by different threads when the lock is granted
2461  (see MDL_context::acquire_lock() and MDL_lock::reschedule_waiters()).
2462  As a result, here we may encounter a situation when MDL_lock data
2463  already reflects the fact that the lock was granted but
2464  m_waiting_for member has not been updated yet.
2465 
2466  For example, imagine that:
2467 
2468  thread1: Owns SNW lock on table t1.
2469  thread2: Attempts to acquire SW lock on t1,
2470  but sees an active SNW lock.
2471  Thus adds the ticket to the waiting queue and
2472  sets m_waiting_for to point to the ticket.
2473  thread1: Releases SNW lock, updates MDL_lock object to
2474  grant SW lock to thread2 (moves the ticket for
2475  SW from waiting to the active queue).
2476  Attempts to acquire a new SNW lock on t1,
2477  sees an active SW lock (since it is present in the
2478  active queue), adds ticket for SNW lock to the waiting
2479  queue, sets m_waiting_for to point to this ticket.
2480 
2481  At this point deadlock detection algorithm run by thread1 will see that:
2482  - Thread1 waits for SNW lock on t1 (since m_waiting_for is set).
2483  - SNW lock is not granted, because it conflicts with active SW lock
2484  owned by thread 2 (since ticket for SW is present in granted queue).
2485  - Thread2 waits for SW lock (since its m_waiting_for has not been
2486  updated yet!).
2487  - SW lock is not granted because there is pending SNW lock from thread1.
2488  Therefore deadlock should exist [sic!].
2489 
2490  To avoid detection of such false deadlocks we need to check the "actual"
2491  status of the ticket being waited for, before analyzing its blockers.
2492  We do this by checking the wait status of the context which is waiting
2493  for it. To avoid races this has to be done under protection of
2494  MDL_lock::m_rwlock lock.
2495  */
2496  if (src_ctx->m_wait.get_status() != MDL_wait::EMPTY)
2497  {
2498  result= FALSE;
2499  goto end;
2500  }
2501 
2502  /*
2503  To avoid visiting nodes which were already marked as victims of
2504  deadlock detection (or whose requests were already satisfied) we
2505  enter the node only after peeking at its wait status.
2506  This is necessary to avoid active waiting in a situation
2507  when previous searches for a deadlock already selected the
2508  node we're about to enter as a victim (see the comment
2509  in MDL_context::find_deadlock() for explanation why several searches
2510  can be performed for the same wait).
2511  There is no guarantee that the node isn't chosen a victim while we
2512  are visiting it but this is OK: in the worst case we might do some
2513  extra work and one more context might be chosen as a victim.
2514  */
2515  if (gvisitor->enter_node(src_ctx))
2516  goto end;
2517 
2518  /*
2519  We do a breadth-first search first -- that is, inspect all
2520  edges of the current node, and only then follow up to the next
2521  node. In workloads that involve wait-for graph loops this
2522  has proven to be a more efficient strategy [citation missing].
2523  */
2524  while ((ticket= granted_it++))
2525  {
2526  /* Filter out edges that point to the same node. */
2527  if (ticket->get_ctx() != src_ctx &&
2528  ticket->is_incompatible_when_granted(waiting_ticket->get_type()) &&
2529  gvisitor->inspect_edge(ticket->get_ctx()))
2530  {
2531  goto end_leave_node;
2532  }
2533  }
2534 
2535  while ((ticket= waiting_it++))
2536  {
2537  /* Filter out edges that point to the same node. */
2538  if (ticket->get_ctx() != src_ctx &&
2539  ticket->is_incompatible_when_waiting(waiting_ticket->get_type()) &&
2540  gvisitor->inspect_edge(ticket->get_ctx()))
2541  {
2542  goto end_leave_node;
2543  }
2544  }
2545 
2546  /* Recurse and inspect all adjacent nodes. */
2547  granted_it.rewind();
2548  while ((ticket= granted_it++))
2549  {
2550  if (ticket->get_ctx() != src_ctx &&
2551  ticket->is_incompatible_when_granted(waiting_ticket->get_type()) &&
2552  ticket->get_ctx()->visit_subgraph(gvisitor))
2553  {
2554  goto end_leave_node;
2555  }
2556  }
2557 
2558  waiting_it.rewind();
2559  while ((ticket= waiting_it++))
2560  {
2561  if (ticket->get_ctx() != src_ctx &&
2562  ticket->is_incompatible_when_waiting(waiting_ticket->get_type()) &&
2563  ticket->get_ctx()->visit_subgraph(gvisitor))
2564  {
2565  goto end_leave_node;
2566  }
2567  }
2568 
2569  result= FALSE;
2570 
2571 end_leave_node:
2572  gvisitor->leave_node(src_ctx);
2573 
2574 end:
2575  mysql_prlock_unlock(&m_rwlock);
2576  return result;
2577 }
2578 
2579 
2590 bool MDL_ticket::accept_visitor(MDL_wait_for_graph_visitor *gvisitor)
2591 {
2592  return m_lock->visit_subgraph(this, gvisitor);
2593 }
2594 
2595 
2612 bool MDL_context::visit_subgraph(MDL_wait_for_graph_visitor *gvisitor)
2613 {
2614  bool result= FALSE;
2615 
2616  mysql_prlock_rdlock(&m_LOCK_waiting_for);
2617 
2618  if (m_waiting_for)
2619  result= m_waiting_for->accept_visitor(gvisitor);
2620 
2621  mysql_prlock_unlock(&m_LOCK_waiting_for);
2622 
2623  return result;
2624 }
2625 
2626 
2638 void MDL_context::find_deadlock()
2639 {
2640  while (1)
2641  {
2642  /*
2643  The fact that we use fresh instance of gvisitor for each
2644  search performed by find_deadlock() below is important,
2645  the code responsible for victim selection relies on this.
2646  */
2647  Deadlock_detection_visitor dvisitor(this);
2648  MDL_context *victim;
2649 
2650  if (! visit_subgraph(&dvisitor))
2651  {
2652  /* No deadlocks are found! */
2653  break;
2654  }
2655 
2656  victim= dvisitor.get_victim();
2657 
2658  /*
2659  Failure to change status of the victim is OK as it means
2660  that the victim has received some other message and is
2661  about to stop its waiting/to break deadlock loop.
2662  Even when the initiator of the deadlock search is
2663  chosen the victim, we need to set the respective wait
2664  result in order to "close" it for any attempt to
2665  schedule the request.
2666  This is needed to avoid a possible race during
2667  cleanup in case when the lock request on which the
2668  context was waiting is concurrently satisfied.
2669  */
2670  (void) victim->m_wait.set_status(MDL_wait::VICTIM);
2671  victim->unlock_deadlock_victim();
2672 
2673  if (victim == this)
2674  break;
2675  /*
2676  After adding a new edge to the waiting graph we found that it
2677  creates a loop (i.e. there is a deadlock). We decided to destroy
2678  this loop by removing an edge, but not the one that we added.
2679  Since this doesn't guarantee that all loops created by addition
2680  of the new edge are destroyed, we have to repeat the search.
2681  */
2682  }
2683 }
2684 
2685 
2694 void MDL_context::release_lock(enum_mdl_duration duration, MDL_ticket *ticket)
2695 {
2696  MDL_lock *lock= ticket->m_lock;
2697  DBUG_ENTER("MDL_context::release_lock");
2698  DBUG_PRINT("enter", ("db=%s name=%s", lock->key.db_name(),
2699  lock->key.name()));
2700 
2701  DBUG_ASSERT(this == ticket->get_ctx());
2703 
2704  lock->remove_ticket(&MDL_lock::m_granted, ticket);
2705 
2706  m_tickets[duration].remove(ticket);
2707  MDL_ticket::destroy(ticket);
2708 
2709  DBUG_VOID_RETURN;
2710 }
2711 
2712 
2720 void MDL_context::release_lock(MDL_ticket *ticket)
2721 {
2722  DBUG_ASSERT(ticket->m_duration == MDL_EXPLICIT);
2723 
2724  release_lock(MDL_EXPLICIT, ticket);
2725 }
2726 
2727 
2741 void MDL_context::release_locks_stored_before(enum_mdl_duration duration,
2742  MDL_ticket *sentinel)
2743 {
2744  MDL_ticket *ticket;
2745  Ticket_iterator it(m_tickets[duration]);
2746  DBUG_ENTER("MDL_context::release_locks_stored_before");
2747 
2748  if (m_tickets[duration].is_empty())
2749  DBUG_VOID_RETURN;
2750 
2751  while ((ticket= it++) && ticket != sentinel)
2752  {
2753  DBUG_PRINT("info", ("found lock to release ticket=%p", ticket));
2754  release_lock(duration, ticket);
2755  }
2756 
2757  DBUG_VOID_RETURN;
2758 }
2759 
2760 
2769 void MDL_context::release_all_locks_for_name(MDL_ticket *name)
2770 {
2771  /* Use MDL_ticket::m_lock to identify other locks for the same object. */
2772  MDL_lock *lock= name->m_lock;
2773 
2774  /* Remove matching lock tickets from the context. */
2775  MDL_ticket *ticket;
2776  Ticket_iterator it_ticket(m_tickets[MDL_EXPLICIT]);
2777 
2778  while ((ticket= it_ticket++))
2779  {
2780  DBUG_ASSERT(ticket->m_lock);
2781  if (ticket->m_lock == lock)
2782  release_lock(MDL_EXPLICIT, ticket);
2783  }
2784 }
2785 
2786 
2793 void MDL_ticket::downgrade_lock(enum_mdl_type type)
2794 {
2796 
2797  /*
2798  Do nothing if already downgraded. Used when we FLUSH TABLE under
2799  LOCK TABLES and a table is listed twice in LOCK TABLES list.
2800  Note that this code might even try to "downgrade" a weak lock
2801  (e.g. SW) to a stronger one (e.g. SNRW). So we can't even assert
2802  here that target lock is weaker than existing lock.
2803  */
2804  if (m_type == type || !has_stronger_or_equal_type(type))
2805  return;
2806 
2807  /* Only allow downgrade from EXCLUSIVE and SHARED_NO_WRITE. */
2808  DBUG_ASSERT(m_type == MDL_EXCLUSIVE ||
2809  m_type == MDL_SHARED_NO_WRITE);
2810 
2811  mysql_prlock_wrlock(&m_lock->m_rwlock);
2812  /*
2813  To update state of MDL_lock object correctly we need to temporarily
2814  exclude ticket from the granted queue and then include it back.
2815  */
2816  m_lock->m_granted.remove_ticket(this);
2817  m_type= type;
2818  m_lock->m_granted.add_ticket(this);
2819  m_lock->reschedule_waiters();
2820  mysql_prlock_unlock(&m_lock->m_rwlock);
2821 }
2822 
2823 
2838 bool
2839 MDL_context::is_lock_owner(MDL_key::enum_mdl_namespace mdl_namespace,
2840  const char *db, const char *name,
2841  enum_mdl_type mdl_type)
2842 {
2843  MDL_request mdl_request;
2844  enum_mdl_duration not_unused;
2845  /* We don't care about exact duration of lock here. */
2846  mdl_request.init(mdl_namespace, db, name, mdl_type, MDL_TRANSACTION);
2847  MDL_ticket *ticket= find_ticket(&mdl_request, &not_unused);
2848 
2849  DBUG_ASSERT(ticket == NULL || ticket->m_lock);
2850 
2851  return ticket;
2852 }
2853 
2854 
2863 bool MDL_ticket::has_pending_conflicting_lock() const
2864 {
2865  return m_lock->has_pending_conflicting_lock(m_type);
2866 }
2867 
2868 
2878 void MDL_context::rollback_to_savepoint(const MDL_savepoint &mdl_savepoint)
2879 {
2880  DBUG_ENTER("MDL_context::rollback_to_savepoint");
2881 
2882  /* If savepoint is NULL, it is from the start of the transaction. */
2883  release_locks_stored_before(MDL_STATEMENT, mdl_savepoint.m_stmt_ticket);
2884  release_locks_stored_before(MDL_TRANSACTION, mdl_savepoint.m_trans_ticket);
2885 
2886  DBUG_VOID_RETURN;
2887 }
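/*
  A minimal usage sketch for MDL savepoints: remember the current position
  in the ticket lists, and roll back to it to release only the locks
  acquired after that point. The 10-second timeouts are illustrative
  placeholders.
*/
static bool mdl_example_acquire_pair_or_nothing(MDL_context *ctx,
                                                MDL_request *r1,
                                                MDL_request *r2)
{
  MDL_savepoint savepoint= ctx->mdl_savepoint();
  if (ctx->acquire_lock(r1, 10) || ctx->acquire_lock(r2, 10))
  {
    /* Release whatever was acquired after the savepoint (possibly r1). */
    ctx->rollback_to_savepoint(savepoint);
    return TRUE;
  }
  return FALSE;
}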
2888 
2889 
2899 void MDL_context::release_transactional_locks()
2900 {
2901  DBUG_ENTER("MDL_context::release_transactional_locks");
2902  release_locks_stored_before(MDL_STATEMENT, NULL);
2903  release_locks_stored_before(MDL_TRANSACTION, NULL);
2904  DBUG_VOID_RETURN;
2905 }
2906 
2907 
2908 void MDL_context::release_statement_locks()
2909 {
2910  DBUG_ENTER("MDL_context::release_statement_locks");
2911  release_locks_stored_before(MDL_STATEMENT, NULL);
2912  DBUG_VOID_RETURN;
2913 }
2914 
2915 
2926 bool MDL_context::has_lock(const MDL_savepoint &mdl_savepoint,
2927  MDL_ticket *mdl_ticket)
2928 {
2929  MDL_ticket *ticket;
2930  /* Start from the beginning, most likely mdl_ticket's been just acquired. */
2931  MDL_context::Ticket_iterator s_it(m_tickets[MDL_STATEMENT]);
2932  MDL_context::Ticket_iterator t_it(m_tickets[MDL_TRANSACTION]);
2933 
2934  while ((ticket= s_it++) && ticket != mdl_savepoint.m_stmt_ticket)
2935  {
2936  if (ticket == mdl_ticket)
2937  return FALSE;
2938  }
2939 
2940  while ((ticket= t_it++) && ticket != mdl_savepoint.m_trans_ticket)
2941  {
2942  if (ticket == mdl_ticket)
2943  return FALSE;
2944  }
2945  return TRUE;
2946 }
2947 
2948 
2959 void MDL_context::set_lock_duration(MDL_ticket *mdl_ticket,
2960  enum_mdl_duration duration)
2961 {
2962  DBUG_ASSERT(mdl_ticket->m_duration == MDL_TRANSACTION &&
2963  duration != MDL_TRANSACTION);
2964 
2965  m_tickets[MDL_TRANSACTION].remove(mdl_ticket);
2966  m_tickets[duration].push_front(mdl_ticket);
2967 #ifndef DBUG_OFF
2968  mdl_ticket->m_duration= duration;
2969 #endif
2970 }
2971 
2972 
2977 void MDL_context::set_explicit_duration_for_all_locks()
2978 {
2979  int i;
2980  MDL_ticket *ticket;
2981 
2982  /*
2983  In the most common case when this function is called, the list
2984  of transactional locks is bigger than the list of locks with
2985  explicit duration. So we start by swapping these two lists
2986  and then move elements from the new list of transactional
2987  locks and the list of statement locks to the list of locks with
2988  explicit duration.
2989  */
2990 
2991  m_tickets[MDL_EXPLICIT].swap(m_tickets[MDL_TRANSACTION]);
2992 
2993  for (i= 0; i < MDL_EXPLICIT; i++)
2994  {
2995  Ticket_iterator it_ticket(m_tickets[i]);
2996 
2997  while ((ticket= it_ticket++))
2998  {
2999  m_tickets[i].remove(ticket);
3000  m_tickets[MDL_EXPLICIT].push_front(ticket);
3001  }
3002  }
3003 
3004 #ifndef DBUG_OFF
3005  Ticket_iterator exp_it(m_tickets[MDL_EXPLICIT]);
3006 
3007  while ((ticket= exp_it++))
3008  ticket->m_duration= MDL_EXPLICIT;
3009 #endif
3010 }
3011 
3012 
3017 void MDL_context::set_transactional_duration_for_all_locks()
3018 {
3019  MDL_ticket *ticket;
3020 
3021  /*
3022  In the most common case when this function is called, the list
3023  of explicit locks is bigger than the two other lists (in fact,
3024  the list of statement locks is always empty). So we start by
3025  swapping the lists of explicit and transactional locks and then
3026  move the contents of the new list of explicit locks to the list of
3027  locks with transactional duration.
3028  */
3029 
3030  DBUG_ASSERT(m_tickets[MDL_STATEMENT].is_empty());
3031 
3032  m_tickets[MDL_TRANSACTION].swap(m_tickets[MDL_EXPLICIT]);
3033 
3034  Ticket_iterator it_ticket(m_tickets[MDL_EXPLICIT]);
3035 
3036  while ((ticket= it_ticket++))
3037  {
3038  m_tickets[MDL_EXPLICIT].remove(ticket);
3039  m_tickets[MDL_TRANSACTION].push_front(ticket);
3040  }
3041 
3042 #ifndef DBUG_OFF
3043  Ticket_iterator trans_it(m_tickets[MDL_TRANSACTION]);
3044 
3045  while ((ticket= trans_it++))
3046  ticket->m_duration= MDL_TRANSACTION;
3047 #endif
3048 }