MySQL 5.6.14 Source Code Document
mdl-t.cc
1 /* Copyright (c) 2009, 2013, Oracle and/or its affiliates. All rights reserved.
2 
3  This program is free software; you can redistribute it and/or modify
4  it under the terms of the GNU General Public License as published by
5  the Free Software Foundation; version 2 of the License.
6 
7  This program is distributed in the hope that it will be useful,
8  but WITHOUT ANY WARRANTY; without even the implied warranty of
9  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10  GNU General Public License for more details.
11 
12  You should have received a copy of the GNU General Public License
13  along with this program; if not, write to the Free Software
14  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */
15 
25 // First include (the generated) my_config.h, to get correct platform defines.
26 #include "my_config.h"
27 #include <gtest/gtest.h>
28 
29 #include "mdl.h"
30 #include <mysqld_error.h>
31 
32 #include "thr_malloc.h"
33 #include "thread_utils.h"
34 #include "test_mdl_context_owner.h"
35 
36 /*
37  Mock thd_wait_begin/end functions
38 */
39 
40 extern "C" void thd_wait_begin(MYSQL_THD thd, int wait_type)
41 {
42 }
43 
44 extern "C" void thd_wait_end(MYSQL_THD thd)
45 {
46 }
47 
48 /*
49  A mock error handler: verifies that any reported error matches expected_error.
50 */
51 static uint expected_error= 0;
52 extern "C" void test_error_handler_hook(uint err, const char *str, myf MyFlags)
53 {
54  EXPECT_EQ(expected_error, err) << str;
55 }
56 
57 /*
58  Mock away this global function.
59  We don't need DEBUG_SYNC functionality in a unit test.
60  */
61 void debug_sync(THD *thd, const char *sync_point_name, size_t name_len)
62 {
63  DBUG_PRINT("debug_sync_point", ("hit: '%s'", sync_point_name));
64  FAIL() << "Not yet implemented.";
65 }
66 
67 /*
68  Putting everything in a namespace prevents any (unintentional)
69  name clashes with the code under test.
70 */
71 namespace mdl_unittest {
72 
73 using thread::Notification;
74 using thread::Thread;
75 
76 const char db_name[]= "some_database";
77 const char table_name1[]= "some_table1";
78 const char table_name2[]= "some_table2";
79 const char table_name3[]= "some_table3";
80 const char table_name4[]= "some_table4";
81 const ulong zero_timeout= 0;
82 const ulong long_timeout= (ulong) 3600L*24L*365L;
83 
84 
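/*
 A test fixture which initializes the MDL subsystem and an MDL_context
 owned by the test itself, plus a reusable global intention-exclusive
 request used by the tests that need strong table locks.
*/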
85 class MDLTest : public ::testing::Test, public Test_MDL_context_owner
86 {
87 protected:
88  MDLTest()
89  : m_null_ticket(NULL),
90  m_null_request(NULL)
91  {
92  }
93 
94  static void SetUpTestCase()
95  {
96  error_handler_hook= test_error_handler_hook;
97  mdl_locks_hash_partitions= MDL_LOCKS_HASH_PARTITIONS_DEFAULT;
98  }
99 
100  void SetUp()
101  {
102  expected_error= 0;
103  mdl_init();
104  m_mdl_context.init(this);
105  EXPECT_FALSE(m_mdl_context.has_locks());
106  m_global_request.init(MDL_key::GLOBAL, "", "", MDL_INTENTION_EXCLUSIVE,
107  MDL_TRANSACTION);
108  }
109 
110  void TearDown()
111  {
112  m_mdl_context.destroy();
113  mdl_destroy();
114  }
115 
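 // Forwards the conflict notification to the owner of the context that
 // currently holds the conflicting shared lock.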
116  virtual bool notify_shared_lock(MDL_context_owner *in_use,
117  bool needs_thr_lock_abort)
118  {
119  return in_use->notify_shared_lock(NULL, needs_thr_lock_abort);
120  }
121 
122  // A utility member for testing single lock requests.
123  void test_one_simple_shared_lock(enum_mdl_type lock_type);
124 
125  const MDL_ticket *m_null_ticket;
126  const MDL_request *m_null_request;
127  MDL_context m_mdl_context;
128  MDL_request m_request;
129  MDL_request m_global_request;
130  MDL_request_list m_request_list;
131 private:
132  GTEST_DISALLOW_COPY_AND_ASSIGN_(MDLTest);
133 };
134 
135 
136 /*
137  Will grab a lock on table_name of given type in the run() function.
138  The two notifications are for synchronizing with the main thread.
139  Does *not* take ownership of the notifications.
140 */
141 class MDL_thread : public Thread, public Test_MDL_context_owner
142 {
143 public:
144  MDL_thread(const char *table_name,
145  enum_mdl_type mdl_type,
146  Notification *lock_grabbed,
147  Notification *release_locks,
148  Notification *lock_blocked,
149  Notification *lock_released)
150  : m_table_name(table_name),
151  m_mdl_type(mdl_type),
152  m_lock_grabbed(lock_grabbed),
153  m_release_locks(release_locks),
154  m_lock_blocked(lock_blocked),
155  m_lock_released(lock_released),
156  m_ignore_notify(false)
157  {
158  m_mdl_context.init(this);
159  }
160 
161  ~MDL_thread()
162  {
163  m_mdl_context.destroy();
164  }
165 
166  virtual void run();
167  void ignore_notify() { m_ignore_notify= true; }
168 
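 // Called when some context requests a lock that conflicts with ours.
 // Forwards the notification if another owner is passed in; otherwise,
 // unless ignore_notify() was called, wakes up run() to release our locks
 // and returns true to report that the owner was notified.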
169  virtual bool notify_shared_lock(MDL_context_owner *in_use,
170  bool needs_thr_lock_abort)
171  {
172  if (in_use)
173  return in_use->notify_shared_lock(NULL, needs_thr_lock_abort);
174 
175  if (m_ignore_notify)
176  return false;
177  m_release_locks->notify();
178  return true;
179  }
180 
181  virtual void enter_cond(mysql_cond_t *cond,
182  mysql_mutex_t* mutex,
183  const PSI_stage_info *stage,
184  PSI_stage_info *old_stage,
185  const char *src_function,
186  const char *src_file,
187  int src_line)
188  {
189  Test_MDL_context_owner::enter_cond(cond, mutex, stage, old_stage,
190  src_function, src_file, src_line);
191 
192  /*
193  No extra checks needed here since MDL uses enter_cond only when the thread
194  is blocked.
195  */
196  if (m_lock_blocked)
197  m_lock_blocked->notify();
198 
199  return;
200  }
201 
202  MDL_context& get_mdl_context()
203  {
204  return m_mdl_context;
205  }
206 
207 private:
208  const char *m_table_name;
209  enum_mdl_type m_mdl_type;
210  Notification *m_lock_grabbed;
211  Notification *m_release_locks;
212  Notification *m_lock_blocked;
213  Notification *m_lock_released;
214  bool m_ignore_notify;
215  MDL_context m_mdl_context;
216 };
217 
218 
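// Acquires the requested table lock (plus the global intention-exclusive
// lock for upgradable or stronger lock types), notifies the main thread,
// waits until told to release, and then releases all transactional locks.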
219 void MDL_thread::run()
220 {
221  MDL_request request;
222  MDL_request global_request;
223  MDL_request_list request_list;
224  global_request.init(MDL_key::GLOBAL, "", "", MDL_INTENTION_EXCLUSIVE,
225  MDL_TRANSACTION);
226  request.init(MDL_key::TABLE, db_name, m_table_name, m_mdl_type,
227  MDL_TRANSACTION);
228 
229  request_list.push_front(&request);
230  if (m_mdl_type >= MDL_SHARED_UPGRADABLE)
231  request_list.push_front(&global_request);
232 
233  EXPECT_FALSE(m_mdl_context.acquire_locks(&request_list, long_timeout));
234  EXPECT_TRUE(m_mdl_context.
235  is_lock_owner(MDL_key::TABLE, db_name, m_table_name, m_mdl_type));
236 
237  // Tell the main thread that we have grabbed our locks.
238  m_lock_grabbed->notify();
239  // Hold on to locks until we are told to release them
240  m_release_locks->wait_for_notification();
241 
242  m_mdl_context.release_transactional_locks();
243 
244  // Tell the main thread that grabbed lock is released.
245  if (m_lock_released)
246  m_lock_released->notify();
247 }
248 
249 // Google Test recommends the DeathTest suffix for classes used in death tests.
250 typedef MDLTest MDLDeathTest;
251 
252 
253 /*
254  Verifies that we die with a DBUG_ASSERT if we destroy a non-empty MDL_context.
255  */
256 #if GTEST_HAS_DEATH_TEST && !defined(DBUG_OFF)
257 TEST_F(MDLDeathTest, DieWhenMTicketsNonempty)
258 {
259  ::testing::FLAGS_gtest_death_test_style = "threadsafe";
260  m_request.init(MDL_key::TABLE, db_name, table_name1, MDL_SHARED,
261  MDL_TRANSACTION);
262 
263  EXPECT_FALSE(m_mdl_context.try_acquire_lock(&m_request));
264  EXPECT_DEATH(m_mdl_context.destroy(),
265  ".*Assertion.*MDL_TRANSACTION.*is_empty.*");
266  m_mdl_context.release_transactional_locks();
267 }
268 #endif // GTEST_HAS_DEATH_TEST && !defined(DBUG_OFF)
269 
270 
271 
272 /*
273  The most basic test: just construct and destruct our test fixture.
274  */
275 TEST_F(MDLTest, ConstructAndDestruct)
276 {
277 }
278 
279 
280 void MDLTest::test_one_simple_shared_lock(enum_mdl_type lock_type)
281 {
282  m_request.init(MDL_key::TABLE, db_name, table_name1, lock_type,
283  MDL_TRANSACTION);
284 
285  EXPECT_EQ(lock_type, m_request.type);
286  EXPECT_EQ(m_null_ticket, m_request.ticket);
287 
288  EXPECT_FALSE(m_mdl_context.try_acquire_lock(&m_request));
289  EXPECT_NE(m_null_ticket, m_request.ticket);
290  EXPECT_TRUE(m_mdl_context.has_locks());
291  EXPECT_TRUE(m_mdl_context.
292  is_lock_owner(MDL_key::TABLE, db_name, table_name1, lock_type));
293 
294  MDL_request request_2;
295  request_2.init(&m_request.key, lock_type, MDL_TRANSACTION);
296  EXPECT_FALSE(m_mdl_context.try_acquire_lock(&request_2));
297  EXPECT_EQ(m_request.ticket, request_2.ticket);
298 
299  m_mdl_context.release_transactional_locks();
300  EXPECT_FALSE(m_mdl_context.has_locks());
301 }
302 
303 
304 /*
305  Acquires one lock of type MDL_SHARED.
306  */
307 TEST_F(MDLTest, OneShared)
308 {
309  test_one_simple_shared_lock(MDL_SHARED);
310 }
311 
312 
313 /*
314  Acquires one lock of type MDL_SHARED_HIGH_PRIO.
315  */
316 TEST_F(MDLTest, OneSharedHighPrio)
317 {
318  test_one_simple_shared_lock(MDL_SHARED_HIGH_PRIO);
319 }
320 
321 
322 /*
323  Acquires one lock of type MDL_SHARED_READ.
324  */
325 TEST_F(MDLTest, OneSharedRead)
326 {
327  test_one_simple_shared_lock(MDL_SHARED_READ);
328 }
329 
330 
331 /*
332  Acquires one lock of type MDL_SHARED_WRITE.
333  */
334 TEST_F(MDLTest, OneSharedWrite)
335 {
336  test_one_simple_shared_lock(MDL_SHARED_WRITE);
337 }
338 
339 
340 /*
341  Acquires one lock of type MDL_EXCLUSIVE.
342  */
343 TEST_F(MDLTest, OneExclusive)
344 {
345  const enum_mdl_type lock_type= MDL_EXCLUSIVE;
346  m_request.init(MDL_key::TABLE, db_name, table_name1, lock_type,
347  MDL_TRANSACTION);
348  EXPECT_EQ(m_null_ticket, m_request.ticket);
349 
350  m_request_list.push_front(&m_request);
351  m_request_list.push_front(&m_global_request);
352 
353  EXPECT_FALSE(m_mdl_context.acquire_locks(&m_request_list, long_timeout));
354 
355  EXPECT_NE(m_null_ticket, m_request.ticket);
356  EXPECT_NE(m_null_ticket, m_global_request.ticket);
357  EXPECT_TRUE(m_mdl_context.has_locks());
358  EXPECT_TRUE(m_mdl_context.
359  is_lock_owner(MDL_key::TABLE, db_name, table_name1, lock_type));
360  EXPECT_TRUE(m_mdl_context.
361  is_lock_owner(MDL_key::GLOBAL, "", "", MDL_INTENTION_EXCLUSIVE));
362  EXPECT_TRUE(m_request.ticket->is_upgradable_or_exclusive());
363 
364  m_mdl_context.release_transactional_locks();
365  EXPECT_FALSE(m_mdl_context.has_locks());
366 }
367 
368 
369 /*
370  Acquires two locks, on different tables, of type MDL_SHARED.
371  Verifies that they are independent.
372  */
373 TEST_F(MDLTest, TwoShared)
374 {
375  MDL_request request_2;
376  m_request.init(MDL_key::TABLE, db_name, table_name1, MDL_SHARED, MDL_EXPLICIT);
377  request_2.init(MDL_key::TABLE, db_name, table_name2, MDL_SHARED, MDL_EXPLICIT);
378 
379  EXPECT_FALSE(m_mdl_context.try_acquire_lock(&m_request));
380  EXPECT_FALSE(m_mdl_context.try_acquire_lock(&request_2));
381  EXPECT_TRUE(m_mdl_context.has_locks());
382  ASSERT_NE(m_null_ticket, m_request.ticket);
383  ASSERT_NE(m_null_ticket, request_2.ticket);
384 
385  EXPECT_TRUE(m_mdl_context.
386  is_lock_owner(MDL_key::TABLE, db_name, table_name1, MDL_SHARED));
387  EXPECT_TRUE(m_mdl_context.
388  is_lock_owner(MDL_key::TABLE, db_name, table_name2, MDL_SHARED));
389  EXPECT_FALSE(m_mdl_context.
390  is_lock_owner(MDL_key::TABLE, db_name, table_name3, MDL_SHARED));
391 
392  m_mdl_context.release_lock(m_request.ticket);
393  EXPECT_FALSE(m_mdl_context.
394  is_lock_owner(MDL_key::TABLE, db_name, table_name1, MDL_SHARED));
395  EXPECT_TRUE(m_mdl_context.has_locks());
396 
397  m_mdl_context.release_lock(request_2.ticket);
398  EXPECT_FALSE(m_mdl_context.
399  is_lock_owner(MDL_key::TABLE, db_name, table_name2, MDL_SHARED));
400  EXPECT_FALSE(m_mdl_context.has_locks());
401 }
402 
403 
404 /*
405  Verifies that two different contexts can acquire a shared lock
406  on the same table.
407  */
408 TEST_F(MDLTest, SharedLocksBetweenContexts)
409 {
410  MDL_context mdl_context2;
411  mdl_context2.init(this);
412  MDL_request request_2;
413  m_request.init(MDL_key::TABLE, db_name, table_name1, MDL_SHARED,
414  MDL_TRANSACTION);
415  request_2.init(MDL_key::TABLE, db_name, table_name1, MDL_SHARED,
416  MDL_TRANSACTION);
417 
418  EXPECT_FALSE(m_mdl_context.try_acquire_lock(&m_request));
419  EXPECT_FALSE(mdl_context2.try_acquire_lock(&request_2));
420 
421  EXPECT_TRUE(m_mdl_context.
422  is_lock_owner(MDL_key::TABLE, db_name, table_name1, MDL_SHARED));
423  EXPECT_TRUE(mdl_context2.
424  is_lock_owner(MDL_key::TABLE, db_name, table_name1, MDL_SHARED));
425 
426  m_mdl_context.release_transactional_locks();
427  mdl_context2.release_transactional_locks();
428 }
429 
430 
431 /*
432  Verifies that we can upgrade a shared lock to exclusive.
433  */
434 TEST_F(MDLTest, UpgradeSharedUpgradable)
435 {
436  m_request.init(MDL_key::TABLE, db_name, table_name1, MDL_SHARED_UPGRADABLE,
437  MDL_TRANSACTION);
438 
439  m_request_list.push_front(&m_request);
440  m_request_list.push_front(&m_global_request);
441 
442  EXPECT_FALSE(m_mdl_context.acquire_locks(&m_request_list, long_timeout));
443  EXPECT_FALSE(m_mdl_context.
444  upgrade_shared_lock(m_request.ticket, MDL_EXCLUSIVE, long_timeout));
445  EXPECT_EQ(MDL_EXCLUSIVE, m_request.ticket->get_type());
446 
447  // Another upgrade should be a no-op.
448  EXPECT_FALSE(m_mdl_context.
449  upgrade_shared_lock(m_request.ticket, MDL_EXCLUSIVE, long_timeout));
450  EXPECT_EQ(MDL_EXCLUSIVE, m_request.ticket->get_type());
451 
452  m_mdl_context.release_transactional_locks();
453 }
454 
455 
456 /*
457  Verifies that locks are released when we roll back to a savepoint.
458  */
459 TEST_F(MDLTest, SavePoint)
460 {
461  MDL_request request_2;
462  MDL_request request_3;
463  MDL_request request_4;
464  m_request.init(MDL_key::TABLE, db_name, table_name1, MDL_SHARED,
465  MDL_TRANSACTION);
466  request_2.init(MDL_key::TABLE, db_name, table_name2, MDL_SHARED,
467  MDL_TRANSACTION);
468  request_3.init(MDL_key::TABLE, db_name, table_name3, MDL_SHARED,
469  MDL_TRANSACTION);
470  request_4.init(MDL_key::TABLE, db_name, table_name4, MDL_SHARED,
471  MDL_TRANSACTION);
472 
473  EXPECT_FALSE(m_mdl_context.try_acquire_lock(&m_request));
474  EXPECT_FALSE(m_mdl_context.try_acquire_lock(&request_2));
475  MDL_savepoint savepoint= m_mdl_context.mdl_savepoint();
476  EXPECT_FALSE(m_mdl_context.try_acquire_lock(&request_3));
477  EXPECT_FALSE(m_mdl_context.try_acquire_lock(&request_4));
478 
479  EXPECT_TRUE(m_mdl_context.
480  is_lock_owner(MDL_key::TABLE, db_name, table_name1, MDL_SHARED));
481  EXPECT_TRUE(m_mdl_context.
482  is_lock_owner(MDL_key::TABLE, db_name, table_name2, MDL_SHARED));
483  EXPECT_TRUE(m_mdl_context.
484  is_lock_owner(MDL_key::TABLE, db_name, table_name3, MDL_SHARED));
485  EXPECT_TRUE(m_mdl_context.
486  is_lock_owner(MDL_key::TABLE, db_name, table_name4, MDL_SHARED));
487 
488  m_mdl_context.rollback_to_savepoint(savepoint);
489  EXPECT_TRUE(m_mdl_context.
490  is_lock_owner(MDL_key::TABLE, db_name, table_name1, MDL_SHARED));
491  EXPECT_TRUE(m_mdl_context.
492  is_lock_owner(MDL_key::TABLE, db_name, table_name2, MDL_SHARED));
493  EXPECT_FALSE(m_mdl_context.
494  is_lock_owner(MDL_key::TABLE, db_name, table_name3, MDL_SHARED));
495  EXPECT_FALSE(m_mdl_context.
496  is_lock_owner(MDL_key::TABLE, db_name, table_name4, MDL_SHARED));
497 
498  m_mdl_context.release_transactional_locks();
499  EXPECT_FALSE(m_mdl_context.
500  is_lock_owner(MDL_key::TABLE, db_name, table_name1, MDL_SHARED));
501  EXPECT_FALSE(m_mdl_context.
502  is_lock_owner(MDL_key::TABLE, db_name, table_name2, MDL_SHARED));
503 }
504 
505 
506 /*
507  Verifies that we can grab shared locks concurrently, in different threads.
508  */
509 TEST_F(MDLTest, ConcurrentShared)
510 {
511  Notification lock_grabbed;
512  Notification release_locks;
513  MDL_thread mdl_thread(table_name1, MDL_SHARED, &lock_grabbed,
514  &release_locks, NULL, NULL);
515  mdl_thread.start();
516  lock_grabbed.wait_for_notification();
517 
518  m_request.init(MDL_key::TABLE, db_name, table_name1, MDL_SHARED,
519  MDL_TRANSACTION);
520 
521  EXPECT_FALSE(m_mdl_context.acquire_lock(&m_request, long_timeout));
522  EXPECT_TRUE(m_mdl_context.
523  is_lock_owner(MDL_key::TABLE, db_name, table_name1, MDL_SHARED));
524 
525  release_locks.notify();
526  mdl_thread.join();
527 
528  m_mdl_context.release_transactional_locks();
529 }
530 
531 
532 /*
533  Verifies that we cannot grab an exclusive lock on something which
534  is locked with a shared lock in a different thread.
535  */
536 TEST_F(MDLTest, ConcurrentSharedExclusive)
537 {
538  expected_error= ER_LOCK_WAIT_TIMEOUT;
539 
540  Notification lock_grabbed;
541  Notification release_locks;
542  MDL_thread mdl_thread(table_name1, MDL_SHARED, &lock_grabbed, &release_locks,
543  NULL, NULL);
544  mdl_thread.ignore_notify();
545  mdl_thread.start();
546  lock_grabbed.wait_for_notification();
547 
548  m_request.init(MDL_key::TABLE, db_name, table_name1, MDL_EXCLUSIVE,
549  MDL_TRANSACTION);
550 
551  m_request_list.push_front(&m_request);
552  m_request_list.push_front(&m_global_request);
553 
554  // We should *not* be able to grab the lock here.
555  EXPECT_TRUE(m_mdl_context.acquire_locks(&m_request_list, zero_timeout));
556  EXPECT_FALSE(m_mdl_context.
557  is_lock_owner(MDL_key::TABLE,
558  db_name, table_name1, MDL_EXCLUSIVE));
559 
560  release_locks.notify();
561  mdl_thread.join();
562 
563  // Now we should be able to grab the lock.
564  EXPECT_FALSE(m_mdl_context.acquire_locks(&m_request_list, zero_timeout));
565  EXPECT_NE(m_null_ticket, m_request.ticket);
566 
567  m_mdl_context.release_transactional_locks();
568 }
569 
570 
571 /*
572  Verifies that we cannot grab a shared lock on something which
573  is locked exclusively in a different thread.
574  */
575 TEST_F(MDLTest, ConcurrentExclusiveShared)
576 {
577  Notification lock_grabbed;
578  Notification release_locks;
579  MDL_thread mdl_thread(table_name1, MDL_EXCLUSIVE,
580  &lock_grabbed, &release_locks, NULL, NULL);
581  mdl_thread.start();
582  lock_grabbed.wait_for_notification();
583 
584  m_request.init(MDL_key::TABLE, db_name, table_name1, MDL_SHARED,
585  MDL_TRANSACTION);
586 
587  // We should *not* be able to grab the lock here.
588  EXPECT_FALSE(m_mdl_context.try_acquire_lock(&m_request));
589  EXPECT_EQ(m_null_ticket, m_request.ticket);
590 
591  release_locks.notify();
592 
593  // The other thread should eventually release its locks.
594  EXPECT_FALSE(m_mdl_context.acquire_lock(&m_request, long_timeout));
595  EXPECT_NE(m_null_ticket, m_request.ticket);
596 
597  mdl_thread.join();
598  m_mdl_context.release_transactional_locks();
599 }
600 
601 
602 /*
603  Verifies the following scenario:
604  Thread 1: grabs a shared upgradable lock.
605  Thread 2: grabs a shared lock.
606  Thread 1: asks for an upgrade to exclusive (needs to wait for thread 2)
607  Thread 2: gets notified, and releases lock.
608  Thread 1: gets the exclusive lock.
609  */
610 TEST_F(MDLTest, ConcurrentUpgrade)
611 {
612  m_request.init(MDL_key::TABLE, db_name, table_name1, MDL_SHARED_UPGRADABLE,
613  MDL_TRANSACTION);
614  m_request_list.push_front(&m_request);
615  m_request_list.push_front(&m_global_request);
616 
617  EXPECT_FALSE(m_mdl_context.acquire_locks(&m_request_list, long_timeout));
618  EXPECT_TRUE(m_mdl_context.
619  is_lock_owner(MDL_key::TABLE,
620  db_name, table_name1, MDL_SHARED_UPGRADABLE));
621  EXPECT_FALSE(m_mdl_context.
622  is_lock_owner(MDL_key::TABLE,
623  db_name, table_name1, MDL_EXCLUSIVE));
624 
625  Notification lock_grabbed;
626  Notification release_locks;
627  MDL_thread mdl_thread(table_name1, MDL_SHARED, &lock_grabbed, &release_locks,
628  NULL, NULL);
629  mdl_thread.start();
630  lock_grabbed.wait_for_notification();
631 
632  EXPECT_FALSE(m_mdl_context.
633  upgrade_shared_lock(m_request.ticket, MDL_EXCLUSIVE, long_timeout));
634  EXPECT_TRUE(m_mdl_context.
635  is_lock_owner(MDL_key::TABLE,
636  db_name, table_name1, MDL_EXCLUSIVE));
637 
638  mdl_thread.join();
639  m_mdl_context.release_transactional_locks();
640 }
641 
642 
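/*
 Verifies that while another thread holds a SHARED_UPGRADABLE lock on a
 table we can still acquire a SHARED_WRITE lock on it, while a second
 SHARED_UPGRADABLE request times out, since SU is not self-compatible.
 */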
643 TEST_F(MDLTest, UpgradableConcurrency)
644 {
645  MDL_request request_2;
646  MDL_request_list request_list;
647  Notification lock_grabbed;
648  Notification release_locks;
649  MDL_thread mdl_thread(table_name1, MDL_SHARED_UPGRADABLE,
650  &lock_grabbed, &release_locks, NULL, NULL);
651  mdl_thread.start();
652  lock_grabbed.wait_for_notification();
653 
654  // We should be able to take a SW lock.
655  m_request.init(MDL_key::TABLE, db_name, table_name1, MDL_SHARED_WRITE,
656  MDL_TRANSACTION);
657  EXPECT_FALSE(m_mdl_context.try_acquire_lock(&m_request));
658  EXPECT_NE(m_null_ticket, m_request.ticket);
659 
660  // But SHARED_UPGRADABLE is not compatible with itself
661  expected_error= ER_LOCK_WAIT_TIMEOUT;
662  request_2.init(MDL_key::TABLE, db_name, table_name1, MDL_SHARED_UPGRADABLE,
663  MDL_TRANSACTION);
664  request_list.push_front(&m_global_request);
665  request_list.push_front(&request_2);
666  EXPECT_TRUE(m_mdl_context.acquire_locks(&request_list, zero_timeout));
667  EXPECT_EQ(m_null_ticket, request_2.ticket);
668 
669  release_locks.notify();
670 
671  mdl_thread.join();
672  m_mdl_context.release_transactional_locks();
673 }
674 
675 
676 /*
677  Verifies the following scenario:
678  starvation of low priority lock requests. The lock is always granted to the
679  high priority lock request in the wait queue, as max_write_lock_count is a large value.
680  - max_write_lock_count == default value, i.e. ~(ulong)0L
681  - THREAD 1: Acquires X lock on the table.
682  - THREAD 2: Requests for SR lock on the table.
683  - THREAD 3: Requests for SW lock on the table.
684  - THREAD 4: Requests for SNRW on the table.
685  - THREAD 1: Releases X lock.
686  - THREAD 5: Requests for SNRW lock on the table.
687  - THREAD 4: Releases SNRW lock.
688  - THREAD 2,3: Check whether these threads got the lock on the table.
689  Although THREAD 2 and 3 requested their locks before THREAD 4's and
690  THREAD 5's SNRW locks, the lock is granted to THREAD 4 and 5 first.
691 */
692 TEST_F(MDLTest, HogLockTest1)
693 {
694  Notification thd_lock_grabbed[5];
695  Notification thd_release_locks[5];
696  Notification thd_lock_blocked[5];
697  Notification thd_lock_released[5];
698 
699  /* Locks taken by the threads */
700  enum {THD1_X, THD2_SR, THD3_SW, THD4_SNRW, THD5_SNRW};
701 
702  /*
703  THREAD1: Acquiring X lock on table.
704  Lock Wait Queue: <empty>
705  Lock granted: <empty>
706  */
707  MDL_thread mdl_thread1(table_name1, MDL_EXCLUSIVE, &thd_lock_grabbed[THD1_X],
708  &thd_release_locks[THD1_X], &thd_lock_blocked[THD1_X],
709  &thd_lock_released[THD1_X]);
710  mdl_thread1.start();
711  thd_lock_grabbed[THD1_X].wait_for_notification();
712 
713  /*
714  THREAD2: Requesting SR lock on table.
715  Lock Wait Queue: SR
716  Lock granted: X
717  */
718  MDL_thread mdl_thread2(table_name1, MDL_SHARED_READ,
719  &thd_lock_grabbed[THD2_SR],
720  &thd_release_locks[THD2_SR],
721  &thd_lock_blocked[THD2_SR],
722  &thd_lock_released[THD2_SR]);
723  mdl_thread2.start();
724  thd_lock_blocked[THD2_SR].wait_for_notification();
725 
726  /*
727  THREAD3: Requesting SW lock on table.
728  Lock Wait Queue: SR<--SW
729  Lock granted: X
730  */
731  MDL_thread mdl_thread3(table_name1, MDL_SHARED_WRITE,
732  &thd_lock_grabbed[THD3_SW],
733  &thd_release_locks[THD3_SW],
734  &thd_lock_blocked[THD3_SW],
735  &thd_lock_released[THD3_SW]);
736  mdl_thread3.start();
737  thd_lock_blocked[THD3_SW].wait_for_notification();
738 
739  /*
740  THREAD4: Requesting SNRW lock on table.
741  Lock Wait Queue: SR<--SW<--SNRW
742  Lock granted: X
743  */
744  MDL_thread mdl_thread4(table_name1, MDL_SHARED_NO_READ_WRITE,
745  &thd_lock_grabbed[THD4_SNRW],
746  &thd_release_locks[THD4_SNRW],
747  &thd_lock_blocked[THD4_SNRW],
748  &thd_lock_released[THD4_SNRW]);
749  mdl_thread4.start();
750  thd_lock_blocked[THD4_SNRW].wait_for_notification();
751 
752  /* THREAD 1: Release X lock. */
753  thd_release_locks[THD1_X].notify();
754  thd_lock_released[THD1_X].wait_for_notification();
755 
756  /*
757  Lock Wait Queue: SR<--SW
758  Lock granted: SNRW
759  */
760  thd_lock_grabbed[THD4_SNRW].wait_for_notification();
761 
762  /*
763  THREAD 5: Requests SNRW lock on the table.
764  Lock Wait Queue: SR<--SW<--SNRW
765  Lock granted: SNRW
766  */
767  MDL_thread mdl_thread5(table_name1, MDL_SHARED_NO_READ_WRITE,
768  &thd_lock_grabbed[THD5_SNRW],
769  &thd_release_locks[THD5_SNRW],
770  &thd_lock_blocked[THD5_SNRW],
771  &thd_lock_released[THD5_SNRW]);
772  mdl_thread5.start();
773  thd_lock_blocked[THD5_SNRW].wait_for_notification();
774 
775  /* THREAD 4: Release SNRW lock */
776  thd_release_locks[THD4_SNRW].notify();
777  thd_lock_released[THD4_SNRW].wait_for_notification();
778 
779  /* THREAD 2: Is Lock granted to me? */
780  EXPECT_FALSE((mdl_thread2.get_mdl_context()).
781  is_lock_owner(MDL_key::TABLE, db_name, table_name1,
782  MDL_SHARED_READ));
783  /* THREAD 3: Is Lock granted to me? */
784  EXPECT_FALSE((mdl_thread3.get_mdl_context()).
785  is_lock_owner(MDL_key::TABLE, db_name, table_name1,
786  MDL_SHARED_WRITE));
787  /*
788  THREAD 5: Lock is granted to THREAD 5 as priority is higher.
789  Lock Wait Queue: SR<--SW
790  Lock granted: SNRW
791  */
792  thd_lock_grabbed[THD5_SNRW].wait_for_notification();
793  thd_release_locks[THD5_SNRW].notify();
794  thd_lock_released[THD5_SNRW].wait_for_notification();
795 
796  /*CLEANUP*/
797  thd_lock_grabbed[THD2_SR].wait_for_notification();
798  thd_release_locks[THD2_SR].notify();
799  thd_lock_released[THD2_SR].wait_for_notification();
800 
801  thd_lock_grabbed[THD3_SW].wait_for_notification();
802  thd_release_locks[THD3_SW].notify();
803  thd_lock_released[THD3_SW].wait_for_notification();
804 
805  mdl_thread1.join();
806  mdl_thread2.join();
807  mdl_thread3.join();
808  mdl_thread4.join();
809  mdl_thread5.join();
810 }
811 
812 
813 /*
814  Verifies the following scenario:
815  after high priority lock requests have been granted max_write_lock_count (= 1)
816  times, the lock is granted to the starving low priority lock requests
817  in the wait queue.
818  - max_write_lock_count= 1
819  - THREAD 1: Acquires X lock on the table.
820  - THREAD 2: Requests for SR lock on the table.
821  - THREAD 3: Requests for SW lock on the table.
822  - THREAD 4: Requests for SNRW on the table.
823  - THREAD 1: Releases X lock. m_hog_lock_count= 1
824  - THREAD 5: Requests for SNRW lock on the table.
825  - THREAD 4: Releases SNRW lock.
826  - THREAD 2,3: Release lock.
827  While releasing the X lock held by THREAD 1, m_hog_lock_count becomes 1, and
828  while releasing the SNRW lock in THREAD 4, the lock is granted to the starving
829  low priority locks, since m_hog_lock_count == max_write_lock_count.
830  So THREAD 2 and 3 get the lock here instead of THREAD 5.
831 */
832 TEST_F(MDLTest, HogLockTest2)
833 {
834  Notification thd_lock_grabbed[5];
835  Notification thd_release_locks[5];
836  Notification thd_lock_blocked[5];
837  Notification thd_lock_released[5];
838  const ulong org_max_write_lock_count= max_write_lock_count;
839 
840  /* Locks taken by the threads */
841  enum {THD1_X, THD2_SR, THD3_SW, THD4_SNRW, THD5_SNRW};
842 
843  max_write_lock_count= 1;
844 
845  /*
846  THREAD1: Acquiring X lock on table.
847  Lock Wait Queue: <empty>
848  Lock Granted: <empty>
849  */
850  MDL_thread mdl_thread1(table_name1, MDL_EXCLUSIVE,
851  &thd_lock_grabbed[THD1_X],
852  &thd_release_locks[THD1_X],
853  &thd_lock_blocked[THD1_X],
854  &thd_lock_released[THD1_X]);
855  mdl_thread1.start();
856  thd_lock_grabbed[THD1_X].wait_for_notification();
857 
858  /*
859  THREAD2: Requesting SR lock on table.
860  Lock Wait Queue: SR
861  Lock Granted: X
862  */
863  MDL_thread mdl_thread2(table_name1, MDL_SHARED_READ,
864  &thd_lock_grabbed[THD2_SR],
865  &thd_release_locks[THD2_SR],
866  &thd_lock_blocked[THD2_SR],
867  &thd_lock_released[THD2_SR]);
868  mdl_thread2.start();
869  thd_lock_blocked[THD2_SR].wait_for_notification();
870 
871  /*
872  THREAD3: Requesting SW lock on table.
873  Lock Wait Queue: SR<--SW
874  Lock Granted: X
875  */
876  MDL_thread mdl_thread3(table_name1, MDL_SHARED_WRITE,
877  &thd_lock_grabbed[THD3_SW],
878  &thd_release_locks[THD3_SW],
879  &thd_lock_blocked[THD3_SW],
880  &thd_lock_released[THD3_SW]);
881  mdl_thread3.start();
882  thd_lock_blocked[THD3_SW].wait_for_notification();
883 
884  /*
885  THREAD4: Requesting SNRW lock on table.
886  Lock Wait Queue: SR<--SW<--SNRW
887  Lock Granted: X
888  */
889  MDL_thread mdl_thread4(table_name1, MDL_SHARED_NO_READ_WRITE,
890  &thd_lock_grabbed[THD4_SNRW],
891  &thd_release_locks[THD4_SNRW],
892  &thd_lock_blocked[THD4_SNRW],
893  &thd_lock_released[THD4_SNRW]);
894  mdl_thread4.start();
895  thd_lock_blocked[THD4_SNRW].wait_for_notification();
896 
897  /*
898  THREAD 1: Release X lock.
899  Lock Wait Queue: SR<--SW
900  Lock Granted: SNRW
901  m_hog_lock_count= 1
902  */
903  thd_release_locks[THD1_X].notify();
904  thd_lock_released[THD1_X].wait_for_notification();
905 
906  /* Lock is granted to THREAD 4 */
907  thd_lock_grabbed[THD4_SNRW].wait_for_notification();
908 
909  /*
910  THREAD 5: Requests SNRW lock on the table.
911  Lock Wait Queue: SR<--SW<--SNRW
912  Lock Granted: SNRW
913  */
914  MDL_thread mdl_thread5(table_name1, MDL_SHARED_NO_READ_WRITE,
915  &thd_lock_grabbed[THD5_SNRW],
916  &thd_release_locks[THD5_SNRW],
917  &thd_lock_blocked[THD5_SNRW],
918  &thd_lock_released[THD5_SNRW]);
919  mdl_thread5.start();
920  thd_lock_blocked[THD5_SNRW].wait_for_notification();
921 
922  /* THREAD 4: Release SNRW lock */
923  thd_release_locks[THD4_SNRW].notify();
924  thd_lock_released[THD4_SNRW].wait_for_notification();
925 
926  /*
927  THREAD 2: Since max_write_lock_count == m_hog_lock_count, Lock is granted to
928  THREAD 2 and 3 instead of THREAD 5.
929  Lock Wait Queue: SNRW
930  Lock Granted: SR, SW
931  */
932  thd_lock_grabbed[THD2_SR].wait_for_notification();
933  thd_lock_grabbed[THD3_SW].wait_for_notification();
934 
935  thd_release_locks[THD2_SR].notify();
936  thd_lock_released[THD2_SR].wait_for_notification();
937 
938  thd_release_locks[THD3_SW].notify();
939  thd_lock_released[THD3_SW].wait_for_notification();
940 
941  /* Cleanup */
942  thd_lock_grabbed[THD5_SNRW].wait_for_notification();
943  thd_release_locks[THD5_SNRW].notify();
944  thd_lock_released[THD5_SNRW].wait_for_notification();
945 
946  mdl_thread1.join();
947  mdl_thread2.join();
948  mdl_thread3.join();
949  mdl_thread4.join();
950  mdl_thread5.join();
951 
952  max_write_lock_count= org_max_write_lock_count;
953 }
954 
955 
956 /*
957  Verifies lock priorities:
958  X has priority over--> S, SR, SW, SU, (SNW, SNRW)
959  SNRW has priority over--> SR, SW
960  SNW has priority over--> SW
961 
962  - max_write_lock_count contains the default value, i.e. ~(ulong)0L
963  - THREAD 1: Acquires X lock on the table.
964  - THREAD 2: Requests for S lock on the table.
965  - THREAD 3: Requests for SR lock on the table.
966  - THREAD 4: Requests for SW lock on the table.
967  - THREAD 5: Requests for SU lock on the table.
968  - THREAD 6: Requests for SNRW on the table.
969  - THREAD 1: Releases X lock.
970  The lock is granted to THREAD 2 and THREAD 5.
971  - THREAD 5: Releases the SU lock.
972  The lock is granted to THREAD 6.
973  - THREAD 7: Requests for SNW lock on the table.
974  - THREAD 6: Releases SNRW lock.
975  The lock is granted to THREAD 3 and THREAD 7.
976  - THREAD 4: Check whether the thread got the lock on the table.
977  At each lock release, locks of equal priority are granted together.
978  At the end only SW remains in the wait queue, as the lock is granted to
979  the SNW lock request.
980  */
981 TEST_F(MDLTest, LockPriorityTest)
982 {
983  Notification thd_lock_grabbed[7];
984  Notification thd_release_locks[7];
985  Notification thd_lock_blocked[7];
986  Notification thd_lock_released[7];
987 
988  /* Locks taken by the threads */
989  enum {THD1_X, THD2_S, THD3_SR, THD4_SW, THD5_SU, THD6_SNRW, THD7_SNW};
990 
991  /*THREAD1: Acquiring X lock on table */
992  MDL_thread mdl_thread1(table_name1, MDL_EXCLUSIVE, &thd_lock_grabbed[THD1_X],
993  &thd_release_locks[THD1_X], &thd_lock_blocked[THD1_X],
994  &thd_lock_released[THD1_X]);
995  mdl_thread1.start();
996  thd_lock_grabbed[THD1_X].wait_for_notification();
997 
998  /*
999  THREAD2: Requesting S lock on table.
1000  Lock Wait Queue: S
1001  Lock Granted: X
1002  */
1003  MDL_thread mdl_thread2(table_name1, MDL_SHARED, &thd_lock_grabbed[THD2_S],
1004  &thd_release_locks[THD2_S], &thd_lock_blocked[THD2_S],
1005  &thd_lock_released[THD2_S]);
1006  mdl_thread2.start();
1007  thd_lock_blocked[THD2_S].wait_for_notification();
1008 
1009  /*
1010  THREAD3: Requesting SR lock on table.
1011  Lock Wait Queue: S<--SR
1012  Lock Granted: X
1013  */
1014  MDL_thread mdl_thread3(table_name1, MDL_SHARED_READ, &thd_lock_grabbed[THD3_SR],
1015  &thd_release_locks[THD3_SR], &thd_lock_blocked[THD3_SR],
1016  &thd_lock_released[THD3_SR]);
1017  mdl_thread3.start();
1018  thd_lock_blocked[THD3_SR].wait_for_notification();
1019 
1020  /*
1021  THREAD4: Requesting SW lock on table.
1022  Lock Wait Queue: S<--SR<--SW
1023  Lock Granted: X
1024  */
1025  MDL_thread mdl_thread4(table_name1, MDL_SHARED_WRITE,
1026  &thd_lock_grabbed[THD4_SW],
1027  &thd_release_locks[THD4_SW],
1028  &thd_lock_blocked[THD4_SW],
1029  &thd_lock_released[THD4_SW]);
1030  mdl_thread4.start();
1031  thd_lock_blocked[THD4_SW].wait_for_notification();
1032 
1033  /*
1034  THREAD5: Requesting SU lock on table
1035  Lock Wait Queue: S<--SR<--SW<--SU
1036  Lock Granted: X
1037  */
1038  MDL_thread mdl_thread5(table_name1, MDL_SHARED_UPGRADABLE,
1039  &thd_lock_grabbed[THD5_SU],
1040  &thd_release_locks[THD5_SU],
1041  &thd_lock_blocked[THD5_SU],
1042  &thd_lock_released[THD5_SU]);
1043  mdl_thread5.start();
1044  thd_lock_blocked[THD5_SU].wait_for_notification();
1045 
1046  /*
1047  THREAD6: Requesting SNRW lock on table
1048  Lock Wait Queue: S<--SR<--SW<--SU<--SNRW
1049  Lock Granted: X
1050  */
1051  MDL_thread mdl_thread6(table_name1, MDL_SHARED_NO_READ_WRITE,
1052  &thd_lock_grabbed[THD6_SNRW],
1053  &thd_release_locks[THD6_SNRW],
1054  &thd_lock_blocked[THD6_SNRW],
1055  &thd_lock_released[THD6_SNRW]);
1056  mdl_thread6.start();
1057  thd_lock_blocked[THD6_SNRW].wait_for_notification();
1058 
1059  /*
1060  Lock wait Queue status: S<--SR<--SW<--SU<--SNRW
1061  THREAD 1: Release X lock.
1062  */
1063  thd_release_locks[THD1_X].notify();
1064  thd_lock_released[THD1_X].wait_for_notification();
1065 
1066  /*
1067  THREAD 2, 5: Verify and release locks.
1068  Lock wait Queue status: SR<--SW<--SNRW
1069  Lock Granted: S, SU
1070  */
1071  thd_lock_grabbed[THD2_S].wait_for_notification();
1072  thd_release_locks[THD2_S].notify();
1073  thd_lock_released[THD2_S].wait_for_notification();
1074 
1075  thd_lock_grabbed[THD5_SU].wait_for_notification();
1076  thd_release_locks[THD5_SU].notify();
1077  thd_lock_released[THD5_SU].wait_for_notification();
1078 
1079  /* Now the lock is granted to THREAD 6's SNRW lock request. */
1080  thd_lock_grabbed[THD6_SNRW].wait_for_notification();
1081 
1082  /*
1083  THREAD 7: Requests SNW lock on the table.
1084  Lock wait Queue status: SR<--SW<--SNW
1085  Lock Granted: SNRW
1086  */
1087  MDL_thread mdl_thread7(table_name1, MDL_SHARED_NO_WRITE,
1088  &thd_lock_grabbed[THD7_SNW],
1089  &thd_release_locks[THD7_SNW],
1090  &thd_lock_blocked[THD7_SNW],
1091  &thd_lock_released[THD7_SNW]);
1092  mdl_thread7.start();
1093  thd_lock_blocked[THD7_SNW].wait_for_notification();
1094 
1095  /* THREAD 6: Release SNRW lock */
1096  thd_release_locks[THD6_SNRW].notify();
1097  thd_lock_released[THD6_SNRW].wait_for_notification();
1098 
1099  /* Now lock is granted to THREAD 3 & 7 */
1100  thd_lock_grabbed[THD7_SNW].wait_for_notification();
1101  thd_lock_grabbed[THD3_SR].wait_for_notification();
1102 
1103  /*
1104  THREAD 3: Release SR lock
1105  Lock wait Queue status: SW
1106  Lock Granted: SR, SNW
1107  */
1108  thd_release_locks[THD3_SR].notify();
1109  thd_lock_released[THD3_SR].wait_for_notification();
1110 
1111  /* THREAD 4: Verify whether lock is granted or not*/
1112  EXPECT_FALSE((mdl_thread4.get_mdl_context()).
1113  is_lock_owner(MDL_key::TABLE, db_name, table_name1,
1114  MDL_SHARED_WRITE));
1115 
1116  /*CLEANUP*/
1117  thd_release_locks[THD7_SNW].notify();
1118  thd_lock_released[THD7_SNW].wait_for_notification();
1119 
1120  thd_lock_grabbed[THD4_SW].wait_for_notification();
1121  thd_release_locks[THD4_SW].notify();
1122  thd_lock_released[THD4_SW].wait_for_notification();
1123 
1124  mdl_thread1.join();
1125  mdl_thread2.join();
1126  mdl_thread3.join();
1127  mdl_thread4.join();
1128  mdl_thread5.join();
1129  mdl_thread6.join();
1130  mdl_thread7.join();
1131 }
1132 
1133 
1134 /*
1135  Verifies lock priorities when max_write_lock_count= 1
1136  X has priority over--> S, SR, SW, SU, (SNW, SNRW)
1137  SNRW has priority over--> SR, SW
1138  SNW has priority over--> SW
1139 
1140  - max_write_lock_count= 1
1141  - THREAD 1: Acquires X lock on the table.
1142  - THREAD 2: Requests for S lock on the table.
1143  - THREAD 3: Requests for SR lock on the table.
1144  - THREAD 4: Requests for SW lock on the table.
1145  - THREAD 5: Requests for SU lock on the table.
1146  - THREAD 6: Requests for X on the table.
1147  - THREAD 1: Releases X lock.
1148  The lock is granted to THREAD 6.
1149  - THREAD 7: Requests SNRW lock.
1150  - THREAD 6: Releases X lock.
1151  Lock is granted to THREAD 2,3,4,5.
1152  - THREAD 7: Check whether the lock is granted or not.
1153  */
1154 TEST_F(MDLTest, HogLockTest3)
1155 {
1156  Notification thd_lock_grabbed[7];
1157  Notification thd_release_locks[7];
1158  Notification thd_lock_blocked[7];
1159  Notification thd_lock_released[7];
1160  const ulong org_max_write_lock_count= max_write_lock_count;
1161 
1162  enum {THD1_X, THD2_S, THD3_SR, THD4_SW, THD5_SU, THD6_X, THD7_SNRW};
1163 
1164  max_write_lock_count= 1;
1165 
1166  /* THREAD1: Acquiring X lock on table. */
1167  MDL_thread mdl_thread1(table_name1, MDL_EXCLUSIVE, &thd_lock_grabbed[THD1_X],
1168  &thd_release_locks[THD1_X], &thd_lock_blocked[THD1_X],
1169  &thd_lock_released[THD1_X]);
1170  mdl_thread1.start();
1171  thd_lock_grabbed[THD1_X].wait_for_notification();
1172 
1173  /*
1174  THREAD2: Requesting S lock on table.
1175  Lock Wait Queue: S
1176  Lock Granted: X
1177  */
1178  MDL_thread mdl_thread2(table_name1, MDL_SHARED, &thd_lock_grabbed[THD2_S],
1179  &thd_release_locks[THD2_S], &thd_lock_blocked[THD2_S],
1180  &thd_lock_released[THD2_S]);
1181  mdl_thread2.start();
1182  thd_lock_blocked[THD2_S].wait_for_notification();
1183 
1184  /*
1185  THREAD3: Requesting SR lock on table.
1186  Lock Wait Queue: S<--SR
1187  Lock Granted: X
1188  */
1189  MDL_thread mdl_thread3(table_name1, MDL_SHARED_READ, &thd_lock_grabbed[THD3_SR],
1190  &thd_release_locks[THD3_SR], &thd_lock_blocked[THD3_SR],
1191  &thd_lock_released[THD3_SR]);
1192  mdl_thread3.start();
1193  thd_lock_blocked[THD3_SR].wait_for_notification();
1194 
1195  /*
1196  THREAD4: Requesting SW lock on table.
1197  Lock Wait Queue: S<--SR<--SW.
1198  Lock Granted: X
1199  */
1200  MDL_thread mdl_thread4(table_name1, MDL_SHARED_WRITE,
1201  &thd_lock_grabbed[THD4_SW],
1202  &thd_release_locks[THD4_SW],
1203  &thd_lock_blocked[THD4_SW],
1204  &thd_lock_released[THD4_SW]);
1205  mdl_thread4.start();
1206  thd_lock_blocked[THD4_SW].wait_for_notification();
1207 
1208  /*
1209  THREAD5: Requesting SU lock on table.
1210  Lock Wait Queue: S<--SR<--SW<--SU
1211  Lock Granted: X
1212  */
1213  MDL_thread mdl_thread5(table_name1, MDL_SHARED_UPGRADABLE,
1214  &thd_lock_grabbed[THD5_SU],
1215  &thd_release_locks[THD5_SU],
1216  &thd_lock_blocked[THD5_SU],
1217  &thd_lock_released[THD5_SU]);
1218  mdl_thread5.start();
1219  thd_lock_blocked[THD5_SU].wait_for_notification();
1220 
1221  /*
1222  THREAD6: Requesting X lock on table
1223  Lock Wait Queue: S<--SR<--SW<--SU<--X
1224  Lock Granted: X
1225  */
1226  MDL_thread mdl_thread6(table_name1, MDL_EXCLUSIVE, &thd_lock_grabbed[THD6_X],
1227  &thd_release_locks[THD6_X], &thd_lock_blocked[THD6_X],
1228  &thd_lock_released[THD6_X]);
1229  mdl_thread6.start();
1230  thd_lock_blocked[THD6_X].wait_for_notification();
1231 
1232  /*
1233  Lock wait Queue status: S<--SR<--SW<--SU<--X
1234  Lock Granted: X
1235  THREAD 1: Release X lock.
1236  */
1237  thd_release_locks[THD1_X].notify();
1238  thd_lock_released[THD1_X].wait_for_notification();
1239 
1240  /* Lock is granted to THREAD 6*/
1241  thd_lock_grabbed[THD6_X].wait_for_notification();
1242 
1243  /*
1244  THREAD7: Requesting SNRW lock on table
1245  Lock wait Queue status: S<--SR<--SW<--SU
1246  Lock Granted: X
1247  */
1248  MDL_thread mdl_thread7(table_name1, MDL_SHARED_NO_READ_WRITE,
1249  &thd_lock_grabbed[THD7_SNRW],
1250  &thd_release_locks[THD7_SNRW],
1251  &thd_lock_blocked[THD7_SNRW],
1252  &thd_lock_released[THD7_SNRW]);
1253  mdl_thread7.start();
1254  thd_lock_blocked[THD7_SNRW].wait_for_notification();
1255 
1256  /* THREAD 6: Release X lock. */
1257  thd_release_locks[THD6_X].notify();
1258  thd_lock_released[THD6_X].wait_for_notification();
1259 
1260  /* Lock is granted to THREAD 2, 3, 4, 5*/
1261  thd_lock_grabbed[THD2_S].wait_for_notification();
1262  thd_lock_grabbed[THD3_SR].wait_for_notification();
1263  thd_lock_grabbed[THD4_SW].wait_for_notification();
1264  thd_lock_grabbed[THD5_SU].wait_for_notification();
1265 
1266  /*
1267  Lock wait Queue status: <empty>
1268  Lock Granted: <empty>
1269  THREAD 7: high priority SNRW lock is still waiting.
1270  */
1271  EXPECT_FALSE((mdl_thread7.get_mdl_context()).
1272  is_lock_owner(MDL_key::TABLE, db_name, table_name1,
1273  MDL_SHARED_NO_READ_WRITE));
1274 
1275  /* CLEAN UP */
1276  thd_release_locks[THD2_S].notify();
1277  thd_lock_released[THD2_S].wait_for_notification();
1278 
1279  thd_release_locks[THD3_SR].notify();
1280  thd_lock_released[THD3_SR].wait_for_notification();
1281 
1282  thd_release_locks[THD4_SW].notify();
1283  thd_lock_released[THD4_SW].wait_for_notification();
1284 
1285  thd_release_locks[THD5_SU].notify();
1286  thd_lock_released[THD5_SU].wait_for_notification();
1287 
1288  thd_lock_grabbed[THD7_SNRW].wait_for_notification();
1289  thd_release_locks[THD7_SNRW].notify();
1290  thd_lock_released[THD7_SNRW].wait_for_notification();
1291 
1292  mdl_thread1.join();
1293  mdl_thread2.join();
1294  mdl_thread3.join();
1295  mdl_thread4.join();
1296  mdl_thread5.join();
1297  mdl_thread6.join();
1298  mdl_thread7.join();
1299 
1300  max_write_lock_count= org_max_write_lock_count;
1301 }
1302 
1303 
1304 /*
1305  Verifies whether m_hog_lock_count is reset
1306  when there are no low priority lock requests.
1307 
1308  - max_write_lock_count= 1
1309  - THREAD 1: Acquires X lock on the table.
1310  - THREAD 2: Requests for SU lock on the table.
1311  - THREAD 3: Requests for X lock on the table.
1312  - THREAD 1: Releases X lock.
1313  Lock is granted to THREAD 3
1314  m_hog_lock_count= 1;
1315  - THREAD 3: Releases X lock.
1316  Lock is granted to THREAD 2.
1317  m_hog_lock_count= 0;
1318  - THREAD 4: Requests for SNRW lock.
1319  - THREAD 5: Requests for SR lock.
1320  - THREAD 2: Releases SU lock.
1321  Lock is granted to THREAD 4.
1322  */
1323 TEST_F(MDLTest, HogLockTest4)
1324 {
1325  Notification thd_lock_grabbed[5];
1326  Notification thd_release_locks[5];
1327  Notification thd_lock_blocked[5];
1328  Notification thd_lock_released[5];
1329  const ulong org_max_write_lock_count= max_write_lock_count;
1330 
1331  /* Locks taken by the threads */
1332  enum {THD1_X, THD2_SU, THD3_X, THD4_SNRW, THD5_SR};
1333 
1334  max_write_lock_count= 1;
1335 
1336  /* THREAD1: Acquiring X lock on table */
1337  MDL_thread mdl_thread1(table_name1, MDL_EXCLUSIVE, &thd_lock_grabbed[THD1_X],
1338  &thd_release_locks[THD1_X], &thd_lock_blocked[THD1_X],
1339  &thd_lock_released[THD1_X]);
1340  mdl_thread1.start();
1341  thd_lock_grabbed[THD1_X].wait_for_notification();
1342 
1343  /* THREAD2: Requesting SU lock on table */
1344  MDL_thread mdl_thread2(table_name1, MDL_SHARED_UPGRADABLE,
1345  &thd_lock_grabbed[THD2_SU],
1346  &thd_release_locks[THD2_SU],
1347  &thd_lock_blocked[THD2_SU],
1348  &thd_lock_released[THD2_SU]);
1349  mdl_thread2.start();
1350  thd_lock_blocked[THD2_SU].wait_for_notification();
1351 
1352  /* THREAD3: Requesting X lock on table */
1353  MDL_thread mdl_thread3(table_name1, MDL_EXCLUSIVE, &thd_lock_grabbed[THD3_X],
1354  &thd_release_locks[THD3_X], &thd_lock_blocked[THD3_X],
1355  &thd_lock_released[THD3_X]);
1356  mdl_thread3.start();
1357  thd_lock_blocked[THD3_X].wait_for_notification();
1358 
1359  /*
1360  THREAD1: Release X lock.
1361  Lock Request Queue: SU<--X
1362  Lock Grant: X
1363  m_hog_lock_count= 1
1364  */
1365  thd_release_locks[THD1_X].notify();
1366  thd_lock_released[THD1_X].wait_for_notification();
1367  /* Lock is granted to THREAD 3 */
1368  thd_lock_grabbed[THD3_X].wait_for_notification();
1369 
1370  /*
1371  THREAD3: Release X lock.
1372  Lock Request Queue: <empty>
1373  Lock Grant: SU
1374  m_hog_lock_count= 0
1375  */
1376  thd_release_locks[THD3_X].notify();
1377  thd_lock_released[THD3_X].wait_for_notification();
1378  /*Lock is granted to THREAD 2 */
1379  thd_lock_grabbed[THD2_SU].wait_for_notification();
1380 
1381  /*
1382  THREAD4: Requesting SNRW lock on table.
1383  Lock Request Queue: SNRW
1384  Lock Grant: SU
1385  */
1386  MDL_thread mdl_thread4(table_name1, MDL_SHARED_NO_READ_WRITE,
1387  &thd_lock_grabbed[THD4_SNRW],
1388  &thd_release_locks[THD4_SNRW],
1389  &thd_lock_blocked[THD4_SNRW],
1390  &thd_lock_released[THD4_SNRW]);
1391  mdl_thread4.start();
1392  thd_lock_blocked[THD4_SNRW].wait_for_notification();
1393 
1394  /*
1395  THREAD5: Requesting SR lock on table.
1396  Lock Request Queue: SNRW<--SR
1397  Lock Grant: SU
1398  */
1399  MDL_thread mdl_thread5(table_name1, MDL_SHARED_READ,
1400  &thd_lock_grabbed[THD5_SR],
1401  &thd_release_locks[THD5_SR],
1402  &thd_lock_blocked[THD5_SR],
1403  &thd_lock_released[THD5_SR]);
1404  mdl_thread5.start();
1405  thd_lock_blocked[THD5_SR].wait_for_notification();
1406 
1407  /* THREAD 2: Release lock. */
1408  thd_release_locks[THD2_SU].notify();
1409  thd_lock_released[THD2_SU].wait_for_notification();
1410 
1411  /*
1412  Lock Request Queue: SR
1413  Lock Grant: SNRW
1414  The lock would be granted to THREAD 5 if m_hog_lock_count were not reset.
1415  */
1416  thd_lock_grabbed[THD4_SNRW].wait_for_notification();
1417 
1418  /* THREAD5: Lock is not granted */
1419  EXPECT_FALSE((mdl_thread5.get_mdl_context()).
1420  is_lock_owner(MDL_key::TABLE, db_name, table_name1,
1421  MDL_SHARED_READ));
1422 
1423  /* CLEAN UP */
1424  thd_release_locks[THD4_SNRW].notify();
1425  thd_lock_released[THD4_SNRW].wait_for_notification();
1426 
1427  thd_lock_grabbed[THD5_SR].wait_for_notification();
1428  thd_release_locks[THD5_SR].notify();
1429  thd_lock_released[THD5_SR].wait_for_notification();
1430 
1431  mdl_thread1.join();
1432  mdl_thread2.join();
1433  mdl_thread3.join();
1434  mdl_thread4.join();
1435  mdl_thread5.join();
1436 
1437  max_write_lock_count= org_max_write_lock_count;
1438 }
1439 
1440 
1441 /*
1442  Verifies the resetting of m_hog_lock_count when only a few of
1443  the waiting low priority locks are granted and the queue still
1444  has more low priority lock requests in it.
1445  m_hog_lock_count should not be reset to 0 when only a few low
1446  priority lock requests have been granted.
1447 
1448  - max_write_lock_count= 1
1449  - THREAD 1: Acquires X lock on the table.
1450  - THREAD 2: Requests for SNW lock on the table.
1451  - THREAD 3: Requests for SR lock on the table.
1452  - THREAD 4: Requests for SW lock on the table.
1453  - THREAD 5: Requests for SU lock on the table.
1454  - THREAD 1: Releases X lock.
1455  The lock is granted to THREAD 2 and 3, as they are of the same priority.
1456  - THREAD 6: Requests for SNRW lock.
1457  - THREAD 2: Releases SNW lock.
1458  The lock should be granted to THREAD 4 and 5, as
1459  m_hog_lock_count == max_write_lock_count.
1460  - THREAD 6: Check whether the lock is granted or not.
1461  */
1462 TEST_F(MDLTest, HogLockTest5)
1463 {
1464  Notification thd_lock_grabbed[6];
1465  Notification thd_release_locks[6];
1466  Notification thd_lock_blocked[6];
1467  Notification thd_lock_released[6];
1468  const ulong org_max_write_lock_count= max_write_lock_count;
1469 
1470  /* Locks taken by the threads */
1471  enum {THD1_X, THD2_SNW, THD3_SR, THD4_SW, THD5_SU, THD6_SNRW};
1472  max_write_lock_count= 1;
1473 
1474  /* THREAD1: Acquiring X lock on table. */
1475  MDL_thread mdl_thread1(table_name1, MDL_EXCLUSIVE, &thd_lock_grabbed[THD1_X],
1476  &thd_release_locks[THD1_X], &thd_lock_blocked[THD1_X],
1477  &thd_lock_released[THD1_X]);
1478  mdl_thread1.start();
1479  thd_lock_grabbed[THD1_X].wait_for_notification();
1480 
1481  /* THREAD2: Requesting SNW lock on table. */
1482  MDL_thread mdl_thread2(table_name1, MDL_SHARED_NO_WRITE,
1483  &thd_lock_grabbed[THD2_SNW],
1484  &thd_release_locks[THD2_SNW],
1485  &thd_lock_blocked[THD2_SNW],
1486  &thd_lock_released[THD2_SNW]);
1487  mdl_thread2.start();
1488  thd_lock_blocked[THD2_SNW].wait_for_notification();
1489 
1490  /* THREAD3: Requesting SR lock on table. */
1491  MDL_thread mdl_thread3(table_name1, MDL_SHARED_READ,
1492  &thd_lock_grabbed[THD3_SR],
1493  &thd_release_locks[THD3_SR],
1494  &thd_lock_blocked[THD3_SR],
1495  &thd_lock_released[THD3_SR]);
1496  mdl_thread3.start();
1497  thd_lock_blocked[THD3_SR].wait_for_notification();
1498 
1499  /* THREAD4: Requesting SW lock on table. */
1500  MDL_thread mdl_thread4(table_name1, MDL_SHARED_WRITE,
1501  &thd_lock_grabbed[THD4_SW],
1502  &thd_release_locks[THD4_SW],
1503  &thd_lock_blocked[THD4_SW],
1504  &thd_lock_released[THD4_SW]);
1505  mdl_thread4.start();
1506  thd_lock_blocked[THD4_SW].wait_for_notification();
1507 
1508  /* THREAD5: Requesting SU lock on table. */
1509  MDL_thread mdl_thread5(table_name1, MDL_SHARED_UPGRADABLE,
1510  &thd_lock_grabbed[THD5_SU],
1511  &thd_release_locks[THD5_SU],
1512  &thd_lock_blocked[THD5_SU],
1513  &thd_lock_released[THD5_SU]);
1514  mdl_thread5.start();
1515  thd_lock_blocked[THD5_SU].wait_for_notification();
1516 
1517  /*
1518  Lock wait Queue status: SNW<--SR<--SW<--SU
1519  Lock Granted: X
1520  THREAD 1: Release X lock.
1521  */
1522  thd_release_locks[THD1_X].notify();
1523  thd_lock_released[THD1_X].wait_for_notification();
1524 
1525  /*
1526  Lock wait Queue status: SW<--SU
1527  Lock Granted: SR, SNW
1528  The lock is granted to Thread 2 and 3.
1529  */
1530  thd_lock_grabbed[THD2_SNW].wait_for_notification();
1531  thd_lock_grabbed[THD3_SR].wait_for_notification();
1532 
1533  /*
1534  THREAD6: Requesting SNRW lock on table.
1535  Lock wait Queue status: SW<--SU<--SNRW
1536  Lock Granted: SR, SNW
1537  */
1538  MDL_thread mdl_thread6(table_name1, MDL_SHARED_NO_READ_WRITE,
1539  &thd_lock_grabbed[THD6_SNRW],
1540  &thd_release_locks[THD6_SNRW],
1541  &thd_lock_blocked[THD6_SNRW],
1542  &thd_lock_released[THD6_SNRW]);
1543  mdl_thread6.start();
1544  thd_lock_blocked[THD6_SNRW].wait_for_notification();
1545 
1546 
1547  /* Thread 2: Release SNW lock */
1548  thd_release_locks[THD2_SNW].notify();
1549  thd_lock_released[THD2_SNW].wait_for_notification();
1550 
1551  /*
1552  Lock wait Queue status: SNRW
1553  Lock Granted: SR, SW, SU
1554  Lock is granted to Thread 4,5 instead of Thread 6
1555  THREAD6: Lock is not granted
1556  */
1557  EXPECT_FALSE((mdl_thread6.get_mdl_context()).
1558  is_lock_owner(MDL_key::TABLE, db_name, table_name1,
1559  MDL_SHARED_NO_READ_WRITE));
1560 
1561  thd_lock_grabbed[THD4_SW].wait_for_notification();
1562  thd_release_locks[THD4_SW].notify();
1563  thd_lock_released[THD4_SW].wait_for_notification();
1564 
1565  thd_lock_grabbed[THD5_SU].wait_for_notification();
1566  thd_release_locks[THD5_SU].notify();
1567  thd_lock_released[THD5_SU].wait_for_notification();
1568 
1569  /* CLEANUP */
1570  thd_lock_grabbed[THD6_SNRW].wait_for_notification();
1571  thd_release_locks[THD6_SNRW].notify();
1572  thd_lock_released[THD6_SNRW].wait_for_notification();
1573 
1574  mdl_thread1.join();
1575  mdl_thread2.join();
1576  mdl_thread3.join();
1577  mdl_thread4.join();
1578  mdl_thread5.join();
1579  mdl_thread6.join();
1580 
1581  max_write_lock_count= org_max_write_lock_count;
1582 }
1583 
1584 
1587 class MDLKeyTest : public ::testing::Test
1588 {
1589 protected:
1590  MDLKeyTest()
1591  { }
1592 private:
1593  GTEST_DISALLOW_COPY_AND_ASSIGN_(MDLKeyTest);
1594 };
1595 
1596 
1597 // Google Test recommends the DeathTest suffix for classes used in death tests.
1598 typedef MDLKeyTest MDLKeyDeathTest;
1599 
1600 
1601 /*
1602  Verifies that a debug build dies with a DBUG_ASSERT if we try to construct
1603  an MDL_key with too long database or object names.
1604 */
1605 
1606 #if GTEST_HAS_DEATH_TEST && !defined(DBUG_OFF)
1607 TEST_F(MDLKeyDeathTest, DieWhenNamesAreTooLong)
1608 {
1609  ::testing::FLAGS_gtest_death_test_style = "threadsafe";
1610 
1611  /* We need a name which is longer than NAME_LEN = 64*3 = 192.*/
1612  const char *too_long_name=
1613  "0123456789012345678901234567890123456789012345678901234567890123"
1614  "0123456789012345678901234567890123456789012345678901234567890123"
1615  "0123456789012345678901234567890123456789012345678901234567890123"
1616  "0123456789";
1617 
1618  EXPECT_DEATH(MDL_key key0(MDL_key::TABLE, too_long_name, ""),
1619  ".*Assertion.*strlen.*");
1620  EXPECT_DEATH(MDL_key key1(MDL_key::TABLE, "", too_long_name),
1621  ".*Assertion.*strlen.*");
1622 
1623  MDL_key key2;
1624 
1625  EXPECT_DEATH(key2.mdl_key_init(MDL_key::TABLE, too_long_name, ""),
1626  ".*Assertion.*strlen.*");
1627  EXPECT_DEATH(key2.mdl_key_init(MDL_key::TABLE, "", too_long_name),
1628  ".*Assertion.*strlen.*");
1629 
1630 }
1631 #endif // GTEST_HAS_DEATH_TEST && !defined(DBUG_OFF)
1632 
1633 
1634 /*
1635  Verifies that for a production build we allow construction of an
1636  MDL_key with too long database or object names, but that the names
1637  are truncated.
1638 */
1639 
1640 #if defined(DBUG_OFF)
1641 TEST_F(MDLKeyTest, TruncateTooLongNames)
1642 {
1643  /* We need a name which is longer than NAME_LEN = 64*3 = 192.*/
1644  const char *too_long_name=
1645  "0123456789012345678901234567890123456789012345678901234567890123"
1646  "0123456789012345678901234567890123456789012345678901234567890123"
1647  "0123456789012345678901234567890123456789012345678901234567890123"
1648  "0123456789";
1649 
1650  MDL_key key(MDL_key::TABLE, too_long_name, too_long_name);
1651 
1652  const char *db_name= key.db_name();
1653  const char *name= key.name();
1654 
1655  EXPECT_LE(strlen(db_name), (uint)NAME_LEN);
1656  EXPECT_TRUE(strncmp(db_name, too_long_name, NAME_LEN) == 0);
1657  EXPECT_LE(strlen(name), (uint)NAME_LEN);
1658  EXPECT_TRUE(strncmp(name, too_long_name, NAME_LEN) == 0);
1659 }
1660 #endif // defined(DBUG_OFF)
1661 
1662 
1663 } // namespace