MySQL 5.6.14 Source Code Document
 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Groups Pages
pfs.cc
Go to the documentation of this file.
1 /* Copyright (c) 2008, 2013, Oracle and/or its affiliates. All rights reserved.
2 
3  This program is free software; you can redistribute it and/or modify
4  it under the terms of the GNU General Public License as published by
5  the Free Software Foundation; version 2 of the License.
6 
7  This program is distributed in the hope that it will be useful,
8  but WITHOUT ANY WARRANTY; without even the implied warranty of
9  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10  GNU General Public License for more details.
11 
12  You should have received a copy of the GNU General Public License
13  along with this program; if not, write to the Free Software Foundation,
14  51 Franklin Street, Suite 500, Boston, MA 02110-1335 USA */
15 
20 #include "my_global.h"
21 #include "thr_lock.h"
22 #include "mysql/psi/psi.h"
23 #include "mysql/psi/mysql_thread.h"
24 #include "my_pthread.h"
25 #include "sql_const.h"
26 #include "pfs.h"
27 #include "pfs_instr_class.h"
28 #include "pfs_instr.h"
29 #include "pfs_host.h"
30 #include "pfs_user.h"
31 #include "pfs_account.h"
32 #include "pfs_global.h"
33 #include "pfs_column_values.h"
34 #include "pfs_timer.h"
35 #include "pfs_events_waits.h"
36 #include "pfs_events_stages.h"
37 #include "pfs_events_statements.h"
38 #include "pfs_setup_actor.h"
39 #include "pfs_setup_object.h"
40 #include "sql_error.h"
41 #include "sp_head.h"
42 #include "pfs_digest.h"
43 
/** Thread-local-storage key for the per-thread PFS_thread instrumentation pointer. */
pthread_key(PFS_thread*, THR_PFS);
1115 
/**
  Conversion map from a PSI mutex operation code to enum_operation_type.
  Indexed as mutex_operation_map[(int) op]; order must match the PSI
  mutex operation enumeration.
*/
static enum_operation_type mutex_operation_map[]=
{
  OPERATION_TYPE_LOCK,
  OPERATION_TYPE_TRYLOCK
};
1125 
/**
  Conversion map from a PSI rwlock operation code to enum_operation_type.
  Indexed as rwlock_operation_map[static_cast<int>(op)]; order must match
  the PSI rwlock operation enumeration.
*/
static enum_operation_type rwlock_operation_map[]=
{
  OPERATION_TYPE_READLOCK,
  OPERATION_TYPE_WRITELOCK,
  OPERATION_TYPE_TRYREADLOCK,
  OPERATION_TYPE_TRYWRITELOCK
};
1137 
/**
  Conversion map from a PSI condition operation code to enum_operation_type.
  Order must match the PSI condition operation enumeration.
*/
static enum_operation_type cond_operation_map[]=
{
  OPERATION_TYPE_WAIT,
  OPERATION_TYPE_TIMEDWAIT
};
1147 
/**
  Conversion map from a PSI file operation code to enum_operation_type.
  Order must match the PSI file operation enumeration.
*/
static enum_operation_type file_operation_map[]=
{
  OPERATION_TYPE_FILECREATE,
  OPERATION_TYPE_FILECREATETMP,
  OPERATION_TYPE_FILEOPEN,
  OPERATION_TYPE_FILESTREAMOPEN,
  OPERATION_TYPE_FILECLOSE,
  OPERATION_TYPE_FILESTREAMCLOSE,
  OPERATION_TYPE_FILEREAD,
  OPERATION_TYPE_FILEWRITE,
  OPERATION_TYPE_FILESEEK,
  OPERATION_TYPE_FILETELL,
  OPERATION_TYPE_FILEFLUSH,
  OPERATION_TYPE_FILESTAT,
  OPERATION_TYPE_FILEFSTAT,
  OPERATION_TYPE_FILECHSIZE,
  OPERATION_TYPE_FILEDELETE,
  OPERATION_TYPE_FILERENAME,
  OPERATION_TYPE_FILESYNC
};
1172 
/**
  Conversion map from a PSI table io operation code to enum_operation_type.
  Order must match the PSI table io operation enumeration.
*/
static enum_operation_type table_io_operation_map[]=
{
  OPERATION_TYPE_TABLE_FETCH,
  OPERATION_TYPE_TABLE_WRITE_ROW,
  OPERATION_TYPE_TABLE_UPDATE_ROW,
  OPERATION_TYPE_TABLE_DELETE_ROW
};
1184 
/**
  Conversion map from a PFS table lock type to enum_operation_type.
  Order must match the PFS_TL_* enumeration (noted per element below).
*/
static enum_operation_type table_lock_operation_map[]=
{
  OPERATION_TYPE_TL_READ_NORMAL, /* PFS_TL_READ */
  OPERATION_TYPE_TL_READ_WITH_SHARED_LOCKS, /* PFS_TL_READ_WITH_SHARED_LOCKS */
  OPERATION_TYPE_TL_READ_HIGH_PRIORITY, /* PFS_TL_READ_HIGH_PRIORITY */
  OPERATION_TYPE_TL_READ_NO_INSERTS, /* PFS_TL_READ_NO_INSERT */
  OPERATION_TYPE_TL_WRITE_ALLOW_WRITE, /* PFS_TL_WRITE_ALLOW_WRITE */
  OPERATION_TYPE_TL_WRITE_CONCURRENT_INSERT, /* PFS_TL_WRITE_CONCURRENT_INSERT */
  OPERATION_TYPE_TL_WRITE_DELAYED, /* PFS_TL_WRITE_DELAYED */
  OPERATION_TYPE_TL_WRITE_LOW_PRIORITY, /* PFS_TL_WRITE_LOW_PRIORITY */
  OPERATION_TYPE_TL_WRITE_NORMAL, /* PFS_TL_WRITE */
  OPERATION_TYPE_TL_READ_EXTERNAL, /* PFS_TL_READ_EXTERNAL */
  OPERATION_TYPE_TL_WRITE_EXTERNAL /* PFS_TL_WRITE_EXTERNAL */
};
1203 
/**
  Conversion map from a PSI socket operation code to enum_operation_type.
  Order must match the PSI socket operation enumeration.
*/
static enum_operation_type socket_operation_map[]=
{
  OPERATION_TYPE_SOCKETCREATE,
  OPERATION_TYPE_SOCKETCONNECT,
  OPERATION_TYPE_SOCKETBIND,
  OPERATION_TYPE_SOCKETCLOSE,
  OPERATION_TYPE_SOCKETSEND,
  OPERATION_TYPE_SOCKETRECV,
  OPERATION_TYPE_SOCKETSENDTO,
  OPERATION_TYPE_SOCKETRECVFROM,
  OPERATION_TYPE_SOCKETSENDMSG,
  OPERATION_TYPE_SOCKETRECVMSG,
  OPERATION_TYPE_SOCKETSEEK,
  OPERATION_TYPE_SOCKETOPT,
  OPERATION_TYPE_SOCKETSTAT,
  OPERATION_TYPE_SOCKETSHUTDOWN,
  OPERATION_TYPE_SOCKETSELECT
};
1226 
1239 static int build_prefix(const LEX_STRING *prefix, const char *category,
1240  char *output, int *output_length)
1241 {
1242  int len= strlen(category);
1243  char *out_ptr= output;
1244  int prefix_length= prefix->length;
1245 
1246  if (unlikely((prefix_length + len + 1) >=
1248  {
1249  pfs_print_error("build_prefix: prefix+category is too long <%s> <%s>\n",
1250  prefix->str, category);
1251  return 1;
1252  }
1253 
1254  if (unlikely(strchr(category, '/') != NULL))
1255  {
1256  pfs_print_error("build_prefix: invalid category <%s>\n",
1257  category);
1258  return 1;
1259  }
1260 
1261  /* output = prefix + category + '/' */
1262  memcpy(out_ptr, prefix->str, prefix_length);
1263  out_ptr+= prefix_length;
1264  memcpy(out_ptr, category, len);
1265  out_ptr+= len;
1266  *out_ptr= '/';
1267  out_ptr++;
1268  *output_length= out_ptr - output;
1269 
1270  return 0;
1271 }
1272 
/**
  Shared body of the register_XXX_v1 interface functions.
  For each element of info[0..count-1], builds "PREFIX + category + '/' +
  info->m_name", registers it with REGISTER_FUNC, and stores the resulting
  key in *(info->m_key). On any failure (bad prefix/category, or a name
  that overflows PFS_MAX_INFO_NAME_LENGTH) the key is set to 0, which
  means "instrument not registered".
  Expands inside a function that declares: category, info, count.
*/
#define REGISTER_BODY_V1(KEY_T, PREFIX, REGISTER_FUNC) \
  KEY_T key; \
  char formatted_name[PFS_MAX_INFO_NAME_LENGTH]; \
  int prefix_length; \
  int len; \
  int full_length; \
  \
  DBUG_ASSERT(category != NULL); \
  DBUG_ASSERT(info != NULL); \
  if (unlikely(build_prefix(&PREFIX, category, \
                            formatted_name, &prefix_length))) \
  { \
    for (; count>0; count--, info++) \
      *(info->m_key)= 0; \
    return ; \
  } \
  \
  for (; count>0; count--, info++) \
  { \
    DBUG_ASSERT(info->m_key != NULL); \
    DBUG_ASSERT(info->m_name != NULL); \
    len= strlen(info->m_name); \
    full_length= prefix_length + len; \
    if (likely(full_length <= PFS_MAX_INFO_NAME_LENGTH)) \
    { \
      memcpy(formatted_name + prefix_length, info->m_name, len); \
      key= REGISTER_FUNC(formatted_name, full_length, info->m_flags); \
    } \
    else \
    { \
      pfs_print_error("REGISTER_BODY_V1: name too long <%s> <%s>\n", \
                      category, info->m_name); \
      key= 0; \
    } \
    \
    *(info->m_key)= key; \
  } \
  return;
1311 
1312 /* Use C linkage for the interface functions. */
1313 
1314 C_MODE_START
1315 
1320 static void register_mutex_v1(const char *category,
1321  PSI_mutex_info_v1 *info,
1322  int count)
1323 {
1324  REGISTER_BODY_V1(PSI_mutex_key,
1327 }
1328 
1333 static void register_rwlock_v1(const char *category,
1334  PSI_rwlock_info_v1 *info,
1335  int count)
1336 {
1337  REGISTER_BODY_V1(PSI_rwlock_key,
1340 }
1341 
1346 static void register_cond_v1(const char *category,
1347  PSI_cond_info_v1 *info,
1348  int count)
1349 {
1350  REGISTER_BODY_V1(PSI_cond_key,
1353 }
1354 
1359 static void register_thread_v1(const char *category,
1360  PSI_thread_info_v1 *info,
1361  int count)
1362 {
1363  REGISTER_BODY_V1(PSI_thread_key,
1366 }
1367 
1372 static void register_file_v1(const char *category,
1373  PSI_file_info_v1 *info,
1374  int count)
1375 {
1376  REGISTER_BODY_V1(PSI_file_key,
1379 }
1380 
1381 static void register_stage_v1(const char *category,
1382  PSI_stage_info_v1 **info_array,
1383  int count)
1384 {
1385  char formatted_name[PFS_MAX_INFO_NAME_LENGTH];
1386  int prefix_length;
1387  int len;
1388  int full_length;
1389  PSI_stage_info_v1 *info;
1390 
1391  DBUG_ASSERT(category != NULL);
1392  DBUG_ASSERT(info_array != NULL);
1393  if (unlikely(build_prefix(&stage_instrument_prefix, category,
1394  formatted_name, &prefix_length)))
1395  {
1396  for (; count>0; count--, info_array++)
1397  (*info_array)->m_key= 0;
1398  return ;
1399  }
1400 
1401  for (; count>0; count--, info_array++)
1402  {
1403  info= *info_array;
1404  DBUG_ASSERT(info != NULL);
1405  DBUG_ASSERT(info->m_name != NULL);
1406  len= strlen(info->m_name);
1407  full_length= prefix_length + len;
1408  if (likely(full_length <= PFS_MAX_INFO_NAME_LENGTH))
1409  {
1410  memcpy(formatted_name + prefix_length, info->m_name, len);
1411  info->m_key= register_stage_class(formatted_name,
1412  prefix_length,
1413  full_length,
1414  info->m_flags);
1415  }
1416  else
1417  {
1418  pfs_print_error("register_stage_v1: name too long <%s> <%s>\n",
1419  category, info->m_name);
1420  info->m_key= 0;
1421  }
1422  }
1423  return;
1424 }
1425 
1426 static void register_statement_v1(const char *category,
1427  PSI_statement_info_v1 *info,
1428  int count)
1429 {
1430  char formatted_name[PFS_MAX_INFO_NAME_LENGTH];
1431  int prefix_length;
1432  int len;
1433  int full_length;
1434 
1435  DBUG_ASSERT(category != NULL);
1436  DBUG_ASSERT(info != NULL);
1437  if (unlikely(build_prefix(&statement_instrument_prefix,
1438  category, formatted_name, &prefix_length)))
1439  {
1440  for (; count>0; count--, info++)
1441  info->m_key= 0;
1442  return ;
1443  }
1444 
1445  for (; count>0; count--, info++)
1446  {
1447  DBUG_ASSERT(info->m_name != NULL);
1448  len= strlen(info->m_name);
1449  full_length= prefix_length + len;
1450  if (likely(full_length <= PFS_MAX_INFO_NAME_LENGTH))
1451  {
1452  memcpy(formatted_name + prefix_length, info->m_name, len);
1453  info->m_key= register_statement_class(formatted_name, full_length, info->m_flags);
1454  }
1455  else
1456  {
1457  pfs_print_error("register_statement_v1: name too long <%s>\n",
1458  info->m_name);
1459  info->m_key= 0;
1460  }
1461  }
1462  return;
1463 }
1464 
1465 static void register_socket_v1(const char *category,
1466  PSI_socket_info_v1 *info,
1467  int count)
1468 {
1469  REGISTER_BODY_V1(PSI_socket_key,
1470  socket_instrument_prefix,
1472 }
1473 
/**
  Shared body of init_mutex_v1 / init_rwlock_v1 / init_cond_v1.
  Looks up the PFS_&lt;T&gt;_class for KEY; returns NULL when the class is
  unknown or disabled, otherwise creates an instrumented PFS_&lt;T&gt; bound
  to the ID identity and returns it as the opaque PSI_&lt;T&gt; handle.
*/
#define INIT_BODY_V1(T, KEY, ID) \
  PFS_##T##_class *klass; \
  PFS_##T *pfs; \
  klass= find_##T##_class(KEY); \
  if (unlikely(klass == NULL)) \
    return NULL; \
  if (! klass->m_enabled) \
    return NULL; \
  pfs= create_##T(klass, ID); \
  return reinterpret_cast<PSI_##T *> (pfs)
1484 
/**
  Implementation of the mutex instrumentation interface.
  @sa PSI_v1::init_mutex.
*/
static PSI_mutex*
init_mutex_v1(PSI_mutex_key key, const void *identity)
{
  INIT_BODY_V1(mutex, key, identity);
}
1494 
1499 static void destroy_mutex_v1(PSI_mutex* mutex)
1500 {
1501  PFS_mutex *pfs= reinterpret_cast<PFS_mutex*> (mutex);
1502 
1503  DBUG_ASSERT(pfs != NULL);
1504 
1505  destroy_mutex(pfs);
1506 }
1507 
/**
  Implementation of the rwlock instrumentation interface.
  @sa PSI_v1::init_rwlock.
*/
static PSI_rwlock*
init_rwlock_v1(PSI_rwlock_key key, const void *identity)
{
  INIT_BODY_V1(rwlock, key, identity);
}
1517 
1522 static void destroy_rwlock_v1(PSI_rwlock* rwlock)
1523 {
1524  PFS_rwlock *pfs= reinterpret_cast<PFS_rwlock*> (rwlock);
1525 
1526  DBUG_ASSERT(pfs != NULL);
1527 
1528  destroy_rwlock(pfs);
1529 }
1530 
/**
  Implementation of the condition instrumentation interface.
  @sa PSI_v1::init_cond.
*/
static PSI_cond*
init_cond_v1(PSI_cond_key key, const void *identity)
{
  INIT_BODY_V1(cond, key, identity);
}
1540 
1545 static void destroy_cond_v1(PSI_cond* cond)
1546 {
1547  PFS_cond *pfs= reinterpret_cast<PFS_cond*> (cond);
1548 
1549  DBUG_ASSERT(pfs != NULL);
1550 
1551  destroy_cond(pfs);
1552 }
1553 
1558 static PSI_table_share*
1559 get_table_share_v1(my_bool temporary, TABLE_SHARE *share)
1560 {
1561  /* Ignore temporary tables and views. */
1562  if (temporary || share->is_view)
1563  return NULL;
1564  /* An instrumented thread is required, for LF_PINS. */
1565  PFS_thread *pfs_thread= my_pthread_getspecific_ptr(PFS_thread*, THR_PFS);
1566  if (unlikely(pfs_thread == NULL))
1567  return NULL;
1568  PFS_table_share* pfs_share;
1569  pfs_share= find_or_create_table_share(pfs_thread, temporary, share);
1570  return reinterpret_cast<PSI_table_share*> (pfs_share);
1571 }
1572 
1577 static void release_table_share_v1(PSI_table_share* share)
1578 {
1579  PFS_table_share* pfs= reinterpret_cast<PFS_table_share*> (share);
1580 
1581  if (unlikely(pfs == NULL))
1582  return;
1583 
1584  release_table_share(pfs);
1585 }
1586 
1591 static void
1592 drop_table_share_v1(my_bool temporary,
1593  const char *schema_name, int schema_name_length,
1594  const char *table_name, int table_name_length)
1595 {
1596  /* Ignore temporary tables. */
1597  if (temporary)
1598  return;
1599  PFS_thread *pfs_thread= my_pthread_getspecific_ptr(PFS_thread*, THR_PFS);
1600  if (unlikely(pfs_thread == NULL))
1601  return;
1602  /* TODO: temporary tables */
1603  drop_table_share(pfs_thread, temporary, schema_name, schema_name_length,
1604  table_name, table_name_length);
1605 }
1606 
1611 static PSI_table*
1612 open_table_v1(PSI_table_share *share, const void *identity)
1613 {
1614  PFS_table_share *pfs_table_share= reinterpret_cast<PFS_table_share*> (share);
1615 
1616  if (unlikely(pfs_table_share == NULL))
1617  return NULL;
1618 
1619  /* This object is not to be instrumented. */
1620  if (! pfs_table_share->m_enabled)
1621  return NULL;
1622 
1623  /* This object is instrumented, but all table instruments are disabled. */
1625  return NULL;
1626 
1627  /*
1628  When the performance schema is off, do not instrument anything.
1629  Table handles have short life cycle, instrumentation will happen
1630  again if needed during the next open().
1631  */
1633  return NULL;
1634 
1635  PFS_thread *thread= my_pthread_getspecific_ptr(PFS_thread*, THR_PFS);
1636  if (unlikely(thread == NULL))
1637  return NULL;
1638 
1639  PFS_table *pfs_table= create_table(pfs_table_share, thread, identity);
1640  return reinterpret_cast<PSI_table *> (pfs_table);
1641 }
1642 
1647 static void unbind_table_v1(PSI_table *table)
1648 {
1649  PFS_table *pfs= reinterpret_cast<PFS_table*> (table);
1650  if (likely(pfs != NULL))
1651  {
1652  pfs->m_thread_owner= NULL;
1653  }
1654 }
1655 
1660 static PSI_table *
1661 rebind_table_v1(PSI_table_share *share, const void *identity, PSI_table *table)
1662 {
1663  PFS_table *pfs= reinterpret_cast<PFS_table*> (table);
1664  if (likely(pfs != NULL))
1665  {
1666  PFS_thread *thread;
1667  DBUG_ASSERT(pfs->m_thread_owner == NULL);
1668 
1669  /* The table handle was already instrumented, reuse it for this thread. */
1670  thread= my_pthread_getspecific_ptr(PFS_thread*, THR_PFS);
1671 
1672  if (unlikely(! pfs->m_share->m_enabled))
1673  {
1674  destroy_table(pfs);
1675  return NULL;
1676  }
1677 
1679  {
1680  destroy_table(pfs);
1681  return NULL;
1682  }
1683 
1684  if (unlikely(! flag_global_instrumentation))
1685  {
1686  destroy_table(pfs);
1687  return NULL;
1688  }
1689 
1690  pfs->m_thread_owner= thread;
1691  return table;
1692  }
1693 
1694  /* See open_table_v1() */
1695 
1696  PFS_table_share *pfs_table_share= reinterpret_cast<PFS_table_share*> (share);
1697 
1698  if (unlikely(pfs_table_share == NULL))
1699  return NULL;
1700 
1701  if (! pfs_table_share->m_enabled)
1702  return NULL;
1703 
1705  return NULL;
1706 
1708  return NULL;
1709 
1710  PFS_thread *thread= my_pthread_getspecific_ptr(PFS_thread*, THR_PFS);
1711 
1712  PFS_table *pfs_table= create_table(pfs_table_share, thread, identity);
1713  return reinterpret_cast<PSI_table *> (pfs_table);
1714 }
1715 
1720 static void close_table_v1(PSI_table *table)
1721 {
1722  PFS_table *pfs= reinterpret_cast<PFS_table*> (table);
1723  if (unlikely(pfs == NULL))
1724  return;
1725  pfs->aggregate();
1726  destroy_table(pfs);
1727 }
1728 
1729 static PSI_socket*
1730 init_socket_v1(PSI_socket_key key, const my_socket *fd,
1731  const struct sockaddr *addr, socklen_t addr_len)
1732 {
1733  PFS_socket_class *klass;
1734  PFS_socket *pfs;
1735  klass= find_socket_class(key);
1736  if (unlikely(klass == NULL))
1737  return NULL;
1738  if (! klass->m_enabled)
1739  return NULL;
1740  pfs= create_socket(klass, fd, addr, addr_len);
1741  return reinterpret_cast<PSI_socket *> (pfs);
1742 }
1743 
1744 static void destroy_socket_v1(PSI_socket *socket)
1745 {
1746  PFS_socket *pfs= reinterpret_cast<PFS_socket*> (socket);
1747 
1748  DBUG_ASSERT(pfs != NULL);
1749 
1750  destroy_socket(pfs);
1751 }
1752 
1757 static void create_file_v1(PSI_file_key key, const char *name, File file)
1758 {
1760  return;
1761  int index= (int) file;
1762  if (unlikely(index < 0))
1763  return;
1764  PFS_file_class *klass= find_file_class(key);
1765  if (unlikely(klass == NULL))
1766  return;
1767  if (! klass->m_enabled)
1768  return;
1769 
1770  /* A thread is needed for LF_PINS */
1771  PFS_thread *pfs_thread= my_pthread_getspecific_ptr(PFS_thread*, THR_PFS);
1772  if (unlikely(pfs_thread == NULL))
1773  return;
1774 
1775  if (flag_thread_instrumentation && ! pfs_thread->m_enabled)
1776  return;
1777 
1778  /*
1779  We want this check after pfs_thread->m_enabled,
1780  to avoid reporting false loss.
1781  */
1782  if (unlikely(index >= file_handle_max))
1783  {
1784  file_handle_lost++;
1785  return;
1786  }
1787 
1788  uint len= strlen(name);
1789  PFS_file *pfs_file= find_or_create_file(pfs_thread, klass, name, len, true);
1790 
1791  file_handle_array[index]= pfs_file;
1792 }
1793 
1800 {
1801  ulonglong m_thread_internal_id;
1802  char m_username[USERNAME_LENGTH];
1803  uint m_username_length;
1804  char m_hostname[HOSTNAME_LENGTH];
1805  uint m_hostname_length;
1806 
1807  PSI_thread_key m_child_key;
1808  const void *m_child_identity;
1809  void *(*m_user_start_routine)(void*);
1810  void *m_user_arg;
1811 };
1812 
/**
  Trampoline executed by every thread created through spawn_thread_v1().
  Attaches instrumentation to the new pthread, frees the argument block,
  then runs the user's start routine.
  @param arg  a heap-allocated PFS_spawn_thread_arg (ownership transferred
              here; freed before the user routine runs)
  @return NULL (the user routine's return value is discarded)
*/
void* pfs_spawn_thread(void *arg)
{
  PFS_spawn_thread_arg *typed_arg= (PFS_spawn_thread_arg*) arg;
  void *user_arg;
  void *(*user_start_routine)(void*);

  PFS_thread *pfs;

  /* First, attach instrumentation to this newly created pthread. */
  PFS_thread_class *klass= find_thread_class(typed_arg->m_child_key);
  if (likely(klass != NULL))
  {
    pfs= create_thread(klass, typed_arg->m_child_identity, 0);
    if (likely(pfs != NULL))
    {
      clear_thread_account(pfs);

      /* Inherit the account attributes captured from the parent thread. */
      pfs->m_parent_thread_internal_id= typed_arg->m_thread_internal_id;

      memcpy(pfs->m_username, typed_arg->m_username, sizeof(pfs->m_username));
      pfs->m_username_length= typed_arg->m_username_length;

      memcpy(pfs->m_hostname, typed_arg->m_hostname, sizeof(pfs->m_hostname));
      pfs->m_hostname_length= typed_arg->m_hostname_length;

      set_thread_account(pfs);
    }
  }
  else
  {
    /* Unknown thread class: this thread runs uninstrumented. */
    pfs= NULL;
  }
  my_pthread_setspecific_ptr(THR_PFS, pfs);

  /*
    Secondly, free the memory allocated in spawn_thread_v1().
    It is preferable to do this before invoking the user
    routine, to avoid memory leaks at shutdown, in case
    the server exits without waiting for this thread.
  */
  user_start_routine= typed_arg->m_user_start_routine;
  user_arg= typed_arg->m_user_arg;
  my_free(typed_arg);

  /* Then, execute the user code for this thread. */
  (*user_start_routine)(user_arg);

  return NULL;
}
1862 
/**
  Implementation of the thread instrumentation interface.
  Creates a new instrumented thread: packages the parent's attributes
  into a heap block and starts the thread via the pfs_spawn_thread
  trampoline.
  @sa PSI_v1::spawn_thread.
  @return 0 on success, EAGAIN on allocation failure, otherwise the
          pthread_create() error code
*/
static int spawn_thread_v1(PSI_thread_key key,
                           pthread_t *thread, const pthread_attr_t *attr,
                           void *(*start_routine)(void*), void *arg)
{
  PFS_spawn_thread_arg *psi_arg;
  PFS_thread *parent;

  /* psi_arg can not be global, and can not be a local variable. */
  psi_arg= (PFS_spawn_thread_arg*) my_malloc(sizeof(PFS_spawn_thread_arg),
                                             MYF(MY_WME));
  if (unlikely(psi_arg == NULL))
    return EAGAIN;

  psi_arg->m_child_key= key;
  psi_arg->m_child_identity= (arg ? arg : thread);
  psi_arg->m_user_start_routine= start_routine;
  psi_arg->m_user_arg= arg;

  parent= my_pthread_getspecific_ptr(PFS_thread*, THR_PFS);
  if (parent != NULL)
  {
    /*
      Make a copy of the parent attributes.
      This is required, because instrumentation for this thread (the parent)
      may be destroyed before the child thread instrumentation is created.
    */
    psi_arg->m_thread_internal_id= parent->m_thread_internal_id;

    memcpy(psi_arg->m_username, parent->m_username, sizeof(psi_arg->m_username));
    psi_arg->m_username_length= parent->m_username_length;

    memcpy(psi_arg->m_hostname, parent->m_hostname, sizeof(psi_arg->m_hostname));
    psi_arg->m_hostname_length= parent->m_hostname_length;
  }
  else
  {
    /* Parent is not instrumented: child starts with empty attributes. */
    psi_arg->m_thread_internal_id= 0;
    psi_arg->m_username_length= 0;
    psi_arg->m_hostname_length= 0;
  }

  /* On success, ownership of psi_arg passes to pfs_spawn_thread(). */
  int result= pthread_create(thread, attr, pfs_spawn_thread, psi_arg);
  if (unlikely(result != 0))
    my_free(psi_arg);
  return result;
}
1913 
1918 static PSI_thread*
1919 new_thread_v1(PSI_thread_key key, const void *identity, ulonglong processlist_id)
1920 {
1921  PFS_thread *pfs;
1922 
1923  PFS_thread_class *klass= find_thread_class(key);
1924  if (likely(klass != NULL))
1925  pfs= create_thread(klass, identity, processlist_id);
1926  else
1927  pfs= NULL;
1928 
1929  return reinterpret_cast<PSI_thread*> (pfs);
1930 }
1931 
1936 static void set_thread_id_v1(PSI_thread *thread, ulonglong processlist_id)
1937 {
1938  PFS_thread *pfs= reinterpret_cast<PFS_thread*> (thread);
1939  if (unlikely(pfs == NULL))
1940  return;
1941  pfs->m_processlist_id= processlist_id;
1942 }
1943 
1948 static PSI_thread*
1949 get_thread_v1(void)
1950 {
1951  PFS_thread *pfs= my_pthread_getspecific_ptr(PFS_thread*, THR_PFS);
1952  return reinterpret_cast<PSI_thread*> (pfs);
1953 }
1954 
1959 static void set_thread_user_v1(const char *user, int user_len)
1960 {
1961  PFS_thread *pfs= my_pthread_getspecific_ptr(PFS_thread*, THR_PFS);
1962 
1963  DBUG_ASSERT((user != NULL) || (user_len == 0));
1964  DBUG_ASSERT(user_len >= 0);
1965  DBUG_ASSERT((uint) user_len <= sizeof(pfs->m_username));
1966 
1967  if (unlikely(pfs == NULL))
1968  return;
1969 
1970  aggregate_thread(pfs, pfs->m_account, pfs->m_user, pfs->m_host);
1971 
1973 
1974  clear_thread_account(pfs);
1975 
1976  if (user_len > 0)
1977  memcpy(pfs->m_username, user, user_len);
1978  pfs->m_username_length= user_len;
1979 
1980  set_thread_account(pfs);
1981 
1982  bool enabled= true;
1984  {
1985  if ((pfs->m_username_length > 0) && (pfs->m_hostname_length > 0))
1986  {
1987  /*
1988  TODO: performance improvement.
1989  Once performance_schema.USERS is exposed,
1990  we can use PFS_user::m_enabled instead of looking up
1991  SETUP_ACTORS every time.
1992  */
1993  lookup_setup_actor(pfs,
1994  pfs->m_username, pfs->m_username_length,
1995  pfs->m_hostname, pfs->m_hostname_length,
1996  &enabled);
1997  }
1998  }
1999 
2000  pfs->m_enabled= enabled;
2001 
2003 }
2004 
2009 static void set_thread_account_v1(const char *user, int user_len,
2010  const char *host, int host_len)
2011 {
2012  PFS_thread *pfs= my_pthread_getspecific_ptr(PFS_thread*, THR_PFS);
2013 
2014  DBUG_ASSERT((user != NULL) || (user_len == 0));
2015  DBUG_ASSERT(user_len >= 0);
2016  DBUG_ASSERT((uint) user_len <= sizeof(pfs->m_username));
2017  DBUG_ASSERT((host != NULL) || (host_len == 0));
2018  DBUG_ASSERT(host_len >= 0);
2019  DBUG_ASSERT((uint) host_len <= sizeof(pfs->m_hostname));
2020 
2021  if (unlikely(pfs == NULL))
2022  return;
2023 
2025 
2026  clear_thread_account(pfs);
2027 
2028  if (host_len > 0)
2029  memcpy(pfs->m_hostname, host, host_len);
2030  pfs->m_hostname_length= host_len;
2031 
2032  if (user_len > 0)
2033  memcpy(pfs->m_username, user, user_len);
2034  pfs->m_username_length= user_len;
2035 
2036  set_thread_account(pfs);
2037 
2038  bool enabled= true;
2040  {
2041  if ((pfs->m_username_length > 0) && (pfs->m_hostname_length > 0))
2042  {
2043  /*
2044  TODO: performance improvement.
2045  Once performance_schema.USERS is exposed,
2046  we can use PFS_user::m_enabled instead of looking up
2047  SETUP_ACTORS every time.
2048  */
2049  lookup_setup_actor(pfs,
2050  pfs->m_username, pfs->m_username_length,
2051  pfs->m_hostname, pfs->m_hostname_length,
2052  &enabled);
2053  }
2054  }
2055  pfs->m_enabled= enabled;
2056 
2058 }
2059 
2064 static void set_thread_db_v1(const char* db, int db_len)
2065 {
2066  PFS_thread *pfs= my_pthread_getspecific_ptr(PFS_thread*, THR_PFS);
2067 
2068  DBUG_ASSERT((db != NULL) || (db_len == 0));
2069  DBUG_ASSERT(db_len >= 0);
2070  DBUG_ASSERT((uint) db_len <= sizeof(pfs->m_dbname));
2071 
2072  if (likely(pfs != NULL))
2073  {
2075  if (db_len > 0)
2076  memcpy(pfs->m_dbname, db, db_len);
2077  pfs->m_dbname_length= db_len;
2079  }
2080 }
2081 
2086 static void set_thread_command_v1(int command)
2087 {
2088  PFS_thread *pfs= my_pthread_getspecific_ptr(PFS_thread*, THR_PFS);
2089 
2090  DBUG_ASSERT(command >= 0);
2091  DBUG_ASSERT(command <= (int) COM_END);
2092 
2093  if (likely(pfs != NULL))
2094  {
2095  pfs->m_command= command;
2096  }
2097 }
2098 
2103 static void set_thread_start_time_v1(time_t start_time)
2104 {
2105  PFS_thread *pfs= my_pthread_getspecific_ptr(PFS_thread*, THR_PFS);
2106 
2107  if (likely(pfs != NULL))
2108  {
2109  pfs->m_start_time= start_time;
2110  }
2111 }
2112 
/**
  Implementation of the thread instrumentation interface.
  Intentionally a no-op: this attribute is deprecated.
  @sa PSI_v1::set_thread_state.
*/
static void set_thread_state_v1(const char* state)
{
  /* DEPRECATED. */
}
2121 
2126 static void set_thread_info_v1(const char* info, uint info_len)
2127 {
2128  PFS_thread *pfs= my_pthread_getspecific_ptr(PFS_thread*, THR_PFS);
2129 
2130  DBUG_ASSERT((info != NULL) || (info_len == 0));
2131 
2132  if (likely(pfs != NULL))
2133  {
2134  if ((info != NULL) && (info_len > 0))
2135  {
2136  if (info_len > sizeof(pfs->m_processlist_info))
2137  info_len= sizeof(pfs->m_processlist_info);
2138 
2140  memcpy(pfs->m_processlist_info, info, info_len);
2141  pfs->m_processlist_info_length= info_len;
2143  }
2144  else
2145  {
2147  pfs->m_processlist_info_length= 0;
2149  }
2150  }
2151 }
2152 
2157 static void set_thread_v1(PSI_thread* thread)
2158 {
2159  PFS_thread *pfs= reinterpret_cast<PFS_thread*> (thread);
2160  my_pthread_setspecific_ptr(THR_PFS, pfs);
2161 }
2162 
2167 static void delete_current_thread_v1(void)
2168 {
2169  PFS_thread *thread= my_pthread_getspecific_ptr(PFS_thread*, THR_PFS);
2170  if (thread != NULL)
2171  {
2172  aggregate_thread(thread, thread->m_account, thread->m_user, thread->m_host);
2173  my_pthread_setspecific_ptr(THR_PFS, NULL);
2174  destroy_thread(thread);
2175  }
2176 }
2177 
2182 static void delete_thread_v1(PSI_thread *thread)
2183 {
2184  PFS_thread *pfs= reinterpret_cast<PFS_thread*> (thread);
2185 
2186  if (pfs != NULL)
2187  {
2188  aggregate_thread(pfs, pfs->m_account, pfs->m_user, pfs->m_host);
2189  destroy_thread(pfs);
2190  }
2191 }
2192 
2197 static PSI_mutex_locker*
2198 start_mutex_wait_v1(PSI_mutex_locker_state *state,
2199  PSI_mutex *mutex, PSI_mutex_operation op,
2200  const char *src_file, uint src_line)
2201 {
2202  PFS_mutex *pfs_mutex= reinterpret_cast<PFS_mutex*> (mutex);
2203  DBUG_ASSERT((int) op >= 0);
2204  DBUG_ASSERT((uint) op < array_elements(mutex_operation_map));
2205  DBUG_ASSERT(state != NULL);
2206 
2207  DBUG_ASSERT(pfs_mutex != NULL);
2208  DBUG_ASSERT(pfs_mutex->m_class != NULL);
2209 
2210  if (! pfs_mutex->m_enabled)
2211  return NULL;
2212 
2213  register uint flags;
2214  ulonglong timer_start= 0;
2215 
2217  {
2218  PFS_thread *pfs_thread= my_pthread_getspecific_ptr(PFS_thread*, THR_PFS);
2219  if (unlikely(pfs_thread == NULL))
2220  return NULL;
2221  if (! pfs_thread->m_enabled)
2222  return NULL;
2223  state->m_thread= reinterpret_cast<PSI_thread *> (pfs_thread);
2224  flags= STATE_FLAG_THREAD;
2225 
2226  if (pfs_mutex->m_timed)
2227  {
2228  timer_start= get_timer_raw_value_and_function(wait_timer, & state->m_timer);
2229  state->m_timer_start= timer_start;
2230  flags|= STATE_FLAG_TIMED;
2231  }
2232 
2234  {
2235  if (unlikely(pfs_thread->m_events_waits_current >=
2236  & pfs_thread->m_events_waits_stack[WAIT_STACK_SIZE]))
2237  {
2238  locker_lost++;
2239  return NULL;
2240  }
2241  PFS_events_waits *wait= pfs_thread->m_events_waits_current;
2242  state->m_wait= wait;
2243  flags|= STATE_FLAG_EVENT;
2244 
2245  PFS_events_waits *parent_event= wait - 1;
2246  wait->m_event_type= EVENT_TYPE_WAIT;
2247  wait->m_nesting_event_id= parent_event->m_event_id;
2248  wait->m_nesting_event_type= parent_event->m_event_type;
2249 
2250  wait->m_thread= pfs_thread;
2251  wait->m_class= pfs_mutex->m_class;
2252  wait->m_timer_start= timer_start;
2253  wait->m_timer_end= 0;
2254  wait->m_object_instance_addr= pfs_mutex->m_identity;
2255  wait->m_event_id= pfs_thread->m_event_id++;
2256  wait->m_end_event_id= 0;
2257  wait->m_operation= mutex_operation_map[(int) op];
2258  wait->m_source_file= src_file;
2259  wait->m_source_line= src_line;
2260  wait->m_wait_class= WAIT_CLASS_MUTEX;
2261 
2262  pfs_thread->m_events_waits_current++;
2263  }
2264  }
2265  else
2266  {
2267  if (pfs_mutex->m_timed)
2268  {
2269  timer_start= get_timer_raw_value_and_function(wait_timer, & state->m_timer);
2270  state->m_timer_start= timer_start;
2271  flags= STATE_FLAG_TIMED;
2272  state->m_thread= NULL;
2273  }
2274  else
2275  {
2276  /*
2277  Complete shortcut.
2278  */
2279  /* Aggregate to EVENTS_WAITS_SUMMARY_BY_INSTANCE (counted) */
2280  pfs_mutex->m_mutex_stat.m_wait_stat.aggregate_counted();
2281  return NULL;
2282  }
2283  }
2284 
2285  state->m_flags= flags;
2286  state->m_mutex= mutex;
2287  return reinterpret_cast<PSI_mutex_locker*> (state);
2288 }
2289 
2295 static PSI_rwlock_locker*
2296 start_rwlock_wait_v1(PSI_rwlock_locker_state *state,
2297  PSI_rwlock *rwlock,
2298  PSI_rwlock_operation op,
2299  const char *src_file, uint src_line)
2300 {
2301  PFS_rwlock *pfs_rwlock= reinterpret_cast<PFS_rwlock*> (rwlock);
2302  DBUG_ASSERT(static_cast<int> (op) >= 0);
2303  DBUG_ASSERT(static_cast<uint> (op) < array_elements(rwlock_operation_map));
2304  DBUG_ASSERT(state != NULL);
2305  DBUG_ASSERT(pfs_rwlock != NULL);
2306  DBUG_ASSERT(pfs_rwlock->m_class != NULL);
2307 
2308  if (! pfs_rwlock->m_enabled)
2309  return NULL;
2310 
2311  register uint flags;
2312  ulonglong timer_start= 0;
2313 
2315  {
2316  PFS_thread *pfs_thread= my_pthread_getspecific_ptr(PFS_thread*, THR_PFS);
2317  if (unlikely(pfs_thread == NULL))
2318  return NULL;
2319  if (! pfs_thread->m_enabled)
2320  return NULL;
2321  state->m_thread= reinterpret_cast<PSI_thread *> (pfs_thread);
2322  flags= STATE_FLAG_THREAD;
2323 
2324  if (pfs_rwlock->m_timed)
2325  {
2326  timer_start= get_timer_raw_value_and_function(wait_timer, & state->m_timer);
2327  state->m_timer_start= timer_start;
2328  flags|= STATE_FLAG_TIMED;
2329  }
2330 
2332  {
2333  if (unlikely(pfs_thread->m_events_waits_current >=
2334  & pfs_thread->m_events_waits_stack[WAIT_STACK_SIZE]))
2335  {
2336  locker_lost++;
2337  return NULL;
2338  }
2339  PFS_events_waits *wait= pfs_thread->m_events_waits_current;
2340  state->m_wait= wait;
2341  flags|= STATE_FLAG_EVENT;
2342 
2343  PFS_events_waits *parent_event= wait - 1;
2344  wait->m_event_type= EVENT_TYPE_WAIT;
2345  wait->m_nesting_event_id= parent_event->m_event_id;
2346  wait->m_nesting_event_type= parent_event->m_event_type;
2347 
2348  wait->m_thread= pfs_thread;
2349  wait->m_class= pfs_rwlock->m_class;
2350  wait->m_timer_start= timer_start;
2351  wait->m_timer_end= 0;
2352  wait->m_object_instance_addr= pfs_rwlock->m_identity;
2353  wait->m_event_id= pfs_thread->m_event_id++;
2354  wait->m_end_event_id= 0;
2355  wait->m_operation= rwlock_operation_map[static_cast<int> (op)];
2356  wait->m_source_file= src_file;
2357  wait->m_source_line= src_line;
2358  wait->m_wait_class= WAIT_CLASS_RWLOCK;
2359 
2360  pfs_thread->m_events_waits_current++;
2361  }
2362  }
2363  else
2364  {
2365  if (pfs_rwlock->m_timed)
2366  {
2367  timer_start= get_timer_raw_value_and_function(wait_timer, & state->m_timer);
2368  state->m_timer_start= timer_start;
2369  flags= STATE_FLAG_TIMED;
2370  state->m_thread= NULL;
2371  }
2372  else
2373  {
2374  /*
2375  Complete shortcut.
2376  */
2377  /* Aggregate to EVENTS_WAITS_SUMMARY_BY_INSTANCE (counted) */
2378  pfs_rwlock->m_rwlock_stat.m_wait_stat.aggregate_counted();
2379  return NULL;
2380  }
2381  }
2382 
2383  state->m_flags= flags;
2384  state->m_rwlock= rwlock;
2385  return reinterpret_cast<PSI_rwlock_locker*> (state);
2386 }
2387 
/**
  Implementation of the cond instrumentation interface.
  Start a wait event on an instrumented condition variable.
  @param state     locker state, owned by the caller, filled in on success
  @param cond      the instrumented condition being waited on
  @param mutex     the mutex associated with the condition (currently unused,
                   see the note in the body)
  @param op        the wait operation, an index into cond_operation_map
  @param src_file  caller source file, recorded in the wait event
  @param src_line  caller source line, recorded in the wait event
  @return an opaque locker (really @c state), or NULL when this wait
          is not instrumented
*/
static PSI_cond_locker*
start_cond_wait_v1(PSI_cond_locker_state *state,
                   PSI_cond *cond, PSI_mutex *mutex,
                   PSI_cond_operation op,
                   const char *src_file, uint src_line)
{
  /*
    Note about the unused PSI_mutex *mutex parameter:
    In the pthread library, a call to pthread_cond_wait()
    causes an unlock() + lock() on the mutex associated with the condition.
    This mutex operation is not instrumented, so the mutex will still
    appear as locked when a thread is waiting on a condition.
    This has no impact now, as unlock_mutex() is not recording events.
    When unlock_mutex() is implemented by later work logs,
    this parameter here will be used to adjust the mutex state,
    in start_cond_wait_v1() and end_cond_wait_v1().
  */
  PFS_cond *pfs_cond= reinterpret_cast<PFS_cond*> (cond);
  DBUG_ASSERT(static_cast<int> (op) >= 0);
  DBUG_ASSERT(static_cast<uint> (op) < array_elements(cond_operation_map));
  DBUG_ASSERT(state != NULL);
  DBUG_ASSERT(pfs_cond != NULL);
  DBUG_ASSERT(pfs_cond->m_class != NULL);

  /* Shortcut: the condition itself is not instrumented. */
  if (! pfs_cond->m_enabled)
    return NULL;

  register uint flags;
  ulonglong timer_start= 0;

  /*
    NOTE(review): the guard expression of the next block (original source
    line 2422) was lost in the doxygen extraction; presumably it tests
    flag_thread_instrumentation -- confirm against the original pfs.cc.
  */
  {
    /* Per-thread instrumentation path. */
    PFS_thread *pfs_thread= my_pthread_getspecific_ptr(PFS_thread*, THR_PFS);
    if (unlikely(pfs_thread == NULL))
      return NULL;
    if (! pfs_thread->m_enabled)
      return NULL;
    state->m_thread= reinterpret_cast<PSI_thread *> (pfs_thread);
    flags= STATE_FLAG_THREAD;

    if (pfs_cond->m_timed)
    {
      /* Record the start timestamp and the timer to use at wait end. */
      timer_start= get_timer_raw_value_and_function(wait_timer, & state->m_timer);
      state->m_timer_start= timer_start;
      flags|= STATE_FLAG_TIMED;
    }

    /*
      NOTE(review): the guard of this inner block (original source line 2439)
      was also lost; presumably it tests flag_events_waits_current -- confirm.
    */
    {
      if (unlikely(pfs_thread->m_events_waits_current >=
                   & pfs_thread->m_events_waits_stack[WAIT_STACK_SIZE]))
      {
        /* The per-thread wait stack is full: count the lost event and bail. */
        locker_lost++;
        return NULL;
      }
      PFS_events_waits *wait= pfs_thread->m_events_waits_current;
      state->m_wait= wait;
      flags|= STATE_FLAG_EVENT;

      /* The slot below the stack top is the parent (nesting) event. */
      PFS_events_waits *parent_event= wait - 1;
      wait->m_event_type= EVENT_TYPE_WAIT;
      wait->m_nesting_event_id= parent_event->m_event_id;
      wait->m_nesting_event_type= parent_event->m_event_type;

      wait->m_thread= pfs_thread;
      wait->m_class= pfs_cond->m_class;
      wait->m_timer_start= timer_start;
      wait->m_timer_end= 0;
      wait->m_object_instance_addr= pfs_cond->m_identity;
      wait->m_event_id= pfs_thread->m_event_id++;
      wait->m_end_event_id= 0;
      wait->m_operation= cond_operation_map[static_cast<int> (op)];
      wait->m_source_file= src_file;
      wait->m_source_line= src_line;
      wait->m_wait_class= WAIT_CLASS_COND;

      pfs_thread->m_events_waits_current++;
    }
  }
  else
  {
    /* Thread instrumentation disabled: only instance-level statistics. */
    if (pfs_cond->m_timed)
    {
      timer_start= get_timer_raw_value_and_function(wait_timer, & state->m_timer);
      state->m_timer_start= timer_start;
      flags= STATE_FLAG_TIMED;
    }
    else
    {
      /*
        Complete shortcut.
      */
      /* Aggregate to EVENTS_WAITS_SUMMARY_BY_INSTANCE (counted) */
      pfs_cond->m_cond_stat.m_wait_stat.aggregate_counted();
      return NULL;
    }
  }

  state->m_flags= flags;
  state->m_cond= cond;
  state->m_mutex= mutex;
  return reinterpret_cast<PSI_cond_locker*> (state);
}
2495 
2496 static inline PFS_TL_LOCK_TYPE lock_flags_to_lock_type(uint flags)
2497 {
2498  enum thr_lock_type value= static_cast<enum thr_lock_type> (flags);
2499 
2500  switch (value)
2501  {
2502  case TL_READ:
2503  return PFS_TL_READ;
2504  case TL_READ_WITH_SHARED_LOCKS:
2505  return PFS_TL_READ_WITH_SHARED_LOCKS;
2506  case TL_READ_HIGH_PRIORITY:
2507  return PFS_TL_READ_HIGH_PRIORITY;
2508  case TL_READ_NO_INSERT:
2509  return PFS_TL_READ_NO_INSERT;
2510  case TL_WRITE_ALLOW_WRITE:
2511  return PFS_TL_WRITE_ALLOW_WRITE;
2512  case TL_WRITE_CONCURRENT_INSERT:
2513  return PFS_TL_WRITE_CONCURRENT_INSERT;
2514  case TL_WRITE_DELAYED:
2515  return PFS_TL_WRITE_DELAYED;
2516  case TL_WRITE_LOW_PRIORITY:
2517  return PFS_TL_WRITE_LOW_PRIORITY;
2518  case TL_WRITE:
2519  return PFS_TL_WRITE;
2520 
2521  case TL_WRITE_ONLY:
2522  case TL_IGNORE:
2523  case TL_UNLOCK:
2524  case TL_READ_DEFAULT:
2525  case TL_WRITE_DEFAULT:
2526  default:
2527  DBUG_ASSERT(false);
2528  }
2529 
2530  /* Dead code */
2531  return PFS_TL_READ;
2532 }
2533 
2534 static inline PFS_TL_LOCK_TYPE external_lock_flags_to_lock_type(uint flags)
2535 {
2536  DBUG_ASSERT(flags == F_RDLCK || flags == F_WRLCK);
2537  return (flags == F_RDLCK ? PFS_TL_READ_EXTERNAL : PFS_TL_WRITE_EXTERNAL);
2538 }
2539 
/**
  Implementation of the table instrumentation interface.
  Start a table io wait event.
  @param state     locker state, owned by the caller, filled in on success
  @param table     the instrumented table handle
  @param op        the io operation, an index into table_io_operation_map
  @param index     the index used by the operation
  @param src_file  caller source file, recorded in the wait event
  @param src_line  caller source line, recorded in the wait event
  @return a locker, or NULL when this wait is not instrumented
*/
static PSI_table_locker*
start_table_io_wait_v1(PSI_table_locker_state *state,
                       PSI_table *table,
                       PSI_table_io_operation op,
                       uint index,
                       const char *src_file, uint src_line)
{
  DBUG_ASSERT(static_cast<int> (op) >= 0);
  DBUG_ASSERT(static_cast<uint> (op) < array_elements(table_io_operation_map));
  DBUG_ASSERT(state != NULL);
  PFS_table *pfs_table= reinterpret_cast<PFS_table*> (table);
  DBUG_ASSERT(pfs_table != NULL);
  DBUG_ASSERT(pfs_table->m_share != NULL);

  /* Shortcut: io instrumentation is disabled for this table. */
  if (! pfs_table->m_io_enabled)
    return NULL;

  /* The owning thread is cached in the table handle. */
  PFS_thread *pfs_thread= pfs_table->m_thread_owner;

  DBUG_ASSERT(pfs_thread ==
              my_pthread_getspecific_ptr(PFS_thread*, THR_PFS));

  register uint flags;
  ulonglong timer_start= 0;

  /*
    NOTE(review): the guard expression of the next block (original source
    line 2569) was lost in the doxygen extraction; presumably it tests
    flag_thread_instrumentation -- confirm against the original pfs.cc.
  */
  {
    if (pfs_thread == NULL)
      return NULL;
    if (! pfs_thread->m_enabled)
      return NULL;
    state->m_thread= reinterpret_cast<PSI_thread *> (pfs_thread);
    flags= STATE_FLAG_THREAD;

    if (pfs_table->m_io_timed)
    {
      /* Record the start timestamp and the timer to use at wait end. */
      timer_start= get_timer_raw_value_and_function(wait_timer, & state->m_timer);
      state->m_timer_start= timer_start;
      flags|= STATE_FLAG_TIMED;
    }

    /*
      NOTE(review): the guard of this inner block (original source line 2585)
      was also lost; presumably it tests flag_events_waits_current -- confirm.
    */
    {
      if (unlikely(pfs_thread->m_events_waits_current >=
                   & pfs_thread->m_events_waits_stack[WAIT_STACK_SIZE]))
      {
        /* The per-thread wait stack is full: count the lost event and bail. */
        locker_lost++;
        return NULL;
      }
      PFS_events_waits *wait= pfs_thread->m_events_waits_current;
      state->m_wait= wait;
      flags|= STATE_FLAG_EVENT;

      /* The slot below the stack top is the parent (nesting) event. */
      PFS_events_waits *parent_event= wait - 1;
      wait->m_event_type= EVENT_TYPE_WAIT;
      wait->m_nesting_event_id= parent_event->m_event_id;
      wait->m_nesting_event_type= parent_event->m_event_type;

      PFS_table_share *share= pfs_table->m_share;
      wait->m_thread= pfs_thread;
      /*
        NOTE(review): one statement was lost here (original source line 2604);
        presumably wait->m_class= &global_table_io_class; -- confirm.
      */
      wait->m_timer_start= timer_start;
      wait->m_timer_end= 0;
      wait->m_object_instance_addr= pfs_table->m_identity;
      wait->m_event_id= pfs_thread->m_event_id++;
      wait->m_end_event_id= 0;
      wait->m_operation= table_io_operation_map[static_cast<int> (op)];
      wait->m_flags= 0;
      /* Record a weak reference to the share: checked again at wait end. */
      wait->m_object_type= share->get_object_type();
      wait->m_weak_table_share= share;
      wait->m_weak_version= share->get_version();
      wait->m_index= index;
      wait->m_source_file= src_file;
      wait->m_source_line= src_line;
      wait->m_wait_class= WAIT_CLASS_TABLE;

      pfs_thread->m_events_waits_current++;
    }
  }
  else
  {
    /* Thread instrumentation disabled: only timed/counted aggregation. */
    if (pfs_table->m_io_timed)
    {
      timer_start= get_timer_raw_value_and_function(wait_timer, & state->m_timer);
      state->m_timer_start= timer_start;
      flags= STATE_FLAG_TIMED;
    }
    else
    {
      /* TODO: consider a shortcut here */
      flags= 0;
    }
  }

  state->m_flags= flags;
  state->m_table= table;
  state->m_io_operation= op;
  state->m_index= index;
  return reinterpret_cast<PSI_table_locker*> (state);
}
2644 
/**
  Implementation of the table instrumentation interface.
  Start a table lock wait event.
  @param state     locker state, owned by the caller, filled in on success
  @param table     the instrumented table handle
  @param op        PSI_TABLE_LOCK (thr_lock) or PSI_TABLE_EXTERNAL_LOCK
  @param op_flags  the lock flags: a thr_lock_type for PSI_TABLE_LOCK,
                   or F_RDLCK/F_WRLCK/F_UNLCK for PSI_TABLE_EXTERNAL_LOCK
  @param src_file  caller source file, recorded in the wait event
  @param src_line  caller source line, recorded in the wait event
  @return a locker, or NULL when this wait is not instrumented
*/
static PSI_table_locker*
start_table_lock_wait_v1(PSI_table_locker_state *state,
                         PSI_table *table,
                         PSI_table_lock_operation op,
                         ulong op_flags,
                         const char *src_file, uint src_line)
{
  DBUG_ASSERT(state != NULL);
  DBUG_ASSERT((op == PSI_TABLE_LOCK) || (op == PSI_TABLE_EXTERNAL_LOCK));

  PFS_table *pfs_table= reinterpret_cast<PFS_table*> (table);

  DBUG_ASSERT(pfs_table != NULL);
  DBUG_ASSERT(pfs_table->m_share != NULL);

  /* Shortcut: lock instrumentation is disabled for this table. */
  if (! pfs_table->m_lock_enabled)
    return NULL;

  PFS_thread *pfs_thread= pfs_table->m_thread_owner;

  PFS_TL_LOCK_TYPE lock_type;

  /* Normalize the (operation, flags) pair into a single lock type. */
  switch (op)
  {
  case PSI_TABLE_LOCK:
    lock_type= lock_flags_to_lock_type(op_flags);
    break;
  case PSI_TABLE_EXTERNAL_LOCK:
    /*
      See the handler::external_lock() API design,
      there is no handler::external_unlock().
    */
    if (op_flags == F_UNLCK)
      return NULL;
    lock_type= external_lock_flags_to_lock_type(op_flags);
    break;
  default:
    lock_type= PFS_TL_READ;
    DBUG_ASSERT(false);
  }

  DBUG_ASSERT((uint) lock_type < array_elements(table_lock_operation_map));

  register uint flags;
  ulonglong timer_start= 0;

  /*
    NOTE(review): the guard expression of the next block (original source
    line 2695) was lost in the doxygen extraction; presumably it tests
    flag_thread_instrumentation -- confirm against the original pfs.cc.
  */
  {
    if (pfs_thread == NULL)
      return NULL;
    if (! pfs_thread->m_enabled)
      return NULL;
    state->m_thread= reinterpret_cast<PSI_thread *> (pfs_thread);
    flags= STATE_FLAG_THREAD;

    if (pfs_table->m_lock_timed)
    {
      /* Record the start timestamp and the timer to use at wait end. */
      timer_start= get_timer_raw_value_and_function(wait_timer, & state->m_timer);
      state->m_timer_start= timer_start;
      flags|= STATE_FLAG_TIMED;
    }

    /*
      NOTE(review): the guard of this inner block (original source line 2711)
      was also lost; presumably it tests flag_events_waits_current -- confirm.
    */
    {
      if (unlikely(pfs_thread->m_events_waits_current >=
                   & pfs_thread->m_events_waits_stack[WAIT_STACK_SIZE]))
      {
        /* The per-thread wait stack is full: count the lost event and bail. */
        locker_lost++;
        return NULL;
      }
      PFS_events_waits *wait= pfs_thread->m_events_waits_current;
      state->m_wait= wait;
      flags|= STATE_FLAG_EVENT;

      /* The slot below the stack top is the parent (nesting) event. */
      PFS_events_waits *parent_event= wait - 1;
      wait->m_event_type= EVENT_TYPE_WAIT;
      wait->m_nesting_event_id= parent_event->m_event_id;
      wait->m_nesting_event_type= parent_event->m_event_type;

      PFS_table_share *share= pfs_table->m_share;
      wait->m_thread= pfs_thread;
      /*
        NOTE(review): one statement was lost here (original source line 2730);
        presumably wait->m_class= &global_table_lock_class; -- confirm.
      */
      wait->m_timer_start= timer_start;
      wait->m_timer_end= 0;
      wait->m_object_instance_addr= pfs_table->m_identity;
      wait->m_event_id= pfs_thread->m_event_id++;
      wait->m_end_event_id= 0;
      wait->m_operation= table_lock_operation_map[lock_type];
      wait->m_flags= 0;
      /* Record a weak reference to the share: checked again at wait end. */
      wait->m_object_type= share->get_object_type();
      wait->m_weak_table_share= share;
      wait->m_weak_version= share->get_version();
      wait->m_index= 0;
      wait->m_source_file= src_file;
      wait->m_source_line= src_line;
      wait->m_wait_class= WAIT_CLASS_TABLE;

      pfs_thread->m_events_waits_current++;
    }
  }
  else
  {
    /* Thread instrumentation disabled: only timed/counted aggregation. */
    if (pfs_table->m_lock_timed)
    {
      timer_start= get_timer_raw_value_and_function(wait_timer, & state->m_timer);
      state->m_timer_start= timer_start;
      flags= STATE_FLAG_TIMED;
    }
    else
    {
      /* TODO: consider a shortcut here */
      flags= 0;
    }
  }

  state->m_flags= flags;
  state->m_table= table;
  /* m_index is reused to carry the lock type to end_table_lock_wait. */
  state->m_index= lock_type;
  return reinterpret_cast<PSI_table_locker*> (state);
}
2769 
/**
  Implementation of the file instrumentation interface.
  Get a file locker for an operation identified by file name,
  before the file is opened (no PFS_file instance exists yet).
  @param state     locker state, owned by the caller, filled in on success
  @param key       the instrumented file class key
  @param op        the file operation, an index into file_operation_map
  @param name      the file name
  @param identity  opaque caller identity (unused here)
  @return a locker, or NULL when this operation is not instrumented
*/
static PSI_file_locker*
get_thread_file_name_locker_v1(PSI_file_locker_state *state,
                               PSI_file_key key,
                               PSI_file_operation op,
                               const char *name, const void *identity)
{
  DBUG_ASSERT(static_cast<int> (op) >= 0);
  DBUG_ASSERT(static_cast<uint> (op) < array_elements(file_operation_map));
  DBUG_ASSERT(state != NULL);

  /*
    NOTE(review): the condition guarding this early return (original source
    line 2784) was lost in the doxygen extraction; presumably
    if (! flag_global_instrumentation) -- confirm against the original pfs.cc.
  */
    return NULL;
  /* Resolve the file class from its registration key. */
  PFS_file_class *klass= find_file_class(key);
  if (unlikely(klass == NULL))
    return NULL;
  if (! klass->m_enabled)
    return NULL;

  /* Needed for the LF_HASH */
  PFS_thread *pfs_thread= my_pthread_getspecific_ptr(PFS_thread*, THR_PFS);
  if (unlikely(pfs_thread == NULL))
    return NULL;

  if (flag_thread_instrumentation && ! pfs_thread->m_enabled)
    return NULL;

  register uint flags;

  state->m_thread= reinterpret_cast<PSI_thread *> (pfs_thread);
  flags= STATE_FLAG_THREAD;

  if (klass->m_timed)
    flags|= STATE_FLAG_TIMED;

  /*
    NOTE(review): the guard of this block (original source line 2808) was
    also lost; presumably it tests flag_events_waits_current -- confirm.
  */
  {
    if (unlikely(pfs_thread->m_events_waits_current >=
                 & pfs_thread->m_events_waits_stack[WAIT_STACK_SIZE]))
    {
      /* The per-thread wait stack is full: count the lost event and bail. */
      locker_lost++;
      return NULL;
    }
    PFS_events_waits *wait= pfs_thread->m_events_waits_current;
    state->m_wait= wait;
    flags|= STATE_FLAG_EVENT;

    /* The slot below the stack top is the parent (nesting) event. */
    PFS_events_waits *parent_event= wait - 1;
    wait->m_event_type= EVENT_TYPE_WAIT;
    wait->m_nesting_event_id= parent_event->m_event_id;
    wait->m_nesting_event_type= parent_event->m_event_type;

    wait->m_thread= pfs_thread;
    wait->m_class= klass;
    /* Timer values are filled in by the start/end file wait calls. */
    wait->m_timer_start= 0;
    wait->m_timer_end= 0;
    /* No file instance exists yet: no instance address, no weak reference. */
    wait->m_object_instance_addr= NULL;
    wait->m_weak_file= NULL;
    wait->m_weak_version= 0;
    wait->m_event_id= pfs_thread->m_event_id++;
    wait->m_end_event_id= 0;
    wait->m_operation= file_operation_map[static_cast<int> (op)];
    wait->m_wait_class= WAIT_CLASS_FILE;

    pfs_thread->m_events_waits_current++;
  }

  state->m_flags= flags;
  state->m_file= NULL;
  state->m_name= name;
  state->m_class= klass;
  state->m_operation= op;
  return reinterpret_cast<PSI_file_locker*> (state);
}
2847 
/**
  Implementation of the file instrumentation interface.
  Get a file locker for an operation on an already-instrumented
  file stream (identified by its PSI_file handle).
  @param state  locker state, owned by the caller, filled in on success
  @param file   the instrumented file
  @param op     the file operation, an index into file_operation_map
  @return a locker, or NULL when this operation is not instrumented
*/
static PSI_file_locker*
get_thread_file_stream_locker_v1(PSI_file_locker_state *state,
                                 PSI_file *file, PSI_file_operation op)
{
  PFS_file *pfs_file= reinterpret_cast<PFS_file*> (file);
  DBUG_ASSERT(static_cast<int> (op) >= 0);
  DBUG_ASSERT(static_cast<uint> (op) < array_elements(file_operation_map));
  DBUG_ASSERT(state != NULL);

  if (unlikely(pfs_file == NULL))
    return NULL;
  DBUG_ASSERT(pfs_file->m_class != NULL);
  PFS_file_class *klass= pfs_file->m_class;

  /* Shortcut: this file instance is not instrumented. */
  if (! pfs_file->m_enabled)
    return NULL;

  register uint flags;

  /*
    NOTE(review): the guard expression of the next block (original source
    line 2871) was lost in the doxygen extraction; presumably it tests
    flag_thread_instrumentation -- confirm against the original pfs.cc.
  */
  {
    PFS_thread *pfs_thread= my_pthread_getspecific_ptr(PFS_thread*, THR_PFS);
    if (unlikely(pfs_thread == NULL))
      return NULL;
    if (! pfs_thread->m_enabled)
      return NULL;
    state->m_thread= reinterpret_cast<PSI_thread *> (pfs_thread);
    flags= STATE_FLAG_THREAD;

    if (pfs_file->m_timed)
      flags|= STATE_FLAG_TIMED;

    /*
      NOTE(review): the guard of this inner block (original source line 2884)
      was also lost; presumably it tests flag_events_waits_current -- confirm.
    */
    {
      if (unlikely(pfs_thread->m_events_waits_current >=
                   & pfs_thread->m_events_waits_stack[WAIT_STACK_SIZE]))
      {
        /* The per-thread wait stack is full: count the lost event and bail. */
        locker_lost++;
        return NULL;
      }
      PFS_events_waits *wait= pfs_thread->m_events_waits_current;
      state->m_wait= wait;
      flags|= STATE_FLAG_EVENT;

      /* The slot below the stack top is the parent (nesting) event. */
      PFS_events_waits *parent_event= wait - 1;
      wait->m_event_type= EVENT_TYPE_WAIT;
      wait->m_nesting_event_id= parent_event->m_event_id;
      wait->m_nesting_event_type= parent_event->m_event_type;

      wait->m_thread= pfs_thread;
      wait->m_class= klass;
      /* Timer values are filled in by the start/end file wait calls. */
      wait->m_timer_start= 0;
      wait->m_timer_end= 0;
      wait->m_object_instance_addr= pfs_file;
      /* Weak reference: re-validated against the version at wait end. */
      wait->m_weak_file= pfs_file;
      wait->m_weak_version= pfs_file->get_version();
      wait->m_event_id= pfs_thread->m_event_id++;
      wait->m_end_event_id= 0;
      wait->m_operation= file_operation_map[static_cast<int> (op)];
      wait->m_wait_class= WAIT_CLASS_FILE;

      pfs_thread->m_events_waits_current++;
    }
  }
  else
  {
    /* Thread instrumentation disabled: only timed/counted aggregation. */
    state->m_thread= NULL;
    if (pfs_file->m_timed)
    {
      flags= STATE_FLAG_TIMED;
    }
    else
    {
      /* TODO: consider a shortcut. */
      flags= 0;
    }
  }

  state->m_flags= flags;
  state->m_file= reinterpret_cast<PSI_file*> (pfs_file);
  state->m_operation= op;
  state->m_name= NULL;
  state->m_class= klass;
  return reinterpret_cast<PSI_file_locker*> (state);
}
2937 
/**
  Implementation of the file instrumentation interface.
  Get a file locker for an operation identified by file descriptor.
  @param state  locker state, owned by the caller, filled in on success
  @param file   the file descriptor, used as an index into file_handle_array
  @param op     the file operation, an index into file_operation_map
  @return a locker, or NULL when this operation is not instrumented
*/
static PSI_file_locker*
get_thread_file_descriptor_locker_v1(PSI_file_locker_state *state,
                                     File file, PSI_file_operation op)
{
  int index= static_cast<int> (file);
  DBUG_ASSERT(static_cast<int> (op) >= 0);
  DBUG_ASSERT(static_cast<uint> (op) < array_elements(file_operation_map));
  DBUG_ASSERT(state != NULL);

  /* Reject descriptors outside the instrumented handle table. */
  if (unlikely((index < 0) || (index >= file_handle_max)))
    return NULL;

  PFS_file *pfs_file= file_handle_array[index];
  if (unlikely(pfs_file == NULL))
    return NULL;

  /*
    We are about to close a file by descriptor number,
    and the calling code still holds the descriptor.
    Cleanup the file descriptor <--> file instrument association.
    Remove the instrumentation *before* the close to avoid race
    conditions with another thread opening a file
    (that could be given the same descriptor).
  */
  if (op == PSI_FILE_CLOSE)
    file_handle_array[index]= NULL;

  if (! pfs_file->m_enabled)
    return NULL;

  DBUG_ASSERT(pfs_file->m_class != NULL);
  PFS_file_class *klass= pfs_file->m_class;

  register uint flags;

  /*
    NOTE(review): the guard expression of the next block (original source
    line 2977) was lost in the doxygen extraction; presumably it tests
    flag_thread_instrumentation -- confirm against the original pfs.cc.
  */
  {
    PFS_thread *pfs_thread= my_pthread_getspecific_ptr(PFS_thread*, THR_PFS);
    if (unlikely(pfs_thread == NULL))
      return NULL;
    if (! pfs_thread->m_enabled)
      return NULL;
    state->m_thread= reinterpret_cast<PSI_thread *> (pfs_thread);
    flags= STATE_FLAG_THREAD;

    if (pfs_file->m_timed)
      flags|= STATE_FLAG_TIMED;

    /*
      NOTE(review): the guard of this inner block (original source line 2990)
      was also lost; presumably it tests flag_events_waits_current -- confirm.
    */
    {
      if (unlikely(pfs_thread->m_events_waits_current >=
                   & pfs_thread->m_events_waits_stack[WAIT_STACK_SIZE]))
      {
        /* The per-thread wait stack is full: count the lost event and bail. */
        locker_lost++;
        return NULL;
      }
      PFS_events_waits *wait= pfs_thread->m_events_waits_current;
      state->m_wait= wait;
      flags|= STATE_FLAG_EVENT;

      /* The slot below the stack top is the parent (nesting) event. */
      PFS_events_waits *parent_event= wait - 1;
      wait->m_event_type= EVENT_TYPE_WAIT;
      wait->m_nesting_event_id= parent_event->m_event_id;
      wait->m_nesting_event_type= parent_event->m_event_type;

      wait->m_thread= pfs_thread;
      wait->m_class= klass;
      /* Timer values are filled in by the start/end file wait calls. */
      wait->m_timer_start= 0;
      wait->m_timer_end= 0;
      wait->m_object_instance_addr= pfs_file;
      /* Weak reference: re-validated against the version at wait end. */
      wait->m_weak_file= pfs_file;
      wait->m_weak_version= pfs_file->get_version();
      wait->m_event_id= pfs_thread->m_event_id++;
      wait->m_end_event_id= 0;
      wait->m_operation= file_operation_map[static_cast<int> (op)];
      wait->m_wait_class= WAIT_CLASS_FILE;

      pfs_thread->m_events_waits_current++;
    }
  }
  else
  {
    /* Thread instrumentation disabled: only timed/counted aggregation. */
    state->m_thread= NULL;
    if (pfs_file->m_timed)
    {
      flags= STATE_FLAG_TIMED;
    }
    else
    {
      /* TODO: consider a shortcut. */
      flags= 0;
    }
  }

  state->m_flags= flags;
  state->m_file= reinterpret_cast<PSI_file*> (pfs_file);
  state->m_operation= op;
  state->m_name= NULL;
  state->m_class= klass;
  return reinterpret_cast<PSI_file_locker*> (state);
}
3043 
/**
  Implementation of the socket instrumentation interface.
  Start a socket wait event.
  @param state     locker state, owned by the caller, filled in on success
  @param socket    the instrumented socket
  @param op        the socket operation, an index into socket_operation_map
  @param count     number of bytes requested by the operation, when relevant
  @param src_file  caller source file, recorded in the wait event
  @param src_line  caller source line, recorded in the wait event
  @return a locker, or NULL when this wait is not instrumented
*/
static PSI_socket_locker*
start_socket_wait_v1(PSI_socket_locker_state *state,
                     PSI_socket *socket,
                     PSI_socket_operation op,
                     size_t count,
                     const char *src_file, uint src_line)
{
  DBUG_ASSERT(static_cast<int> (op) >= 0);
  DBUG_ASSERT(static_cast<uint> (op) < array_elements(socket_operation_map));
  DBUG_ASSERT(state != NULL);
  PFS_socket *pfs_socket= reinterpret_cast<PFS_socket*> (socket);

  DBUG_ASSERT(pfs_socket != NULL);
  DBUG_ASSERT(pfs_socket->m_class != NULL);

  /* Idle sockets are instrumented as idle waits, not socket waits. */
  if (!pfs_socket->m_enabled || pfs_socket->m_idle)
    return NULL;

  register uint flags= 0;
  ulonglong timer_start= 0;

  /*
    NOTE(review): the guard expression of the next block (original source
    line 3067) was lost in the doxygen extraction; presumably it tests
    flag_thread_instrumentation -- confirm against the original pfs.cc.
  */
  {
    /* The owning thread is cached in the socket instrument. */
    PFS_thread *pfs_thread= pfs_socket->m_thread_owner;

    if (unlikely(pfs_thread == NULL))
      return NULL;

    if (!pfs_thread->m_enabled)
      return NULL;

    state->m_thread= reinterpret_cast<PSI_thread *> (pfs_thread);
    flags= STATE_FLAG_THREAD;

    if (pfs_socket->m_timed)
    {
      /* Record the start timestamp and the timer to use at wait end. */
      timer_start= get_timer_raw_value_and_function(wait_timer, & state->m_timer);
      state->m_timer_start= timer_start;
      flags|= STATE_FLAG_TIMED;
    }

    /*
      NOTE(review): the guard of this inner block (original source line 3087)
      was also lost; presumably it tests flag_events_waits_current -- confirm.
    */
    {
      if (unlikely(pfs_thread->m_events_waits_current >=
                   & pfs_thread->m_events_waits_stack[WAIT_STACK_SIZE]))
      {
        /* The per-thread wait stack is full: count the lost event and bail. */
        locker_lost++;
        return NULL;
      }
      PFS_events_waits *wait= pfs_thread->m_events_waits_current;
      state->m_wait= wait;
      flags|= STATE_FLAG_EVENT;

      /* The slot below the stack top is the parent (nesting) event. */
      PFS_events_waits *parent_event= wait - 1;
      wait->m_event_type= EVENT_TYPE_WAIT;
      wait->m_nesting_event_id= parent_event->m_event_id;
      wait->m_nesting_event_type= parent_event->m_event_type;
      wait->m_thread= pfs_thread;
      wait->m_class= pfs_socket->m_class;
      wait->m_timer_start= timer_start;
      wait->m_timer_end= 0;
      wait->m_object_instance_addr= pfs_socket->m_identity;
      /* Weak reference: re-validated against the version at wait end. */
      wait->m_weak_socket= pfs_socket;
      wait->m_weak_version= pfs_socket->get_version();
      wait->m_event_id= pfs_thread->m_event_id++;
      wait->m_end_event_id= 0;
      wait->m_operation= socket_operation_map[static_cast<int>(op)];
      wait->m_source_file= src_file;
      wait->m_source_line= src_line;
      wait->m_number_of_bytes= count;
      wait->m_wait_class= WAIT_CLASS_SOCKET;

      pfs_thread->m_events_waits_current++;
    }
  }
  else
  {
    if (pfs_socket->m_timed)
    {
      timer_start= get_timer_raw_value_and_function(wait_timer, & state->m_timer);
      state->m_timer_start= timer_start;
      flags= STATE_FLAG_TIMED;
    }
    else
    {
      /*
        Even if timing is disabled, end_socket_wait() still needs a locker to
        capture the number of bytes sent or received by the socket operation.
        For operations that do not have a byte count, then just increment the
        event counter and return a NULL locker.
      */
      switch (op)
      {
        case PSI_SOCKET_CONNECT:
        case PSI_SOCKET_CREATE:
        case PSI_SOCKET_BIND:
        case PSI_SOCKET_SEEK:
        case PSI_SOCKET_OPT:
        case PSI_SOCKET_STAT:
        case PSI_SOCKET_SHUTDOWN:
        case PSI_SOCKET_CLOSE:
        case PSI_SOCKET_SELECT:
          pfs_socket->m_socket_stat.m_io_stat.m_misc.aggregate_counted();
          return NULL;
        default:
          break;
      }
    }
  }

  state->m_flags= flags;
  state->m_socket= socket;
  state->m_operation= op;
  return reinterpret_cast<PSI_socket_locker*> (state);
}
3161 
3166 static void unlock_mutex_v1(PSI_mutex *mutex)
3167 {
3168  PFS_mutex *pfs_mutex= reinterpret_cast<PFS_mutex*> (mutex);
3169 
3170  DBUG_ASSERT(pfs_mutex != NULL);
3171 
3172  /*
3173  Note that this code is still protected by the instrumented mutex,
3174  and therefore is thread safe. See inline_mysql_mutex_unlock().
3175  */
3176 
3177  /* Always update the instrumented state */
3178  pfs_mutex->m_owner= NULL;
3179  pfs_mutex->m_last_locked= 0;
3180 
3181 #ifdef LATER_WL2333
3182  /*
3183  See WL#2333: SHOW ENGINE ... LOCK STATUS.
3184  PFS_mutex::m_lock_stat is not exposed in user visible tables
3185  currently, so there is no point spending time computing it.
3186  */
3187  if (! pfs_mutex->m_enabled)
3188  return;
3189 
3190  if (! pfs_mutex->m_timed)
3191  return;
3192 
3193  ulonglong locked_time;
3194  locked_time= get_timer_pico_value(wait_timer) - pfs_mutex->m_last_locked;
3195  pfs_mutex->m_mutex_stat.m_lock_stat.aggregate_value(locked_time);
3196 #endif
3197 }
3198 
3203 static void unlock_rwlock_v1(PSI_rwlock *rwlock)
3204 {
3205  PFS_rwlock *pfs_rwlock= reinterpret_cast<PFS_rwlock*> (rwlock);
3206  DBUG_ASSERT(pfs_rwlock != NULL);
3207  DBUG_ASSERT(pfs_rwlock == sanitize_rwlock(pfs_rwlock));
3208  DBUG_ASSERT(pfs_rwlock->m_class != NULL);
3209  DBUG_ASSERT(pfs_rwlock->m_lock.is_populated());
3210 
3211  bool last_writer= false;
3212  bool last_reader= false;
3213 
3214  /*
3215  Note that this code is still protected by the instrumented rwlock,
3216  and therefore is:
3217  - thread safe for write locks
3218  - almost thread safe for read locks (pfs_rwlock->m_readers is unsafe).
3219  See inline_mysql_rwlock_unlock()
3220  */
3221 
3222  /* Always update the instrumented state */
3223  if (pfs_rwlock->m_writer != NULL)
3224  {
3225  /* Nominal case, a writer is unlocking. */
3226  last_writer= true;
3227  pfs_rwlock->m_writer= NULL;
3228  /* Reset the readers stats, they could be off */
3229  pfs_rwlock->m_readers= 0;
3230  }
3231  else if (likely(pfs_rwlock->m_readers > 0))
3232  {
3233  /* Nominal case, a reader is unlocking. */
3234  if (--(pfs_rwlock->m_readers) == 0)
3235  last_reader= true;
3236  }
3237  else
3238  {
3239  /*
3240  Edge case, we have no writer and no readers,
3241  on an unlock event.
3242  This is possible for:
3243  - partial instrumentation
3244  - instrumentation disabled at runtime,
3245  see when get_thread_rwlock_locker_v1() returns NULL
3246  No further action is taken here, the next
3247  write lock will put the statistics is a valid state.
3248  */
3249  }
3250 
3251 #ifdef LATER_WL2333
3252  /* See WL#2333: SHOW ENGINE ... LOCK STATUS. */
3253 
3254  if (! pfs_rwlock->m_enabled)
3255  return;
3256 
3257  if (! pfs_rwlock->m_timed)
3258  return;
3259 
3260  ulonglong locked_time;
3261  if (last_writer)
3262  {
3263  locked_time= get_timer_pico_value(wait_timer) - pfs_rwlock->m_last_written;
3264  pfs_rwlock->m_rwlock_stat.m_write_lock_stat.aggregate_value(locked_time);
3265  }
3266  else if (last_reader)
3267  {
3268  locked_time= get_timer_pico_value(wait_timer) - pfs_rwlock->m_last_read;
3269  pfs_rwlock->m_rwlock_stat.m_read_lock_stat.aggregate_value(locked_time);
3270  }
3271 #else
3272  (void) last_reader;
3273  (void) last_writer;
3274 #endif
3275 }
3276 
3281 static void signal_cond_v1(PSI_cond* cond)
3282 {
3283  PFS_cond *pfs_cond= reinterpret_cast<PFS_cond*> (cond);
3284 
3285  DBUG_ASSERT(pfs_cond != NULL);
3286 
3287  pfs_cond->m_cond_stat.m_signal_count++;
3288 }
3289 
3294 static void broadcast_cond_v1(PSI_cond* cond)
3295 {
3296  PFS_cond *pfs_cond= reinterpret_cast<PFS_cond*> (cond);
3297 
3298  DBUG_ASSERT(pfs_cond != NULL);
3299 
3300  pfs_cond->m_cond_stat.m_broadcast_count++;
3301 }
3302 
/**
  Implementation of the idle instrumentation interface.
  Start an idle wait event (the thread is waiting for the next command).
  @param state     locker state, owned by the caller, filled in on success
  @param src_file  caller source file, recorded in the wait event
  @param src_line  caller source line, recorded in the wait event
  @return a locker, or NULL when idle waits are not instrumented
*/
static PSI_idle_locker*
start_idle_wait_v1(PSI_idle_locker_state* state, const char *src_file, uint src_line)
{
  DBUG_ASSERT(state != NULL);

  /*
    NOTE(review): the condition guarding this early return (original source
    line 3312) was lost in the doxygen extraction; presumably
    if (! flag_global_instrumentation) -- confirm against the original pfs.cc.
  */
    return NULL;

  /*
    NOTE(review): the condition guarding this early return (original source
    line 3315) was also lost; presumably
    if (! global_idle_class.m_enabled) -- confirm.
  */
    return NULL;

  register uint flags= 0;
  ulonglong timer_start= 0;

  /*
    NOTE(review): the guard of the next block (original source line 3321)
    was lost; presumably it tests flag_thread_instrumentation -- confirm.
  */
  {
    PFS_thread *pfs_thread= my_pthread_getspecific_ptr(PFS_thread*, THR_PFS);
    if (unlikely(pfs_thread == NULL))
      return NULL;
    if (!pfs_thread->m_enabled)
      return NULL;
    state->m_thread= reinterpret_cast<PSI_thread *> (pfs_thread);
    flags= STATE_FLAG_THREAD;

    /* An idle wait can not start while a statement is in progress. */
    DBUG_ASSERT(pfs_thread->m_events_statements_count == 0);

    /*
      NOTE(review): the guard of this block (original source line 3333) was
      lost; presumably it tests global_idle_class.m_timed -- confirm.
    */
    {
      timer_start= get_timer_raw_value_and_function(idle_timer, &state->m_timer);
      state->m_timer_start= timer_start;
      flags|= STATE_FLAG_TIMED;
    }

    /*
      NOTE(review): the guard of this block (original source line 3340) was
      lost; presumably it tests flag_events_waits_current -- confirm.
    */
    {
      if (unlikely(pfs_thread->m_events_waits_current >=
                   & pfs_thread->m_events_waits_stack[WAIT_STACK_SIZE]))
      {
        /* The per-thread wait stack is full: count the lost event and bail. */
        locker_lost++;
        return NULL;
      }
      PFS_events_waits *wait= pfs_thread->m_events_waits_current;
      state->m_wait= wait;
      flags|= STATE_FLAG_EVENT;

      wait->m_event_type= EVENT_TYPE_WAIT;
      /*
        IDLE events are waits, but by definition we know that
        such waits happen outside of any STAGE and STATEMENT,
        so they have no parents.
      */
      wait->m_nesting_event_id= 0;
      /* no need to set wait->m_nesting_event_type */

      wait->m_thread= pfs_thread;
      wait->m_class= &global_idle_class;
      wait->m_timer_start= timer_start;
      wait->m_timer_end= 0;
      wait->m_event_id= pfs_thread->m_event_id++;
      wait->m_end_event_id= 0;
      wait->m_operation= OPERATION_TYPE_IDLE;
      wait->m_source_file= src_file;
      wait->m_source_line= src_line;
      wait->m_wait_class= WAIT_CLASS_IDLE;

      pfs_thread->m_events_waits_current++;
    }
  }
  else
  {
    /*
      NOTE(review): the guard of this block (original source line 3377) was
      lost; presumably it tests global_idle_class.m_timed -- confirm.
    */
    {
      timer_start= get_timer_raw_value_and_function(idle_timer, &state->m_timer);
      state->m_timer_start= timer_start;
      flags= STATE_FLAG_TIMED;
    }
  }

  state->m_flags= flags;
  return reinterpret_cast<PSI_idle_locker*> (state);
}
3388 
/**
  Implementation of the idle instrumentation interface.
  End an idle wait event started by start_idle_wait_v1(),
  aggregating to the per-thread and global idle statistics.
  @param locker the locker returned by start_idle_wait_v1()
*/
static void end_idle_wait_v1(PSI_idle_locker* locker)
{
  PSI_idle_locker_state *state= reinterpret_cast<PSI_idle_locker_state*> (locker);
  DBUG_ASSERT(state != NULL);
  ulonglong timer_end= 0;
  ulonglong wait_time= 0;

  register uint flags= state->m_flags;

  if (flags & STATE_FLAG_TIMED)
  {
    /* Read the same timer chosen at wait start. */
    timer_end= state->m_timer();
    wait_time= timer_end - state->m_timer_start;
  }

  if (flags & STATE_FLAG_THREAD)
  {
    PFS_thread *thread= reinterpret_cast<PFS_thread *> (state->m_thread);
    PFS_single_stat *event_name_array;
    event_name_array= thread->m_instr_class_waits_stats;

    if (flags & STATE_FLAG_TIMED)
    {
      /* Aggregate to EVENTS_WAITS_SUMMARY_BY_THREAD_BY_EVENT_NAME (timed) */
      event_name_array[GLOBAL_IDLE_EVENT_INDEX].aggregate_value(wait_time);
    }
    else
    {
      /* Aggregate to EVENTS_WAITS_SUMMARY_BY_THREAD_BY_EVENT_NAME (counted) */
      event_name_array[GLOBAL_IDLE_EVENT_INDEX].aggregate_counted();
    }

    if (flags & STATE_FLAG_EVENT)
    {
      PFS_events_waits *wait= reinterpret_cast<PFS_events_waits*> (state->m_wait);
      DBUG_ASSERT(wait != NULL);

      wait->m_timer_end= timer_end;
      wait->m_end_event_id= thread->m_event_id;
      /*
        NOTE(review): guard lines were lost here (original source lines
        3432, 3434-3435); presumably the history inserts are conditional on
        flag_events_waits_history / flag_events_waits_history_long, with an
        insert_events_waits_history_long(thread, wait) call -- confirm
        against the original pfs.cc.
      */
      insert_events_waits_history(thread, wait);
      /* Pop the wait from the per-thread wait stack. */
      thread->m_events_waits_current--;
    }
  }

  if (flags & STATE_FLAG_TIMED)
  {
    /* Aggregate to EVENTS_WAITS_SUMMARY_GLOBAL_BY_EVENT_NAME (timed) */
    global_idle_stat.aggregate_value(wait_time);
  }
  else
  {
    /* Aggregate to EVENTS_WAITS_SUMMARY_GLOBAL_BY_EVENT_NAME (counted) */
    global_idle_stat.aggregate_counted();
  }
}
3451 
/**
  Implementation of the mutex instrumentation interface.
  End a mutex wait event started by a mutex locker,
  aggregating to the instance and per-thread statistics.
  @param locker the locker returned when the wait started
  @param rc     the result of the lock operation; 0 means the lock
                was acquired and this thread becomes the owner
*/
static void end_mutex_wait_v1(PSI_mutex_locker* locker, int rc)
{
  PSI_mutex_locker_state *state= reinterpret_cast<PSI_mutex_locker_state*> (locker);
  DBUG_ASSERT(state != NULL);

  ulonglong timer_end= 0;
  ulonglong wait_time= 0;

  PFS_mutex *mutex= reinterpret_cast<PFS_mutex *> (state->m_mutex);
  DBUG_ASSERT(mutex != NULL);
  PFS_thread *thread= reinterpret_cast<PFS_thread *> (state->m_thread);

  register uint flags= state->m_flags;

  if (flags & STATE_FLAG_TIMED)
  {
    /* Read the same timer chosen at wait start. */
    timer_end= state->m_timer();
    wait_time= timer_end - state->m_timer_start;
    /* Aggregate to EVENTS_WAITS_SUMMARY_BY_INSTANCE (timed) */
    mutex->m_mutex_stat.m_wait_stat.aggregate_value(wait_time);
  }
  else
  {
    /* Aggregate to EVENTS_WAITS_SUMMARY_BY_INSTANCE (counted) */
    mutex->m_mutex_stat.m_wait_stat.aggregate_counted();
  }

  /* On success, record the new owner and the lock timestamp. */
  if (likely(rc == 0))
  {
    mutex->m_owner= thread;
    mutex->m_last_locked= timer_end;
  }

  if (flags & STATE_FLAG_THREAD)
  {
    PFS_single_stat *event_name_array;
    event_name_array= thread->m_instr_class_waits_stats;
    uint index= mutex->m_class->m_event_name_index;

    if (flags & STATE_FLAG_TIMED)
    {
      /* Aggregate to EVENTS_WAITS_SUMMARY_BY_THREAD_BY_EVENT_NAME (timed) */
      event_name_array[index].aggregate_value(wait_time);
    }
    else
    {
      /* Aggregate to EVENTS_WAITS_SUMMARY_BY_THREAD_BY_EVENT_NAME (counted) */
      event_name_array[index].aggregate_counted();
    }

    if (flags & STATE_FLAG_EVENT)
    {
      PFS_events_waits *wait= reinterpret_cast<PFS_events_waits*> (state->m_wait);
      DBUG_ASSERT(wait != NULL);

      wait->m_timer_end= timer_end;
      wait->m_end_event_id= thread->m_event_id;
      /*
        NOTE(review): guard lines were lost here (original source lines
        3513, 3515-3516); presumably the history inserts are conditional on
        flag_events_waits_history / flag_events_waits_history_long, with an
        insert_events_waits_history_long(thread, wait) call -- confirm
        against the original pfs.cc.
      */
      insert_events_waits_history(thread, wait);
      /* Pop the wait from the per-thread wait stack. */
      thread->m_events_waits_current--;
    }
  }
}
3521 
/**
  Implementation of the rwlock instrumentation interface.
  Ends a rwlock READ lock wait: aggregates to the per-instance and
  (optionally) per-thread statistics, updates the reader bookkeeping on
  success, and finishes the current wait event row.
  NOTE(review): original lines 3592 and 3594-3595 are elided in this
  listing (presumably history-collection guards) -- consult pfs.cc.
  @param locker the rwlock locker returned by start_rwlock_rdwait
  @param rc the read lock operation result (0 means success)
*/
3526 static void end_rwlock_rdwait_v1(PSI_rwlock_locker* locker, int rc)
3527 {
3528  PSI_rwlock_locker_state *state= reinterpret_cast<PSI_rwlock_locker_state*> (locker);
3529  DBUG_ASSERT(state != NULL);
3530 
3531  ulonglong timer_end= 0;
3532  ulonglong wait_time= 0;
3533 
3534  PFS_rwlock *rwlock= reinterpret_cast<PFS_rwlock *> (state->m_rwlock);
3535  DBUG_ASSERT(rwlock != NULL);
3536 
3537  if (state->m_flags & STATE_FLAG_TIMED)
3538  {
3539  timer_end= state->m_timer();
3540  wait_time= timer_end - state->m_timer_start;
3541  /* Aggregate to EVENTS_WAITS_SUMMARY_BY_INSTANCE (timed) */
3542  rwlock->m_rwlock_stat.m_wait_stat.aggregate_value(wait_time);
3543  }
3544  else
3545  {
3546  /* Aggregate to EVENTS_WAITS_SUMMARY_BY_INSTANCE (counted) */
3547  rwlock->m_rwlock_stat.m_wait_stat.aggregate_counted();
3548  }
3549 
3550  if (rc == 0)
3551  {
3552  /*
3553  Warning:
3554  Multiple threads can execute this section concurrently
3555  (since multiple readers can execute in parallel).
3556  The statistics generated are not safe, which is why they are
3557  just statistics, not facts.
3558  */
3559  if (rwlock->m_readers == 0)
3560  rwlock->m_last_read= timer_end;
3561  rwlock->m_writer= NULL;
3562  rwlock->m_readers++;
3563  }
3564 
3565  if (state->m_flags & STATE_FLAG_THREAD)
3566  {
3567  PFS_thread *thread= reinterpret_cast<PFS_thread *> (state->m_thread);
3568  DBUG_ASSERT(thread != NULL);
3569 
3570  PFS_single_stat *event_name_array;
3571  event_name_array= thread->m_instr_class_waits_stats;
3572  uint index= rwlock->m_class->m_event_name_index;
3573 
3574  if (state->m_flags & STATE_FLAG_TIMED)
3575  {
3576  /* Aggregate to EVENTS_WAITS_SUMMARY_BY_THREAD_BY_EVENT_NAME (timed) */
3577  event_name_array[index].aggregate_value(wait_time);
3578  }
3579  else
3580  {
3581  /* Aggregate to EVENTS_WAITS_SUMMARY_BY_THREAD_BY_EVENT_NAME (counted) */
3582  event_name_array[index].aggregate_counted();
3583  }
3584 
3585  if (state->m_flags & STATE_FLAG_EVENT)
3586  {
3587  PFS_events_waits *wait= reinterpret_cast<PFS_events_waits*> (state->m_wait);
3588  DBUG_ASSERT(wait != NULL);
3589 
     /* Finish the EVENTS_WAITS_CURRENT row and pop the wait stack. */
3590  wait->m_timer_end= timer_end;
3591  wait->m_end_event_id= thread->m_event_id;
3593  insert_events_waits_history(thread, wait);
3596  thread->m_events_waits_current--;
3597  }
3598  }
3599 }
3600 
/**
  Implementation of the rwlock instrumentation interface.
  Ends a rwlock WRITE lock wait: aggregates to the per-instance and
  (optionally) per-thread statistics, records the writer and resets the
  reader bookkeeping on success, and finishes the current wait event row.
  NOTE(review): original lines 3664 and 3666-3667 are elided in this
  listing (presumably history-collection guards) -- consult pfs.cc.
  @param locker the rwlock locker returned by start_rwlock_wrwait
  @param rc the write lock operation result (0 means success)
*/
3605 static void end_rwlock_wrwait_v1(PSI_rwlock_locker* locker, int rc)
3606 {
3607  PSI_rwlock_locker_state *state= reinterpret_cast<PSI_rwlock_locker_state*> (locker);
3608  DBUG_ASSERT(state != NULL);
3609 
3610  ulonglong timer_end= 0;
3611  ulonglong wait_time= 0;
3612 
3613  PFS_rwlock *rwlock= reinterpret_cast<PFS_rwlock *> (state->m_rwlock);
3614  DBUG_ASSERT(rwlock != NULL);
3615  PFS_thread *thread= reinterpret_cast<PFS_thread *> (state->m_thread);
3616 
3617  if (state->m_flags & STATE_FLAG_TIMED)
3618  {
3619  timer_end= state->m_timer();
3620  wait_time= timer_end - state->m_timer_start;
3621  /* Aggregate to EVENTS_WAITS_SUMMARY_BY_INSTANCE (timed) */
3622  rwlock->m_rwlock_stat.m_wait_stat.aggregate_value(wait_time);
3623  }
3624  else
3625  {
3626  /* Aggregate to EVENTS_WAITS_SUMMARY_BY_INSTANCE (counted) */
3627  rwlock->m_rwlock_stat.m_wait_stat.aggregate_counted();
3628  }
3629 
3630  if (likely(rc == 0))
3631  {
3632  /* Thread safe : we are protected by the instrumented rwlock */
3633  rwlock->m_writer= thread;
3634  rwlock->m_last_written= timer_end;
3635  /* Reset the readers stats, they could be off */
3636  rwlock->m_readers= 0;
3637  rwlock->m_last_read= 0;
3638  }
3639 
3640  if (state->m_flags & STATE_FLAG_THREAD)
3641  {
3642  PFS_single_stat *event_name_array;
3643  event_name_array= thread->m_instr_class_waits_stats;
3644  uint index= rwlock->m_class->m_event_name_index;
3645 
3646  if (state->m_flags & STATE_FLAG_TIMED)
3647  {
3648  /* Aggregate to EVENTS_WAITS_SUMMARY_BY_THREAD_BY_EVENT_NAME (timed) */
3649  event_name_array[index].aggregate_value(wait_time);
3650  }
3651  else
3652  {
3653  /* Aggregate to EVENTS_WAITS_SUMMARY_BY_THREAD_BY_EVENT_NAME (counted) */
3654  event_name_array[index].aggregate_counted();
3655  }
3656 
3657  if (state->m_flags & STATE_FLAG_EVENT)
3658  {
3659  PFS_events_waits *wait= reinterpret_cast<PFS_events_waits*> (state->m_wait);
3660  DBUG_ASSERT(wait != NULL);
3661 
     /* Finish the EVENTS_WAITS_CURRENT row and pop the wait stack. */
3662  wait->m_timer_end= timer_end;
3663  wait->m_end_event_id= thread->m_event_id;
3665  insert_events_waits_history(thread, wait);
3668  thread->m_events_waits_current--;
3669  }
3670  }
3671 }
3672 
/**
  Implementation of the condition instrumentation interface.
  Ends a condition variable wait: aggregates to the per-instance and
  (optionally) per-thread statistics, and finishes the wait event row.
  NOTE(review): original lines 3728 and 3730-3731 are elided in this
  listing (presumably history-collection guards) -- consult pfs.cc.
  @param locker the condition locker returned by start_cond_wait
  @param rc the wait operation result (unused here)
*/
3677 static void end_cond_wait_v1(PSI_cond_locker* locker, int rc)
3678 {
3679  PSI_cond_locker_state *state= reinterpret_cast<PSI_cond_locker_state*> (locker);
3680  DBUG_ASSERT(state != NULL);
3681 
3682  ulonglong timer_end= 0;
3683  ulonglong wait_time= 0;
3684 
3685  PFS_cond *cond= reinterpret_cast<PFS_cond *> (state->m_cond);
3686  /* PFS_mutex *mutex= reinterpret_cast<PFS_mutex *> (state->m_mutex); */
3687 
3688  if (state->m_flags & STATE_FLAG_TIMED)
3689  {
3690  timer_end= state->m_timer();
3691  wait_time= timer_end - state->m_timer_start;
3692  /* Aggregate to EVENTS_WAITS_SUMMARY_BY_INSTANCE (timed) */
3693  cond->m_cond_stat.m_wait_stat.aggregate_value(wait_time);
3694  }
3695  else
3696  {
3697  /* Aggregate to EVENTS_WAITS_SUMMARY_BY_INSTANCE (counted) */
3698  cond->m_cond_stat.m_wait_stat.aggregate_counted();
3699  }
3700 
3701  if (state->m_flags & STATE_FLAG_THREAD)
3702  {
3703  PFS_thread *thread= reinterpret_cast<PFS_thread *> (state->m_thread);
3704  DBUG_ASSERT(thread != NULL);
3705 
3706  PFS_single_stat *event_name_array;
3707  event_name_array= thread->m_instr_class_waits_stats;
3708  uint index= cond->m_class->m_event_name_index;
3709 
3710  if (state->m_flags & STATE_FLAG_TIMED)
3711  {
3712  /* Aggregate to EVENTS_WAITS_SUMMARY_BY_THREAD_BY_EVENT_NAME (timed) */
3713  event_name_array[index].aggregate_value(wait_time);
3714  }
3715  else
3716  {
3717  /* Aggregate to EVENTS_WAITS_SUMMARY_BY_THREAD_BY_EVENT_NAME (counted) */
3718  event_name_array[index].aggregate_counted();
3719  }
3720 
3721  if (state->m_flags & STATE_FLAG_EVENT)
3722  {
3723  PFS_events_waits *wait= reinterpret_cast<PFS_events_waits*> (state->m_wait);
3724  DBUG_ASSERT(wait != NULL);
3725 
     /* Finish the EVENTS_WAITS_CURRENT row and pop the wait stack. */
3726  wait->m_timer_end= timer_end;
3727  wait->m_end_event_id= thread->m_event_id;
3729  insert_events_waits_history(thread, wait);
3732  thread->m_events_waits_current--;
3733  }
3734  }
3735 }
3736 
/**
  Implementation of the table instrumentation interface.
  Ends a table io wait: selects the per-index fetch/insert/update/delete
  statistic according to the operation, aggregates the wait (timed or
  counted), optionally aggregates to the per-thread event-name summary,
  and finishes the current wait event row.
  NOTE(review): original lines 3822 and 3824-3825 are elided in this
  listing (presumably history-collection guards) -- consult pfs.cc.
  @param locker the table locker returned by start_table_io_wait
*/
3741 static void end_table_io_wait_v1(PSI_table_locker* locker)
3742 {
3743  PSI_table_locker_state *state= reinterpret_cast<PSI_table_locker_state*> (locker);
3744  DBUG_ASSERT(state != NULL);
3745 
3746  ulonglong timer_end= 0;
3747  ulonglong wait_time= 0;
3748 
3749  PFS_table *table= reinterpret_cast<PFS_table *> (state->m_table);
3750  DBUG_ASSERT(table != NULL);
3751 
3752  PFS_single_stat *stat;
3753  PFS_table_io_stat *table_io_stat;
3754 
    /* m_index is either a valid key index, or MAX_INDEXES for "no index". */
3755  DBUG_ASSERT((state->m_index < table->m_share->m_key_count) ||
3756  (state->m_index == MAX_INDEXES));
3757 
3758  table_io_stat= & table->m_table_stat.m_index_stat[state->m_index];
3759  table_io_stat->m_has_data= true;
3760 
3761  switch (state->m_io_operation)
3762  {
3763  case PSI_TABLE_FETCH_ROW:
3764  stat= & table_io_stat->m_fetch;
3765  break;
3766  case PSI_TABLE_WRITE_ROW:
3767  stat= & table_io_stat->m_insert;
3768  break;
3769  case PSI_TABLE_UPDATE_ROW:
3770  stat= & table_io_stat->m_update;
3771  break;
3772  case PSI_TABLE_DELETE_ROW:
3773  stat= & table_io_stat->m_delete;
3774  break;
3775  default:
3776  DBUG_ASSERT(false);
3777  stat= NULL;
3778  break;
3779  }
3780 
3781  register uint flags= state->m_flags;
3782 
3783  if (flags & STATE_FLAG_TIMED)
3784  {
3785  timer_end= state->m_timer();
3786  wait_time= timer_end - state->m_timer_start;
3787  stat->aggregate_value(wait_time);
3788  }
3789  else
3790  {
3791  stat->aggregate_counted();
3792  }
3793 
3794  if (flags & STATE_FLAG_THREAD)
3795  {
3796  PFS_thread *thread= reinterpret_cast<PFS_thread *> (state->m_thread);
3797  DBUG_ASSERT(thread != NULL);
3798 
3799  PFS_single_stat *event_name_array;
3800  event_name_array= thread->m_instr_class_waits_stats;
3801 
3802  /*
3803  Aggregate to EVENTS_WAITS_SUMMARY_BY_THREAD_BY_EVENT_NAME
3804  (for wait/io/table/sql/handler)
3805  */
3806  if (flags & STATE_FLAG_TIMED)
3807  {
3808  event_name_array[GLOBAL_TABLE_IO_EVENT_INDEX].aggregate_value(wait_time);
3809  }
3810  else
3811  {
3812  event_name_array[GLOBAL_TABLE_IO_EVENT_INDEX].aggregate_counted();
3813  }
3814 
3815  if (flags & STATE_FLAG_EVENT)
3816  {
3817  PFS_events_waits *wait= reinterpret_cast<PFS_events_waits*> (state->m_wait);
3818  DBUG_ASSERT(wait != NULL);
3819 
     /* Finish the EVENTS_WAITS_CURRENT row and pop the wait stack. */
3820  wait->m_timer_end= timer_end;
3821  wait->m_end_event_id= thread->m_event_id;
3823  insert_events_waits_history(thread, wait);
3826  thread->m_events_waits_current--;
3827  }
3828  }
3829 
3830  table->m_has_io_stats= true;
3831 }
3832 
/**
  Implementation of the table instrumentation interface.
  Ends a table lock wait: aggregates to the per-lock-mode instance
  statistic (indexed by state->m_index), optionally to the per-thread
  event-name summary, and finishes the current wait event row.
  NOTE(review): original lines 3891 and 3893-3894 are elided in this
  listing (presumably history-collection guards) -- consult pfs.cc.
  @param locker the table locker returned by start_table_lock_wait
*/
3837 static void end_table_lock_wait_v1(PSI_table_locker* locker)
3838 {
3839  PSI_table_locker_state *state= reinterpret_cast<PSI_table_locker_state*> (locker);
3840  DBUG_ASSERT(state != NULL);
3841 
3842  ulonglong timer_end= 0;
3843  ulonglong wait_time= 0;
3844 
3845  PFS_table *table= reinterpret_cast<PFS_table *> (state->m_table);
3846  DBUG_ASSERT(table != NULL);
3847 
    /* Here m_index is the lock operation index, not a key index. */
3848  PFS_single_stat *stat= & table->m_table_stat.m_lock_stat.m_stat[state->m_index];
3849 
3850  register uint flags= state->m_flags;
3851 
3852  if (flags & STATE_FLAG_TIMED)
3853  {
3854  timer_end= state->m_timer();
3855  wait_time= timer_end - state->m_timer_start;
3856  stat->aggregate_value(wait_time);
3857  }
3858  else
3859  {
3860  stat->aggregate_counted();
3861  }
3862 
3863  if (flags & STATE_FLAG_THREAD)
3864  {
3865  PFS_thread *thread= reinterpret_cast<PFS_thread *> (state->m_thread);
3866  DBUG_ASSERT(thread != NULL);
3867 
3868  PFS_single_stat *event_name_array;
3869  event_name_array= thread->m_instr_class_waits_stats;
3870 
3871  /*
3872  Aggregate to EVENTS_WAITS_SUMMARY_BY_THREAD_BY_EVENT_NAME
3873  (for wait/lock/table/sql/handler)
3874  */
3875  if (flags & STATE_FLAG_TIMED)
3876  {
3877  event_name_array[GLOBAL_TABLE_LOCK_EVENT_INDEX].aggregate_value(wait_time);
3878  }
3879  else
3880  {
3881  event_name_array[GLOBAL_TABLE_LOCK_EVENT_INDEX].aggregate_counted();
3882  }
3883 
3884  if (flags & STATE_FLAG_EVENT)
3885  {
3886  PFS_events_waits *wait= reinterpret_cast<PFS_events_waits*> (state->m_wait);
3887  DBUG_ASSERT(wait != NULL);
3888 
     /* Finish the EVENTS_WAITS_CURRENT row and pop the wait stack. */
3889  wait->m_timer_end= timer_end;
3890  wait->m_end_event_id= thread->m_event_id;
3892  insert_events_waits_history(thread, wait);
3895  thread->m_events_waits_current--;
3896  }
3897  }
3898 
3899  table->m_has_lock_stats= true;
3900 }
3901 
3902 static void start_file_wait_v1(PSI_file_locker *locker,
3903  size_t count,
3904  const char *src_file,
3905  uint src_line);
3906 
3907 static void end_file_wait_v1(PSI_file_locker *locker,
3908  size_t count);
3909 
3914 static void start_file_open_wait_v1(PSI_file_locker *locker,
3915  const char *src_file,
3916  uint src_line)
3917 {
3918  start_file_wait_v1(locker, 0, src_file, src_line);
3919 
3920  return;
3921 }
3922 
3927 static PSI_file* end_file_open_wait_v1(PSI_file_locker *locker,
3928  void *result)
3929 {
3930  PSI_file_locker_state *state= reinterpret_cast<PSI_file_locker_state*> (locker);
3931  DBUG_ASSERT(state != NULL);
3932 
3933  switch (state->m_operation)
3934  {
3935  case PSI_FILE_STAT:
3936  break;
3937  case PSI_FILE_STREAM_OPEN:
3938  case PSI_FILE_CREATE:
3939  if (result != NULL)
3940  {
3941  PFS_file_class *klass= reinterpret_cast<PFS_file_class*> (state->m_class);
3942  PFS_thread *thread= reinterpret_cast<PFS_thread*> (state->m_thread);
3943  const char *name= state->m_name;
3944  uint len= strlen(name);
3945  PFS_file *pfs_file= find_or_create_file(thread, klass, name, len, true);
3946  state->m_file= reinterpret_cast<PSI_file*> (pfs_file);
3947  }
3948  break;
3949  case PSI_FILE_OPEN:
3950  default:
3951  DBUG_ASSERT(false);
3952  break;
3953  }
3954 
3955  end_file_wait_v1(locker, 0);
3956 
3957  return state->m_file;
3958 }
3959 
3964 static void end_file_open_wait_and_bind_to_descriptor_v1
3965  (PSI_file_locker *locker, File file)
3966 {
3967  PFS_file *pfs_file= NULL;
3968  int index= (int) file;
3969  PSI_file_locker_state *state= reinterpret_cast<PSI_file_locker_state*> (locker);
3970  DBUG_ASSERT(state != NULL);
3971 
3972  if (index >= 0)
3973  {
3974  PFS_file_class *klass= reinterpret_cast<PFS_file_class*> (state->m_class);
3975  PFS_thread *thread= reinterpret_cast<PFS_thread*> (state->m_thread);
3976  const char *name= state->m_name;
3977  uint len= strlen(name);
3978  pfs_file= find_or_create_file(thread, klass, name, len, true);
3979  state->m_file= reinterpret_cast<PSI_file*> (pfs_file);
3980  }
3981 
3982  end_file_wait_v1(locker, 0);
3983 
3984  if (likely(index >= 0))
3985  {
3986  if (likely(index < file_handle_max))
3987  file_handle_array[index]= pfs_file;
3988  else
3989  {
3990  if (pfs_file != NULL)
3991  release_file(pfs_file);
3992  file_handle_lost++;
3993  }
3994  }
3995 }
3996 
4001 static void start_file_wait_v1(PSI_file_locker *locker,
4002  size_t count,
4003  const char *src_file,
4004  uint src_line)
4005 {
4006  ulonglong timer_start= 0;
4007  PSI_file_locker_state *state= reinterpret_cast<PSI_file_locker_state*> (locker);
4008  DBUG_ASSERT(state != NULL);
4009 
4010  register uint flags= state->m_flags;
4011 
4012  if (flags & STATE_FLAG_TIMED)
4013  {
4014  timer_start= get_timer_raw_value_and_function(wait_timer, & state->m_timer);
4015  state->m_timer_start= timer_start;
4016  }
4017 
4018  if (flags & STATE_FLAG_EVENT)
4019  {
4020  PFS_events_waits *wait= reinterpret_cast<PFS_events_waits*> (state->m_wait);
4021  DBUG_ASSERT(wait != NULL);
4022 
4023  wait->m_timer_start= timer_start;
4024  wait->m_source_file= src_file;
4025  wait->m_source_line= src_line;
4026  wait->m_number_of_bytes= count;
4027  }
4028 }
4029 
/**
  Implementation of the file instrumentation interface.
  Ends a file wait: groups the operation into read / write / misc byte
  statistics, aggregates the wait (timed or counted, with byte count)
  to the file instance or the file class, optionally aggregates to the
  per-thread event-name summary, and finishes the wait event row.
  NOTE(review): original lines 4135, 4138-4139 and 4141-4142 are elided
  in this listing (presumably comments and history-collection guards)
  -- consult the original pfs.cc.
  @param locker the file locker
  @param byte_count the number of bytes transferred (negative when unknown)
*/
4034 static void end_file_wait_v1(PSI_file_locker *locker,
4035  size_t byte_count)
4036 {
4037  PSI_file_locker_state *state= reinterpret_cast<PSI_file_locker_state*> (locker);
4038  DBUG_ASSERT(state != NULL);
4039  PFS_file *file= reinterpret_cast<PFS_file *> (state->m_file);
4040  PFS_file_class *klass= reinterpret_cast<PFS_file_class *> (state->m_class);
4041  PFS_thread *thread= reinterpret_cast<PFS_thread *> (state->m_thread);
4042 
4043  ulonglong timer_end= 0;
4044  ulonglong wait_time= 0;
4045  PFS_byte_stat *byte_stat;
4046  register uint flags= state->m_flags;
    /* Treat a negative byte count (error result) as zero bytes. */
4047  size_t bytes= ((int)byte_count > -1 ? byte_count : 0);
4048 
4049  PFS_file_stat *file_stat;
4050 
    /* Aggregate to the instance when known, else to the class. */
4051  if (file != NULL)
4052  {
4053  file_stat= & file->m_file_stat;
4054  }
4055  else
4056  {
4057  file_stat= & klass->m_file_stat;
4058  }
4059 
4060  switch (state->m_operation)
4061  {
4062  /* Group read operations */
4063  case PSI_FILE_READ:
4064  byte_stat= &file_stat->m_io_stat.m_read;
4065  break;
4066  /* Group write operations */
4067  case PSI_FILE_WRITE:
4068  byte_stat= &file_stat->m_io_stat.m_write;
4069  break;
4070  /* Group remaining operations as miscellaneous */
4071  case PSI_FILE_CREATE:
4072  case PSI_FILE_CREATE_TMP:
4073  case PSI_FILE_OPEN:
4074  case PSI_FILE_STREAM_OPEN:
4075  case PSI_FILE_STREAM_CLOSE:
4076  case PSI_FILE_SEEK:
4077  case PSI_FILE_TELL:
4078  case PSI_FILE_FLUSH:
4079  case PSI_FILE_FSTAT:
4080  case PSI_FILE_CHSIZE:
4081  case PSI_FILE_DELETE:
4082  case PSI_FILE_RENAME:
4083  case PSI_FILE_SYNC:
4084  case PSI_FILE_STAT:
4085  case PSI_FILE_CLOSE:
4086  byte_stat= &file_stat->m_io_stat.m_misc;
4087  break;
4088  default:
4089  DBUG_ASSERT(false);
4090  byte_stat= NULL;
4091  break;
4092  }
4093 
4094  /* Aggregation for EVENTS_WAITS_SUMMARY_BY_INSTANCE */
4095  if (flags & STATE_FLAG_TIMED)
4096  {
4097  timer_end= state->m_timer();
4098  wait_time= timer_end - state->m_timer_start;
4099  /* Aggregate to EVENTS_WAITS_SUMMARY_BY_INSTANCE (timed) */
4100  byte_stat->aggregate(wait_time, bytes);
4101  }
4102  else
4103  {
4104  /* Aggregate to EVENTS_WAITS_SUMMARY_BY_INSTANCE (counted) */
4105  byte_stat->aggregate_counted(bytes);
4106  }
4107 
4108  if (flags & STATE_FLAG_THREAD)
4109  {
4110  DBUG_ASSERT(thread != NULL);
4111 
4112  PFS_single_stat *event_name_array;
4113  event_name_array= thread->m_instr_class_waits_stats;
4114  uint index= klass->m_event_name_index;
4115 
4116  if (flags & STATE_FLAG_TIMED)
4117  {
4118  /* Aggregate to EVENTS_WAITS_SUMMARY_BY_THREAD_BY_EVENT_NAME (timed) */
4119  event_name_array[index].aggregate_value(wait_time);
4120  }
4121  else
4122  {
4123  /* Aggregate to EVENTS_WAITS_SUMMARY_BY_THREAD_BY_EVENT_NAME (counted) */
4124  event_name_array[index].aggregate_counted();
4125  }
4126 
4127  if (state->m_flags & STATE_FLAG_EVENT)
4128  {
4129  PFS_events_waits *wait= reinterpret_cast<PFS_events_waits*> (state->m_wait);
4130  DBUG_ASSERT(wait != NULL);
4131 
     /* Finish the EVENTS_WAITS_CURRENT row and pop the wait stack. */
4132  wait->m_timer_end= timer_end;
4133  wait->m_number_of_bytes= bytes;
4134  wait->m_end_event_id= thread->m_event_id;
4136  wait->m_weak_file= file;
4137  wait->m_weak_version= (file ? file->get_version() : 0);
4138 
4140  insert_events_waits_history(thread, wait);
4143  thread->m_events_waits_current--;
4144  }
4145  }
4146 }
4147 
4152 static void start_file_close_wait_v1(PSI_file_locker *locker,
4153  const char *src_file,
4154  uint src_line)
4155 {
4156  PFS_thread *thread;
4157  const char *name;
4158  uint len;
4159  PFS_file *pfs_file;
4160  PSI_file_locker_state *state= reinterpret_cast<PSI_file_locker_state*> (locker);
4161  DBUG_ASSERT(state != NULL);
4162 
4163  switch (state->m_operation)
4164  {
4165  case PSI_FILE_DELETE:
4166  thread= reinterpret_cast<PFS_thread*> (state->m_thread);
4167  name= state->m_name;
4168  len= strlen(name);
4169  pfs_file= find_or_create_file(thread, NULL, name, len, false);
4170  state->m_file= reinterpret_cast<PSI_file*> (pfs_file);
4171  break;
4172  case PSI_FILE_STREAM_CLOSE:
4173  case PSI_FILE_CLOSE:
4174  break;
4175  default:
4176  DBUG_ASSERT(false);
4177  break;
4178  }
4179 
4180  start_file_wait_v1(locker, 0, src_file, src_line);
4181 
4182  return;
4183 }
4184 
4189 static void end_file_close_wait_v1(PSI_file_locker *locker, int rc)
4190 {
4191  PSI_file_locker_state *state= reinterpret_cast<PSI_file_locker_state*> (locker);
4192  DBUG_ASSERT(state != NULL);
4193 
4194  end_file_wait_v1(locker, 0);
4195 
4196  if (rc == 0)
4197  {
4198  PFS_thread *thread= reinterpret_cast<PFS_thread*> (state->m_thread);
4199  PFS_file *file= reinterpret_cast<PFS_file*> (state->m_file);
4200 
4201  /* Release or destroy the file if necessary */
4202  switch(state->m_operation)
4203  {
4204  case PSI_FILE_CLOSE:
4205  case PSI_FILE_STREAM_CLOSE:
4206  if (file != NULL)
4207  release_file(file);
4208  break;
4209  case PSI_FILE_DELETE:
4210  if (file != NULL)
4211  destroy_file(thread, file);
4212  break;
4213  default:
4214  DBUG_ASSERT(false);
4215  break;
4216  }
4217  }
4218  return;
4219 }
4220 
/**
  Implementation of the stage instrumentation interface.
  Starts a new stage: always updates threads.processlist_state, then
  (when stage instrumentation is enabled) finishes the previous stage
  event, aggregates it, and opens a new stage event parented to the
  current statement.
  NOTE(review): this listing elides original lines 4232, 4265, 4268,
  4270-4271 and 4307 -- in particular the guard before the `return;` at
  line 4233 is missing, so that return is NOT unconditional in the real
  source. Consult the original pfs.cc before editing.
  @param key the stage instrumentation key
  @param src_file the source file name of the instrumented call site
  @param src_line the source line number of the instrumented call site
*/
4221 static void start_stage_v1(PSI_stage_key key, const char *src_file, int src_line)
4222 {
4223  ulonglong timer_value= 0;
4224 
4225  PFS_thread *pfs_thread= my_pthread_getspecific_ptr(PFS_thread*, THR_PFS);
4226  if (unlikely(pfs_thread == NULL))
4227  return;
4228 
4229  /* Always update column threads.processlist_state. */
4230  pfs_thread->m_stage= key;
4231 
4233  return;
4234 
4235  if (flag_thread_instrumentation && ! pfs_thread->m_enabled)
4236  return;
4237 
4238  PFS_events_stages *pfs= & pfs_thread->m_stage_current;
4239  PFS_events_waits *child_wait= & pfs_thread->m_events_waits_stack[0];
4240  PFS_events_statements *parent_statement= & pfs_thread->m_statement_stack[0];
4241 
4242  PFS_instr_class *old_class= pfs->m_class;
4243  if (old_class != NULL)
4244  {
4245  PFS_stage_stat *event_name_array;
4246  event_name_array= pfs_thread->m_instr_class_stages_stats;
4247  uint index= old_class->m_event_name_index;
4248 
4249  /* Finish old event */
4250  if (old_class->m_timed)
4251  {
4252  timer_value= get_timer_raw_value(stage_timer);;
4253  pfs->m_timer_end= timer_value;
4254 
4255  /* Aggregate to EVENTS_STAGES_SUMMARY_BY_THREAD_BY_EVENT_NAME (timed) */
4256  ulonglong stage_time= timer_value - pfs->m_timer_start;
4257  event_name_array[index].aggregate_value(stage_time);
4258  }
4259  else
4260  {
4261  /* Aggregate to EVENTS_STAGES_SUMMARY_BY_THREAD_BY_EVENT_NAME (counted) */
4262  event_name_array[index].aggregate_counted();
4263  }
4264 
4266  {
4267  pfs->m_end_event_id= pfs_thread->m_event_id;
4269  insert_events_stages_history(pfs_thread, pfs);
4272  }
4273 
4274  /* This stage event is now complete. */
4275  pfs->m_class= NULL;
4276 
4277  /* New waits will now be attached directly to the parent statement. */
4278  child_wait->m_event_id= parent_statement->m_event_id;
4279  child_wait->m_event_type= parent_statement->m_event_type;
4280  /* See below for new stages, that may overwrite this. */
4281  }
4282 
4283  /* Start new event */
4284 
4285  PFS_stage_class *new_klass= find_stage_class(key);
4286  if (unlikely(new_klass == NULL))
4287  return;
4288 
4289  if (! new_klass->m_enabled)
4290  return;
4291 
4292  pfs->m_class= new_klass;
4293  if (new_klass->m_timed)
4294  {
4295  /*
4296  Do not call the timer again if we have a
4297  TIMER_END for the previous stage already.
4298  */
4299  if (timer_value == 0)
4300  timer_value= get_timer_raw_value(stage_timer);
4301  pfs->m_timer_start= timer_value;
4302  }
4303  else
4304  pfs->m_timer_start= 0;
4305  pfs->m_timer_end= 0;
4306 
4308  {
4309  /* m_thread_internal_id is immutable and already set */
4310  DBUG_ASSERT(pfs->m_thread_internal_id == pfs_thread->m_thread_internal_id);
4311  pfs->m_event_id= pfs_thread->m_event_id++;
4312  pfs->m_end_event_id= 0;
4313  pfs->m_source_file= src_file;
4314  pfs->m_source_line= src_line;
4315 
4316  /* New wait events will have this new stage as parent. */
4317  child_wait->m_event_id= pfs->m_event_id;
4318  child_wait->m_event_type= EVENT_TYPE_STAGE;
4319  }
4320 }
4321 
/**
  Implementation of the stage instrumentation interface.
  Ends the current stage: clears threads.processlist_state, finishes and
  aggregates the current stage event, and re-parents new waits to the
  enclosing statement.
  NOTE(review): this listing elides original lines 4332, 4363, 4366 and
  4368-4369 -- in particular the guard before the `return;` at line 4333
  is missing, so that return is NOT unconditional in the real source.
  Consult the original pfs.cc before editing.
*/
4322 static void end_stage_v1()
4323 {
4324  ulonglong timer_value= 0;
4325 
4326  PFS_thread *pfs_thread= my_pthread_getspecific_ptr(PFS_thread*, THR_PFS);
4327  if (unlikely(pfs_thread == NULL))
4328  return;
4329 
4330  pfs_thread->m_stage= 0;
4331 
4333  return;
4334 
4335  if (flag_thread_instrumentation && ! pfs_thread->m_enabled)
4336  return;
4337 
4338  PFS_events_stages *pfs= & pfs_thread->m_stage_current;
4339 
4340  PFS_instr_class *old_class= pfs->m_class;
4341  if (old_class != NULL)
4342  {
4343  PFS_stage_stat *event_name_array;
4344  event_name_array= pfs_thread->m_instr_class_stages_stats;
4345  uint index= old_class->m_event_name_index;
4346 
4347  /* Finish old event */
4348  if (old_class->m_timed)
4349  {
4350  timer_value= get_timer_raw_value(stage_timer);;
4351  pfs->m_timer_end= timer_value;
4352 
4353  /* Aggregate to EVENTS_STAGES_SUMMARY_BY_THREAD_BY_EVENT_NAME (timed) */
4354  ulonglong stage_time= timer_value - pfs->m_timer_start;
4355  event_name_array[index].aggregate_value(stage_time);
4356  }
4357  else
4358  {
4359  /* Aggregate to EVENTS_STAGES_SUMMARY_BY_THREAD_BY_EVENT_NAME (counted) */
4360  event_name_array[index].aggregate_counted();
4361  }
4362 
4364  {
4365  pfs->m_end_event_id= pfs_thread->m_event_id;
4367  insert_events_stages_history(pfs_thread, pfs);
4370  }
4371 
4372  /* New waits will now be attached directly to the parent statement. */
4373  PFS_events_waits *child_wait= & pfs_thread->m_events_waits_stack[0];
4374  PFS_events_statements *parent_statement= & pfs_thread->m_statement_stack[0];
4375  child_wait->m_event_id= parent_statement->m_event_id;
4376  child_wait->m_event_type= parent_statement->m_event_type;
4377 
4378  /* This stage is completed */
4379  pfs->m_class= NULL;
4380  }
4381 }
4382 
/**
  Implementation of the statement instrumentation interface.
  Creates a statement locker: checks that the statement class is
  enabled, optionally attaches the current thread and a new
  EVENTS_STATEMENTS_CURRENT row (fully reset here), prepares digest
  collection, and zero-initializes all locker counters.
  NOTE(review): this listing elides original lines 4389, 4391, 4399,
  4412, 4416 and 4430 -- among them the lookup that initializes `klass`
  and several enabling-condition guards, so the visible text is NOT the
  complete function. Consult the original pfs.cc before editing.
  @param state the locker state to initialize (caller-allocated)
  @param key the statement instrumentation key
  @param charset the connection character set (used for the digest)
  @return the statement locker, or NULL when not instrumented
*/
4383 static PSI_statement_locker*
4384 get_thread_statement_locker_v1(PSI_statement_locker_state *state,
4385  PSI_statement_key key,
4386  const void *charset)
4387 {
4388  DBUG_ASSERT(state != NULL);
4390  return NULL;
4392  if (unlikely(klass == NULL))
4393  return NULL;
4394  if (! klass->m_enabled)
4395  return NULL;
4396 
4397  register uint flags;
4398 
4400  {
4401  PFS_thread *pfs_thread= my_pthread_getspecific_ptr(PFS_thread*, THR_PFS);
4402  if (unlikely(pfs_thread == NULL))
4403  return NULL;
4404  if (! pfs_thread->m_enabled)
4405  return NULL;
4406  state->m_thread= reinterpret_cast<PSI_thread *> (pfs_thread);
4407  flags= STATE_FLAG_THREAD;
4408 
4409  if (klass->m_timed)
4410  flags|= STATE_FLAG_TIMED;
4411 
4413  {
4414  ulonglong event_id= pfs_thread->m_event_id++;
4415 
4417  {
4418  return NULL;
4419  }
4420 
4421  PFS_events_statements *pfs= & pfs_thread->m_statement_stack[pfs_thread->m_events_statements_count];
4422  /* m_thread_internal_id is immutable and already set */
4423  DBUG_ASSERT(pfs->m_thread_internal_id == pfs_thread->m_thread_internal_id);
4424  pfs->m_event_id= event_id;
4425  pfs->m_end_event_id= 0;
4426  pfs->m_class= klass;
4427  pfs->m_timer_start= 0;
4428  pfs->m_timer_end= 0;
4429  pfs->m_lock_time= 0;
4431  pfs->m_sqltext_length= 0;
4432 
     /* Reset all diagnostics and statistics of the new statement row. */
4433  pfs->m_message_text[0]= '\0';
4434  pfs->m_sql_errno= 0;
4435  pfs->m_sqlstate[0]= '\0';
4436  pfs->m_error_count= 0;
4437  pfs->m_warning_count= 0;
4438  pfs->m_rows_affected= 0;
4439 
4440  pfs->m_rows_sent= 0;
4441  pfs->m_rows_examined= 0;
4442  pfs->m_created_tmp_disk_tables= 0;
4443  pfs->m_created_tmp_tables= 0;
4444  pfs->m_select_full_join= 0;
4445  pfs->m_select_full_range_join= 0;
4446  pfs->m_select_range= 0;
4447  pfs->m_select_range_check= 0;
4448  pfs->m_select_scan= 0;
4449  pfs->m_sort_merge_passes= 0;
4450  pfs->m_sort_range= 0;
4451  pfs->m_sort_rows= 0;
4452  pfs->m_sort_scan= 0;
4453  pfs->m_no_index_used= 0;
4454  pfs->m_no_good_index_used= 0;
4455  digest_reset(& pfs->m_digest_storage);
4456 
4457  /* New stages will have this statement as parent */
4458  PFS_events_stages *child_stage= & pfs_thread->m_stage_current;
4459  child_stage->m_nesting_event_id= event_id;
4460  child_stage->m_nesting_event_type= EVENT_TYPE_STATEMENT;
4461 
4462  /* New waits will have this statement as parent, if no stage is instrumented */
4463  PFS_events_waits *child_wait= & pfs_thread->m_events_waits_stack[0];
4464  child_wait->m_nesting_event_id= event_id;
4465  child_wait->m_nesting_event_type= EVENT_TYPE_STATEMENT;
4466 
4467  state->m_statement= pfs;
4468  flags|= STATE_FLAG_EVENT;
4469 
4470  pfs_thread->m_events_statements_count++;
4471  }
4472  }
4473  else
4474  {
     /* No thread instrumentation: only the timed flag can apply. */
4475  if (klass->m_timed)
4476  flags= STATE_FLAG_TIMED;
4477  else
4478  flags= 0;
4479  }
4480 
4481  if (flag_statements_digest)
4482  {
     /* Prepare the digest computation for this statement. */
4483  const CHARSET_INFO *cs= static_cast <const CHARSET_INFO*> (charset);
4484  flags|= STATE_FLAG_DIGEST;
4485  state->m_digest_state.m_last_id_index= 0;
4486  digest_reset(& state->m_digest_state.m_digest_storage);
4487  state->m_digest_state.m_digest_storage.m_charset_number= cs->number;
4488  }
4489 
4490  state->m_discarded= false;
4491  state->m_class= klass;
4492  state->m_flags= flags;
4493 
4494  state->m_lock_time= 0;
4495  state->m_rows_sent= 0;
4496  state->m_rows_examined= 0;
4497  state->m_created_tmp_disk_tables= 0;
4498  state->m_created_tmp_tables= 0;
4499  state->m_select_full_join= 0;
4500  state->m_select_full_range_join= 0;
4501  state->m_select_range= 0;
4502  state->m_select_range_check= 0;
4503  state->m_select_scan= 0;
4504  state->m_sort_merge_passes= 0;
4505  state->m_sort_range= 0;
4506  state->m_sort_rows= 0;
4507  state->m_sort_scan= 0;
4508  state->m_no_index_used= 0;
4509  state->m_no_good_index_used= 0;
4510 
4511  state->m_schema_name_length= 0;
4512 
4513  return reinterpret_cast<PSI_statement_locker*> (state);
4514 }
4515 
4516 static PSI_statement_locker*
4517 refine_statement_v1(PSI_statement_locker *locker,
4518  PSI_statement_key key)
4519 {
4520  PSI_statement_locker_state *state= reinterpret_cast<PSI_statement_locker_state*> (locker);
4521  if (state == NULL)
4522  return NULL;
4523  DBUG_ASSERT(state->m_class != NULL);
4524  PFS_statement_class *klass;
4525  /* Only refine statements for mutable instrumentation */
4526  klass= reinterpret_cast<PFS_statement_class*> (state->m_class);
4527  DBUG_ASSERT(klass->m_flags & PSI_FLAG_MUTABLE);
4528  klass= find_statement_class(key);
4529 
4530  uint flags= state->m_flags;
4531 
4532  if (unlikely(klass == NULL) || !klass->m_enabled)
4533  {
4534  /* pop statement stack */
4535  if (flags & STATE_FLAG_THREAD)
4536  {
4537  PFS_thread *pfs_thread= reinterpret_cast<PFS_thread *> (state->m_thread);
4538  DBUG_ASSERT(pfs_thread != NULL);
4539  if (pfs_thread->m_events_statements_count > 0)
4540  pfs_thread->m_events_statements_count--;
4541  }
4542 
4543  state->m_discarded= true;
4544  return NULL;
4545  }
4546 
4547  if ((flags & STATE_FLAG_TIMED) && ! klass->m_timed)
4548  flags= flags & ~STATE_FLAG_TIMED;
4549 
4550  if (flags & STATE_FLAG_EVENT)
4551  {
4552  PFS_events_statements *pfs= reinterpret_cast<PFS_events_statements*> (state->m_statement);
4553  DBUG_ASSERT(pfs != NULL);
4554 
4555  /* mutate EVENTS_STATEMENTS_CURRENT.EVENT_NAME */
4556  pfs->m_class= klass;
4557  }
4558 
4559  state->m_class= klass;
4560  state->m_flags= flags;
4561  return reinterpret_cast<PSI_statement_locker*> (state);
4562 }
4563 
4564 static void start_statement_v1(PSI_statement_locker *locker,
4565  const char *db, uint db_len,
4566  const char *src_file, uint src_line)
4567 {
4568  PSI_statement_locker_state *state= reinterpret_cast<PSI_statement_locker_state*> (locker);
4569  DBUG_ASSERT(state != NULL);
4570 
4571  register uint flags= state->m_flags;
4572  ulonglong timer_start= 0;
4573 
4574  if (flags & STATE_FLAG_TIMED)
4575  {
4576  timer_start= get_timer_raw_value_and_function(statement_timer, & state->m_timer);
4577  state->m_timer_start= timer_start;
4578  }
4579 
4580  compile_time_assert(PSI_SCHEMA_NAME_LEN == NAME_LEN);
4581  DBUG_ASSERT(db_len <= sizeof(state->m_schema_name));
4582 
4583  if (db_len > 0)
4584  memcpy(state->m_schema_name, db, db_len);
4585  state->m_schema_name_length= db_len;
4586 
4587  if (flags & STATE_FLAG_EVENT)
4588  {
4589  PFS_events_statements *pfs= reinterpret_cast<PFS_events_statements*> (state->m_statement);
4590  DBUG_ASSERT(pfs != NULL);
4591 
4592  pfs->m_timer_start= timer_start;
4593  pfs->m_source_file= src_file;
4594  pfs->m_source_line= src_line;
4595 
4596  DBUG_ASSERT(db_len <= sizeof(pfs->m_current_schema_name));
4597  if (db_len > 0)
4598  memcpy(pfs->m_current_schema_name, db, db_len);
4599  pfs->m_current_schema_name_length= db_len;
4600  }
4601 }
4602 
4603 static void set_statement_text_v1(PSI_statement_locker *locker,
4604  const char *text, uint text_len)
4605 {
4606  PSI_statement_locker_state *state= reinterpret_cast<PSI_statement_locker_state*> (locker);
4607  DBUG_ASSERT(state != NULL);
4608 
4609  if (state->m_discarded)
4610  return;
4611 
4612  if (state->m_flags & STATE_FLAG_EVENT)
4613  {
4614  PFS_events_statements *pfs= reinterpret_cast<PFS_events_statements*> (state->m_statement);
4615  DBUG_ASSERT(pfs != NULL);
4616  if (text_len > sizeof (pfs->m_sqltext))
4617  text_len= sizeof(pfs->m_sqltext);
4618  if (text_len)
4619  memcpy(pfs->m_sqltext, text, text_len);
4620  pfs->m_sqltext_length= text_len;
4621  }
4622 
4623  return;
4624 }
4625 
/*
  Common body for the set_statement_*_v1 functions:
  sets attribute ATTR to VALUE in the locker state, and mirrors it into
  the EVENTS_STATEMENTS_CURRENT row when an event is collected.
  No-op for NULL or discarded lockers.
*/
4626 #define SET_STATEMENT_ATTR_BODY(LOCKER, ATTR, VALUE) \
4627  PSI_statement_locker_state *state; \
4628  state= reinterpret_cast<PSI_statement_locker_state*> (LOCKER); \
4629  if (unlikely(state == NULL)) \
4630  return; \
4631  if (state->m_discarded) \
4632  return; \
4633  state->ATTR= VALUE; \
4634  if (state->m_flags & STATE_FLAG_EVENT) \
4635  { \
4636  PFS_events_statements *pfs; \
4637  pfs= reinterpret_cast<PFS_events_statements*> (state->m_statement); \
4638  DBUG_ASSERT(pfs != NULL); \
4639  pfs->ATTR= VALUE; \
4640  } \
4641  return;
4642 
/**
  Body template for the inc_statement_*_v1 attribute counters.
  Adds VALUE to the locker state counter, and mirrors the increment
  into the EVENTS_STATEMENTS_CURRENT row when the statement is
  instrumented as an event.  Expands to a full function body.
*/
#define INC_STATEMENT_ATTR_BODY(LOCKER, ATTR, VALUE) \
  PSI_statement_locker_state *state; \
  state= reinterpret_cast<PSI_statement_locker_state*> (LOCKER); \
  if (unlikely(state == NULL)) \
    return; \
  if (state->m_discarded) \
    return; \
  state->ATTR+= VALUE; \
  if (state->m_flags & STATE_FLAG_EVENT) \
  { \
    PFS_events_statements *pfs; \
    pfs= reinterpret_cast<PFS_events_statements*> (state->m_statement); \
    DBUG_ASSERT(pfs != NULL); \
    pfs->ATTR+= VALUE; \
  } \
  return;
4659 
/**
  Implementation of the statement instrumentation interface.
  @sa PSI_v1::set_statement_lock_time.
*/
static void set_statement_lock_time_v1(PSI_statement_locker *locker,
                                       ulonglong count)
{
  SET_STATEMENT_ATTR_BODY(locker, m_lock_time, count);
}
4665 
/**
  Implementation of the statement instrumentation interface.
  @sa PSI_v1::set_statement_rows_sent.
*/
static void set_statement_rows_sent_v1(PSI_statement_locker *locker,
                                       ulonglong count)
{
  SET_STATEMENT_ATTR_BODY(locker, m_rows_sent, count);
}
4671 
/**
  Implementation of the statement instrumentation interface.
  @sa PSI_v1::set_statement_rows_examined.
*/
static void set_statement_rows_examined_v1(PSI_statement_locker *locker,
                                           ulonglong count)
{
  SET_STATEMENT_ATTR_BODY(locker, m_rows_examined, count);
}
4677 
/**
  Implementation of the statement instrumentation interface.
  @sa PSI_v1::inc_statement_created_tmp_disk_tables.
*/
static void inc_statement_created_tmp_disk_tables_v1(PSI_statement_locker *locker,
                                                     ulong count)
{
  INC_STATEMENT_ATTR_BODY(locker, m_created_tmp_disk_tables, count);
}
4683 
/**
  Implementation of the statement instrumentation interface.
  @sa PSI_v1::inc_statement_created_tmp_tables.
*/
static void inc_statement_created_tmp_tables_v1(PSI_statement_locker *locker,
                                                ulong count)
{
  INC_STATEMENT_ATTR_BODY(locker, m_created_tmp_tables, count);
}
4689 
/**
  Implementation of the statement instrumentation interface.
  @sa PSI_v1::inc_statement_select_full_join.
*/
static void inc_statement_select_full_join_v1(PSI_statement_locker *locker,
                                              ulong count)
{
  INC_STATEMENT_ATTR_BODY(locker, m_select_full_join, count);
}
4695 
/**
  Implementation of the statement instrumentation interface.
  @sa PSI_v1::inc_statement_select_full_range_join.
*/
static void inc_statement_select_full_range_join_v1(PSI_statement_locker *locker,
                                                    ulong count)
{
  INC_STATEMENT_ATTR_BODY(locker, m_select_full_range_join, count);
}
4701 
/**
  Implementation of the statement instrumentation interface.
  @sa PSI_v1::inc_statement_select_range.
*/
static void inc_statement_select_range_v1(PSI_statement_locker *locker,
                                          ulong count)
{
  INC_STATEMENT_ATTR_BODY(locker, m_select_range, count);
}
4707 
/**
  Implementation of the statement instrumentation interface.
  @sa PSI_v1::inc_statement_select_range_check.
*/
static void inc_statement_select_range_check_v1(PSI_statement_locker *locker,
                                                ulong count)
{
  INC_STATEMENT_ATTR_BODY(locker, m_select_range_check, count);
}
4713 
/**
  Implementation of the statement instrumentation interface.
  @sa PSI_v1::inc_statement_select_scan.
*/
static void inc_statement_select_scan_v1(PSI_statement_locker *locker,
                                         ulong count)
{
  INC_STATEMENT_ATTR_BODY(locker, m_select_scan, count);
}
4719 
/**
  Implementation of the statement instrumentation interface.
  @sa PSI_v1::inc_statement_sort_merge_passes.
*/
static void inc_statement_sort_merge_passes_v1(PSI_statement_locker *locker,
                                               ulong count)
{
  INC_STATEMENT_ATTR_BODY(locker, m_sort_merge_passes, count);
}
4725 
/**
  Implementation of the statement instrumentation interface.
  @sa PSI_v1::inc_statement_sort_range.
*/
static void inc_statement_sort_range_v1(PSI_statement_locker *locker,
                                        ulong count)
{
  INC_STATEMENT_ATTR_BODY(locker, m_sort_range, count);
}
4731 
/**
  Implementation of the statement instrumentation interface.
  @sa PSI_v1::inc_statement_sort_rows.
*/
static void inc_statement_sort_rows_v1(PSI_statement_locker *locker,
                                       ulong count)
{
  INC_STATEMENT_ATTR_BODY(locker, m_sort_rows, count);
}
4737 
/**
  Implementation of the statement instrumentation interface.
  @sa PSI_v1::inc_statement_sort_scan.
*/
static void inc_statement_sort_scan_v1(PSI_statement_locker *locker,
                                       ulong count)
{
  INC_STATEMENT_ATTR_BODY(locker, m_sort_scan, count);
}
4743 
/**
  Implementation of the statement instrumentation interface.
  @sa PSI_v1::set_statement_no_index_used.
*/
static void set_statement_no_index_used_v1(PSI_statement_locker *locker)
{
  SET_STATEMENT_ATTR_BODY(locker, m_no_index_used, 1);
}
4748 
/**
  Implementation of the statement instrumentation interface.
  @sa PSI_v1::set_statement_no_good_index_used.
*/
static void set_statement_no_good_index_used_v1(PSI_statement_locker *locker)
{
  SET_STATEMENT_ATTR_BODY(locker, m_no_good_index_used, 1);
}
4753 
4754 static void end_statement_v1(PSI_statement_locker *locker, void *stmt_da)
4755 {
4756  PSI_statement_locker_state *state= reinterpret_cast<PSI_statement_locker_state*> (locker);
4757  Diagnostics_area *da= reinterpret_cast<Diagnostics_area*> (stmt_da);
4758  DBUG_ASSERT(state != NULL);
4759  DBUG_ASSERT(da != NULL);
4760 
4761  if (state->m_discarded)
4762  return;
4763 
4764  PFS_statement_class *klass= reinterpret_cast<PFS_statement_class *> (state->m_class);
4765  DBUG_ASSERT(klass != NULL);
4766 
4767  ulonglong timer_end= 0;
4768  ulonglong wait_time= 0;
4769  register uint flags= state->m_flags;
4770 
4771  if (flags & STATE_FLAG_TIMED)
4772  {
4773  timer_end= state->m_timer();
4774  wait_time= timer_end - state->m_timer_start;
4775  }
4776 
4777  PFS_statement_stat *event_name_array;
4778  uint index= klass->m_event_name_index;
4779  PFS_statement_stat *stat;
4780 
4781  /*
4782  Capture statement stats by digest.
4783  */
4784  PSI_digest_storage *digest_storage= NULL;
4785  PFS_statement_stat *digest_stat= NULL;
4786 
4787  if (flags & STATE_FLAG_THREAD)
4788  {
4789  PFS_thread *thread= reinterpret_cast<PFS_thread *> (state->m_thread);
4790  DBUG_ASSERT(thread != NULL);
4791  event_name_array= thread->m_instr_class_statements_stats;
4792  /* Aggregate to EVENTS_STATEMENTS_SUMMARY_BY_THREAD_BY_EVENT_NAME */
4793  stat= & event_name_array[index];
4794 
4795  if (flags & STATE_FLAG_DIGEST)
4796  {
4797  digest_storage= &state->m_digest_state.m_digest_storage;
4798  /* Populate PFS_statements_digest_stat with computed digest information.*/
4799  digest_stat= find_or_create_digest(thread, digest_storage,
4800  state->m_schema_name,
4801  state->m_schema_name_length);
4802  }
4803 
4804  if (flags & STATE_FLAG_EVENT)
4805  {
4806  PFS_events_statements *pfs= reinterpret_cast<PFS_events_statements*> (state->m_statement);
4807  DBUG_ASSERT(pfs != NULL);
4808 
4809  switch(da->status())
4810  {
4812  break;
4814  memcpy(pfs->m_message_text, da->message(), MYSQL_ERRMSG_SIZE);
4815  pfs->m_message_text[MYSQL_ERRMSG_SIZE]= 0;
4816  pfs->m_rows_affected= da->affected_rows();
4817  pfs->m_warning_count= da->statement_warn_count();
4818  memcpy(pfs->m_sqlstate, "00000", SQLSTATE_LENGTH);
4819  break;
4821  pfs->m_warning_count= da->statement_warn_count();
4822  break;
4824  memcpy(pfs->m_message_text, da->message(), MYSQL_ERRMSG_SIZE);
4825  pfs->m_message_text[MYSQL_ERRMSG_SIZE]= 0;
4826  pfs->m_sql_errno= da->sql_errno();
4827  memcpy(pfs->m_sqlstate, da->get_sqlstate(), SQLSTATE_LENGTH);
4828  break;
4830  break;
4831  }
4832 
4833  pfs->m_timer_end= timer_end;
4834  pfs->m_end_event_id= thread->m_event_id;
4835 
4836  if (flags & STATE_FLAG_DIGEST)
4837  {
4838  /*
4839  The following columns in events_statement_current:
4840  - DIGEST,
4841  - DIGEST_TEXT
4842  are computed from the digest storage.
4843  */
4844  digest_copy(& pfs->m_digest_storage, digest_storage);
4845  }
4846 
4848  insert_events_statements_history(thread, pfs);
4851 
4852  DBUG_ASSERT(thread->m_events_statements_count > 0);
4853  thread->m_events_statements_count--;
4854  }
4855  }
4856  else
4857  {
4858  if (flags & STATE_FLAG_DIGEST)
4859  {
4860  PFS_thread *thread= my_pthread_getspecific_ptr(PFS_thread*, THR_PFS);
4861 
4862  /* An instrumented thread is required, for LF_PINS. */
4863  if (thread != NULL)
4864  {
4865  /* Set digest stat. */
4866  digest_storage= &state->m_digest_state.m_digest_storage;
4867  /* Populate statements_digest_stat with computed digest information. */
4868  digest_stat= find_or_create_digest(thread, digest_storage,
4869  state->m_schema_name,
4870  state->m_schema_name_length);
4871  }
4872  }
4873 
4874  event_name_array= global_instr_class_statements_array;
4875  /* Aggregate to EVENTS_STATEMENTS_SUMMARY_GLOBAL_BY_EVENT_NAME */
4876  stat= & event_name_array[index];
4877  }
4878 
4879  if (flags & STATE_FLAG_TIMED)
4880  {
4881  /* Aggregate to EVENTS_STATEMENTS_SUMMARY_..._BY_EVENT_NAME (timed) */
4882  stat->aggregate_value(wait_time);
4883  }
4884  else
4885  {
4886  /* Aggregate to EVENTS_STATEMENTS_SUMMARY_..._BY_EVENT_NAME (counted) */
4887  stat->aggregate_counted();
4888  }
4889 
4890  stat->m_lock_time+= state->m_lock_time;
4891  stat->m_rows_sent+= state->m_rows_sent;
4892  stat->m_rows_examined+= state->m_rows_examined;
4893  stat->m_created_tmp_disk_tables+= state->m_created_tmp_disk_tables;
4894  stat->m_created_tmp_tables+= state->m_created_tmp_tables;
4895  stat->m_select_full_join+= state->m_select_full_join;
4896  stat->m_select_full_range_join+= state->m_select_full_range_join;
4897  stat->m_select_range+= state->m_select_range;
4898  stat->m_select_range_check+= state->m_select_range_check;
4899  stat->m_select_scan+= state->m_select_scan;
4900  stat->m_sort_merge_passes+= state->m_sort_merge_passes;
4901  stat->m_sort_range+= state->m_sort_range;
4902  stat->m_sort_rows+= state->m_sort_rows;
4903  stat->m_sort_scan+= state->m_sort_scan;
4904  stat->m_no_index_used+= state->m_no_index_used;
4905  stat->m_no_good_index_used+= state->m_no_good_index_used;
4906 
4907  if (digest_stat != NULL)
4908  {
4909  if (flags & STATE_FLAG_TIMED)
4910  {
4911  digest_stat->aggregate_value(wait_time);
4912  }
4913  else
4914  {
4915  digest_stat->aggregate_counted();
4916  }
4917 
4918  digest_stat->m_lock_time+= state->m_lock_time;
4919  digest_stat->m_rows_sent+= state->m_rows_sent;
4920  digest_stat->m_rows_examined+= state->m_rows_examined;
4921  digest_stat->m_created_tmp_disk_tables+= state->m_created_tmp_disk_tables;
4922  digest_stat->m_created_tmp_tables+= state->m_created_tmp_tables;
4923  digest_stat->m_select_full_join+= state->m_select_full_join;
4924  digest_stat->m_select_full_range_join+= state->m_select_full_range_join;
4925  digest_stat->m_select_range+= state->m_select_range;
4926  digest_stat->m_select_range_check+= state->m_select_range_check;
4927  digest_stat->m_select_scan+= state->m_select_scan;
4928  digest_stat->m_sort_merge_passes+= state->m_sort_merge_passes;
4929  digest_stat->m_sort_range+= state->m_sort_range;
4930  digest_stat->m_sort_rows+= state->m_sort_rows;
4931  digest_stat->m_sort_scan+= state->m_sort_scan;
4932  digest_stat->m_no_index_used+= state->m_no_index_used;
4933  digest_stat->m_no_good_index_used+= state->m_no_good_index_used;
4934  }
4935 
4936  switch (da->status())
4937  {
4939  break;
4941  stat->m_rows_affected+= da->affected_rows();
4942  stat->m_warning_count+= da->statement_warn_count();
4943  if (digest_stat != NULL)
4944  {
4945  digest_stat->m_rows_affected+= da->affected_rows();
4946  digest_stat->m_warning_count+= da->statement_warn_count();
4947  }
4948  break;
4950  stat->m_warning_count+= da->statement_warn_count();
4951  if (digest_stat != NULL)
4952  {
4953  digest_stat->m_warning_count+= da->statement_warn_count();
4954  }
4955  break;
4957  stat->m_error_count++;
4958  if (digest_stat != NULL)
4959  {
4960  digest_stat->m_error_count++;
4961  }
4962  break;
4964  break;
4965  }
4966 }
4967 
4972 static void end_socket_wait_v1(PSI_socket_locker *locker, size_t byte_count)
4973 {
4974  PSI_socket_locker_state *state= reinterpret_cast<PSI_socket_locker_state*> (locker);
4975  DBUG_ASSERT(state != NULL);
4976 
4977  PFS_socket *socket= reinterpret_cast<PFS_socket *>(state->m_socket);
4978  DBUG_ASSERT(socket != NULL);
4979 
4980  ulonglong timer_end= 0;
4981  ulonglong wait_time= 0;
4982  PFS_byte_stat *byte_stat;
4983  register uint flags= state->m_flags;
4984  size_t bytes= ((int)byte_count > -1 ? byte_count : 0);
4985 
4986  switch (state->m_operation)
4987  {
4988  /* Group read operations */
4989  case PSI_SOCKET_RECV:
4990  case PSI_SOCKET_RECVFROM:
4991  case PSI_SOCKET_RECVMSG:
4992  byte_stat= &socket->m_socket_stat.m_io_stat.m_read;
4993  break;
4994  /* Group write operations */
4995  case PSI_SOCKET_SEND:
4996  case PSI_SOCKET_SENDTO:
4997  case PSI_SOCKET_SENDMSG:
4998  byte_stat= &socket->m_socket_stat.m_io_stat.m_write;
4999  break;
5000  /* Group remaining operations as miscellaneous */
5001  case PSI_SOCKET_CONNECT:
5002  case PSI_SOCKET_CREATE:
5003  case PSI_SOCKET_BIND:
5004  case PSI_SOCKET_SEEK:
5005  case PSI_SOCKET_OPT:
5006  case PSI_SOCKET_STAT:
5007  case PSI_SOCKET_SHUTDOWN:
5008  case PSI_SOCKET_SELECT:
5009  case PSI_SOCKET_CLOSE:
5010  byte_stat= &socket->m_socket_stat.m_io_stat.m_misc;
5011  break;
5012  default:
5013  DBUG_ASSERT(false);
5014  byte_stat= NULL;
5015  break;
5016  }
5017 
5018  /* Aggregation for EVENTS_WAITS_SUMMARY_BY_INSTANCE */
5019  if (flags & STATE_FLAG_TIMED)
5020  {
5021  timer_end= state->m_timer();
5022  wait_time= timer_end - state->m_timer_start;
5023 
5024  /* Aggregate to the socket instrument for now (timed) */
5025  byte_stat->aggregate(wait_time, bytes);
5026  }
5027  else
5028  {
5029  /* Aggregate to the socket instrument (event count and byte count) */
5030  byte_stat->aggregate_counted(bytes);
5031  }
5032 
5033  /* Aggregate to EVENTS_WAITS_HISTORY and EVENTS_WAITS_HISTORY_LONG */
5034  if (flags & STATE_FLAG_EVENT)
5035  {
5036  PFS_thread *thread= reinterpret_cast<PFS_thread *>(state->m_thread);
5037  DBUG_ASSERT(thread != NULL);
5038  PFS_events_waits *wait= reinterpret_cast<PFS_events_waits*> (state->m_wait);
5039  DBUG_ASSERT(wait != NULL);
5040 
5041  wait->m_timer_end= timer_end;
5042  wait->m_end_event_id= thread->m_event_id;
5043  wait->m_number_of_bytes= bytes;
5044 
5046  insert_events_waits_history(thread, wait);
5049  thread->m_events_waits_current--;
5050  }
5051 }
5052 
5053 static void set_socket_state_v1(PSI_socket *socket, PSI_socket_state state)
5054 {
5055  DBUG_ASSERT((state == PSI_SOCKET_STATE_IDLE) || (state == PSI_SOCKET_STATE_ACTIVE));
5056  PFS_socket *pfs= reinterpret_cast<PFS_socket*>(socket);
5057  DBUG_ASSERT(pfs != NULL);
5058  DBUG_ASSERT(pfs->m_idle || (state == PSI_SOCKET_STATE_IDLE));
5059  DBUG_ASSERT(!pfs->m_idle || (state == PSI_SOCKET_STATE_ACTIVE));
5060  pfs->m_idle= (state == PSI_SOCKET_STATE_IDLE);
5061 }
5062 
/**
  Implementation of the socket instrumentation interface.
  @sa PSI_v1::set_socket_info.
  Records the socket descriptor and/or its address in the instrument.
*/
static void set_socket_info_v1(PSI_socket *socket,
                               const my_socket *fd,
                               const struct sockaddr *addr,
                               socklen_t addr_len)
{
  PFS_socket *pfs= reinterpret_cast<PFS_socket*>(socket);
  DBUG_ASSERT(pfs != NULL);

  /* Set socket descriptor, if provided. */
  if (fd != NULL)
    pfs->m_fd= *fd;

  /* Set raw socket address and length, if provided. */
  if (likely(addr != NULL && addr_len > 0))
  {
    pfs->m_addr_len= addr_len;

    /* Clamp the address length to the storage size, so memcpy stays in bounds. */
    if (unlikely(pfs->m_addr_len > sizeof(sockaddr_storage)))
      pfs->m_addr_len= sizeof(struct sockaddr_storage);

    memcpy(&pfs->m_sock_addr, addr, pfs->m_addr_len);
  }
}
5090 
5095 static void set_socket_thread_owner_v1(PSI_socket *socket)
5096 {
5097  PFS_socket *pfs_socket= reinterpret_cast<PFS_socket*>(socket);
5098  DBUG_ASSERT(pfs_socket != NULL);
5099  pfs_socket->m_thread_owner= my_pthread_getspecific_ptr(PFS_thread*, THR_PFS);
5100 }
5101 
5102 
5107 static int set_thread_connect_attrs_v1(const char *buffer, uint length,
5108  const void *from_cs)
5109 {
5110 
5111  PFS_thread *thd= my_pthread_getspecific_ptr(PFS_thread*, THR_PFS);
5112 
5113  DBUG_ASSERT(buffer != NULL);
5114 
5115  if (likely(thd != NULL) && session_connect_attrs_size_per_thread > 0)
5116  {
5117  /* copy from the input buffer as much as we can fit */
5118  uint copy_size= (uint)(length < session_connect_attrs_size_per_thread ?
5121  memcpy(thd->m_session_connect_attrs, buffer, copy_size);
5122  thd->m_session_connect_attrs_length= copy_size;
5123  thd->m_session_connect_attrs_cs= (const CHARSET_INFO *) from_cs;
5125 
5126  if (copy_size == length)
5127  return 0;
5128 
5130  return 1;
5131  }
5132  return 0;
5133 }
5134 
5135 
/**
  Implementation of the instrumentation interface.
  Function table exported to instrumented code through get_interface().
  Entry order must match the PSI_v1 structure declaration.
  @sa PSI_v1
*/
PSI_v1 PFS_v1=
{
  register_mutex_v1,
  register_rwlock_v1,
  register_cond_v1,
  register_thread_v1,
  register_file_v1,
  register_stage_v1,
  register_statement_v1,
  register_socket_v1,
  init_mutex_v1,
  destroy_mutex_v1,
  init_rwlock_v1,
  destroy_rwlock_v1,
  init_cond_v1,
  destroy_cond_v1,
  init_socket_v1,
  destroy_socket_v1,
  get_table_share_v1,
  release_table_share_v1,
  drop_table_share_v1,
  open_table_v1,
  unbind_table_v1,
  rebind_table_v1,
  close_table_v1,
  create_file_v1,
  spawn_thread_v1,
  new_thread_v1,
  set_thread_id_v1,
  get_thread_v1,
  set_thread_user_v1,
  set_thread_account_v1,
  set_thread_db_v1,
  set_thread_command_v1,
  set_thread_start_time_v1,
  set_thread_state_v1,
  set_thread_info_v1,
  set_thread_v1,
  delete_current_thread_v1,
  delete_thread_v1,
  get_thread_file_name_locker_v1,
  get_thread_file_stream_locker_v1,
  get_thread_file_descriptor_locker_v1,
  unlock_mutex_v1,
  unlock_rwlock_v1,
  signal_cond_v1,
  broadcast_cond_v1,
  start_idle_wait_v1,
  end_idle_wait_v1,
  start_mutex_wait_v1,
  end_mutex_wait_v1,
  start_rwlock_wait_v1, /* read */
  end_rwlock_rdwait_v1,
  start_rwlock_wait_v1, /* write */
  end_rwlock_wrwait_v1,
  start_cond_wait_v1,
  end_cond_wait_v1,
  start_table_io_wait_v1,
  end_table_io_wait_v1,
  start_table_lock_wait_v1,
  end_table_lock_wait_v1,
  start_file_open_wait_v1,
  end_file_open_wait_v1,
  end_file_open_wait_and_bind_to_descriptor_v1,
  start_file_wait_v1,
  end_file_wait_v1,
  start_file_close_wait_v1,
  end_file_close_wait_v1,
  start_stage_v1,
  end_stage_v1,
  get_thread_statement_locker_v1,
  refine_statement_v1,
  start_statement_v1,
  set_statement_text_v1,
  set_statement_lock_time_v1,
  set_statement_rows_sent_v1,
  set_statement_rows_examined_v1,
  inc_statement_created_tmp_disk_tables_v1,
  inc_statement_created_tmp_tables_v1,
  inc_statement_select_full_join_v1,
  inc_statement_select_full_range_join_v1,
  inc_statement_select_range_v1,
  inc_statement_select_range_check_v1,
  inc_statement_select_scan_v1,
  inc_statement_sort_merge_passes_v1,
  inc_statement_sort_range_v1,
  inc_statement_sort_rows_v1,
  inc_statement_sort_scan_v1,
  set_statement_no_index_used_v1,
  set_statement_no_good_index_used_v1,
  end_statement_v1,
  start_socket_wait_v1,
  end_socket_wait_v1,
  set_socket_state_v1,
  set_socket_info_v1,
  set_socket_thread_owner_v1,
  pfs_digest_start_v1,
  pfs_digest_add_token_v1,
  set_thread_connect_attrs_v1,
};
5240 
5241 static void* get_interface(int version)
5242 {
5243  switch (version)
5244  {
5245  case PSI_VERSION_1:
5246  return &PFS_v1;
5247  default:
5248  return NULL;
5249  }
5250 }
5251 
5252 C_MODE_END
5253 
5255 {
5257 };