MySQL 5.6.14 Source Code Document
 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Groups Pages
ClusterConfiguration.cpp
1 /*
2  Copyright (C) 2003-2006 MySQL AB
3  All rights reserved. Use is subject to license terms.
4 
5  This program is free software; you can redistribute it and/or modify
6  it under the terms of the GNU General Public License as published by
7  the Free Software Foundation; version 2 of the License.
8 
9  This program is distributed in the hope that it will be useful,
10  but WITHOUT ANY WARRANTY; without even the implied warranty of
11  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12  GNU General Public License for more details.
13 
14  You should have received a copy of the GNU General Public License
15  along with this program; if not, write to the Free Software
16  Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
17 */
18 
19 #include <ndb_global.h>
20 
21 #include "ClusterConfiguration.hpp"
22 #include <ErrorHandlingMacros.hpp>
23 
24 #include <pc.hpp>
25 #include <BlockNumbers.h>
26 #include <signaldata/AccSizeAltReq.hpp>
27 #include <signaldata/DictSizeAltReq.hpp>
28 #include <signaldata/DihSizeAltReq.hpp>
29 #include <signaldata/LqhSizeAltReq.hpp>
30 #include <signaldata/TcSizeAltReq.hpp>
31 #include <signaldata/TupSizeAltReq.hpp>
32 #include <signaldata/TuxSizeAltReq.hpp>
33 
34 ClusterConfiguration::ClusterConfiguration()
35 {
36  for (unsigned i= 0; i< MAX_SIZEALT_BLOCKS; i++) // initialize
37  for (unsigned j= 0; j< MAX_SIZEALT_RECORD; j++) {
38  the_clusterData.SizeAltData.varSize[i][j].valid = false;
39  the_clusterData.SizeAltData.varSize[i][j].nrr = 0;
40  }
41 
42  for (unsigned i1 = 0; i1< 5; i1++) // initialize
43  for (unsigned j1= 0; j1< CmvmiCfgConf::NO_OF_WORDS; j1++)
44  the_clusterData.ispValues[i1][j1] = 0;
45 
46  the_clusterData.SizeAltData.noOfNodes = 0;
47  the_clusterData.SizeAltData.noOfNDBNodes = 0;
48  the_clusterData.SizeAltData.noOfAPINodes = 0;
49  the_clusterData.SizeAltData.noOfMGMNodes = 0;
50 }
51 
// Nothing to release: all cluster data is held by value in the_clusterData.
ClusterConfiguration::~ClusterConfiguration(){
}
54 
55 void
56 setValue(VarSize* dst, const int index, UintR variableValue){
57  assert(dst != NULL);
58  assert(index >= 0 && index < MAX_SIZEALT_RECORD);
59 
60  dst[index].nrr = variableValue;
61  dst[index].valid = true;
62 }
63 
// Derive the per-kernel-block record pool sizes (SizeAltData.varSize) from
// the user-level figures collected by init().  Each brace-scope below
// dimensions one kernel block: ACC, DICT, DIH, LQH, TC, TUP, TUX.
void
ClusterConfiguration::calcSizeAlteration()
{
  SizeAlt *size = &the_clusterData.SizeAltData;

  // Adjust the raw configuration counts for internal bookkeeping objects.
  size->noOfTables++;                    // Remove impact of system table
  size->noOfTables += size->noOfIndexes; // Indexes are tables too
  size->noOfAttributes += 2;             // ---"----

  size->noOfTables *= 2; // Remove impact of Dict need 2 ids for each table

  // Cap the DB-node count used when dimensioning local scan records.
  // NOTE(review): the cap of 15 looks like a protocol node-bitmask limit --
  // confirm against the signal definitions before changing.
  Uint32 noOfDBNodes = size->noOfNDBNodes;
  if (noOfDBNodes > 15) {
    noOfDBNodes = 15;
  }//if
  Uint32 noOfLocalScanRecords = (noOfDBNodes * size->noOfScanRecords) + 1;
  Uint32 noOfTCScanRecords = size->noOfScanRecords;

  {
    // ACC (DBACC) -- hash index storage.
    size->blockNo[ACC] = DBACC;

    VarSize * const acc = &(size->varSize[ACC][0]);

    // Can keep 65536 pages (= 0.5 GByte)
    setValue(acc, AccSizeAltReq::IND_DIR_RANGE,
             4 * NO_OF_FRAG_PER_NODE * size->noOfTables* size->noOfReplicas);

    setValue(acc, AccSizeAltReq::IND_DIR_ARRAY,
             (size->noOfIndexPages >> 8) +
             4 * NO_OF_FRAG_PER_NODE * size->noOfTables* size->noOfReplicas);

    setValue(acc, AccSizeAltReq::IND_FRAGMENT,
             2 * NO_OF_FRAG_PER_NODE * size->noOfTables* size->noOfReplicas);

    /*-----------------------------------------------------------------------*/
    // The extra operation records added are used by the scan and node
    // recovery process.
    // Node recovery process will have its operations dedicated to ensure
    // that they never have a problem with allocation of the operation record.
    // The remainder are allowed for use by the scan processes.
    /*-----------------------------------------------------------------------*/
    setValue(acc, AccSizeAltReq::IND_OP_RECS,
             size->noOfReplicas*((16 * size->noOfOperations) / 10 + 50) +
             (noOfLocalScanRecords * MAX_PARALLEL_SCANS_PER_FRAG) +
             NODE_RECOVERY_SCAN_OP_RECORDS);

    setValue(acc, AccSizeAltReq::IND_OVERFLOW_RECS,
             size->noOfIndexPages +
             2 * NO_OF_FRAG_PER_NODE * size->noOfTables* size->noOfReplicas);

    setValue(acc, AccSizeAltReq::IND_PAGE8,
             size->noOfIndexPages + 32);

    setValue(acc, AccSizeAltReq::IND_ROOT_FRAG,
             NO_OF_FRAG_PER_NODE * size->noOfTables* size->noOfReplicas);

    setValue(acc, AccSizeAltReq::IND_TABLE,
             size->noOfTables);

    setValue(acc, AccSizeAltReq::IND_SCAN,
             noOfLocalScanRecords);
  }

  {
    // DICT (DBDICT) -- schema dictionary.
    size->blockNo[DICT] = DBDICT;

    VarSize * const dict = &(size->varSize[DICT][0]);

    setValue(dict, DictSizeAltReq::IND_ATTRIBUTE,
             size->noOfAttributes);

    setValue(dict, DictSizeAltReq::IND_CONNECT,
             size->noOfOperations + 32);

    setValue(dict, DictSizeAltReq::IND_FRAG_CONNECT,
             NO_OF_FRAG_PER_NODE * size->noOfNDBNodes * size->noOfReplicas);

    setValue(dict, DictSizeAltReq::IND_TABLE,
             size->noOfTables);

    setValue(dict, DictSizeAltReq::IND_TC_CONNECT,
             2* size->noOfOperations);
  }

  {
    // DIH (DBDIH) -- distribution handler.
    size->blockNo[DIH] = DBDIH;

    VarSize * const dih = &(size->varSize[DIH][0]);

    setValue(dih, DihSizeAltReq::IND_API_CONNECT,
             2 * size->noOfTransactions);

    setValue(dih, DihSizeAltReq::IND_CONNECT,
             size->noOfOperations + 46);

    setValue(dih, DihSizeAltReq::IND_FRAG_CONNECT,
             NO_OF_FRAG_PER_NODE * size->noOfTables * size->noOfNDBNodes);

    // Replicas beyond the second each need an extra "more nodes" record;
    // with fewer than three replicas a single record suffices.
    int temp;
    temp = size->noOfReplicas - 2;
    if (temp < 0)
      temp = 1;
    else
      temp++;
    setValue(dih, DihSizeAltReq::IND_MORE_NODES,
             temp * NO_OF_FRAG_PER_NODE *
             size->noOfTables * size->noOfNDBNodes);

    setValue(dih, DihSizeAltReq::IND_REPLICAS,
             NO_OF_FRAG_PER_NODE * size->noOfTables *
             size->noOfNDBNodes * size->noOfReplicas);

    setValue(dih, DihSizeAltReq::IND_TABLE,
             size->noOfTables);
  }

  {
    // LQH (DBLQH) -- local query handler.
    size->blockNo[LQH] = DBLQH;

    VarSize * const lqh = &(size->varSize[LQH][0]);

    setValue(lqh, LqhSizeAltReq::IND_FRAG,
             NO_OF_FRAG_PER_NODE * size->noOfTables * size->noOfReplicas);

    setValue(lqh, LqhSizeAltReq::IND_CONNECT,
             size->noOfReplicas*((11 * size->noOfOperations) / 10 + 50));

    setValue(lqh, LqhSizeAltReq::IND_TABLE,
             size->noOfTables);

    setValue(lqh, LqhSizeAltReq::IND_TC_CONNECT,
             size->noOfReplicas*((16 * size->noOfOperations) / 10 + 50));

    setValue(lqh, LqhSizeAltReq::IND_REPLICAS,
             size->noOfReplicas);

    // ispValues[1][4] holds NoOfFragmentLogFiles (see init()); each config
    // unit corresponds to 4 physical redo log files.
    setValue(lqh, LqhSizeAltReq::IND_LOG_FILES,
             (4 * the_clusterData.ispValues[1][4]));

    setValue(lqh, LqhSizeAltReq::IND_SCAN,
             noOfLocalScanRecords);

  }

  {
    // TC (DBTC) -- transaction coordinator.
    size->blockNo[TC] = DBTC;

    VarSize * const tc = &(size->varSize[TC][0]);

    setValue(tc, TcSizeAltReq::IND_API_CONNECT,
             3 * size->noOfTransactions);

    setValue(tc, TcSizeAltReq::IND_TC_CONNECT,
             size->noOfOperations + 16 + size->noOfTransactions);

    setValue(tc, TcSizeAltReq::IND_TABLE,
             size->noOfTables);

    setValue(tc, TcSizeAltReq::IND_LOCAL_SCAN,
             noOfLocalScanRecords);

    setValue(tc, TcSizeAltReq::IND_TC_SCAN,
             noOfTCScanRecords);
  }

  {
    // TUP (DBTUP) -- tuple (row data) storage.
    size->blockNo[TUP] = DBTUP;

    VarSize * const tup = &(size->varSize[TUP][0]);

    setValue(tup, TupSizeAltReq::IND_DISK_PAGE_ARRAY,
             2 * NO_OF_FRAG_PER_NODE * size->noOfTables* size->noOfReplicas);

    setValue(tup, TupSizeAltReq::IND_DISK_PAGE_REPRESENT,
             size->noOfDiskClusters);

    setValue(tup, TupSizeAltReq::IND_FRAG,
             2 * NO_OF_FRAG_PER_NODE * size->noOfTables* size->noOfReplicas);

    setValue(tup, TupSizeAltReq::IND_PAGE_CLUSTER,
             size->noOfFreeClusters);

    setValue(tup, TupSizeAltReq::IND_LOGIC_PAGE,
             size->noOfDiskBufferPages + size->noOfDiskClusters);

    setValue(tup, TupSizeAltReq::IND_OP_RECS,
             size->noOfReplicas*((16 * size->noOfOperations) / 10 + 50));

    setValue(tup, TupSizeAltReq::IND_PAGE,
             size->noOfDataPages);

    setValue(tup, TupSizeAltReq::IND_PAGE_RANGE,
             4 * NO_OF_FRAG_PER_NODE * size->noOfTables* size->noOfReplicas);

    setValue(tup, TupSizeAltReq::IND_TABLE,
             size->noOfTables);

    setValue(tup, TupSizeAltReq::IND_TABLE_DESC,
             4 * NO_OF_FRAG_PER_NODE * size->noOfAttributes* size->noOfReplicas +
             12 * NO_OF_FRAG_PER_NODE * size->noOfTables* size->noOfReplicas );

    setValue(tup, TupSizeAltReq::IND_DELETED_BLOCKS,
             size->noOfFreeClusters);

    setValue(tup, TupSizeAltReq::IND_STORED_PROC,
             noOfLocalScanRecords);
  }

  {
    // TUX (DBTUX) -- ordered index storage.
    size->blockNo[TUX] = DBTUX;

    VarSize * const tux = &(size->varSize[TUX][0]);

    setValue(tux, TuxSizeAltReq::IND_INDEX,
             size->noOfTables);

    setValue(tux, TuxSizeAltReq::IND_FRAGMENT,
             2 * NO_OF_FRAG_PER_NODE * size->noOfTables * size->noOfReplicas);

    setValue(tux, TuxSizeAltReq::IND_ATTRIBUTE,
             size->noOfIndexes * 4);

    setValue(tux, TuxSizeAltReq::IND_SCAN,
             noOfLocalScanRecords);
  }
}
310 
312 ClusterConfiguration::clusterData() const
313 {
314  return the_clusterData;
315 }
316 
317 void ClusterConfiguration::init(const Properties & p, const Properties & db){
318  const char * msg = "Invalid configuration fetched";
319 
320  ClusterData & cd = the_clusterData;
321 
322  struct AttribStorage { const char * attrib; Uint32 * storage; };
323  AttribStorage tmp[] = {
324  {"MaxNoOfConcurrentScans", &cd.SizeAltData.noOfScanRecords },
325  {"MaxNoOfTables", &cd.SizeAltData.noOfTables },
326  {"MaxNoOfIndexes", &cd.SizeAltData.noOfIndexes },
327  {"NoOfReplicas", &cd.SizeAltData.noOfReplicas },
328  {"MaxNoOfAttributes", &cd.SizeAltData.noOfAttributes },
329  {"MaxNoOfConcurrentOperations", &cd.SizeAltData.noOfOperations },
330  {"MaxNoOfConcurrentTransactions", &cd.SizeAltData.noOfTransactions },
331  {"NoOfIndexPages", &cd.SizeAltData.noOfIndexPages },
332  {"NoOfDataPages", &cd.SizeAltData.noOfDataPages },
333  {"NoOfDiskBufferPages", &cd.SizeAltData.noOfDiskBufferPages },
334  {"NoOfDiskClusters", &cd.SizeAltData.noOfDiskClusters },
335  {"NoOfFreeDiskClusters", &cd.SizeAltData.noOfFreeClusters },
336  {"TimeToWaitAlive", &cd.ispValues[0][0] },
337  {"HeartbeatIntervalDbDb", &cd.ispValues[0][2] },
338  {"HeartbeatIntervalDbApi", &cd.ispValues[0][3] },
339  {"ArbitrationTimeout", &cd.ispValues[0][5] },
340  {"TimeBetweenLocalCheckpoints", &cd.ispValues[1][2] },
341  {"NoOfFragmentLogFiles", &cd.ispValues[1][4] },
342  {"MaxNoOfConcurrentScans", &cd.SizeAltData.noOfScanRecords },
343  {"NoOfConcurrentCheckpointsDuringRestart", &cd.ispValues[1][5] },
344  {"TransactionDeadlockDetectionTimeout", &cd.ispValues[1][6] },
345  {"NoOfConcurrentProcessesHandleTakeover", &cd.ispValues[1][7] },
346  {"TimeBetweenGlobalCheckpoints", &cd.ispValues[2][3] },
347  {"NoOfConcurrentCheckpointsAfterRestart", &cd.ispValues[2][4] },
348  {"TransactionInactiveTimeout", &cd.ispValues[2][7] },
349  {"NoOfDiskPagesToDiskDuringRestartTUP", &cd.ispValues[3][8] },
350  {"NoOfDiskPagesToDiskAfterRestartTUP", &cd.ispValues[3][9] },
351  {"NoOfDiskPagesToDiskDuringRestartACC", &cd.ispValues[3][10] },
352  {"NoOfDiskPagesToDiskAfterRestartACC", &cd.ispValues[3][11] },
353  {"NoOfDiskClustersPerDiskFile", &cd.ispValues[4][8] },
354  {"NoOfDiskFiles", &cd.ispValues[4][9] },
355  {"NoOfReplicas", &cd.ispValues[2][2] }
356  };
357 
358 
359  const int sz = sizeof(tmp)/sizeof(AttribStorage);
360  for(int i = 0; i<sz; i++){
361  if(!db.get(tmp[i].attrib, tmp[i].storage)){
362  char buf[255];
363  BaseString::snprintf(buf, sizeof(buf), "%s not found", tmp[i].attrib);
364  ERROR_SET(fatal, NDBD_EXIT_INVALID_CONFIG, msg, buf);
365  }
366  }
367 
368  if(!p.get("NoOfNodes", &cd.SizeAltData.noOfNodes)){
369  ERROR_SET(fatal, NDBD_EXIT_INVALID_CONFIG, msg, "NoOfNodes missing");
370  }
371 
372  Properties::Iterator it(&p);
373  const char * name = 0;
374  Uint32 nodeNo = 0;
375  for(name = it.first(); name != NULL; name = it.next()){
376  if(strncmp(name, "Node_", strlen("Node_")) == 0){
377 
378  Uint32 nodeId;
379  const char * nodeType;
380  const Properties * node;
381 
382  if(!p.get(name, &node)){
383  ERROR_SET(fatal, NDBD_EXIT_INVALID_CONFIG, msg, "Node data missing");
384  }
385 
386  if(!node->get("Id", &nodeId)){
387  ERROR_SET(fatal, NDBD_EXIT_INVALID_CONFIG, msg, "Node data (Id) missing");
388  }
389 
390  if(!node->get("Type", &nodeType)){
391  ERROR_SET(fatal, NDBD_EXIT_INVALID_CONFIG, msg, "Node data (Type) missing");
392  }
393 
394  if(nodeId > MAX_NODES){
395  char buf[255];
396  snprintf(buf, sizeof(buf),
397  "Maximum DB node id allowed is: %d", MAX_NDB_NODES);
398  ERROR_SET(fatal, NDBD_EXIT_INVALID_CONFIG, msg, buf);
399  }
400 
401  if(nodeId == 0){
402  char buf[255];
403  snprintf(buf, sizeof(buf),
404  "Minimum node id allowed in the cluster is: 1");
405  ERROR_SET(fatal, NDBD_EXIT_INVALID_CONFIG, msg, buf);
406  }
407 
408  for(unsigned j = 0; j<nodeNo; j++){
409  if(cd.nodeData[j].nodeId == nodeId){
410  char buf[255];
411  BaseString::snprintf(buf, sizeof(buf), "Two node can not have the same node id");
412  ERROR_SET(fatal, NDBD_EXIT_INVALID_CONFIG, msg, buf);
413  }
414  }
415 
416  {
417  for(unsigned j = 0; j<LogLevel::LOGLEVEL_CATEGORIES; j++){
418  Uint32 logLevel;
419  if(db.get(LogLevel::LOGLEVEL_CATEGORY_NAME[j].name, &logLevel)){
420  cd.SizeAltData.logLevel.setLogLevel((LogLevel::EventCategory)j,
421  logLevel);
422  }
423  }
424  }
425 
426  cd.nodeData[nodeNo].nodeId = nodeId;
427  const char* tmpApiMgmProperties = 0;
428  if(strcmp("DB", nodeType) == 0){
429  cd.nodeData[nodeNo].nodeType = NodeInfo::DB;
430  cd.SizeAltData.noOfNDBNodes++; // No of NDB processes
431 
432  if(nodeId > MAX_NDB_NODES){
433  char buf[255];
434  BaseString::snprintf(buf, sizeof(buf), "Maximum node id for a ndb node is: %d", MAX_NDB_NODES);
435  ERROR_SET(fatal, NDBD_EXIT_INVALID_CONFIG, msg, buf);
436  }
437  if(cd.SizeAltData.noOfNDBNodes > MAX_NDB_NODES){
438  char buf[255];
439  BaseString::snprintf(buf, sizeof(buf),
440  "Maximum %d ndb nodes is allowed in the cluster",
441  MAX_NDB_NODES);
442  ERROR_SET(fatal, NDBD_EXIT_INVALID_CONFIG, msg, buf);
443  }
444  } else if(strcmp("API", nodeType) == 0){
445  cd.nodeData[nodeNo].nodeType = NodeInfo::API;
446  cd.SizeAltData.noOfAPINodes++; // No of API processes
447  tmpApiMgmProperties = "API";
448  } else if(strcmp("REP", nodeType) == 0){
449  cd.nodeData[nodeNo].nodeType = NodeInfo::REP;
450  //cd.SizeAltData.noOfAPINodes++; // No of API processes
451  tmpApiMgmProperties = "REP";
452  } else if(strcmp("MGM", nodeType) == 0){
453  cd.nodeData[nodeNo].nodeType = NodeInfo::MGM;
454  cd.SizeAltData.noOfMGMNodes++; // No of MGM processes
455  tmpApiMgmProperties = "MGM";
456  } else {
457  ERROR_SET(fatal, NDBD_EXIT_INVALID_CONFIG,
458  "Invalid configuration: Unknown node type",
459  nodeType);
460  }
461 
462  if (tmpApiMgmProperties) {
463  /*
464  const Properties* q = 0;
465 
466  if (!p.get(tmpApiMgmProperties, nodeId, &q)) {
467  ERROR_SET(fatal, NDBD_EXIT_INVALID_CONFIG, msg, tmpApiMgmProperties);
468  } else {
469  */
470  Uint32 rank = 0;
471  if (node->get("ArbitrationRank", &rank) && rank > 0) {
472  cd.nodeData[nodeNo].arbitRank = rank;
473  // }
474  }
475  } else {
476  cd.nodeData[nodeNo].arbitRank = 0;
477  }
478 
479  nodeNo++;
480  }
481  }
482  cd.SizeAltData.exist = true;
483  calcSizeAlteration();
484 }
485 
486