MySQL 5.6.14 Source Code Document
Dbtux.hpp
1 /*
2  Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
3 
4  This program is free software; you can redistribute it and/or modify
5  it under the terms of the GNU General Public License as published by
6  the Free Software Foundation; version 2 of the License.
7 
8  This program is distributed in the hope that it will be useful,
9  but WITHOUT ANY WARRANTY; without even the implied warranty of
10  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11  GNU General Public License for more details.
12 
13  You should have received a copy of the GNU General Public License
14  along with this program; if not, write to the Free Software
15  Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
16 */
17 
18 #ifndef DBTUX_H
19 #define DBTUX_H
20 
21 #include <ndb_limits.h>
22 #include <SimulatedBlock.hpp>
23 #include <AttributeDescriptor.hpp>
24 #include <AttributeHeader.hpp>
25 #include <ArrayPool.hpp>
26 #include <DataBuffer.hpp>
27 #include <DLFifoList.hpp>
28 #include <md5_hash.hpp>
29 
30 // big brother
31 #include <dbtup/Dbtup.hpp>
32 
33 // packed index keys and bounds
34 #include <NdbPack.hpp>
35 
36 // signal classes
37 #include <signaldata/DictTabInfo.hpp>
38 #include <signaldata/TuxContinueB.hpp>
39 #include <signaldata/TupFrag.hpp>
40 #include <signaldata/AlterIndxImpl.hpp>
41 #include <signaldata/DropTab.hpp>
42 #include <signaldata/TuxMaint.hpp>
43 #include <signaldata/AccScan.hpp>
44 #include <signaldata/TuxBound.hpp>
45 #include <signaldata/NextScan.hpp>
46 #include <signaldata/AccLock.hpp>
47 #include <signaldata/DumpStateOrd.hpp>
48 #include <signaldata/IndexStatSignal.hpp>
49 
50 // debug
51 #ifdef VM_TRACE
52 #include <NdbOut.hpp>
53 #include <OutputStream.hpp>
54 #endif
55 
56 // jams
57 #undef jam
58 #undef jamEntry
59 #ifdef DBTUX_GEN_CPP
60 #define jam() jamLine(10000 + __LINE__)
61 #define jamEntry() jamEntryLine(10000 + __LINE__)
62 #endif
63 #ifdef DBTUX_META_CPP
64 #define jam() jamLine(20000 + __LINE__)
65 #define jamEntry() jamEntryLine(20000 + __LINE__)
66 #endif
67 #ifdef DBTUX_MAINT_CPP
68 #define jam() jamLine(30000 + __LINE__)
69 #define jamEntry() jamEntryLine(30000 + __LINE__)
70 #endif
71 #ifdef DBTUX_NODE_CPP
72 #define jam() jamLine(40000 + __LINE__)
73 #define jamEntry() jamEntryLine(40000 + __LINE__)
74 #endif
75 #ifdef DBTUX_TREE_CPP
76 #define jam() jamLine(50000 + __LINE__)
77 #define jamEntry() jamEntryLine(50000 + __LINE__)
78 #endif
79 #ifdef DBTUX_SCAN_CPP
80 #define jam() jamLine(60000 + __LINE__)
81 #define jamEntry() jamEntryLine(60000 + __LINE__)
82 #endif
83 #ifdef DBTUX_SEARCH_CPP
84 #define jam() jamLine(70000 + __LINE__)
85 #define jamEntry() jamEntryLine(70000 + __LINE__)
86 #endif
87 #ifdef DBTUX_CMP_CPP
88 #define jam() jamLine(80000 + __LINE__)
89 #define jamEntry() jamEntryLine(80000 + __LINE__)
90 #endif
91 #ifdef DBTUX_STAT_CPP
92 #define jam() jamLine(90000 + __LINE__)
93 #define jamEntry() jamEntryLine(90000 + __LINE__)
94 #endif
95 #ifdef DBTUX_DEBUG_CPP
96 #define jam() jamLine(100000 + __LINE__)
97 #define jamEntry() jamEntryLine(100000 + __LINE__)
98 #endif
99 #ifndef jam
100 #define jam() jamLine(__LINE__)
101 #define jamEntry() jamEntryLine(__LINE__)
102 #endif
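// Illustrative sketch (not part of the original header): each DBTUX_*_CPP
// translation unit above gets its own band of 10000 added to __LINE__, so a
// value recorded by jam() identifies both the source file and the line.
// A hypothetical decoder for such a value:
static inline void decodeTuxJamLine(unsigned jamValue,
                                    unsigned& fileBand, unsigned& line)
{
  fileBand = jamValue / 10000; // 1 = DbtuxGen.cpp ... 10 = DbtuxDebug.cpp, 0 = other
  line = jamValue % 10000;     // original __LINE__ within that file
}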
103 
104 #undef max
105 #undef min
106 
107 class Configuration;
108 struct mt_BuildIndxCtx;
109 
110 class Dbtux : public SimulatedBlock {
111  friend class DbtuxProxy;
112  friend struct mt_BuildIndxCtx;
113  friend Uint32 Dbtux_mt_buildIndexFragment_wrapper_C(void*);
114 public:
115  Dbtux(Block_context& ctx, Uint32 instanceNumber = 0);
116  virtual ~Dbtux();
117 
118  // pointer to TUP instance in this thread
119  Dbtup* c_tup;
120 
121 private:
122  // sizes are in words (Uint32)
123  STATIC_CONST( MaxIndexFragments = MAX_FRAG_PER_NODE );
124  STATIC_CONST( MaxIndexAttributes = MAX_ATTRIBUTES_IN_INDEX );
125  STATIC_CONST( MaxAttrDataSize = 2 * MAX_ATTRIBUTES_IN_INDEX + MAX_KEY_SIZE_IN_WORDS );
126  STATIC_CONST( MaxXfrmDataSize = MaxAttrDataSize * MAX_XFRM_MULTIPLY);
127 public:
128  STATIC_CONST( DescPageSize = 512 );
129 private:
130  STATIC_CONST( MaxTreeNodeSize = MAX_TTREE_NODE_SIZE );
131  STATIC_CONST( MaxPrefSize = MAX_TTREE_PREF_SIZE );
132  STATIC_CONST( ScanBoundSegmentSize = 7 );
133  STATIC_CONST( MaxAccLockOps = MAX_PARALLEL_OP_PER_SCAN );
134  STATIC_CONST( MaxTreeDepth = 32 ); // strict
135 #ifdef VM_TRACE
136  // for TuxCtx::c_debugBuffer
137  STATIC_CONST( DebugBufferBytes = (MaxAttrDataSize << 2) );
138 #endif
139  BLOCK_DEFINES(Dbtux);
140 
141  // forward declarations
142  struct TuxCtx;
143 
144  // AttributeHeader size is assumed to be 1 word
145  STATIC_CONST( AttributeHeaderSize = 1 );
146 
147  /*
148  * Logical tuple address, "local key". Identifies table tuples.
149  */
150  typedef Uint32 TupAddr;
151  STATIC_CONST( NullTupAddr = (Uint32)-1 );
152 
153  /*
154  * Physical tuple address in TUP. Provides fast access to table tuple
155  * or index node. Valid within the db node and across timeslices.
156  * Not valid between db nodes or across restarts.
157  *
158  * To avoid wasting an Uint16 the pageid is split in two.
159  */
160  struct TupLoc {
161  private:
162  Uint16 m_pageId1; // page i-value (big-endian)
163  Uint16 m_pageId2;
164  Uint16 m_pageOffset; // page offset in words
165  public:
166  TupLoc();
167  TupLoc(Uint32 pageId, Uint16 pageOffset);
168  Uint32 getPageId() const;
169  void setPageId(Uint32 pageId);
170  Uint32 getPageOffset() const;
171  void setPageOffset(Uint32 pageOffset);
172  bool operator==(const TupLoc& loc) const;
173  bool operator!=(const TupLoc& loc) const;
174  };
175 
176  /*
177  * There is no const member NullTupLoc since the compiler may not be
178  * able to optimize it to TupLoc() constants. Instead null values are
179  * constructed on the stack with TupLoc().
180  */
181 #define NullTupLoc TupLoc()
182 
183  // tree definitions
184 
185  /*
186  * Tree entry. Points to a tuple in primary table via physical
187  * address of "original" tuple and tuple version.
188  *
189  * ZTUP_VERSION_BITS must be 15 (or less).
190  */
191  struct TreeEnt;
192  friend struct TreeEnt;
193  struct TreeEnt {
194  TupLoc m_tupLoc; // address of original tuple
195  unsigned m_tupVersion : 15; // version
196  TreeEnt();
197  // methods
198  bool eqtuple(const TreeEnt ent) const;
199  bool eq(const TreeEnt ent) const;
200  int cmp(const TreeEnt ent) const;
201  };
202  STATIC_CONST( TreeEntSize = sizeof(TreeEnt) >> 2 );
203  static const TreeEnt NullTreeEnt;
204 
205  /*
206  * Tree node has 3 parts:
207  *
208  * 1) struct TreeNode - the header (6 words)
209  * 2) some key values for min entry - the min prefix
210  * 3) list of TreeEnt (each 2 words)
211  *
212  * There are 3 links to other nodes: left child, right child, parent.
213  * Occupancy (number of entries) is at least 1 except temporarily when
214  * a node is about to be removed.
215  */
216  struct TreeNode;
217  friend struct TreeNode;
218  struct TreeNode {
219  TupLoc m_link[3]; // link to 0-left child 1-right child 2-parent
220  unsigned m_side : 2; // we are 0-left child 1-right child 2-root
221  unsigned m_balance : 2; // balance -1, 0, +1 plus 1 for Solaris CC
222  unsigned pad1 : 4;
223  Uint8 m_occup; // current number of entries
224  Uint32 m_nodeScan; // list of scans at this node
225  TreeNode();
226  };
227  STATIC_CONST( NodeHeadSize = sizeof(TreeNode) >> 2 );
228 
229  /*
230  * Tree header. There is one in each fragment. Contains tree
231  * parameters and address of root node.
232  */
233  struct TreeHead;
234  friend struct TreeHead;
235  struct TreeHead {
236  Uint8 m_nodeSize; // words in tree node
237  Uint8 m_prefSize; // words in min prefix
238  Uint8 m_minOccup; // min entries in internal node
239  Uint8 m_maxOccup; // max entries in node
240  TupLoc m_root; // root node
241  TreeHead();
242  // methods
243  Uint32* getPref(TreeNode* node) const;
244  TreeEnt* getEntList(TreeNode* node) const;
245  };
246 
247  /*
248  * Tree position. Specifies node, position within node (from 0 to
249  * m_occup), and whether the position is at an existing entry or
250  * before one (if any). Position m_occup points past the node and is
251  * also represented by position 0 of next node. Includes direction
252  * used by scan.
253  */
254  struct TreePos;
255  friend struct TreePos;
256  struct TreePos {
257  TupLoc m_loc; // physical node address
258  Uint16 m_pos; // position 0 to m_occup
259  Uint8 m_dir; // see scanNext
260  TreePos();
261  };
262 
263  // packed metadata
264 
265  /*
266  * Descriptor page. The "hot" metadata for an index is stored as
267  * contiguous array of words on some page. It has 3 parts:
268  * 1) DescHead
269  * 2) array of NdbPack::Type used by NdbPack::Spec of index key
270  * 3) array of attr headers for reading index key values from TUP
271  */
272  struct DescPage;
273  friend struct DescPage;
274  struct DescPage {
275  Uint32 m_nextPage;
276  Uint32 m_numFree; // number of free words
277  union {
278  Uint32 m_data[DescPageSize];
279  Uint32 nextPool;
280  };
281  DescPage();
282  };
283  typedef Ptr<DescPage> DescPagePtr;
284  ArrayPool<DescPage> c_descPagePool;
285  Uint32 c_descPageList;
286 
287  struct DescHead {
288  Uint32 m_indexId;
289  Uint16 m_numAttrs;
290  Uint16 m_magic;
291  enum { Magic = 0xDE5C };
292  };
293  STATIC_CONST( DescHeadSize = sizeof(DescHead) >> 2 );
294 
295  typedef NdbPack::Type KeyType;
296  typedef NdbPack::Spec KeySpec;
297  STATIC_CONST( KeyTypeSize = sizeof(KeyType) >> 2 );
298 
299  typedef NdbPack::DataC KeyDataC;
300  typedef NdbPack::Data KeyData;
301  typedef NdbPack::BoundC KeyBoundC;
302  typedef NdbPack::Bound KeyBound;
303 
304  // range scan
305 
306  /*
307  * ScanBound instances are members of ScanOp. Bound data is stored in
308  * a separate segmented buffer pool.
309  */
310  struct ScanBound {
311  DataBuffer<ScanBoundSegmentSize>::Head m_head;
312  Uint16 m_cnt; // number of attributes
313  Int16 m_side;
314  ScanBound();
315  };
316  DataBuffer<ScanBoundSegmentSize>::DataBufferPool c_scanBoundPool;
317 
318  // ScanLock
319  struct ScanLock {
320  ScanLock() {}
321  Uint32 m_accLockOp;
322  union {
323  Uint32 nextPool;
324  Uint32 nextList;
325  };
326  Uint32 prevList;
327  };
328  typedef Ptr<ScanLock> ScanLockPtr;
329  ArrayPool<ScanLock> c_scanLockPool;
330 
331  /*
332  * Scan operation.
333  *
334  * Tuples are locked one at a time. The current lock op is set to
335  * RNIL as soon as the lock is obtained and passed to LQH. We must
336  * however remember all locks which LQH has not returned for unlocking
337  * since they must be aborted by us when the scan is closed.
338  *
339  * Scan state describes the entry we are interested in. There is
340  * a separate lock wait flag. It may be for current entry or it may
341  * be for an entry we were moved away from. In any case nothing
342  * happens with current entry before lock wait flag is cleared.
343  *
344  * An unfinished scan is always linked to some tree node, and has
345  * current position and direction (see comments at scanNext). There
346  * is also a copy of latest entry found.
347  *
348  * Error handling: An error code (independent of scan state) is set
349  * and returned to LQH. No more result rows are returned but normal
350  * protocol is still followed until scan close.
351  */
352  struct ScanOp;
353  friend struct ScanOp;
354  struct ScanOp {
355  enum {
356  Undef = 0,
357  First = 1, // before first entry
358  Current = 2, // at some entry
359  Found = 3, // return current as next scan result
360  Blocked = 4, // found and waiting for ACC lock
361  Locked = 5, // found and locked or no lock needed
362  Next = 6, // looking for next entry
363  Last = 7, // after last entry
364  Aborting = 8
365  };
366  Uint8 m_state;
367  Uint8 m_lockwait;
368  Uint16 m_errorCode;
369  Uint32 m_userPtr; // scanptr.i in LQH
370  Uint32 m_userRef;
371  Uint32 m_tableId;
372  Uint32 m_indexId;
373  Uint32 m_fragId;
374  Uint32 m_fragPtrI;
375  Uint32 m_transId1;
376  Uint32 m_transId2;
377  Uint32 m_savePointId;
378  // lock waited for or obtained and not yet passed to LQH
379  Uint32 m_accLockOp;
380  // locks obtained and passed to LQH but not yet returned by LQH
381  DLFifoList<ScanLock>::Head m_accLockOps;
382  Uint8 m_readCommitted; // no locking
383  Uint8 m_lockMode;
384  Uint8 m_descending;
385  ScanBound m_scanBound[2];
386  TreePos m_scanPos; // position
387  TreeEnt m_scanEnt; // latest entry found
388  Uint32 m_nodeScan; // next scan at node (single-linked)
389  Uint32 m_statOpPtrI; // RNIL unless this is a statistics scan
390  union {
391  Uint32 nextPool;
392  Uint32 nextList;
393  };
394  Uint32 prevList;
395  ScanOp();
396  };
397  typedef Ptr<ScanOp> ScanOpPtr;
398  ArrayPool<ScanOp> c_scanOpPool;
399 
400  // indexes and fragments
401 
402  /*
403  * Ordered index. Top level data structure. The primary table (table
404  * being indexed) lives in TUP.
405  */
406  struct Index;
407  friend struct Index;
408  struct Index {
409  enum State {
410  NotDefined = 0,
411  Defining = 1,
412  Building = 3, // triggers activated, building
413  Online = 2, // triggers activated and build done
414  Dropping = 9
415  };
416  State m_state;
417  DictTabInfo::TableType m_tableType;
418  Uint32 m_tableId;
419  Uint16 unused;
420  Uint16 m_numFrags;
421  Uint32 m_fragId[MaxIndexFragments];
422  Uint32 m_fragPtrI[MaxIndexFragments];
423  Uint32 m_descPage; // descriptor page
424  Uint16 m_descOff; // offset within the page
425  Uint16 m_numAttrs;
426  Uint16 m_prefAttrs; // attributes in min prefix
427  Uint16 m_prefBytes; // max bytes in min prefix
428  KeySpec m_keySpec;
429  Uint32 m_statFragPtrI; // fragment to monitor if not RNIL
430  Uint32 m_statLoadTime; // load time of index stats
431  union {
432  bool m_storeNullKey;
433  Uint32 nextPool;
434  };
435  Index();
436  };
437  typedef Ptr<Index> IndexPtr;
438  ArrayPool<Index> c_indexPool;
439  RSS_AP_SNAPSHOT(c_indexPool);
440 
441  /*
442  * Fragment of an index, as known to DIH/TC. Represents the two
443  * duplicate fragments known to LQH/ACC/TUP. Includes tree header.
444  * There are no maintenance operation records yet.
445  */
446  struct Frag;
447  friend struct Frag;
448  struct Frag {
449  Uint32 m_tableId; // copy from index level
450  Uint32 m_indexId;
451  Uint16 unused;
452  Uint16 m_fragId;
453  TreeHead m_tree;
454  TupLoc m_freeLoc; // one free node for next op
455  DLList<ScanOp> m_scanList; // current scans on this fragment
456  Uint32 m_tupIndexFragPtrI;
457  Uint32 m_tupTableFragPtrI;
458  Uint32 m_accTableFragPtrI;
459  Uint64 m_entryCount; // current entries
460  Uint64 m_entryBytes; // sum of index key sizes
461  Uint64 m_entryOps; // ops since last index stats update
462  union {
463  Uint32 nextPool;
464  };
465  Frag(ArrayPool<ScanOp>& scanOpPool);
466  };
467  typedef Ptr<Frag> FragPtr;
468  ArrayPool<Frag> c_fragPool;
469  RSS_AP_SNAPSHOT(c_fragPool);
470 
471  /*
472  * Fragment metadata operation.
473  */
474  struct FragOp {
475  Uint32 m_userPtr;
476  Uint32 m_userRef;
477  Uint32 m_indexId;
478  Uint32 m_fragId;
479  Uint32 m_fragPtrI;
480  Uint32 m_fragNo; // fragment number starting at zero
481  Uint32 m_numAttrsRecvd;
482  union {
483  Uint32 nextPool;
484  };
485  FragOp();
486  };
487  typedef Ptr<FragOp> FragOpPtr;
488  ArrayPool<FragOp> c_fragOpPool;
489  RSS_AP_SNAPSHOT(c_fragOpPool);
490 
491  // node handles
492 
493  /*
494  * A node handle is a reference to a tree node in TUP. It is used to
495  * operate on the node. Node handles are allocated on the stack.
496  */
497  struct NodeHandle;
498  friend struct NodeHandle;
499  struct NodeHandle {
500  Frag& m_frag; // fragment using the node
501  TupLoc m_loc; // physical node address
502  TreeNode* m_node; // pointer to node storage
503  NodeHandle(Frag& frag);
504  NodeHandle(const NodeHandle& node);
505  NodeHandle& operator=(const NodeHandle& node);
506  // check if unassigned
507  bool isNull();
508  // getters
509  TupLoc getLink(unsigned i);
510  unsigned getChilds(); // cannot spell
511  unsigned getSide();
512  unsigned getOccup();
513  int getBalance();
514  Uint32 getNodeScan();
515  // setters
516  void setLink(unsigned i, TupLoc loc);
517  void setSide(unsigned i);
518  void setOccup(unsigned n);
519  void setBalance(int b);
520  void setNodeScan(Uint32 scanPtrI);
521  // access other parts of the node
522  Uint32* getPref();
523  TreeEnt getEnt(unsigned pos);
524  // for ndbrequire and ndbassert
525  void progError(int line, int cause, const char* file);
526  };
527 
528  // stats scan
529  struct StatOp;
530  friend struct StatOp;
531  struct StatOp {
532  // the scan
533  Uint32 m_scanOpPtrI;
534  // parameters
535  Uint32 m_saveSize;
536  Uint32 m_saveScale;
537  Uint32 m_batchSize;
538  Uint32 m_estBytes;
539  // counters
540  Uint32 m_rowCount;
541  Uint32 m_batchCurr;
542  bool m_haveSample;
543  Uint32 m_sampleCount;
544  Uint32 m_keyBytes;
545  bool m_keyChange;
546  bool m_usePrev;
547  // metadata
548  enum { MaxKeyCount = MAX_INDEX_STAT_KEY_COUNT };
549  enum { MaxKeySize = MAX_INDEX_STAT_KEY_SIZE };
550  enum { MaxValueCount = MAX_INDEX_STAT_VALUE_COUNT };
551  enum { MaxValueSize = MAX_INDEX_STAT_VALUE_SIZE };
552  Uint32 m_keyCount;
553  Uint32 m_valueCount;
554  // pack
555  const KeySpec& m_keySpec;
556  NdbPack::Spec m_valueSpec;
557  NdbPack::Type m_valueSpecBuf[MaxValueCount];
558  // data previous current result
559  KeyData m_keyData1;
560  KeyData m_keyData2;
561  KeyData m_keyData;
562  NdbPack::Data m_valueData;
563  // buffers with one word for length bytes
564  Uint32 m_keyDataBuf1[1 + MaxKeySize];
565  Uint32 m_keyDataBuf2[1 + MaxKeySize];
566  Uint32 m_keyDataBuf[1 + MaxKeySize];
567  Uint32 m_valueDataBuf[1 + MaxValueCount];
568  // value collection
569  struct Value {
570  Uint32 m_rir;
571  Uint32 m_unq[MaxKeyCount];
572  Value();
573  };
574  Value m_value1;
575  Value m_value2;
576  union {
577  Uint32 nextPool;
578  };
579  StatOp(const Index&);
580  };
581  typedef Ptr<StatOp> StatOpPtr;
582  ArrayPool<StatOp> c_statOpPool;
583  RSS_AP_SNAPSHOT(c_statOpPool);
584 
585  // stats monitor (shared by req data and continueB loop)
586  struct StatMon;
587  friend struct StatMon;
588  struct StatMon {
589  IndexStatImplReq m_req;
590  Uint32 m_requestType;
591  // continueB loop
592  Uint32 m_loopIndexId;
593  Uint32 m_loopDelay;
594  StatMon();
595  };
596  StatMon c_statMon;
597 
598  // methods
599 
600  /*
601  * DbtuxGen.cpp
602  */
603  void execCONTINUEB(Signal* signal);
604  void execSTTOR(Signal* signal);
605  void execREAD_CONFIG_REQ(Signal* signal);
606  void execNODE_STATE_REP(Signal* signal);
607 
608  // utils
609  void readKeyAttrs(TuxCtx&, const Frag& frag, TreeEnt ent, KeyData& keyData, Uint32 count);
610  void readTablePk(const Frag& frag, TreeEnt ent, Uint32* pkData, unsigned& pkSize);
611  void unpackBound(TuxCtx&, const ScanBound& bound, KeyBoundC& searchBound);
612  void findFrag(const Index& index, Uint32 fragId, FragPtr& fragPtr);
613 
614  /*
615  * DbtuxMeta.cpp
616  */
617  void execCREATE_TAB_REQ(Signal*);
618  void execTUXFRAGREQ(Signal* signal);
619  void execTUX_ADD_ATTRREQ(Signal* signal);
620  void execALTER_INDX_IMPL_REQ(Signal* signal);
621  void execDROP_TAB_REQ(Signal* signal);
622  void execDROP_FRAG_REQ(Signal* signal);
623  bool allocDescEnt(IndexPtr indexPtr);
624  void freeDescEnt(IndexPtr indexPtr);
625  void abortAddFragOp(Signal* signal);
626  void dropIndex(Signal* signal, IndexPtr indexPtr, Uint32 senderRef, Uint32 senderData);
627 
628  /*
629  * DbtuxMaint.cpp
630  */
631  void execTUX_MAINT_REQ(Signal* signal);
632 
633  /*
634  * DbtuxNode.cpp
635  */
636  int allocNode(TuxCtx&, NodeHandle& node);
637  void freeNode(NodeHandle& node);
638  void selectNode(NodeHandle& node, TupLoc loc);
639  void insertNode(NodeHandle& node);
640  void deleteNode(NodeHandle& node);
641  void freePreallocatedNode(Frag& frag);
642  void setNodePref(struct TuxCtx &, NodeHandle& node);
643  // node operations
644  void nodePushUp(TuxCtx&, NodeHandle& node, unsigned pos, const TreeEnt& ent, Uint32 scanList);
645  void nodePushUpScans(NodeHandle& node, unsigned pos);
646  void nodePopDown(TuxCtx&, NodeHandle& node, unsigned pos, TreeEnt& ent, Uint32* scanList);
647  void nodePopDownScans(NodeHandle& node, unsigned pos);
648  void nodePushDown(TuxCtx&, NodeHandle& node, unsigned pos, TreeEnt& ent, Uint32& scanList);
649  void nodePushDownScans(NodeHandle& node, unsigned pos);
650  void nodePopUp(TuxCtx&, NodeHandle& node, unsigned pos, TreeEnt& ent, Uint32 scanList);
651  void nodePopUpScans(NodeHandle& node, unsigned pos);
652  void nodeSlide(TuxCtx&, NodeHandle& dstNode, NodeHandle& srcNode, unsigned cnt, unsigned i);
653  // scans linked to node
654  void addScanList(NodeHandle& node, unsigned pos, Uint32 scanList);
655  void removeScanList(NodeHandle& node, unsigned pos, Uint32& scanList);
656  void moveScanList(NodeHandle& node, unsigned pos);
657  void linkScan(NodeHandle& node, ScanOpPtr scanPtr);
658  void unlinkScan(NodeHandle& node, ScanOpPtr scanPtr);
659  bool islinkScan(NodeHandle& node, ScanOpPtr scanPtr);
660 
661  /*
662  * DbtuxTree.cpp
663  */
664  // add entry
665  void treeAdd(TuxCtx&, Frag& frag, TreePos treePos, TreeEnt ent);
666  void treeAddFull(TuxCtx&, Frag& frag, NodeHandle lubNode, unsigned pos, TreeEnt ent);
667  void treeAddNode(TuxCtx&, Frag& frag, NodeHandle lubNode, unsigned pos, TreeEnt ent, NodeHandle parentNode, unsigned i);
668  void treeAddRebalance(TuxCtx&, Frag& frag, NodeHandle node, unsigned i);
669  // remove entry
670  void treeRemove(Frag& frag, TreePos treePos);
671  void treeRemoveInner(Frag& frag, NodeHandle lubNode, unsigned pos);
672  void treeRemoveSemi(Frag& frag, NodeHandle node, unsigned i);
673  void treeRemoveLeaf(Frag& frag, NodeHandle node);
674  void treeRemoveNode(Frag& frag, NodeHandle node);
675  void treeRemoveRebalance(Frag& frag, NodeHandle node, unsigned i);
676  // rotate
677  void treeRotateSingle(TuxCtx&, Frag& frag, NodeHandle& node, unsigned i);
678  void treeRotateDouble(TuxCtx&, Frag& frag, NodeHandle& node, unsigned i);
679 
680  /*
681  * DbtuxScan.cpp
682  */
683  void execACC_SCANREQ(Signal* signal);
684  void execTUX_BOUND_INFO(Signal* signal);
685  void execNEXT_SCANREQ(Signal* signal);
686  void execACC_CHECK_SCAN(Signal* signal);
687  void execACCKEYCONF(Signal* signal);
688  void execACCKEYREF(Signal* signal);
689  void execACC_ABORTCONF(Signal* signal);
690  void scanFirst(ScanOpPtr scanPtr);
691  void scanFind(ScanOpPtr scanPtr);
692  void scanNext(ScanOpPtr scanPtr, bool fromMaintReq);
693  bool scanCheck(ScanOpPtr scanPtr, TreeEnt ent);
694  bool scanVisible(ScanOpPtr scanPtr, TreeEnt ent);
695  void scanClose(Signal* signal, ScanOpPtr scanPtr);
696  void abortAccLockOps(Signal* signal, ScanOpPtr scanPtr);
697  void addAccLockOp(ScanOpPtr scanPtr, Uint32 accLockOp);
698  void removeAccLockOp(ScanOpPtr scanPtr, Uint32 accLockOp);
699  void releaseScanOp(ScanOpPtr& scanPtr);
700 
701  /*
702  * DbtuxSearch.cpp
703  */
704  void findNodeToUpdate(TuxCtx&, Frag& frag, const KeyDataC& searchKey, TreeEnt searchEnt, NodeHandle& currNode);
705  bool findPosToAdd(TuxCtx&, Frag& frag, const KeyDataC& searchKey, TreeEnt searchEnt, NodeHandle& currNode, TreePos& treePos);
706  bool findPosToRemove(TuxCtx&, Frag& frag, const KeyDataC& searchKey, TreeEnt searchEnt, NodeHandle& currNode, TreePos& treePos);
707  bool searchToAdd(TuxCtx&, Frag& frag, const KeyDataC& searchKey, TreeEnt searchEnt, TreePos& treePos);
708  bool searchToRemove(TuxCtx&, Frag& frag, const KeyDataC& searchKey, TreeEnt searchEnt, TreePos& treePos);
709  void findNodeToScan(Frag& frag, unsigned dir, const KeyBoundC& searchBound, NodeHandle& currNode);
710  void findPosToScan(Frag& frag, unsigned idir, const KeyBoundC& searchBound, NodeHandle& currNode, Uint16* pos);
711  void searchToScan(Frag& frag, unsigned idir, const KeyBoundC& searchBound, TreePos& treePos);
712 
713  /*
714  * DbtuxCmp.cpp
715  */
716  int cmpSearchKey(TuxCtx&, const KeyDataC& searchKey, const KeyDataC& entryKey, Uint32 cnt);
717  int cmpSearchBound(TuxCtx&, const KeyBoundC& searchBound, const KeyDataC& entryKey, Uint32 cnt);
718 
719  /*
720  * DbtuxStat.cpp
721  */
722  void execREAD_PSEUDO_REQ(Signal* signal);
723  // one-round-trip tree-dive records in range
724  void statRecordsInRange(ScanOpPtr scanPtr, Uint32* out);
725  Uint32 getEntriesBeforeOrAfter(Frag& frag, TreePos pos, unsigned idir);
726  unsigned getPathToNode(NodeHandle node, Uint16* path);
727  // stats scan
728  int statScanInit(StatOpPtr, const Uint32* data, Uint32 len, Uint32* usedLen);
729  int statScanAddRow(StatOpPtr, TreeEnt ent);
730  void statScanReadKey(StatOpPtr, Uint32* out);
731  void statScanReadValue(StatOpPtr, Uint32* out);
732  void execINDEX_STAT_REP(Signal*); // from TRIX
733  // stats monitor request
734  void execINDEX_STAT_IMPL_REQ(Signal*);
735  void statMonStart(Signal*, StatMon&);
736  void statMonStop(Signal*, StatMon&);
737  void statMonConf(Signal*, StatMon&);
738  // stats monitor continueB loop
739  void statMonSendContinueB(Signal*);
740  void statMonExecContinueB(Signal*);
741  void statMonCheck(Signal*, StatMon&);
742  void statMonRep(Signal*, StatMon&);
743 
744  /*
745  * DbtuxDebug.cpp
746  */
747  void execDUMP_STATE_ORD(Signal* signal);
748 #ifdef VM_TRACE
749  struct PrintPar {
750  char m_path[100]; // LR prefix
751  unsigned m_side; // expected side
752  TupLoc m_parent; // expected parent address
753  int m_depth; // returned depth
754  unsigned m_occup; // returned occupancy
755  TreeEnt m_minmax[2]; // returned subtree min and max
756  bool m_ok; // returned status
757  PrintPar();
758  };
759  void printTree(Signal* signal, Frag& frag, NdbOut& out);
760  void printNode(struct TuxCtx&, Frag&, NdbOut& out, TupLoc loc, PrintPar& par);
761  friend class NdbOut& operator<<(NdbOut&, const TupLoc&);
762  friend class NdbOut& operator<<(NdbOut&, const TreeEnt&);
763  friend class NdbOut& operator<<(NdbOut&, const TreeNode&);
764  friend class NdbOut& operator<<(NdbOut&, const TreeHead&);
765  friend class NdbOut& operator<<(NdbOut&, const TreePos&);
766  friend class NdbOut& operator<<(NdbOut&, const KeyType&);
767  friend class NdbOut& operator<<(NdbOut&, const ScanOp&);
768  friend class NdbOut& operator<<(NdbOut&, const Index&);
769  friend class NdbOut& operator<<(NdbOut&, const Frag&);
770  friend class NdbOut& operator<<(NdbOut&, const FragOp&);
771  friend class NdbOut& operator<<(NdbOut&, const NodeHandle&);
772  friend class NdbOut& operator<<(NdbOut&, const StatOp&);
773  friend class NdbOut& operator<<(NdbOut&, const StatMon&);
774  FILE* debugFile;
775  NdbOut debugOut;
776  unsigned debugFlags;
777  enum {
778  DebugMeta = 1, // log create and drop index
779  DebugMaint = 2, // log maintenance ops
780  DebugTree = 4, // log and check tree after each op
781  DebugScan = 8, // log scans
782  DebugLock = 16, // log ACC locks
783  DebugStat = 32 // log stats collection
784  };
785  STATIC_CONST( DataFillByte = 0xa2 );
786  STATIC_CONST( NodeFillByte = 0xa4 );
787 #endif
788 
789  void execDBINFO_SCANREQ(Signal* signal);
790 
791  // start up info
792  Uint32 c_internalStartPhase;
793  Uint32 c_typeOfStart;
794 
795  /*
796  * Global data set at operation start. Unpacked from index metadata.
797  * Not passed as parameter to methods. Invalid across timeslices.
798  *
799  * TODO inline all into index metadata
800  */
801  struct TuxCtx
802  {
803  EmulatedJamBuffer * jamBuffer;
804 
805  // buffer for scan bound and search key data
806  Uint32* c_searchKey;
807 
808  // buffer for current entry key data
809  Uint32* c_entryKey;
810 
811  // buffer for xfrm-ed PK and for temporary use
812  Uint32* c_dataBuffer;
813 
814 #ifdef VM_TRACE
815  char* c_debugBuffer;
816 #endif
817  };
818 
819  struct TuxCtx c_ctx; // Global Tux context, for everything but MT-index build
820 
821  // index stats
822  bool c_indexStatAutoUpdate;
823  Uint32 c_indexStatSaveSize;
824  Uint32 c_indexStatSaveScale;
825  Uint32 c_indexStatTriggerPct;
826  Uint32 c_indexStatTriggerScale;
827  Uint32 c_indexStatUpdateDelay;
828 
829  // inlined utils
830  Uint32 getDescSize(const Index& index);
831  DescHead& getDescHead(const Index& index);
832  KeyType* getKeyTypes(DescHead& descHead);
833  const KeyType* getKeyTypes(const DescHead& descHead);
834  AttributeHeader* getKeyAttrs(DescHead& descHead);
835  const AttributeHeader* getKeyAttrs(const DescHead& descHead);
836  //
837  void getTupAddr(const Frag& frag, TreeEnt ent, Uint32& lkey1, Uint32& lkey2);
838  static unsigned min(unsigned x, unsigned y);
839  static unsigned max(unsigned x, unsigned y);
840 
841 public:
842  static Uint32 mt_buildIndexFragment_wrapper(void*);
843 private:
844  Uint32 mt_buildIndexFragment(struct mt_BuildIndxCtx*);
845 };
846 
847 // Dbtux::TupLoc
848 
849 inline
850 Dbtux::TupLoc::TupLoc() :
851  m_pageId1(RNIL >> 16),
852  m_pageId2(RNIL & 0xFFFF),
853  m_pageOffset(0)
854 {
855 }
856 
857 inline
858 Dbtux::TupLoc::TupLoc(Uint32 pageId, Uint16 pageOffset) :
859  m_pageId1(pageId >> 16),
860  m_pageId2(pageId & 0xFFFF),
861  m_pageOffset(pageOffset)
862 {
863 }
864 
865 inline Uint32
866 Dbtux::TupLoc::getPageId() const
867 {
868  return (m_pageId1 << 16) | m_pageId2;
869 }
870 
871 inline void
872 Dbtux::TupLoc::setPageId(Uint32 pageId)
873 {
874  m_pageId1 = (pageId >> 16);
875  m_pageId2 = (pageId & 0xFFFF);
876 }
877 
878 inline Uint32
879 Dbtux::TupLoc::getPageOffset() const
880 {
881  return (Uint32)m_pageOffset;
882 }
883 
884 inline void
885 Dbtux::TupLoc::setPageOffset(Uint32 pageOffset)
886 {
887  m_pageOffset = (Uint16)pageOffset;
888 }
889 
890 inline bool
891 Dbtux::TupLoc::operator==(const TupLoc& loc) const
892 {
893  return
894  m_pageId1 == loc.m_pageId1 &&
895  m_pageId2 == loc.m_pageId2 &&
896  m_pageOffset == loc.m_pageOffset;
897 }
898 
899 inline bool
900 Dbtux::TupLoc::operator!=(const TupLoc& loc) const
901 {
902  return ! (*this == loc);
903 }
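// Illustrative sketch (not part of the original header): TupLoc keeps the
// 32-bit page i-value as two Uint16 halves plus a word offset, exactly as in
// setPageId()/getPageId() above. The split is lossless for any 32-bit id;
// the helper name below is hypothetical:
static inline unsigned splitAndRejoinPageId(unsigned pageId)
{
  const unsigned short hi = (unsigned short)(pageId >> 16);    // m_pageId1
  const unsigned short lo = (unsigned short)(pageId & 0xFFFF); // m_pageId2
  return ((unsigned)hi << 16) | lo;                            // == pageId
}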
904 
905 // Dbtux::TreeEnt
906 
907 inline
908 Dbtux::TreeEnt::TreeEnt() :
909  m_tupLoc(),
910  m_tupVersion(0)
911 {
912 }
913 
914 inline bool
915 Dbtux::TreeEnt::eqtuple(const TreeEnt ent) const
916 {
917  return
918  m_tupLoc == ent.m_tupLoc;
919 }
920 
921 inline bool
922 Dbtux::TreeEnt::eq(const TreeEnt ent) const
923 {
924  return
925  m_tupLoc == ent.m_tupLoc &&
926  m_tupVersion == ent.m_tupVersion;
927 }
928 
929 inline int
930 Dbtux::TreeEnt::cmp(const TreeEnt ent) const
931 {
932  if (m_tupLoc.getPageId() < ent.m_tupLoc.getPageId())
933  return -1;
934  if (m_tupLoc.getPageId() > ent.m_tupLoc.getPageId())
935  return +1;
936  if (m_tupLoc.getPageOffset() < ent.m_tupLoc.getPageOffset())
937  return -1;
938  if (m_tupLoc.getPageOffset() > ent.m_tupLoc.getPageOffset())
939  return +1;
940  /*
941  * Guess if one tuple version has wrapped around. This is well
942  * defined ordering on existing versions since versions are assigned
943  consecutively and different versions exist only on uncommitted
944  tuples. Assuming max 2**14 uncommitted ops on same tuple.
945  */
946  const unsigned version_wrap_limit = (1 << (ZTUP_VERSION_BITS - 1));
947  if (m_tupVersion < ent.m_tupVersion) {
948  if (unsigned(ent.m_tupVersion - m_tupVersion) < version_wrap_limit)
949  return -1;
950  else
951  return +1;
952  }
953  if (m_tupVersion > ent.m_tupVersion) {
954  if (unsigned(m_tupVersion - ent.m_tupVersion) < version_wrap_limit)
955  return +1;
956  else
957  return -1;
958  }
959  return 0;
960 }
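// Illustrative sketch (not part of the original header): the version compare
// above is wrap-aware. With ZTUP_VERSION_BITS == 15 the wrap limit is
// 1 << 14 == 16384, so when two versions are further apart than that the
// numerically smaller one is taken to be the newer (wrapped) version.
// A standalone version of the same rule, with a hypothetical name:
static inline int cmpVersionWrapAware(unsigned a, unsigned b)
{
  const unsigned wrapLimit = 1u << 14; // 1 << (ZTUP_VERSION_BITS - 1)
  if (a == b)
    return 0;
  if (a < b)
    return (b - a < wrapLimit) ? -1 : +1; // distant: a wrapped, a is newer
  return (a - b < wrapLimit) ? +1 : -1;   // distant: b wrapped, b is newer
}
// e.g. cmpVersionWrapAware(32760, 5) == -1: version 5 is treated as the later one.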
961 
962 // Dbtux::TreeNode
963 
964 inline
965 Dbtux::TreeNode::TreeNode() :
966  m_side(2),
967  m_balance(0 + 1),
968  pad1(0),
969  m_occup(0),
970  m_nodeScan(RNIL)
971 {
972  m_link[0] = NullTupLoc;
973  m_link[1] = NullTupLoc;
974  m_link[2] = NullTupLoc;
975 }
976 
977 // Dbtux::TreeHead
978 
979 inline
980 Dbtux::TreeHead::TreeHead() :
981  m_nodeSize(0),
982  m_prefSize(0),
983  m_minOccup(0),
984  m_maxOccup(0),
985  m_root()
986 {
987 }
988 
989 inline Uint32*
990 Dbtux::TreeHead::getPref(TreeNode* node) const
991 {
992  Uint32* ptr = (Uint32*)node + NodeHeadSize;
993  return ptr;
994 }
995 
996 inline Dbtux::TreeEnt*
997 Dbtux::TreeHead::getEntList(TreeNode* node) const
998 {
999  Uint32* ptr = (Uint32*)node + NodeHeadSize + m_prefSize;
1000  return (TreeEnt*)ptr;
1001 }
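// Illustrative sketch (not part of the original header): getPref() and
// getEntList() above imply this word layout for one tree node of
// m_nodeSize words:
//
//   [0 .. NodeHeadSize-1]                 TreeNode header
//   [NodeHeadSize .. +m_prefSize-1]       min prefix (packed key words)
//   [NodeHeadSize+m_prefSize .. ]         TreeEnt list, TreeEntSize words each,
//                                         up to m_maxOccup entries
//
// so a consistent TreeHead satisfies the (hypothetical) check below:
static inline bool treeNodeLayoutFits(unsigned nodeSize, unsigned prefSize,
                                      unsigned maxOccup,
                                      unsigned nodeHeadSize, unsigned treeEntSize)
{
  return nodeHeadSize + prefSize + maxOccup * treeEntSize <= nodeSize;
}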
1002 
1003 // Dbtux::TreePos
1004 
1005 inline
1006 Dbtux::TreePos::TreePos() :
1007  m_loc(),
1008  m_pos(ZNIL),
1009  m_dir(255)
1010 {
1011 }
1012 
1013 // Dbtux::DescPage
1014 
1015 inline
1016 Dbtux::DescPage::DescPage() :
1017  m_nextPage(RNIL),
1018  m_numFree(ZNIL)
1019 {
1020  for (unsigned i = 0; i < DescPageSize; i++) {
1021 #ifdef VM_TRACE
1022  m_data[i] = 0x13571357;
1023 #else
1024  m_data[i] = 0;
1025 #endif
1026  }
1027 }
1028 
1029 // Dbtux::ScanBound
1030 
1031 inline
1032 Dbtux::ScanBound::ScanBound() :
1033  m_head(),
1034  m_cnt(0),
1035  m_side(0)
1036 {
1037 }
1038 
1039 // Dbtux::ScanOp
1040 
1041 inline
1042 Dbtux::ScanOp::ScanOp() :
1043  m_state(Undef),
1044  m_lockwait(false),
1045  m_errorCode(0),
1046  m_userPtr(RNIL),
1047  m_userRef(RNIL),
1048  m_tableId(RNIL),
1049  m_indexId(RNIL),
1050  m_fragPtrI(RNIL),
1051  m_transId1(0),
1052  m_transId2(0),
1053  m_savePointId(0),
1054  m_accLockOp(RNIL),
1055  m_accLockOps(),
1056  m_readCommitted(0),
1057  m_lockMode(0),
1058  m_descending(0),
1059  m_scanBound(),
1060  m_scanPos(),
1061  m_scanEnt(),
1062  m_nodeScan(RNIL),
1063  m_statOpPtrI(RNIL)
1064 {
1065 }
1066 
1067 // Dbtux::Index
1068 
1069 inline
1070 Dbtux::Index::Index() :
1071  m_state(NotDefined),
1072  m_tableType(DictTabInfo::UndefTableType),
1073  m_tableId(RNIL),
1074  m_numFrags(0),
1075  m_descPage(RNIL),
1076  m_descOff(0),
1077  m_numAttrs(0),
1078  m_prefAttrs(0),
1079  m_prefBytes(0),
1080  m_keySpec(),
1081  m_statFragPtrI(RNIL),
1082  m_statLoadTime(0),
1083  m_storeNullKey(false)
1084 {
1085  for (unsigned i = 0; i < MaxIndexFragments; i++) {
1086  m_fragId[i] = ZNIL;
1087  m_fragPtrI[i] = RNIL;
1088  };
1089 }
1090 
1091 // Dbtux::Frag
1092 
1093 inline
1094 Dbtux::Frag::Frag(ArrayPool<ScanOp>& scanOpPool) :
1095  m_tableId(RNIL),
1096  m_indexId(RNIL),
1097  m_fragId(ZNIL),
1098  m_tree(),
1099  m_freeLoc(),
1100  m_scanList(scanOpPool),
1101  m_tupIndexFragPtrI(RNIL),
1102  m_tupTableFragPtrI(RNIL),
1103  m_accTableFragPtrI(RNIL),
1104  m_entryCount(0),
1105  m_entryBytes(0),
1106  m_entryOps(0)
1107 {
1108 }
1109 
1110 // Dbtux::FragOp
1111 
1112 inline
1113 Dbtux::FragOp::FragOp() :
1114  m_userPtr(RNIL),
1115  m_userRef(RNIL),
1116  m_indexId(RNIL),
1117  m_fragId(ZNIL),
1118  m_fragPtrI(RNIL),
1119  m_fragNo(ZNIL),
1120  m_numAttrsRecvd(ZNIL)
1121 {
1122 }
1123 
1124 // Dbtux::NodeHandle
1125 
1126 inline
1127 Dbtux::NodeHandle::NodeHandle(Frag& frag) :
1128  m_frag(frag),
1129  m_loc(),
1130  m_node(0)
1131 {
1132 }
1133 
1134 inline
1135 Dbtux::NodeHandle::NodeHandle(const NodeHandle& node) :
1136  m_frag(node.m_frag),
1137  m_loc(node.m_loc),
1138  m_node(node.m_node)
1139 {
1140 }
1141 
1142 inline Dbtux::NodeHandle&
1143 Dbtux::NodeHandle::operator=(const NodeHandle& node)
1144 {
1145  ndbassert(&m_frag == &node.m_frag);
1146  m_loc = node.m_loc;
1147  m_node = node.m_node;
1148  return *this;
1149 }
1150 
1151 inline bool
1152 Dbtux::NodeHandle::isNull()
1153 {
1154  return m_node == 0;
1155 }
1156 
1157 inline Dbtux::TupLoc
1158 Dbtux::NodeHandle::getLink(unsigned i)
1159 {
1160  ndbrequire(i <= 2);
1161  return m_node->m_link[i];
1162 }
1163 
1164 inline unsigned
1165 Dbtux::NodeHandle::getChilds()
1166 {
1167  return (m_node->m_link[0] != NullTupLoc) + (m_node->m_link[1] != NullTupLoc);
1168 }
1169 
1170 inline unsigned
1171 Dbtux::NodeHandle::getSide()
1172 {
1173  return m_node->m_side;
1174 }
1175 
1176 inline unsigned
1177 Dbtux::NodeHandle::getOccup()
1178 {
1179  return m_node->m_occup;
1180 }
1181 
1182 inline int
1183 Dbtux::NodeHandle::getBalance()
1184 {
1185  return (int)m_node->m_balance - 1;
1186 }
1187 
1188 inline Uint32
1189 Dbtux::NodeHandle::getNodeScan()
1190 {
1191  return m_node->m_nodeScan;
1192 }
1193 
1194 inline void
1195 Dbtux::NodeHandle::setLink(unsigned i, TupLoc loc)
1196 {
1197  if (likely(i <= 2))
1198  {
1199  m_node->m_link[i] = loc;
1200  }
1201  else
1202  {
1203  ndbrequire(false);
1204  }
1205 }
1206 
1207 inline void
1208 Dbtux::NodeHandle::setSide(unsigned i)
1209 {
1210  if (likely(i <= 2))
1211  {
1212  m_node->m_side = i;
1213  }
1214  else
1215  {
1216  ndbrequire(false);
1217  }
1218 }
1219 
1220 inline void
1221 Dbtux::NodeHandle::setOccup(unsigned n)
1222 {
1223  TreeHead& tree = m_frag.m_tree;
1224  ndbrequire(n <= tree.m_maxOccup);
1225  m_node->m_occup = n;
1226 }
1227 
1228 inline void
1229 Dbtux::NodeHandle::setBalance(int b)
1230 {
1231  ndbrequire(abs(b) <= 1);
1232  m_node->m_balance = (unsigned)(b + 1);
1233 }
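// Illustrative sketch (not part of the original header): the AVL balance
// factor -1/0/+1 is held in the 2-bit field m_balance with a +1 bias, as in
// getBalance()/setBalance() above. The encode/decode pair (hypothetical names):
static inline unsigned encodeBalance(int b) { return (unsigned)(b + 1); } // 0, 1 or 2
static inline int decodeBalance(unsigned field) { return (int)field - 1; }
// decodeBalance(encodeBalance(-1)) == -1, and likewise for 0 and +1.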
1234 
1235 inline void
1236 Dbtux::NodeHandle::setNodeScan(Uint32 scanPtrI)
1237 {
1238  m_node->m_nodeScan = scanPtrI;
1239 }
1240 
1241 inline Uint32*
1242 Dbtux::NodeHandle::getPref()
1243 {
1244  TreeHead& tree = m_frag.m_tree;
1245  return tree.getPref(m_node);
1246 }
1247 
1248 inline Dbtux::TreeEnt
1249 Dbtux::NodeHandle::getEnt(unsigned pos)
1250 {
1251  TreeHead& tree = m_frag.m_tree;
1252  TreeEnt* entList = tree.getEntList(m_node);
1253  const unsigned occup = m_node->m_occup;
1254  ndbrequire(pos < occup);
1255  return entList[pos];
1256 }
1257 
1258 // stats
1259 
1260 inline
1261 Dbtux::StatOp::Value::Value()
1262 {
1263  m_rir = 0;
1264  Uint32 i;
1265  for (i = 0; i < MaxKeyCount; i++)
1266  m_unq[i] = 0;
1267 }
1268 
1269 inline
1270 Dbtux::StatOp::StatOp(const Index& index) :
1271  m_scanOpPtrI(RNIL),
1272  m_saveSize(0),
1273  m_saveScale(0),
1274  m_batchSize(0),
1275  m_estBytes(0),
1276  m_rowCount(0),
1277  m_batchCurr(0),
1278  m_haveSample(false),
1279  m_sampleCount(0),
1280  m_keyBytes(0),
1281  m_keyChange(false),
1282  m_usePrev(false),
1283  m_keyCount(0),
1284  m_valueCount(0),
1285  m_keySpec(index.m_keySpec),
1286  m_keyData1(m_keySpec, false, 2),
1287  m_keyData2(m_keySpec, false, 2),
1288  m_keyData(m_keySpec, false, 2),
1289  m_valueData(m_valueSpec, false, 2),
1290  m_value1(),
1291  m_value2()
1292 {
1293  m_valueSpec.set_buf(m_valueSpecBuf, MaxValueCount);
1294  m_keyData1.set_buf(m_keyDataBuf1, sizeof(m_keyDataBuf1));
1295  m_keyData2.set_buf(m_keyDataBuf2, sizeof(m_keyDataBuf2));
1296  m_keyData.set_buf(m_keyDataBuf, sizeof(m_keyDataBuf));
1297  m_valueData.set_buf(m_valueDataBuf, sizeof(m_valueDataBuf));
1298 }
1299 
1300 // Dbtux::StatMon
1301 
1302 inline
1303 Dbtux::StatMon::StatMon() :
1304  m_requestType(0),
1305  m_loopIndexId(0),
1306  m_loopDelay(1000)
1307 {
1308  memset(&m_req, 0, sizeof(m_req));
1309 }
1310 
1311 // parameters for methods
1312 
1313 #ifdef VM_TRACE
1314 inline
1315 Dbtux::PrintPar::PrintPar() :
1316  // caller fills in
1317  m_path(),
1318  m_side(255),
1319  m_parent(),
1320  // default return values
1321  m_depth(0),
1322  m_occup(0),
1323  m_ok(true)
1324 {
1325 }
1326 #endif
1327 
1328 // utils
1329 
1330 inline Uint32
1331 Dbtux::getDescSize(const Index& index)
1332 {
1333  return
1334  DescHeadSize +
1335  index.m_numAttrs * KeyTypeSize +
1336  index.m_numAttrs * AttributeHeaderSize;
1337 }
1338 
1339 inline Dbtux::DescHead&
1340 Dbtux::getDescHead(const Index& index)
1341 {
1342  DescPagePtr pagePtr;
1343  pagePtr.i = index.m_descPage;
1344  c_descPagePool.getPtr(pagePtr);
1345  ndbrequire(index.m_descOff < DescPageSize);
1346  Uint32* ptr = &pagePtr.p->m_data[index.m_descOff];
1347  DescHead* descHead = reinterpret_cast<DescHead*>(ptr);
1348  ndbrequire(descHead->m_magic == DescHead::Magic);
1349  return *descHead;
1350 }
1351 
1352 inline Dbtux::KeyType*
1353 Dbtux::getKeyTypes(DescHead& descHead)
1354 {
1355  Uint32* ptr = reinterpret_cast<Uint32*>(&descHead);
1356  ptr += DescHeadSize;
1357  return reinterpret_cast<KeyType*>(ptr);
1358 }
1359 
1360 inline const Dbtux::KeyType*
1361 Dbtux::getKeyTypes(const DescHead& descHead)
1362 {
1363  const Uint32* ptr = reinterpret_cast<const Uint32*>(&descHead);
1364  ptr += DescHeadSize;
1365  return reinterpret_cast<const KeyType*>(ptr);
1366 }
1367 
1368 inline AttributeHeader*
1369 Dbtux::getKeyAttrs(DescHead& descHead)
1370 {
1371  Uint32* ptr = reinterpret_cast<Uint32*>(&descHead);
1372  ptr += DescHeadSize;
1373  ptr += descHead.m_numAttrs * KeyTypeSize;
1374  return reinterpret_cast<AttributeHeader*>(ptr);
1375 }
1376 
1377 inline const AttributeHeader*
1378 Dbtux::getKeyAttrs(const DescHead& descHead)
1379 {
1380  const Uint32* ptr = reinterpret_cast<const Uint32*>(&descHead);
1381  ptr += DescHeadSize;
1382  ptr += descHead.m_numAttrs * KeyTypeSize;
1383  return reinterpret_cast<const AttributeHeader*>(ptr);
1384 }
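// Illustrative sketch (not part of the original header): the descriptor area
// addressed by the accessors above is one contiguous word array per index:
//
//   DescHead                    DescHeadSize words (m_magic == 0xDE5C)
//   KeyType[m_numAttrs]         KeyTypeSize words each (NdbPack::Type)
//   AttributeHeader[m_numAttrs] AttributeHeaderSize (1) word each
//
// getDescSize() returns the sum of the three parts; a hypothetical
// recomputation from the sizes alone:
static inline unsigned descAreaWords(unsigned numAttrs,
                                     unsigned descHeadSize,
                                     unsigned keyTypeSize,
                                     unsigned attrHeaderSize)
{
  return descHeadSize + numAttrs * keyTypeSize + numAttrs * attrHeaderSize;
}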
1385 
1386 inline
1387 void
1388 Dbtux::getTupAddr(const Frag& frag, TreeEnt ent, Uint32& lkey1, Uint32& lkey2)
1389 {
1390  const Uint32 tableFragPtrI = frag.m_tupTableFragPtrI;
1391  const TupLoc tupLoc = ent.m_tupLoc;
1392  c_tup->tuxGetTupAddr(tableFragPtrI, tupLoc.getPageId(), tupLoc.getPageOffset(),
1393  lkey1, lkey2);
1394  jamEntry();
1395 }
1396 
1397 inline unsigned
1398 Dbtux::min(unsigned x, unsigned y)
1399 {
1400  return x < y ? x : y;
1401 }
1402 
1403 inline unsigned
1404 Dbtux::max(unsigned x, unsigned y)
1405 {
1406  return x > y ? x : y;
1407 }
1408 
1409 // DbtuxCmp.cpp
1410 
1411 inline int
1412 Dbtux::cmpSearchKey(TuxCtx& ctx, const KeyDataC& searchKey, const KeyDataC& entryKey, Uint32 cnt)
1413 {
1414  // compare cnt attributes from each
1415  Uint32 num_eq;
1416  int ret = searchKey.cmp(entryKey, cnt, num_eq);
1417 #ifdef VM_TRACE
1418  if (debugFlags & DebugMaint) {
1419  debugOut << "cmpSearchKey: ret:" << ret;
1420  debugOut << " search:" << searchKey.print(ctx.c_debugBuffer, DebugBufferBytes);
1421  debugOut << " entry:" << entryKey.print(ctx.c_debugBuffer, DebugBufferBytes);
1422  debugOut << endl;
1423  }
1424 #endif
1425  return ret;
1426 }
1427 
1428 inline int
1429 Dbtux::cmpSearchBound(TuxCtx& ctx, const KeyBoundC& searchBound, const KeyDataC& entryKey, Uint32 cnt)
1430 {
1431  // compare cnt attributes from each
1432  Uint32 num_eq;
1433  int ret = searchBound.cmp(entryKey, cnt, num_eq);
1434 #ifdef VM_TRACE
1435  if (debugFlags & DebugScan) {
1436  debugOut << "cmpSearchBound: res:" << ret;
1437  debugOut << " search:" << searchBound.print(ctx.c_debugBuffer, DebugBufferBytes);
1438  debugOut << " entry:" << entryKey.print(ctx.c_debugBuffer, DebugBufferBytes);
1439  debugOut << endl;
1440  }
1441 #endif
1442  return ret;
1443 }
1444 
1445 #endif