MySQL 5.6.14 Source Code Document
 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Groups Pages
DbtuxMeta.cpp
1 /*
2  Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
3 
4  This program is free software; you can redistribute it and/or modify
5  it under the terms of the GNU General Public License as published by
6  the Free Software Foundation; version 2 of the License.
7 
8  This program is distributed in the hope that it will be useful,
9  but WITHOUT ANY WARRANTY; without even the implied warranty of
10  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11  GNU General Public License for more details.
12 
13  You should have received a copy of the GNU General Public License
14  along with this program; if not, write to the Free Software
15  Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
16 */
17 
18 #define DBTUX_META_CPP
19 #include "Dbtux.hpp"
20 #include <my_sys.h>
21 
22 /*
23  * Create index.
24  *
25  * For historical reasons it looks like we are adding random fragments
26  * and attributes to existing index. In fact all fragments must be
27  * created at one time and they have identical attributes.
28  *
29  But history has changed:
30  * Now index will be created using the sequence
31  * CREATE_TAB_REQ
32  * TUP_ADD_ATTR_REQ +
33  *
34  * Followed by 0-N
35  * TUXFRAGREQ
36  */
37 
38 #include <signaldata/CreateTab.hpp>
39 #include <signaldata/LqhFrag.hpp>
40 
41 void
42 Dbtux::execCREATE_TAB_REQ(Signal* signal)
43 {
44  jamEntry();
45  CreateTabReq copy = *(CreateTabReq*)signal->getDataPtr();
46  CreateTabReq* req = &copy;
47 
48  IndexPtr indexPtr;
49  indexPtr.i = RNIL;
50  FragOpPtr fragOpPtr;
51  fragOpPtr.i = RNIL;
52  Uint32 errorCode = 0;
53 
54  do {
55  // get the index record
56  if (req->tableId >= c_indexPool.getSize()) {
57  jam();
58  errorCode = TuxFragRef::InvalidRequest;
59  break;
60  }
61  c_indexPool.getPtr(indexPtr, req->tableId);
62  if (indexPtr.p->m_state != Index::NotDefined)
63  {
64  jam();
65  errorCode = TuxFragRef::InvalidRequest;
66  indexPtr.i = RNIL; // leave alone
67  break;
68  }
69 
70  // get new operation record
71  c_fragOpPool.seize(fragOpPtr);
72  ndbrequire(fragOpPtr.i != RNIL);
73  new (fragOpPtr.p) FragOp();
74  fragOpPtr.p->m_userPtr = req->senderData;
75  fragOpPtr.p->m_userRef = req->senderRef;
76  fragOpPtr.p->m_indexId = req->tableId;
77  fragOpPtr.p->m_fragId = RNIL;
78  fragOpPtr.p->m_fragNo = RNIL;
79  fragOpPtr.p->m_numAttrsRecvd = 0;
80 #ifdef VM_TRACE
81  if (debugFlags & DebugMeta) {
82  debugOut << "Seize frag op " << fragOpPtr.i << " " << *fragOpPtr.p << endl;
83  }
84 #endif
85  // check if index has place for more fragments
86  ndbrequire(indexPtr.p->m_state == Index::NotDefined &&
87  DictTabInfo::isOrderedIndex(req->tableType) &&
88  req->noOfAttributes > 0 &&
89  req->noOfAttributes <= MaxIndexAttributes &&
90  indexPtr.p->m_descPage == RNIL);
91 
92  indexPtr.p->m_state = Index::Defining;
93  indexPtr.p->m_tableType = (DictTabInfo::TableType)req->tableType;
94  indexPtr.p->m_tableId = req->primaryTableId;
95  indexPtr.p->m_numAttrs = req->noOfAttributes;
96  indexPtr.p->m_storeNullKey = true; // not yet configurable
97  // allocate attribute descriptors
98  if (! allocDescEnt(indexPtr)) {
99  jam();
100  errorCode = TuxFragRef::NoFreeAttributes;
101  break;
102  }
103 
104  // error inserts
105  if ((ERROR_INSERTED(12001) && fragOpPtr.p->m_fragNo == 0) ||
106  (ERROR_INSERTED(12002) && fragOpPtr.p->m_fragNo == 1)) {
107  jam();
108  errorCode = (TuxFragRef::ErrorCode)1;
109  CLEAR_ERROR_INSERT_VALUE;
110  break;
111  }
112  // success
113  CreateTabConf* conf = (CreateTabConf*)signal->getDataPtrSend();
114  conf->senderRef = reference();
115  conf->senderData = req->senderData;
116  conf->tuxConnectPtr = fragOpPtr.i;
117  sendSignal(req->senderRef, GSN_CREATE_TAB_CONF,
118  signal, CreateTabConf::SignalLength, JBB);
119  return;
120  } while (0);
121  // error
122 
123  CreateTabRef* const ref = (CreateTabRef*)signal->getDataPtrSend();
124  ref->senderData = req->senderData;
125  ref->errorCode = errorCode;
126  sendSignal(req->senderRef, GSN_CREATE_TAB_REF,
127  signal, CreateTabRef::SignalLength, JBB);
128 
129  if (indexPtr.i != RNIL) {
130  jam();
131  // let DICT drop the unfinished index
132  }
133 
134  if (fragOpPtr.i != RNIL)
135  {
136  jam();
137  c_fragOpPool.release(fragOpPtr);
138  }
139 }
140 
141 void
142 Dbtux::execTUX_ADD_ATTRREQ(Signal* signal)
143 {
144  jamEntry();
145  const TuxAddAttrReq reqCopy = *(const TuxAddAttrReq*)signal->getDataPtr();
146  const TuxAddAttrReq* const req = &reqCopy;
147  // get the records
148  FragOpPtr fragOpPtr;
149  IndexPtr indexPtr;
150  c_fragOpPool.getPtr(fragOpPtr, req->tuxConnectPtr);
151  c_indexPool.getPtr(indexPtr, fragOpPtr.p->m_indexId);
152  TuxAddAttrRef::ErrorCode errorCode = TuxAddAttrRef::NoError;
153  do {
154  // expected attribute id
155  const unsigned attrId = fragOpPtr.p->m_numAttrsRecvd++;
156  ndbrequire(
157  indexPtr.p->m_state == Index::Defining &&
158  attrId < indexPtr.p->m_numAttrs &&
159  attrId == req->attrId);
160  const Uint32 ad = req->attrDescriptor;
161  const Uint32 typeId = AttributeDescriptor::getType(ad);
162  const Uint32 sizeInBytes = AttributeDescriptor::getSizeInBytes(ad);
163  const Uint32 nullable = AttributeDescriptor::getNullable(ad);
164  const Uint32 csNumber = req->extTypeInfo >> 16;
165  const Uint32 primaryAttrId = req->primaryAttrId;
166 
167  DescHead& descHead = getDescHead(*indexPtr.p);
168  // add type to spec
169  KeySpec& keySpec = indexPtr.p->m_keySpec;
170  KeyType keyType(typeId, sizeInBytes, nullable, csNumber);
171  if (keySpec.add(keyType) == -1) {
172  jam();
173  errorCode = TuxAddAttrRef::InvalidAttributeType;
174  break;
175  }
176  // add primary attr to read keys array
177  AttributeHeader* keyAttrs = getKeyAttrs(descHead);
178  AttributeHeader& keyAttr = keyAttrs[attrId];
179  new (&keyAttr) AttributeHeader(primaryAttrId, sizeInBytes);
180 #ifdef VM_TRACE
181  if (debugFlags & DebugMeta) {
182  debugOut << "attr " << attrId << " " << keyType << endl;
183  }
184 #endif
185  if (csNumber != 0) {
186  unsigned err;
187  CHARSET_INFO *cs = all_charsets[csNumber];
188  ndbrequire(cs != 0);
189  if ((err = NdbSqlUtil::check_column_for_ordered_index(typeId, cs))) {
190  jam();
191  errorCode = (TuxAddAttrRef::ErrorCode) err;
192  break;
193  }
194  }
195  const bool lastAttr = (indexPtr.p->m_numAttrs == fragOpPtr.p->m_numAttrsRecvd);
196  if ((ERROR_INSERTED(12003) && attrId == 0) ||
197  (ERROR_INSERTED(12004) && lastAttr))
198  {
199  errorCode = (TuxAddAttrRef::ErrorCode)1;
200  CLEAR_ERROR_INSERT_VALUE;
201  break;
202  }
203  if (lastAttr) {
204  // compute min prefix
205  const KeySpec& keySpec = indexPtr.p->m_keySpec;
206  unsigned attrs = 0;
207  unsigned bytes = keySpec.get_nullmask_len(false);
208  unsigned maxAttrs = indexPtr.p->m_numAttrs;
209 #ifdef VM_TRACE
210  {
211  const char* p = NdbEnv_GetEnv("MAX_TTREE_PREF_ATTRS", (char*)0, 0);
212  if (p != 0 && p[0] != 0 && maxAttrs > (unsigned)atoi(p))
213  maxAttrs = atoi(p);
214  }
215 #endif
216  while (attrs < maxAttrs) {
217  const KeyType& keyType = keySpec.get_type(attrs);
218  const unsigned newbytes = bytes + keyType.get_byte_size();
219  if (newbytes > (MAX_TTREE_PREF_SIZE << 2))
220  break;
221  attrs++;
222  bytes = newbytes;
223  }
224  if (attrs == 0)
225  bytes = 0;
226  indexPtr.p->m_prefAttrs = attrs;
227  indexPtr.p->m_prefBytes = bytes;
228  // fragment is defined
229 #ifdef VM_TRACE
230  if (debugFlags & DebugMeta) {
231  debugOut << "Release frag op " << fragOpPtr.i << " " << *fragOpPtr.p << endl;
232  }
233 #endif
234  c_fragOpPool.release(fragOpPtr);
235  }
236  // success
237  TuxAddAttrConf* conf = (TuxAddAttrConf*)signal->getDataPtrSend();
238  conf->userPtr = fragOpPtr.p->m_userPtr;
239  conf->lastAttr = lastAttr;
240  sendSignal(fragOpPtr.p->m_userRef, GSN_TUX_ADD_ATTRCONF,
241  signal, TuxAddAttrConf::SignalLength, JBB);
242  return;
243  } while (0);
244  // error
245  TuxAddAttrRef* ref = (TuxAddAttrRef*)signal->getDataPtrSend();
246  ref->userPtr = fragOpPtr.p->m_userPtr;
247  ref->errorCode = errorCode;
248  sendSignal(fragOpPtr.p->m_userRef, GSN_TUX_ADD_ATTRREF,
249  signal, TuxAddAttrRef::SignalLength, JBB);
250 #ifdef VM_TRACE
251  if (debugFlags & DebugMeta) {
252  debugOut << "Release on attr error frag op " << fragOpPtr.i << " " << *fragOpPtr.p << endl;
253  }
254 #endif
255  // let DICT drop the unfinished index
256 }
257 
258 void
259 Dbtux::execTUXFRAGREQ(Signal* signal)
260 {
261  jamEntry();
262 
263  if (signal->theData[0] == (Uint32)-1) {
264  jam();
265  abortAddFragOp(signal);
266  return;
267  }
268 
269  const TuxFragReq reqCopy = *(const TuxFragReq*)signal->getDataPtr();
270  const TuxFragReq* const req = &reqCopy;
271  IndexPtr indexPtr;
272  indexPtr.i = RNIL;
273  TuxFragRef::ErrorCode errorCode = TuxFragRef::NoError;
274  do {
275  // get the index record
276  if (req->tableId >= c_indexPool.getSize()) {
277  jam();
278  errorCode = TuxFragRef::InvalidRequest;
279  break;
280  }
281  c_indexPool.getPtr(indexPtr, req->tableId);
282  if (false && indexPtr.p->m_state != Index::Defining) {
283  jam();
284  errorCode = TuxFragRef::InvalidRequest;
285  indexPtr.i = RNIL; // leave alone
286  break;
287  }
288 
289  // check if index has place for more fragments
290  ndbrequire(indexPtr.p->m_numFrags < MaxIndexFragments);
291  // seize new fragment record
292  if (ERROR_INSERTED(12008))
293  {
294  CLEAR_ERROR_INSERT_VALUE;
295  errorCode = TuxFragRef::InvalidRequest;
296  break;
297  }
298 
299  FragPtr fragPtr;
300  c_fragPool.seize(fragPtr);
301  if (fragPtr.i == RNIL) {
302  jam();
303  errorCode = TuxFragRef::NoFreeFragment;
304  break;
305  }
306  new (fragPtr.p) Frag(c_scanOpPool);
307  fragPtr.p->m_tableId = req->primaryTableId;
308  fragPtr.p->m_indexId = req->tableId;
309  fragPtr.p->m_fragId = req->fragId;
310  fragPtr.p->m_tupIndexFragPtrI = req->tupIndexFragPtrI;
311  fragPtr.p->m_tupTableFragPtrI = req->tupTableFragPtrI;
312  fragPtr.p->m_accTableFragPtrI = req->accTableFragPtrI;
313  // add the fragment to the index
314  Uint32 fragNo = indexPtr.p->m_numFrags;
315  indexPtr.p->m_fragId[indexPtr.p->m_numFrags] = req->fragId;
316  indexPtr.p->m_fragPtrI[indexPtr.p->m_numFrags] = fragPtr.i;
317  indexPtr.p->m_numFrags++;
318 #ifdef VM_TRACE
319  if (debugFlags & DebugMeta) {
320  debugOut << "Add frag " << fragPtr.i << " " << *fragPtr.p << endl;
321  }
322 #endif
323  // error inserts
324  if ((ERROR_INSERTED(12001) && fragNo == 0) ||
325  (ERROR_INSERTED(12002) && fragNo == 1)) {
326  jam();
327  errorCode = (TuxFragRef::ErrorCode)1;
328  CLEAR_ERROR_INSERT_VALUE;
329  break;
330  }
331 
332  // initialize tree header
333  TreeHead& tree = fragPtr.p->m_tree;
334  new (&tree) TreeHead();
335  // make these configurable later
336  tree.m_nodeSize = MAX_TTREE_NODE_SIZE;
337  tree.m_prefSize = (indexPtr.p->m_prefBytes + 3) / 4;
338  const unsigned maxSlack = MAX_TTREE_NODE_SLACK;
339  // size of header and min prefix
340  const unsigned fixedSize = NodeHeadSize + tree.m_prefSize;
341  if (! (fixedSize <= tree.m_nodeSize)) {
342  jam();
343  errorCode = (TuxFragRef::ErrorCode)TuxAddAttrRef::InvalidNodeSize;
344  break;
345  }
346  const unsigned slots = (tree.m_nodeSize - fixedSize) / TreeEntSize;
347  tree.m_maxOccup = slots;
348  // min occupancy of interior node must be at least 2
349  if (! (2 + maxSlack <= tree.m_maxOccup)) {
350  jam();
351  errorCode = (TuxFragRef::ErrorCode)TuxAddAttrRef::InvalidNodeSize;
352  break;
353  }
354  tree.m_minOccup = tree.m_maxOccup - maxSlack;
355  // root node does not exist (also set by ctor)
356  tree.m_root = NullTupLoc;
357 #ifdef VM_TRACE
358  if (debugFlags & DebugMeta) {
359  if (fragNo == 0) {
360  debugOut << "Index id=" << indexPtr.i;
361  debugOut << " nodeSize=" << tree.m_nodeSize;
362  debugOut << " headSize=" << NodeHeadSize;
363  debugOut << " prefSize=" << tree.m_prefSize;
364  debugOut << " entrySize=" << TreeEntSize;
365  debugOut << " minOccup=" << tree.m_minOccup;
366  debugOut << " maxOccup=" << tree.m_maxOccup;
367  debugOut << endl;
368  }
369  }
370 #endif
371 
372  // success
373  TuxFragConf* const conf = (TuxFragConf*)signal->getDataPtrSend();
374  conf->userPtr = req->userPtr;
375  conf->tuxConnectPtr = RNIL;
376  conf->fragPtr = fragPtr.i;
377  conf->fragId = fragPtr.p->m_fragId;
378  sendSignal(req->userRef, GSN_TUXFRAGCONF,
379  signal, TuxFragConf::SignalLength, JBB);
380  return;
381  } while (0);
382 
383  // error
384  TuxFragRef* const ref = (TuxFragRef*)signal->getDataPtrSend();
385  ref->userPtr = req->userPtr;
386  ref->errorCode = errorCode;
387  sendSignal(req->userRef, GSN_TUXFRAGREF,
388  signal, TuxFragRef::SignalLength, JBB);
389 
390  if (indexPtr.i != RNIL) {
391  jam();
392  // let DICT drop the unfinished index
393  }
394 }
395 
396 /*
397  * LQH aborts on-going create index operation.
398  */
399 void
400 Dbtux::abortAddFragOp(Signal* signal)
401 {
402  FragOpPtr fragOpPtr;
403  IndexPtr indexPtr;
404  c_fragOpPool.getPtr(fragOpPtr, signal->theData[1]);
405  c_indexPool.getPtr(indexPtr, fragOpPtr.p->m_indexId);
406 #ifdef VM_TRACE
407  if (debugFlags & DebugMeta) {
408  debugOut << "Release on abort frag op " << fragOpPtr.i << " " << *fragOpPtr.p << endl;
409  }
410 #endif
411  c_fragOpPool.release(fragOpPtr);
412  // let DICT drop the unfinished index
413 }
414 
415 /*
416  * Set index online. Currently at system restart this arrives before
417  * build and is therefore not correct.
418  */
419 void
420 Dbtux::execALTER_INDX_IMPL_REQ(Signal* signal)
421 {
422  jamEntry();
423  const AlterIndxImplReq reqCopy = *(const AlterIndxImplReq*)signal->getDataPtr();
424  const AlterIndxImplReq* const req = &reqCopy;
425 
426  IndexPtr indexPtr;
427  c_indexPool.getPtr(indexPtr, req->indexId);
428 
429  //Uint32 save = indexPtr.p->m_state;
430  if (! (refToBlock(req->senderRef) == DBDICT) &&
431  ! (isNdbMt() && refToMain(req->senderRef) == DBTUX &&
432  refToInstance(req->senderRef) == 0))
433  {
438  jam();
439  switch(req->requestType){
440  case AlterIndxImplReq::AlterIndexOffline:
441  jam();
442  /*
443  * This happens at failed index build, and before dropping an
444  * Online index. It causes scans to terminate.
445  */
446  indexPtr.p->m_state = Index::Dropping;
447  break;
448  case AlterIndxImplReq::AlterIndexBuilding:
449  jam();
450  indexPtr.p->m_state = Index::Building;
451  break;
452  default:
453  jam(); // fall-through
454  case AlterIndxImplReq::AlterIndexOnline:
455  jam();
456  indexPtr.p->m_state = Index::Online;
457  break;
458  }
459  }
460 
461  // success
462  AlterIndxImplConf* const conf = (AlterIndxImplConf*)signal->getDataPtrSend();
463  conf->senderRef = reference();
464  conf->senderData = req->senderData;
465  if (req->senderRef != 0)
466  {
471  jam();
472  sendSignal(req->senderRef, GSN_ALTER_INDX_IMPL_CONF,
473  signal, AlterIndxImplConf::SignalLength, JBB);
474  }
475 }
476 
477 /*
478  * Drop index.
479  *
480  * Uses same DROP_TAB_REQ signal as normal tables.
481  */
482 
483 void
484 Dbtux::execDROP_TAB_REQ(Signal* signal)
485 {
486  jamEntry();
487  const DropTabReq reqCopy = *(const DropTabReq*)signal->getDataPtr();
488  const DropTabReq* const req = &reqCopy;
489  IndexPtr indexPtr;
490 
491  Uint32 tableId = req->tableId;
492  Uint32 senderRef = req->senderRef;
493  Uint32 senderData = req->senderData;
494  if (tableId >= c_indexPool.getSize()) {
495  jam();
496  // reply to sender
497  DropTabConf* const conf = (DropTabConf*)signal->getDataPtrSend();
498  conf->senderRef = reference();
499  conf->senderData = senderData;
500  conf->tableId = tableId;
501  sendSignal(senderRef, GSN_DROP_TAB_CONF,
502  signal, DropTabConf::SignalLength, JBB);
503  return;
504  }
505 
506  c_indexPool.getPtr(indexPtr, req->tableId);
507  // drop works regardless of index state
508 #ifdef VM_TRACE
509  if (debugFlags & DebugMeta) {
510  debugOut << "Drop index " << indexPtr.i << " " << *indexPtr.p << endl;
511  }
512 #endif
513  ndbrequire(req->senderRef != 0);
514  dropIndex(signal, indexPtr, req->senderRef, req->senderData);
515 }
516 
517 void
518 Dbtux::dropIndex(Signal* signal, IndexPtr indexPtr, Uint32 senderRef, Uint32 senderData)
519 {
520  jam();
521  /*
522  * Index state should be Defining or Dropping but in 7.0 it can also
523  * be NotDefined (due to double call). The Index record is always
524  * consistent regardless of state so there is no state assert here.
525  */
526  // drop fragments
527  while (indexPtr.p->m_numFrags > 0) {
528  jam();
529  Uint32 i = --indexPtr.p->m_numFrags;
530  FragPtr fragPtr;
531  c_fragPool.getPtr(fragPtr, indexPtr.p->m_fragPtrI[i]);
532  /*
533  * Verify that LQH has terminated scans. (If not, then drop order
534  * must change from TUP,TUX to TUX,TUP and we must wait for scans).
535  */
536  ScanOpPtr scanPtr;
537  bool b = fragPtr.p->m_scanList.first(scanPtr);
538  ndbrequire(!b);
539  c_fragPool.release(fragPtr);
540  }
541  // drop attributes
542  if (indexPtr.p->m_descPage != RNIL) {
543  jam();
544  freeDescEnt(indexPtr);
545  indexPtr.p->m_descPage = RNIL;
546  }
547  if (senderRef != 0) {
548  jam();
549  // reply to sender
550  DropTabConf* const conf = (DropTabConf*)signal->getDataPtrSend();
551  conf->senderRef = reference();
552  conf->senderData = senderData;
553  conf->tableId = indexPtr.i;
554  sendSignal(senderRef, GSN_DROP_TAB_CONF,
555  signal, DropTabConf::SignalLength, JBB);
556  }
557  new (indexPtr.p) Index();
558 }
559 
560 /*
561  * Subroutines.
562  */
563 
564 bool
565 Dbtux::allocDescEnt(IndexPtr indexPtr)
566 {
567  jam();
568  const Uint32 size = getDescSize(*indexPtr.p);
569  DescPagePtr pagePtr;
570  pagePtr.i = c_descPageList;
571  while (pagePtr.i != RNIL) {
572  jam();
573  c_descPagePool.getPtr(pagePtr);
574  if (pagePtr.p->m_numFree >= size) {
575  jam();
576  break;
577  }
578  pagePtr.i = pagePtr.p->m_nextPage;
579  }
580  if (pagePtr.i == RNIL) {
581  jam();
582  if (! c_descPagePool.seize(pagePtr)) {
583  jam();
584  return false;
585  }
586  new (pagePtr.p) DescPage();
587  // add in front of list
588  pagePtr.p->m_nextPage = c_descPageList;
589  c_descPageList = pagePtr.i;
590  pagePtr.p->m_numFree = DescPageSize;
591  }
592  ndbrequire(pagePtr.p->m_numFree >= size);
593  indexPtr.p->m_descPage = pagePtr.i;
594  indexPtr.p->m_descOff = DescPageSize - pagePtr.p->m_numFree;
595  pagePtr.p->m_numFree -= size;
596  DescHead& descHead = *(DescHead*)&pagePtr.p->m_data[indexPtr.p->m_descOff];
597  descHead.m_indexId = indexPtr.i;
598  descHead.m_numAttrs = indexPtr.p->m_numAttrs;
599  descHead.m_magic = DescHead::Magic;
600  KeySpec& keySpec = indexPtr.p->m_keySpec;
601  KeyType* keyTypes = getKeyTypes(descHead);
602  keySpec.set_buf(keyTypes, indexPtr.p->m_numAttrs);
603  return true;
604 }
605 
606 void
607 Dbtux::freeDescEnt(IndexPtr indexPtr)
608 {
609  DescPagePtr pagePtr;
610  c_descPagePool.getPtr(pagePtr, indexPtr.p->m_descPage);
611  Uint32* const data = pagePtr.p->m_data;
612  const Uint32 size = getDescSize(*indexPtr.p);
613  Uint32 off = indexPtr.p->m_descOff;
614  // move the gap to the free area at the top
615  while (off + size < DescPageSize - pagePtr.p->m_numFree) {
616  jam();
617  // next entry to move over the gap
618  DescHead& descHead2 = *(DescHead*)&data[off + size];
619  Uint32 indexId2 = descHead2.m_indexId;
620  Index& index2 = *c_indexPool.getPtr(indexId2);
621  Uint32 size2 = getDescSize(index2);
622  ndbrequire(
623  index2.m_descPage == pagePtr.i &&
624  index2.m_descOff == off + size &&
625  index2.m_numAttrs == descHead2.m_numAttrs);
626  // move the entry (overlapping copy if size < size2)
627  Uint32 i;
628  for (i = 0; i < size2; i++) {
629  jam();
630  data[off + i] = data[off + size + i];
631  }
632  off += size2;
633  // adjust page offset in index
634  index2.m_descOff -= size;
635  {
636  // move KeySpec pointer
637  DescHead& descHead2 = getDescHead(index2);
638  KeyType* keyType2 = getKeyTypes(descHead2);
639  index2.m_keySpec.set_buf(keyType2);
640  ndbrequire(index2.m_keySpec.validate() == 0);
641  }
642  }
643  ndbrequire(off + size == DescPageSize - pagePtr.p->m_numFree);
644  pagePtr.p->m_numFree += size;
645 }
646 
647 void
648 Dbtux::execDROP_FRAG_REQ(Signal* signal)
649 {
650  DropFragReq copy = *(DropFragReq*)signal->getDataPtr();
651  DropFragReq *req = &copy;
652 
653  IndexPtr indexPtr;
654  c_indexPool.getPtr(indexPtr, req->tableId);
655  Uint32 i = 0;
656  for (i = 0; i < indexPtr.p->m_numFrags; i++)
657  {
658  jam();
659  if (indexPtr.p->m_fragId[i] == req->fragId)
660  {
661  jam();
662  FragPtr fragPtr;
663  c_fragPool.getPtr(fragPtr, indexPtr.p->m_fragPtrI[i]);
664  c_fragPool.release(fragPtr);
665 
666  for (i++; i < indexPtr.p->m_numFrags; i++)
667  {
668  jam();
669  indexPtr.p->m_fragPtrI[i-1] = indexPtr.p->m_fragPtrI[i];
670  indexPtr.p->m_fragId[i-1] = indexPtr.p->m_fragId[i];
671  }
672  indexPtr.p->m_numFrags--;
673  break;
674  }
675  }
676 
677 
678  // reply to sender
679  DropFragConf* const conf = (DropFragConf*)signal->getDataPtrSend();
680  conf->senderRef = reference();
681  conf->senderData = req->senderData;
682  conf->tableId = req->tableId;
683  sendSignal(req->senderRef, GSN_DROP_FRAG_CONF,
684  signal, DropFragConf::SignalLength, JBB);
685 }