MySQL 5.6.14 Source Code Document
 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Groups Pages
testLimits.cpp
1 /* Copyright (C) 2008 MySQL AB, 2008, 2009 Sun Microsystems, Inc.
2  All rights reserved. Use is subject to license terms.
3 
4  This program is free software; you can redistribute it and/or modify
5  it under the terms of the GNU General Public License as published by
6  the Free Software Foundation; version 2 of the License.
7 
8  This program is distributed in the hope that it will be useful,
9  but WITHOUT ANY WARRANTY; without even the implied warranty of
10  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11  GNU General Public License for more details.
12 
13  You should have received a copy of the GNU General Public License
14  along with this program; if not, write to the Free Software
15  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */
16 
17 #include <NDBT.hpp>
18 #include <NDBT_Test.hpp>
19 #include <NdbRestarter.hpp>
20 
/* Fail the current testcase if pointer expression 'p' is NULL:
 * reports the failing source line, dumps the NDB error from the
 * transaction, closes the transaction and returns NDBT_FAILED from
 * the enclosing function.
 * NOTE: both macros rely on a local NdbTransaction* named 'trans'
 * being in scope at every use site, and both perform an early
 * 'return NDBT_FAILED' - they are only usable inside int-returning
 * testcase functions.
 */
#define CHECKNOTNULL(p) if ((p) == NULL) { \
  ndbout << "Error at line " << __LINE__ << endl; \
  ERR(trans->getNdbError()); \
  trans->close(); \
  return NDBT_FAILED; }

/* Fail the current testcase if expression 'e' does not evaluate to
 * the expected value 'v' (same reporting / cleanup / early-return
 * behaviour as CHECKNOTNULL above).
 */
#define CHECKEQUAL(v, e) if ((e) != (v)) { \
  ndbout << "Error at line " << __LINE__ << \
  " expected " << v << endl; \
  ERR(trans->getNdbError()); \
  trans->close(); \
  return NDBT_FAILED; }
33 
34 
/* Setup memory as a long Varchar with 2 bytes of
 * length information (little-endian), followed by the data bytes.
 *
 * where : destination buffer, must have room for sz + 2 bytes
 * what  : source bytes to copy
 * sz    : number of data bytes (must fit in 16 bits)
 *
 * Returns the total number of bytes written (sz + 2).
 */
Uint32 setLongVarchar(char* where, const char* what, Uint32 sz)
{
  /* Explicit casts: assigning an int in the 0..255 range to a
   * (possibly signed) char is otherwise an implementation-defined
   * narrowing conversion and triggers compiler warnings.
   */
  where[0]= (char)(sz & 0xff);
  where[1]= (char)((sz >> 8) & 0xff);
  memcpy(&where[2], what, sz);
  return (sz + 2);
}
45 
46 
/* Activate the given error insert in TC block
 * This is used for error insertion where a TCKEYREQ
 * is required to activate the error
 *
 * trans     : open transaction; also used implicitly by the CHECK*
 *             macros for error reporting and cleanup
 * record    : NdbRecord describing the layout of 'buf'
 * tab       : table to insert into
 * buf       : row buffer supplying key (attrid 0) and data (attrid 1)
 * restarter : used to fire the error insert in all data nodes
 * val       : error insert code to activate
 *
 * Returns NDBT_OK / NDBT_FAILED.  On success the transaction is left
 * open (executed NoCommit) for the caller to continue or roll back.
 */
int activateErrorInsert(NdbTransaction* trans,
                        const NdbRecord* record,
                        const NdbDictionary::Table* tab,
                        const char* buf,
                        NdbRestarter* restarter,
                        Uint32 val)
{
  /* We insert the error twice to avoid what appear to be
   * races between the error insert and the subsequent
   * tests
   * Alternatively we could sleep here.
   */
  if (restarter->insertErrorInAllNodes(val) != 0){
    g_err << "error insert 1 (" << val << ") failed" << endl;
    return NDBT_FAILED;
  }
  if (restarter->insertErrorInAllNodes(val) != 0){
    g_err << "error insert 2 (" << val << ") failed" << endl;
    return NDBT_FAILED;
  }

  /* Build an insert via the old (NdbOperation) Api; the TCKEYREQ it
   * generates on execute() is what arms the error insert in TC.
   */
  NdbOperation* insert= trans->getNdbOperation(tab);

  CHECKNOTNULL(insert);

  CHECKEQUAL(0, insert->insertTuple());

  /* Key value taken from attrid 0 of the row buffer */
  CHECKEQUAL(0, insert->equal((Uint32) 0,
                              NdbDictionary::getValuePtr
                              (record,
                               buf,
                               0)));
  /* Data value taken from attrid 1 of the row buffer */
  CHECKEQUAL(0, insert->setValue(1,
                                 NdbDictionary::getValuePtr
                                 (record,
                                  buf,
                                  1)));

  /* Execute without committing so the transaction (and its TC
   * association) stays usable by the caller.
   */
  CHECKEQUAL(0, trans->execute(NdbTransaction::NoCommit));

  CHECKEQUAL(0, trans->getNdbError().code);

  return NDBT_OK;
}
95 
96 /* Test for correct behaviour using primary key operations
97  * when an NDBD node's SegmentedSection pool is exhausted.
98  */
99 int testSegmentedSectionPk(NDBT_Context* ctx, NDBT_Step* step){
100  /*
101  * Signal type Exhausted @ How
102  * -----------------------------------------------------
103  * Long TCKEYREQ Initial import Consume + send
104  * Long TCKEYREQ Initial import, not first
105  * TCKEYREQ in batch Consume + send
106  * Long TCKEYREQ Initial import, not last
107  * TCKEYREQ in batch Consume + send
108  * No testing of short TCKEYREQ variants as they cannot be
109  * generated in mysql-5.1-telco-6.4+
110  * TODO : Add short variant testing to testUpgrade.
111  */
112 
113  /* We just run on one table */
114  if (strcmp(ctx->getTab()->getName(), "WIDE_2COL") != 0)
115  return NDBT_OK;
116 
117  const Uint32 maxRowBytes= NDB_MAX_TUPLE_SIZE_IN_WORDS * sizeof(Uint32);
118  const Uint32 srcBuffBytes= NDBT_Tables::MaxVarTypeKeyBytes;
119  const Uint32 maxAttrBytes= NDBT_Tables::MaxKeyMaxVarTypeAttrBytes;
120  char smallKey[50];
121  char srcBuff[srcBuffBytes];
122  char smallRowBuf[maxRowBytes];
123  char bigKeyRowBuf[maxRowBytes];
124  char bigAttrRowBuf[maxRowBytes];
125 
126  /* Small key for hinting to same TC */
127  Uint32 smallKeySize= setLongVarchar(&smallKey[0],
128  "ShortKey",
129  8);
130 
131  /* Large value source */
132  memset(srcBuff, 'B', srcBuffBytes);
133 
134  const NdbRecord* record= ctx->getTab()->getDefaultRecord();
135 
136  /* Setup buffers
137  * Small row buffer with small key and small data
138  */
139  setLongVarchar(NdbDictionary::getValuePtr(record,
140  smallRowBuf,
141  0),
142  "ShortKey",
143  8);
144  NdbDictionary::setNull(record, smallRowBuf, 0, false);
145 
146  setLongVarchar(NdbDictionary::getValuePtr(record,
147  smallRowBuf,
148  1),
149  "ShortData",
150  9);
151  NdbDictionary::setNull(record, smallRowBuf, 1, false);
152 
153  /* Big key buffer with big key and small data*/
154  setLongVarchar(NdbDictionary::getValuePtr(record,
155  bigKeyRowBuf,
156  0),
157  &srcBuff[0],
158  srcBuffBytes);
159  NdbDictionary::setNull(record, bigKeyRowBuf, 0, false);
160 
161  setLongVarchar(NdbDictionary::getValuePtr(record,
162  bigKeyRowBuf,
163  1),
164  "ShortData",
165  9);
166  NdbDictionary::setNull(record, bigKeyRowBuf, 1, false);
167 
168  /* Big AttrInfo buffer with small key and big data */
169  setLongVarchar(NdbDictionary::getValuePtr(record,
170  bigAttrRowBuf,
171  0),
172  "ShortKey",
173  8);
174  NdbDictionary::setNull(record, bigAttrRowBuf, 0, false);
175 
176  setLongVarchar(NdbDictionary::getValuePtr(record,
177  bigAttrRowBuf,
178  1),
179  &srcBuff[0],
180  maxAttrBytes);
181  NdbDictionary::setNull(record, bigAttrRowBuf, 1, false);
182 
183  NdbRestarter restarter;
184  Ndb* pNdb= GETNDB(step);
185 
186  /* Start a transaction on a specific node */
187  NdbTransaction* trans= pNdb->startTransaction(ctx->getTab(),
188  &smallKey[0],
189  smallKeySize);
190  CHECKNOTNULL(trans);
191 
192  /* Activate error insert 8065 in this transaction, limits
193  * any single import/append to 1 section
194  */
195  CHECKEQUAL(NDBT_OK, activateErrorInsert(trans,
196  record,
197  ctx->getTab(),
198  smallRowBuf,
199  &restarter,
200  8065));
201 
202  /* Ok, let's try an insert with a key bigger than 1 section.
203  * Since it's part of the same transaction, it'll go via
204  * the same TC.
205  */
206  const NdbOperation* bigInsert = trans->insertTuple(record, bigKeyRowBuf);
207 
208  CHECKNOTNULL(bigInsert);
209 
210  CHECKEQUAL(-1, trans->execute(NdbTransaction::NoCommit));
211 
212  /* ZGET_DATABUF_ERR expected */
213  CHECKEQUAL(218, trans->getNdbError().code)
214 
215  trans->close();
216 
217  /* Ok, now a long TCKEYREQ to the same TC - this
218  * has slightly different abort handling since no other
219  * operations exist in this new transaction.
220  * We also change it so that import overflow occurs
221  * on the AttrInfo section
222  */
223  /* Start transaction on the same node */
224  CHECKNOTNULL(trans= pNdb->startTransaction(ctx->getTab(),
225  &smallKey[0],
226  smallKeySize));
227 
228 
229  CHECKNOTNULL(bigInsert = trans->insertTuple(record, bigAttrRowBuf));
230 
231  CHECKEQUAL(-1,trans->execute(NdbTransaction::NoCommit));
232 
233  /* ZGET_DATABUF_ERR expected */
234  CHECKEQUAL(218, trans->getNdbError().code);
235 
236  trans->close();
237 
238  /* Ok, now a long TCKEYREQ where we run out of SegmentedSections
239  * on the first TCKEYREQ, but there are other TCKEYREQs following
240  * in the same batch. Check that abort handling is correct
241  */
242  /* Start transaction on the same node */
243  CHECKNOTNULL(trans= pNdb->startTransaction(ctx->getTab(),
244  &smallKey[0],
245  smallKeySize));
246  /* First op in batch, will cause overflow */
247  CHECKNOTNULL(bigInsert = trans->insertTuple(record, bigAttrRowBuf));
248 
249  /* Second op in batch, what happens to it? */
250  const NdbOperation* secondOp;
251  CHECKNOTNULL(secondOp = trans->insertTuple(record, bigAttrRowBuf));
252 
253 
254  CHECKEQUAL(-1,trans->execute(NdbTransaction::NoCommit));
255 
256  /* ZGET_DATABUF_ERR expected */
257  CHECKEQUAL(218, trans->getNdbError().code);
258 
259  trans->close();
260 
261  /* Now try with a 'short' TCKEYREQ, generated using the old Api
262  * with a big key value
263  */
264  /* Start transaction on the same node */
265  CHECKNOTNULL(trans= pNdb->startTransaction(ctx->getTab(),
266  &smallKey[0],
267  smallKeySize));
268 
269  NdbOperation* bigInsertOldApi;
270  CHECKNOTNULL(bigInsertOldApi= trans->getNdbOperation(ctx->getTab()));
271 
272  CHECKEQUAL(0, bigInsertOldApi->insertTuple());
273  CHECKEQUAL(0, bigInsertOldApi->equal((Uint32)0,
274  NdbDictionary::getValuePtr
275  (record,
276  bigKeyRowBuf,
277  0)));
278  CHECKEQUAL(0, bigInsertOldApi->setValue(1,
279  NdbDictionary::getValuePtr
280  (record,
281  bigKeyRowBuf,
282  1)));
283 
284  CHECKEQUAL(-1, trans->execute(NdbTransaction::NoCommit));
285 
286  /* ZGET_DATABUF_ERR expected */
287  CHECKEQUAL(218, trans->getNdbError().code)
288 
289  trans->close();
290 
291  /* Now try with a 'short' TCKEYREQ, generated using the old Api
292  * with a big data value
293  */
294  CHECKNOTNULL(trans= pNdb->startTransaction(ctx->getTab(),
295  &smallKey[0],
296  smallKeySize));
297 
298  CHECKNOTNULL(bigInsertOldApi= trans->getNdbOperation(ctx->getTab()));
299 
300  CHECKEQUAL(0, bigInsertOldApi->insertTuple());
301  CHECKEQUAL(0, bigInsertOldApi->equal((Uint32)0,
302  NdbDictionary::getValuePtr
303  (record,
304  bigAttrRowBuf,
305  0)));
306  CHECKEQUAL(0, bigInsertOldApi->setValue(1,
307  NdbDictionary::getValuePtr
308  (record,
309  bigAttrRowBuf,
310  1)));
311 
312  CHECKEQUAL(-1, trans->execute(NdbTransaction::NoCommit));
313 
314  /* ZGET_DATABUF_ERR expected */
315  CHECKEQUAL(218, trans->getNdbError().code)
316 
317  trans->close();
318 
319  // TODO : Add code to testUpgrade
320 #if 0
321  /*
322  * Short TCKEYREQ KeyInfo accumulate Consume + send long
323  * (TCKEYREQ + KEYINFO)
324  * Short TCKEYREQ AttrInfo accumulate Consume + send short key
325  * + long AI
326  * (TCKEYREQ + ATTRINFO)
327  */
328  /* Change error insert so that next TCKEYREQ will grab
329  * all but one SegmentedSection so that we can then test SegmentedSection
330  * exhaustion when importing the Key/AttrInfo words from the
331  * TCKEYREQ signal itself.
332  */
333  restarter.insertErrorInAllNodes(8066);
334 
335 
336  /* Now a 'short' TCKEYREQ, there will be space to import the
337  * short key, but not the AttrInfo
338  */
339  /* Start transaction on same node */
340  CHECKNOTNULL(trans= pNdb->startTransaction(ctx->getTab(),
341  &smallKey[0],
342  smallKeySize));
343 
344  CHECKNOTNULL(bigInsertOldApi= trans->getNdbOperation(ctx->getTab()));
345 
346  CHECKEQUAL(0, bigInsertOldApi->insertTuple());
347  CHECKEQUAL(0, bigInsertOldApi->equal((Uint32)0,
348  NdbDictionary::getValuePtr
349  (record,
350  smallRowBuf,
351  0)));
352  CHECKEQUAL(0, bigInsertOldApi->setValue(1, NdbDictionary::getValuePtr
353  (record,
354  smallRowBuf,
355  1)));
356 
357  CHECKEQUAL(-1, trans->execute(NdbTransaction::NoCommit));
358 
359  /* ZGET_DATABUF_ERR expected */
360  CHECKEQUAL(218, trans->getNdbError().code)
361 
362  trans->close();
363 
364  /* Change error insert so that there are no SectionSegments
365  * This will cause failure when attempting to import the
366  * KeyInfo from the TCKEYREQ
367  */
368  restarter.insertErrorInAllNodes(8067);
369 
370  /* Now a 'short' TCKEYREQ - there will be no space to import the key */
371  CHECKNOTNULL(trans= pNdb->startTransaction(ctx->getTab(),
372  &smallKey[0],
373  smallKeySize));
374 
375  CHECKNOTNULL(bigInsertOldApi= trans->getNdbOperation(ctx->getTab()));
376 
377  CHECKEQUAL(0, bigInsertOldApi->insertTuple());
378  CHECKEQUAL(0, bigInsertOldApi->equal((Uint32)0,
379  NdbDictionary::getValuePtr
380  (record,
381  smallRowBuf,
382  0)));
383  CHECKEQUAL(0, bigInsertOldApi->setValue(1,
384  NdbDictionary::getValuePtr
385  (record,
386  smallRowBuf,
387  1)));
388 
389  CHECKEQUAL(-1, trans->execute(NdbTransaction::NoCommit));
390 
391  /* ZGET_DATABUF_ERR expected */
392  CHECKEQUAL(218, trans->getNdbError().code)
393 
394  trans->close();
395 #endif
396 
397  /* Finished with error insert, cleanup the error insertion
398  * Error insert 8068 will free the hoarded segments
399  */
400  CHECKNOTNULL(trans= pNdb->startTransaction(ctx->getTab(),
401  &smallKey[0],
402  smallKeySize));
403 
404  CHECKEQUAL(NDBT_OK, activateErrorInsert(trans,
405  record,
406  ctx->getTab(),
407  smallRowBuf,
408  &restarter,
409  8068));
410 
412 
413  CHECKEQUAL(0, trans->getNdbError().code);
414 
415  trans->close();
416 
417  return NDBT_OK;
418 }
419 
420 /* Test for correct behaviour using unique key operations
421  * when an NDBD node's SegmentedSection pool is exhausted.
422  */
423 int testSegmentedSectionIx(NDBT_Context* ctx, NDBT_Step* step){
424  /*
425  * Signal type Exhausted @ How
426  * -----------------------------------------------------
427  * Long TCINDXREQ Initial import Consume + send
428  * Long TCINDXREQ Build second TCKEYREQ Consume + send short
429  * w. long base key
430  */
431  /* We will generate :
432  * 10 SS left :
433  * Long IndexReq with too long Key/AttrInfo
434  * 1 SS left :
435  * Long IndexReq read with short Key + Attrinfo to long
436  * base table Key
437  */
438  /* We just run on one table */
439  if (strcmp(ctx->getTab()->getName(), "WIDE_2COL_IX") != 0)
440  return NDBT_OK;
441 
442  const char* indexName= "WIDE_2COL_IX$NDBT_IDX0";
443  const Uint32 maxRowBytes= NDB_MAX_TUPLE_SIZE_IN_WORDS * sizeof(Uint32);
444  const Uint32 srcBuffBytes= NDBT_Tables::MaxVarTypeKeyBytes;
445  const Uint32 maxIndexKeyBytes= NDBT_Tables::MaxKeyMaxVarTypeAttrBytesIndex;
446  /* We want to use 6 Segmented Sections, each of 60 32-bit words, including
447  * a 2 byte length overhead
448  * (We don't want to use 10 Segmented Sections as in some scenarios TUP
449  * uses Segmented Sections when sending results, and if we use TUP on
450  * the same node, the exhaustion will occur in TUP, which is not what
451  * we're testing)
452  */
453  const Uint32 mediumPrimaryKeyBytes= (6* 60 * 4) - 2;
454  char smallKey[50];
455  char srcBuff[srcBuffBytes];
456  char smallRowBuf[maxRowBytes];
457  char bigKeyIxBuf[maxRowBytes];
458  char bigAttrIxBuf[maxRowBytes];
459  char bigKeyRowBuf[maxRowBytes];
460  char resultSpace[maxRowBytes];
461 
462  /* Small key for hinting to same TC */
463  Uint32 smallKeySize= setLongVarchar(&smallKey[0],
464  "ShortKey",
465  8);
466 
467  /* Large value source */
468  memset(srcBuff, 'B', srcBuffBytes);
469 
470  Ndb* pNdb= GETNDB(step);
471 
472  const NdbRecord* baseRecord= ctx->getTab()->getDefaultRecord();
473  const NdbRecord* ixRecord= pNdb->
474  getDictionary()->getIndex(indexName,
475  ctx->getTab()->getName())->getDefaultRecord();
476 
477  /* Setup buffers
478  * Small row buffer with short key and data in base table record format
479  */
480  setLongVarchar(NdbDictionary::getValuePtr(baseRecord,
481  smallRowBuf,
482  0),
483  "ShortKey",
484  8);
485  NdbDictionary::setNull(baseRecord, smallRowBuf, 0, false);
486 
487  setLongVarchar(NdbDictionary::getValuePtr(baseRecord,
488  smallRowBuf,
489  1),
490  "ShortData",
491  9);
492  NdbDictionary::setNull(baseRecord, smallRowBuf, 1, false);
493 
494  /* Big index key buffer
495  * Big index key (normal row attribute) in index record format
496  * Index's key is attrid 1 from the base table
497  * This could get confusing !
498  */
499 
500  setLongVarchar(NdbDictionary::getValuePtr(ixRecord,
501  bigKeyIxBuf,
502  1),
503  &srcBuff[0],
504  maxIndexKeyBytes);
505  NdbDictionary::setNull(ixRecord, bigKeyIxBuf, 1, false);
506 
507  /* Big AttrInfo buffer
508  * Small key and large attrinfo in base table record format */
509  setLongVarchar(NdbDictionary::getValuePtr(baseRecord,
510  bigAttrIxBuf,
511  0),
512  "ShortIXKey",
513  10);
514 
515  NdbDictionary::setNull(baseRecord, bigAttrIxBuf, 0, false);
516 
517  setLongVarchar(NdbDictionary::getValuePtr(baseRecord,
518  bigAttrIxBuf,
519  1),
520  &srcBuff[0],
521  maxIndexKeyBytes);
522  NdbDictionary::setNull(baseRecord, bigAttrIxBuf, 1, false);
523 
524  /* Big key row buffer
525  * Medium sized key and small attrinfo (index key) in
526  * base table record format
527  */
528  setLongVarchar(NdbDictionary::getValuePtr(baseRecord,
529  bigKeyRowBuf,
530  0),
531  &srcBuff[0],
532  mediumPrimaryKeyBytes);
533 
534  NdbDictionary::setNull(baseRecord, bigKeyRowBuf, 0, false);
535 
536  setLongVarchar(NdbDictionary::getValuePtr(baseRecord,
537  bigKeyRowBuf,
538  1),
539  "ShortIXKey",
540  10);
541  NdbDictionary::setNull(baseRecord, bigKeyRowBuf, 1, false);
542 
543 
544  /* Start a transaction on a specific node */
545  NdbTransaction* trans= pNdb->startTransaction(ctx->getTab(),
546  &smallKey[0],
547  smallKeySize);
548  /* Insert a row in the base table with a big PK, and
549  * small data (Unique IX key). This is used later to lookup
550  * a big PK and cause overflow when reading TRANSID_AI in TC.
551  */
552  CHECKNOTNULL(trans->insertTuple(baseRecord,
553  bigKeyRowBuf));
554 
555  CHECKEQUAL(0, trans->execute(NdbTransaction::Commit));
556 
557  NdbRestarter restarter;
558  /* Start a transaction on a specific node */
559  trans= pNdb->startTransaction(ctx->getTab(),
560  &smallKey[0],
561  smallKeySize);
562  CHECKNOTNULL(trans);
563 
564  /* Activate error insert 8065 in this transaction, limits any
565  * single append/import to 10 sections.
566  */
567  CHECKEQUAL(NDBT_OK, activateErrorInsert(trans,
568  baseRecord,
569  ctx->getTab(),
570  smallRowBuf,
571  &restarter,
572  8065));
573 
574  /* Ok, let's try an index read with a big index key.
575  * Since it's part of the same transaction, it'll go via
576  * the same TC.
577  */
578  const NdbOperation* bigRead= trans->readTuple(ixRecord,
579  bigKeyIxBuf,
580  baseRecord,
581  resultSpace);
582 
583  CHECKNOTNULL(bigRead);
584 
585  CHECKEQUAL(-1, trans->execute(NdbTransaction::NoCommit));
586 
587  /* ZGET_DATABUF_ERR expected */
588  CHECKEQUAL(218, trans->getNdbError().code)
589 
590  trans->close();
591 
592 
593  /* Ok, now a long TCINDXREQ to the same TC - this
594  * has slightly different abort handling since no other
595  * operations exist in this new transaction.
596  */
597  /* Start a transaction on a specific node */
598  CHECKNOTNULL(trans= pNdb->startTransaction(ctx->getTab(),
599  &smallKey[0],
600  smallKeySize));
601 
602  CHECKNOTNULL(trans->readTuple(ixRecord,
603  bigKeyIxBuf,
604  baseRecord,
605  resultSpace));
606 
607  CHECKEQUAL(-1, trans->execute(NdbTransaction::NoCommit));
608 
609  /* ZGET_DATABUF_ERR expected */
610  CHECKEQUAL(218, trans->getNdbError().code);
611 
612  trans->close();
613 
614  /* Now a TCINDXREQ that overflows, but is not the last in the
615  * batch, what happens to the other TCINDXREQ in the batch?
616  */
617  CHECKNOTNULL(trans= pNdb->startTransaction(ctx->getTab(),
618  &smallKey[0],
619  smallKeySize));
620 
621  CHECKNOTNULL(trans->readTuple(ixRecord,
622  bigKeyIxBuf,
623  baseRecord,
624  resultSpace));
625  /* Another read */
626  CHECKNOTNULL(trans->readTuple(ixRecord,
627  bigKeyIxBuf,
628  baseRecord,
629  resultSpace));
630 
631  CHECKEQUAL(-1, trans->execute(NdbTransaction::NoCommit));
632 
633  /* ZGET_DATABUF_ERR expected */
634  CHECKEQUAL(218, trans->getNdbError().code);
635 
636  trans->close();
637 
638 
639  /* Next we read a tuple with a large primary key via the unique
640  * index. The index read itself should be fine, but
641  * pulling in the base table PK will cause abort due to overflow
642  * handling TRANSID_AI
643  */
644  /* Start a transaction on a specific node */
645  CHECKNOTNULL(trans= pNdb->startTransaction(ctx->getTab(),
646  &smallKey[0],
647  smallKeySize));
648 
649  /* Activate error insert 8066 in this transaction, limits a
650  * single import/append to 1 section.
651  * Note that the TRANSID_AI is received by TC as a short-signal
652  * train, so no single append is large, but when the first
653  * segment is used and append starts on the second, it will
654  * fail.
655  */
656  CHECKEQUAL(NDBT_OK, activateErrorInsert(trans,
657  baseRecord,
658  ctx->getTab(),
659  smallRowBuf,
660  &restarter,
661  8066));
662  CHECKEQUAL(0, trans->execute(NdbTransaction::NoCommit));
663 
664  CHECKNOTNULL(bigRead= trans->readTuple(ixRecord,
665  bigAttrIxBuf,
666  baseRecord,
667  resultSpace));
668 
669  CHECKEQUAL(-1, trans->execute(NdbTransaction::NoCommit));
670 
671  /* ZGET_DATABUF_ERR expected */
672  CHECKEQUAL(218, trans->getNdbError().code)
673 
674  trans->close();
675 
676  // TODO Move short signal testing to testUpgrade
677 #if 0
678  /*
679  * Short TCINDXREQ KeyInfo accumulate Consume + send long
680  * (TCINDXREQ + KEYINFO)
681  * Short TCINDXREQ AttrInfo accumulate Consume + send short key
682  * + long AI
683  * (TCINDXREQ + ATTRINFO)
684  */
685  /* Now try with a 'short' TCINDXREQ, generated using the old Api
686  * with a big index key value
687  */
688  CHECKNOTNULL(trans= pNdb->startTransaction(ctx->getTab(),
689  &smallKey[0],
690  smallKeySize));
691 
693  CHECKNOTNULL(index= pNdb->getDictionary()->
694  getIndex(indexName,
695  ctx->getTab()->getName()));
696 
697  NdbIndexOperation* bigReadOldApi;
698  CHECKNOTNULL(bigReadOldApi= trans->getNdbIndexOperation(index));
699 
700  CHECKEQUAL(0, bigReadOldApi->readTuple());
701  /* We use the attribute id of the index, not the base table here */
702  CHECKEQUAL(0, bigReadOldApi->equal((Uint32)0,
703  NdbDictionary::getValuePtr
704  (ixRecord,
705  bigKeyIxBuf,
706  1)));
707 
708  CHECKNOTNULL(bigReadOldApi->getValue((Uint32)1));
709 
710  CHECKEQUAL(-1, trans->execute(NdbTransaction::NoCommit));
711 
712  /* ZGET_DATABUF_ERR expected */
713  CHECKEQUAL(218, trans->getNdbError().code)
714 
715  trans->close();
716 
717  /* Now try with a 'short' TCINDXREQ, generated using the old Api
718  * with a big attrinfo value
719  */
720  CHECKNOTNULL(trans= pNdb->startTransaction(ctx->getTab(),
721  &smallKey[0],
722  smallKeySize));
723 
724  NdbIndexOperation* bigUpdateOldApi;
725  CHECKNOTNULL(bigUpdateOldApi= trans->getNdbIndexOperation(index));
726 
727  CHECKEQUAL(0, bigUpdateOldApi->updateTuple());
728  /* We use the attribute id of the index, not the base table here */
729  CHECKEQUAL(0, bigUpdateOldApi->equal((Uint32)0,
730  NdbDictionary::getValuePtr
731  (baseRecord,
732  smallRowBuf,
733  1)));
734 
735  CHECKEQUAL(0, bigUpdateOldApi->setValue((Uint32)1,
736  NdbDictionary::getValuePtr
737  (baseRecord,
738  bigAttrIxBuf,
739  1)));
740 
741  CHECKEQUAL(-1, trans->execute(NdbTransaction::NoCommit));
742 
743  /* ZGET_DATABUF_ERR expected */
744  CHECKEQUAL(218, trans->getNdbError().code)
745 
746  trans->close();
747 
748  /* Change error insert so that next TCINDXREQ will grab
749  * all but one SegmentedSection
750  */
751  restarter.insertErrorInAllNodes(8066);
752 
753  /* Now a short TCINDXREQ where the KeyInfo from the TCINDXREQ
754  * can be imported, but the ATTRINFO can't
755  */
756  CHECKNOTNULL(trans= pNdb->startTransaction(ctx->getTab(),
757  &smallKey[0],
758  smallKeySize));
759 
760  CHECKNOTNULL(bigUpdateOldApi= trans->getNdbIndexOperation(index));
761 
762  CHECKEQUAL(0, bigUpdateOldApi->updateTuple());
763  /* We use the attribute id of the index, not the base table here */
764  CHECKEQUAL(0, bigUpdateOldApi->equal((Uint32)0,
765  NdbDictionary::getValuePtr
766  (baseRecord,
767  smallRowBuf,
768  1)));
769 
770  CHECKEQUAL(0, bigUpdateOldApi->setValue((Uint32)1,
771  NdbDictionary::getValuePtr
772  (baseRecord,
773  bigAttrIxBuf,
774  1)));
775 
776  CHECKEQUAL(-1, trans->execute(NdbTransaction::NoCommit));
777 
778  /* ZGET_DATABUF_ERR expected */
779  CHECKEQUAL(218, trans->getNdbError().code)
780 
781  trans->close();
782 
783  /* Change error insert so that there are no SectionSegments */
784  restarter.insertErrorInAllNodes(8067);
785 
786  /* Now a short TCINDXREQ where the KeyInfo from the TCINDXREQ
787  * can't be imported
788  */
789  CHECKNOTNULL(trans= pNdb->startTransaction(ctx->getTab(),
790  &smallKey[0],
791  smallKeySize));
792 
793  CHECKNOTNULL(bigUpdateOldApi= trans->getNdbIndexOperation(index));
794 
795  CHECKEQUAL(0, bigUpdateOldApi->updateTuple());
796  /* We use the attribute id of the index, not the base table here */
797  CHECKEQUAL(0, bigUpdateOldApi->equal((Uint32)0,
798  NdbDictionary::getValuePtr
799  (baseRecord,
800  smallRowBuf,
801  1)));
802 
803  CHECKEQUAL(0, bigUpdateOldApi->setValue((Uint32)1,
804  NdbDictionary::getValuePtr
805  (baseRecord,
806  bigAttrIxBuf,
807  1)));
808 
809  CHECKEQUAL(-1, trans->execute(NdbTransaction::NoCommit));
810 
811  /* ZGET_DATABUF_ERR expected */
812  CHECKEQUAL(218, trans->getNdbError().code)
813 
814  trans->close();
815 
816 #endif
817 
818  /* Finished with error insert, cleanup the error insertion */
819  CHECKNOTNULL(trans= pNdb->startTransaction(ctx->getTab(),
820  &smallKey[0],
821  smallKeySize));
822 
823  CHECKEQUAL(NDBT_OK, activateErrorInsert(trans,
824  baseRecord,
825  ctx->getTab(),
826  smallRowBuf,
827  &restarter,
828  8068));
829 
831 
832  CHECKEQUAL(0, trans->getNdbError().code);
833 
834  trans->close();
835 
836  return NDBT_OK;
837 }
838 
839 
/* Test for correct behaviour of scan operations when an NDBD node's
 * SegmentedSection pool is exhausted.
 * Returns NDBT_OK / NDBT_FAILED; the CHECK* macros early-return
 * NDBT_FAILED via the in-scope 'trans' variable.
 */
int testSegmentedSectionScan(NDBT_Context* ctx, NDBT_Step* step){
  /* Test that TC handling of segmented section exhaustion is
   * correct
   * Since NDBAPI always send long requests, that is all that
   * we test
   */
  /* We just run on one table */
  if (strcmp(ctx->getTab()->getName(), "WIDE_2COL") != 0)
    return NDBT_OK;

  const Uint32 maxRowBytes= NDB_MAX_TUPLE_SIZE_IN_WORDS * sizeof(Uint32);
  char smallKey[50];
  char smallRowBuf[maxRowBytes];

  /* Small key used only to hint the transaction to a specific TC */
  Uint32 smallKeySize= setLongVarchar(&smallKey[0],
                                      "ShortKey",
                                      8);

  const NdbRecord* record= ctx->getTab()->getDefaultRecord();

  /* Setup buffers
   * Small row buffer with small key and small data
   */
  setLongVarchar(NdbDictionary::getValuePtr(record,
                                            smallRowBuf,
                                            0),
                 "ShortKey",
                 8);
  NdbDictionary::setNull(record, smallRowBuf, 0, false);

  setLongVarchar(NdbDictionary::getValuePtr(record,
                                            smallRowBuf,
                                            1),
                 "ShortData",
                 9);
  NdbDictionary::setNull(record, smallRowBuf, 1, false);

  NdbRestarter restarter;
  Ndb* pNdb= GETNDB(step);

  /* Start a transaction on a specific node */
  NdbTransaction* trans= pNdb->startTransaction(ctx->getTab(),
                                                &smallKey[0],
                                                smallKeySize);
  CHECKNOTNULL(trans);

  /* Activate error insert 8066 in this transaction, limits a
   * single import/append to 1 section.
   */
  CHECKEQUAL(NDBT_OK, activateErrorInsert(trans,
                                          record,
                                          ctx->getTab(),
                                          smallRowBuf,
                                          &restarter,
                                          8066));

  /* A scan will always send 2 long sections (Receiver Ids,
   * AttrInfo)
   * Let's start a scan with > 2400 bytes of
   * ATTRINFO and see what happens
   */
  NdbScanOperation* scan= trans->getNdbScanOperation(ctx->getTab());

  CHECKNOTNULL(scan);

  CHECKEQUAL(0, scan->readTuples());

  /* Create a particularly useless program */
  /* 2500 words of interpreted code just to inflate the AttrInfo
   * section beyond what the error insert allows to be imported.
   */
  NdbInterpretedCode prog;

  for (Uint32 w=0; w < 2500; w++)
    CHECKEQUAL(0, prog.load_const_null(1));

  CHECKEQUAL(0, prog.interpret_exit_ok());
  CHECKEQUAL(0, prog.finalise());

  CHECKEQUAL(0, scan->setInterpretedCode(&prog));

  /* Api doesn't seem to wait for result of scan request */
  CHECKEQUAL(0, trans->execute(NdbTransaction::NoCommit));

  CHECKEQUAL(0, trans->getNdbError().code);

  /* The failure surfaces on the scan object, not the transaction:
   * error 217 (temporary resource error from TC) is expected.
   */
  CHECKEQUAL(-1, scan->nextResult());

  CHECKEQUAL(217, scan->getNdbError().code);

  trans->close();

  /* Finished with error insert, cleanup the error insertion */
  /* Error insert 8068 frees the hoarded segments; the dummy insert
   * used to trigger it is rolled back below.
   */
  CHECKNOTNULL(trans= pNdb->startTransaction(ctx->getTab(),
                                             &smallKey[0],
                                             smallKeySize));

  CHECKEQUAL(NDBT_OK, activateErrorInsert(trans,
                                          record,
                                          ctx->getTab(),
                                          smallRowBuf,
                                          &restarter,
                                          8068));

  CHECKEQUAL(0, trans->execute(NdbTransaction::Rollback));

  CHECKEQUAL(0, trans->getNdbError().code);

  trans->close();

  return NDBT_OK;
}
949 
/* Test system behaviour when individual physical signals of a
 * fragmented SCANTABREQ are dropped via error inserts 8074-8077.
 * Returns NDBT_OK / NDBT_FAILED; the CHECK* macros early-return
 * NDBT_FAILED via the in-scope 'trans' variable.
 */
int testDropSignalFragments(NDBT_Context* ctx, NDBT_Step* step){
  /* Segmented section exhaustion results in dropped signals
   * Fragmented signals split one logical signal over multiple
   * physical signals (to cope with the MAX_SIGNAL_LENGTH=32kB
   * limitation).
   * This testcase checks that when individual signals comprising
   * a fragmented signal (in this case SCANTABREQ) are dropped, the
   * system behaves correctly.
   * Correct behaviour is to behave in the same way as if the signal
   * was not fragmented, and for SCANTABREQ, to return a temporary
   * resource error.
   */
  NdbRestarter restarter;
  Ndb* pNdb= GETNDB(step);

  /* SEND > ((2 * MAX_SEND_MESSAGE_BYTESIZE) + SOME EXTRA)
   * This way we get at least 3 fragments
   * However, as this is generally > 64kB, it's too much AttrInfo for
   * a ScanTabReq, so the 'success' case returns error 874
   */
  const Uint32 PROG_WORDS= 16500;

  /* One subcase = error insert to arm + scan result code expected */
  struct SubCase
  {
    Uint32 errorInsertCode;
    int expectedRc;
  };
  const Uint32 numSubCases= 5;
  const SubCase cases[numSubCases]=
  /* Error insert   Scanrc */
    {{     0,        874}, // Normal, success which gives too much AI error
     {  8074,        217}, // Drop first fragment -> error 217
     {  8075,        217}, // Drop middle fragment(s) -> error 217
     {  8076,        217}, // Drop last fragment -> error 217
     {  8077,        217}}; // Drop all fragments -> error 217
  const Uint32 numIterations= 50;

  Uint32 buff[ PROG_WORDS + 10 ]; // 10 extra for final 'return' etc.

  /* Cycle through the subcases numIterations times each */
  for (Uint32 iteration=0; iteration < (numIterations * numSubCases); iteration++)
  {
    /* Start a transaction */
    NdbTransaction* trans= pNdb->startTransaction();
    CHECKNOTNULL(trans);

    SubCase subcase= cases[iteration % numSubCases];

    Uint32 errorInsertVal= subcase.errorInsertCode;
    // printf("Inserting error : %u\n", errorInsertVal);
    /* We insert the error twice, to bias races between
     * error-insert propagation and the succeeding scan
     * in favour of error insert winning!
     * This problem needs a more general fix
     */
    CHECKEQUAL(0, restarter.insertErrorInAllNodes(errorInsertVal));
    CHECKEQUAL(0, restarter.insertErrorInAllNodes(errorInsertVal));

    NdbScanOperation* scan= trans->getNdbScanOperation(ctx->getTab());

    CHECKNOTNULL(scan);

    CHECKEQUAL(0, scan->readTuples());

    /* Create a large program, to give a large SCANTABREQ */
    NdbInterpretedCode prog(ctx->getTab(), buff, PROG_WORDS + 10);

    for (Uint32 w=0; w < PROG_WORDS; w++)
      CHECKEQUAL(0, prog.load_const_null(1));

    CHECKEQUAL(0, prog.interpret_exit_ok());
    CHECKEQUAL(0, prog.finalise());

    CHECKEQUAL(0, scan->setInterpretedCode(&prog));

    /* Api doesn't seem to wait for result of scan request */
    CHECKEQUAL(0, trans->execute(NdbTransaction::NoCommit));

    CHECKEQUAL(0, trans->getNdbError().code);

    /* Error surfaces on the scan object when results are fetched */
    CHECKEQUAL(-1, scan->nextResult());

    int expectedResult= subcase.expectedRc;
    CHECKEQUAL(expectedResult, scan->getNdbError().code);

    scan->close();

    trans->close();
  }

  /* Clear any armed error inserts before finishing */
  restarter.insertErrorInAllNodes(0);

  return NDBT_OK;
}
1043 
1044 
/* Test suite definition: each testcase drives one of the
 * SegmentedSection-exhaustion scenarios implemented above.
 */
NDBT_TESTSUITE(testLimits);

TESTCASE("ExhaustSegmentedSectionPk",
         "Test behaviour at Segmented Section exhaustion for PK"){
  INITIALIZER(testSegmentedSectionPk);
}

TESTCASE("ExhaustSegmentedSectionIX",
         "Test behaviour at Segmented Section exhaustion for Unique index"){
  INITIALIZER(testSegmentedSectionIx);
}
TESTCASE("ExhaustSegmentedSectionScan",
         "Test behaviour at Segmented Section exhaustion for Scan"){
  INITIALIZER(testSegmentedSectionScan);
}

TESTCASE("DropSignalFragments",
         "Test behaviour of Segmented Section exhaustion with fragmented signals"){
  INITIALIZER(testDropSignalFragments);
}

NDBT_TESTSUITE_END(testLimits);
1067 
/* Standard NDBT test-driver entry point: initialise the NDB API and
 * hand control to the suite's argument parser / executor.
 */
int main(int argc, const char** argv){
  ndb_init();
  NDBT_TESTSUITE_INSTANCE(testLimits);
  return testLimits.execute(argc, argv);
}