MySQL 5.6.14 Source Code Document
 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Groups Pages
testBlobs.cpp
1 /*
2  Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
3 
4  This program is free software; you can redistribute it and/or modify
5  it under the terms of the GNU General Public License as published by
6  the Free Software Foundation; version 2 of the License.
7 
8  This program is distributed in the hope that it will be useful,
9  but WITHOUT ANY WARRANTY; without even the implied warranty of
10  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11  GNU General Public License for more details.
12 
13  You should have received a copy of the GNU General Public License
14  along with this program; if not, write to the Free Software
15  Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
16 */
17 
18 /*
19  * testBlobs
20  */
21 
22 #include <ndb_global.h>
23 #include <NdbMain.h>
24 #include <NdbOut.hpp>
25 #include <OutputStream.hpp>
26 #include <NdbTest.hpp>
27 #include <NdbTick.h>
28 #include <my_sys.h>
29 #include <NdbRestarter.hpp>
30 
31 #include <ndb_rand.h>
32 
33 struct Chr {
35  bool m_fixed;
36  bool m_binary;
37  uint m_len; // native
38  uint m_bytelen; // in bytes
39  uint m_totlen; // plus length bytes
40  const char* m_cs;
41  CHARSET_INFO* m_csinfo;
42  uint m_mblen;
43  bool m_caseins; // for latin letters
44  Chr() :
46  m_fixed(false),
47  m_binary(false),
48  m_len(55),
49  m_bytelen(0),
50  m_totlen(0),
51  m_cs("latin1"),
52  m_csinfo(0),
53  m_caseins(true)
54  {}
55 };
56 
57 struct Opt {
58  unsigned m_batch;
59  bool m_core;
60  bool m_dbg;
61  const char* m_debug;
62  bool m_fac;
63  bool m_full;
64  unsigned m_loop;
65  bool m_min;
66  unsigned m_parts;
67  unsigned m_rows;
68  int m_seed;
69  const char* m_skip;
70  const char* m_test;
71  int m_timeout_retries;
72  int m_blob_version;
73  // metadata
74  const char* m_tname;
75  const char* m_x1name; // hash index
76  const char* m_x2name; // ordered index
77  unsigned m_pk1off;
78  Chr m_pk2chr;
79  bool m_pk2part;
80  bool m_oneblob;
81 
82  int m_rbatch;
83  int m_wbatch;
84  // perf
85  const char* m_tnameperf;
86  unsigned m_rowsperf;
87  // bugs
88  int m_bug;
89  int (*m_bugtest)();
90  Opt() :
91  m_batch(7),
92  m_core(false),
93  m_dbg(false),
94  m_debug(0),
95  m_fac(false),
96  m_full(false),
97  m_loop(1),
98  m_min(false),
99  m_parts(10),
100  m_rows(100),
101  m_seed(-1),
102  m_skip(0),
103  m_test(0),
104  m_timeout_retries(10),
105  m_blob_version(2),
106  // metadata
107  m_tname("TB1"),
108  m_x1name("TB1X1"),
109  m_x2name("TB1X2"),
110  m_pk1off(0x12340000),
111  m_pk2chr(),
112  m_pk2part(false),
113  m_oneblob(false),
114  m_rbatch(-1),
115  m_wbatch(-1),
116  // perf
117  m_tnameperf("TB2"),
118  m_rowsperf(10000),
119  // bugs
120  m_bug(0),
121  m_bugtest(0)
122  {}
123 };
124 
125 static void
126 printusage()
127 {
128  Opt d;
129  ndbout
130  << "usage: testBlobs options [default/max]" << endl
131  << " -batch N number of pk ops in batch [" << d.m_batch << "]" << endl
132  << " -core dump core on error" << endl
133  << " -dbg print program debug" << endl
134  << " -debug opt also ndb api DBUG (if no ':' becomes d:t:F:L:o,opt)" << endl
135  << " -fac fetch across commit in scan delete" << endl
136  << " -full read/write only full blob values" << endl
137  << " -loop N loop N times 0=forever [" << d.m_loop << "]" << endl
138  << " -min small blob sizes" << endl
139  << " -parts N max parts in blob value [" << d.m_parts << "]" << endl
140  << " -rows N number of rows [" << d.m_rows << "]" << endl
141  << " -rowsperf N rows for performace test [" << d.m_rowsperf << "]" << endl
142  << " -seed N random seed 0=loop number -1=random [" << d.m_seed << "]" << endl
143  << " -skip xxx skip given tests (see list) [no tests]" << endl
144  << " -test xxx only given tests (see list) [all tests]" << endl
145  << " -timeoutretries N Number of times to retry in deadlock situations ["
146  << d.m_timeout_retries << "]" << endl
147  << " -version N blob version 1 or 2 [" << d.m_blob_version << "]" << endl
148  << "metadata" << endl
149  << " -pk2len N native length of PK2, zero omits PK2,PK3 [" << d.m_pk2chr.m_len << "]" << endl
150  << " -pk2fixed PK2 is Char [default Varchar]" << endl
151  << " -pk2binary PK2 is Binary or Varbinary" << endl
152  << " -pk2cs PK2 charset or collation [" << d.m_pk2chr.m_cs << "]" << endl
153  << " -pk2part partition primary table by PK2" << endl
154  << " -oneblob only 1 blob attribute [default 2]" << endl
155  << " -rbatch N Read parts batchsize (bytes) [default -1] -1=random" << endl
156  << " -wbatch N Write parts batchsize (bytes) [default -1] -1=random" << endl
157  << "disk or memory storage for blobs. Don't apply to performance test" << endl
158  << " m Blob columns stored in memory" << endl
159  << " h Blob columns stored on disk" << endl
160  << "api styles for test/skip. Don't apply to performance test" << endl
161  << " a NdbRecAttr(old) interface" << endl
162  << " b NdbRecord interface" << endl
163  << "test cases for test/skip" << endl
164  << " k primary key ops" << endl
165  << " i hash index ops" << endl
166  << " s table scans" << endl
167  << " r ordered index scans" << endl
168  << " p performance test" << endl
169  << "operations for test/skip" << endl
170  << " u update existing blob value" << endl
171  << " n normal insert and update" << endl
172  << " w insert and update using writeTuple" << endl
173  << " d delete, can skip only for one subtest" << endl
174  << " l read with lock and unlock" << endl
175  << "blob operation styles for test/skip" << endl
176  << " 0 getValue / setValue" << endl
177  << " 1 setActiveHook" << endl
178  << " 2 readData / writeData" << endl
179  << "example: -test makn0 (need all 4 parts)" << endl
180  << "example: -test mhabkisrunwd012 (Everything except performance tests" << endl
181  << "bug tests" << endl
182  << " -bug 4088 ndb api hang with mixed ops on index table" << endl
183  << " -bug 27018 middle partial part write clobbers rest of part" << endl
184  << " -bug 27370 Potential inconsistent blob reads for ReadCommitted reads" << endl
185  << " -bug 36756 Handling execute(.., abortOption) and Blobs " << endl
186  << " -bug 45768 execute(Commit) after failing blob batch " << endl
187  << " -bug 62321 Blob obscures ignored error codes in batch" << endl
188  ;
189 }
190 
191 static Opt g_opt;
192 
193 static bool
194 testcase(char x)
195 {
196  if (x < 10)
197  x += '0';
198 
199  return
200  (g_opt.m_test == 0 || strchr(g_opt.m_test, x) != 0) &&
201  (g_opt.m_skip == 0 || strchr(g_opt.m_skip, x) == 0);
202 }
203 
204 static Ndb_cluster_connection* g_ncc = 0;
205 static Ndb* g_ndb = 0;
206 static NdbDictionary::Dictionary* g_dic = 0;
207 static NdbConnection* g_con = 0;
208 static NdbOperation* g_opr = 0;
209 static const NdbOperation* g_const_opr = 0;
210 static NdbIndexOperation* g_opx = 0;
211 static NdbScanOperation* g_ops = 0;
212 static NdbBlob* g_bh1 = 0;
213 static NdbBlob* g_bh2 = 0;
214 static bool g_printerror = true;
215 static unsigned g_loop = 0;
216 static NdbRecord *g_key_record= 0;
217 static NdbRecord *g_blob_record= 0;
218 static NdbRecord *g_full_record= 0;
219 static NdbRecord *g_idx_record= 0;
220 static NdbRecord *g_ord_record= 0;
221 static unsigned g_pk1_offset= 0;
222 static unsigned g_pk2_offset= 0;
223 static unsigned g_pk3_offset= 0;
224 static unsigned g_blob1_offset= 0;
225 static unsigned g_blob1_null_offset= 0;
226 static unsigned g_blob2_offset= 0;
227 static unsigned g_blob2_null_offset= 0;
228 static unsigned g_rowsize= 0;
229 static const char* g_tsName= "DEFAULT-TS";
230 static Uint32 g_batchSize= 0;
231 static Uint32 g_scanFlags= 0;
232 static Uint32 g_parallel= 0;
233 static Uint32 g_usingDisk= false;
234 static const Uint32 MAX_FRAGS=48 * 8 * 4; // e.g. 48 nodes, 8 frags/node, 4 replicas
235 static Uint32 frag_ng_mappings[MAX_FRAGS];
236 
237 
// Display names for the three blob access styles (indexed by style 0..2).
static const char* stylename[3] = {
  "style=getValue/setValue",
  "style=setActiveHook",
  "style=readData/writeData"
};

// Blob API variants: display names and the single-char symbols used
// with -test/-skip.
static const char* apiName[2] = {
  "api=NdbRecAttr",
  "api=NdbRecord"
};

static const char apiSymbol[2] = {
  'a', // RecAttr
  'b'  // NdbRecord
};

static const int API_RECATTR=0;
static const int API_NDBRECORD=1;

// Storage variants: display names and -test/-skip symbols.
static const char* storageName[2] = {
  "storage=memory",
  "storage=disk"
};

static const char storageSymbol[2] = {
  'm', // Memory storage
  'h'  // Disk storage
};

static const int STORAGE_MEM=0;
static const int STORAGE_DISK=1;
270 
271 static void
272 printerror(int line, const char* msg)
273 {
274  ndbout << "line " << line << " FAIL " << msg << endl;
275  if (! g_printerror) {
276  return;
277  }
278  if (g_ndb != 0 && g_ndb->getNdbError().code != 0) {
279  ndbout << "ndb: " << g_ndb->getNdbError() << endl;
280  }
281  if (g_dic != 0 && g_dic->getNdbError().code != 0) {
282  ndbout << "dic: " << g_dic->getNdbError() << endl;
283  }
284  if (g_con != 0 && g_con->getNdbError().code != 0) {
285  ndbout << "con: " << g_con->getNdbError() << endl;
286  if (g_opr != 0 && g_opr->getNdbError().code != 0) {
287  ndbout << "opr: table=" << g_opr->getTableName() << " " << g_opr->getNdbError() << endl;
288  }
289  if (g_const_opr != 0 && g_const_opr->getNdbError().code !=0) {
290  ndbout << "const_opr: table=" << g_const_opr->getTableName() << " " << g_const_opr->getNdbError() << endl;
291  }
292  if (g_opx != 0 && g_opx->getNdbError().code != 0) {
293  ndbout << "opx: table=" << g_opx->getTableName() << " " << g_opx->getNdbError() << endl;
294  }
295  if (g_ops != 0 && g_ops->getNdbError().code != 0) {
296  ndbout << "ops: table=" << g_ops->getTableName() << " " << g_ops->getNdbError() << endl;
297  }
298  NdbOperation* ope = g_con->getNdbErrorOperation();
299  if (ope != 0 && ope->getNdbError().code != 0) {
300  if (ope != g_opr && ope != g_const_opr && ope != g_opx && ope != g_ops)
301  ndbout << "ope: ptr=" << ope << " table=" << ope->getTableName() << " type= "<< ope->getType() << " " << ope->getNdbError() << endl;
302  }
303  }
304  if (g_bh1 != 0 && g_bh1->getNdbError().code != 0) {
305  ndbout << "bh1: " << g_bh1->getNdbError() << endl;
306  }
307  if (g_bh2 != 0 && g_bh2->getNdbError().code != 0) {
308  ndbout << "bh2: " << g_bh2->getNdbError() << endl;
309  }
310  if (g_opt.m_core) {
311  abort();
312  }
313  g_printerror = false;
314 }
315 
// CHK(x): if x is false, report the failing expression via printerror()
// and return -1 from the enclosing function.
#define CHK(x) \
  do { \
    if (x) break; \
    printerror(__LINE__, #x); return -1; \
  } while (0)
// DBG(x): stream x to ndbout, but only when -dbg was given.
#define DBG(x) \
  do { \
    if (! g_opt.m_dbg) break; \
    ndbout << "line " << __LINE__ << " " << x << endl; \
  } while (0)
// DISP(x): unconditional variant of DBG.
#define DISP(x) \
  do { \
    ndbout << "line " << __LINE__ << " " << x << endl; \
  } while (0)
330 
331 struct Bcol {
332  int m_type;
333  int m_version;
334  bool m_nullable;
335  uint m_inline;
336  uint m_partsize;
337  uint m_stripe;
338  char m_btname[200];
339  Bcol() { memset(this, 0, sizeof(*this)); }
340 };
341 
342 static Bcol g_blob1;
343 static Bcol g_blob2;
344 
345 enum OpState {Normal, Retrying};
346 
347 static void
348 initblobs()
349 {
350  {
351  Bcol& b = g_blob1;
352  b.m_type = NdbDictionary::Column::Text;
353  b.m_version = g_opt.m_blob_version;
354  b.m_nullable = false;
355  b.m_inline = g_opt.m_min ? 8 : 240;
356  b.m_partsize = g_opt.m_min ? 8 : 2000;
357  b.m_stripe = b.m_version == 1 ? 4 : 0;
358  }
359  {
360  Bcol& b = g_blob2;
361  b.m_type = NdbDictionary::Column::Blob;
362  b.m_version = g_opt.m_blob_version;
363  b.m_nullable = true;
364  b.m_inline = g_opt.m_min ? 9 : 99;
365  b.m_partsize = g_opt.m_min ? 5 : 55;
366  b.m_stripe = 3;
367  }
368 }
369 
370 static void
371 initConstants()
372 {
373  g_pk1_offset= 0;
374  g_pk2_offset= g_pk1_offset + 4;
375  g_pk3_offset= g_pk2_offset + g_opt.m_pk2chr.m_totlen;
376  g_blob1_offset= g_pk3_offset + 2;
377  g_blob2_offset= g_blob1_offset + sizeof(NdbBlob *);
378  g_blob1_null_offset= g_blob2_offset + sizeof(NdbBlob *);
379  g_blob2_null_offset= g_blob1_null_offset + 1;
380  g_rowsize= g_blob2_null_offset + 1;
381 }
382 
383 static int
384 createDefaultTableSpace()
385 {
386  /* 'Inspired' by NDBT_Tables::create_default_tablespace */
387  int res;
388  NdbDictionary::LogfileGroup lg = g_dic->getLogfileGroup("DEFAULT-LG");
389  if (strcmp(lg.getName(), "DEFAULT-LG") != 0)
390  {
391  lg.setName("DEFAULT-LG");
392  lg.setUndoBufferSize(8*1024*1024);
393  res = g_dic->createLogfileGroup(lg);
394  if(res != 0){
395  DBG("Failed to create logfilegroup:"
396  << endl << g_dic->getNdbError() << endl);
397  return -1;
398  }
399  }
400  {
401  NdbDictionary::Undofile uf = g_dic->getUndofile(0, "undofile01.dat");
402  if (strcmp(uf.getPath(), "undofile01.dat") != 0)
403  {
404  uf.setPath("undofile01.dat");
405  uf.setSize(32*1024*1024);
406  uf.setLogfileGroup("DEFAULT-LG");
407 
408  res = g_dic->createUndofile(uf, true);
409  if(res != 0){
410  DBG("Failed to create undofile:"
411  << endl << g_dic->getNdbError() << endl);
412  return -1;
413  }
414  }
415  }
416  {
417  NdbDictionary::Undofile uf = g_dic->getUndofile(0, "undofile02.dat");
418  if (strcmp(uf.getPath(), "undofile02.dat") != 0)
419  {
420  uf.setPath("undofile02.dat");
421  uf.setSize(32*1024*1024);
422  uf.setLogfileGroup("DEFAULT-LG");
423 
424  res = g_dic->createUndofile(uf, true);
425  if(res != 0){
426  DBG("Failed to create undofile:"
427  << endl << g_dic->getNdbError() << endl);
428  return -1;
429  }
430  }
431  }
432  NdbDictionary::Tablespace ts = g_dic->getTablespace(g_tsName);
433  if (strcmp(ts.getName(), g_tsName) != 0)
434  {
435  ts.setName(g_tsName);
436  ts.setExtentSize(1024*1024);
437  ts.setDefaultLogfileGroup("DEFAULT-LG");
438 
439  res = g_dic->createTablespace(ts);
440  if(res != 0){
441  DBG("Failed to create tablespace:"
442  << endl << g_dic->getNdbError() << endl);
443  return -1;
444  }
445  }
446 
447  {
448  NdbDictionary::Datafile df = g_dic->getDatafile(0, "datafile01.dat");
449  if (strcmp(df.getPath(), "datafile01.dat") != 0)
450  {
451  df.setPath("datafile01.dat");
452  df.setSize(64*1024*1024);
453  df.setTablespace(g_tsName);
454 
455  res = g_dic->createDatafile(df, true);
456  if(res != 0){
457  DBG("Failed to create datafile:"
458  << endl << g_dic->getNdbError() << endl);
459  return -1;
460  }
461  }
462  }
463 
464  {
465  NdbDictionary::Datafile df = g_dic->getDatafile(0, "datafile02.dat");
466  if (strcmp(df.getPath(), "datafile02.dat") != 0)
467  {
468  df.setPath("datafile02.dat");
469  df.setSize(64*1024*1024);
470  df.setTablespace(g_tsName);
471 
472  res = g_dic->createDatafile(df, true);
473  if(res != 0){
474  DBG("Failed to create datafile:"
475  << endl << g_dic->getNdbError() << endl);
476  return -1;
477  }
478  }
479  }
480 
481  return 0;
482 }
483 
484 static int
485 dropTable()
486 {
487  NdbDictionary::Table tab(g_opt.m_tname);
488  if (g_dic->getTable(g_opt.m_tname) != 0)
489  CHK(g_dic->dropTable(g_opt.m_tname) == 0);
490 
491  if (g_key_record != NULL)
492  g_dic->releaseRecord(g_key_record);
493  if (g_blob_record != NULL)
494  g_dic->releaseRecord(g_blob_record);
495  if (g_full_record != NULL)
496  g_dic->releaseRecord(g_full_record);
497 
498  if (g_opt.m_pk2chr.m_len != 0)
499  {
500  if (g_idx_record != NULL)
501  g_dic->releaseRecord(g_idx_record);
502  if (g_ord_record != NULL)
503  g_dic->releaseRecord(g_ord_record);
504  }
505 
506  g_key_record= NULL;
507  g_blob_record= NULL;
508  g_full_record= NULL;
509  g_idx_record= NULL;
510  g_ord_record= NULL;
511 
512  return 0;
513 }
514 
515 static unsigned
516 urandom(unsigned n)
517 {
518  return n == 0 ? 0 : ndb_rand() % n;
519 }
520 
521 static int
522 createTable(int storageType)
523 {
524  /* No logging for memory tables */
525  bool loggingRequired=(storageType == STORAGE_DISK);
526  NdbDictionary::Column::StorageType blobStorageType=
527  (storageType == STORAGE_MEM)?
528  NdbDictionary::Column::StorageTypeMemory :
529  NdbDictionary::Column::StorageTypeDisk;
530 
531  NdbDictionary::Table tab(g_opt.m_tname);
532  if (storageType == STORAGE_DISK)
533  tab.setTablespaceName(g_tsName);
534  tab.setLogging(loggingRequired);
535 
536  /* Choose from the interesting fragmentation types :
537  * DistrKeyHash, DistrKeyLin, UserDefined, HashMapPartitioned
538  * Others are obsolete fragment-count setting variants
539  * of DistrKeyLin
540  * For UserDefined partitioning, we need to set the partition
541  * id for all PK operations.
542  */
543  Uint32 fragTypeRange= 1 + (NdbDictionary::Object::HashMapPartition -
544  NdbDictionary::Object::DistrKeyHash);
545  Uint32 fragType= NdbDictionary::Object::DistrKeyHash + urandom(fragTypeRange);
546 
547  /* Value 8 is unused currently, map it to something else */
548  if (fragType == 8)
549  fragType= NdbDictionary::Object::UserDefined;
550 
551  tab.setFragmentType((NdbDictionary::Object::FragmentType)fragType);
552 
553  if (fragType == NdbDictionary::Object::UserDefined)
554  {
555  /* Need to set the FragmentCount and fragment to NG mapping
556  * for this partitioning type
557  */
558  const Uint32 numNodes= g_ncc->no_db_nodes();
559  const Uint32 numReplicas= 2; // Assumption
560  const Uint32 guessNumNgs= numNodes/2;
561  const Uint32 numNgs= guessNumNgs?guessNumNgs : 1;
562  const Uint32 numFragsPerNode= 2 + (rand() % 3);
563  const Uint32 numPartitions= numReplicas * numNgs * numFragsPerNode;
564 
565  tab.setFragmentCount(numPartitions);
566  for (Uint32 i=0; i<numPartitions; i++)
567  {
568  frag_ng_mappings[i]= i % numNgs;
569  }
570  tab.setFragmentData(frag_ng_mappings, numPartitions);
571  }
572  const Chr& pk2chr = g_opt.m_pk2chr;
573  // col PK1 - Uint32
574  { NdbDictionary::Column col("PK1");
575  col.setType(NdbDictionary::Column::Unsigned);
576  col.setPrimaryKey(true);
577  tab.addColumn(col);
578  }
579  // col BL1 - Text not-nullable
580  { NdbDictionary::Column col("BL1");
581  const Bcol& b = g_blob1;
582  col.setType((NdbDictionary::Column::Type)b.m_type);
583  col.setBlobVersion(b.m_version);
584  col.setNullable(b.m_nullable);
585  col.setInlineSize(b.m_inline);
586  col.setPartSize(b.m_partsize);
587  col.setStripeSize(b.m_stripe);
588  col.setStorageType(blobStorageType);
589  tab.addColumn(col);
590  }
591  // col PK2 - Char or Varchar
592  if (pk2chr.m_len != 0)
593  { NdbDictionary::Column col("PK2");
594  col.setType(pk2chr.m_type);
595  col.setPrimaryKey(true);
596  col.setLength(pk2chr.m_bytelen);
597  if (pk2chr.m_csinfo != 0)
598  col.setCharset(pk2chr.m_csinfo);
599  if (g_opt.m_pk2part)
600  col.setPartitionKey(true);
601  tab.addColumn(col);
602  }
603  // col BL2 - Blob nullable
604  if (! g_opt.m_oneblob)
605  { NdbDictionary::Column col("BL2");
606  const Bcol& b = g_blob2;
607  col.setType((NdbDictionary::Column::Type)b.m_type);
608  col.setBlobVersion(b.m_version);
609  col.setNullable(b.m_nullable);
610  col.setInlineSize(b.m_inline);
611  col.setPartSize(b.m_partsize);
612  col.setStripeSize(b.m_stripe);
613  col.setStorageType(blobStorageType);
614  tab.addColumn(col);
615  }
616  // col PK3 - puts the Var* key PK2 between PK1 and PK3
617  if (pk2chr.m_len != 0)
618  { NdbDictionary::Column col("PK3");
620  col.setPrimaryKey(true);
621 
622  tab.addColumn(col);
623  }
624  // create table
625  CHK(g_dic->createTable(tab) == 0);
626  // unique hash index on PK2,PK3
627  if (g_opt.m_pk2chr.m_len != 0)
628  { NdbDictionary::Index idx(g_opt.m_x1name);
630  idx.setLogging(loggingRequired);
631  idx.setTable(g_opt.m_tname);
632  idx.addColumnName("PK2");
633  idx.addColumnName("PK3");
634  CHK(g_dic->createIndex(idx) == 0);
635  }
636  // ordered index on PK2
637  if (g_opt.m_pk2chr.m_len != 0)
638  { NdbDictionary::Index idx(g_opt.m_x2name);
640  idx.setLogging(false);
641  idx.setTable(g_opt.m_tname);
642  idx.addColumnName("PK2");
643  CHK(g_dic->createIndex(idx) == 0);
644  }
645 
647  unsigned numpks= g_opt.m_pk2chr.m_len == 0 ? 1 : 3;
648  unsigned numblobs= g_opt.m_oneblob ? 1 : 2;
649 
650  const NdbDictionary::Table *dict_table;
651  CHK((dict_table= g_dic->getTable(g_opt.m_tname)) != 0);
652  memset(spec, 0, sizeof(spec));
653  spec[0].column= dict_table->getColumn("PK1");
654  spec[0].offset= g_pk1_offset;
655  spec[numpks].column= dict_table->getColumn("BL1");
656  spec[numpks].offset= g_blob1_offset;
657  spec[numpks].nullbit_byte_offset= g_blob1_null_offset;
658  spec[numpks].nullbit_bit_in_byte= 0;
659  if (g_opt.m_pk2chr.m_len != 0)
660  {
661  spec[1].column= dict_table->getColumn("PK2");
662  spec[1].offset= g_pk2_offset;
663  spec[2].column= dict_table->getColumn("PK3");
664  spec[2].offset= g_pk3_offset;
665  }
666  if (! g_opt.m_oneblob)
667  {
668  spec[numpks+1].column= dict_table->getColumn("BL2");
669  spec[numpks+1].offset= g_blob2_offset;
670  spec[numpks+1].nullbit_byte_offset= g_blob2_null_offset;
671  spec[numpks+1].nullbit_bit_in_byte= 0;
672  }
673  CHK((g_key_record= g_dic->createRecord(dict_table, &spec[0], numpks,
674  sizeof(spec[0]))) != 0);
675  CHK((g_blob_record= g_dic->createRecord(dict_table, &spec[numpks], numblobs,
676  sizeof(spec[0]))) != 0);
677  CHK((g_full_record= g_dic->createRecord(dict_table, &spec[0], numpks+numblobs,
678  sizeof(spec[0]))) != 0);
679 
680  if (g_opt.m_pk2chr.m_len != 0)
681  {
682  const NdbDictionary::Index *dict_index;
683  CHK((dict_index= g_dic->getIndex(g_opt.m_x1name, g_opt.m_tname)) != 0);
684  CHK((g_idx_record= g_dic->createRecord(dict_index, &spec[1], 2,
685  sizeof(spec[0]))) != 0);
686  CHK((dict_index= g_dic->getIndex(g_opt.m_x2name, g_opt.m_tname)) != 0);
687  CHK((g_ord_record= g_dic->createRecord(dict_index, &spec[1], 1,
688  sizeof(spec[0]))) != 0);
689  }
690 
691  return 0;
692 }
693 
694 // tuples
695 
696 struct Bval {
697  const Bcol& m_bcol;
698  char* m_val;
699  unsigned m_len;
700  char* m_buf; // read/write buffer
701  unsigned m_buflen;
702  int m_error_code; // for testing expected error code
703  Bval(const Bcol& bcol) :
704  m_bcol(bcol),
705  m_val(0),
706  m_len(0),
707  m_buf(0),
708  m_buflen(0),
709  m_error_code(0)
710  {}
711  ~Bval() { delete [] m_val; delete [] m_buf; }
712  void alloc() {
713  alloc(m_bcol.m_inline + m_bcol.m_partsize * g_opt.m_parts);
714  }
715  void alloc(unsigned buflen) {
716  m_buflen = buflen;
717  delete [] m_buf;
718  m_buf = new char [m_buflen];
719  trash();
720  }
721  void copyfrom(const Bval& v) {
722  m_len = v.m_len;
723  delete [] m_val;
724  if (v.m_val == 0)
725  m_val = 0;
726  else
727  m_val = (char*)memcpy(new char [m_len], v.m_val, m_len);
728  }
729  void trash() const {
730  assert(m_buf != 0);
731  memset(m_buf, 'x', m_buflen);
732  }
733 private:
734  Bval(const Bval&);
735  Bval& operator=(const Bval&);
736 };
737 
738 NdbOut&
739 operator<<(NdbOut& out, const Bval& v)
740 {
741  if (g_opt.m_min && v.m_val != 0) {
742  out << "[" << v.m_len << "]";
743  for (uint i = 0; i < v.m_len; i++) {
744  const Bcol& b = v.m_bcol;
745  if (i == b.m_inline ||
746  (i > b.m_inline && (i - b.m_inline) % b.m_partsize == 0))
747  out.print("|");
748  out.print("%c", v.m_val[i]);
749  }
750  }
751  return out;
752 }
753 
754 struct Tup {
755  bool m_exists; // exists in table
756  Uint32 m_pk1; // in V1 primary keys concatenated like keyinfo
757  char* m_pk2;
758  char* m_pk2eq; // equivalent (if case independent)
759  Uint16 m_pk3;
760  Bval m_bval1;
761  Bval m_bval2;
762  char *m_key_row;
763  char *m_row;
764  Uint32 m_frag;
765  Tup() :
766  m_exists(false),
767  m_pk2(new char [g_opt.m_pk2chr.m_totlen + 1]), // nullterm for convenience
768  m_pk2eq(new char [g_opt.m_pk2chr.m_totlen + 1]),
769  m_bval1(g_blob1),
770  m_bval2(g_blob2),
771  m_key_row(new char[g_rowsize]),
772  m_row(new char[g_rowsize]),
773  m_frag(~(Uint32)0)
774  {}
775  ~Tup() {
776  delete [] m_pk2;
777  m_pk2 = 0;
778  delete [] m_pk2eq;
779  m_pk2eq = 0;
780  delete [] m_key_row;
781  m_key_row= 0;
782  delete [] m_row;
783  m_row= 0;
784  }
785  // alloc buffers of max size
786  void alloc() {
787  m_bval1.alloc();
788  m_bval2.alloc();
789  }
790  void copyfrom(const Tup& tup) {
791  assert(m_pk1 == tup.m_pk1);
792  m_bval1.copyfrom(tup.m_bval1);
793  m_bval2.copyfrom(tup.m_bval2);
794  }
795  /*
796  * in V2 return pk2 or pk2eq at random
797  * in V1 mixed cases do not work in general due to key packing
798  * luckily they do work via mysql
799  */
800  char* pk2() {
801  if (g_opt.m_blob_version == 1)
802  return m_pk2;
803  return urandom(2) == 0 ? m_pk2 : m_pk2eq;
804  }
805  Uint32 getPartitionId(Uint32 numParts) const {
806  /* Only for UserDefined tables really */
807  return m_pk1 % numParts; // MySQLD hash(PK1) style partitioning
808  }
809 
810 private:
811  Tup(const Tup&);
812  Tup& operator=(const Tup&);
813 };
814 
815 static Tup* g_tups;
816 
817 static void
818 setUDpartId(const Tup& tup, NdbOperation* op)
819 {
820  const NdbDictionary::Table* tab= op->getTable();
821  if (tab->getFragmentType() == NdbDictionary::Object::UserDefined)
822  {
823  Uint32 partId= tup.getPartitionId(tab->getFragmentCount());
824  DBG("Setting partition id to " << partId << " out of " <<
825  tab->getFragmentCount());
826  op->setPartitionId(partId);
827  }
828 }
829 
830 static void
831 setUDpartIdNdbRecord(const Tup& tup,
832  const NdbDictionary::Table* tab,
834 {
835  opts.optionsPresent= 0;
836  if (tab->getFragmentType() == NdbDictionary::Object::UserDefined)
837  {
838  opts.optionsPresent= NdbOperation::OperationOptions::OO_PARTITION_ID;
839  opts.partitionId= tup.getPartitionId(tab->getFragmentCount());
840  }
841 }
842 
843 static void
844 calcBval(const Bcol& b, Bval& v, bool keepsize)
845 {
846  if (b.m_nullable && urandom(10) == 0) {
847  v.m_len = 0;
848  delete [] v.m_val;
849  v.m_val = 0;
850  v.m_buf = new char [1];
851  } else {
852  if (keepsize && v.m_val != 0)
853  ;
854  else if (urandom(10) == 0)
855  v.m_len = urandom(b.m_inline);
856  else
857  v.m_len = urandom(b.m_inline + g_opt.m_parts * b.m_partsize + 1);
858  delete [] v.m_val;
859  v.m_val = new char [v.m_len + 1];
860  for (unsigned i = 0; i < v.m_len; i++)
861  v.m_val[i] = 'a' + urandom(26);
862  v.m_val[v.m_len] = 0;
863  v.m_buf = new char [v.m_len];
864  }
865  v.m_buflen = v.m_len;
866  v.trash();
867 }
868 
869 static bool
870 conHasTimeoutError()
871 {
872  Uint32 code= g_con->getNdbError().code;
873  /* Indicate timeout for cases where LQH too slow responding
874  * (As can happen for disk based tuples with batching or
875  * lots of parts)
876  */
877  // 296 == Application timeout waiting for SCAN_NEXTREQ from API
878  // 297 == Error code in response to SCAN_NEXTREQ for timed-out scan
879  bool isTimeout= ((code == 274) || // General TC connection timeout
880  (code == 266)); // TC Scan frag timeout
881  if (!isTimeout)
882  ndbout << "Connection error is not timeout, but is "
883  << code << endl;
884 
885  return isTimeout;
886 }
887 
888 static
889 Uint32 conError()
890 {
891  return g_con->getNdbError().code;
892 }
893 
894 static void
895 calcBval(Tup& tup, bool keepsize)
896 {
897  calcBval(g_blob1, tup.m_bval1, keepsize);
898  if (! g_opt.m_oneblob)
899  calcBval(g_blob2, tup.m_bval2, keepsize);
900 }
901 
902 // dont remember what the keepsize was for..
903 static void
904 calcTups(bool keys, bool keepsize = false)
905 {
906  for (uint k = 0; k < g_opt.m_rows; k++) {
907  Tup& tup = g_tups[k];
908  if (keys) {
909  tup.m_pk1 = g_opt.m_pk1off + k;
910  {
911  const Chr& c = g_opt.m_pk2chr;
912  char* const p = tup.m_pk2;
913  char* const q = tup.m_pk2eq;
914  uint len = urandom(c.m_len + 1);
915  uint i = 0;
916  if (! c.m_fixed) {
917  *(uchar*)&p[0] = *(uchar*)&q[0] = len;
918  i++;
919  }
920  uint j = 0;
921  while (j < len) {
922  // mixed case for distribution check
923  if (urandom(3) == 0) {
924  uint u = urandom(26);
925  p[i] = 'A' + u;
926  q[i] = c.m_caseins ? 'a' + u : 'A' + u;
927  } else {
928  uint u = urandom(26);
929  p[i] = 'a' + u;
930  q[i] = c.m_caseins ? 'A' + u : 'a' + u;
931  }
932  i++;
933  j++;
934  }
935  while (j < c.m_bytelen) {
936  if (c.m_fixed)
937  p[i] = q[i] = 0x20;
938  else
939  p[i] = q[i] = '#'; // garbage
940  i++;
941  j++;
942  }
943  assert(i == c.m_totlen);
944  p[i] = q[i] = 0; // convenience
945  }
946  tup.m_pk3 = (Uint16)k;
947  }
948  calcBval(tup, keepsize);
949  }
950 }
951 
952 static void setBatchSizes()
953 {
954  if (g_opt.m_rbatch != 0)
955  {
956  Uint32 byteSize = (g_opt.m_rbatch == -1) ?
957  urandom(~Uint32(0)) :
958  g_opt.m_rbatch;
959 
960  DBG("Setting read batch size to " << byteSize
961  << " bytes.");
962  g_con->setMaxPendingBlobReadBytes(byteSize);
963  }
964 
965  if (g_opt.m_wbatch != 0)
966  {
967  Uint32 byteSize = (g_opt.m_wbatch == -1) ?
968  urandom(~Uint32(0)) :
969  g_opt.m_wbatch;
970 
971  DBG("Setting write batch size to " << byteSize
972  << " bytes.");
973  g_con->setMaxPendingBlobWriteBytes(byteSize);
974  }
975 }
976 
977 
978 // blob handle ops
979 // const version for NdbRecord defined operations
980 static int
981 getBlobHandles(const NdbOperation* opr)
982 {
983  CHK((g_bh1 = opr->getBlobHandle("BL1")) != 0);
984  if (! g_opt.m_oneblob)
985  CHK((g_bh2 = opr->getBlobHandle("BL2")) != 0);
986 
987  setBatchSizes();
988  return 0;
989 }
990 
991 // non-const version for NdbRecAttr defined operations
992 // and scans
993 static int
994 getBlobHandles(NdbOperation* opr)
995 {
996  CHK((g_bh1 = opr->getBlobHandle("BL1")) != 0);
997  if (! g_opt.m_oneblob)
998  CHK((g_bh2 = opr->getBlobHandle("BL2")) != 0);
999  setBatchSizes();
1000  return 0;
1001 }
1002 
1003 
1004 static int
1005 getBlobHandles(NdbScanOperation* ops)
1006 {
1007  CHK((g_bh1 = ops->getBlobHandle("BL1")) != 0);
1008  if (! g_opt.m_oneblob)
1009  CHK((g_bh2 = ops->getBlobHandle("BL2")) != 0);
1010  setBatchSizes();
1011  return 0;
1012 }
1013 
1014 static int
1015 getBlobLength(NdbBlob* h, unsigned& len)
1016 {
1017  Uint64 len2 = (unsigned)-1;
1018  CHK(h->getLength(len2) == 0);
1019  len = (unsigned)len2;
1020  assert(len == len2);
1021  bool isNull;
1022  CHK(h->getNull(isNull) == 0);
1023  DBG("getBlobLength " << h->getColumn()->getName() << " len=" << len << " null=" << isNull);
1024  return 0;
1025 }
1026 
1027 // setValue / getValue
1028 
1029 static int
1030 setBlobValue(NdbBlob* h, const Bval& v, int error_code = 0)
1031 {
1032  bool null = (v.m_val == 0);
1033  bool isNull;
1034  unsigned len;
1035  DBG("setValue " << h->getColumn()->getName() << " len=" << v.m_len << " null=" << null << " " << v);
1036  if (null) {
1037  CHK(h->setNull() == 0 || h->getNdbError().code == error_code);
1038  if (error_code)
1039  return 0;
1040  isNull = false;
1041  CHK(h->getNull(isNull) == 0 && isNull == true);
1042  CHK(getBlobLength(h, len) == 0 && len == 0);
1043  } else {
1044  CHK(h->setValue(v.m_val, v.m_len) == 0 || h->getNdbError().code == error_code);
1045  if (error_code)
1046  return 0;
1047  CHK(h->getNull(isNull) == 0 && isNull == false);
1048  CHK(getBlobLength(h, len) == 0 && len == v.m_len);
1049  }
1050  return 0;
1051 }
1052 
1053 static int
1054 setBlobValue(const Tup& tup, int error_code = 0)
1055 {
1056  CHK(setBlobValue(g_bh1, tup.m_bval1, error_code) == 0);
1057  if (! g_opt.m_oneblob)
1058  CHK(setBlobValue(g_bh2, tup.m_bval2, error_code) == 0);
1059  return 0;
1060 }
1061 
1062 static int
1063 getBlobValue(NdbBlob* h, const Bval& v)
1064 {
1065  DBG("getValue " << h->getColumn()->getName() << " buflen=" << v.m_buflen);
1066  CHK(h->getValue(v.m_buf, v.m_buflen) == 0);
1067  return 0;
1068 }
1069 
1070 static int
1071 getBlobValue(const Tup& tup)
1072 {
1073  CHK(getBlobValue(g_bh1, tup.m_bval1) == 0);
1074  if (! g_opt.m_oneblob)
1075  CHK(getBlobValue(g_bh2, tup.m_bval2) == 0);
1076  return 0;
1077 }
1078 
1079 /*
1080  * presetBH1
1081  * This method controls how BL1 is pre-set (using setValue()) for
1082  * inserts and writes that later use writeData to set the correct
1083  * value.
1084  * Sometimes it is set to length zero, other times to the value
1085  * for some other row in the dataset. This tests that the writeData()
1086  * functionality correctly overwrites values written in the
1087  * prepare phase.
1088  */
1089 static int presetBH1(int rowNumber)
1090 {
1091  unsigned int variant = urandom(2);
1092  DBG("presetBH1 - Variant=" << variant);
1093  if (variant==0)
1094  CHK(g_bh1->setValue("", 0) == 0);
1095  else
1096  {
1097  CHK(setBlobValue(g_tups[(rowNumber+1) % g_opt.m_rows]) == 0); // Pre-set to something else
1098  };
1099  return 0;
1100 }
1101 
/*
 * Check the blob handle's NULL/length state against Bval v and, for a
 * non-null value, compare the received buffer byte-for-byte with the
 * expected value.  Assumes the transaction has already executed so the
 * buffer registered with getValue() is filled in.
 */
static int
verifyBlobValue(NdbBlob* h, const Bval& v)
{
  bool null = (v.m_val == 0);
  bool isNull;
  unsigned len;
  if (null) {
    // initialize to the opposite value to prove getNull() really sets it
    isNull = false;
    CHK(h->getNull(isNull) == 0 && isNull == true);
    CHK(getBlobLength(h, len) == 0 && len == 0);
  } else {
    isNull = true;
    CHK(h->getNull(isNull) == 0 && isNull == false);
    CHK(getBlobLength(h, len) == 0 && len == v.m_len);
    // byte-wise compare of expected value vs received buffer
    for (unsigned i = 0; i < v.m_len; i++)
      CHK(v.m_val[i] == v.m_buf[i]);
  }
  return 0;
}
1121 
/*
 * Verify both blob values of a row (only BL1 with one blob column).
 */
static int
verifyBlobValue(const Tup& tup)
{
  CHK(verifyBlobValue(g_bh1, tup.m_bval1) == 0);
  if (! g_opt.m_oneblob)
    CHK(verifyBlobValue(g_bh2, tup.m_bval2) == 0);
  return 0;
}
1130 
1131 // readData / writeData
1132 
/*
 * Write Bval v to the blob via the writeData() API, in randomly sized
 * chunks (or one full-size chunk with option -full) to exercise partial
 * writes.  v.m_error_code, when non-zero, is the NDB error expected
 * from setNull()/truncate(); the rest of the write is then skipped.
 * Returns 0 on success, -1 on a failed CHK.
 */
static int
writeBlobData(NdbBlob* h, const Bval& v)
{
  bool null = (v.m_val == 0);
  bool isNull;
  unsigned len;
  DBG("write " << h->getColumn()->getName() << " len=" << v.m_len << " null=" << null << " " << v);
  int error_code = v.m_error_code;
  if (null) {
    CHK(h->setNull() == 0 || h->getNdbError().code == error_code);
    if (error_code)
      return 0;
    isNull = false;
    CHK(h->getNull(isNull) == 0 && isNull == true);
    CHK(getBlobLength(h, len) == 0 && len == 0);
  } else {
    // truncate first so a longer pre-set value cannot survive the write
    CHK(h->truncate(v.m_len) == 0 || h->getNdbError().code == error_code);
    if (error_code)
      return 0;
    CHK(h->setPos(0) == 0); // Reset write pointer in case there was a previous write.
    unsigned n = 0;
    do {
      // chunk size: whole value with -full, else random in [0, m_len]
      unsigned m = g_opt.m_full ? v.m_len : urandom(v.m_len + 1);
      if (m > v.m_len - n)
        m = v.m_len - n;
      DBG("write pos=" << n << " cnt=" << m);
      CHK(h->writeData(v.m_val + n, m) == 0);
      n += m;
    } while (n < v.m_len);
    assert(n == v.m_len);
    isNull = true;
    CHK(h->getNull(isNull) == 0 && isNull == false);
    CHK(getBlobLength(h, len) == 0 && len == v.m_len);
  }
  return 0;
}
1169 
/*
 * Write both blob values of a row via writeData(), propagating an
 * expected error code (if any) into each Bval first.
 */
static int
writeBlobData(Tup& tup, int error_code = 0)
{
  tup.m_bval1.m_error_code = error_code;
  CHK(writeBlobData(g_bh1, tup.m_bval1) == 0);
  if (! g_opt.m_oneblob) {
    tup.m_bval2.m_error_code = error_code;
    CHK(writeBlobData(g_bh2, tup.m_bval2) == 0);
  }
  return 0;
}
1181 
/*
 * Read the blob back via the readData() API in randomly sized chunks
 * (or one full-size chunk with -full) and compare against Bval v.
 * The transaction is executed (NoCommit) after the reads because the
 * data only becomes visible in the buffers after execute.
 */
static int
readBlobData(NdbBlob* h, const Bval& v)
{
  bool null = (v.m_val == 0);
  bool isNull;
  unsigned len;
  DBG("read " << h->getColumn()->getName() << " len=" << v.m_len << " null=" << null);
  if (null) {
    isNull = false;
    CHK(h->getNull(isNull) == 0 && isNull == true);
    CHK(getBlobLength(h, len) == 0 && len == 0);
  } else {
    isNull = true;
    CHK(h->getNull(isNull) == 0 && isNull == false);
    CHK(getBlobLength(h, len) == 0 && len == v.m_len);
    // scribble over the receive buffer so stale data cannot pass the compare
    v.trash();
    unsigned n = 0;
    while (n < v.m_len) {
      unsigned m = g_opt.m_full ? v.m_len : urandom(v.m_len + 1);
      if (m > v.m_len - n)
        m = v.m_len - n;
      DBG("read pos=" << n << " cnt=" << m);
      // readData may update m in-place; inside the value it must not shrink
      const unsigned m2 = m;
      CHK(h->readData(v.m_buf + n, m) == 0);
      CHK(m2 == m);
      n += m;
    }
    assert(n == v.m_len);
    // need to execute to see the data
    CHK(g_con->execute(NoCommit) == 0);
    for (unsigned i = 0; i < v.m_len; i++)
      CHK(v.m_val[i] == v.m_buf[i]);
  }
  return 0;
}
1217 
/*
 * Read and verify both blob values of a row via readData().
 */
static int
readBlobData(const Tup& tup)
{
  CHK(readBlobData(g_bh1, tup.m_bval1) == 0);
  if (! g_opt.m_oneblob)
    CHK(readBlobData(g_bh2, tup.m_bval2) == 0);
  return 0;
}
1226 
1227 // hooks
1228 
1229 static NdbBlob::ActiveHook blobWriteHook;
1230 
/*
 * NdbBlob active hook for writes: invoked by the blob implementation
 * with the Bval (passed as arg at setActiveHook time); writes the
 * value with writeBlobData().
 */
static int
blobWriteHook(NdbBlob* h, void* arg)
{
  DBG("blobWriteHook");
  Bval& v = *(Bval*)arg;
  CHK(writeBlobData(h, v) == 0);
  return 0;
}
1239 
1240 
/*
 * Install blobWriteHook on handle h with v as its argument, recording
 * the expected error code (if any) in v first.
 */
static int
setBlobWriteHook(NdbBlob* h, Bval& v, int error_code = 0)
{
  DBG("setBlobWriteHook");
  v.m_error_code = error_code;
  CHK(h->setActiveHook(blobWriteHook, &v) == 0);
  return 0;
}
1249 
/*
 * Install write hooks on both blob handles of the current operation.
 */
static int
setBlobWriteHook(Tup& tup, int error_code = 0)
{
  CHK(setBlobWriteHook(g_bh1, tup.m_bval1, error_code) == 0);
  if (! g_opt.m_oneblob)
    CHK(setBlobWriteHook(g_bh2, tup.m_bval2, error_code) == 0);
  return 0;
}
1258 
1259 static NdbBlob::ActiveHook blobReadHook;
1260 
// no PK yet to identify tuple so just read the value
/*
 * NdbBlob active hook for reads: fetch the blob length, allocate the
 * Bval buffer and read the whole value in one readData() call.
 */
static int
blobReadHook(NdbBlob* h, void* arg)
{
  DBG("blobReadHook");
  Bval& v = *(Bval*)arg;
  unsigned len;
  CHK(getBlobLength(h, len) == 0);
  v.alloc(len);
  // pass "unlimited" length; readData() updates maxlen to bytes read
  Uint32 maxlen = 0xffffffff;
  CHK(h->readData(v.m_buf, maxlen) == 0);
  DBG("read " << maxlen << " bytes");
  CHK(len == maxlen);
  return 0;
}
1276 
/*
 * Install blobReadHook on handle h with v as its argument.
 */
static int
setBlobReadHook(NdbBlob* h, Bval& v)
{
  DBG("setBlobReadHook");
  CHK(h->setActiveHook(blobReadHook, &v) == 0);
  return 0;
}
1284 
/*
 * Install read hooks on both blob handles of the current operation.
 */
static int
setBlobReadHook(Tup& tup)
{
  CHK(setBlobReadHook(g_bh1, tup.m_bval1) == 0);
  if (! g_opt.m_oneblob)
    CHK(setBlobReadHook(g_bh2, tup.m_bval2) == 0);
  return 0;
}
1293 
1294 static int
1295 tryRowLock(Tup& tup, bool exclusive)
1296 {
1297  NdbTransaction* testTrans;
1298  NdbOperation* testOp;
1299  CHK((testTrans = g_ndb->startTransaction()) != NULL);
1300  CHK((testOp = testTrans->getNdbOperation(g_opt.m_tname)) != 0);
1301  CHK(testOp->readTuple(exclusive?
1303  NdbOperation::LM_Read) == 0);
1304  CHK(testOp->equal("PK1", tup.m_pk1) == 0);
1305  if (g_opt.m_pk2chr.m_len != 0) {
1306  CHK(testOp->equal("PK2", tup.m_pk2) == 0);
1307  CHK(testOp->equal("PK3", tup.m_pk3) == 0);
1308  }
1309  setUDpartId(tup, testOp);
1310 
1311  if (testTrans->execute(Commit, AbortOnError) == 0)
1312  {
1313  /* Successfully claimed lock */
1314  testTrans->close();
1315  return 0;
1316  }
1317  else
1318  {
1319  if (testTrans->getNdbError().code == 266)
1320  {
1321  /* Error as expected for lock already claimed */
1322  testTrans->close();
1323  return -2;
1324  }
1325  else
1326  {
1327  DBG("Error on tryRowLock, exclusive = " << exclusive
1328  << endl << testTrans->getNdbError() << endl);
1329  testTrans->close();
1330  return -1;
1331  }
1332  }
1333 }
1334 
1335 
/*
 * Expect the row to be locked elsewhere: an exclusive read attempt
 * from a fresh transaction must fail with the lock-claimed result.
 */
static int
verifyRowLocked(Tup& tup)
{
  CHK(tryRowLock(tup, true) == -2);
  return 0;
}
1342 
/*
 * Expect the row to be unlocked: an exclusive read attempt from a
 * fresh transaction must succeed.
 */
static int
verifyRowNotLocked(Tup& tup)
{
  CHK(tryRowLock(tup, true) == 0);
  return 0;
}
1349 
1350 // verify blob data
1351 
/*
 * Verify the blob head + inline bytes read from the main-table blob
 * column: NULL-ness, the unpacked head's length field, and the inline
 * data prefix (at most b.m_inline bytes).
 */
static int
verifyHeadInline(const Bcol& b, const Bval& v, NdbRecAttr* ra)
{
  if (v.m_val == 0) {
    CHK(ra->isNULL() == 1);
  } else {
    CHK(ra->isNULL() == 0);
    NdbBlob::Head head;
    NdbBlob::unpackBlobHead(head, ra->aRef(), b.m_version);
    CHK(head.length == v.m_len);
    // inline data follows the (version-dependent) head
    const char* data = ra->aRef() + head.headsize;
    for (unsigned i = 0; i < head.length && i < b.m_inline; i++)
      CHK(data[i] == v.m_val[i]);
  }
  return 0;
}
1368 
1369 static int
1370 verifyHeadInline(Tup& tup)
1371 {
1372  DBG("verifyHeadInline pk1=" << hex << tup.m_pk1);
1373  CHK((g_con = g_ndb->startTransaction()) != 0);
1374  CHK((g_opr = g_con->getNdbOperation(g_opt.m_tname)) != 0);
1375  CHK(g_opr->readTuple() == 0);
1376  CHK(g_opr->equal("PK1", tup.m_pk1) == 0);
1377  if (g_opt.m_pk2chr.m_len != 0) {
1378  CHK(g_opr->equal("PK2", tup.pk2()) == 0);
1379  CHK(g_opr->equal("PK3", (char*)&tup.m_pk3) == 0);
1380  }
1381  setUDpartId(tup, g_opr);
1382  NdbRecAttr* ra1;
1383  NdbRecAttr* ra2;
1384  NdbRecAttr* ra_frag;
1385  CHK((ra1 = g_opr->getValue("BL1")) != 0);
1386  if (! g_opt.m_oneblob)
1387  CHK((ra2 = g_opr->getValue("BL2")) != 0);
1388  CHK((ra_frag = g_opr->getValue(NdbDictionary::Column::FRAGMENT)) != 0);
1389  if (tup.m_exists) {
1390  CHK(g_con->execute(Commit, AbortOnError) == 0);
1391  tup.m_frag = ra_frag->u_32_value();
1392  DBG("fragment id: " << tup.m_frag);
1393  DBG("verifyHeadInline BL1");
1394  CHK(verifyHeadInline(g_blob1, tup.m_bval1, ra1) == 0);
1395  if (! g_opt.m_oneblob) {
1396  DBG("verifyHeadInline BL2");
1397  CHK(verifyHeadInline(g_blob2, tup.m_bval2, ra2) == 0);
1398  }
1399  } else {
1400  CHK(g_con->execute(Commit, AbortOnError) == -1 &&
1401  g_con->getNdbError().code == 626);
1402  }
1403  g_ndb->closeTransaction(g_con);
1404  g_opr = 0;
1405  g_con = 0;
1406  return 0;
1407 }
1408 
/*
 * Decode the 2-byte little-endian length prefix in front of a
 * variable-size blob part.
 */
static unsigned
getvarsize(const char* buf)
{
  const unsigned char* bytes = (const unsigned char*)buf;
  const unsigned lo = bytes[0];
  const unsigned hi = bytes[1];
  return (hi << 8) | lo;
}
1415 
1416 static int
1417 verifyBlobTable(const Bval& v, Uint32 pk1, Uint32 frag, bool exists)
1418 {
1419  const Bcol& b = v.m_bcol;
1420  DBG("verify " << b.m_btname << " pk1=" << hex << pk1);
1421  NdbRecAttr* ra_pk = 0; // V1
1422  NdbRecAttr* ra_pk1 = 0; // V2
1423  NdbRecAttr* ra_pk2 = 0; // V2
1424  NdbRecAttr* ra_pk3 = 0; // V2
1425  NdbRecAttr* ra_part = 0;
1426  NdbRecAttr* ra_data = 0;
1427  NdbRecAttr* ra_frag = 0;
1428  Uint32 opTimeoutRetries= g_opt.m_timeout_retries;
1429  enum OpState opState;
1430 
1431  do
1432  {
1433  opState= Normal;
1434  CHK((g_con = g_ndb->startTransaction()) != 0);
1435  CHK((g_ops = g_con->getNdbScanOperation(b.m_btname)) != 0);
1437  g_scanFlags,
1438  g_batchSize,
1439  g_parallel) == 0);
1440  if (b.m_version == 1) {
1441  CHK((ra_pk = g_ops->getValue("PK")) != 0);
1442  CHK((ra_part = g_ops->getValue("PART")) != 0);
1443  CHK((ra_data = g_ops->getValue("DATA")) != 0);
1444  } else {
1445  CHK((ra_pk1 = g_ops->getValue("PK1")) != 0);
1446  if (g_opt.m_pk2chr.m_len != 0) {
1447  CHK((ra_pk2 = g_ops->getValue("PK2")) != 0);
1448  CHK((ra_pk3 = g_ops->getValue("PK3")) != 0);
1449  }
1450  CHK((ra_part = g_ops->getValue("NDB$PART")) != 0);
1451  CHK((ra_data = g_ops->getValue("NDB$DATA")) != 0);
1452  }
1453 
1454  /* No partition id set on Blob part table scan so that we
1455  * find any misplaced parts in other partitions
1456  */
1457 
1458  CHK((ra_frag = g_ops->getValue(NdbDictionary::Column::FRAGMENT)) != 0);
1459  CHK(g_con->execute(NoCommit) == 0);
1460  unsigned partcount;
1461  if (! exists || v.m_len <= b.m_inline)
1462  partcount = 0;
1463  else
1464  partcount = (v.m_len - b.m_inline + b.m_partsize - 1) / b.m_partsize;
1465  char* seen = new char [partcount];
1466  memset(seen, 0, partcount);
1467  while (1) {
1468  int ret= g_ops->nextResult();
1469  if (ret == -1)
1470  {
1471  /* Timeout? */
1472  CHK(conHasTimeoutError());
1473 
1474  /* Break out and restart scan unless we've
1475  * run out of attempts
1476  */
1477  DISP("Parts table scan failed due to timeout("
1478  << conError() <<"). Retries left : "
1479  << opTimeoutRetries -1);
1480  CHK(--opTimeoutRetries);
1481 
1482  opState= Retrying;
1483  sleep(1);
1484  break;
1485  }
1486  CHK(opState == Normal);
1487  CHK((ret == 0) || (ret == 1));
1488  if (ret == 1)
1489  break;
1490  if (b.m_version == 1) {
1491  if (pk1 != ra_pk->u_32_value())
1492  continue;
1493  } else {
1494  if (pk1 != ra_pk1->u_32_value())
1495  continue;
1496  }
1497  Uint32 part = ra_part->u_32_value();
1498  Uint32 frag2 = ra_frag->u_32_value();
1499  DBG("part " << part << " of " << partcount << " from fragment " << frag2);
1500  CHK(part < partcount && ! seen[part]);
1501  seen[part] = 1;
1502  unsigned n = b.m_inline + part * b.m_partsize;
1503  assert(exists && v.m_val != 0 && n < v.m_len);
1504  unsigned m = v.m_len - n;
1505  if (m > b.m_partsize)
1506  m = b.m_partsize;
1507  const char* data = ra_data->aRef();
1508  if (b.m_version == 1)
1509  ;
1510  else {
1511  // Blob v2 stored on disk is currently fixed
1512  // size, so we skip these tests.
1513  if (!g_usingDisk)
1514  {
1515  unsigned sz = getvarsize(data);
1516  DBG("varsize " << sz);
1517  DBG("b.m_partsize " << b.m_partsize);
1518  CHK(sz <= b.m_partsize);
1519  data += 2;
1520  if (part + 1 < partcount)
1521  CHK(sz == b.m_partsize);
1522  else
1523  CHK(sz == m);
1524  }
1525  }
1526  CHK(memcmp(data, v.m_val + n, m) == 0);
1527  if (b.m_version == 1 ||
1528  g_usingDisk ) { // Blob v2 stored on disk is currently
1529  // fixed size, so we do these tests.
1530  char fillchr;
1531  if (b.m_type == NdbDictionary::Column::Text)
1532  fillchr = 0x20;
1533  else
1534  fillchr = 0x0;
1535  uint i = m;
1536  while (i < b.m_partsize) {
1537  CHK(data[i] == fillchr);
1538  i++;
1539  }
1540  }
1541  DBG("frags main=" << frag << " blob=" << frag2 << " stripe=" << b.m_stripe);
1542  if (b.m_stripe == 0)
1543  CHK(frag == frag2);
1544  }
1545 
1546  if (opState == Normal)
1547  {
1548  for (unsigned i = 0; i < partcount; i++)
1549  CHK(seen[i] == 1);
1550  }
1551  delete [] seen;
1552  g_ops->close();
1553  g_ndb->closeTransaction(g_con);
1554  } while (opState == Retrying);
1555 
1556  g_ops = 0;
1557  g_con = 0;
1558  return 0;
1559 }
1560 
/*
 * Verify the parts tables of both blob columns of a row.
 */
static int
verifyBlobTable(const Tup& tup)
{
  CHK(verifyBlobTable(tup.m_bval1, tup.m_pk1, tup.m_frag, tup.m_exists) == 0);
  if (! g_opt.m_oneblob)
    CHK(verifyBlobTable(tup.m_bval2, tup.m_pk1, tup.m_frag, tup.m_exists) == 0);
  return 0;
}
1569 
/*
 * Full verification pass over all rows: check head/inline data in the
 * main table and the contents of the blob parts tables.
 */
static int
verifyBlob()
{
  for (unsigned k = 0; k < g_opt.m_rows; k++) {
    Tup& tup = g_tups[k];
    DBG("verifyBlob pk1=" << hex << tup.m_pk1);
    CHK(verifyHeadInline(tup) == 0);
    CHK(verifyBlobTable(tup) == 0);
  }
  return 0;
}
1581 
/*
 * Verify the row identified by tup is locked by some other transaction:
 * an LM_Exclusive read from a fresh transaction must fail with NDB
 * error 266 (lock wait timeout).  Returns 0 when the lock was detected.
 */
static int
rowIsLocked(Tup& tup)
{
  NdbTransaction* testTrans;
  CHK((testTrans = g_ndb->startTransaction()) != 0);

  NdbOperation* testOp;
  CHK((testOp = testTrans->getNdbOperation(g_opt.m_tname)) != 0);

  CHK(testOp->readTuple(NdbOperation::LM_Exclusive) == 0);
  CHK(testOp->equal("PK1", tup.m_pk1) == 0);
  if (g_opt.m_pk2chr.m_len != 0)
  {
    CHK(testOp->equal("PK2", tup.m_pk2) == 0);
    CHK(testOp->equal("PK3", tup.m_pk3) == 0);
  }
  setUDpartId(tup, testOp);
  CHK(testOp->getValue("PK1") != 0);

  // must time out waiting for the other transaction's lock
  CHK(testTrans->execute(Commit) == -1);
  CHK(testTrans->getNdbError().code == 266);

  testTrans->close();

  return 0;
}
1608 
1609 // operations
1610 
1611 // pk ops
1612 
1613 static int
1614 insertPk(int style, int api)
1615 {
1616  DBG("--- insertPk " << stylename[style] << " " << apiName[api] << " ---");
1617  unsigned n = 0;
1618  unsigned k = 0;
1619  Uint32 opTimeoutRetries= g_opt.m_timeout_retries;
1620  enum OpState opState;
1621 
1622  do
1623  {
1624  opState= Normal;
1625  CHK((g_con = g_ndb->startTransaction()) != 0);
1626  for (; k < g_opt.m_rows; k++) {
1627  Tup& tup = g_tups[k];
1628  DBG("insertPk pk1=" << hex << tup.m_pk1);
1629  if (api == API_RECATTR)
1630  {
1631  CHK((g_opr = g_con->getNdbOperation(g_opt.m_tname)) != 0);
1632  CHK(g_opr->insertTuple() ==0);
1633  CHK(g_opr->equal("PK1", tup.m_pk1) == 0);
1634  if (g_opt.m_pk2chr.m_len != 0)
1635  {
1636  CHK(g_opr->equal("PK2", tup.m_pk2) == 0);
1637  CHK(g_opr->equal("PK3", tup.m_pk3) == 0);
1638  }
1639  setUDpartId(tup, g_opr);
1640  CHK(getBlobHandles(g_opr) == 0);
1641  }
1642  else
1643  {
1644  memcpy(&tup.m_row[g_pk1_offset], &tup.m_pk1, sizeof(tup.m_pk1));
1645  if (g_opt.m_pk2chr.m_len != 0) {
1646  memcpy(&tup.m_row[g_pk2_offset], tup.m_pk2, g_opt.m_pk2chr.m_totlen);
1647  memcpy(&tup.m_row[g_pk3_offset], &tup.m_pk3, sizeof(tup.m_pk3));
1648  }
1650  setUDpartIdNdbRecord(tup,
1651  g_ndb->getDictionary()->getTable(g_opt.m_tname),
1652  opts);
1653  CHK((g_const_opr = g_con->insertTuple(g_full_record,
1654  tup.m_row,
1655  NULL,
1656  &opts,
1657  sizeof(opts))) != 0);
1658  CHK(getBlobHandles(g_const_opr) == 0);
1659  }
1660  bool timeout= false;
1661  if (style == 0) {
1662  CHK(setBlobValue(tup) == 0);
1663  } else if (style == 1) {
1664  CHK(presetBH1(k) == 0);
1665  CHK(setBlobWriteHook(tup) == 0);
1666  } else {
1667  CHK(presetBH1(k) == 0);
1668  CHK(g_con->execute(NoCommit) == 0);
1669  if (writeBlobData(tup) == -1)
1670  CHK((timeout= conHasTimeoutError()) == true);
1671  }
1672 
1673  if (!timeout &&
1674  (++n == g_opt.m_batch)) {
1675  if (g_con->execute(Commit) == 0)
1676  {
1677  g_ndb->closeTransaction(g_con);
1678  CHK((g_con = g_ndb->startTransaction()) != 0);
1679  n = 0;
1680  }
1681  else
1682  {
1683  CHK((timeout = conHasTimeoutError()) == true);
1684  n-= 1;
1685  }
1686  }
1687 
1688  if (timeout)
1689  {
1690  /* Timeout */
1691  DISP("Insert failed due to timeout("
1692  << conError() <<") "
1693  << " Operations lost : " << n - 1
1694  << " Retries left : "
1695  << opTimeoutRetries -1);
1696  CHK(--opTimeoutRetries);
1697 
1698  k = k - n;
1699  n = 0;
1700  opState= Retrying;
1701  sleep(1);
1702  break;
1703  }
1704 
1705  g_const_opr = 0;
1706  g_opr = 0;
1707  tup.m_exists = true;
1708  }
1709  if (opState == Normal)
1710  {
1711  if (n != 0) {
1712  CHK(g_con->execute(Commit) == 0);
1713  n = 0;
1714  }
1715  }
1716  g_ndb->closeTransaction(g_con);
1717  } while (opState == Retrying);
1718  g_con = 0;
1719  return 0;
1720 }
1721 
1722 static int
1723 readPk(int style, int api)
1724 {
1725  DBG("--- readPk " << stylename[style] <<" " << apiName[api] << " ---");
1726  for (unsigned k = 0; k < g_opt.m_rows; k++) {
1727  Tup& tup = g_tups[k];
1728  Uint32 opTimeoutRetries= g_opt.m_timeout_retries;
1729  OpState opState;
1730 
1731  do
1732  {
1733  opState= Normal;
1734  DBG("readPk pk1=" << hex << tup.m_pk1);
1735  CHK((g_con = g_ndb->startTransaction()) != 0);
1737  switch(urandom(3))
1738  {
1739  case 0:
1740  lm = NdbOperation::LM_Read;
1741  break;
1742  case 1:
1744  break;
1745  default:
1746  break;
1747  }
1748  if (api == API_RECATTR)
1749  {
1750  CHK((g_opr = g_con->getNdbOperation(g_opt.m_tname)) != 0);
1751  CHK(g_opr->readTuple(lm) == 0);
1752  CHK(g_opr->equal("PK1", tup.m_pk1) == 0);
1753  if (g_opt.m_pk2chr.m_len != 0)
1754  {
1755  CHK(g_opr->equal("PK2", tup.m_pk2) == 0);
1756  CHK(g_opr->equal("PK3", tup.m_pk3) == 0);
1757  }
1758  setUDpartId(tup, g_opr);
1759  CHK(getBlobHandles(g_opr) == 0);
1760  }
1761  else
1762  { // NdbRecord
1763  memcpy(&tup.m_key_row[g_pk1_offset], &tup.m_pk1, sizeof(tup.m_pk1));
1764  if (g_opt.m_pk2chr.m_len != 0) {
1765  memcpy(&tup.m_key_row[g_pk2_offset], tup.pk2(), g_opt.m_pk2chr.m_totlen);
1766  memcpy(&tup.m_key_row[g_pk3_offset], &tup.m_pk3, sizeof(tup.m_pk3));
1767  }
1769  setUDpartIdNdbRecord(tup,
1770  g_ndb->getDictionary()->getTable(g_opt.m_tname),
1771  opts);
1772  CHK((g_const_opr = g_con->readTuple(g_key_record, tup.m_key_row,
1773  g_blob_record, tup.m_row,
1774  lm,
1775  NULL,
1776  &opts,
1777  sizeof(opts))) != 0);
1778 
1779  CHK(getBlobHandles(g_const_opr) == 0);
1780  }
1781  bool timeout= false;
1782  if (style == 0) {
1783  CHK(getBlobValue(tup) == 0);
1784  } else if (style == 1) {
1785  CHK(setBlobReadHook(tup) == 0);
1786  } else {
1787  CHK(g_con->execute(NoCommit) == 0);
1788  if (readBlobData(tup) == -1)
1789  CHK((timeout= conHasTimeoutError()) == true);
1790  }
1791  if (!timeout)
1792  {
1793  if (urandom(200) == 0)
1794  {
1795  if (g_con->execute(NoCommit) == 0)
1796  {
1797  /* Verify row is locked */
1798  //ndbout << "Checking row is locked for lm "
1799  // << lm << endl;
1800  CHK(rowIsLocked(tup) == 0);
1801  CHK(g_con->execute(Commit) == 0);
1802  }
1803  else
1804  {
1805  CHK((timeout= conHasTimeoutError()) == true);
1806  }
1807  }
1808  else
1809  {
1810  if (g_con->execute(Commit) != 0)
1811  {
1812  CHK((timeout= conHasTimeoutError()) == true);
1813  }
1814  }
1815  }
1816  if (timeout)
1817  {
1818  DISP("ReadPk failed due to timeout("
1819  << conError() <<") Retries left : "
1820  << opTimeoutRetries -1);
1821  CHK(--opTimeoutRetries);
1822  opState= Retrying;
1823  sleep(1);
1824  }
1825  else
1826  {
1827  // verify lock mode upgrade
1828  CHK((g_opr?g_opr:g_const_opr)->getLockMode() == NdbOperation::LM_Read);
1829 
1830  if (style == 0 || style == 1) {
1831  CHK(verifyBlobValue(tup) == 0);
1832  }
1833  }
1834  g_ndb->closeTransaction(g_con);
1835  } while (opState == Retrying);
1836  g_opr = 0;
1837  g_const_opr = 0;
1838  g_con = 0;
1839  }
1840  return 0;
1841 }
1842 
1843 static int
1844 readLockPk(int style, int api)
1845 {
1846  DBG("--- readLockPk " << stylename[style] <<" " << apiName[api] << " ---");
1847  for (unsigned k = 0; k < g_opt.m_rows; k++) {
1848  Tup& tup = g_tups[k];
1849  Uint32 opTimeoutRetries= g_opt.m_timeout_retries;
1850  OpState opState;
1851 
1852  do
1853  {
1854  opState= Normal;
1855  DBG("readLockPk pk1=" << hex << tup.m_pk1);
1856  CHK((g_con = g_ndb->startTransaction()) != 0);
1858  switch(urandom(4))
1859  {
1860  case 0:
1862  break;
1863  case 1:
1864  lm = NdbOperation::LM_Read;
1865  break;
1866  case 2:
1868  default:
1869  break;
1870  }
1871 
1872  bool manualUnlock = ( (lm == NdbOperation::LM_Read) ||
1873  (lm == NdbOperation::LM_Exclusive));
1874 
1875  if (api == API_RECATTR)
1876  {
1877  CHK((g_opr = g_con->getNdbOperation(g_opt.m_tname)) != 0);
1878  CHK(g_opr->readTuple(lm) == 0);
1879 
1880  CHK(g_opr->equal("PK1", tup.m_pk1) == 0);
1881  if (g_opt.m_pk2chr.m_len != 0)
1882  {
1883  CHK(g_opr->equal("PK2", tup.m_pk2) == 0);
1884  CHK(g_opr->equal("PK3", tup.m_pk3) == 0);
1885  }
1886  setUDpartId(tup, g_opr);
1887  CHK(getBlobHandles(g_opr) == 0);
1888  if (manualUnlock)
1889  {
1890  CHK(g_opr->getLockHandle() != NULL);
1891  }
1892  }
1893  else
1894  { // NdbRecord
1895  memcpy(&tup.m_key_row[g_pk1_offset], &tup.m_pk1, sizeof(tup.m_pk1));
1896  if (g_opt.m_pk2chr.m_len != 0) {
1897  memcpy(&tup.m_key_row[g_pk2_offset], tup.pk2(), g_opt.m_pk2chr.m_totlen);
1898  memcpy(&tup.m_key_row[g_pk3_offset], &tup.m_pk3, sizeof(tup.m_pk3));
1899  }
1901  setUDpartIdNdbRecord(tup,
1902  g_ndb->getDictionary()->getTable(g_opt.m_tname),
1903  opts);
1904  if (manualUnlock)
1905  {
1906  opts.optionsPresent |= NdbOperation::OperationOptions::OO_LOCKHANDLE;
1907  }
1908  CHK((g_const_opr = g_con->readTuple(g_key_record, tup.m_key_row,
1909  g_blob_record, tup.m_row,
1910  lm,
1911  NULL,
1912  &opts,
1913  sizeof(opts))) != 0);
1914  CHK(getBlobHandles(g_const_opr) == 0);
1915  }
1916  bool timeout= false;
1917  if (style == 0) {
1918  CHK(getBlobValue(tup) == 0);
1919  } else if (style == 1) {
1920  CHK(setBlobReadHook(tup) == 0);
1921  } else {
1922  CHK(g_con->execute(NoCommit) == 0);
1923  if (readBlobData(tup) == -1)
1924  CHK((timeout= conHasTimeoutError()) == true);
1925  }
1926  if (!timeout)
1927  {
1928  if (g_con->execute(NoCommit) == 0)
1929  {
1930  /* Ok, read executed ok, now
1931  * - Verify the Blob data
1932  * - Verify the row is locked
1933  * - Close the Blob handles
1934  * - Attempt to unlock
1935  */
1936  NdbOperation::LockMode lmused = (g_opr?g_opr:g_const_opr)->getLockMode();
1937  CHK((lmused == NdbOperation::LM_Read) ||
1938  (lmused == NdbOperation::LM_Exclusive));
1939 
1940  if (style == 0 || style == 1) {
1941  CHK(verifyBlobValue(tup) == 0);
1942  }
1943 
1944  /* Occasionally check that we are locked */
1945  if (urandom(200) == 0)
1946  CHK(verifyRowLocked(tup) == 0);
1947 
1948  /* Close Blob handles */
1949  CHK(g_bh1->close() == 0);
1950  CHK(g_bh1->getState() == NdbBlob::Closed);
1951  if (! g_opt.m_oneblob)
1952  {
1953  CHK(g_bh2->close() == 0);
1954  CHK(g_bh2->getState() == NdbBlob::Closed);
1955  }
1956 
1957  /* Check Blob handle is closed */
1958  char byte;
1959  Uint32 len = 1;
1960  CHK(g_bh1->readData(&byte, len) != 0);
1961  CHK(g_bh1->getNdbError().code == 4265);
1962  CHK(g_bh1->close() != 0);
1963  CHK(g_bh1->getNdbError().code == 4554);
1964  if(! g_opt.m_oneblob)
1965  {
1966  CHK(g_bh2->readData(&byte, len) != 0);
1967  CHK(g_bh2->getNdbError().code == 4265);
1968  CHK(g_bh2->close() != 0);
1969  CHK(g_bh2->getNdbError().code == 4554);
1970  }
1971 
1972 
1973  if (manualUnlock)
1974  {
1975  /* All Blob handles closed, now we can issue an
1976  * unlock operation and the main row should be
1977  * unlocked
1978  */
1979  const NdbOperation* readOp = (g_opr?g_opr:g_const_opr);
1980  const NdbLockHandle* lh = readOp->getLockHandle();
1981  CHK(lh != NULL);
1982  const NdbOperation* unlockOp = g_con->unlock(lh);
1983  CHK(unlockOp != NULL);
1984  }
1985 
1986  /* All Blob handles closed - manual or automatic
1987  * unlock op has been enqueued. Now execute and
1988  * check that the row is unlocked.
1989  */
1990  CHK(g_con->execute(NoCommit) == 0);
1991  CHK(verifyRowNotLocked(tup) == 0);
1992 
1993  if (g_con->execute(Commit) != 0)
1994  {
1995  CHK((timeout= conHasTimeoutError()) == true);
1996  }
1997  }
1998  else
1999  {
2000  CHK((timeout= conHasTimeoutError()) == true);
2001  }
2002  }
2003  if (timeout)
2004  {
2005  DISP("ReadLockPk failed due to timeout on read("
2006  << conError() <<") Retries left : "
2007  << opTimeoutRetries -1);
2008  CHK(--opTimeoutRetries);
2009  opState= Retrying;
2010  sleep(1);
2011  }
2012 
2013  g_ndb->closeTransaction(g_con);
2014  } while (opState == Retrying);
2015  g_opr = 0;
2016  g_const_opr = 0;
2017  g_con = 0;
2018  }
2019  return 0;
2020 }
2021 
2022 static int
2023 updatePk(int style, int api)
2024 {
2025  DBG("--- updatePk " << stylename[style] << " " << apiName[api] << " ---");
2026  for (unsigned k = 0; k < g_opt.m_rows; k++) {
2027  Tup& tup = g_tups[k];
2028  DBG("updatePk pk1=" << hex << tup.m_pk1);
2029  Uint32 opTimeoutRetries= g_opt.m_timeout_retries;
2030  OpState opState;
2031 
2032  do
2033  {
2034  opState= Normal;
2035  int mode = urandom(3);
2036  int error_code = mode == 0 ? 0 : 4275;
2037  CHK((g_con = g_ndb->startTransaction()) != 0);
2038  if (api == API_RECATTR)
2039  {
2040  CHK((g_opr = g_con->getNdbOperation(g_opt.m_tname)) != 0);
2041  if (mode == 0) {
2042  DBG("using updateTuple");
2043  CHK(g_opr->updateTuple() == 0);
2044  } else if (mode == 1) {
2045  DBG("using readTuple exclusive");
2046  CHK(g_opr->readTuple(NdbOperation::LM_Exclusive) == 0);
2047  } else {
2048  DBG("using readTuple - will fail and retry");
2049  CHK(g_opr->readTuple() == 0);
2050  }
2051  CHK(g_opr->equal("PK1", tup.m_pk1) == 0);
2052  if (g_opt.m_pk2chr.m_len != 0)
2053  {
2054  CHK(g_opr->equal("PK2", tup.m_pk2) == 0);
2055  CHK(g_opr->equal("PK3", tup.m_pk3) == 0);
2056  }
2057  setUDpartId(tup, g_opr);
2058  CHK(getBlobHandles(g_opr) == 0);
2059  }
2060  else
2061  {
2062  memcpy(&tup.m_key_row[g_pk1_offset], &tup.m_pk1, sizeof(tup.m_pk1));
2063  if (g_opt.m_pk2chr.m_len != 0) {
2064  memcpy(&tup.m_key_row[g_pk2_offset], tup.pk2(), g_opt.m_pk2chr.m_totlen);
2065  memcpy(&tup.m_key_row[g_pk3_offset], &tup.m_pk3, sizeof(tup.m_pk3));
2066  }
2068  setUDpartIdNdbRecord(tup,
2069  g_ndb->getDictionary()->getTable(g_opt.m_tname),
2070  opts);
2071  if (mode == 0) {
2072  DBG("using updateTuple");
2073  CHK((g_const_opr= g_con->updateTuple(g_key_record, tup.m_key_row,
2074  g_blob_record, tup.m_row,
2075  NULL, &opts, sizeof(opts))) != 0);
2076  } else if (mode == 1) {
2077  DBG("using readTuple exclusive");
2078  CHK((g_const_opr= g_con->readTuple(g_key_record, tup.m_key_row,
2079  g_blob_record, tup.m_row,
2081  NULL, &opts, sizeof(opts))) != 0);
2082  } else {
2083  DBG("using readTuple - will fail and retry");
2084  CHK((g_const_opr= g_con->readTuple(g_key_record, tup.m_key_row,
2085  g_blob_record, tup.m_row,
2086  NdbOperation::LM_Read,
2087  NULL, &opts, sizeof(opts))) != 0);
2088  }
2089  CHK(getBlobHandles(g_const_opr) == 0);
2090  }
2091 
2092  bool timeout= false;
2093  if (style == 0) {
2094  CHK(setBlobValue(tup, error_code) == 0);
2095  } else if (style == 1) {
2096  CHK(setBlobWriteHook(tup, error_code) == 0);
2097  } else {
2098  CHK(g_con->execute(NoCommit) == 0);
2099  if (writeBlobData(tup, error_code) != 0)
2100  CHK((timeout= conHasTimeoutError()) == true);
2101  }
2102  if (!timeout &&
2103  (error_code == 0)) {
2104  /* Normal success case, try execute commit */
2105  if (g_con->execute(Commit) != 0)
2106  CHK((timeout= conHasTimeoutError()) == true);
2107  else
2108  {
2109  g_ndb->closeTransaction(g_con);
2110  break;
2111  }
2112  }
2113  if (timeout)
2114  {
2115  DISP("UpdatePk failed due to timeout("
2116  << conError() <<") Retries left : "
2117  << opTimeoutRetries -1);
2118  CHK(--opTimeoutRetries);
2119 
2120  opState= Retrying;
2121  sleep(1);
2122  }
2123  if (error_code)
2124  opState= Retrying;
2125 
2126  g_ndb->closeTransaction(g_con);
2127  } while (opState == Retrying);
2128  g_const_opr = 0;
2129  g_opr = 0;
2130  g_con = 0;
2131  tup.m_exists = true;
2132  }
2133  return 0;
2134 }
2135 
2136 static int
2137 writePk(int style, int api)
2138 {
2139  DBG("--- writePk " << stylename[style] << " " << apiName[api] << " ---");
2140  for (unsigned k = 0; k < g_opt.m_rows; k++) {
2141  Tup& tup = g_tups[k];
2142  Uint32 opTimeoutRetries= g_opt.m_timeout_retries;
2143  enum OpState opState;
2144 
2145  do
2146  {
2147  opState= Normal;
2148  DBG("writePk pk1=" << hex << tup.m_pk1);
2149  CHK((g_con = g_ndb->startTransaction()) != 0);
2150  if (api == API_RECATTR)
2151  {
2152  CHK((g_opr = g_con->getNdbOperation(g_opt.m_tname)) != 0);
2153  CHK(g_opr->writeTuple() == 0);
2154  CHK(g_opr->equal("PK1", tup.m_pk1) == 0);
2155  if (g_opt.m_pk2chr.m_len != 0)
2156  {
2157  CHK(g_opr->equal("PK2", tup.m_pk2) == 0);
2158  CHK(g_opr->equal("PK3", tup.m_pk3) == 0);
2159  }
2160  setUDpartId(tup, g_opr);
2161  CHK(getBlobHandles(g_opr) == 0);
2162  }
2163  else
2164  {
2165  memcpy(&tup.m_key_row[g_pk1_offset], &tup.m_pk1, sizeof(tup.m_pk1));
2166  memcpy(&tup.m_row[g_pk1_offset], &tup.m_pk1, sizeof(tup.m_pk1));
2167  if (g_opt.m_pk2chr.m_len != 0) {
2168  memcpy(&tup.m_key_row[g_pk2_offset], tup.pk2(), g_opt.m_pk2chr.m_totlen);
2169  memcpy(&tup.m_row[g_pk2_offset], tup.pk2(), g_opt.m_pk2chr.m_totlen);
2170  memcpy(&tup.m_key_row[g_pk3_offset], &tup.m_pk3, sizeof(tup.m_pk3));
2171  memcpy(&tup.m_row[g_pk3_offset], &tup.m_pk3, sizeof(tup.m_pk3));
2172  }
2174  setUDpartIdNdbRecord(tup,
2175  g_ndb->getDictionary()->getTable(g_opt.m_tname),
2176  opts);
2177  CHK((g_const_opr= g_con->writeTuple(g_key_record, tup.m_key_row,
2178  g_full_record, tup.m_row,
2179  NULL, &opts, sizeof(opts))) != 0);
2180  CHK(getBlobHandles(g_const_opr) == 0);
2181  }
2182  bool timeout= false;
2183  if (style == 0) {
2184  CHK(setBlobValue(tup) == 0);
2185  } else if (style == 1) {
2186  CHK(presetBH1(k) == 0);
2187  CHK(setBlobWriteHook(tup) == 0);
2188  } else {
2189  CHK(presetBH1(k) == 0);
2190  CHK(g_con->execute(NoCommit) == 0);
2191  if (writeBlobData(tup) != 0)
2192  CHK((timeout= conHasTimeoutError()) == true);
2193  }
2194 
2195  if (!timeout)
2196  {
2197  if (g_con->execute(Commit) != 0)
2198  CHK((timeout= conHasTimeoutError()) == true);
2199  }
2200  if (timeout)
2201  {
2202  DISP("WritePk failed due to timeout("
2203  << conError() <<") Retries left : "
2204  << opTimeoutRetries -1);
2205  CHK(--opTimeoutRetries);
2206 
2207  opState= Retrying;
2208  sleep(1);
2209  }
2210  g_ndb->closeTransaction(g_con);
2211  } while (opState == Retrying);
2212 
2213  g_const_opr = 0;
2214  g_opr = 0;
2215  g_con = 0;
2216  tup.m_exists = true;
2217  }
2218  return 0;
2219 }
2220 
/*
 * Delete every test row by primary key, batching g_opt.m_batch
 * operations per commit.  api selects the RecAttr (NdbOperation) or
 * NdbRecord code path.  Deadlock/timeout failures are retried up to
 * g_opt.m_timeout_retries times; on retry, k is rewound to the first
 * row of the failed batch.  Returns 0 on success, -1 via CHK on error.
 */
static int
deletePk(int api)
{
  DBG("--- deletePk " << apiName[api] << " ---");
  unsigned n = 0;   // ops queued in the current batch
  unsigned k = 0;   // next row index; persists across retry loops
  Uint32 opTimeoutRetries= g_opt.m_timeout_retries;
  enum OpState opState;

  do
  {
    opState= Normal;
    CHK((g_con = g_ndb->startTransaction()) != 0);
    for (; k < g_opt.m_rows; k++) {
      Tup& tup = g_tups[k];
      DBG("deletePk pk1=" << hex << tup.m_pk1);
      if (api == API_RECATTR)
      {
        CHK((g_opr = g_con->getNdbOperation(g_opt.m_tname)) != 0);
        CHK(g_opr->deleteTuple() == 0);
        /* Must set explicit partitionId before equal() calls as that's
         * where implicit Blob handles are created which need the
         * partitioning info
         */
        setUDpartId(tup, g_opr);
        CHK(g_opr->equal("PK1", tup.m_pk1) == 0);
        if (g_opt.m_pk2chr.m_len != 0)
        {
          CHK(g_opr->equal("PK2", tup.m_pk2) == 0);
          CHK(g_opr->equal("PK3", tup.m_pk3) == 0);
        }
      }
      else
      {
        // NdbRecord path: build the key row image, then deleteTuple()
        memcpy(&tup.m_key_row[g_pk1_offset], &tup.m_pk1, sizeof(tup.m_pk1));
        if (g_opt.m_pk2chr.m_len != 0) {
          memcpy(&tup.m_key_row[g_pk2_offset], tup.pk2(), g_opt.m_pk2chr.m_totlen);
          memcpy(&tup.m_key_row[g_pk3_offset], &tup.m_pk3, sizeof(tup.m_pk3));
        }
        /* NOTE(review): the declaration of 'opts' (presumably
         * NdbOperation::OperationOptions) is missing from this listing
         * (original line 2260 was dropped by extraction) — verify
         * against the pristine source before building. */
        setUDpartIdNdbRecord(tup,
                             g_ndb->getDictionary()->getTable(g_opt.m_tname),
                             opts);
        CHK((g_const_opr= g_con->deleteTuple(g_key_record, tup.m_key_row,
                                             g_full_record, NULL,
                                             NULL, &opts, sizeof(opts))) != 0);
      }
      if (++n == g_opt.m_batch) {
        // batch full: commit, then start a fresh transaction
        if (g_con->execute(Commit) != 0)
        {
          CHK(conHasTimeoutError());
          DISP("DeletePk failed due to timeout("
               << conError() <<") Retries left : "
               << opTimeoutRetries -1);
          CHK(--opTimeoutRetries);

          opState= Retrying;
          k= k - (n-1);   // rewind to first row of the failed batch
          n= 0;
          sleep(1);
          break; // Out of for
        }

        g_ndb->closeTransaction(g_con);
        CHK((g_con = g_ndb->startTransaction()) != 0);
        n = 0;
      }
      g_const_opr = 0;
      g_opr = 0;
      tup.m_exists = false;
    } // for(
    if (opState == Normal)
    {
      // commit any partial final batch
      if (n != 0) {
        if (g_con->execute(Commit) != 0)
        {
          CHK(conHasTimeoutError());
          DISP("DeletePk failed on last batch ("
               << conError() <<") Retries left : "
               << opTimeoutRetries -1);
          CHK(--opTimeoutRetries);
          sleep(1);
          opState= Retrying;
          k= k- (n-1);
        }
        n = 0;
      }
    }
    g_ndb->closeTransaction(g_con);
    g_con = 0;
  } while (opState == Retrying);

  return 0;
}
2315 
2316 static int
2317 deleteNoPk()
2318 {
2319  DBG("--- deleteNoPk ---");
2320  Tup no_tup; // bug#24028
2321  no_tup.m_pk1 = 0xb1ff;
2322  const Chr& pk2chr = g_opt.m_pk2chr;
2323  if (pk2chr.m_len != 0) {
2324  char* const p = no_tup.m_pk2;
2325  uint len = urandom(pk2chr.m_len + 1);
2326  uint i = 0;
2327  if (! pk2chr.m_fixed) {
2328  *(uchar*)&p[0] = len;
2329  i++;
2330  }
2331  uint j = 0;
2332  while (j < len) {
2333  p[i] = "b1ff"[j % 4];
2334  i++;
2335  j++;
2336  }
2337  }
2338  no_tup.m_pk3 = 0xb1ff;
2339  CHK((g_con = g_ndb->startTransaction()) != 0);
2340  Tup& tup = no_tup;
2341  DBG("deletePk pk1=" << hex << tup.m_pk1);
2342  CHK((g_opr = g_con->getNdbOperation(g_opt.m_tname)) != 0);
2343  CHK(g_opr->deleteTuple() == 0);
2344  setUDpartId(tup, g_opr);
2345  CHK(g_opr->equal("PK1", tup.m_pk1) == 0);
2346  if (pk2chr.m_len != 0) {
2347  CHK(g_opr->equal("PK2", tup.m_pk2) == 0);
2348  CHK(g_opr->equal("PK3", (char*)&tup.m_pk2) == 0);
2349  }
2350  CHK(g_con->execute(Commit) == -1); // fail
2351  // BUG: error should be on op but is on con now
2352  DBG("con: " << g_con->getNdbError());
2353  DBG("opr: " << g_opr->getNdbError());
2354  CHK(g_con->getNdbError().code == 626 || g_opr->getNdbError().code == 626);
2355  g_ndb->closeTransaction(g_con);
2356  g_opr = 0;
2357  g_con = 0;
2358  return 0;
2359 }
2360 
2361 // hash index ops
2362 
/*
 * Read every row via the unique hash index (g_opt.m_x1name) under a
 * randomly chosen lock mode and verify blob contents.
 * style: 0 = getValue, 1 = read hook, 2 = explicit readBlobData.
 * api:   API_RECATTR (NdbIndexOperation) or the NdbRecord path.
 * Deadlock timeouts are retried up to g_opt.m_timeout_retries times.
 */
static int
readIdx(int style, int api)
{
  DBG("--- readIdx " << stylename[style] << " " << apiName[api] << " ---");
  for (unsigned k = 0; k < g_opt.m_rows; k++) {
    Tup& tup = g_tups[k];
    Uint32 opTimeoutRetries= g_opt.m_timeout_retries;
    enum OpState opState;

    do
    {
      opState= Normal;
      DBG("readIdx pk1=" << hex << tup.m_pk1);
      CHK((g_con = g_ndb->startTransaction()) != 0);
      /* NOTE(review): the declaration/initialisation of 'lm'
       * (NdbOperation::LockMode) and the 'case 1' assignment are
       * missing from this listing (original lines 2377/2384 dropped by
       * extraction) — verify against the pristine source. */
      switch(urandom(3))
      {
      case 0:
        lm = NdbOperation::LM_Read;
        break;
      case 1:
        break;
      default:
        break;
      }
      if (api == API_RECATTR)
      {
        CHK((g_opx = g_con->getNdbIndexOperation(g_opt.m_x1name, g_opt.m_tname)) != 0);
        CHK(g_opx->readTuple(lm) == 0);
        CHK(g_opx->equal("PK2", tup.m_pk2) == 0);
        CHK(g_opx->equal("PK3", tup.m_pk3) == 0);
        /* No need to set partition Id for unique indexes */
        CHK(getBlobHandles(g_opx) == 0);
      }
      else
      {
        // NdbRecord path: index key row in, blob record out
        memcpy(&tup.m_key_row[g_pk2_offset], tup.pk2(), g_opt.m_pk2chr.m_totlen);
        memcpy(&tup.m_key_row[g_pk3_offset], &tup.m_pk3, sizeof(tup.m_pk3));
        /* No need to set partition Id for unique indexes */
        CHK((g_const_opr= g_con->readTuple(g_idx_record, tup.m_key_row,
                                           g_blob_record, tup.m_row,
                                           lm)) != 0);
        CHK(getBlobHandles(g_const_opr) == 0);
      }

      bool timeout= false;
      if (style == 0) {
        CHK(getBlobValue(tup) == 0);
      } else if (style == 1) {
        CHK(setBlobReadHook(tup) == 0);
      } else {
        // style 2: flush, then read blob parts explicitly
        if(g_con->execute(NoCommit) ||
           readBlobData(tup))
          CHK((timeout= conHasTimeoutError()) == true);
      }
      if (!timeout)
      {
        if (g_con->execute(Commit) != 0)
        {
          CHK((timeout= conHasTimeoutError()) == true);
        }
      }
      if (!timeout)
      {
        // verify lock mode upgrade (already done by NdbIndexOperation)
        CHK((g_opx?g_opx:g_const_opr)->getLockMode() == NdbOperation::LM_Read);
        if (style == 0 || style == 1) {
          CHK(verifyBlobValue(tup) == 0);
        }
      }
      else
      {
        DISP("Timeout while reading via index ("
             << conError() <<") Retries left : "
             << opTimeoutRetries -1);
        CHK(--opTimeoutRetries);

        opState= Retrying;
        sleep(1);
      }
      g_ndb->closeTransaction(g_con);
    } while (opState == Retrying);
    g_const_opr = 0;
    g_opx = 0;
    g_con = 0;
  }
  return 0;
}
2452 
2453 static int
2454 updateIdx(int style, int api)
2455 {
2456  DBG("--- updateIdx " << stylename[style] << " " << apiName[api] << " ---");
2457  for (unsigned k = 0; k < g_opt.m_rows; k++) {
2458  Tup& tup = g_tups[k];
2459  Uint32 opTimeoutRetries= g_opt.m_timeout_retries;
2460  enum OpState opState;
2461 
2462  do
2463  {
2464  opState= Normal;
2465  DBG("updateIdx pk1=" << hex << tup.m_pk1);
2466  // skip 4275 testing
2467  CHK((g_con = g_ndb->startTransaction()) != 0);
2468  if (api == API_RECATTR)
2469  {
2470  CHK((g_opx = g_con->getNdbIndexOperation(g_opt.m_x1name, g_opt.m_tname)) != 0);
2471  CHK(g_opx->updateTuple() == 0);
2472  CHK(g_opx->equal("PK2", tup.m_pk2) == 0);
2473  CHK(g_opx->equal("PK3", tup.m_pk3) == 0);
2474  /* No need to set partition Id for unique indexes */
2475  CHK(getBlobHandles(g_opx) == 0);
2476  }
2477  else
2478  {
2479  memcpy(&tup.m_key_row[g_pk2_offset], tup.pk2(), g_opt.m_pk2chr.m_totlen);
2480  memcpy(&tup.m_key_row[g_pk3_offset], &tup.m_pk3, sizeof(tup.m_pk3));
2481  /* No need to set partition Id for unique indexes */
2482  CHK((g_const_opr= g_con->updateTuple(g_idx_record, tup.m_key_row,
2483  g_blob_record, tup.m_row)) != 0);
2484  CHK(getBlobHandles(g_const_opr) == 0);
2485  }
2486  bool timeout= false;
2487  if (style == 0) {
2488  CHK(setBlobValue(tup) == 0);
2489  } else if (style == 1) {
2490  CHK(setBlobWriteHook(tup) == 0);
2491  } else {
2492  if (g_con->execute(NoCommit) ||
2493  writeBlobData(tup))
2494  CHK((timeout= conHasTimeoutError()) == true);
2495  }
2496  if (!timeout)
2497  {
2498  if (g_con->execute(Commit) != 0)
2499  CHK((timeout= conHasTimeoutError()) == true);
2500  }
2501  if (timeout)
2502  {
2503  DISP("Timeout in Index Update ("
2504  << conError() <<") Retries left : "
2505  << opTimeoutRetries-1);
2506  CHK(--opTimeoutRetries);
2507  opState= Retrying;
2508  sleep(1);
2509  }
2510  g_ndb->closeTransaction(g_con);
2511  } while (opState == Retrying);
2512  g_const_opr = 0;
2513  g_opx = 0;
2514  g_con = 0;
2515  tup.m_exists = true;
2516  }
2517  return 0;
2518 }
2519 
2520 static int
2521 writeIdx(int style, int api)
2522 {
2523  DBG("--- writeIdx " << stylename[style] << " " << apiName[api] << " ---");
2524  for (unsigned k = 0; k < g_opt.m_rows; k++) {
2525  Tup& tup = g_tups[k];
2526  Uint32 opTimeoutRetries= g_opt.m_timeout_retries;
2527  enum OpState opState;
2528 
2529  do
2530  {
2531  opState= Normal;
2532  DBG("writeIdx pk1=" << hex << tup.m_pk1);
2533  CHK((g_con = g_ndb->startTransaction()) != 0);
2534  if (api == API_RECATTR)
2535  {
2536  CHK((g_opx = g_con->getNdbIndexOperation(g_opt.m_x1name, g_opt.m_tname)) != 0);
2537  CHK(g_opx->writeTuple() == 0);
2538  CHK(g_opx->equal("PK2", tup.m_pk2) == 0);
2539  CHK(g_opx->equal("PK3", tup.m_pk3) == 0);
2540  /* No need to set partition Id for unique indexes */
2541  CHK(getBlobHandles(g_opx) == 0);
2542  }
2543  else
2544  {
2545  memcpy(&tup.m_key_row[g_pk2_offset], tup.pk2(), g_opt.m_pk2chr.m_totlen);
2546  memcpy(&tup.m_key_row[g_pk3_offset], &tup.m_pk3, sizeof(tup.m_pk3));
2547  memcpy(&tup.m_row[g_pk1_offset], &tup.m_pk1, sizeof(tup.m_pk1));
2548  memcpy(&tup.m_row[g_pk2_offset], tup.pk2(), g_opt.m_pk2chr.m_totlen);
2549  memcpy(&tup.m_row[g_pk3_offset], &tup.m_pk3, sizeof(tup.m_pk3));
2550  /* No need to set partition Id for unique indexes */
2551  CHK((g_const_opr= g_con->writeTuple(g_idx_record, tup.m_key_row,
2552  g_full_record, tup.m_row)) != 0);
2553  CHK(getBlobHandles(g_const_opr) == 0);
2554  }
2555  bool timeout= false;
2556  if (style == 0) {
2557  CHK(setBlobValue(tup) == 0);
2558  } else if (style == 1) {
2559  // non-nullable must be set
2560  CHK(g_bh1->setValue("", 0) == 0);
2561  CHK(setBlobWriteHook(tup) == 0);
2562  } else {
2563  // non-nullable must be set
2564  CHK(g_bh1->setValue("", 0) == 0);
2565  if (g_con->execute(NoCommit) ||
2566  writeBlobData(tup))
2567  CHK((timeout= conHasTimeoutError()) == true);
2568  }
2569  if (!timeout)
2570  {
2571  if (g_con->execute(Commit))
2572  CHK((timeout= conHasTimeoutError()) == true);
2573  }
2574  if (timeout)
2575  {
2576  DISP("Timeout in Index Write ("
2577  << conError() <<") Retries left : "
2578  << opTimeoutRetries-1);
2579  CHK(--opTimeoutRetries);
2580  opState= Retrying;
2581  sleep(1);
2582  }
2583  g_ndb->closeTransaction(g_con);
2584  } while (opState == Retrying);
2585  g_const_opr = 0;
2586  g_opx = 0;
2587  g_con = 0;
2588  tup.m_exists = true;
2589  }
2590  return 0;
2591 }
2592 
/*
 * Delete every row via the unique hash index (g_opt.m_x1name),
 * batching g_opt.m_batch operations per commit.  api selects the
 * RecAttr or NdbRecord path.  Timeouts retry the failed batch (k is
 * rewound) up to g_opt.m_timeout_retries times.  Returns 0 on
 * success, -1 via CHK on error.
 */
static int
deleteIdx(int api)
{
  DBG("--- deleteIdx " << apiName[api] << " ---");
  unsigned n = 0;   // ops queued in the current batch
  unsigned k = 0;   // next row index; persists across retry loops
  Uint32 opTimeoutRetries= g_opt.m_timeout_retries;
  enum OpState opState;

  do
  {
    opState= Normal;
    CHK((g_con = g_ndb->startTransaction()) != 0);
    for (; k < g_opt.m_rows; k++) {
      Tup& tup = g_tups[k];
      DBG("deleteIdx pk1=" << hex << tup.m_pk1);
      if (api == API_RECATTR)
      {
        CHK((g_opx = g_con->getNdbIndexOperation(g_opt.m_x1name, g_opt.m_tname)) != 0);
        CHK(g_opx->deleteTuple() == 0);
        CHK(g_opx->equal("PK2", tup.m_pk2) == 0);
        CHK(g_opx->equal("PK3", tup.m_pk3) == 0);
        /* No need to set partition Id for unique indexes */
      }
      else
      {
        // NdbRecord path: index key row image, then deleteTuple()
        memcpy(&tup.m_key_row[g_pk2_offset], tup.pk2(), g_opt.m_pk2chr.m_totlen);
        memcpy(&tup.m_key_row[g_pk3_offset], &tup.m_pk3, sizeof(tup.m_pk3));
        /* No need to set partition Id for unique indexes */
        CHK((g_const_opr= g_con->deleteTuple(g_idx_record, tup.m_key_row,
                                             g_full_record)) != 0);
      }
      if (++n == g_opt.m_batch) {
        // batch full: commit, then start a fresh transaction
        if (g_con->execute(Commit))
        {
          CHK(conHasTimeoutError());
          // (sic: "deleteing" typo is in the original message)
          DISP("Timeout deleteing via index ("
               << conError() <<") Retries left :"
               << opTimeoutRetries-1);
          CHK(--opTimeoutRetries);
          opState= Retrying;
          k= k- (n-1);   // rewind to first row of the failed batch
          n= 0;
          sleep(1);
          break;
        }

        g_ndb->closeTransaction(g_con);
        CHK((g_con = g_ndb->startTransaction()) != 0);
        n = 0;
      }

      g_const_opr = 0;
      g_opx = 0;
      tup.m_exists = false;
    }
    if ((opState == Normal) &&
        (n != 0)) {
      // commit any partial final batch
      if(g_con->execute(Commit))
      {
        CHK(conHasTimeoutError());
        DISP("Timeout on last idx delete batch ("
             << conError() <<") Retries left :"
             << opTimeoutRetries-1);
        CHK(--opTimeoutRetries);
        opState= Retrying;
        k= k-(n-1);
        sleep(1);
      }
      n = 0;
    }
    g_ndb->closeTransaction(g_con);
  } while (opState == Retrying);
  g_con= 0;
  g_opx= 0;
  g_const_opr= 0;
  return 0;
}
2671 
2672 // scan ops table and index
2673 
/*
 * Scan all rows (table scan, or ordered-index scan via g_opt.m_x2name
 * when idx is true) under a randomly chosen lock mode, reading and
 * verifying blob values for every row found.
 * style: 0 = getValue, 1 = read hook, 2 = explicit readBlobData.
 * api:   API_RECATTR or the NdbRecord path.
 * Timeouts restart the whole scan, up to g_opt.m_timeout_retries.
 */
static int
readScan(int style, int api, bool idx)
{
  DBG("--- " << "readScan" << (idx ? "Idx" : "") << " " << stylename[style] << " " << apiName[api] << " ---");
  Tup tup;
  tup.alloc(); // allocate buffers

  Uint32 opTimeoutRetries= g_opt.m_timeout_retries;
  enum OpState opState;

  do
  {
    opState= Normal;
    CHK((g_con = g_ndb->startTransaction()) != 0);
    /* NOTE(review): the declaration/initialisation of 'lm'
     * (NdbOperation::LockMode) and the 'case 1' assignment are missing
     * from this listing (original lines 2688/2695 dropped by
     * extraction) — verify against the pristine source. */
    switch(urandom(3))
    {
    case 0:
      lm = NdbOperation::LM_Read;
      break;
    case 1:
      break;
    default:
      break;
    }
    if (api == API_RECATTR)
    {
      if (! idx) {
        CHK((g_ops = g_con->getNdbScanOperation(g_opt.m_tname)) != 0);
      } else {
        CHK((g_ops = g_con->getNdbIndexScanOperation(g_opt.m_x2name, g_opt.m_tname)) != 0);
      }
      CHK(g_ops->readTuples(lm,
                            g_scanFlags,
                            g_batchSize,
                            g_parallel) == 0);
      CHK(g_ops->getValue("PK1", (char*)&tup.m_pk1) != 0);
      if (g_opt.m_pk2chr.m_len != 0)
      {
        CHK(g_ops->getValue("PK2", tup.m_pk2) != 0);
        CHK(g_ops->getValue("PK3", (char *) &tup.m_pk3) != 0);
      }
      /* Don't bother setting UserDefined partitions for scan tests */
      CHK(getBlobHandles(g_ops) == 0);
    }
    else
    {
      /* Don't bother setting UserDefined partitions for scan tests */
      if (! idx)
        CHK((g_ops= g_con->scanTable(g_full_record,
                                     lm)) != 0);
      else
        CHK((g_ops= g_con->scanIndex(g_ord_record, g_full_record,
                                     lm)) != 0);
      CHK(getBlobHandles(g_ops) == 0);
    }

    if (style == 0) {
      CHK(getBlobValue(tup) == 0);
    } else if (style == 1) {
      CHK(setBlobReadHook(tup) == 0);
    }
    if (g_con->execute(NoCommit))
    {
      CHK(conHasTimeoutError());
      DISP("Timeout scan read ("
           << conError()
           << "). Retries left : "
           << opTimeoutRetries - 1);
      CHK(--opTimeoutRetries);
      opState= Retrying;
      g_ndb->closeTransaction(g_con);
      continue;
    }

    // verify lock mode upgrade
    CHK(g_ops->getLockMode() == NdbOperation::LM_Read);
    unsigned rows = 0;
    while (1) {
      int ret;

      if (api == API_RECATTR)
      {
        // poison the key fields so a stale value is detectable
        tup.m_pk1 = (Uint32)-1;
        memset(tup.m_pk2, 'x', g_opt.m_pk2chr.m_len);
        tup.m_pk3 = -1;
        ret = g_ops->nextResult(true);
      }
      else
      {
        const char *out_row= NULL;

        if (0 == (ret = g_ops->nextResult(&out_row, true, false)))
        {
          memcpy(&tup.m_pk1, &out_row[g_pk1_offset], sizeof(tup.m_pk1));
          if (g_opt.m_pk2chr.m_len != 0)
          {
            memcpy(tup.m_pk2, &out_row[g_pk2_offset], g_opt.m_pk2chr.m_totlen);
            memcpy(&tup.m_pk3, &out_row[g_pk3_offset], sizeof(tup.m_pk3));
          }
        }
      }

      if (ret == -1)
      {
        /* Timeout? */
        if (conHasTimeoutError())
        {
          /* Break out and restart scan unless we've
           * run out of attempts
           */
          DISP("Scan read failed due to deadlock timeout ("
               << conError() <<") retries left :"
               << opTimeoutRetries -1);
          CHK(--opTimeoutRetries);

          opState= Retrying;
          sleep(1);
          break;
        }
      }
      CHK(opState == Normal);
      CHK((ret == 0) || (ret == 1));
      if (ret == 1)
        break;

      DBG("readScan" << (idx ? "Idx" : "") << " pk1=" << hex << tup.m_pk1);
      Uint32 k = tup.m_pk1 - g_opt.m_pk1off;
      CHK(k < g_opt.m_rows && g_tups[k].m_exists);
      tup.copyfrom(g_tups[k]);
      if (style == 0) {
        CHK(verifyBlobValue(tup) == 0);
      } else if (style == 1) {
        // execute ops generated by callbacks, if any
        CHK(verifyBlobValue(tup) == 0);
      } else {
        if (readBlobData(tup))
        {
          CHK(conHasTimeoutError());
          DISP("Timeout in readScan("
               << conError()
               << ") Retries left : "
               << opTimeoutRetries - 1);
          CHK(--opTimeoutRetries);
          opState= Retrying;
          sleep(1);
          continue;
        }
      }
      rows++;
    }
    g_ndb->closeTransaction(g_con);

    if (opState == Normal)
      CHK(g_opt.m_rows == rows);

  } while (opState == Retrying);

  g_con = 0;
  g_ops = 0;
  return 0;
}
2837 
/*
 * Scan all rows (table or ordered-index scan) and update the blob
 * columns of each row via takeover (updateCurrentTuple).
 * style: 0 = setValue, 1 = write hook, 2 = explicit writeBlobData.
 * api:   API_RECATTR or the NdbRecord path.
 * Timeouts restart the whole scan, up to g_opt.m_timeout_retries.
 */
static int
updateScan(int style, int api, bool idx)
{
  DBG("--- " << "updateScan" << (idx ? "Idx" : "") << " " << stylename[style] << " " << apiName[api] << " ---");
  Tup tup;
  tup.alloc(); // allocate buffers

  Uint32 opTimeoutRetries= g_opt.m_timeout_retries;
  enum OpState opState;

  do
  {
    opState= Normal;
    CHK((g_con = g_ndb->startTransaction()) != 0);
    if (api == API_RECATTR)
    {
      if (! idx) {
        CHK((g_ops = g_con->getNdbScanOperation(g_opt.m_tname)) != 0);
      } else {
        CHK((g_ops = g_con->getNdbIndexScanOperation(g_opt.m_x2name, g_opt.m_tname)) != 0);
      }
      /* NOTE(review): the opening 'CHK(g_ops->readTuples(<lock mode>,'
       * line is missing from this listing (original line 2859 dropped
       * by extraction) — verify against the pristine source. */
      g_scanFlags,
      g_batchSize,
      g_parallel) == 0);
      CHK(g_ops->getValue("PK1", (char*)&tup.m_pk1) != 0);
      if (g_opt.m_pk2chr.m_len != 0)
      {
        CHK(g_ops->getValue("PK2", tup.m_pk2) != 0);
        CHK(g_ops->getValue("PK3", (char *) &tup.m_pk3) != 0);
      }
      /* Don't bother setting UserDefined partitions for scan tests */
    }
    else
    {
      /* Don't bother setting UserDefined partitions for scan tests */
      /* NOTE(review): the continuation lines carrying the lock-mode
       * argument and closing parentheses of these scanTable/scanIndex
       * calls are missing from this listing (original lines 2876/2879
       * dropped by extraction) — verify against the pristine source. */
      if (! idx)
        CHK((g_ops= g_con->scanTable(g_key_record,
      else
        CHK((g_ops= g_con->scanIndex(g_ord_record, g_key_record,
    }
    CHK(g_con->execute(NoCommit) == 0);
    unsigned rows = 0;
    while (1) {
      const char *out_row= NULL;
      int ret;

      if (api == API_RECATTR)
      {
        // poison the key fields so a stale value is detectable
        tup.m_pk1 = (Uint32)-1;
        memset(tup.m_pk2, 'x', g_opt.m_pk2chr.m_totlen);
        tup.m_pk3 = -1;

        ret = g_ops->nextResult(true);
      }
      else
      {
        if(0 == (ret = g_ops->nextResult(&out_row, true, false)))
        {
          memcpy(&tup.m_pk1, &out_row[g_pk1_offset], sizeof(tup.m_pk1));
          if (g_opt.m_pk2chr.m_len != 0) {
            memcpy(tup.m_pk2, &out_row[g_pk2_offset], g_opt.m_pk2chr.m_totlen);
            memcpy(&tup.m_pk3, &out_row[g_pk3_offset], sizeof(tup.m_pk3));
          }
        }
      }

      if (ret == -1)
      {
        /* Timeout? */
        if (conHasTimeoutError())
        {
          /* Break out and restart scan unless we've
           * run out of attempts
           */
          DISP("Scan update failed due to deadlock timeout ("
               << conError() <<"), retries left :"
               << opTimeoutRetries -1);
          CHK(--opTimeoutRetries);

          opState= Retrying;
          sleep(1);
          break;
        }
      }
      CHK(opState == Normal);
      CHK((ret == 0) || (ret == 1));
      if (ret == 1)
        break;

      DBG("updateScan" << (idx ? "Idx" : "") << " pk1=" << hex << tup.m_pk1);
      Uint32 k = tup.m_pk1 - g_opt.m_pk1off;
      CHK(k < g_opt.m_rows && g_tups[k].m_exists);
      // calculate new blob values
      calcBval(g_tups[k], false);
      tup.copyfrom(g_tups[k]);
      // cannot do 4275 testing, scan op error code controls execution
      if (api == API_RECATTR)
      {
        CHK((g_opr = g_ops->updateCurrentTuple()) != 0);
        CHK(getBlobHandles(g_opr) == 0);
      }
      else
      {
        CHK((g_const_opr = g_ops->updateCurrentTuple(g_con, g_blob_record, tup.m_row)) != 0);
        CHK(getBlobHandles(g_const_opr) == 0);
      }
      bool timeout= false;
      if (style == 0) {
        CHK(setBlobValue(tup) == 0);
      } else if (style == 1) {
        CHK(setBlobWriteHook(tup) == 0);
      } else {
        CHK(g_con->execute(NoCommit) == 0);
        if (writeBlobData(tup))
          CHK((timeout= conHasTimeoutError()) == true);
      }
      if (!timeout &&
          (g_con->execute(NoCommit)))
        CHK((timeout= conHasTimeoutError()) == true);

      if (timeout)
      {
        DISP("Scan update timeout("
             << conError()
             << ") Retries left : "
             << opTimeoutRetries-1);
        /* NOTE(review): post-decrement here (vs --opTimeoutRetries
         * elsewhere) permits one extra retry — possibly unintended;
         * confirm before changing. */
        CHK(opTimeoutRetries--);
        opState= Retrying;
        sleep(1);
        break;
      }

      g_const_opr = 0;
      g_opr = 0;
      rows++;
    }
    if (opState == Normal)
    {
      CHK(g_con->execute(Commit) == 0);
      CHK(g_opt.m_rows == rows);
    }
    g_ndb->closeTransaction(g_con);
  } while (opState == Retrying);
  g_con = 0;
  g_ops = 0;
  return 0;
}
2988 
/*
 * Scan all rows with SF_KeyInfo, lock each row (lockCurrentTuple with
 * a lock handle), read and verify its blobs, close the blob handles,
 * unlock the row, and finally check that no row is still locked even
 * though the transaction has not committed.
 * style: 0 = getValue, 1 = read hook, 2 = explicit readBlobData.
 * api:   API_RECATTR or the NdbRecord path.
 * Timeouts restart the whole scan, up to g_opt.m_timeout_retries.
 */
static int
lockUnlockScan(int style, int api, bool idx)
{
  DBG("--- " << "lockUnlockScan" << (idx ? "Idx" : "") << " " << stylename[style] << " " << apiName[api] << " ---");
  Tup tup;
  tup.alloc(); // allocate buffers

  Uint32 opTimeoutRetries= g_opt.m_timeout_retries;
  enum OpState opState;

  do
  {
    opState= Normal;
    CHK((g_con = g_ndb->startTransaction()) != 0);
    /* NOTE(review): the declaration of 'lm' (NdbOperation::LockMode)
     * and the body of this 'if' (presumably lm = LM_Exclusive) are
     * missing from this listing (original lines 3003/3005 dropped by
     * extraction) — verify against the pristine source. */
    if (urandom(2) == 0)

    Uint32 scanFlags = g_scanFlags | NdbScanOperation::SF_KeyInfo;

    if (api == API_RECATTR)
    {
      if (! idx) {
        CHK((g_ops = g_con->getNdbScanOperation(g_opt.m_tname)) != 0);
      } else {
        CHK((g_ops = g_con->getNdbIndexScanOperation(g_opt.m_x2name, g_opt.m_tname)) != 0);
      }
      CHK(g_ops->readTuples(lm,
                            scanFlags,
                            g_batchSize,
                            g_parallel) == 0);
      CHK(g_ops->getValue("PK1", (char*)&tup.m_pk1) != 0);
      if (g_opt.m_pk2chr.m_len != 0)
      {
        CHK(g_ops->getValue("PK2", tup.m_pk2) != 0);
        CHK(g_ops->getValue("PK3", (char *) &tup.m_pk3) != 0);
      }
      /* Don't bother setting UserDefined partitions for scan tests */
    }
    else
    {
      /* NOTE(review): the declaration of 'opts' (presumably
       * NdbScanOperation::ScanOptions) is missing from this listing
       * (original line 3030 dropped by extraction). */
      opts.optionsPresent = NdbScanOperation::ScanOptions::SO_SCANFLAGS;
      opts.scan_flags = scanFlags;

      /* Don't bother setting UserDefined partitions for scan tests */
      if (! idx)
        CHK((g_ops= g_con->scanTable(g_key_record,
                                     lm, 0, &opts, sizeof(opts))) != 0);
      else
        CHK((g_ops= g_con->scanIndex(g_ord_record, g_key_record,
                                     lm, 0, 0, &opts, sizeof(opts))) != 0);
    }
    CHK(g_con->execute(NoCommit) == 0);
    unsigned rows = 0;
    while (1) {
      const char *out_row= NULL;
      int ret;

      if (api == API_RECATTR)
      {
        // poison the key fields so a stale value is detectable
        tup.m_pk1 = (Uint32)-1;
        memset(tup.m_pk2, 'x', g_opt.m_pk2chr.m_totlen);
        tup.m_pk3 = -1;

        ret = g_ops->nextResult(true);
      }
      else
      {
        if(0 == (ret = g_ops->nextResult(&out_row, true, false)))
        {
          memcpy(&tup.m_pk1, &out_row[g_pk1_offset], sizeof(tup.m_pk1));
          if (g_opt.m_pk2chr.m_len != 0) {
            memcpy(tup.m_pk2, &out_row[g_pk2_offset], g_opt.m_pk2chr.m_totlen);
            memcpy(&tup.m_pk3, &out_row[g_pk3_offset], sizeof(tup.m_pk3));
          }
        }
      }

      if (ret == -1)
      {
        /* Timeout? */
        if (conHasTimeoutError())
        {
          /* Break out and restart scan unless we've
           * run out of attempts
           */
          DISP("Scan failed due to deadlock timeout ("
               << conError() <<"), retries left :"
               << opTimeoutRetries -1);
          CHK(--opTimeoutRetries);

          opState= Retrying;
          sleep(1);
          break;
        }
      }
      CHK(opState == Normal);
      CHK((ret == 0) || (ret == 1));
      if (ret == 1)
        break;

      DBG("lockUnlockScan" << (idx ? "Idx" : "") << " pk1=" << hex << tup.m_pk1);
      /* Get tuple info for current row */
      Uint32 k = tup.m_pk1 - g_opt.m_pk1off;
      CHK(k < g_opt.m_rows && g_tups[k].m_exists);
      tup.copyfrom(g_tups[k]);

      if (api == API_RECATTR)
      {
        CHK((g_opr = g_ops->lockCurrentTuple()) != 0);
        CHK(g_opr->getLockHandle() != NULL);
        CHK(getBlobHandles(g_opr) == 0);
      }
      else
      {
        /* NOTE(review): the declaration of 'opts' (presumably
         * NdbOperation::OperationOptions) is missing from this listing
         * (original line 3105 dropped by extraction). */
        opts.optionsPresent = NdbOperation::OperationOptions::OO_LOCKHANDLE;
        CHK((g_const_opr = g_ops->lockCurrentTuple(g_con, g_blob_record, tup.m_row,
                                                   0, &opts, sizeof(opts))) != 0);
        CHK(getBlobHandles(g_const_opr) == 0);
      }
      bool timeout= false;
      if (style == 0) {
        CHK(getBlobValue(tup) == 0);
      } else if (style == 1) {
        CHK(setBlobReadHook(tup) == 0);
      } else {
        CHK(g_con->execute(NoCommit) == 0);
        if (readBlobData(tup))
          CHK((timeout= conHasTimeoutError()) == true);
      }
      if (!timeout)
      {
        if (g_con->execute(NoCommit) == 0)
        {
          /* Read executed successfully,
           * - Verify the Blob data
           * - Verify the row is locked
           * - Close the Blob handles
           * - Attempt to unlock
           */
          NdbOperation::LockMode lmused = g_ops->getLockMode();
          CHK((lmused == NdbOperation::LM_Read) ||
              (lmused == NdbOperation::LM_Exclusive));

          if (style == 0 || style == 1) {
            CHK(verifyBlobValue(tup) == 0);
          }

          /* Occasionally check that we are locked */
          if (urandom(200) == 0)
            CHK(verifyRowLocked(tup) == 0);

          /* Close Blob handles */
          CHK(g_bh1->close() == 0);
          if (! g_opt.m_oneblob)
            CHK(g_bh2->close() == 0);

          /* NOTE(review): a guarding condition on this compound
           * statement appears to be missing from this listing
           * (original line 3148 dropped by extraction). */
          {
            /* All Blob handles closed, now we can issue an
             * unlock operation and the main row should be
             * unlocked
             */
            const NdbOperation* readOp = (g_opr?g_opr:g_const_opr);
            const NdbLockHandle* lh = readOp->getLockHandle();
            CHK(lh != NULL);
            const NdbOperation* unlockOp = g_con->unlock(lh);
            CHK(unlockOp != NULL);
          }

          /* All Blob handles closed - manual or automatic
           * unlock op has been enqueued. Now execute
           */
          CHK(g_con->execute(NoCommit) == 0);
        }
        else
        {
          CHK((timeout= conHasTimeoutError()) == true);
        }
      }

      if (timeout)
      {
        DISP("Scan read lock unlock timeout("
             << conError()
             << ") Retries left : "
             << opTimeoutRetries-1);
        CHK(opTimeoutRetries--);
        opState= Retrying;
        sleep(1);
        break;
      }

      g_const_opr = 0;
      g_opr = 0;
      rows++;
    }
    if (opState == Normal)
    {
      /* We've scanned all rows, locked them and then unlocked them
       * All rows should now be unlocked despite the transaction
       * not being committed.
       */
      for (unsigned k = 0; k < g_opt.m_rows; k++) {
        CHK(verifyRowNotLocked(g_tups[k]) == 0);
      }

      CHK(g_con->execute(Commit) == 0);
      CHK(g_opt.m_rows == rows);
    }
    g_ndb->closeTransaction(g_con);
  } while (opState == Retrying);
  g_con = 0;
  g_ops = 0;
  return 0;
}
3207 
/*
 * Scan all rows (table or ordered-index scan) and delete each via
 * takeover (deleteCurrentTuple), executing in batches of
 * g_opt.m_batch; with -fac, each batch is committed and the
 * transaction restarted.  Timeouts restart the whole scan, up to
 * g_opt.m_timeout_retries times.  Returns 0 on success, -1 via CHK.
 */
static int
deleteScan(int api, bool idx)
{
  DBG("--- " << "deleteScan" << (idx ? "Idx" : "") << apiName[api] << " ---");
  Tup tup;
  Uint32 opTimeoutRetries= g_opt.m_timeout_retries;
  enum OpState opState;
  unsigned rows = 0;   // total rows deleted; accumulates across retries

  do
  {
    opState= Normal;

    CHK((g_con = g_ndb->startTransaction()) != 0);

    if (api == API_RECATTR)
    {
      if (! idx) {
        CHK((g_ops = g_con->getNdbScanOperation(g_opt.m_tname)) != 0);
      } else {
        CHK((g_ops = g_con->getNdbIndexScanOperation(g_opt.m_x2name, g_opt.m_tname)) != 0);
      }
      /* NOTE(review): the opening 'CHK(g_ops->readTuples(<lock mode>,'
       * line is missing from this listing (original line 3230 dropped
       * by extraction) — verify against the pristine source. */
      g_scanFlags,
      g_batchSize,
      g_parallel) == 0);
      CHK(g_ops->getValue("PK1", (char*)&tup.m_pk1) != 0);
      if (g_opt.m_pk2chr.m_len != 0)
      {
        CHK(g_ops->getValue("PK2", tup.m_pk2) != 0);
        CHK(g_ops->getValue("PK3", (char *) &tup.m_pk3) != 0);
      }
      /* Don't bother setting UserDefined partitions for scan tests */
    }
    else
    {
      /* Don't bother setting UserDefined partitions for scan tests */
      /* NOTE(review): the continuation lines (lock-mode argument and
       * closing parentheses) of these scanTable/scanIndex calls are
       * missing from this listing (original lines 3247/3250 dropped by
       * extraction) — verify against the pristine source. */
      if (! idx)
        CHK((g_ops= g_con->scanTable(g_key_record,
      else
        CHK((g_ops= g_con->scanIndex(g_ord_record, g_key_record,
    }
    CHK(g_con->execute(NoCommit) == 0);
    unsigned n = 0;   // deletes queued since last execute
    while (1) {
      int ret;

      if (api == API_RECATTR)
      {
        // poison the key fields so a stale value is detectable
        tup.m_pk1 = (Uint32)-1;
        memset(tup.m_pk2, 'x', g_opt.m_pk2chr.m_len);
        tup.m_pk3 = -1;
        ret = g_ops->nextResult(true);
      }
      else
      {
        const char *out_row= NULL;

        if (0 == (ret = g_ops->nextResult(&out_row, true, false)))
        {
          memcpy(&tup.m_pk1, &out_row[g_pk1_offset], sizeof(tup.m_pk1));
          if (g_opt.m_pk2chr.m_len != 0)
          {
            memcpy(tup.m_pk2, &out_row[g_pk2_offset], g_opt.m_pk2chr.m_totlen);
            memcpy(&tup.m_pk3, &out_row[g_pk3_offset], sizeof(tup.m_pk3));
          }
        }
      }

      if (ret == -1)
      {
        /* Timeout? */
        if (conHasTimeoutError())
        {
          /* Break out and restart scan unless we've
           * run out of attempts
           */
          DISP("Scan delete failed due to deadlock timeout ("
               << conError() <<") retries left :"
               << opTimeoutRetries -1);
          CHK(--opTimeoutRetries);

          opState= Retrying;
          sleep(1);
          break;
        }
      }
      CHK(opState == Normal);
      CHK((ret == 0) || (ret == 1));
      if (ret == 1)
        break;

      // inner loop: drain the current fetched batch (nextResult(false))
      while (1) {
        DBG("deleteScan" << (idx ? "Idx" : "") << " pk1=" << hex << tup.m_pk1);
        Uint32 k = tup.m_pk1 - g_opt.m_pk1off;
        CHK(k < g_opt.m_rows && g_tups[k].m_exists);
        g_tups[k].m_exists = false;
        if (api == API_RECATTR)
          CHK(g_ops->deleteCurrentTuple() == 0);
        else
          CHK(g_ops->deleteCurrentTuple(g_con, g_key_record) != NULL);
        tup.m_pk1 = (Uint32)-1;
        memset(tup.m_pk2, 'x', g_opt.m_pk2chr.m_len);
        tup.m_pk3 = -1;
        if (api == API_RECATTR)
          ret = g_ops->nextResult(false);
        else
        {
          const char *out_row= NULL;
          ret = g_ops->nextResult(&out_row, false, false);
          if (ret == 0)
          {
            memcpy(&tup.m_pk1, &out_row[g_pk1_offset], sizeof(tup.m_pk1));
            if (g_opt.m_pk2chr.m_len != 0)
            {
              memcpy(tup.m_pk2, &out_row[g_pk2_offset], g_opt.m_pk2chr.m_totlen);
              memcpy(&tup.m_pk3, &out_row[g_pk3_offset], sizeof(tup.m_pk3));
            }
          }
        }

        if (ret == -1)
        {
          /* Timeout? */
          if (conHasTimeoutError())
          {
            /* Break out and restart scan unless we've
             * run out of attempts
             */
            DISP("Scan delete failed due to deadlock timeout ("
                 << conError() <<") retries left :"
                 << opTimeoutRetries -1);
            CHK(--opTimeoutRetries);

            opState= Retrying;
            sleep(1);
            break;
          }
        }
        CHK(opState == Normal);
        // ret == 2 means the locally-cached batch is exhausted
        CHK((ret == 0) || (ret == 1) || (ret == 2));

        if (++n == g_opt.m_batch || ret == 2) {
          DBG("execute batch: n=" << n << " ret=" << ret);
          if (! g_opt.m_fac) {
            CHK(g_con->execute(NoCommit) == 0);
          } else {
            CHK(g_con->execute(Commit) == 0);
            CHK(g_con->restart() == 0);
          }
          rows+= n;
          n = 0;
        }
        if (ret == 2)
          break;
      }
      if (opState == Retrying)
        break;
    }
    if (opState == Normal)
    {
      rows+= n;
      CHK(g_con->execute(Commit) == 0);
      CHK(g_opt.m_rows == rows);
    }
    g_ndb->closeTransaction(g_con);

  } while (opState == Retrying);
  g_con = 0;
  g_ops = 0;
  return 0;
}
3382 
3383 
/* Key-based operation types exercised by bugtest_36756.
 * Pk* variants address the row by primary key, Uk* variants go via
 * the unique hash index (g_opt.m_x1name).
 * Values are contiguous starting at 0 (PkRead) and are used both as
 * loop bounds (PkRead..UkDelete) and as the first index into the
 * outcomes[][] table in bugtest_36756 -- do not reorder. */
enum OpTypes {
  PkRead,
  PkInsert,
  PkUpdate,
  PkWrite,
  PkDelete,
  UkRead,
  UkUpdate,
  UkWrite,
  UkDelete};
3394 
3395 static const char*
3396 operationName(OpTypes optype)
3397 {
3398  switch(optype){
3399  case PkRead:
3400  return "Pk Read";
3401  case PkInsert:
3402  return "Pk Insert";
3403  case PkUpdate:
3404  return "Pk Update";
3405  case PkWrite:
3406  return "Pk Write";
3407  case PkDelete:
3408  return "Pk Delete";
3409  case UkRead:
3410  return "Uk Read";
3411  case UkUpdate:
3412  return "Uk Update";
3413  case UkWrite:
3414  return "Uk Write";
3415  case UkDelete:
3416  return "Uk Delete";
3417  default:
3418  return "Bad operation type";
3419  }
3420 }
3421 
/* Human-readable name for an execute() abort option used in this test:
 * 0 selects AbortOnError, any other value IgnoreError. */
static const char*
aoName(int abortOption)
{
  return (abortOption == 0) ? "AbortOnError" : "IgnoreError";
}
3429 
/* Define one key-based operation of the given type on the current
 * transaction g_con, returning it in 'op'.
 * Pk* types use a table (primary key) operation; Uk* types use an
 * index operation on the unique index g_opt.m_x1name.
 * Sets up: operation type, key columns, user-defined partition id
 * (pk ops only), blob handles, and blob get/set buffers from 'tup'.
 * The caller is responsible for executing the transaction.
 * Returns 0 on success, -1 on any CHK failure. */
static int
setupOperation(NdbOperation*& op, OpTypes optype, Tup& tup)
{
  // Classify: primary-key op or unique-index op
  bool pkop;
  switch(optype){
  case PkRead: case PkInsert : case PkUpdate:
  case PkWrite : case PkDelete :
    pkop=true;
    break;
  default:
    pkop= false;
  }

  if (pkop)
    CHK((op= g_con->getNdbOperation(g_opt.m_tname)) != 0);
  else
    CHK((op = g_con->getNdbIndexOperation(g_opt.m_x1name, g_opt.m_tname)) != 0);

  // Select the operation verb
  switch(optype){
  case PkRead:
  case UkRead:
    CHK(op->readTuple() == 0);
    break;
  case PkInsert:
    CHK(op->insertTuple() == 0);
    break;
  case PkUpdate:
  case UkUpdate:
    CHK(op->updateTuple() == 0);
    break;
  case PkWrite:
  case UkWrite:
    CHK(op->writeTuple() == 0);
    break;
  case PkDelete:
  case UkDelete:
    CHK(op->deleteTuple() == 0);
    break;
  default:
    CHK(false);
    return -1;
  }

  // Key columns: PK1 (+PK2/PK3 when a char pk2 is configured) for pk
  // ops; the unique index is on PK2/PK3 only.
  if (pkop)
  {
    setUDpartId(tup, op);
    CHK(op->equal("PK1", tup.m_pk1) == 0);
    if (g_opt.m_pk2chr.m_len != 0)
    {
      CHK(op->equal("PK2", tup.m_pk2) == 0);
      CHK(op->equal("PK3", tup.m_pk3) == 0);
    }
  }
  else
  {
    CHK(op->equal("PK2", tup.m_pk2) == 0);
    CHK(op->equal("PK3", tup.m_pk3) == 0);
  }

  CHK(getBlobHandles(op) == 0);

  // Attach blob buffers: reads fetch into tup, writes send from tup,
  // deletes need no blob data.
  switch(optype){
  case PkRead:
  case UkRead:
    CHK(getBlobValue(tup) == 0);
    break;
  case PkInsert:
  case PkUpdate:
  case UkUpdate:
    /* Fall through */
  case PkWrite:
  case UkWrite:
    CHK(setBlobValue(tup) == 0);
    break;
  case PkDelete:
  case UkDelete:
    /* Nothing */
    break;
  default:
    CHK(false);
    return -1;
  }

  return 0;
}
3515 
3516 static int
3517 bugtest_36756()
3518 {
3519  /* Transaction which had accessed a Blob table was ignoring
3520  * abortOption passed in the execute() call.
3521  * Check that option passed in execute() call overrides
3522  * default / manually set operation abortOption, even in the
3523  * presence of Blobs in the transaction
3524  */
3525 
3526  /* Operation AbortOnError IgnoreError
3527  * PkRead NoDataFound* NoDataFound
3528  * PkInsert Duplicate key Duplicate key*
3529  * PkUpdate NoDataFound NoDataFound*
3530  * PkWrite NoDataFound NoDataFound*
3531  * PkDelete NoDataFound NoDataFound*
3532  * UkRead NoDataFound* NoDataFound
3533  * UkUpdate NoDataFound NoDataFound*
3534  * UkWrite NoDataFound NoDataFound*
3535  * UkDelete NoDataFound NoDataFound*
3536  *
3537  * * Are interesting, where non-default behaviour is requested.
3538  */
3539 
3540  struct ExpectedOutcome
3541  {
3542  int executeRc;
3543  int transactionErrorCode;
3544  int opr1ErrorCode;
3545  int opr2ErrorCode;
3546  int commitStatus;
3547  };
3548 
3549  /* Generally, AbortOnError sets the transaction error
3550  * but not the Operation error codes
3551  * IgnoreError sets the transaction error and the
3552  * failing operation error code(s)
3553  * Odd cases :
3554  * Pk Write : Can't fail due to key presence, just
3555  * incorrect NULLs etc.
3556  * Uk Write : Key must exist, so not really different
3557  * to Update?
3558  */
3559  ExpectedOutcome outcomes[9][2]=
3560  {
3561  // PkRead
3562  {{-1, 626, 0, 0, NdbTransaction::Aborted}, // AE
3563  {0, 626, 0, 626, NdbTransaction::Started}}, // IE
3564  // PkInsert
3565  // Note operation order reversed for insert
3566  {{-1, 630, 0, 0, NdbTransaction::Aborted}, // AE
3567  {0, 630, 0, 630, NdbTransaction::Started}}, // IE
3568  // PkUpdate
3569  {{-1, 626, 0, 0, NdbTransaction::Aborted}, // AE
3570  {0, 626, 0, 626, NdbTransaction::Started}}, // IE
3571  // PkWrite
3572  {{0, 0, 0, 0, NdbTransaction::Started}, // AE
3573  {0, 0, 0, 0, NdbTransaction::Started}}, // IE
3574  // PkDelete
3575  {{-1, 626, 0, 0, NdbTransaction::Aborted}, // AE
3576  {0, 626, 0, 626, NdbTransaction::Started}}, // IE
3577  // UkRead
3578  {{-1, 626, 0, 0, NdbTransaction::Aborted}, // AE
3579  {0, 626, 0, 626, NdbTransaction::Started}}, // IE
3580  // UkUpdate
3581  {{-1, 626, 0, 0, NdbTransaction::Aborted}, // AE
3582  {0, 626, 0, 626, NdbTransaction::Started}}, // IE
3583  // UkWrite
3584  {{-1, 626, 0, 0, NdbTransaction::Aborted}, // AE
3585  {0, 626, 0, 626, NdbTransaction::Started}}, // IE
3586  // UkDelete
3587  {{-1, 626, 0, 0, NdbTransaction::Aborted}, // AE
3588  {0, 626, 0, 626, NdbTransaction::Started}} // IE
3589  };
3590 
3591  DBG("bugtest_36756 : IgnoreError Delete of nonexisting tuple aborts");
3592  DBG(" Also 36851 : Insert IgnoreError of existing tuple aborts");
3593 
3594  for (int iterations=0; iterations < 50; iterations++)
3595  {
3596  /* Recalculate and insert different tuple every time to
3597  * get different keys(and therefore nodes), and
3598  * different length Blobs, including zero length
3599  * and NULL
3600  */
3601  calcTups(true);
3602 
3603  Tup& tupExists = g_tups[0];
3604  Tup& tupDoesNotExist = g_tups[1];
3605 
3606  /* Setup table with just 1 row present */
3607  CHK((g_con= g_ndb->startTransaction()) != 0);
3608  CHK((g_opr= g_con->getNdbOperation(g_opt.m_tname)) != 0);
3609  CHK(g_opr->insertTuple() == 0);
3610  CHK(g_opr->equal("PK1", tupExists.m_pk1) == 0);
3611  if (g_opt.m_pk2chr.m_len != 0)
3612  {
3613  CHK(g_opr->equal("PK2", tupExists.m_pk2) == 0);
3614  CHK(g_opr->equal("PK3", tupExists.m_pk3) == 0);
3615  }
3616  setUDpartId(tupExists, g_opr);
3617  CHK(getBlobHandles(g_opr) == 0);
3618 
3619  CHK(setBlobValue(tupExists) == 0);
3620 
3621  CHK(g_con->execute(Commit) == 0);
3622  g_con->close();
3623 
3624  DBG("Iteration : " << iterations);
3625  for (int optype=PkRead; optype <= UkDelete; optype++)
3626  {
3627  DBG(" " << operationName((OpTypes)optype));
3628 
3629  Tup* tup1= &tupExists;
3630  Tup* tup2= &tupDoesNotExist;
3631 
3632  if (optype == PkInsert)
3633  {
3634  /* Inserts - we want the failing operation to be second
3635  * rather than first to avoid hitting bugs with IgnoreError
3636  * and the first DML in a transaction
3637  * So we swap them
3638  */
3639  tup1= &tupDoesNotExist; // (Insert succeeds)
3640  tup2= &tupExists; //(Insert fails)
3641  }
3642 
3643  for (int abortOption=0; abortOption < 2; abortOption++)
3644  {
3645  DBG(" " << aoName(abortOption));
3646  NdbOperation *opr1, *opr2;
3647  NdbOperation::AbortOption ao= (abortOption==0)?
3650 
3651  CHK((g_con= g_ndb->startTransaction()) != 0);
3652 
3653  /* Operation 1 */
3654  CHK(setupOperation(opr1, (OpTypes)optype, *tup1) == 0);
3655 
3656  /* Operation2 */
3657  CHK(setupOperation(opr2, (OpTypes)optype, *tup2) == 0);
3658 
3659  ExpectedOutcome eo= outcomes[optype][abortOption];
3660 
3661  int rc = g_con->execute(NdbTransaction::NoCommit, ao);
3662 
3663  DBG("execute returned " << rc <<
3664  " Trans err " << g_con->getNdbError().code <<
3665  " Opr1 err " << opr1->getNdbError().code <<
3666  " Opr2 err " << opr2->getNdbError().code <<
3667  " CommitStatus " << g_con->commitStatus());
3668 
3669  CHK(rc == eo.executeRc);
3670  CHK(g_con->getNdbError().code == eo.transactionErrorCode);
3671  CHK(opr1->getNdbError().code == eo.opr1ErrorCode);
3672  CHK(opr2->getNdbError().code == eo.opr2ErrorCode);
3673  CHK(g_con->commitStatus() == eo.commitStatus);
3674 
3675  g_con->close();
3676  }
3677  }
3678 
3679  /* Now delete the 'existing'row */
3680  CHK((g_con= g_ndb->startTransaction()) != 0);
3681  CHK((g_opr= g_con->getNdbOperation(g_opt.m_tname)) != 0);
3682  CHK(g_opr->deleteTuple() == 0);
3683  setUDpartId(tupExists, g_opr);
3684  CHK(g_opr->equal("PK1", tupExists.m_pk1) == 0);
3685  if (g_opt.m_pk2chr.m_len != 0)
3686  {
3687  CHK(g_opr->equal("PK2", tupExists.m_pk2) == 0);
3688  CHK(g_opr->equal("PK3", tupExists.m_pk3) == 0);
3689  }
3690 
3691  CHK(g_con->execute(Commit) == 0);
3692  g_con->close();
3693  }
3694 
3695  g_opr= 0;
3696  g_con= 0;
3697  g_bh1= 0;
3698 
3699  return 0;
3700 }
3701 
3702 
3703 static int
3704 bugtest_45768()
3705 {
3706  /* Transaction inserting using blobs has an early error
3707  resulting in kernel-originated rollback.
3708  Api then calls execute(Commit) which chokes on Blob
3709  objects
3710 
3711  */
3712  DBG("bugtest_45768 : Batched blob transaction with abort followed by commit");
3713 
3714  const int numIterations = 5;
3715 
3716  for (int iteration=0; iteration < numIterations; iteration++)
3717  {
3718  /* Recalculate and insert different tuple every time to
3719  * get different keys(and therefore nodes), and
3720  * different length Blobs, including zero length
3721  * and NULL
3722  */
3723  calcTups(true);
3724 
3725  const Uint32 totalRows = 100;
3726  const Uint32 preExistingTupNum = totalRows / 2;
3727 
3728  Tup& tupExists = g_tups[ preExistingTupNum ];
3729 
3730  /* Setup table with just 1 row present */
3731  CHK((g_con= g_ndb->startTransaction()) != 0);
3732  CHK((g_opr= g_con->getNdbOperation(g_opt.m_tname)) != 0);
3733  CHK(g_opr->insertTuple() == 0);
3734  CHK(g_opr->equal("PK1", tupExists.m_pk1) == 0);
3735  if (g_opt.m_pk2chr.m_len != 0)
3736  {
3737  CHK(g_opr->equal("PK2", tupExists.m_pk2) == 0);
3738  CHK(g_opr->equal("PK3", tupExists.m_pk3) == 0);
3739  }
3740  setUDpartId(tupExists, g_opr);
3741  CHK(getBlobHandles(g_opr) == 0);
3742 
3743  CHK(setBlobValue(tupExists) == 0);
3744 
3745  CHK(g_con->execute(Commit) == 0);
3746  g_con->close();
3747 
3748  DBG("Iteration : " << iteration);
3749 
3750  /* Now do batched insert, including a TUP which already
3751  * exists
3752  */
3753  int rc = 0;
3754  int retries = 10;
3755 
3756  do
3757  {
3758  CHK((g_con = g_ndb->startTransaction()) != 0);
3759 
3760  for (Uint32 tupNum = 0; tupNum < totalRows ; tupNum++)
3761  {
3762  Tup& tup = g_tups[ tupNum ];
3763  CHK((g_opr = g_con->getNdbOperation(g_opt.m_tname)) != 0);
3764  CHK(g_opr->insertTuple() == 0);
3765  CHK(g_opr->equal("PK1", tup.m_pk1) == 0);
3766  if (g_opt.m_pk2chr.m_len != 0)
3767  {
3768  CHK(g_opr->equal("PK2", tup.m_pk2) == 0);
3769  CHK(g_opr->equal("PK3", tup.m_pk3) == 0);
3770  }
3771  setUDpartId(tup, g_opr);
3772 
3773  CHK(getBlobHandles(g_opr) == 0);
3774  CHK(setBlobValue(tup) == 0);
3775  }
3776 
3777  /* Now execute NoCommit */
3778  int rc = g_con->execute(NdbTransaction::NoCommit);
3779 
3780  CHK(rc == -1);
3781 
3782  if (g_con->getNdbError().code == 630)
3783  break; /* Expected */
3784 
3785  CHK(g_con->getNdbError().code == 1218); // Send buffers overloaded
3786 
3787  DBG("Send Buffers overloaded, retrying");
3788  sleep(1);
3789  g_con->close();
3790  } while (retries--);
3791 
3792  CHK(g_con->getNdbError().code == 630);
3793 
3794  /* Now execute Commit */
3795  rc = g_con->execute(NdbTransaction::Commit);
3796 
3797  CHK(rc == -1);
3798  /* Transaction aborted already */
3799  CHK(g_con->getNdbError().code == 4350);
3800 
3801  g_con->close();
3802 
3803  /* Now delete the 'existing'row */
3804  CHK((g_con= g_ndb->startTransaction()) != 0);
3805  CHK((g_opr= g_con->getNdbOperation(g_opt.m_tname)) != 0);
3806  CHK(g_opr->deleteTuple() == 0);
3807  setUDpartId(tupExists, g_opr);
3808  CHK(g_opr->equal("PK1", tupExists.m_pk1) == 0);
3809  if (g_opt.m_pk2chr.m_len != 0)
3810  {
3811  CHK(g_opr->equal("PK2", tupExists.m_pk2) == 0);
3812  CHK(g_opr->equal("PK3", tupExists.m_pk3) == 0);
3813  }
3814 
3815  CHK(g_con->execute(Commit) == 0);
3816  g_con->close();
3817  }
3818 
3819  g_opr= 0;
3820  g_con= 0;
3821  g_bh1= 0;
3822 
3823  return 0;
3824 }
3825 
3826 static int bugtest_48040()
3827 {
3828  /* When batch of operations triggers unique index
3829  * maint triggers (which fire back to TC) and
3830  * TC is still receiving ops in batch from the API
3831  * TC uses ContinueB to self to defer trigger
3832  * processing until all operations have been
3833  * received.
3834  * If the transaction starts aborting (due to some
3835  * problem in the original operations) while the
3836  * ContinueB is 'in-flight', the ContinueB never
3837  * terminates and causes excessive CPU consumption
3838  *
3839  * This testcase sets an ERROR INSERT to detect
3840  * the excessive ContinueB use in 1 transaction,
3841  * and runs bugtest_bug45768 to generate the
3842  * scenario
3843  */
3844  NdbRestarter restarter;
3845 
3846  DBG("bugtest 48040 - Infinite ContinueB loop in TC abort + unique");
3847 
3848  restarter.waitConnected();
3849 
3850  int rc = restarter.insertErrorInAllNodes(8082);
3851 
3852  DBG(" Initial error insert rc" << rc << endl);
3853 
3854  rc = bugtest_45768();
3855 
3856  /* Give time for infinite loop to build */
3857  sleep(10);
3858  restarter.insertErrorInAllNodes(0);
3859 
3860  return rc;
3861 }
3862 
3863 
3864 static int bugtest_62321()
3865 {
3866  /* Having a Blob operation in a batch with other operations
3867  * causes the other operation's ignored error not to be
3868  * set as the transaction error code after execution.
3869  * This is used (e.g in MySQLD) to check for conflicts
3870  */
3871  DBG("bugtest_62321 : Error code from other ops in batch obscured");
3872 
3873  /*
3874  1) Setup table : 1 row exists, another doesnt
3875  2) Start transaction
3876  3) Define failing before op
3877  4) Define Blob op with/without post-exec part
3878  5) Define failing after op
3879  6) Execute
3880  7) Check results
3881  */
3882  calcTups(true);
3883 
3884  /* Setup table */
3885  Tup& tupExists = g_tups[0];
3886  Tup& notExists = g_tups[1];
3887  {
3888  CHK((g_con= g_ndb->startTransaction()) != 0);
3889  CHK((g_opr= g_con->getNdbOperation(g_opt.m_tname)) != 0);
3890  CHK(g_opr->insertTuple() == 0);
3891  CHK(g_opr->equal("PK1", tupExists.m_pk1) == 0);
3892  if (g_opt.m_pk2chr.m_len != 0)
3893  {
3894  CHK(g_opr->equal("PK2", tupExists.m_pk2) == 0);
3895  CHK(g_opr->equal("PK3", tupExists.m_pk3) == 0);
3896  }
3897  setUDpartId(tupExists, g_opr);
3898  CHK(getBlobHandles(g_opr) == 0);
3899 
3900  CHK(setBlobValue(tupExists) == 0);
3901 
3902  CHK(g_con->execute(Commit) == 0);
3903  g_con->close();
3904  }
3905 
3906  for (int scenario = 0; scenario < 4; scenario++)
3907  {
3908  DBG(" Scenario : " << scenario);
3909  CHK((g_con= g_ndb->startTransaction()) != 0);
3910  NdbOperation* failOp = NULL;
3911  if ((scenario & 0x1) == 0)
3912  {
3913  DBG(" Fail op before");
3914  /* Define failing op in batch before Blob op */
3915  failOp= g_con->getNdbOperation(g_opt.m_tname);
3916  CHK(failOp != 0);
3917  CHK(failOp->readTuple() == 0);
3918  CHK(failOp->equal("PK1", notExists.m_pk1) == 0);
3919  if (g_opt.m_pk2chr.m_len != 0)
3920  {
3921  CHK(failOp->equal("PK2", notExists.m_pk2) == 0);
3922  CHK(failOp->equal("PK3", notExists.m_pk3) == 0);
3923  }
3924  setUDpartId(notExists, failOp);
3925  CHK(failOp->getValue("PK1") != 0);
3926  CHK(failOp->setAbortOption(NdbOperation::AO_IgnoreError) == 0);
3927  }
3928 
3929  /* Now define successful Blob op */
3930  CHK((g_opr= g_con->getNdbOperation(g_opt.m_tname)) != 0);
3931  CHK(g_opr->readTuple() == 0);
3932  CHK(g_opr->equal("PK1", tupExists.m_pk1) == 0);
3933  if (g_opt.m_pk2chr.m_len != 0)
3934  {
3935  CHK(g_opr->equal("PK2", tupExists.m_pk2) == 0);
3936  CHK(g_opr->equal("PK3", tupExists.m_pk3) == 0);
3937  }
3938  setUDpartId(tupExists, g_opr);
3939  CHK(getBlobHandles(g_opr) == 0);
3940 
3941  CHK(getBlobValue(tupExists) == 0);
3942 
3943 
3944  /* Define failing batch op after Blob op if not defined before */
3945  if (failOp == 0)
3946  {
3947  DBG(" Fail op after");
3948  failOp= g_con->getNdbOperation(g_opt.m_tname);
3949  CHK(failOp != 0);
3950  CHK(failOp->readTuple() == 0);
3951  CHK(failOp->equal("PK1", notExists.m_pk1) == 0);
3952  if (g_opt.m_pk2chr.m_len != 0)
3953  {
3954  CHK(failOp->equal("PK2", notExists.m_pk2) == 0);
3955  CHK(failOp->equal("PK3", notExists.m_pk3) == 0);
3956  }
3957  setUDpartId(notExists, failOp);
3958  CHK(failOp->getValue("PK1") != 0);
3959  CHK(failOp->setAbortOption(NdbOperation::AO_IgnoreError) == 0);
3960  }
3961 
3962  /* Now execute and check rc etc */
3963  NdbTransaction::ExecType et = (scenario & 0x2) ?
3966 
3967  DBG(" Executing with execType = " << ((et == NdbTransaction::NoCommit)?
3968  "NoCommit":"Commit"));
3969  int rc = g_con->execute(NdbTransaction::NoCommit);
3970 
3971  CHK(rc == 0);
3972  CHK(g_con->getNdbError().code == 626);
3973  CHK(failOp->getNdbError().code == 626);
3974  CHK(g_opr->getNdbError().code == 0);
3975  DBG(" Error code on transaction as expected");
3976 
3977  g_con->close();
3978  }
3979 
3980  return 0;
3981 }
3982 
3983 // main
3984 
3985 // from here on print always
3986 #undef DBG
3987 #define DBG(x) \
3988  do { \
3989  ndbout << "line " << __LINE__ << " " << x << endl; \
3990  } while (0)
3991 
/* Main functional test driver.
 * Connects to the cluster, creates the test table (memory and/or disk
 * storage), then for each configured loop runs the selected testcase
 * groups over both API styles (RecAttr / NdbRecord) and all access
 * styles (0..2): pk ops ('k'), hash index ops ('i'), table scans
 * ('s') and index scans ('r'), with insert/update/lock/delete
 * sub-cases selected by 'n','u','l','d','w'.
 * Returns 0 on success, -1 on any CHK failure. */
static int
testmain()
{
  g_ndb = new Ndb(g_ncc, "TEST_DB");
  CHK(g_ndb->init(20) == 0);
  CHK(g_ndb->waitUntilReady() == 0);
  g_dic = g_ndb->getDictionary();
  initblobs();
  initConstants();
  g_tups = new Tup [g_opt.m_rows];

  // Create tablespace if we're going to use disk based data
  if (testcase('h'))
    createDefaultTableSpace();

  // Seed PRNG: -1 means derive from pid; 0 means re-seed per loop below
  if (g_opt.m_seed == -1)
    g_opt.m_seed = getpid();
  if (g_opt.m_seed != 0) {
    DBG("random seed = " << g_opt.m_seed);
    ndb_srand(g_opt.m_seed);
  }
  // m_loop == 0 means loop forever
  for (g_loop = 0; g_opt.m_loop == 0 || g_loop < g_opt.m_loop; g_loop++) {
    for (int storage= 0; storage < 2; storage++) {
      if (!testcase(storageSymbol[storage]))
        continue;

      DBG("Create table " << storageName[storage]);
      CHK(dropTable() == 0);
      CHK(createTable(storage) == 0);
      { /* Dump created table information */
        Bcol& b1 = g_blob1;
        DBG("FragType: " << g_dic->getTable(g_opt.m_tname)->getFragmentType());
        CHK(NdbBlob::getBlobTableName(b1.m_btname, g_ndb, g_opt.m_tname, "BL1") == 0);
        DBG("BL1: inline=" << b1.m_inline << " part=" << b1.m_partsize << " table=" << b1.m_btname);
        if (! g_opt.m_oneblob) {
          Bcol& b2 = g_blob2;
          CHK(NdbBlob::getBlobTableName(b2.m_btname, g_ndb, g_opt.m_tname, "BL2") == 0);
          DBG("BL2: inline=" << b2.m_inline << " part=" << b2.m_partsize << " table=" << b2.m_btname);
        }
      }

      /* Capability to adjust disk scan parameters to avoid scan
       * timeouts with disk based Blobs (Error 274)
       */
      if (storage == STORAGE_DISK)
      {
        g_usingDisk= true;
        // TODO : Resolve whether we need to adjust these for disk data
        // Currently the scans are passing ok without this.
        g_batchSize= 0;
        g_parallel= 0;
        g_scanFlags= 0; //NdbScanOperation::SF_DiskScan;
      }
      else
      {
        g_usingDisk= false;
        g_batchSize= 0;
        g_parallel= 0;
        g_scanFlags= 0;
      }

      // TODO Remove/resolve
      DBG("Settings : usingdisk " << g_usingDisk
          << " batchSize " << g_batchSize
          << " parallel " << g_parallel
          << " scanFlags " << g_scanFlags);

      int style;
      int api;
      DBG("=== loop " << g_loop << " ===");
      if (g_opt.m_seed == 0)
        ndb_srand(g_loop);
      if (g_opt.m_bugtest != 0) {
        // test some bug# instead
        CHK((*g_opt.m_bugtest)() == 0);
        continue;
      }
      /* Loop over API styles */
      for (api = 0; api <=1; api++) {
        // pk
        if (! testcase(apiSymbol[api]))
          continue;
        for (style = 0; style <= 2; style++) {
          if (! testcase('k') || ! testcase(style) )
            continue;
          DBG("--- pk ops " << stylename[style] << " " << apiName[api] << " ---");
          if (testcase('n')) {
            // insert / read / update / lock / delete via primary key
            calcTups(true);
            CHK(insertPk(style, api) == 0);
            CHK(verifyBlob() == 0);
            CHK(readPk(style, api) == 0);
            if (testcase('u')) {
              calcTups(false);
              CHK(updatePk(style, api) == 0);
              CHK(verifyBlob() == 0);
              CHK(readPk(style, api) == 0);
            }
            if (testcase('l')) {
              CHK(readLockPk(style,api) == 0);
            }
            if (testcase('d')) {
              CHK(deletePk(api) == 0);
              CHK(deleteNoPk() == 0);
              CHK(verifyBlob() == 0);
            }
          }
          if (testcase('w')) {
            // same cycle but using write (upsert) instead of insert
            calcTups(true);
            CHK(writePk(style, api) == 0);
            CHK(verifyBlob() == 0);
            CHK(readPk(style, api) == 0);
            if (testcase('u')) {
              calcTups(false);
              CHK(writePk(style, api) == 0);
              CHK(verifyBlob() == 0);
              CHK(readPk(style, api) == 0);
            }
            if (testcase('l')) {
              CHK(readLockPk(style,api) == 0);
            }
            if (testcase('d')) {
              CHK(deletePk(api) == 0);
              CHK(deleteNoPk() == 0);
              CHK(verifyBlob() == 0);
            }
          }
        }

        // hash index
        for (style = 0; style <= 2; style++) {
          if (! testcase('i') || ! testcase(style))
            continue;
          DBG("--- idx ops " << stylename[style] << " " << apiName[api] << " ---");
          if (testcase('n')) {
            calcTups(true);
            CHK(insertPk(style, api) == 0);
            CHK(verifyBlob() == 0);
            CHK(readIdx(style, api) == 0);
            if (testcase('u')) {
              calcTups(false);
              CHK(updateIdx(style, api) == 0);
              CHK(verifyBlob() == 0);
              CHK(readIdx(style, api) == 0);
            }
            if (testcase('d')) {
              CHK(deleteIdx(api) == 0);
              CHK(verifyBlob() == 0);
            }
          }
          if (testcase('w')) {
            // NOTE(review): this path passes false (unlike the pk 'w'
            // path which passes true) -- presumably intentional; confirm
            calcTups(false);
            CHK(writePk(style, api) == 0);
            CHK(verifyBlob() == 0);
            CHK(readIdx(style, api) == 0);
            if (testcase('u')) {
              calcTups(false);
              CHK(writeIdx(style, api) == 0);
              CHK(verifyBlob() == 0);
              CHK(readIdx(style, api) == 0);
            }
            if (testcase('d')) {
              CHK(deleteIdx(api) == 0);
              CHK(verifyBlob() == 0);
            }
          }
        }
        // scan table
        for (style = 0; style <= 2; style++) {
          if (! testcase('s') || ! testcase(style))
            continue;
          DBG("--- table scan " << stylename[style] << " " << apiName[api] << " ---");
          calcTups(true);
          CHK(insertPk(style, api) == 0);
          CHK(verifyBlob() == 0);
          CHK(readScan(style, api, false) == 0);
          if (testcase('u')) {
            CHK(updateScan(style, api, false) == 0);
            CHK(verifyBlob() == 0);
          }
          if (testcase('l')) {
            CHK(lockUnlockScan(style, api, false) == 0);
          }
          if (testcase('d')) {
            CHK(deleteScan(api, false) == 0);
            CHK(verifyBlob() == 0);
          }
        }
        // scan index
        for (style = 0; style <= 2; style++) {
          if (! testcase('r') || ! testcase(style))
            continue;
          DBG("--- index scan " << stylename[style] << " " << apiName[api] << " ---");
          calcTups(true);
          CHK(insertPk(style, api) == 0);
          CHK(verifyBlob() == 0);
          CHK(readScan(style, api, true) == 0);
          if (testcase('u')) {
            CHK(updateScan(style, api, true) == 0);
            CHK(verifyBlob() == 0);
          }
          if (testcase('l')) {
            CHK(lockUnlockScan(style, api, true) == 0);
          }
          if (testcase('d')) {
            CHK(deleteScan(api, true) == 0);
            CHK(verifyBlob() == 0);
          }
        }
      } // for (api
    } // for (storage
  } // for (loop
  delete g_ndb;
  return 0;
}
4206 
4207 // separate performance test
4208 
4209 struct Tmr { // stolen from testOIBasic
4210  Tmr() {
4211  clr();
4212  }
4213  void clr() {
4214  m_on = m_ms = m_cnt = m_time[0] = m_text[0] = 0;
4215  }
4216  void on() {
4217  assert(m_on == 0);
4218  m_on = NdbTick_CurrentMillisecond();
4219  }
4220  void off(unsigned cnt = 0) {
4221  NDB_TICKS off = NdbTick_CurrentMillisecond();
4222  assert(m_on != 0 && off >= m_on);
4223  m_ms += off - m_on;
4224  m_cnt += cnt;
4225  m_on = 0;
4226  }
4227  const char* time() {
4228  if (m_cnt == 0)
4229  sprintf(m_time, "%u ms", (Uint32)m_ms);
4230  else
4231  sprintf(m_time, "%u ms per %u ( %llu ms per 1000 )", (Uint32)m_ms, m_cnt, (1000 * m_ms) / m_cnt);
4232  return m_time;
4233  }
4234  const char* pct (const Tmr& t1) {
4235  if (0 < t1.m_ms)
4236  sprintf(m_text, "%llu pct", (100 * m_ms) / t1.m_ms);
4237  else
4238  sprintf(m_text, "[cannot measure]");
4239  return m_text;
4240  }
4241  const char* over(const Tmr& t1) {
4242  if (0 < t1.m_ms) {
4243  if (t1.m_ms <= m_ms)
4244  sprintf(m_text, "%llu pct", (100 * (m_ms - t1.m_ms)) / t1.m_ms);
4245  else
4246  sprintf(m_text, "-%llu pct", (100 * (t1.m_ms - m_ms)) / t1.m_ms);
4247  } else
4248  sprintf(m_text, "[cannot measure]");
4249  return m_text;
4250  }
4251  NDB_TICKS m_on;
4252  NDB_TICKS m_ms;
4253  unsigned m_cnt;
4254  char m_time[100];
4255  char m_text[100];
4256 };
4257 
/* Separate performance test (testcase 'p').
 * Creates a small table with an Unsigned pk (A), a Char(20) column
 * (B) and a Text column (C), then measures insert / pk-read / scan
 * throughput for the char column versus the text (blob) column and
 * reports the blob overhead as a percentage.
 * Returns 0 on success, -1 on any CHK failure. */
static int
testperf()
{
  if (! testcase('p'))
    return 0;
  DBG("=== perf test ===");
  g_bh1 = g_bh2 = 0;
  g_ndb = new Ndb(g_ncc, "TEST_DB");
  CHK(g_ndb->init() == 0);
  CHK(g_ndb->waitUntilReady() == 0);
  g_dic = g_ndb->getDictionary();
  NdbDictionary::Table tab(g_opt.m_tnameperf);
  // Drop any leftover table from a previous run
  if (g_dic->getTable(tab.getName()) != 0)
    CHK(g_dic->dropTable(tab.getName()) == 0);
  // col A - pk
  { NdbDictionary::Column col("A");
    col.setType(NdbDictionary::Column::Unsigned);
    col.setPrimaryKey(true);
    tab.addColumn(col);
  }
  // col B - char 20
  { NdbDictionary::Column col("B");
    col.setType(NdbDictionary::Column::Char);
    col.setLength(20);
    col.setNullable(true);
    tab.addColumn(col);
  }
  // col C - text
  { NdbDictionary::Column col("C");
    col.setType(NdbDictionary::Column::Text);
    col.setBlobVersion(g_opt.m_blob_version);
    col.setInlineSize(20);
    col.setPartSize(512);
    col.setStripeSize(1);
    col.setNullable(true);
    tab.addColumn(col);
  }
  // create
  CHK(g_dic->createTable(tab) == 0);
  // column attribute ids, in declaration order
  Uint32 cA = 0, cB = 1, cC = 2;
  // timers: t1 = char baseline, t2 = text (blob) comparison
  Tmr t1;
  Tmr t2;
  // insert char (one trans)
  {
    DBG("--- insert char ---");
    char b[20];
    t1.on();
    CHK((g_con = g_ndb->startTransaction()) != 0);
    for (Uint32 k = 0; k < g_opt.m_rowsperf; k++) {
      CHK((g_opr = g_con->getNdbOperation(tab.getName())) != 0);
      CHK(g_opr->insertTuple() == 0);
      CHK(g_opr->equal(cA, (char*)&k) == 0);
      memset(b, 0x20, sizeof(b));
      b[0] = 'b';
      CHK(g_opr->setValue(cB, b) == 0);
      CHK(g_con->execute(NoCommit) == 0);
    }
    t1.off(g_opt.m_rowsperf);
    // rolled back: we only measure, nothing is kept
    CHK(g_con->execute(Rollback) == 0);
    DBG(t1.time());
    g_opr = 0;
    g_con = 0;
  }
  // insert text (one trans)
  {
    DBG("--- insert text ---");
    t2.on();
    CHK((g_con = g_ndb->startTransaction()) != 0);
    for (Uint32 k = 0; k < g_opt.m_rowsperf; k++) {
      CHK((g_opr = g_con->getNdbOperation(tab.getName())) != 0);
      CHK(g_opr->insertTuple() == 0);
      CHK(g_opr->equal(cA, (char*)&k) == 0);
      CHK((g_bh1 = g_opr->getBlobHandle(cC)) != 0);
      // one-byte blob value "c"
      CHK((g_bh1->setValue("c", 1) == 0));
      CHK(g_con->execute(NoCommit) == 0);
    }
    t2.off(g_opt.m_rowsperf);
    CHK(g_con->execute(Rollback) == 0);
    DBG(t2.time());
    g_bh1 = 0;
    g_opr = 0;
    g_con = 0;
  }
  // insert overhead
  DBG("insert overhead: " << t2.over(t1));
  t1.clr();
  t2.clr();
  // insert rows for the read tests below (committed this time)
  {
    DBG("--- insert for read test ---");
    unsigned n = 0;
    char b[20];
    CHK((g_con = g_ndb->startTransaction()) != 0);
    for (Uint32 k = 0; k < g_opt.m_rowsperf; k++) {
      CHK((g_opr = g_con->getNdbOperation(tab.getName())) != 0);
      CHK(g_opr->insertTuple() == 0);
      CHK(g_opr->equal(cA, (char*)&k) == 0);
      memset(b, 0x20, sizeof(b));
      b[0] = 'b';
      CHK(g_opr->setValue(cB, b) == 0);
      CHK((g_bh1 = g_opr->getBlobHandle(cC)) != 0);
      CHK((g_bh1->setValue("c", 1) == 0));
      // commit in batches of m_batch rows
      if (++n == g_opt.m_batch) {
        CHK(g_con->execute(Commit) == 0);
        g_ndb->closeTransaction(g_con);
        CHK((g_con = g_ndb->startTransaction()) != 0);
        n = 0;
      }
    }
    if (n != 0) {
      CHK(g_con->execute(Commit) == 0);
      g_ndb->closeTransaction(g_con); g_con = 0;
      n = 0;
    }
    g_bh1 = 0;
    g_opr = 0;
  }
  // pk read char (one trans)
  {
    DBG("--- pk read char ---");
    CHK((g_con = g_ndb->startTransaction()) != 0);
    Uint32 a;
    char b[20];
    t1.on();
    for (Uint32 k = 0; k < g_opt.m_rowsperf; k++) {
      CHK((g_opr = g_con->getNdbOperation(tab.getName())) != 0);
      CHK(g_opr->readTuple() == 0);
      CHK(g_opr->equal(cA, (char*)&k) == 0);
      CHK(g_opr->getValue(cA, (char*)&a) != 0);
      CHK(g_opr->getValue(cB, b) != 0);
      // poison outputs so we can verify the read actually filled them
      a = (Uint32)-1;
      b[0] = 0;
      CHK(g_con->execute(NoCommit) == 0);
      CHK(a == k && b[0] == 'b');
    }
    CHK(g_con->execute(Commit) == 0);
    t1.off(g_opt.m_rowsperf);
    DBG(t1.time());
    g_opr = 0;
    g_ndb->closeTransaction(g_con); g_con = 0;
  }
  // pk read text (one trans)
  {
    DBG("--- pk read text ---");
    CHK((g_con = g_ndb->startTransaction()) != 0);
    Uint32 a;
    char c[20];
    t2.on();
    for (Uint32 k = 0; k < g_opt.m_rowsperf; k++) {
      CHK((g_opr = g_con->getNdbOperation(tab.getName())) != 0);
      CHK(g_opr->readTuple() == 0);
      CHK(g_opr->equal(cA, (char*)&k) == 0);
      CHK(g_opr->getValue(cA, (char*)&a) != 0);
      CHK((g_bh1 = g_opr->getBlobHandle(cC)) != 0);
      a = (Uint32)-1;
      c[0] = 0;
      CHK(g_con->execute(NoCommit) == 0);
      // m is in/out: buffer size in, bytes read out
      Uint32 m = 20;
      CHK(g_bh1->readData(c, m) == 0);
      CHK(a == k && m == 1 && c[0] == 'c');
    }
    CHK(g_con->execute(Commit) == 0);
    t2.off(g_opt.m_rowsperf);
    DBG(t2.time());
    g_ndb->closeTransaction(g_con); g_opr = 0;
    g_con = 0;
  }
  // pk read overhead
  DBG("pk read overhead: " << t2.over(t1));
  t1.clr();
  t2.clr();
  // scan read char
  const uint scan_loops = 10;
  {
    DBG("--- scan read char ---");
    Uint32 a;
    char b[20];
    uint i;
    for (i = 0; i < scan_loops; i++) {
      CHK((g_con = g_ndb->startTransaction()) != 0);
      CHK((g_ops = g_con->getNdbScanOperation(tab.getName())) != 0);
      CHK(g_ops->readTuples(NdbOperation::LM_Read) == 0);
      CHK(g_ops->getValue(cA, (char*)&a) != 0);
      CHK(g_ops->getValue(cB, b) != 0);
      CHK(g_con->execute(NoCommit) == 0);
      unsigned n = 0;
      t1.on();
      while (1) {
        a = (Uint32)-1;
        b[0] = 0;
        int ret;
        // nextResult: 0 = row, 1 = end of scan
        CHK((ret = g_ops->nextResult(true)) == 0 || ret == 1);
        if (ret == 1)
          break;
        CHK(a < g_opt.m_rowsperf && b[0] == 'b');
        n++;
      }
      CHK(n == g_opt.m_rowsperf);
      t1.off(g_opt.m_rowsperf);
      g_ndb->closeTransaction(g_con); g_ops = 0;
      g_con = 0;
    }
    DBG(t1.time());
  }
  // scan read text
  {
    DBG("--- read text ---");
    Uint32 a;
    char c[20];
    uint i;
    for (i = 0; i < scan_loops; i++) {
      CHK((g_con = g_ndb->startTransaction()) != 0);
      CHK((g_ops = g_con->getNdbScanOperation(tab.getName())) != 0);
      CHK(g_ops->readTuples(NdbOperation::LM_Read) == 0);
      CHK(g_ops->getValue(cA, (char*)&a) != 0);
      CHK((g_bh1 = g_ops->getBlobHandle(cC)) != 0);
      CHK(g_con->execute(NoCommit) == 0);
      unsigned n = 0;
      t2.on();
      while (1) {
        a = (Uint32)-1;
        c[0] = 0;
        int ret;
        CHK((ret = g_ops->nextResult(true)) == 0 || ret == 1);
        if (ret == 1)
          break;
        Uint32 m = 20;
        CHK(g_bh1->readData(c, m) == 0);
        CHK(a < g_opt.m_rowsperf && m == 1 && c[0] == 'c');
        n++;
      }
      CHK(n == g_opt.m_rowsperf);
      t2.off(g_opt.m_rowsperf);
      g_bh1 = 0;
      g_ops = 0;
      g_ndb->closeTransaction(g_con); g_con = 0;
    }
    DBG(t2.time());
  }
  // scan read overhead
  DBG("scan read overhead: " << t2.over(t1));
  t1.clr();
  t2.clr();
  delete g_ndb;
  return 0;
}
4505 
4506 // bug tests
4507 
/*
 * Regression test for Bug#4088: the NDB API would hang when a single
 * transaction mixed plain-table operations on a unique index's backing
 * table with NdbIndexOperations on the same index.
 *
 * Returns 0 on success, -1 (via CHK) on any failed check.
 */
static int
bugtest_4088()
{
  unsigned i;
  DBG("bug test 4088 - ndb api hang with mixed ops on index table");
  // insert rows
  calcTups(true);
  CHK(insertPk(0, API_NDBRECORD) == 0);
  // new trans
  CHK((g_con = g_ndb->startTransaction()) != 0);
  for (unsigned k = 0; k < g_opt.m_rows; k++) {
    Tup& tup = g_tups[k];
    // read table pk via index as a table
    // (access the hidden "<tableid>/<indexname>" system table directly)
    const unsigned pkcnt = 2;
    Tup pktup[pkcnt];
    for (i = 0; i < pkcnt; i++) {
      char name[20];
      // XXX guess table id
      sprintf(name, "%d/%s", 4, g_opt.m_x1name);
      CHK((g_opr = g_con->getNdbOperation(name)) != 0);
      CHK(g_opr->readTuple() == 0);
      CHK(g_opr->equal("PK2", tup.m_pk2) == 0);
      setUDpartId(tup, g_opr);
      // NDB$PK is the hidden primary-key attribute of the index table
      CHK(g_opr->getValue("NDB$PK", (char*)&pktup[i].m_pk1) != 0);
    }
    // read blob inline via index as an index
    CHK((g_opx = g_con->getNdbIndexOperation(g_opt.m_x1name, g_opt.m_tname)) != 0);
    CHK(g_opx->readTuple() == 0);
    CHK(g_opx->equal("PK2", tup.m_pk2) == 0);
    assert(tup.m_bval1.m_buf != 0);
    CHK(g_opx->getValue("BL1", (char*)tup.m_bval1.m_buf) != 0);
    // execute
    // BUG 4088: gets 1 tckeyconf, 1 tcindxconf, then hangs
    CHK(g_con->execute(Commit) == 0);
    // verify
    for (i = 0; i < pkcnt; i++) {
      CHK(pktup[i].m_pk1 == tup.m_pk1);
      CHK(memcmp(pktup[i].m_pk2, tup.m_pk2, g_opt.m_pk2chr.m_len) == 0);
    }
    // 8 = blob head (length bytes) preceding the inline data
    CHK(memcmp(tup.m_bval1.m_val, tup.m_bval1.m_buf, 8 + g_blob1.m_inline) == 0);
  }
  return 0;
}
4551 
/*
 * Regression test for Bug#27018: a partial writeData() into the middle of
 * a blob part clobbered the remainder of that part.  For each row, flip one
 * random byte via setPos()/writeData(), read the whole blob back, undo the
 * flip in the local copy, and verify every byte matches the original value.
 */
static int
bugtest_27018()
{
  DBG("bug test 27018 - middle partial part write clobbers rest of part");

  // insert rows
  calcTups(true);
  CHK(insertPk(0, API_NDBRECORD) == 0);
  // new trans
  for (unsigned k= 0; k < g_opt.m_rows; k++)
  {
    Tup& tup= g_tups[k];

    /* Update one byte in random position. */
    Uint32 offset= urandom(tup.m_bval1.m_len + 1);
    if (offset == tup.m_bval1.m_len) {
      // testing write at end is another problem..
      continue;
    }
    //DBG("len=" << tup.m_bval1.m_len << " offset=" << offset);

    CHK((g_con= g_ndb->startTransaction()) != 0);
    // build the NdbRecord key row: PK1 always, PK2/PK3 only if pk2 is in use
    memcpy(&tup.m_key_row[g_pk1_offset], &tup.m_pk1, sizeof(tup.m_pk1));
    if (g_opt.m_pk2chr.m_len != 0) {
      memcpy(&tup.m_key_row[g_pk2_offset], tup.m_pk2, g_opt.m_pk2chr.m_totlen);
      memcpy(&tup.m_key_row[g_pk3_offset], &tup.m_pk3, sizeof(tup.m_pk3));
    }
    // NOTE(review): the declaration of 'opts' (an
    // NdbOperation::OperationOptions, original line 4579) appears to have
    // been dropped by this extraction -- verify against the original source.
    setUDpartIdNdbRecord(tup,
                         g_ndb->getDictionary()->getTable(g_opt.m_tname),
                         opts);
    CHK((g_const_opr= g_con->updateTuple(g_key_record, tup.m_key_row,
                                         g_blob_record, tup.m_row,
                                         NULL, // mask
                                         &opts,
                                         sizeof(opts))) != 0);
    CHK(getBlobHandles(g_const_opr) == 0);
    CHK(g_con->execute(NoCommit) == 0);

    // write the complement of the original byte at 'offset'
    tup.m_bval1.m_buf[0]= 0xff ^ tup.m_bval1.m_val[offset];
    CHK(g_bh1->setPos(offset) == 0);
    CHK(g_bh1->writeData(&(tup.m_bval1.m_buf[0]), 1) == 0);
    CHK(g_con->execute(Commit) == 0);
    g_ndb->closeTransaction(g_con);

    // read the whole blob back in a fresh transaction
    CHK((g_con= g_ndb->startTransaction()) != 0);
    CHK((g_const_opr= g_con->readTuple(g_key_record, tup.m_key_row,
                                       g_blob_record, tup.m_row,
                                       NdbOperation::LM_Read,
                                       NULL, // mask
                                       &opts,
                                       sizeof(opts))) != 0);
    CHK(getBlobHandles(g_const_opr) == 0);

    CHK(g_bh1->getValue(tup.m_bval1.m_buf, tup.m_bval1.m_len) == 0);
    CHK(g_con->execute(Commit) == 0);

    Uint64 len= ~0;
    CHK(g_bh1->getLength(len) == 0 && len == tup.m_bval1.m_len);
    // undo the single-byte flip so the buffer should equal the original
    tup.m_bval1.m_buf[offset]^= 0xff;
    //CHK(memcmp(tup.m_bval1.m_buf, tup.m_bval1.m_val, tup.m_bval1.m_len) == 0);
    // byte-by-byte compare (reports the first differing byte via CHK)
    Uint32 i = 0;
    while (i < tup.m_bval1.m_len) {
      CHK(tup.m_bval1.m_buf[i] == tup.m_bval1.m_val[i]);
      i++;
    }

    g_ndb->closeTransaction(g_con);
    g_con=0;
    g_const_opr=0;
  }
  CHK(deletePk(API_NDBRECORD) == 0);

  return 0;
}
4627 
4628 
  Ndb *m_ndb;                   // per-thread Ndb object for the writer thread
  char m_current_write_value;   // byte value written over the whole blob, incremented each pass
  char *m_writebuf;             // m_blob1_size bytes, filled with m_current_write_value
  Uint32 m_blob1_size;          // total blob size: inline part + 10 parts
  char *m_key_row;              // NdbRecord key row (shared base of a g_rowsize*3 buffer)
  char *m_read_row;             // NdbRecord row for reads (m_key_row + g_rowsize)
  char *m_write_row;            // NdbRecord row for writes (m_read_row + g_rowsize)
  bool m_thread_stop;           // set by main thread to stop the writer loop
  // NOTE(review): an 'opts' member (NdbOperation::OperationOptions*, used as
  // data->opts / data.opts below, original line 4638) appears to be missing
  // from this extraction -- verify against the original source.
};
4640 
4641 void *bugtest_27370_thread(void *arg)
4642 {
4643  bug27370_data *data= (bug27370_data *)arg;
4644 
4645  while (!data->m_thread_stop)
4646  {
4647  memset(data->m_writebuf, data->m_current_write_value, data->m_blob1_size);
4648  data->m_current_write_value++;
4649 
4650  NdbConnection *con;
4651  if ((con= data->m_ndb->startTransaction()) == 0)
4652  return (void *)"Failed to create transaction";
4653  const NdbOperation *opr;
4654  memcpy(data->m_write_row, data->m_key_row, g_rowsize);
4655  if ((opr= con->writeTuple(g_key_record, data->m_key_row,
4656  g_full_record, data->m_write_row,
4657  NULL, //mask
4658  data->opts,
4659  sizeof(NdbOperation::OperationOptions))) == 0)
4660  return (void *)"Failed to create operation";
4661  NdbBlob *bh;
4662  if ((bh= opr->getBlobHandle("BL1")) == 0)
4663  return (void *)"getBlobHandle() failed";
4664  if (bh->setValue(data->m_writebuf, data->m_blob1_size) != 0)
4665  return (void *)"setValue() failed";
4666  if (con->execute(Commit, AbortOnError, 1) != 0)
4667  return (void *)"execute() failed";
4668  data->m_ndb->closeTransaction(con);
4669  }
4670 
4671  return NULL; // Success
4672 }
4673 
/*
 * Regression test for Bug#27370: ReadCommitted blob reads could observe an
 * inconsistent mix of old and new data while another transaction was
 * rewriting the blob.  A writer thread continuously overwrites one row's
 * blob with a uniform byte value; the main thread repeatedly reads single
 * bytes at random positions (via PK read, then via table scan) and checks
 * that all bytes seen within one read are identical.
 */
static int
bugtest_27370()
{
  DBG("bug test 27370 - Potential inconsistent blob reads for ReadCommitted reads");

  bug27370_data data;

  // one buffer sliced into key row / read row / write row
  CHK((data.m_key_row= new char[g_rowsize*3]) != 0);
  data.m_read_row= data.m_key_row + g_rowsize;
  data.m_write_row= data.m_read_row + g_rowsize;

  // the writer thread needs its own Ndb object
  data.m_ndb= new Ndb(g_ncc, "TEST_DB");
  CHK(data.m_ndb->init(20) == 0);
  CHK(data.m_ndb->waitUntilReady() == 0);

  data.m_current_write_value= 0;
  data.m_blob1_size= g_blob1.m_inline + 10 * g_blob1.m_partsize;
  // NOTE(review): m_writebuf and m_ndb are never freed before return --
  // acceptable for a one-shot test program, but it is a leak.
  CHK((data.m_writebuf= new char [data.m_blob1_size]) != 0);
  Uint32 pk1_value= 27370;

  // for user-defined partitioning, compute and pass an explicit partition id
  const NdbDictionary::Table* t= g_ndb->getDictionary()->getTable(g_opt.m_tname);
  bool isUserDefined= (t->getFragmentType() == NdbDictionary::Object::UserDefined);
  Uint32 partCount= t->getFragmentCount();
  Uint32 udPartId= pk1_value % partCount;
  // NOTE(review): the declaration of 'opts' (original line 4698) appears to
  // have been dropped by this extraction -- verify against original source.
  opts.optionsPresent= 0;
  data.opts= &opts;
  if (isUserDefined)
  {
    opts.optionsPresent= NdbOperation::OperationOptions::OO_PARTITION_ID;
    opts.partitionId= udPartId;
  }
  // build the key row for the single test row
  memcpy(&data.m_key_row[g_pk1_offset], &pk1_value, sizeof(pk1_value));
  if (g_opt.m_pk2chr.m_len != 0)
  {
    memset(&data.m_key_row[g_pk2_offset], 'x', g_opt.m_pk2chr.m_totlen);
    if (!g_opt.m_pk2chr.m_fixed)
      data.m_key_row[g_pk2_offset]= urandom(g_opt.m_pk2chr.m_len + 1);
    Uint16 pk3_value= 27370;
    memcpy(&data.m_key_row[g_pk3_offset], &pk3_value, sizeof(pk3_value));
  }
  data.m_thread_stop= false;

  // write the initial blob value (value 0), then start the writer thread
  memset(data.m_writebuf, data.m_current_write_value, data.m_blob1_size);
  data.m_current_write_value++;

  CHK((g_con= g_ndb->startTransaction()) != 0);
  memcpy(data.m_write_row, data.m_key_row, g_rowsize);
  CHK((g_const_opr= g_con->writeTuple(g_key_record, data.m_key_row,
                                      g_full_record, data.m_write_row,
                                      NULL, // mask
                                      &opts,
                                      sizeof(opts))) != 0);
  CHK((g_bh1= g_const_opr->getBlobHandle("BL1")) != 0);
  CHK(g_bh1->setValue(data.m_writebuf, data.m_blob1_size) == 0);
  CHK(g_con->execute(Commit) == 0);
  g_ndb->closeTransaction(g_con);
  g_con= NULL;

  pthread_t thread_handle;
  CHK(pthread_create(&thread_handle, NULL, bugtest_27370_thread, &data) == 0);

  DBG("bug test 27370 - PK blob reads");
  Uint32 seen_updates= 0;
  // keep reading until we have observed the writer's updates 50 times
  while (seen_updates < 50)
  {
    // NOTE(review): the lock-mode argument to readTuple (original line 4743,
    // presumably LM_CommittedRead) is missing from this extraction.
    CHK((g_con= g_ndb->startTransaction()) != 0);
    CHK((g_const_opr= g_con->readTuple(g_key_record, data.m_key_row,
                                       g_blob_record, data.m_read_row,
                                       NULL, // mask
                                       &opts,
                                       sizeof(opts))) != 0);
    CHK((g_bh1= g_const_opr->getBlobHandle("BL1")) != 0);
    CHK(g_con->execute(NoCommit, AbortOnError, 1) == 0);

    const Uint32 loop_max= 10;
    char read_char;
    char original_read_char= 0;
    Uint32 readloop;
    for (readloop= 0;; readloop++)
    {
      if (readloop > 0)
      {
        if (readloop > 1)
        {
          /* Compare against first read. */
          CHK(read_char == original_read_char);
        }
        else
        {
          /*
            We count the number of times we see the other thread had the
            chance to update, so that we can be sure it had the opportunity
            to run a reasonable number of times before we stop.
          */
          if (original_read_char != read_char)
            seen_updates++;
          original_read_char= read_char;
        }
      }
      if (readloop > loop_max)
        break;
      // read one byte at a random offset within the blob
      Uint32 readSize= 1;
      CHK(g_bh1->setPos(urandom(data.m_blob1_size)) == 0);
      CHK(g_bh1->readData(&read_char, readSize) == 0);
      CHK(readSize == 1);
      ExecType commitType= readloop == loop_max ? Commit : NoCommit;
      CHK(g_con->execute(commitType, AbortOnError, 1) == 0);
    }
    g_ndb->closeTransaction(g_con);
    g_con= NULL;
  }

  DBG("bug test 27370 - table scan blob reads");
  seen_updates= 0;
  while (seen_updates < 50)
  {
    CHK((g_con= g_ndb->startTransaction()) != 0);
    // NOTE(review): the tail of this scanTable() call (original line 4794,
    // presumably the lock-mode argument and closing parens) is missing from
    // this extraction -- verify against the original source.
    CHK((g_ops= g_con->scanTable(g_full_record,
    CHK((g_bh1= g_ops->getBlobHandle("BL1")) != 0);
    CHK(g_con->execute(NoCommit, AbortOnError, 1) == 0);
    const char *out_row= NULL;
    CHK(g_ops->nextResult(&out_row, true, false) == 0);

    const Uint32 loop_max= 10;
    char read_char;
    char original_read_char= 0;
    Uint32 readloop;
    for (readloop= 0;; readloop++)
    {
      if (readloop > 0)
      {
        if (readloop > 1)
        {
          /* Compare against first read. */
          CHK(read_char == original_read_char);
        }
        else
        {
          /*
            We count the number of times we see the other thread had the
            chance to update, so that we can be sure it had the opportunity
            to run a reasonable number of times before we stop.
          */
          if (original_read_char != read_char)
            seen_updates++;
          original_read_char= read_char;
        }
      }
      if (readloop > loop_max)
        break;
      Uint32 readSize= 1;
      CHK(g_bh1->setPos(urandom(data.m_blob1_size)) == 0);
      CHK(g_bh1->readData(&read_char, readSize) == 0);
      CHK(readSize == 1);
      CHK(g_con->execute(NoCommit, AbortOnError, 1) == 0);
    }

    // expect exactly one row in the scan
    CHK(g_ops->nextResult(&out_row, true, false) == 1);
    g_ndb->closeTransaction(g_con);
    g_con= NULL;
  }

  // stop the writer thread and check it reported no error
  data.m_thread_stop= true;
  void *thread_return;
  CHK(pthread_join(thread_handle, &thread_return) == 0);
  DBG("bug 27370 - thread return status: " <<
      (thread_return ? (char *)thread_return : "<null>"));
  CHK(thread_return == 0);

  delete [] data.m_key_row;
  g_con= NULL;
  g_const_opr= NULL;
  g_bh1= NULL;
  return 0;
}
4852 
4853 static int
4854 bugtest_28116()
4855 {
4856  DBG("bug test 28116 - Crash in getBlobHandle() when called without full key");
4857 
4858  if (g_opt.m_pk2chr.m_len == 0)
4859  {
4860  DBG(" ... skipped, requires multi-column primary key.");
4861  return 0;
4862  }
4863 
4864  calcTups(true);
4865 
4866  for (unsigned k = 0; k < g_opt.m_rows; k++) {
4867  Tup& tup = g_tups[k];
4868  CHK((g_con = g_ndb->startTransaction()) != 0);
4869  CHK((g_opr = g_con->getNdbOperation(g_opt.m_tname)) != 0);
4870  int reqType = urandom(4);
4871  switch(reqType) {
4872  case 0:
4873  {
4874  DBG("Read");
4875  CHK(g_opr->readTuple() == 0);
4876  break;
4877  }
4878  case 1:
4879  {
4880  DBG("Insert");
4881  CHK(g_opr->insertTuple() == 0);
4882  break;
4883  }
4884  case 2:
4885  {
4886  DBG("Update");
4887  CHK(g_opr->updateTuple() == 0);
4888  break;
4889  }
4890  case 3:
4891  default:
4892  {
4893  DBG("Delete");
4894  CHK(g_opr->deleteTuple() == 0);
4895  break;
4896  }
4897  }
4898  switch (urandom(3)) {
4899  case 0:
4900  {
4901  DBG(" No keys");
4902  break;
4903  }
4904  case 1:
4905  {
4906  DBG(" Pk1 only");
4907  CHK(g_opr->equal("PK1", tup.m_pk1) == 0);
4908  break;
4909  }
4910  case 2:
4911  default:
4912  {
4913  DBG(" Pk2/3 only");
4914  if (g_opt.m_pk2chr.m_len != 0)
4915  {
4916  CHK(g_opr->equal("PK2", tup.m_pk2) == 0);
4917  CHK(g_opr->equal("PK3", tup.m_pk3) == 0);
4918  }
4919  break;
4920  }
4921  }
4922  /* Deliberately no equal() on rest of primary key, to provoke error. */
4923  CHK(g_opr->getBlobHandle("BL1") == 0);
4924 
4925  /* 4264 - Invalid usage of Blob attribute */
4926  CHK(g_con->getNdbError().code == 4264);
4927  CHK(g_opr->getNdbError().code == 4264);
4928 
4929  g_ndb->closeTransaction(g_con);
4930  g_opr = 0;
4931  g_con = 0;
4932  }
4933  return 0;
4934 }
4935 
// Dispatch table mapping a bug number (selected via the -bug command-line
// option) to the corresponding test function.
static struct {
  int m_bug;          // bug number accepted by -bug
  int (*m_test)();    // test entry point; returns 0 on success
} g_bugtest[] = {
  { 4088, bugtest_4088 },
  { 27018, bugtest_27018 },
  { 27370, bugtest_27370 },
  { 36756, bugtest_36756 },
  { 45768, bugtest_45768 },
  { 48040, bugtest_48040 },
  { 28116, bugtest_28116 },
  { 62321, bugtest_62321 }
};
4949 
/*
 * Program entry point.  Parses command-line options into g_opt / pk2chr,
 * derives the pk2 character-set metadata, connects to the cluster and runs
 * testmain() and testperf().  Exits via NDBT_ProgramExit with OK, FAILED
 * or WRONGARGS.
 */
NDB_COMMAND(testOdbcDriver, "testBlobs", "testBlobs", "testBlobs", 65535)
{
  ndb_init();
  // log the invocation
  // NOTE(review): cmdline is built with unbounded strcpy/strcat -- a long
  // argument list can overflow the 512-byte buffer.
  char cmdline[512];
  {
    const char* progname =
      strchr(argv[0], '/') ? strrchr(argv[0], '/') + 1 : argv[0];
    strcpy(cmdline, progname);
    for (int i = 1; i < argc; i++) {
      strcat(cmdline, " ");
      strcat(cmdline, argv[i]);
    }
  }
  Chr& pk2chr = g_opt.m_pk2chr;
  // option parsing: each "continue" accepts the option; falling through to
  // the bottom of the loop reports it as unknown
  while (++argv, --argc > 0) {
    const char* arg = argv[0];
    if (strcmp(arg, "-batch") == 0) {
      if (++argv, --argc > 0) {
        g_opt.m_batch = atoi(argv[0]);
        continue;
      }
    }
    if (strcmp(arg, "-core") == 0) {
      g_opt.m_core = true;
      continue;
    }
    if (strcmp(arg, "-dbg") == 0) {
      g_opt.m_dbg = true;
      continue;
    }
    if (strcmp(arg, "-debug") == 0) {
      if (++argv, --argc > 0) {
        g_opt.m_dbg = true;
        g_opt.m_debug = strdup(argv[0]);
        continue;
      }
    }
    if (strcmp(arg, "-fac") == 0) {
      g_opt.m_fac = true;
      continue;
    }
    if (strcmp(arg, "-full") == 0) {
      g_opt.m_full = true;
      continue;
    }
    if (strcmp(arg, "-loop") == 0) {
      if (++argv, --argc > 0) {
        g_opt.m_loop = atoi(argv[0]);
        continue;
      }
    }
    if (strcmp(arg, "-min") == 0) {
      g_opt.m_min = true;
      continue;
    }
    if (strcmp(arg, "-parts") == 0) {
      if (++argv, --argc > 0) {
        g_opt.m_parts = atoi(argv[0]);
        continue;
      }
    }
    if (strcmp(arg, "-rows") == 0) {
      if (++argv, --argc > 0) {
        g_opt.m_rows = atoi(argv[0]);
        continue;
      }
    }
    if (strcmp(arg, "-rowsperf") == 0) {
      if (++argv, --argc > 0) {
        g_opt.m_rowsperf = atoi(argv[0]);
        continue;
      }
    }
    if (strcmp(arg, "-seed") == 0) {
      if (++argv, --argc > 0) {
        g_opt.m_seed = atoi(argv[0]);
        continue;
      }
    }
    if (strcmp(arg, "-skip") == 0) {
      if (++argv, --argc > 0) {
        g_opt.m_skip = strdup(argv[0]);
        continue;
      }
    }
    if (strcmp(arg, "-test") == 0) {
      if (++argv, --argc > 0) {
        g_opt.m_test = strdup(argv[0]);
        continue;
      }
    }
    if (strcmp(arg, "-timeoutretries") == 0) {
      if (++argv, --argc > 0) {
        g_opt.m_timeout_retries = atoi(argv[0]);
        continue;
      }
    }
    if (strcmp(arg, "-version") == 0) {
      if (++argv, --argc > 0) {
        g_opt.m_blob_version = atoi(argv[0]);
        // only blob format versions 1 and 2 are accepted
        if (g_opt.m_blob_version == 1 || g_opt.m_blob_version == 2)
          continue;
      }
    }
    // metadata
    if (strcmp(arg, "-pk2len") == 0) {
      if (++argv, --argc > 0) {
        pk2chr.m_len = atoi(argv[0]);
        continue;
      }
    }
    if (strcmp(arg, "-pk2fixed") == 0) {
      pk2chr.m_fixed = true;
      continue;
    }
    if (strcmp(arg, "-pk2binary") == 0) {
      pk2chr.m_binary = true;
      continue;
    }
    if (strcmp(arg, "-pk2cs") == 0) {
      if (++argv, --argc > 0) {
        pk2chr.m_cs = strdup(argv[0]);
        continue;
      }
    }
    if (strcmp(arg, "-pk2part") == 0) {
      g_opt.m_pk2part = true;
      continue;
    }
    if (strcmp(arg, "-oneblob") == 0) {
      g_opt.m_oneblob = true;
      continue;
    }
    if (strcmp(arg, "-rbatch") == 0) {
      if (++argv, --argc > 0) {
        g_opt.m_rbatch = atoi(argv[0]);
        continue;
      }
    }
    if (strcmp(arg, "-wbatch") == 0) {
      if (++argv, --argc > 0) {
        g_opt.m_wbatch = atoi(argv[0]);
        continue;
      }
    }
    // bugs
    if (strcmp(arg, "-bug") == 0) {
      if (++argv, --argc > 0) {
        g_opt.m_bug = atoi(argv[0]);
        // look up the requested bug number in the dispatch table
        for (unsigned i = 0; i < sizeof(g_bugtest)/sizeof(g_bugtest[0]); i++) {
          if (g_opt.m_bug == g_bugtest[i].m_bug) {
            g_opt.m_bugtest = g_bugtest[i].m_test;
            break;
          }
        }
        if (g_opt.m_bugtest != 0)
          continue;
      }
    }
    if (strcmp(arg, "-?") == 0 || strcmp(arg, "-h") == 0) {
      printusage();
      goto success;
    }
    ndbout << "unknown option " << arg << endl;
    goto wrongargs;
  }
  if (g_opt.m_debug != 0) {
    // prepend default dbug flags if the user gave only a file name
    if (strchr(g_opt.m_debug, ':') == 0) {
      const char* s = "d:t:F:L:o,";
      char* t = new char [strlen(s) + strlen(g_opt.m_debug) + 1];
      strcpy(t, s);
      strcat(t, g_opt.m_debug);
      g_opt.m_debug = t;
    }
    DBUG_PUSH(g_opt.m_debug);
    ndbout.m_out = new FileOutputStream(DBUG_FILE);
  }
  if (pk2chr.m_len == 0) {
    // without pk2, skip the index ("i") and NdbRecord ("r") test groups
    // NOTE(review): b is built with unbounded strcpy/strcat from -skip;
    // a long -skip value can overflow the 100-byte buffer.
    char b[100];
    b[0] = 0;
    if (g_opt.m_skip != 0)
      strcpy(b, g_opt.m_skip);
    strcat(b, "i");
    strcat(b, "r");
    g_opt.m_skip = strdup(b);
  }
  if (pk2chr.m_len != 0) {
    // derive pk2 column type and charset metadata
    Chr& c = pk2chr;
    if (c.m_binary) {
      if (c.m_fixed)
        c.m_type = NdbDictionary::Column::Binary;
      else
        // NOTE(review): the non-fixed type assignment (original line 5143,
        // presumably Varbinary) is missing from this extraction -- as shown
        // here the 'else' wrongly binds to the m_mblen assignment; verify
        // against the original source.
      c.m_mblen = 1;
      c.m_cs = 0;
    } else {
      assert(c.m_cs != 0);
      if (c.m_fixed)
        c.m_type = NdbDictionary::Column::Char;
      else
        c.m_type = NdbDictionary::Column::Varchar;
      // accept either a charset name or a collation name
      c.m_csinfo = get_charset_by_name(c.m_cs, MYF(0));
      if (c.m_csinfo == 0)
        c.m_csinfo = get_charset_by_csname(c.m_cs, MY_CS_PRIMARY, MYF(0));
      if (c.m_csinfo == 0) {
        ndbout << "unknown charset " << c.m_cs << endl;
        goto wrongargs;
      }
      c.m_mblen = c.m_csinfo->mbmaxlen;
      if (c.m_mblen == 0)
        c.m_mblen = 1;
    }
    c.m_bytelen = c.m_len * c.m_mblen;
    if (c.m_bytelen > 255) {
      ndbout << "length of pk2 in bytes exceeds 255" << endl;
      goto wrongargs;
    }
    // total length includes one length byte for var-length columns
    if (c.m_fixed)
      c.m_totlen = c.m_bytelen;
    else
      c.m_totlen = 1 + c.m_bytelen;
    c.m_caseins = false;
    if (c.m_cs != 0) {
      // sanity-check the charset contains ascii and detect case-insensitivity
      CHARSET_INFO* info = c.m_csinfo;
      const char* p = "ABCxyz";
      const char* q = "abcXYZ";
      int e;
      if ((*info->cset->well_formed_len)(info, p, p + 6, 999, &e) != 6) {
        ndbout << "charset does not contain ascii" << endl;
        goto wrongargs;
      }
      if ((*info->coll->strcasecmp)(info, p, q) == 0) {
        c.m_caseins = true;
      }
      ndbout << "charset: " << c.m_cs << " caseins: " << c.m_caseins << endl;
    }
  }
  ndbout << cmdline << endl;
  g_ncc = new Ndb_cluster_connection();
  if (g_ncc->connect(30) != 0 || testmain() == -1 || testperf() == -1) {
    ndbout << "line " << __LINE__ << " FAIL loop=" << g_loop << endl;
    return NDBT_ProgramExit(NDBT_FAILED);
  }
  delete g_ncc;
  g_ncc = 0;
success:
  return NDBT_ProgramExit(NDBT_OK);
wrongargs:
  return NDBT_ProgramExit(NDBT_WRONGARGS);
}
5201 
5202 // vim: set sw=2 et: