// MySQL 5.6.14 source: storage/ndb/src/kernel/blocks/dbtup/DbtupAbort.cpp
// (extracted from the doxygen-generated "Source Code Document" pages;
// the doxygen navigation line has been converted into this comment so
// the file remains valid C++.)
1 /*
2  Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
3 
4  This program is free software; you can redistribute it and/or modify
5  it under the terms of the GNU General Public License as published by
6  the Free Software Foundation; version 2 of the License.
7 
8  This program is distributed in the hope that it will be useful,
9  but WITHOUT ANY WARRANTY; without even the implied warranty of
10  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11  GNU General Public License for more details.
12 
13  You should have received a copy of the GNU General Public License
14  along with this program; if not, write to the Free Software
15  Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
16 */
17 
18 #define DBTUP_C
19 #define DBTUP_ABORT_CPP
20 #include "Dbtup.hpp"
21 #include <RefConvert.hpp>
22 #include <ndb_limits.h>
23 #include <pc.hpp>
24 
28 void Dbtup::execTUP_ABORTREQ(Signal* signal)
29 {
30  jamEntry();
31  do_tup_abortreq(signal, 0);
32 }
33 
/**
 * Undo the in-memory (and preallocated disk) effects of one non-read
 * operation on a tuple during transaction abort.
 *
 * @param signal    signal object, forwarded to disk_page_abort_prealloc
 * @param tuple_ptr header of the live (fixed-part) tuple being aborted
 * @param opPtrP    the operation record to undo
 * @param fragPtrP  fragment the tuple belongs to
 * @param tablePtrP table descriptor (used to locate disk ref / var part)
 *
 * @return true when the tuple header may have been modified, so the
 *         caller knows to recompute the tuple checksum.
 *         NOTE(review): 'change' is initialised to true below, so the
 *         function currently always returns true and the later
 *         'change = true' assignments are redundant — presumably a
 *         conservative choice to always refresh the checksum; confirm
 *         before tightening.
 */
bool
Dbtup::do_tup_abort_operation(Signal* signal,
                              Tuple_header *tuple_ptr,
                              Operationrec* opPtrP,
                              Fragrecord* fragPtrP,
                              Tablerec* tablePtrP)
{
  bool change = true;

  Uint32 bits= tuple_ptr->m_header_bits;
  if (opPtrP->op_struct.op_type != ZDELETE)
  {
    /* Insert/update path: the operation has a copy tuple holding its
     * pending new version. */
    Tuple_header *copy= get_copy_tuple(&opPtrP->m_copy_tuple_location);

    if (opPtrP->op_struct.m_disk_preallocated)
    {
      jam();
      /* Return the disk page space that was preallocated for this
       * operation; the disk reference is stored in the copy tuple. */
      Local_key key;
      memcpy(&key, copy->get_disk_ref_ptr(tablePtrP), sizeof(key));
      disk_page_abort_prealloc(signal, fragPtrP, &key, key.m_page_idx);
    }

    if(! (bits & Tuple_header::ALLOC))
    {
      /* The tuple existed before this transaction (not freshly
       * allocated), so any size change to its variable part must be
       * rolled back. */
      jam();
      if(bits & Tuple_header::MM_GROWN)
      {
        jam();
        if (0) ndbout_c("abort grow");
        Ptr<Page> vpage;
        Uint32 idx= opPtrP->m_tuple_location.m_page_idx;
        Uint32 *var_part;

        /* We must be looking at the live tuple here, never a copy. */
        ndbassert(! (tuple_ptr->m_header_bits & Tuple_header::COPY_TUPLE));

        Var_part_ref *ref = tuple_ptr->get_var_part_ref_ptr(tablePtrP);

        Local_key tmp;
        ref->copyout(&tmp);

        idx= tmp.m_page_idx;
        var_part= get_ptr(&vpage, *ref);
        Var_page* pageP = (Var_page*)vpage.p;
        Uint32 len= pageP->get_entry_len(idx) & ~Var_page::CHAIN;

        /*
          A MM_GROWN tuple was relocated with a bigger size in preparation for
          commit, so we need to shrink it back. The original size is stored in
          the last word of the relocated (oversized) tuple.
        */
        ndbassert(len > 0);
        Uint32 sz= var_part[len-1];
        ndbassert(sz < len);
        if (sz)
        {
          jam();
          /* Shrink back to the pre-transaction size and update the
           * fragment's free-space accounting for the page. */
          pageP->shrink_entry(idx, sz);
          update_free_page_list(fragPtrP, vpage);
        }
        else
        {
          jam();
          /* Original size was zero: the tuple had no var part before
           * the aborted operation, so release it entirely and clear
           * the VAR_PART header bit. */
          free_var_part(fragPtrP, vpage, tmp.m_page_idx);
          tmp.m_page_no = RNIL;
          ref->assign(&tmp);
          bits &= ~(Uint32)Tuple_header::VAR_PART;
        }
        tuple_ptr->m_header_bits= bits & ~Tuple_header::MM_GROWN;
        change = true;
      }
      else if(bits & Tuple_header::MM_SHRINK)
      {
        jam();
        /* Nothing to undo here — presumably the shrink was deferred to
         * commit time, so aborting requires no var-part work.
         * TODO(review): confirm against the commit path. */
        if (0) ndbout_c("abort shrink");
      }
    }
    else if (opPtrP->is_first_operation())
    {
      jam();
      /* The tuple was allocated by this (aborting) transaction and
       * this is the first operation on it: mark the slot freed. */
      change = true;
      tuple_ptr->m_header_bits &= ~(Uint32)Tuple_header::ALLOC;
      tuple_ptr->m_header_bits |= Tuple_header::FREED;
    }
  }
  else if (opPtrP->is_first_operation())
  {
    jam();
    /* Aborting a delete that is the first operation: only if the tuple
     * was allocated within this transaction does the abort free it. */
    if (bits & Tuple_header::ALLOC)
    {
      jam();
      change = true;
      tuple_ptr->m_header_bits &= ~(Uint32)Tuple_header::ALLOC;
      tuple_ptr->m_header_bits |= Tuple_header::FREED;
    }
  }
  return change;
}
134 
/**
 * Abort the operation whose record i-value arrives in theData[0],
 * together with all later operations on the same tuple.
 *
 * @param signal theData[0] holds the Operationrec i-value to abort
 * @param flags  ZSKIP_TUX_TRIGGERS suppresses the TUX abort triggers
 *               (set 0 from execTUP_ABORTREQ)
 *
 * Steps: validate the transaction state, short-circuit reads, undo
 * index (TUX) changes, undo tuple changes via do_tup_abort_operation,
 * refresh the checksum if enabled, release reserved undo log space,
 * and finally unlink and reset the operation record.
 */
void Dbtup::do_tup_abortreq(Signal* signal, Uint32 flags)
{
  OperationrecPtr regOperPtr;
  FragrecordPtr regFragPtr;
  TablerecPtr regTabPtr;

  regOperPtr.i = signal->theData[0];
  c_operation_pool.getPtr(regOperPtr);
  /* Abort is only legal from these transaction states. */
  TransState trans_state= get_trans_state(regOperPtr.p);
  ndbrequire((trans_state == TRANS_STARTED) ||
             (trans_state == TRANS_TOO_MUCH_AI) ||
             (trans_state == TRANS_ERROR_WAIT_TUPKEYREQ) ||
             (trans_state == TRANS_IDLE));
  if (regOperPtr.p->op_struct.op_type == ZREAD) {
    jam();
    /* Reads change nothing: just reset the operation record. */
    initOpConnection(regOperPtr.p);
    return;
  }//if

  regFragPtr.i = regOperPtr.p->fragmentPtr;
  ptrCheckGuard(regFragPtr, cnoOfFragrec, fragrecord);

  regTabPtr.i = regFragPtr.p->fragTableId;
  ptrCheckGuard(regTabPtr, cnoOfTablerec, tablerec);

  PagePtr page;
  Tuple_header *tuple_ptr= (Tuple_header*)
    get_ptr(&page, &regOperPtr.p->m_tuple_location, regTabPtr.p);

  if (get_tuple_state(regOperPtr.p) == TUPLE_PREPARED)
  {
    jam();

    /* Fire TUX abort triggers for this operation and for every
     * not-yet-aborted operation after it in the active list, unless
     * the caller asked to skip them. */
    if (!regTabPtr.p->tuxCustomTriggers.isEmpty() &&
        ! (flags & ZSKIP_TUX_TRIGGERS))
    {
      jam();
      executeTuxAbortTriggers(signal,
                              regOperPtr.p,
                              regFragPtr.p,
                              regTabPtr.p);

      OperationrecPtr loopOpPtr;
      loopOpPtr.i = regOperPtr.p->nextActiveOp;
      while (loopOpPtr.i != RNIL)
      {
        jam();
        c_operation_pool.getPtr(loopOpPtr);
        if (get_tuple_state(loopOpPtr.p) != TUPLE_ALREADY_ABORTED)
        {
          jam();
          executeTuxAbortTriggers(signal,
                                  loopOpPtr.p,
                                  regFragPtr.p,
                                  regTabPtr.p);
        }
        loopOpPtr.i = loopOpPtr.p->nextActiveOp;
      }
    }

    /* Undo the tuple modifications for the same set of operations,
     * marking each follower TUPLE_ALREADY_ABORTED as it is undone. */
    {
      bool change = do_tup_abort_operation(signal,
                                           tuple_ptr,
                                           regOperPtr.p,
                                           regFragPtr.p,
                                           regTabPtr.p);

      OperationrecPtr loopOpPtr;
      loopOpPtr.i = regOperPtr.p->nextActiveOp;
      while (loopOpPtr.i != RNIL)
      {
        jam();
        c_operation_pool.getPtr(loopOpPtr);
        if (get_tuple_state(loopOpPtr.p) != TUPLE_ALREADY_ABORTED)
        {
          jam();
          change |= do_tup_abort_operation(signal,
                                           tuple_ptr,
                                           loopOpPtr.p,
                                           regFragPtr.p,
                                           regTabPtr.p);
          set_tuple_state(loopOpPtr.p, TUPLE_ALREADY_ABORTED);
        }
        loopOpPtr.i = loopOpPtr.p->nextActiveOp;
      }

      /* Header bits were (possibly) rewritten above, so refresh the
       * stored checksum if this table maintains one. */
      if (change && (regTabPtr.p->m_bits & Tablerec::TR_Checksum))
      {
        jam();
        setChecksum(tuple_ptr, regTabPtr.p);
      }
    }
  }

  /* If this was the only operation of the transaction on the tuple,
   * give back the undo log space it reserved. */
  if(regOperPtr.p->is_first_operation() && regOperPtr.p->is_last_operation())
  {
    if (regOperPtr.p->m_undo_buffer_space)
    {
      jam();
      D("Logfile_client - do_tup_abortreq");
      Logfile_client lgman(this, c_lgman, regFragPtr.p->m_logfile_group_id);
      lgman.free_log_space(regOperPtr.p->m_undo_buffer_space);
    }
  }

  removeActiveOpList(regOperPtr.p, tuple_ptr);
  initOpConnection(regOperPtr.p);
}
249 
250 /* **************************************************************** */
251 /* ********************** TRANSACTION ERROR MODULE **************** */
252 /* **************************************************************** */
/* **************************************************************** */
/* ********************** TRANSACTION ERROR MODULE **************** */
/* **************************************************************** */
/**
 * Translate an internal error_type (mostly interpreter failures) into
 * the block-global terrorCode, run the common error epilogue
 * (tupkeyErrorLab, which also sends TUPKEYREF), and return -1.
 *
 * @param req_struct current key-request context
 * @param error_type internal error discriminator; unknown values crash
 *                   via ndbrequire(false)
 * @return always -1
 */
int Dbtup::TUPKEY_abort(KeyReqStruct * req_struct, int error_type)
{
  switch(error_type) {
  case 1:
//tmupdate_alloc_error:
    terrorCode= ZMEM_NOMEM_ERROR;
    jam();
    break;

  /* Cases 15, 20, 23, 24, 26, 27, 28 all map to register
   * initialisation errors in the interpreter. */
  case 15:
    jam();
    terrorCode = ZREGISTER_INIT_ERROR;
    break;

  case 16:
    jam();
    terrorCode = ZTRY_TO_UPDATE_ERROR;
    break;

  case 17:
    jam();
    terrorCode = ZNO_ILLEGAL_NULL_ATTR;
    break;

  case 19:
    jam();
    terrorCode = ZTRY_TO_UPDATE_ERROR;
    break;

  case 20:
    jam();
    terrorCode = ZREGISTER_INIT_ERROR;
    break;

  case 22:
    jam();
    terrorCode = ZTOTAL_LEN_ERROR;
    break;

  case 23:
    jam();
    terrorCode = ZREGISTER_INIT_ERROR;
    break;

  case 24:
    jam();
    terrorCode = ZREGISTER_INIT_ERROR;
    break;

  case 26:
    jam();
    terrorCode = ZREGISTER_INIT_ERROR;
    break;

  case 27:
    jam();
    terrorCode = ZREGISTER_INIT_ERROR;
    break;

  case 28:
    jam();
    terrorCode = ZREGISTER_INIT_ERROR;
    break;

  case 29:
    jam();
    /* NOTE(review): no terrorCode assigned here — presumably the
     * caller set it before invoking TUPKEY_abort; confirm. */
    break;

  case 30:
    jam();
    terrorCode = ZCALL_ERROR;
    break;

  case 31:
    jam();
    terrorCode = ZSTACK_OVERFLOW_ERROR;
    break;

  case 32:
    jam();
    terrorCode = ZSTACK_UNDERFLOW_ERROR;
    break;

  case 33:
    jam();
    terrorCode = ZNO_INSTRUCTION_ERROR;
    break;

  case 34:
    jam();
    terrorCode = ZOUTSIDE_OF_PROGRAM_ERROR;
    break;

  case 35:
    jam();
    terrorCode = ZTOO_MANY_INSTRUCTIONS_ERROR;
    break;

  case 38:
    jam();
    terrorCode = ZTEMPORARY_RESOURCE_FAILURE;
    break;

  case 39:
    /* Error code depends on how the ATTRINFO phase failed. */
    if (get_trans_state(req_struct->operPtrP) == TRANS_TOO_MUCH_AI) {
      jam();
      terrorCode = ZTOO_MUCH_ATTRINFO_ERROR;
    } else if (get_trans_state(req_struct->operPtrP) == TRANS_ERROR_WAIT_TUPKEYREQ) {
      jam();
      terrorCode = ZSEIZE_ATTRINBUFREC_ERROR;
    } else {
      ndbrequire(false);
    }//if
    break;
  case 40:
    jam();
    terrorCode = ZUNSUPPORTED_BRANCH;
    break;
  default:
    ndbrequire(false);
    break;
  }//switch
  tupkeyErrorLab(req_struct);
  return -1;
}
378 
379 void Dbtup::early_tupkey_error(KeyReqStruct* req_struct)
380 {
381  Operationrec * const regOperPtr = req_struct->operPtrP;
382  ndbrequire(!regOperPtr->op_struct.in_active_list);
383  set_trans_state(regOperPtr, TRANS_IDLE);
384  set_tuple_state(regOperPtr, TUPLE_PREPARED);
385  initOpConnection(regOperPtr);
386  send_TUPKEYREF(req_struct->signal, regOperPtr);
387 }
388 
/**
 * Common TUPKEYREQ failure epilogue: reset the operation's states,
 * release reserved undo log space (when this is the transaction's only
 * operation on the tuple), unlink the operation from the tuple's active
 * list, free the connection, and send TUPKEYREF carrying terrorCode.
 */
void Dbtup::tupkeyErrorLab(KeyReqStruct* req_struct)
{
  Operationrec * const regOperPtr = req_struct->operPtrP;
  set_trans_state(regOperPtr, TRANS_IDLE);
  set_tuple_state(regOperPtr, TUPLE_PREPARED);

  FragrecordPtr fragPtr;
  fragPtr.i= regOperPtr->fragmentPtr;
  ptrCheckGuard(fragPtr, cnoOfFragrec, fragrecord);

  TablerecPtr tabPtr;
  tabPtr.i= fragPtr.p->fragTableId;
  ptrCheckGuard(tabPtr, cnoOfTablerec, tablerec);

  /* Only the sole operation of a transaction may release the undo log
   * space it reserved (mirrors the check in do_tup_abortreq). */
  if (regOperPtr->m_undo_buffer_space &&
      (regOperPtr->is_first_operation() && regOperPtr->is_last_operation()))
  {
    jam();
    D("Logfile_client - tupkeyErrorLab");
    Logfile_client lgman(this, c_lgman, fragPtr.p->m_logfile_group_id);
    lgman.free_log_space(regOperPtr->m_undo_buffer_space);
  }

  /* The tuple location may be null on early failures; in that case
   * removeActiveOpList gets a null tuple header (the operation is then
   * not in the active list, so the header is never dereferenced). */
  Uint32 *ptr = 0;
  if (!regOperPtr->m_tuple_location.isNull())
  {
    PagePtr tmp;
    ptr= get_ptr(&tmp, &regOperPtr->m_tuple_location, tabPtr.p);
  }


  removeActiveOpList(regOperPtr, (Tuple_header*)ptr);
  initOpConnection(regOperPtr);
  send_TUPKEYREF(req_struct->signal, regOperPtr);
}
424 
425 void Dbtup::send_TUPKEYREF(Signal* signal,
426  Operationrec* const regOperPtr)
427 {
428  TupKeyRef * const tupKeyRef = (TupKeyRef *)signal->getDataPtrSend();
429  tupKeyRef->userRef = regOperPtr->userpointer;
430  tupKeyRef->errorCode = terrorCode;
431  BlockReference lqhRef = calcInstanceBlockRef(DBLQH);
432  sendSignal(lqhRef, GSN_TUPKEYREF, signal,
433  TupKeyRef::SignalLength, JBB);
434 }
435 
439 void Dbtup::removeActiveOpList(Operationrec* const regOperPtr,
440  Tuple_header *tuple_ptr)
441 {
442  OperationrecPtr raoOperPtr;
443 
444  if(!regOperPtr->m_copy_tuple_location.isNull())
445  {
446  jam();
447  c_undo_buffer.free_copy_tuple(&regOperPtr->m_copy_tuple_location);
448  }
449 
450  if (regOperPtr->op_struct.in_active_list) {
451  regOperPtr->op_struct.in_active_list= false;
452  if (regOperPtr->nextActiveOp != RNIL) {
453  jam();
454  raoOperPtr.i= regOperPtr->nextActiveOp;
455  c_operation_pool.getPtr(raoOperPtr);
456  raoOperPtr.p->prevActiveOp= regOperPtr->prevActiveOp;
457  } else {
458  jam();
459  tuple_ptr->m_operation_ptr_i = regOperPtr->prevActiveOp;
460  }
461  if (regOperPtr->prevActiveOp != RNIL) {
462  jam();
463  raoOperPtr.i= regOperPtr->prevActiveOp;
464  c_operation_pool.getPtr(raoOperPtr);
465  raoOperPtr.p->nextActiveOp= regOperPtr->nextActiveOp;
466  }
467  regOperPtr->prevActiveOp= RNIL;
468  regOperPtr->nextActiveOp= RNIL;
469  }
470 }