MySQL 5.6.14 Source Code Document
ha_tina.cc
1 /* Copyright (c) 2004, 2011, Oracle and/or its affiliates. All rights reserved.
2 
3  This program is free software; you can redistribute it and/or modify
4  it under the terms of the GNU General Public License as published by
5  the Free Software Foundation; version 2 of the License.
6 
7  This program is distributed in the hope that it will be useful,
8  but WITHOUT ANY WARRANTY; without even the implied warranty of
9  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10  GNU General Public License for more details.
11 
12  You should have received a copy of the GNU General Public License
13  along with this program; if not, write to the Free Software
14  Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */
15 
16 /*
17  Make sure to look at ha_tina.h for more details.
18 
19  First off, this is a play thing for me, there are a number of things
20  wrong with it:
21  *) It was designed for csv and therefore its performance is highly
22  questionable.
23  *) Indexes have not been implemented. This is because the files can
24  be traded in and out of the table directory without having to worry
25  about rebuilding anything.
26  *) NULLs and "" are treated equally (like a spreadsheet).
27  *) There was in the beginning no point to anyone seeing this other
28  than me, so there is a good chance that I haven't quite documented
29  it well.
30  *) Less design, more "make it work"
31 
32  Now there are a few cool things with it:
33  *) Errors can result in corrupted data files.
34  *) Data files can be read by spreadsheets directly.
35 
36 TODO:
37  *) Move to a block system for larger files
38  *) Error recovery, it's all there, just need to finish it
39  *) Document how the chains work.
40 
41  -Brian
42 */
43 
44 #include "my_global.h"
45 #include "sql_priv.h"
46 #include "sql_class.h" // SSV
47 #include <mysql/plugin.h>
48 #include <mysql/psi/mysql_file.h>
49 #include "ha_tina.h"
50 #include "probes_mysql.h"
51 
52 #include <algorithm>
53 
54 using std::min;
55 using std::max;
56 
57 /*
58  uchar + uchar + ulonglong + ulonglong + ulonglong + ulonglong + uchar
59 */
60 #define META_BUFFER_SIZE (sizeof(uchar) + sizeof(uchar) + sizeof(ulonglong) \
61  + sizeof(ulonglong) + sizeof(ulonglong) + sizeof(ulonglong) + sizeof(uchar))
62 #define TINA_CHECK_HEADER 254 // The number we use to determine corruption
63 #define BLOB_MEMROOT_ALLOC_SIZE 8192
64 
65 /* The file extension */
66 #define CSV_EXT ".CSV" // The data file
67 #define CSN_EXT ".CSN" // Files used during repair and update
68 #define CSM_EXT ".CSM" // Meta file
69 
70 
71 static TINA_SHARE *get_share(const char *table_name, TABLE *table);
72 static int free_share(TINA_SHARE *share);
73 static int read_meta_file(File meta_file, ha_rows *rows);
74 static int write_meta_file(File meta_file, ha_rows rows, bool dirty);
75 
76 extern "C" void tina_get_status(void* param, int concurrent_insert);
77 extern "C" void tina_update_status(void* param);
78 extern "C" my_bool tina_check_status(void* param);
79 
80 /* Stuff for shares */
81 mysql_mutex_t tina_mutex;
82 static HASH tina_open_tables;
83 static handler *tina_create_handler(handlerton *hton,
84  TABLE_SHARE *table,
85  MEM_ROOT *mem_root);
86 
87 
88 /*****************************************************************************
89  ** TINA tables
90  *****************************************************************************/
91 
92 /*
93  Used for sorting chains with qsort().
94 */
95 int sort_set (tina_set *a, tina_set *b)
96 {
97  /*
98  We assume that intervals do not intersect. So, it is enough to compare
99  any two points. Here we take start of intervals for comparison.
100  */
101  return ( a->begin > b->begin ? 1 : ( a->begin < b->begin ? -1 : 0 ) );
102 }
103 
104 static uchar* tina_get_key(TINA_SHARE *share, size_t *length,
105  my_bool not_used __attribute__((unused)))
106 {
107  *length=share->table_name_length;
108  return (uchar*) share->table_name;
109 }
110 
111 #ifdef HAVE_PSI_INTERFACE
112 
113 static PSI_mutex_key csv_key_mutex_tina, csv_key_mutex_TINA_SHARE_mutex;
114 
115 static PSI_mutex_info all_tina_mutexes[]=
116 {
117  { &csv_key_mutex_tina, "tina", PSI_FLAG_GLOBAL},
118  { &csv_key_mutex_TINA_SHARE_mutex, "TINA_SHARE::mutex", 0}
119 };
120 
121 static PSI_file_key csv_key_file_metadata, csv_key_file_data,
122  csv_key_file_update;
123 
124 static PSI_file_info all_tina_files[]=
125 {
126  { &csv_key_file_metadata, "metadata", 0},
127  { &csv_key_file_data, "data", 0},
128  { &csv_key_file_update, "update", 0}
129 };
130 
131 static void init_tina_psi_keys(void)
132 {
133  const char* category= "csv";
134  int count;
135 
136  count= array_elements(all_tina_mutexes);
137  mysql_mutex_register(category, all_tina_mutexes, count);
138 
139  count= array_elements(all_tina_files);
140  mysql_file_register(category, all_tina_files, count);
141 }
142 #endif /* HAVE_PSI_INTERFACE */
143 
144 static int tina_init_func(void *p)
145 {
146  handlerton *tina_hton;
147 
148 #ifdef HAVE_PSI_INTERFACE
149  init_tina_psi_keys();
150 #endif
151 
152  tina_hton= (handlerton *)p;
153  mysql_mutex_init(csv_key_mutex_tina, &tina_mutex, MY_MUTEX_INIT_FAST);
154  (void) my_hash_init(&tina_open_tables,system_charset_info,32,0,0,
155  (my_hash_get_key) tina_get_key,0,0);
156  tina_hton->state= SHOW_OPTION_YES;
157  tina_hton->db_type= DB_TYPE_CSV_DB;
158  tina_hton->create= tina_create_handler;
159  tina_hton->flags= (HTON_CAN_RECREATE | HTON_SUPPORT_LOG_TABLES |
160  HTON_NO_PARTITION);
161  return 0;
162 }
163 
164 static int tina_done_func(void *p)
165 {
166  my_hash_free(&tina_open_tables);
167  mysql_mutex_destroy(&tina_mutex);
168 
169  return 0;
170 }
171 
172 
173 /*
174  Simple lock controls.
175 */
176 static TINA_SHARE *get_share(const char *table_name, TABLE *table)
177 {
178  TINA_SHARE *share;
179  char meta_file_name[FN_REFLEN];
180  MY_STAT file_stat; /* Stat information for the data file */
181  char *tmp_name;
182  uint length;
183 
184  mysql_mutex_lock(&tina_mutex);
185  length=(uint) strlen(table_name);
186 
187  /*
188  If share is not present in the hash, create a new share and
189  initialize its members.
190  */
191  if (!(share=(TINA_SHARE*) my_hash_search(&tina_open_tables,
192  (uchar*) table_name,
193  length)))
194  {
195  if (!my_multi_malloc(MYF(MY_WME | MY_ZEROFILL),
196  &share, sizeof(*share),
197  &tmp_name, length+1,
198  NullS))
199  {
200  mysql_mutex_unlock(&tina_mutex);
201  return NULL;
202  }
203 
204  share->use_count= 0;
205  share->is_log_table= FALSE;
206  share->table_name_length= length;
207  share->table_name= tmp_name;
208  share->crashed= FALSE;
209  share->rows_recorded= 0;
210  share->update_file_opened= FALSE;
211  share->tina_write_opened= FALSE;
212  share->data_file_version= 0;
213  strmov(share->table_name, table_name);
214  fn_format(share->data_file_name, table_name, "", CSV_EXT,
215  MY_REPLACE_EXT|MY_UNPACK_FILENAME);
216  fn_format(meta_file_name, table_name, "", CSM_EXT,
217  MY_REPLACE_EXT|MY_UNPACK_FILENAME);
218 
219  if (mysql_file_stat(csv_key_file_data,
220  share->data_file_name, &file_stat, MYF(MY_WME)) == NULL)
221  goto error;
222  share->saved_data_file_length= file_stat.st_size;
223 
224  if (my_hash_insert(&tina_open_tables, (uchar*) share))
225  goto error;
226  thr_lock_init(&share->lock);
227  mysql_mutex_init(csv_key_mutex_TINA_SHARE_mutex,
228  &share->mutex, MY_MUTEX_INIT_FAST);
229 
230  /*
231  Open or create the meta file. In the latter case, we'll get
232  an error during read_meta_file and mark the table as crashed.
233  Usually this will result in auto-repair, and we will get a good
234  meta-file in the end.
235  */
236  if (((share->meta_file= mysql_file_open(csv_key_file_metadata,
237  meta_file_name,
238  O_RDWR|O_CREAT,
239  MYF(MY_WME))) == -1) ||
240  read_meta_file(share->meta_file, &share->rows_recorded))
241  share->crashed= TRUE;
242  }
243 
244  share->use_count++;
245  mysql_mutex_unlock(&tina_mutex);
246 
247  return share;
248 
249 error:
250  mysql_mutex_unlock(&tina_mutex);
251  my_free(share);
252 
253  return NULL;
254 }
255 
256 
257 /*
258  Read CSV meta-file
259 
260  SYNOPSIS
261  read_meta_file()
262  meta_file The meta-file filedes
263  rows Pointer to the variable we use to store the row count.
264  These are read from the meta-file.
265 
266  DESCRIPTION
267 
268  Read the meta-file info. For now we are only interested in
269  row count, crashed bit and magic number.
270 
271  RETURN
272  0 - OK
273  non-zero - error occurred
274 */
275 
276 static int read_meta_file(File meta_file, ha_rows *rows)
277 {
278  uchar meta_buffer[META_BUFFER_SIZE];
279  uchar *ptr= meta_buffer;
280 
281  DBUG_ENTER("ha_tina::read_meta_file");
282 
283  mysql_file_seek(meta_file, 0, MY_SEEK_SET, MYF(0));
284  if (mysql_file_read(meta_file, (uchar*)meta_buffer, META_BUFFER_SIZE, 0)
285  != META_BUFFER_SIZE)
286  DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);
287 
288  /*
289  Parse out the meta data, we ignore version at the moment
290  */
291 
292  ptr+= sizeof(uchar)*2; // Move past header
293  *rows= (ha_rows)uint8korr(ptr);
294  ptr+= sizeof(ulonglong); // Move past rows
295  /*
296  Move past check_point, auto_increment and forced_flushes fields.
297  They are present in the format, but we do not use them yet.
298  */
299  ptr+= 3*sizeof(ulonglong);
300 
301  /* check crashed bit and magic number */
302  if ((meta_buffer[0] != (uchar)TINA_CHECK_HEADER) ||
303  ((bool)(*ptr)== TRUE))
304  DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);
305 
306  mysql_file_sync(meta_file, MYF(MY_WME));
307 
308  DBUG_RETURN(0);
309 }
310 
311 
312 /*
313  Write CSV meta-file
314 
315  SYNOPSIS
316  write_meta_file()
317  meta_file The meta-file filedes
318  rows The number of rows we have in the datafile.
319  dirty A flag, which marks whether we have a corrupt table
320 
321  DESCRIPTION
322 
323  Write meta-info to the file. Only the row count, crashed bit and
324  magic number matter now.
325 
326  RETURN
327  0 - OK
328  non-zero - error occurred
329 */
330 
331 static int write_meta_file(File meta_file, ha_rows rows, bool dirty)
332 {
333  uchar meta_buffer[META_BUFFER_SIZE];
334  uchar *ptr= meta_buffer;
335 
336  DBUG_ENTER("ha_tina::write_meta_file");
337 
338  *ptr= (uchar)TINA_CHECK_HEADER;
339  ptr+= sizeof(uchar);
340  *ptr= (uchar)TINA_VERSION;
341  ptr+= sizeof(uchar);
342  int8store(ptr, (ulonglong)rows);
343  ptr+= sizeof(ulonglong);
344  memset(ptr, 0, 3*sizeof(ulonglong));
345  /*
346  Skip over checkpoint, autoincrement and forced_flushes fields.
347  We'll need them later.
348  */
349  ptr+= 3*sizeof(ulonglong);
350  *ptr= (uchar)dirty;
351 
352  mysql_file_seek(meta_file, 0, MY_SEEK_SET, MYF(0));
353  if (mysql_file_write(meta_file, (uchar *)meta_buffer, META_BUFFER_SIZE, 0)
354  != META_BUFFER_SIZE)
355  DBUG_RETURN(-1);
356 
357  mysql_file_sync(meta_file, MYF(MY_WME));
358 
359  DBUG_RETURN(0);
360 }
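
/*
  Illustrative sketch (not part of ha_tina.cc): decoding the 35-byte .CSM
  meta-file written by write_meta_file() above. Byte 0 is TINA_CHECK_HEADER,
  byte 1 the version, bytes 2-9 the row count (int8store()/uint8korr() use
  little-endian byte order), bytes 10-33 the three reserved ulonglong fields
  and byte 34 the dirty/crashed flag. The helper name decode_csm is made up
  for this example.

    #include <cstdint>
    #include <cstdio>

    bool decode_csm(const char *path, uint64_t *rows, bool *dirty)
    {
      unsigned char buf[35];                  // META_BUFFER_SIZE
      FILE *f= fopen(path, "rb");
      if (!f || fread(buf, 1, sizeof(buf), f) != sizeof(buf))
      {
        if (f) fclose(f);
        return false;                         // short read: treat as crashed
      }
      fclose(f);
      if (buf[0] != 254)                      // TINA_CHECK_HEADER
        return false;                         // magic number mismatch
      uint64_t r= 0;
      for (int i= 7; i >= 0; i--)             // little-endian 64-bit row count
        r= (r << 8) | buf[2 + i];
      *rows= r;
      *dirty= (buf[34] != 0);                 // crashed/dirty flag
      return true;
    }
*/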
361 
362 bool ha_tina::check_and_repair(THD *thd)
363 {
364  HA_CHECK_OPT check_opt;
365  DBUG_ENTER("ha_tina::check_and_repair");
366 
367  check_opt.init();
368 
369  DBUG_RETURN(repair(thd, &check_opt));
370 }
371 
372 
373 int ha_tina::init_tina_writer()
374 {
375  DBUG_ENTER("ha_tina::init_tina_writer");
376 
377  /*
378  Mark the file as crashed. We will set the flag back when we close
379  the file. In the case of a crash it will remain marked crashed,
380  which enforces recovery.
381  */
382  (void)write_meta_file(share->meta_file, share->rows_recorded, TRUE);
383 
384  if ((share->tina_write_filedes=
385  mysql_file_open(csv_key_file_data,
386  share->data_file_name, O_RDWR|O_APPEND,
387  MYF(MY_WME))) == -1)
388  {
389  DBUG_PRINT("info", ("Could not open tina file writes"));
390  share->crashed= TRUE;
391  DBUG_RETURN(my_errno ? my_errno : -1);
392  }
393  share->tina_write_opened= TRUE;
394 
395  DBUG_RETURN(0);
396 }
397 
398 
399 bool ha_tina::is_crashed() const
400 {
401  DBUG_ENTER("ha_tina::is_crashed");
402  DBUG_RETURN(share->crashed);
403 }
404 
405 /*
406  Free lock controls.
407 */
408 static int free_share(TINA_SHARE *share)
409 {
410  DBUG_ENTER("ha_tina::free_share");
411  mysql_mutex_lock(&tina_mutex);
412  int result_code= 0;
413  if (!--share->use_count){
414  /* Write the meta file. Mark it as crashed if needed. */
415  (void)write_meta_file(share->meta_file, share->rows_recorded,
416  share->crashed ? TRUE :FALSE);
417  if (mysql_file_close(share->meta_file, MYF(0)))
418  result_code= 1;
419  if (share->tina_write_opened)
420  {
421  if (mysql_file_close(share->tina_write_filedes, MYF(0)))
422  result_code= 1;
423  share->tina_write_opened= FALSE;
424  }
425 
426  my_hash_delete(&tina_open_tables, (uchar*) share);
427  thr_lock_delete(&share->lock);
428  mysql_mutex_destroy(&share->mutex);
429  my_free(share);
430  }
431  mysql_mutex_unlock(&tina_mutex);
432 
433  DBUG_RETURN(result_code);
434 }
435 
436 
437 /*
438  This function finds the end of a line and returns the length
439  of the line ending.
440 
441  We support three kinds of line endings:
442  '\r' -- Old Mac OS line ending
443  '\n' -- Traditional Unix and Mac OS X line ending
444  '\r''\n' -- DOS/Windows line ending
445 */
446 
447 my_off_t find_eoln_buff(Transparent_file *data_buff, my_off_t begin,
448  my_off_t end, int *eoln_len)
449 {
450  *eoln_len= 0;
451 
452  for (my_off_t x= begin; x < end; x++)
453  {
454  /* Unix (includes Mac OS X) */
455  if (data_buff->get_value(x) == '\n')
456  *eoln_len= 1;
457  else
458  if (data_buff->get_value(x) == '\r') // Mac or Dos
459  {
460  /* old Mac line ending */
461  if (x + 1 == end || (data_buff->get_value(x + 1) != '\n'))
462  *eoln_len= 1;
463  else // DOS style ending
464  *eoln_len= 2;
465  }
466 
467  if (*eoln_len) // end of line was found
468  return x;
469  }
470 
471  return 0;
472 }
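
/*
  The same line-ending rules applied to a plain in-memory buffer, which can be
  handy for unit-testing the format. Illustrative sketch only; unlike
  find_eoln_buff() it returns 'end' rather than 0 when no line ending is found.

    #include <cstddef>

    size_t find_eoln(const char *buf, size_t begin, size_t end, int *eoln_len)
    {
      *eoln_len= 0;
      for (size_t x= begin; x < end; x++)
      {
        if (buf[x] == '\n')                   // Unix / Mac OS X
          *eoln_len= 1;
        else if (buf[x] == '\r')              // old Mac, or DOS "\r\n"
          *eoln_len= (x + 1 == end || buf[x + 1] != '\n') ? 1 : 2;
        if (*eoln_len)
          return x;
      }
      return end;                             // no line ending found
    }
*/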
473 
474 
475 static handler *tina_create_handler(handlerton *hton,
476  TABLE_SHARE *table,
477  MEM_ROOT *mem_root)
478 {
479  return new (mem_root) ha_tina(hton, table);
480 }
481 
482 
483 ha_tina::ha_tina(handlerton *hton, TABLE_SHARE *table_arg)
484  :handler(hton, table_arg),
485  /*
486  These definitions are found in handler.h
487  They are probably not completely right.
488  */
489  current_position(0), next_position(0), local_saved_data_file_length(0),
490  file_buff(0), chain_alloced(0), chain_size(DEFAULT_CHAIN_LENGTH),
491  local_data_file_version(0), records_is_known(0)
492 {
493  /* Set our original buffers from pre-allocated memory */
494  buffer.set((char*)byte_buffer, IO_SIZE, &my_charset_bin);
495  chain= chain_buffer;
496  file_buff= new Transparent_file();
497  init_alloc_root(&blobroot, BLOB_MEMROOT_ALLOC_SIZE, 0);
498 }
499 
500 
501 /*
502  Encode a buffer into the quoted format.
503 */
504 
505 int ha_tina::encode_quote(uchar *buf)
506 {
507  char attribute_buffer[1024];
508  String attribute(attribute_buffer, sizeof(attribute_buffer),
509  &my_charset_bin);
510 
511  my_bitmap_map *org_bitmap= dbug_tmp_use_all_columns(table, table->read_set);
512  buffer.length(0);
513 
514  for (Field **field=table->field ; *field ; field++)
515  {
516  const char *ptr;
517  const char *end_ptr;
518  const bool was_null= (*field)->is_null();
519 
520  /*
521  assistance for backwards compatibility in production builds.
522  note: this will not work for ENUM columns.
523  */
524  if (was_null)
525  {
526  (*field)->set_default();
527  (*field)->set_notnull();
528  }
529 
530  (*field)->val_str(&attribute,&attribute);
531 
532  if (was_null)
533  (*field)->set_null();
534 
535  if ((*field)->str_needs_quotes())
536  {
537  ptr= attribute.ptr();
538  end_ptr= attribute.length() + ptr;
539 
540  buffer.append('"');
541 
542  for (; ptr < end_ptr; ptr++)
543  {
544  if (*ptr == '"')
545  {
546  buffer.append('\\');
547  buffer.append('"');
548  }
549  else if (*ptr == '\r')
550  {
551  buffer.append('\\');
552  buffer.append('r');
553  }
554  else if (*ptr == '\\')
555  {
556  buffer.append('\\');
557  buffer.append('\\');
558  }
559  else if (*ptr == '\n')
560  {
561  buffer.append('\\');
562  buffer.append('n');
563  }
564  else
565  buffer.append(*ptr);
566  }
567  buffer.append('"');
568  }
569  else
570  {
571  buffer.append(attribute);
572  }
573 
574  buffer.append(',');
575  }
576  // Remove the comma, add a line feed
577  buffer.length(buffer.length() - 1);
578  buffer.append('\n');
579 
580  //buffer.replace(buffer.length(), 0, "\n", 1);
581 
582  dbug_tmp_restore_column_map(table->read_set, org_bitmap);
583  return (buffer.length());
584 }
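
/*
  Illustrative sketch (not part of the engine) of the quoting rules applied by
  encode_quote() above to fields for which str_needs_quotes() is true; other
  fields are written unquoted. The helper name csv_quote is made up for this
  example.

    #include <string>

    std::string csv_quote(const std::string &value)
    {
      std::string out= "\"";
      for (char c : value)
      {
        switch (c)
        {
        case '"':  out+= "\\\""; break;       // double quote
        case '\r': out+= "\\r";  break;       // carriage return
        case '\\': out+= "\\\\"; break;       // backslash
        case '\n': out+= "\\n";  break;       // newline
        default:   out+= c;
        }
      }
      out+= '"';
      return out;
    }

  For example, the field value  say "hi"  is stored in the data file as
  "say \"hi\"" followed by a comma or the line terminator.
*/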
585 
586 /*
587  chain_append() adds delete positions to the chain that we use to keep
588  track of space. Then the chain will be used to clean up "holes" that occur
589  due to deletes and updates.
590 */
591 int ha_tina::chain_append()
592 {
593  if ( chain_ptr != chain && (chain_ptr -1)->end == current_position)
594  (chain_ptr -1)->end= next_position;
595  else
596  {
597  /* We set up for the next position */
598  if ((off_t)(chain_ptr - chain) == (chain_size -1))
599  {
600  my_off_t location= chain_ptr - chain;
601  chain_size += DEFAULT_CHAIN_LENGTH;
602  if (chain_alloced)
603  {
604  /* Must cast since my_malloc unlike malloc doesn't have a void ptr */
605  if ((chain= (tina_set *) my_realloc((uchar*)chain,
606  chain_size, MYF(MY_WME))) == NULL)
607  return -1;
608  }
609  else
610  {
611  tina_set *ptr= (tina_set *) my_malloc(chain_size * sizeof(tina_set),
612  MYF(MY_WME));
613  memcpy(ptr, chain, DEFAULT_CHAIN_LENGTH * sizeof(tina_set));
614  chain= ptr;
615  chain_alloced++;
616  }
617  chain_ptr= chain + location;
618  }
619  chain_ptr->begin= current_position;
620  chain_ptr->end= next_position;
621  chain_ptr++;
622  }
623 
624  return 0;
625 }
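
/*
  Minimal sketch of the "hole chain" bookkeeping above, using standard
  containers instead of the engine's tina_set array (illustrative only):
  every deleted or updated row contributes a [begin, end) interval, and an
  interval that starts exactly where the previous one ended is merged into
  it, so consecutive deletes form a single hole.

    #include <cstdint>
    #include <vector>

    struct hole { uint64_t begin, end; };

    void append_hole(std::vector<hole> &chain, uint64_t begin, uint64_t end)
    {
      if (!chain.empty() && chain.back().end == begin)
        chain.back().end= end;                // extend the previous hole
      else
        chain.push_back({begin, end});        // start a new hole
    }

  Deleting three adjacent rows occupying [0,10), [10,25) and [25,31) leaves a
  single hole [0,31) for rnd_end() to skip when it rewrites the data file.
*/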
626 
627 
628 /*
629  Scans for a row.
630 */
631 int ha_tina::find_current_row(uchar *buf)
632 {
633  my_off_t end_offset, curr_offset= current_position;
634  int eoln_len;
635  my_bitmap_map *org_bitmap;
636  int error;
637  bool read_all;
638  DBUG_ENTER("ha_tina::find_current_row");
639 
640  free_root(&blobroot, MYF(0));
641 
642  /*
643  We do not read further than local_saved_data_file_length in order
644  not to conflict with an ongoing concurrent insert.
645  */
646  if ((end_offset=
647  find_eoln_buff(file_buff, current_position,
648  local_saved_data_file_length, &eoln_len)) == 0)
649  DBUG_RETURN(HA_ERR_END_OF_FILE);
650 
651  /* We must read all columns in case a table is opened for update */
652  read_all= !bitmap_is_clear_all(table->write_set);
653  /* Avoid asserts in ::store() for columns that are not going to be updated */
654  org_bitmap= dbug_tmp_use_all_columns(table, table->write_set);
655  error= HA_ERR_CRASHED_ON_USAGE;
656 
657  memset(buf, 0, table->s->null_bytes);
658 
659  /*
660  Parse the line obtained using the following algorithm
661 
662  BEGIN
663  1) Store the EOL (end of line) for the current row
664  2) Until all the fields in the current query have been
665  filled
666  2.1) If the current field begins with a quote
667  2.1.1) Until EOL is reached
668  a) If the end of the current field is reached, move
669  to the next field and jump to step 2.3
670  b) If the current character is a \\ handle
671  \\n, \\r, \\, \\"
672  c) else append the current character to the buffer
673  and check whether EOL has been reached.
674  2.2) If the current field does not begin with a quote
675  2.2.1) Until EOL is reached
676  a) If the end of the field has been reached, move to the
677  next field and jump to step 2.3
678  b) If the current character begins with \\ handle
679  \\n, \\r, \\, \\"
680  c) else append the current character to the buffer
681  and check whether EOL has been reached.
682  2.3) Store the current field value and jump to 2)
683  TERMINATE
684  */
685 
686  for (Field **field=table->field ; *field ; field++)
687  {
688  char curr_char;
689 
690  buffer.length(0);
691  if (curr_offset >= end_offset)
692  goto err;
693  curr_char= file_buff->get_value(curr_offset);
694  /* Handle the case where the first character is a quote */
695  if (curr_char == '"')
696  {
697  /* Increment past the first quote */
698  curr_offset++;
699 
700  /* Loop through the row to extract the values for the current field */
701  for ( ; curr_offset < end_offset; curr_offset++)
702  {
703  curr_char= file_buff->get_value(curr_offset);
704  /* check for end of the current field */
705  if (curr_char == '"' &&
706  (curr_offset == end_offset - 1 ||
707  file_buff->get_value(curr_offset + 1) == ','))
708  {
709  /* Move past the , and the " */
710  curr_offset+= 2;
711  break;
712  }
713  if (curr_char == '\\' && curr_offset != (end_offset - 1))
714  {
715  curr_offset++;
716  curr_char= file_buff->get_value(curr_offset);
717  if (curr_char == 'r')
718  buffer.append('\r');
719  else if (curr_char == 'n' )
720  buffer.append('\n');
721  else if (curr_char == '\\' || curr_char == '"')
722  buffer.append(curr_char);
723  else /* This could only happen with an externally created file */
724  {
725  buffer.append('\\');
726  buffer.append(curr_char);
727  }
728  }
729  else // ordinary symbol
730  {
731  /*
732  If we are at the final symbol and no closing quote was found =>
733  we are working with a damaged file.
734  */
735  if (curr_offset == end_offset - 1)
736  goto err;
737  buffer.append(curr_char);
738  }
739  }
740  }
741  else
742  {
743  for ( ; curr_offset < end_offset; curr_offset++)
744  {
745  curr_char= file_buff->get_value(curr_offset);
746  /* Move past the ,*/
747  if (curr_char == ',')
748  {
749  curr_offset++;
750  break;
751  }
752  if (curr_char == '\\' && curr_offset != (end_offset - 1))
753  {
754  curr_offset++;
755  curr_char= file_buff->get_value(curr_offset);
756  if (curr_char == 'r')
757  buffer.append('\r');
758  else if (curr_char == 'n' )
759  buffer.append('\n');
760  else if (curr_char == '\\' || curr_char == '"')
761  buffer.append(curr_char);
762  else /* This could only happen with an externally created file */
763  {
764  buffer.append('\\');
765  buffer.append(curr_char);
766  }
767  }
768  else
769  {
770  /*
771  We are at the final symbol and a quote was found for the
772  unquoted field => We are working with a damaged field.
773  */
774  if (curr_offset == end_offset - 1 && curr_char == '"')
775  goto err;
776  buffer.append(curr_char);
777  }
778  }
779  }
780 
781  if (read_all || bitmap_is_set(table->read_set, (*field)->field_index))
782  {
783  bool is_enum= ((*field)->real_type() == MYSQL_TYPE_ENUM);
784  /*
785  Here CHECK_FIELD_WARN checks that all values in the csv file are valid
786  which is normally the case, if they were written by
787  INSERT -> ha_tina::write_row. '0' values on ENUM fields are considered
788  invalid by Field_enum::store() but it can store them on INSERT anyway.
789  Thus, for enums we silence the warning, as it doesn't really mean
790  an invalid value.
791  */
792  if ((*field)->store(buffer.ptr(), buffer.length(), buffer.charset(),
793  is_enum ? CHECK_FIELD_IGNORE : CHECK_FIELD_WARN))
794  {
795  if (!is_enum)
796  goto err;
797  }
798  if ((*field)->flags & BLOB_FLAG)
799  {
800  Field_blob *blob= *(Field_blob**) field;
801  uchar *src, *tgt;
802  uint length, packlength;
803 
804  packlength= blob->pack_length_no_ptr();
805  length= blob->get_length(blob->ptr);
806  memcpy(&src, blob->ptr + packlength, sizeof(char*));
807  if (src)
808  {
809  tgt= (uchar*) alloc_root(&blobroot, length);
810  bmove(tgt, src, length);
811  memcpy(blob->ptr + packlength, &tgt, sizeof(char*));
812  }
813  }
814  }
815  }
816  next_position= end_offset + eoln_len;
817  error= 0;
818 
819 err:
820  dbug_tmp_restore_column_map(table->write_set, org_bitmap);
821 
822  DBUG_RETURN(error);
823 }
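
/*
  Stand-alone sketch of the field-splitting rules implemented by
  find_current_row() above (illustrative only; the checks for damaged rows are
  omitted and the helper name split_csv_row is made up for this example):

    #include <string>
    #include <vector>

    std::vector<std::string> split_csv_row(const std::string &line)
    {
      std::vector<std::string> fields;
      size_t i= 0;
      while (i < line.size())
      {
        std::string value;
        bool quoted= (line[i] == '"');
        if (quoted)
          i++;                                // step past the opening quote
        while (i < line.size())
        {
          char c= line[i];
          if (quoted && c == '"' &&
              (i + 1 == line.size() || line[i + 1] == ','))
          { i+= 2; break; }                   // closing quote plus separator
          if (!quoted && c == ',')
          { i++; break; }                     // unquoted field separator
          if (c == '\\' && i + 1 < line.size())
          {
            char n= line[i + 1];
            if (n == 'r')  { value+= '\r'; i+= 2; continue; }
            if (n == 'n')  { value+= '\n'; i+= 2; continue; }
            if (n == '\\' || n == '"') { value+= n; i+= 2; continue; }
          }
          value+= c;                          // ordinary character
          i++;
        }
        fields.push_back(value);
      }
      return fields;
    }
*/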
824 
825 /*
826  If frm_error() is called in table.cc this is called to find out what file
827  extensions exist for this handler.
828 */
829 static const char *ha_tina_exts[] = {
830  CSV_EXT,
831  CSM_EXT,
832  NullS
833 };
834 
835 const char **ha_tina::bas_ext() const
836 {
837  return ha_tina_exts;
838 }
839 
840 /*
841  Three functions below are needed to enable concurrent insert functionality
842  for CSV engine. For more details see mysys/thr_lock.c
843 */
844 
845 void tina_get_status(void* param, int concurrent_insert)
846 {
847  ha_tina *tina= (ha_tina*) param;
848  tina->get_status();
849 }
850 
851 void tina_update_status(void* param)
852 {
853  ha_tina *tina= (ha_tina*) param;
854  tina->update_status();
855 }
856 
857 /* this should exist and return 0 for concurrent insert to work */
858 my_bool tina_check_status(void* param)
859 {
860  return 0;
861 }
862 
863 /*
864  Save the state of the table
865 
866  SYNOPSIS
867  get_status()
868 
869  DESCRIPTION
870  This function is used to retrieve the file length during the lock
871  phase of a concurrent insert. For more details see the comment to
872  ha_tina::update_status below.
873 */
874 
875 void ha_tina::get_status()
876 {
877  if (share->is_log_table)
878  {
879  /*
880  We have to use mutex to follow pthreads memory visibility
881  rules for share->saved_data_file_length
882  */
883  mysql_mutex_lock(&share->mutex);
884  local_saved_data_file_length= share->saved_data_file_length;
885  mysql_mutex_unlock(&share->mutex);
886  return;
887  }
888  local_saved_data_file_length= share->saved_data_file_length;
889 }
890 
891 
892 /*
893  Correct the state of the table. Called by unlock routines
894  before the write lock is released.
895 
896  SYNOPSIS
897  update_status()
898 
899  DESCRIPTION
900  When we employ the concurrent insert lock, we save the current length of
901  the file during the lock phase. We do not read beyond this saved value, as
902  we don't want to interfere with an ongoing concurrent insert. Writers
903  update the file length info during unlock with update_status().
904 
905  NOTE
906  For log tables, concurrent insert works differently. The reason is that
907  log tables are always opened and locked. As they do not unlock tables,
908  the file length after writes has to be updated in a different way. For
909  this purpose we need the is_log_table flag. When this flag is set,
910  we call update_status() explicitly after each row write.
911 */
912 
913 void ha_tina::update_status()
914 {
915  /* correct local_saved_data_file_length for writers */
916  share->saved_data_file_length= local_saved_data_file_length;
917 }
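
/*
  Minimal sketch of the snapshotting described above, with standard threading
  primitives standing in for THR_LOCK/mysql_mutex (illustrative only): a
  reader copies the published file length when it takes its lock and never
  scans past that copy; a writer grows its private copy as it appends rows
  and publishes it on unlock (or after every row for log tables).

    #include <cstdint>
    #include <mutex>

    struct csv_share_sketch
    {
      std::mutex mtx;
      uint64_t saved_data_file_length= 0;     // published, consistent length
    };

    struct csv_handler_sketch
    {
      uint64_t local_length= 0;               // this handler's private copy

      void get_status(csv_share_sketch &s)    // reader side, at lock time
      {
        std::lock_guard<std::mutex> g(s.mtx);
        local_length= s.saved_data_file_length;
      }
      void row_appended(uint64_t bytes)       // writer side, per write_row()
      {
        local_length+= bytes;
      }
      void update_status(csv_share_sketch &s) // writer side, at unlock time
      {
        std::lock_guard<std::mutex> g(s.mtx);
        s.saved_data_file_length= local_length;
      }
    };
*/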
918 
919 
920 /*
921  Open a database file. Keep in mind that tables are cached, so
922  this will not be called for every request. Any sort of positions
923  that need to be reset should be kept in the ::extra() call.
924 */
925 int ha_tina::open(const char *name, int mode, uint open_options)
926 {
927  DBUG_ENTER("ha_tina::open");
928 
929  if (!(share= get_share(name, table)))
930  DBUG_RETURN(HA_ERR_OUT_OF_MEM);
931 
932  if (share->crashed && !(open_options & HA_OPEN_FOR_REPAIR))
933  {
934  free_share(share);
935  DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);
936  }
937 
938  local_data_file_version= share->data_file_version;
939  if ((data_file= mysql_file_open(csv_key_file_data,
940  share->data_file_name,
941  O_RDONLY, MYF(MY_WME))) == -1)
942  {
943  free_share(share);
944  DBUG_RETURN(my_errno ? my_errno : -1);
945  }
946 
947  /*
948  Init locking. Pass handler object to the locking routines,
949  so that they could save/update local_saved_data_file_length value
950  during locking. This is needed to enable concurrent inserts.
951  */
952  thr_lock_data_init(&share->lock, &lock, (void*) this);
953  ref_length= sizeof(my_off_t);
954 
955  share->lock.get_status= tina_get_status;
956  share->lock.update_status= tina_update_status;
957  share->lock.check_status= tina_check_status;
958 
959  DBUG_RETURN(0);
960 }
961 
962 
963 /*
964  Close a database file. We remove ourselves from the shared structure.
965  If it is empty we destroy it.
966 */
967 int ha_tina::close(void)
968 {
969  int rc= 0;
970  DBUG_ENTER("ha_tina::close");
971  rc= mysql_file_close(data_file, MYF(0));
972  DBUG_RETURN(free_share(share) || rc);
973 }
974 
975 /*
976  This is an INSERT. At the moment this handler just seeks to the end
977  of the file and appends the data. In an error case it really should
978  just truncate to the original position (this is not done yet).
979 */
980 int ha_tina::write_row(uchar * buf)
981 {
982  int size;
983  DBUG_ENTER("ha_tina::write_row");
984 
985  if (share->crashed)
986  DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);
987 
988  ha_statistic_increment(&SSV::ha_write_count);
989 
990  size= encode_quote(buf);
991 
992  if (!share->tina_write_opened)
993  if (init_tina_writer())
994  DBUG_RETURN(-1);
995 
996  /* use pwrite, as concurrent reader could have changed the position */
997  if (mysql_file_write(share->tina_write_filedes, (uchar*)buffer.ptr(), size,
998  MYF(MY_WME | MY_NABP)))
999  DBUG_RETURN(-1);
1000 
1001  /* update local copy of the max position to see our own changes */
1002  local_saved_data_file_length+= size;
1003 
1004  /* update shared info */
1005  mysql_mutex_lock(&share->mutex);
1006  share->rows_recorded++;
1007  /* update status for the log tables */
1008  if (share->is_log_table)
1009  update_status();
1010  mysql_mutex_unlock(&share->mutex);
1011 
1012  stats.records++;
1013  DBUG_RETURN(0);
1014 }
1015 
1016 
1017 int ha_tina::open_update_temp_file_if_needed()
1018 {
1019  char updated_fname[FN_REFLEN];
1020 
1021  if (!share->update_file_opened)
1022  {
1023  if ((update_temp_file=
1024  mysql_file_create(csv_key_file_update,
1025  fn_format(updated_fname, share->table_name,
1026  "", CSN_EXT,
1027  MY_REPLACE_EXT | MY_UNPACK_FILENAME),
1028  0, O_RDWR | O_TRUNC, MYF(MY_WME))) < 0)
1029  return 1;
1030  share->update_file_opened= TRUE;
1031  temp_file_length= 0;
1032  }
1033  return 0;
1034 }
1035 
1036 /*
1037  This is called for an update.
1038  Make sure you put in code to increment the auto increment.
1039  Currently auto increment is not being
1040  fixed since autoincrements have yet to be added to this table handler.
1041  This will be called in a table scan right before the previous ::rnd_next()
1042  call.
1043 */
1044 int ha_tina::update_row(const uchar * old_data, uchar * new_data)
1045 {
1046  int size;
1047  int rc= -1;
1048  DBUG_ENTER("ha_tina::update_row");
1049 
1050  ha_statistic_increment(&SSV::ha_update_count);
1051 
1052  size= encode_quote(new_data);
1053 
1054  /*
1055  During update we mark each updating record as deleted
1056  (see the chain_append()) then write new one to the temporary data file.
1057  At the end of the sequence in the rnd_end() we append all non-marked
1058  records from the data file to the temporary data file then rename it.
1059  The temp_file_length is used to calculate new data file length.
1060  */
1061  if (chain_append())
1062  goto err;
1063 
1064  if (open_update_temp_file_if_needed())
1065  goto err;
1066 
1067  if (mysql_file_write(update_temp_file, (uchar*)buffer.ptr(), size,
1068  MYF(MY_WME | MY_NABP)))
1069  goto err;
1070  temp_file_length+= size;
1071  rc= 0;
1072 
1073  /* UPDATE should never happen on the log tables */
1074  DBUG_ASSERT(!share->is_log_table);
1075 
1076 err:
1077  DBUG_PRINT("info",("rc = %d", rc));
1078  DBUG_RETURN(rc);
1079 }
1080 
1081 
1082 /*
1083  Deletes a row. First the database will find the row, and then call this
1084  method. In the case of a table scan, the previous call to this will be
1085  the ::rnd_next() that found this row.
1086  The exception to this is an ORDER BY. This will cause the table handler
1087  to walk the table noting the positions of all rows that match a query.
1088  The table will then be deleted/positioned based on the ORDER (so RANDOM,
1089  DESC, ASC).
1090 */
1091 int ha_tina::delete_row(const uchar * buf)
1092 {
1093  DBUG_ENTER("ha_tina::delete_row");
1094  ha_statistic_increment(&SSV::ha_delete_count);
1095 
1096  if (chain_append())
1097  DBUG_RETURN(-1);
1098 
1099  stats.records--;
1100  /* Update shared info */
1101  DBUG_ASSERT(share->rows_recorded);
1102  mysql_mutex_lock(&share->mutex);
1103  share->rows_recorded--;
1104  mysql_mutex_unlock(&share->mutex);
1105 
1106  /* DELETE should never happen on the log table */
1107  DBUG_ASSERT(!share->is_log_table);
1108 
1109  DBUG_RETURN(0);
1110 }
1111 
1112 
1126 int ha_tina::init_data_file()
1127 {
1128  if (local_data_file_version != share->data_file_version)
1129  {
1130  local_data_file_version= share->data_file_version;
1131  if (mysql_file_close(data_file, MYF(0)) ||
1132  (data_file= mysql_file_open(csv_key_file_data,
1133  share->data_file_name, O_RDONLY,
1134  MYF(MY_WME))) == -1)
1135  return my_errno ? my_errno : -1;
1136  }
1137  file_buff->init_buff(data_file);
1138  return 0;
1139 }
1140 
1141 
1142 /*
1143  All table scans call this first.
1144  The order of a table scan is:
1145 
1146  ha_tina::store_lock
1147  ha_tina::external_lock
1148  ha_tina::info
1149  ha_tina::rnd_init
1150  ha_tina::extra
1151  ENUM HA_EXTRA_CACHE Cache record in HA_rrnd()
1152  ha_tina::rnd_next
1153  ha_tina::rnd_next
1154  ha_tina::rnd_next
1155  ha_tina::rnd_next
1156  ha_tina::rnd_next
1157  ha_tina::rnd_next
1158  ha_tina::rnd_next
1159  ha_tina::rnd_next
1160  ha_tina::rnd_next
1161  ha_tina::extra
1162  ENUM HA_EXTRA_NO_CACHE End caching of records (def)
1163  ha_tina::external_lock
1164  ha_tina::extra
1165  ENUM HA_EXTRA_RESET Reset database to after open
1166 
1167  Each call to ::rnd_next() represents a row returned in the scan. When no more
1168  rows can be returned, rnd_next() returns a value of HA_ERR_END_OF_FILE.
1169  The ::info() call is just for the optimizer.
1170 
1171 */
1172 
1173 int ha_tina::rnd_init(bool scan)
1174 {
1175  DBUG_ENTER("ha_tina::rnd_init");
1176 
1177  /* set buffer to the beginning of the file */
1178  if (share->crashed || init_data_file())
1179  DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);
1180 
1181  current_position= next_position= 0;
1182  stats.records= 0;
1183  records_is_known= 0;
1184  chain_ptr= chain;
1185 
1186  DBUG_RETURN(0);
1187 }
1188 
1189 /*
1190  ::rnd_next() does all the heavy lifting for a table scan. You will need to
1191  populate *buf with the correct field data. You can walk the field to
1192  determine at what position you should store the data (take a look at how
1193  ::find_current_row() works). The structure is something like:
1194  0Foo Dog Friend
1195  The first offset is for the first attribute. All space before that is
1196  reserved for null count.
1197  Basically this works as a mask for which fields are nulled (compared to just
1198  empty).
1199  This table handler doesn't do nulls and does not know the difference between
1200  NULL and "". This is ok since this table handler is for spreadsheets and
1201  they don't know about them either :)
1202 */
1203 int ha_tina::rnd_next(uchar *buf)
1204 {
1205  int rc;
1206  DBUG_ENTER("ha_tina::rnd_next");
1207  MYSQL_READ_ROW_START(table_share->db.str, table_share->table_name.str,
1208  TRUE);
1209 
1210  if (share->crashed)
1211  {
1212  rc= HA_ERR_CRASHED_ON_USAGE;
1213  goto end;
1214  }
1215 
1216  ha_statistic_increment(&SSV::ha_read_rnd_next_count);
1217 
1218  current_position= next_position;
1219 
1220  /* don't scan an empty file */
1221  if (!local_saved_data_file_length)
1222  {
1223  rc= HA_ERR_END_OF_FILE;
1224  goto end;
1225  }
1226 
1227  if ((rc= find_current_row(buf)))
1228  goto end;
1229 
1230  stats.records++;
1231  rc= 0;
1232 end:
1233  MYSQL_READ_ROW_DONE(rc);
1234  DBUG_RETURN(rc);
1235 }
1236 
1237 /*
1238  In the case of an order by rows will need to be sorted.
1239  ::position() is called after each call to ::rnd_next(),
1240  and the data it stores goes into a byte array. You can store this
1241  data via my_store_ptr(). ref_length is a variable defined in the
1242  class that is the sizeof() of the position being stored. In our case
1243  it's just a position. Look at the bdb code if you want to see a case
1244  where something other than a number is stored.
1245 */
1246 void ha_tina::position(const uchar *record)
1247 {
1248  DBUG_ENTER("ha_tina::position");
1249  my_store_ptr(ref, ref_length, current_position);
1250  DBUG_VOID_RETURN;
1251 }
1252 
1253 
1254 /*
1255  Used to fetch a row from a position stored with ::position().
1256  my_get_ptr() retrieves the data for you.
1257 */
1258 
1259 int ha_tina::rnd_pos(uchar * buf, uchar *pos)
1260 {
1261  int rc;
1262  DBUG_ENTER("ha_tina::rnd_pos");
1263  MYSQL_READ_ROW_START(table_share->db.str, table_share->table_name.str,
1264  FALSE);
1265  ha_statistic_increment(&SSV::ha_read_rnd_count);
1266  current_position= my_get_ptr(pos,ref_length);
1267  rc= find_current_row(buf);
1268  MYSQL_READ_ROW_DONE(rc);
1269  DBUG_RETURN(rc);
1270 }
1271 
1272 /*
1273  ::info() is used to return information to the optimizer.
1274  Currently this table handler doesn't implement most of the fields
1275  really needed. SHOW also makes use of this data
1276 */
1277 int ha_tina::info(uint flag)
1278 {
1279  DBUG_ENTER("ha_tina::info");
1280  /* This is a lie, but you don't want the optimizer to see zero or 1 */
1281  if (!records_is_known && stats.records < 2)
1282  stats.records= 2;
1283  DBUG_RETURN(0);
1284 }
1285 
1286 /*
1287  Grab bag of flags that are sent to the table handler every so often.
1288  HA_EXTRA_RESET and HA_EXTRA_RESET_STATE are the most frequently called.
1289  You are not required to implement any of these.
1290 */
1291 int ha_tina::extra(enum ha_extra_function operation)
1292 {
1293  DBUG_ENTER("ha_tina::extra");
1294  if (operation == HA_EXTRA_MARK_AS_LOG_TABLE)
1295  {
1296  mysql_mutex_lock(&share->mutex);
1297  share->is_log_table= TRUE;
1298  mysql_mutex_unlock(&share->mutex);
1299  }
1300  DBUG_RETURN(0);
1301 }
1302 
1303 
1304 /*
1305  Set end_pos to the last valid byte of the continuous area closest
1306  to the given "hole" stored in the buffer. "Valid" here means
1307  not listed in the chain of deleted records ("holes").
1308 */
1309 bool ha_tina::get_write_pos(my_off_t *end_pos, tina_set *closest_hole)
1310 {
1311  if (closest_hole == chain_ptr) /* no more chains */
1312  *end_pos= file_buff->end();
1313  else
1314  *end_pos= min(file_buff->end(), closest_hole->begin);
1315  return (closest_hole != chain_ptr) && (*end_pos == closest_hole->begin);
1316 }
1317 
1318 
1319 /*
1320  Called after each table scan. In particular after deletes,
1321  and updates. In the last case we employ chain of deleted
1322  slots to clean up all of the dead space we have collected while
1323  performing deletes/updates.
1324 */
1325 int ha_tina::rnd_end()
1326 {
1327  char updated_fname[FN_REFLEN];
1328  my_off_t file_buffer_start= 0;
1329  DBUG_ENTER("ha_tina::rnd_end");
1330 
1331  free_root(&blobroot, MYF(0));
1332  records_is_known= 1;
1333 
1334  if ((chain_ptr - chain) > 0)
1335  {
1336  tina_set *ptr= chain;
1337 
1338  /*
1339  Re-read the beginning of a file (as the buffer should point to the
1340  end of file after the scan).
1341  */
1342  file_buff->init_buff(data_file);
1343 
1344  /*
1345  The sort is needed when there were updates/deletes in random order.
1346  It sorts so that we move the first blocks to the beginning.
1347  */
1348  my_qsort(chain, (size_t)(chain_ptr - chain), sizeof(tina_set),
1349  (qsort_cmp)sort_set);
1350 
1351  my_off_t write_begin= 0, write_end;
1352 
1353  /* create the file to write updated table if it wasn't yet created */
1354  if (open_update_temp_file_if_needed())
1355  DBUG_RETURN(-1);
1356 
1357  /* write the file with updated info */
1358  while ((file_buffer_start != (my_off_t)-1)) // while not end of file
1359  {
1360  bool in_hole= get_write_pos(&write_end, ptr);
1361  my_off_t write_length= write_end - write_begin;
1362 
1363  /* if there is something to write, write it */
1364  if (write_length)
1365  {
1366  if (mysql_file_write(update_temp_file,
1367  (uchar*) (file_buff->ptr() +
1368  (write_begin - file_buff->start())),
1369  (size_t)write_length, MYF_RW))
1370  goto error;
1371  temp_file_length+= write_length;
1372  }
1373  if (in_hole)
1374  {
1375  /* skip hole */
1376  while (file_buff->end() <= ptr->end &&
1377  file_buffer_start != (my_off_t)-1)
1378  file_buffer_start= file_buff->read_next();
1379  write_begin= ptr->end;
1380  ptr++;
1381  }
1382  else
1383  write_begin= write_end;
1384 
1385  if (write_end == file_buff->end())
1386  file_buffer_start= file_buff->read_next(); /* shift the buffer */
1387 
1388  }
1389 
1390  if (mysql_file_sync(update_temp_file, MYF(MY_WME)) ||
1391  mysql_file_close(update_temp_file, MYF(0)))
1392  DBUG_RETURN(-1);
1393 
1394  share->update_file_opened= FALSE;
1395 
1396  if (share->tina_write_opened)
1397  {
1398  if (mysql_file_close(share->tina_write_filedes, MYF(0)))
1399  DBUG_RETURN(-1);
1400  /*
1401  Mark that the writer fd is closed, so that init_tina_writer()
1402  will reopen it later.
1403  */
1404  share->tina_write_opened= FALSE;
1405  }
1406 
1407  /*
1408  Close opened fildes's. Then move updated file in place
1409  of the old datafile.
1410  */
1411  if (mysql_file_close(data_file, MYF(0)) ||
1412  mysql_file_rename(csv_key_file_data,
1413  fn_format(updated_fname, share->table_name,
1414  "", CSN_EXT,
1415  MY_REPLACE_EXT | MY_UNPACK_FILENAME),
1416  share->data_file_name, MYF(0)))
1417  DBUG_RETURN(-1);
1418 
1419  /* Open the file again */
1420  if ((data_file= mysql_file_open(csv_key_file_data,
1421  share->data_file_name,
1422  O_RDONLY, MYF(MY_WME))) == -1)
1423  DBUG_RETURN(my_errno ? my_errno : -1);
1424  /*
1425  As we reopened the data file, increase share->data_file_version
1426  in order to force other threads waiting on a table lock and
1427  have already opened the table to reopen the data file.
1428  That makes the latest changes become visible to them.
1429  Update local_data_file_version as no need to reopen it in the
1430  current thread.
1431  */
1432  share->data_file_version++;
1433  local_data_file_version= share->data_file_version;
1434  /*
1435  The datafile is consistent at this point and the write filedes is
1436  closed, so nothing worrying will happen to it in case of a crash.
1437  Here we record this fact to the meta-file.
1438  */
1439  (void)write_meta_file(share->meta_file, share->rows_recorded, FALSE);
1440  /*
1441  Update local_saved_data_file_length with the real length of the
1442  data file.
1443  */
1444  local_saved_data_file_length= temp_file_length;
1445  }
1446 
1447  DBUG_RETURN(0);
1448 error:
1449  mysql_file_close(update_temp_file, MYF(0));
1450  share->update_file_opened= FALSE;
1451  DBUG_RETURN(-1);
1452 }
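
/*
  Stand-alone sketch of the compaction performed above, operating on an
  in-memory string instead of streaming through Transparent_file and a
  temporary .CSN file (illustrative only). The holes must be sorted by their
  begin offset and must not overlap, which is what the my_qsort() call above
  provides and what the chain construction assumes.

    #include <string>
    #include <utility>
    #include <vector>

    std::string compact(const std::string &data,
                        const std::vector<std::pair<size_t, size_t> > &holes)
    {
      std::string out;
      size_t pos= 0;
      for (const auto &h : holes)               // h is [begin, end) of a hole
      {
        out.append(data, pos, h.first - pos);   // keep the live span before it
        pos= h.second;                          // then skip the hole itself
      }
      out.append(data, pos, std::string::npos); // tail after the last hole
      return out;
    }

  compact("row1\nrow2\nrow3\n", {{5, 10}}) drops "row2\n" and returns
  "row1\nrow3\n".
*/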
1453 
1454 
1455 /*
1456  Repair the CSV table in case it is crashed.
1457 
1458  SYNOPSIS
1459  repair()
1460  thd The thread, performing repair
1461  check_opt The options for repair. We do not use it currently.
1462 
1463  DESCRIPTION
1464  If the file is empty, change # of rows in the file and complete recovery.
1465  Otherwise, scan the table looking for bad rows. If none were found,
1466  we mark file as a good one and return. If a bad row was encountered,
1467  we truncate the datafile up to the last good row.
1468 
1469  TODO: Make repair more clever - it should try to recover subsequent
1470  rows (after the first bad one) as well.
1471 */
1472 
1473 int ha_tina::repair(THD* thd, HA_CHECK_OPT* check_opt)
1474 {
1475  char repaired_fname[FN_REFLEN];
1476  uchar *buf;
1477  File repair_file;
1478  int rc;
1479  ha_rows rows_repaired= 0;
1480  my_off_t write_begin= 0, write_end;
1481  DBUG_ENTER("ha_tina::repair");
1482 
1483  /* empty file */
1484  if (!share->saved_data_file_length)
1485  {
1486  share->rows_recorded= 0;
1487  goto end;
1488  }
1489 
1490  /* Don't assert in field::val() functions */
1491  table->use_all_columns();
1492  if (!(buf= (uchar*) my_malloc(table->s->reclength, MYF(MY_WME))))
1493  DBUG_RETURN(HA_ERR_OUT_OF_MEM);
1494 
1495  /* position buffer to the start of the file */
1496  if (init_data_file())
1497  DBUG_RETURN(HA_ERR_CRASHED_ON_REPAIR);
1498 
1499  /*
1500  Local_saved_data_file_length is initialized during the lock phase.
1501  Sometimes this does not get executed before ::repair (e.g. for
1502  the log tables). We set it manually here.
1503  */
1504  local_saved_data_file_length= share->saved_data_file_length;
1505  /* set current position to the beginning of the file */
1506  current_position= next_position= 0;
1507 
1508  /* Read the file row-by-row. If everything is ok, repair is not needed. */
1509  while (!(rc= find_current_row(buf)))
1510  {
1511  thd_inc_row_count(thd);
1512  rows_repaired++;
1513  current_position= next_position;
1514  }
1515 
1516  free_root(&blobroot, MYF(0));
1517 
1518  my_free(buf);
1519 
1520  if (rc == HA_ERR_END_OF_FILE)
1521  {
1522  /*
1523  All rows were read ok until end of file, the file does not need repair.
1524  If rows_recorded != rows_repaired, we should update rows_recorded value
1525  to the current amount of rows.
1526  */
1527  share->rows_recorded= rows_repaired;
1528  goto end;
1529  }
1530 
1531  /*
1532  Otherwise we've encountered a bad row => repair is needed.
1533  Let us create a temporary file.
1534  */
1535  if ((repair_file= mysql_file_create(csv_key_file_update,
1536  fn_format(repaired_fname,
1537  share->table_name,
1538  "", CSN_EXT,
1539  MY_REPLACE_EXT|MY_UNPACK_FILENAME),
1540  0, O_RDWR | O_TRUNC, MYF(MY_WME))) < 0)
1541  DBUG_RETURN(HA_ERR_CRASHED_ON_REPAIR);
1542 
1543  file_buff->init_buff(data_file);
1544 
1545 
1546  /* we just truncated the file up to the first bad row. update rows count. */
1547  share->rows_recorded= rows_repaired;
1548 
1549  /* write repaired file */
1550  while (1)
1551  {
1552  write_end= min(file_buff->end(), current_position);
1553  if ((write_end - write_begin) &&
1554  (mysql_file_write(repair_file, (uchar*)file_buff->ptr(),
1555  (size_t) (write_end - write_begin), MYF_RW)))
1556  DBUG_RETURN(-1);
1557 
1558  write_begin= write_end;
1559  if (write_end== current_position)
1560  break;
1561  else
1562  file_buff->read_next(); /* shift the buffer */
1563  }
1564 
1565  /*
1566  Close the files and rename repaired file to the datafile.
1567  We have to close the files, as on Windows one cannot rename
1568  a file whose descriptor is still open. EACCES will be returned
1569  when trying to delete the "to"-file in mysql_file_rename().
1570  */
1571  if (share->tina_write_opened)
1572  {
1573  /*
1574  The data file might be opened twice: at table opening and
1575  during write_row execution. We need to close both instances
1576  to satisfy Windows.
1577  */
1578  if (mysql_file_close(share->tina_write_filedes, MYF(0)))
1579  DBUG_RETURN(my_errno ? my_errno : -1);
1580  share->tina_write_opened= FALSE;
1581  }
1582  if (mysql_file_close(data_file, MYF(0)) ||
1583  mysql_file_close(repair_file, MYF(0)) ||
1584  mysql_file_rename(csv_key_file_data,
1585  repaired_fname, share->data_file_name, MYF(0)))
1586  DBUG_RETURN(-1);
1587 
1588  /* Open the file again, it should now be repaired */
1589  if ((data_file= mysql_file_open(csv_key_file_data,
1590  share->data_file_name, O_RDWR|O_APPEND,
1591  MYF(MY_WME))) == -1)
1592  DBUG_RETURN(my_errno ? my_errno : -1);
1593 
1594  /* Set new file size. The file size will be updated by ::update_status() */
1595  local_saved_data_file_length= (size_t) current_position;
1596 
1597 end:
1598  share->crashed= FALSE;
1599  DBUG_RETURN(HA_ADMIN_OK);
1600 }
1601 
1602 /*
1603  DELETE without WHERE calls this
1604 */
1605 
1606 int ha_tina::delete_all_rows()
1607 {
1608  int rc;
1609  DBUG_ENTER("ha_tina::delete_all_rows");
1610 
1611  if (!records_is_known)
1612  DBUG_RETURN(my_errno=HA_ERR_WRONG_COMMAND);
1613 
1614  if (!share->tina_write_opened)
1615  if (init_tina_writer())
1616  DBUG_RETURN(-1);
1617 
1618  /* Truncate the file to zero size */
1619  rc= mysql_file_chsize(share->tina_write_filedes, 0, 0, MYF(MY_WME));
1620 
1621  stats.records=0;
1622  /* Update shared info */
1623  mysql_mutex_lock(&share->mutex);
1624  share->rows_recorded= 0;
1625  mysql_mutex_unlock(&share->mutex);
1626  local_saved_data_file_length= 0;
1627  DBUG_RETURN(rc);
1628 }
1629 
1630 /*
1631  Called by the database to lock the table. Keep in mind that this
1632  is an internal lock.
1633 */
1635  THR_LOCK_DATA **to,
1636  enum thr_lock_type lock_type)
1637 {
1638  if (lock_type != TL_IGNORE && lock.type == TL_UNLOCK)
1639  lock.type=lock_type;
1640  *to++= &lock;
1641  return to;
1642 }
1643 
1644 /*
1645  Create a table. You do not want to leave the table open after a call to
1646  this (the database will call ::open() if it needs to).
1647 */
1648 
1649 int ha_tina::create(const char *name, TABLE *table_arg,
1650  HA_CREATE_INFO *create_info)
1651 {
1652  char name_buff[FN_REFLEN];
1653  File create_file;
1654  DBUG_ENTER("ha_tina::create");
1655 
1656  /*
1657  check columns
1658  */
1659  for (Field **field= table_arg->s->field; *field; field++)
1660  {
1661  if ((*field)->real_maybe_null())
1662  {
1663  my_error(ER_CHECK_NOT_IMPLEMENTED, MYF(0), "nullable columns");
1664  DBUG_RETURN(HA_ERR_UNSUPPORTED);
1665  }
1666  }
1667 
1668 
1669  if ((create_file= mysql_file_create(csv_key_file_metadata,
1670  fn_format(name_buff, name, "", CSM_EXT,
1671  MY_REPLACE_EXT|MY_UNPACK_FILENAME),
1672  0, O_RDWR | O_TRUNC, MYF(MY_WME))) < 0)
1673  DBUG_RETURN(-1);
1674 
1675  write_meta_file(create_file, 0, FALSE);
1676  mysql_file_close(create_file, MYF(0));
1677 
1678  if ((create_file= mysql_file_create(csv_key_file_data,
1679  fn_format(name_buff, name, "", CSV_EXT,
1680  MY_REPLACE_EXT|MY_UNPACK_FILENAME),
1681  0, O_RDWR | O_TRUNC, MYF(MY_WME))) < 0)
1682  DBUG_RETURN(-1);
1683 
1684  mysql_file_close(create_file, MYF(0));
1685 
1686  DBUG_RETURN(0);
1687 }
1688 
1689 int ha_tina::check(THD* thd, HA_CHECK_OPT* check_opt)
1690 {
1691  int rc= 0;
1692  uchar *buf;
1693  const char *old_proc_info;
1694  ha_rows count= share->rows_recorded;
1695  DBUG_ENTER("ha_tina::check");
1696 
1697  old_proc_info= thd_proc_info(thd, "Checking table");
1698  if (!(buf= (uchar*) my_malloc(table->s->reclength, MYF(MY_WME))))
1699  DBUG_RETURN(HA_ERR_OUT_OF_MEM);
1700 
1701  /* position buffer to the start of the file */
1702  if (init_data_file())
1703  DBUG_RETURN(HA_ERR_CRASHED);
1704 
1705  /*
1706  Local_saved_data_file_length is initialized during the lock phase.
1707  Check does not use store_lock in certain cases. So, we set it
1708  manually here.
1709  */
1710  local_saved_data_file_length= share->saved_data_file_length;
1711  /* set current position to the beginning of the file */
1712  current_position= next_position= 0;
1713 
1714  /* Read the file row-by-row. If everything is ok, repair is not needed. */
1715  while (!(rc= find_current_row(buf)))
1716  {
1717  thd_inc_row_count(thd);
1718  count--;
1719  current_position= next_position;
1720  }
1721 
1722  free_root(&blobroot, MYF(0));
1723 
1724  my_free(buf);
1725  thd_proc_info(thd, old_proc_info);
1726 
1727  if ((rc != HA_ERR_END_OF_FILE) || count)
1728  {
1729  share->crashed= TRUE;
1730  DBUG_RETURN(HA_ADMIN_CORRUPT);
1731  }
1732 
1733  DBUG_RETURN(HA_ADMIN_OK);
1734 }
1735 
1736 
1736 
1737 bool ha_tina::check_if_incompatible_data(HA_CREATE_INFO *info,
1738  uint table_changes)
1739 {
1740  return COMPATIBLE_DATA_YES;
1741 }
1742 
1743 struct st_mysql_storage_engine csv_storage_engine=
1744 { MYSQL_HANDLERTON_INTERFACE_VERSION };
1745 
1746 mysql_declare_plugin(csv)
1747 {
1748  MYSQL_STORAGE_ENGINE_PLUGIN,
1749  &csv_storage_engine,
1750  "CSV",
1751  "Brian Aker, MySQL AB",
1752  "CSV storage engine",
1753  PLUGIN_LICENSE_GPL,
1754  tina_init_func, /* Plugin Init */
1755  tina_done_func, /* Plugin Deinit */
1756  0x0100 /* 1.0 */,
1757  NULL, /* status variables */
1758  NULL, /* system variables */
1759  NULL, /* config options */
1760  0, /* flags */
1761 }
1762 mysql_declare_plugin_end;
1763