MySQL 5.6.14 Source Code Document
buf0rea.cc
1 /*****************************************************************************
2 
3 Copyright (c) 1995, 2013, Oracle and/or its affiliates. All Rights Reserved.
4 
5 This program is free software; you can redistribute it and/or modify it under
6 the terms of the GNU General Public License as published by the Free Software
7 Foundation; version 2 of the License.
8 
9 This program is distributed in the hope that it will be useful, but WITHOUT
10 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
11 FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
12 
13 You should have received a copy of the GNU General Public License along with
14 this program; if not, write to the Free Software Foundation, Inc.,
15 51 Franklin Street, Suite 500, Boston, MA 02110-1335 USA
16 
17 *****************************************************************************/
18 
19 /**************************************************//**
20 @file buf/buf0rea.cc
21 The database buffer read
22 
23 Created 11/5/1995 Heikki Tuuri
24 *******************************************************/
25 
26 #include "buf0rea.h"
27 
28 #include "fil0fil.h"
29 #include "mtr0mtr.h"
30 
31 #include "buf0buf.h"
32 #include "buf0flu.h"
33 #include "buf0lru.h"
34 #include "buf0dblwr.h"
35 #include "ibuf0ibuf.h"
36 #include "log0recv.h"
37 #include "trx0sys.h"
38 #include "os0file.h"
39 #include "srv0start.h"
40 #include "srv0srv.h"
41 #include "mysql/plugin.h"
42 #include "mysql/service_thd_wait.h"
43 
44 /** There must be at least this many recently accessed pages in the
45 read-ahead area before a random read-ahead is triggered */
46 #define BUF_READ_AHEAD_RANDOM_THRESHOLD(b) \
47  (5 + BUF_READ_AHEAD_AREA(b) / 8)
48 
49 /** If the number of pending reads exceeds buf_pool->curr_size divided by
50 this limit, read-ahead is not done: this is to prevent flooding the buffer
51 pool with i/o-fixed buffer blocks */
52 #define BUF_READ_AHEAD_PEND_LIMIT 2
53 
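The two limits above drive every read-ahead decision in this file: BUF_READ_AHEAD_RANDOM_THRESHOLD(b) is the number of recently used pages that must already sit in an area before random read-ahead fires, and BUF_READ_AHEAD_PEND_LIMIT caps how many reads may be pending before read-ahead is skipped altogether. As a rough standalone illustration (not part of buf0rea.cc; the 64-page area and 8192-page pool are assumed example values), the sketch below evaluates the same arithmetic:

/* Editorial sketch: the read-ahead limits with assumed example inputs. */
#include <stdio.h>

int
main(void)
{
	unsigned long	read_ahead_area = 64;	/* assumed BUF_READ_AHEAD_AREA */
	unsigned long	pool_curr_size = 8192;	/* assumed buf_pool->curr_size */

	/* Same formula as BUF_READ_AHEAD_RANDOM_THRESHOLD(b) */
	unsigned long	random_threshold = 5 + read_ahead_area / 8;

	/* Same check as n_pend_reads > curr_size / BUF_READ_AHEAD_PEND_LIMIT */
	unsigned long	pend_limit = pool_curr_size / 2;

	printf("random read-ahead needs >= %lu recently used pages"
	       " out of %lu\n", random_threshold, read_ahead_area);
	printf("read-ahead is skipped once pending reads exceed %lu\n",
	       pend_limit);
	return(0);
}

With these example inputs the random threshold works out to 13 pages and the pending-read cap to 4096.
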
54 /********************************************************************/
57 static
58 void
59 buf_read_page_handle_error(
60 /*=======================*/
61  buf_page_t* bpage)
62 {
63  buf_pool_t* buf_pool = buf_pool_from_bpage(bpage);
64  const bool uncompressed = (buf_page_get_state(bpage)
65  == BUF_BLOCK_FILE_PAGE);
66 
67  /* First unfix and release lock on the bpage */
68  buf_pool_mutex_enter(buf_pool);
69  mutex_enter(buf_page_get_mutex(bpage));
70  ut_ad(buf_page_get_io_fix(bpage) == BUF_IO_READ);
71  ut_ad(bpage->buf_fix_count == 0);
72 
73  /* Set BUF_IO_NONE before we remove the block from LRU list */
74  buf_page_set_io_fix(bpage, BUF_IO_NONE);
75 
76  if (uncompressed) {
77  rw_lock_x_unlock_gen(
78  &((buf_block_t*) bpage)->lock,
79  BUF_IO_READ);
80  }
81 
82  mutex_exit(buf_page_get_mutex(bpage));
83 
84  /* remove the block from LRU list */
85  buf_LRU_free_one_page(bpage);
86 
87  ut_ad(buf_pool->n_pend_reads > 0);
88  buf_pool->n_pend_reads--;
89 
90  buf_pool_mutex_exit(buf_pool);
91 }
92 
93 /********************************************************************/
103 static
104 ulint
105 buf_read_page_low(
106 /*==============*/
107  dberr_t* err,
110  bool sync,
111  ulint mode,
114  ulint space,
115  ulint zip_size,
116  ibool unzip,
117  ib_int64_t tablespace_version,
122  ulint offset)
123 {
124  buf_page_t* bpage;
125  ulint wake_later;
126  ibool ignore_nonexistent_pages;
127 
128  *err = DB_SUCCESS;
129 
130  wake_later = mode & OS_AIO_SIMULATED_WAKE_LATER;
131  mode = mode & ~OS_AIO_SIMULATED_WAKE_LATER;
132 
133  ignore_nonexistent_pages = mode & BUF_READ_IGNORE_NONEXISTENT_PAGES;
134  mode &= ~BUF_READ_IGNORE_NONEXISTENT_PAGES;
135 
136  if (space == TRX_SYS_SPACE && buf_dblwr_page_inside(offset)) {
137  ut_print_timestamp(stderr);
138  fprintf(stderr,
139  " InnoDB: Warning: trying to read"
140  " doublewrite buffer page %lu\n",
141  (ulong) offset);
142 
143  return(0);
144  }
145 
146  if (ibuf_bitmap_page(zip_size, offset)
147  || trx_sys_hdr_page(space, offset)) {
148 
149  /* Trx sys header is so low in the latching order that we play
150  safe and do not leave the i/o-completion to an asynchronous
151  i/o-thread. Ibuf bitmap pages must always be read with
152  synchronous i/o, to make sure they do not get involved in
153  thread deadlocks. */
154 
155  sync = true;
156  }
157 
158  /* The following call will also check if the tablespace does not exist
159  or is being dropped; if we succeed in initing the page in the buffer
160  pool for read, then DISCARD cannot proceed until the read has
161  completed */
162  bpage = buf_page_init_for_read(err, mode, space, zip_size, unzip,
163  tablespace_version, offset);
164  if (bpage == NULL) {
165 
166  return(0);
167  }
168 
169 #ifdef UNIV_DEBUG
170  if (buf_debug_prints) {
171  fprintf(stderr,
172  "Posting read request for page %lu, sync %s\n",
173  (ulong) offset, sync ? "true" : "false");
174  }
175 #endif
176 
177  ut_ad(buf_page_in_file(bpage));
178 
179  if (sync) {
180  thd_wait_begin(NULL, THD_WAIT_DISKIO);
181  }
182 
183  if (zip_size) {
184  *err = fil_io(OS_FILE_READ | wake_later
185  | ignore_nonexistent_pages,
186  sync, space, zip_size, offset, 0, zip_size,
187  bpage->zip.data, bpage);
188  } else {
189  ut_a(buf_page_get_state(bpage) == BUF_BLOCK_FILE_PAGE);
190 
191  *err = fil_io(OS_FILE_READ | wake_later
192  | ignore_nonexistent_pages,
193  sync, space, 0, offset, 0, UNIV_PAGE_SIZE,
194  ((buf_block_t*) bpage)->frame, bpage);
195  }
196 
197  if (sync) {
198  thd_wait_end(NULL);
199  }
200 
201  if (*err != DB_SUCCESS) {
202  if (ignore_nonexistent_pages || *err == DB_TABLESPACE_DELETED) {
203  buf_read_page_handle_error(bpage);
204  return(0);
205  }
206  /* else */
207  ut_error;
208  }
209 
210  if (sync) {
211  /* The i/o is already completed when we arrive from
212  fil_read */
213  if (!buf_page_io_complete(bpage)) {
214  return(0);
215  }
216  }
217 
218  return(1);
219 }
220 
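buf_read_page_low() above begins by peeling the OS_AIO_SIMULATED_WAKE_LATER and BUF_READ_IGNORE_NONEXISTENT_PAGES bits off the mode word, keeping only the base read mode for buf_page_init_for_read(). The self-contained sketch below shows the same bit-masking pattern; the EX_* constants are invented stand-ins, not the real InnoDB flag values.

/* Editorial sketch: stripping option bits from a combined mode word,
as done at the top of buf_read_page_low(). EX_* values are made up. */
#include <stdio.h>

#define EX_READ_ANY_PAGE	0x01	/* stand-in for BUF_READ_ANY_PAGE */
#define EX_WAKE_LATER		0x40	/* stand-in for OS_AIO_SIMULATED_WAKE_LATER */
#define EX_IGNORE_NONEXISTENT	0x80	/* stand-in for BUF_READ_IGNORE_NONEXISTENT_PAGES */

int
main(void)
{
	unsigned long	mode = EX_READ_ANY_PAGE | EX_WAKE_LATER;

	unsigned long	wake_later = mode & EX_WAKE_LATER;
	mode &= ~EX_WAKE_LATER;

	unsigned long	ignore_nonexistent = mode & EX_IGNORE_NONEXISTENT;
	mode &= ~EX_IGNORE_NONEXISTENT;

	printf("base mode %#lx, wake_later %s, ignore_nonexistent %s\n",
	       mode, wake_later ? "yes" : "no",
	       ignore_nonexistent ? "yes" : "no");
	return(0);
}
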
221 /********************************************************************/
235 UNIV_INTERN
236 ulint
237 buf_read_ahead_random(
238 /*==================*/
239  ulint space,
240  ulint zip_size,
242  ulint offset,
244  ibool inside_ibuf)
246 {
247  buf_pool_t* buf_pool = buf_pool_get(space, offset);
248  ib_int64_t tablespace_version;
249  ulint recent_blocks = 0;
250  ulint ibuf_mode;
251  ulint count;
252  ulint low, high;
253  dberr_t err;
254  ulint i;
255  const ulint buf_read_ahead_random_area
256  = BUF_READ_AHEAD_AREA(buf_pool);
257 
258  if (!srv_random_read_ahead) {
259  /* Disabled by user */
260  return(0);
261  }
262 
263  if (srv_startup_is_before_trx_rollback_phase) {
264  /* No read-ahead to avoid thread deadlocks */
265  return(0);
266  }
267 
268  if (ibuf_bitmap_page(zip_size, offset)
269  || trx_sys_hdr_page(space, offset)) {
270 
271  /* If it is an ibuf bitmap page or trx sys hdr, we do
272  no read-ahead, as that could break the ibuf page access
273  order */
274 
275  return(0);
276  }
277 
278  /* Remember the tablespace version before we ask the tablespace size
279  below: if DISCARD + IMPORT changes the actual .ibd file meanwhile, we
280  do not try to read outside the bounds of the tablespace! */
281 
282  tablespace_version = fil_space_get_version(space);
283 
284  low = (offset / buf_read_ahead_random_area)
285  * buf_read_ahead_random_area;
286  high = (offset / buf_read_ahead_random_area + 1)
287  * buf_read_ahead_random_area;
288  if (high > fil_space_get_size(space)) {
289 
290  high = fil_space_get_size(space);
291  }
292 
293  buf_pool_mutex_enter(buf_pool);
294 
295  if (buf_pool->n_pend_reads
296  > buf_pool->curr_size / BUF_READ_AHEAD_PEND_LIMIT) {
297  buf_pool_mutex_exit(buf_pool);
298 
299  return(0);
300  }
301 
302  /* Count how many blocks in the area have been recently accessed,
303  that is, reside near the start of the LRU list. */
304 
305  for (i = low; i < high; i++) {
306  const buf_page_t* bpage =
307  buf_page_hash_get(buf_pool, space, i);
308 
309  if (bpage
310  && buf_page_is_accessed(bpage)
311  && buf_page_peek_if_young(bpage)) {
312 
313  recent_blocks++;
314 
315  if (recent_blocks
316  >= BUF_READ_AHEAD_RANDOM_THRESHOLD(buf_pool)) {
317 
318  buf_pool_mutex_exit(buf_pool);
319  goto read_ahead;
320  }
321  }
322  }
323 
324  buf_pool_mutex_exit(buf_pool);
325  /* Do nothing */
326  return(0);
327 
328 read_ahead:
329  /* Read all the suitable blocks within the area */
330 
331  if (inside_ibuf) {
332  ibuf_mode = BUF_READ_IBUF_PAGES_ONLY;
333  } else {
334  ibuf_mode = BUF_READ_ANY_PAGE;
335  }
336 
337  count = 0;
338 
339  for (i = low; i < high; i++) {
340  /* It is only sensible to do read-ahead in the non-sync aio
341  mode: hence FALSE as the first parameter */
342 
343  if (!ibuf_bitmap_page(zip_size, i)) {
344  count += buf_read_page_low(
345  &err, false,
346  ibuf_mode | OS_AIO_SIMULATED_WAKE_LATER,
347  space, zip_size, FALSE,
348  tablespace_version, i);
349  if (err == DB_TABLESPACE_DELETED) {
350  ut_print_timestamp(stderr);
351  fprintf(stderr,
352  " InnoDB: Warning: in random"
353  " readahead trying to access\n"
354  "InnoDB: tablespace %lu page %lu,\n"
355  "InnoDB: but the tablespace does not"
356  " exist or is just being dropped.\n",
357  (ulong) space, (ulong) i);
358  }
359  }
360  }
361 
362  /* In simulated aio we wake the aio handler threads only after
363  queuing all aio requests, in native aio the following call does
364  nothing: */
365 
366  os_aio_simulated_wake_handler_threads();
367 
368 #ifdef UNIV_DEBUG
369  if (buf_debug_prints && (count > 0)) {
370  fprintf(stderr,
371  "Random read-ahead space %lu offset %lu pages %lu\n",
372  (ulong) space, (ulong) offset,
373  (ulong) count);
374  }
375 #endif /* UNIV_DEBUG */
376 
377  /* Read ahead is considered one I/O operation for the purpose of
378  LRU policy decision. */
379  buf_LRU_stat_inc_io();
380 
381  buf_pool->stat.n_ra_pages_read_rnd += count;
382  srv_stats.buf_pool_reads.add(count);
383  return(count);
384 }
385 
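The low/high bounds scanned by buf_read_ahead_random() above are just the requested page number rounded down and up to the enclosing read-ahead area, with high clamped to the tablespace size. A minimal standalone sketch of that arithmetic, assuming a 64-page area and a 1000-page tablespace (illustrative values only):

/* Editorial sketch: the area arithmetic used by buf_read_ahead_random().
Area and tablespace sizes are assumed example values. */
#include <stdio.h>

int
main(void)
{
	unsigned long	area = 64;		/* assumed BUF_READ_AHEAD_AREA */
	unsigned long	space_size = 1000;	/* assumed fil_space_get_size() */
	unsigned long	offset = 150;		/* page the caller asked for */

	unsigned long	low = (offset / area) * area;
	unsigned long	high = (offset / area + 1) * area;

	if (high > space_size) {
		high = space_size;
	}

	printf("area for page %lu: [%lu, %lu)\n", offset, low, high);
	return(0);
}

For page 150 this yields the area [128, 192); the function then counts how many of those pages are already young in the LRU list and only issues the read-ahead once the count reaches BUF_READ_AHEAD_RANDOM_THRESHOLD(buf_pool).
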
386 /********************************************************************/
392 UNIV_INTERN
393 ibool
394 buf_read_page(
395 /*==========*/
396  ulint space,
397  ulint zip_size,
398  ulint offset)
399 {
400  ib_int64_t tablespace_version;
401  ulint count;
402  dberr_t err;
403 
404  tablespace_version = fil_space_get_version(space);
405 
406  /* We do the i/o in the synchronous aio mode to save thread
407  switches: hence TRUE */
408 
409  count = buf_read_page_low(&err, true, BUF_READ_ANY_PAGE, space,
410  zip_size, FALSE,
411  tablespace_version, offset);
412  srv_stats.buf_pool_reads.add(1);
413  if (err == DB_TABLESPACE_DELETED) {
414  ut_print_timestamp(stderr);
415  fprintf(stderr,
416  " InnoDB: Error: trying to access"
417  " tablespace %lu page no. %lu,\n"
418  "InnoDB: but the tablespace does not exist"
419  " or is just being dropped.\n",
420  (ulong) space, (ulong) offset);
421  }
422 
423  /* Increment number of I/O operations used for LRU policy. */
424  buf_LRU_stat_inc_io();
425 
426  return(count > 0);
427 }
428 
429 /********************************************************************/
435 UNIV_INTERN
436 ibool
437 buf_read_page_async(
438 /*================*/
439  ulint space,
440  ulint offset)
441 {
442  ulint zip_size;
443  ib_int64_t tablespace_version;
444  ulint count;
445  dberr_t err;
446 
447  zip_size = fil_space_get_zip_size(space);
448 
449  if (zip_size == ULINT_UNDEFINED) {
450  return(FALSE);
451  }
452 
453  tablespace_version = fil_space_get_version(space);
454 
455  count = buf_read_page_low(&err, true, BUF_READ_ANY_PAGE
456  | OS_AIO_SIMULATED_WAKE_LATER
457  | BUF_READ_IGNORE_NONEXISTENT_PAGES,
458  space, zip_size, FALSE,
459  tablespace_version, offset);
460  srv_stats.buf_pool_reads.add(1);
461 
462  /* We do not increment the number of I/O operations used for LRU policy
463  here (buf_LRU_stat_inc_io()). That counter is used in heuristics to
464  decide about evicting the uncompressed version of compressed pages from
465  the buffer pool. Since this function is called from buffer pool load,
466  these I/Os are deliberate and are not part of the normal workload, so
467  we can ignore them in our heuristics. */
468 
469  return(count > 0);
470 }
471 
472 /********************************************************************/
496 UNIV_INTERN
497 ulint
498 buf_read_ahead_linear(
499 /*==================*/
500  ulint space,
501  ulint zip_size,
502  ulint offset,
503  ibool inside_ibuf)
504 {
505  buf_pool_t* buf_pool = buf_pool_get(space, offset);
506  ib_int64_t tablespace_version;
507  buf_page_t* bpage;
508  buf_frame_t* frame;
509  buf_page_t* pred_bpage = NULL;
510  ulint pred_offset;
511  ulint succ_offset;
512  ulint count;
513  int asc_or_desc;
514  ulint new_offset;
515  ulint fail_count;
516  ulint ibuf_mode;
517  ulint low, high;
518  dberr_t err;
519  ulint i;
520  const ulint buf_read_ahead_linear_area
521  = BUF_READ_AHEAD_AREA(buf_pool);
522  ulint threshold;
523 
524  /* check if readahead is disabled */
525  if (!srv_read_ahead_threshold) {
526  return(0);
527  }
528 
529  if (UNIV_UNLIKELY(srv_startup_is_before_trx_rollback_phase)) {
530  /* No read-ahead to avoid thread deadlocks */
531  return(0);
532  }
533 
534  low = (offset / buf_read_ahead_linear_area)
535  * buf_read_ahead_linear_area;
536  high = (offset / buf_read_ahead_linear_area + 1)
537  * buf_read_ahead_linear_area;
538 
539  if ((offset != low) && (offset != high - 1)) {
540  /* This is not a border page of the area: return */
541 
542  return(0);
543  }
544 
545  if (ibuf_bitmap_page(zip_size, offset)
546  || trx_sys_hdr_page(space, offset)) {
547 
548  /* If it is an ibuf bitmap page or trx sys hdr, we do
549  no read-ahead, as that could break the ibuf page access
550  order */
551 
552  return(0);
553  }
554 
555  /* Remember the tablespace version before we ask the tablespace size
556  below: if DISCARD + IMPORT changes the actual .ibd file meanwhile, we
557  do not try to read outside the bounds of the tablespace! */
558 
559  tablespace_version = fil_space_get_version(space);
560 
561  buf_pool_mutex_enter(buf_pool);
562 
563  if (high > fil_space_get_size(space)) {
564  buf_pool_mutex_exit(buf_pool);
565  /* The area is not whole, return */
566 
567  return(0);
568  }
569 
570  if (buf_pool->n_pend_reads
571  > buf_pool->curr_size / BUF_READ_AHEAD_PEND_LIMIT) {
572  buf_pool_mutex_exit(buf_pool);
573 
574  return(0);
575  }
576 
577  /* Check that almost all pages in the area have been accessed; if
578  offset == low, the accesses must be in a descending order, otherwise,
579  in an ascending order. */
580 
581  asc_or_desc = 1;
582 
583  if (offset == low) {
584  asc_or_desc = -1;
585  }
586 
587  /* How many out of order accessed pages can we ignore
588  when working out the access pattern for linear readahead */
589  threshold = ut_min((64 - srv_read_ahead_threshold),
590  BUF_READ_AHEAD_AREA(buf_pool));
591 
592  fail_count = 0;
593 
594  for (i = low; i < high; i++) {
595  bpage = buf_page_hash_get(buf_pool, space, i);
596 
597  if (bpage == NULL || !buf_page_is_accessed(bpage)) {
598  /* Not accessed */
599  fail_count++;
600 
601  } else if (pred_bpage) {
602  /* Note that buf_page_is_accessed() returns
603  the time of the first access. If some blocks
604  of the extent existed in the buffer pool at
605  the time of a linear access pattern, the first
606  access times may be nonmonotonic, even though
607  the latest access times were linear. The
608  threshold (srv_read_ahead_threshold) should help
609  a little against this. */
610  int res = ut_ulint_cmp(
611  buf_page_is_accessed(bpage),
612  buf_page_is_accessed(pred_bpage));
613  /* Accesses not in the right order */
614  if (res != 0 && res != asc_or_desc) {
615  fail_count++;
616  }
617  }
618 
619  if (fail_count > threshold) {
620  /* Too many failures: return */
621  buf_pool_mutex_exit(buf_pool);
622  return(0);
623  }
624 
625  if (bpage && buf_page_is_accessed(bpage)) {
626  pred_bpage = bpage;
627  }
628  }
629 
630  /* If we got this far, we know that enough pages in the area have
631  been accessed in the right order: linear read-ahead can be sensible */
632 
633  bpage = buf_page_hash_get(buf_pool, space, offset);
634 
635  if (bpage == NULL) {
636  buf_pool_mutex_exit(buf_pool);
637 
638  return(0);
639  }
640 
641  switch (buf_page_get_state(bpage)) {
642  case BUF_BLOCK_ZIP_PAGE:
643  frame = bpage->zip.data;
644  break;
645  case BUF_BLOCK_FILE_PAGE:
646  frame = ((buf_block_t*) bpage)->frame;
647  break;
648  default:
649  ut_error;
650  break;
651  }
652 
653  /* Read the natural predecessor and successor page addresses from
654  the page; NOTE that because the calling thread may have an x-latch
655  on the page, we do not acquire an s-latch on the page, this is to
656  prevent deadlocks. Even if we read values which are nonsense, the
657  algorithm will work. */
658 
659  pred_offset = fil_page_get_prev(frame);
660  succ_offset = fil_page_get_next(frame);
661 
662  buf_pool_mutex_exit(buf_pool);
663 
664  if ((offset == low) && (succ_offset == offset + 1)) {
665 
666  /* This is ok, we can continue */
667  new_offset = pred_offset;
668 
669  } else if ((offset == high - 1) && (pred_offset == offset - 1)) {
670 
671  /* This is ok, we can continue */
672  new_offset = succ_offset;
673  } else {
674  /* Successor or predecessor not in the right order */
675 
676  return(0);
677  }
678 
679  low = (new_offset / buf_read_ahead_linear_area)
680  * buf_read_ahead_linear_area;
681  high = (new_offset / buf_read_ahead_linear_area + 1)
682  * buf_read_ahead_linear_area;
683 
684  if ((new_offset != low) && (new_offset != high - 1)) {
685  /* This is not a border page of the area: return */
686 
687  return(0);
688  }
689 
690  if (high > fil_space_get_size(space)) {
691  /* The area is not whole, return */
692 
693  return(0);
694  }
695 
696  /* If we got this far, read-ahead can be sensible: do it */
697 
698  ibuf_mode = inside_ibuf
699  ? BUF_READ_IBUF_PAGES_ONLY | OS_AIO_SIMULATED_WAKE_LATER
700  : BUF_READ_ANY_PAGE | OS_AIO_SIMULATED_WAKE_LATER;
701 
702  count = 0;
703 
704  /* Since Windows XP seems to schedule the i/o handler thread
705  very eagerly, and consequently it does not wait for the
706  full read batch to be posted, we use special heuristics here */
707 
709 
710  for (i = low; i < high; i++) {
711  /* It is only sensible to do read-ahead in the non-sync
712  aio mode: hence FALSE as the first parameter */
713 
714  if (!ibuf_bitmap_page(zip_size, i)) {
715  count += buf_read_page_low(
716  &err, false,
717  ibuf_mode,
718  space, zip_size, FALSE, tablespace_version, i);
719  if (err == DB_TABLESPACE_DELETED) {
720  ut_print_timestamp(stderr);
721  fprintf(stderr,
722  " InnoDB: Warning: in"
723  " linear readahead trying to access\n"
724  "InnoDB: tablespace %lu page %lu,\n"
725  "InnoDB: but the tablespace does not"
726  " exist or is just being dropped.\n",
727  (ulong) space, (ulong) i);
728  }
729  }
730  }
731 
732  /* In simulated aio we wake the aio handler threads only after
733  queuing all aio requests, in native aio the following call does
734  nothing: */
735 
736  os_aio_simulated_wake_handler_threads();
737 
738 #ifdef UNIV_DEBUG
739  if (buf_debug_prints && (count > 0)) {
740  fprintf(stderr,
741  "LINEAR read-ahead space %lu offset %lu pages %lu\n",
742  (ulong) space, (ulong) offset, (ulong) count);
743  }
744 #endif /* UNIV_DEBUG */
745 
746  /* Read ahead is considered one I/O operation for the purpose of
747  LRU policy decision. */
748  buf_LRU_stat_inc_io();
749 
750  buf_pool->stat.n_ra_pages_read += count;
751  return(count);
752 }
753 
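Two pieces of the logic above are easy to check in isolation: linear read-ahead only fires when the requested page is the first or last page of its area, and it tolerates at most ut_min(64 - srv_read_ahead_threshold, BUF_READ_AHEAD_AREA) out-of-order accesses. The standalone sketch below reproduces just that arithmetic; the 64-page area is an assumed value, 56 is the default of innodb_read_ahead_threshold, and ut_min_ul() is a local stand-in for ut_min():

/* Editorial sketch: border-page test and failure tolerance of
buf_read_ahead_linear(), with assumed example inputs. */
#include <stdio.h>

static unsigned long
ut_min_ul(unsigned long a, unsigned long b)	/* stand-in for ut_min() */
{
	return(a < b ? a : b);
}

int
main(void)
{
	unsigned long	area = 64;		/* assumed BUF_READ_AHEAD_AREA */
	unsigned long	ra_threshold = 56;	/* innodb_read_ahead_threshold default */
	unsigned long	offset = 191;		/* page the caller asked for */
	unsigned long	low = (offset / area) * area;
	unsigned long	high = (offset / area + 1) * area;
	unsigned long	tolerated;

	/* Linear read-ahead is only attempted on a border page of the area. */
	if (offset != low && offset != high - 1) {
		printf("page %lu is not a border page: no read-ahead\n",
		       offset);
		return(0);
	}

	/* Number of out-of-order pages tolerated before giving up. */
	tolerated = ut_min_ul(64 - ra_threshold, area);

	printf("page %lu is a border page; up to %lu out-of-order pages"
	       " are tolerated\n", offset, tolerated);
	return(0);
}

With these example values 8 misses are allowed per 64-page area; lowering innodb_read_ahead_threshold makes the access-pattern check more forgiving.
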
754 /********************************************************************/
758 UNIV_INTERN
759 void
760 buf_read_ibuf_merge_pages(
761 /*======================*/
762  bool sync,
767  const ulint* space_ids,
768  const ib_int64_t* space_versions,
775  const ulint* page_nos,
779  ulint n_stored)
781 {
782  ulint i;
783 
784 #ifdef UNIV_IBUF_DEBUG
785  ut_a(n_stored < UNIV_PAGE_SIZE);
786 #endif
787 
788  for (i = 0; i < n_stored; i++) {
789  dberr_t err;
790  buf_pool_t* buf_pool;
791  ulint zip_size = fil_space_get_zip_size(space_ids[i]);
792 
793  buf_pool = buf_pool_get(space_ids[i], page_nos[i]);
794 
795  while (buf_pool->n_pend_reads
796  > buf_pool->curr_size / BUF_READ_AHEAD_PEND_LIMIT) {
797  os_thread_sleep(500000);
798  }
799 
800  if (UNIV_UNLIKELY(zip_size == ULINT_UNDEFINED)) {
801 
802  goto tablespace_deleted;
803  }
804 
805  buf_read_page_low(&err, sync && (i + 1 == n_stored),
806  BUF_READ_ANY_PAGE, space_ids[i],
807  zip_size, TRUE, space_versions[i],
808  page_nos[i]);
809 
810  if (UNIV_UNLIKELY(err == DB_TABLESPACE_DELETED)) {
811 tablespace_deleted:
812  /* We have deleted or are deleting the single-table
813  tablespace: remove the entries for that page */
814 
815  ibuf_merge_or_delete_for_page(NULL, space_ids[i],
816  page_nos[i],
817  zip_size, FALSE);
818  }
819  }
820 
821  os_aio_simulated_wake_handler_threads();
822 
823 #ifdef UNIV_DEBUG
824  if (buf_debug_prints) {
825  fprintf(stderr,
826  "Ibuf merge read-ahead space %lu pages %lu\n",
827  (ulong) space_ids[0], (ulong) n_stored);
828  }
829 #endif /* UNIV_DEBUG */
830 }
831 
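buf_read_ibuf_merge_pages() above throttles itself with the same BUF_READ_AHEAD_PEND_LIMIT rule used by the read-ahead functions: while more than curr_size / 2 reads are pending, it sleeps in 500000-microsecond steps before queuing the next page. A small standalone sketch of that check, with assumed pool and pending-read counts:

/* Editorial sketch: the pending-read throttle in buf_read_ibuf_merge_pages().
Pool size and pending-read count are assumed example values. */
#include <stdio.h>

int
main(void)
{
	unsigned long	pool_curr_size = 8192;	/* assumed buf_pool->curr_size */
	unsigned long	pend_limit = 2;		/* BUF_READ_AHEAD_PEND_LIMIT */
	unsigned long	n_pend_reads = 5000;	/* assumed pending reads */

	if (n_pend_reads > pool_curr_size / pend_limit) {
		printf("too many pending reads (%lu > %lu):"
		       " sleep 0.5 s and re-check\n",
		       n_pend_reads, pool_curr_size / pend_limit);
	} else {
		printf("ok to queue the next ibuf merge read\n");
	}
	return(0);
}
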
832 /********************************************************************/
834 UNIV_INTERN
835 void
836 buf_read_recv_pages(
837 /*================*/
838  ibool sync,
843  ulint space,
844  ulint zip_size,
846  const ulint* page_nos,
850  ulint n_stored)
852 {
853  ib_int64_t tablespace_version;
854  ulint count;
855  dberr_t err;
856  ulint i;
857 
858  zip_size = fil_space_get_zip_size(space);
859 
860  if (UNIV_UNLIKELY(zip_size == ULINT_UNDEFINED)) {
861  /* It is a single table tablespace and the .ibd file is
862  missing: do nothing */
863 
864  return;
865  }
866 
867  tablespace_version = fil_space_get_version(space);
868 
869  for (i = 0; i < n_stored; i++) {
870  buf_pool_t* buf_pool;
871 
872  count = 0;
873 
874  os_aio_print_debug = FALSE;
875  buf_pool = buf_pool_get(space, page_nos[i]);
876  while (buf_pool->n_pend_reads >= recv_n_pool_free_frames / 2) {
877 
878  os_aio_simulated_wake_handler_threads();
879  os_thread_sleep(10000);
880 
881  count++;
882 
883  if (count > 1000) {
884  fprintf(stderr,
885  "InnoDB: Error: InnoDB has waited for"
886  " 10 seconds for pending\n"
887  "InnoDB: reads to the buffer pool to"
888  " be finished.\n"
889  "InnoDB: Number of pending reads %lu,"
890  " pending pread calls %lu\n",
891  (ulong) buf_pool->n_pend_reads,
892  (ulong) os_file_n_pending_preads);
893 
894  os_aio_print_debug = TRUE;
895  }
896  }
897 
898  os_aio_print_debug = FALSE;
899 
900  if ((i + 1 == n_stored) && sync) {
901  buf_read_page_low(&err, true, BUF_READ_ANY_PAGE, space,
902  zip_size, TRUE, tablespace_version,
903  page_nos[i]);
904  } else {
905  buf_read_page_low(&err, false, BUF_READ_ANY_PAGE
906  | OS_AIO_SIMULATED_WAKE_LATER,
907  space, zip_size, TRUE,
908  tablespace_version, page_nos[i]);
909  }
910  }
911 
912  os_aio_simulated_wake_handler_threads();
913 
914 #ifdef UNIV_DEBUG
915  if (buf_debug_prints) {
916  fprintf(stderr,
917  "Recovery applies read-ahead pages %lu\n",
918  (ulong) n_stored);
919  }
920 #endif /* UNIV_DEBUG */
921 }
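
The wait loop in buf_read_recv_pages() above polls every 10000 microseconds and prints its warning once the counter exceeds 1000 iterations, i.e. after roughly ten seconds. A trivial standalone sketch of that arithmetic, with the constants copied from the loop:

/* Editorial sketch: how long buf_read_recv_pages() waits before warning. */
#include <stdio.h>

int
main(void)
{
	unsigned long	sleep_us = 10000;	/* os_thread_sleep() interval */
	unsigned long	warn_after = 1000;	/* iterations before the warning */

	printf("warning appears after about %lu seconds of waiting\n",
	       sleep_us * warn_after / 1000000);
	return(0);
}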