Groonga 3.0.9 Source Code Document
 All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Macros Pages
gc.c
Go to the documentation of this file.
1 /*
2 ** gc.c - garbage collector for mruby
3 **
4 ** See Copyright Notice in mruby.h
5 */
6 
7 #ifndef SIZE_MAX
8  /* Some versions of VC++
 9  * have SIZE_MAX in stdint.h
10  */
11 # include <limits.h>
12 #endif
13 #include <string.h>
14 #include <stdlib.h>
15 #include "mruby.h"
16 #include "mruby/array.h"
17 #include "mruby/class.h"
18 #include "mruby/data.h"
19 #include "mruby/hash.h"
20 #include "mruby/proc.h"
21 #include "mruby/range.h"
22 #include "mruby/string.h"
23 #include "mruby/variable.h"
24 #include "mruby/gc.h"
25 
26 /*
27  = Tri-color Incremental Garbage Collection
28 
29  mruby's GC is Tri-color Incremental GC with Mark & Sweep.
30  Algorithm details are omitted.
31  Instead, the part about the implementation described below.
32 
33  == Object's Color
34 
35  Each object to be painted in three colors.
36 
37  * White - Unmarked.
38  * Gray - Marked, But the child objects are unmarked.
39  * Black - Marked, the child objects are also marked.
40 
41  == Two White Types
42 
 43  There're two white color types in a flip-flop fashion: White-A and White-B,
44  which respectively represent the Current White color (the newly allocated
45  objects in the current GC cycle) and the Sweep Target White color (the
46  dead objects to be swept).
47 
48  A and B will be switched just at the beginning of the next GC cycle. At
49  that time, all the dead objects have been swept, while the newly created
50  objects in the current GC cycle which finally remains White are now
51  regarded as dead objects. Instead of traversing all the White-A objects and
52  paint them as White-B, just switch the meaning of White-A and White-B would
53  be much cheaper.
54 
55  As a result, the objects we sweep in the current GC cycle are always
56  left from the previous GC cycle. This allows us to sweep objects
57  incrementally, without the disturbance of the newly created objects.
58 
59  == Execution Timing
60 
61  GC Execution Time and Each step interval are decided by live objects count.
62  List of Adjustment API:
63 
64  * gc_interval_ratio_set
65  * gc_step_ratio_set
66 
67  For details, see the comments for each function.
68 
69  == Write Barrier
70 
71  mruby implementer, C extension library writer must write a write
72  barrier when writing a pointer to an object on object's field.
73  Two different write barrier:
74 
75  * mrb_field_write_barrier
76  * mrb_write_barrier
77 
78  == Generational Mode
79 
 80  mruby's GC offers a Generational Mode while re-using the tri-color GC
81  infrastructure. It will treat the Black objects as Old objects after each
82  sweep phase, instead of paint them to White. The key idea are still same as
83  the traditional generational GC:
84 
85  * Minor GC - just traverse the Young objects (Gray objects) in the mark
86  phase, then only sweep the newly created objects, and leave
87  the Old objects live.
88 
89  * Major GC - same as a full regular GC cycle.
90 
 91  the difference from "traditional" generational GC is that, the major GC
92  in mruby is triggered incrementally in a tri-color manner.
93 
94 
95  For details, see the comments for each function.
96 
97 */
98 
99 struct free_obj {
101  struct RBasic *next;
102 };
103 
104 typedef struct {
105  union {
106  struct free_obj free;
107  struct RBasic basic;
108  struct RObject object;
109  struct RClass klass;
110  struct RString string;
111  struct RArray array;
112  struct RHash hash;
113  struct RRange range;
114  struct RData data;
115  struct RProc proc;
116  } as;
117 } RVALUE;
118 
#ifdef GC_PROFILE
#include <stdio.h>
#include <sys/time.h>

static double program_invoke_time = 0;
static double gc_time = 0;
static double gc_total_time = 0;

/* Current wall-clock time in seconds (microsecond resolution). */
static double
gettimeofday_time(void)
{
  struct timeval tv;
  gettimeofday(&tv, NULL);
  return tv.tv_sec + tv.tv_usec * 1e-6;
}

/* Report when and in which mode a GC entry point was invoked. */
#define GC_INVOKE_TIME_REPORT(with) do {\
  fprintf(stderr, "%s\n", with);\
  fprintf(stderr, "gc_invoke: %19.3f\n", gettimeofday_time() - program_invoke_time);\
  fprintf(stderr, "is_generational: %d\n", is_generational(mrb));\
  fprintf(stderr, "is_major_gc: %d\n", is_major_gc(mrb));\
} while(0)

#define GC_TIME_START do {\
  gc_time = gettimeofday_time();\
} while(0)

#define GC_TIME_STOP_AND_REPORT do {\
  gc_time = gettimeofday_time() - gc_time;\
  gc_total_time += gc_time;\
  fprintf(stderr, "gc_state: %d\n", mrb->gc_state);\
  fprintf(stderr, "live: %zu\n", mrb->live);\
  fprintf(stderr, "majorgc_old_threshold: %zu\n", mrb->majorgc_old_threshold);\
  fprintf(stderr, "gc_threshold: %zu\n", mrb->gc_threshold);\
  fprintf(stderr, "gc_time: %30.20f\n", gc_time);\
  fprintf(stderr, "gc_total_time: %30.20f\n\n", gc_total_time);\
} while(0)
#else
/* Profiling disabled: the reporting macros expand to nothing. */
#define GC_INVOKE_TIME_REPORT(s)
#define GC_TIME_START
#define GC_TIME_STOP_AND_REPORT
#endif

#ifdef GC_DEBUG
#define DEBUG(x) (x)
#else
#define DEBUG(x)
#endif

/* Baseline amount of marking/sweeping work per incremental step. */
#define GC_STEP_SIZE 1024
170 
171 void*
172 mrb_realloc_simple(mrb_state *mrb, void *p, size_t len)
173 {
174  void *p2;
175 
176  p2 = (mrb->allocf)(mrb, p, len, mrb->ud);
177  if (!p2 && len > 0 && mrb->heaps) {
178  mrb_full_gc(mrb);
179  p2 = (mrb->allocf)(mrb, p, len, mrb->ud);
180  }
181 
182  return p2;
183 }
184 
185 
186 void*
187 mrb_realloc(mrb_state *mrb, void *p, size_t len)
188 {
189  void *p2;
190 
191  p2 = mrb_realloc_simple(mrb, p, len);
192  if (!p2 && len) {
193  if (mrb->out_of_memory) {
194  /* mrb_panic(mrb); */
195  }
196  else {
197  mrb->out_of_memory = TRUE;
198  mrb_raise(mrb, E_RUNTIME_ERROR, "Out of memory");
199  }
200  }
201  else {
202  mrb->out_of_memory = FALSE;
203  }
204 
205  return p2;
206 }
207 
208 void*
209 mrb_malloc(mrb_state *mrb, size_t len)
210 {
211  return mrb_realloc(mrb, 0, len);
212 }
213 
214 void*
215 mrb_malloc_simple(mrb_state *mrb, size_t len)
216 {
217  return mrb_realloc_simple(mrb, 0, len);
218 }
219 
220 void*
221 mrb_calloc(mrb_state *mrb, size_t nelem, size_t len)
222 {
223  void *p;
224 
225  if (nelem > 0 && len > 0 &&
226  nelem <= SIZE_MAX / len) {
227  size_t size;
228  size = nelem * len;
229  p = mrb_realloc(mrb, 0, size);
230 
231  if (p) {
232  memset(p, 0, size);
233  }
234  }
235  else {
236  p = NULL;
237  }
238 
239  return p;
240 }
241 
242 void
243 mrb_free(mrb_state *mrb, void *p)
244 {
245  (mrb->allocf)(mrb, p, 0, mrb->ud);
246 }
247 
248 #ifndef MRB_HEAP_PAGE_SIZE
249 #define MRB_HEAP_PAGE_SIZE 1024
250 #endif
251 
252 struct heap_page {
253  struct RBasic *freelist;
254  struct heap_page *prev;
255  struct heap_page *next;
260 };
261 
262 static void
263 link_heap_page(mrb_state *mrb, struct heap_page *page)
264 {
265  page->next = mrb->heaps;
266  if (mrb->heaps)
267  mrb->heaps->prev = page;
268  mrb->heaps = page;
269 }
270 
271 static void
272 unlink_heap_page(mrb_state *mrb, struct heap_page *page)
273 {
274  if (page->prev)
275  page->prev->next = page->next;
276  if (page->next)
277  page->next->prev = page->prev;
278  if (mrb->heaps == page)
279  mrb->heaps = page->next;
280  page->prev = NULL;
281  page->next = NULL;
282 }
283 
284 static void
285 link_free_heap_page(mrb_state *mrb, struct heap_page *page)
286 {
287  page->free_next = mrb->free_heaps;
288  if (mrb->free_heaps) {
289  mrb->free_heaps->free_prev = page;
290  }
291  mrb->free_heaps = page;
292 }
293 
294 static void
295 unlink_free_heap_page(mrb_state *mrb, struct heap_page *page)
296 {
297  if (page->free_prev)
298  page->free_prev->free_next = page->free_next;
299  if (page->free_next)
300  page->free_next->free_prev = page->free_prev;
301  if (mrb->free_heaps == page)
302  mrb->free_heaps = page->free_next;
303  page->free_prev = NULL;
304  page->free_next = NULL;
305 }
306 
307 static void
308 add_heap(mrb_state *mrb)
309 {
310  struct heap_page *page = (struct heap_page *)mrb_calloc(mrb, 1, sizeof(struct heap_page));
311  RVALUE *p, *e;
312  struct RBasic *prev = NULL;
313 
314  for (p = page->objects, e=p+MRB_HEAP_PAGE_SIZE; p<e; p++) {
315  p->as.free.tt = MRB_TT_FREE;
316  p->as.free.next = prev;
317  prev = &p->as.basic;
318  }
319  page->freelist = prev;
320 
321  link_heap_page(mrb, page);
322  link_free_heap_page(mrb, page);
323 }
324 
325 #define DEFAULT_GC_INTERVAL_RATIO 200
326 #define DEFAULT_GC_STEP_RATIO 200
327 #define DEFAULT_MAJOR_GC_INC_RATIO 200
328 #define is_generational(mrb) ((mrb)->is_generational_gc_mode)
329 #define is_major_gc(mrb) (is_generational(mrb) && (mrb)->gc_full)
330 #define is_minor_gc(mrb) (is_generational(mrb) && !(mrb)->gc_full)
331 
332 void
334 {
335  mrb->heaps = NULL;
336  mrb->free_heaps = NULL;
337  add_heap(mrb);
340 #ifndef MRB_GC_TURN_OFF_GENERATIONAL
342  mrb->gc_full = TRUE;
343 #endif
344 
345 #ifdef GC_PROFILE
346  program_invoke_time = gettimeofday_time();
347 #endif
348 }
349 
350 static void obj_free(mrb_state *mrb, struct RBasic *obj);
351 
352 void
354 {
355  struct heap_page *page = mrb->heaps;
356  struct heap_page *tmp;
357  RVALUE *p, *e;
358 
359  while (page) {
360  tmp = page;
361  page = page->next;
362  for (p = tmp->objects, e=p+MRB_HEAP_PAGE_SIZE; p<e; p++) {
363  if (p->as.free.tt != MRB_TT_FREE)
364  obj_free(mrb, &p->as.basic);
365  }
366  mrb_free(mrb, tmp);
367  }
368 }
369 
370 static void
371 gc_protect(mrb_state *mrb, struct RBasic *p)
372 {
373  if (mrb->arena_idx >= MRB_ARENA_SIZE) {
374  /* arena overflow error */
375  mrb->arena_idx = MRB_ARENA_SIZE - 4; /* force room in arena */
376  mrb_raise(mrb, E_RUNTIME_ERROR, "arena overflow error");
377  }
378  mrb->arena[mrb->arena_idx++] = p;
379 }
380 
381 void
383 {
384  if (mrb_special_const_p(obj)) return;
385  gc_protect(mrb, mrb_basic_ptr(obj));
386 }
387 
388 struct RBasic*
389 mrb_obj_alloc(mrb_state *mrb, enum mrb_vtype ttype, struct RClass *cls)
390 {
391  struct RBasic *p;
392  static const RVALUE RVALUE_zero = { { { MRB_TT_FALSE } } };
393 
394 #ifdef MRB_GC_STRESS
395  mrb_full_gc(mrb);
396 #endif
397  if (mrb->gc_threshold < mrb->live) {
398  mrb_incremental_gc(mrb);
399  }
400  if (mrb->free_heaps == NULL) {
401  add_heap(mrb);
402  }
403 
404  p = mrb->free_heaps->freelist;
405  mrb->free_heaps->freelist = ((struct free_obj*)p)->next;
406  if (mrb->free_heaps->freelist == NULL) {
407  unlink_free_heap_page(mrb, mrb->free_heaps);
408  }
409 
410  mrb->live++;
411  gc_protect(mrb, p);
412  *(RVALUE *)p = RVALUE_zero;
413  p->tt = ttype;
414  p->c = cls;
415  paint_partial_white(mrb, p);
416  return p;
417 }
418 
419 static inline void
420 add_gray_list(mrb_state *mrb, struct RBasic *obj)
421 {
422 #ifdef MRB_GC_STRESS
423  if (obj->tt > MRB_TT_MAXDEFINE) {
424  abort();
425  }
426 #endif
427  paint_gray(obj);
428  obj->gcnext = mrb->gray_list;
429  mrb->gray_list = obj;
430 }
431 
432 static void
433 mark_context_stack(mrb_state *mrb, struct mrb_context *c)
434 {
435  size_t i;
436  size_t e;
437 
438  e = c->stack - c->stbase;
439  if (c->ci) e += c->ci->nregs;
440  if (c->stbase + e > c->stend) e = c->stend - c->stbase;
441  for (i=0; i<e; i++) {
442  mrb_gc_mark_value(mrb, c->stbase[i]);
443  }
444 }
445 
446 static void
447 mark_context(mrb_state *mrb, struct mrb_context *c)
448 {
449  size_t i;
450  size_t e;
451  mrb_callinfo *ci;
452 
453  /* mark stack */
454  mark_context_stack(mrb, c);
455 
456  /* mark ensure stack */
457  e = (c->ci) ? c->ci->eidx : 0;
458  for (i=0; i<e; i++) {
459  mrb_gc_mark(mrb, (struct RBasic*)c->ensure[i]);
460  }
461  /* mark closure */
462  for (ci = c->cibase; ci <= c->ci; ci++) {
463  if (!ci) continue;
464  mrb_gc_mark(mrb, (struct RBasic*)ci->env);
465  mrb_gc_mark(mrb, (struct RBasic*)ci->proc);
466  mrb_gc_mark(mrb, (struct RBasic*)ci->target_class);
467  }
468  if (c->prev && c->prev->fib) {
469  mrb_gc_mark(mrb, (struct RBasic*)c->prev->fib);
470  }
471 }
472 
473 static void
474 gc_mark_children(mrb_state *mrb, struct RBasic *obj)
475 {
476  mrb_assert(is_gray(obj));
477  paint_black(obj);
478  mrb->gray_list = obj->gcnext;
479  mrb_gc_mark(mrb, (struct RBasic*)obj->c);
480  switch (obj->tt) {
481  case MRB_TT_ICLASS:
482  mrb_gc_mark(mrb, (struct RBasic*)((struct RClass*)obj)->super);
483  break;
484 
485  case MRB_TT_CLASS:
486  case MRB_TT_MODULE:
487  case MRB_TT_SCLASS:
488  {
489  struct RClass *c = (struct RClass*)obj;
490 
491  mrb_gc_mark_mt(mrb, c);
492  mrb_gc_mark(mrb, (struct RBasic*)c->super);
493  }
494  /* fall through */
495 
496  case MRB_TT_OBJECT:
497  case MRB_TT_DATA:
498  mrb_gc_mark_iv(mrb, (struct RObject*)obj);
499  break;
500 
501  case MRB_TT_PROC:
502  {
503  struct RProc *p = (struct RProc*)obj;
504 
505  mrb_gc_mark(mrb, (struct RBasic*)p->env);
506  mrb_gc_mark(mrb, (struct RBasic*)p->target_class);
507  }
508  break;
509 
510  case MRB_TT_ENV:
511  {
512  struct REnv *e = (struct REnv*)obj;
513 
514  if (e->cioff < 0) {
515  int i, len;
516 
517  len = (int)e->flags;
518  for (i=0; i<len; i++) {
519  mrb_gc_mark_value(mrb, e->stack[i]);
520  }
521  }
522  }
523  break;
524 
525  case MRB_TT_FIBER:
526  {
527  struct mrb_context *c = ((struct RFiber*)obj)->cxt;
528 
529  mark_context(mrb, c);
530  }
531  break;
532 
533  case MRB_TT_ARRAY:
534  {
535  struct RArray *a = (struct RArray*)obj;
536  size_t i, e;
537 
538  for (i=0,e=a->len; i<e; i++) {
539  mrb_gc_mark_value(mrb, a->ptr[i]);
540  }
541  }
542  break;
543 
544  case MRB_TT_HASH:
545  mrb_gc_mark_iv(mrb, (struct RObject*)obj);
546  mrb_gc_mark_hash(mrb, (struct RHash*)obj);
547  break;
548 
549  case MRB_TT_STRING:
550  break;
551 
552  case MRB_TT_RANGE:
553  {
554  struct RRange *r = (struct RRange*)obj;
555 
556  if (r->edges) {
557  mrb_gc_mark_value(mrb, r->edges->beg);
558  mrb_gc_mark_value(mrb, r->edges->end);
559  }
560  }
561  break;
562 
563  default:
564  break;
565  }
566 }
567 
568 void
569 mrb_gc_mark(mrb_state *mrb, struct RBasic *obj)
570 {
571  if (obj == 0) return;
572  if (!is_white(obj)) return;
573  mrb_assert((obj)->tt != MRB_TT_FREE);
574  add_gray_list(mrb, obj);
575 }
576 
577 static void
578 obj_free(mrb_state *mrb, struct RBasic *obj)
579 {
580  DEBUG(printf("obj_free(%p,tt=%d)\n",obj,obj->tt));
581  switch (obj->tt) {
582  /* immediate - no mark */
583  case MRB_TT_TRUE:
584  case MRB_TT_FIXNUM:
585  case MRB_TT_SYMBOL:
586  /* cannot happen */
587  return;
588 
589  case MRB_TT_FLOAT:
590 #ifdef MRB_WORD_BOXING
591  break;
592 #else
593  return;
594 #endif
595 
596  case MRB_TT_OBJECT:
597  mrb_gc_free_iv(mrb, (struct RObject*)obj);
598  break;
599 
600  case MRB_TT_CLASS:
601  case MRB_TT_MODULE:
602  case MRB_TT_SCLASS:
603  mrb_gc_free_mt(mrb, (struct RClass*)obj);
604  mrb_gc_free_iv(mrb, (struct RObject*)obj);
605  break;
606 
607  case MRB_TT_ENV:
608  {
609  struct REnv *e = (struct REnv*)obj;
610 
611  if (e->cioff < 0) {
612  mrb_free(mrb, e->stack);
613  e->stack = NULL;
614  }
615  }
616  break;
617 
618  case MRB_TT_FIBER:
619  {
620  struct mrb_context *c = ((struct RFiber*)obj)->cxt;
621 
622  mrb_free_context(mrb, c);
623  }
624  break;
625 
626  case MRB_TT_ARRAY:
627  if (obj->flags & MRB_ARY_SHARED)
628  mrb_ary_decref(mrb, ((struct RArray*)obj)->aux.shared);
629  else
630  mrb_free(mrb, ((struct RArray*)obj)->ptr);
631  break;
632 
633  case MRB_TT_HASH:
634  mrb_gc_free_iv(mrb, (struct RObject*)obj);
635  mrb_gc_free_hash(mrb, (struct RHash*)obj);
636  break;
637 
638  case MRB_TT_STRING:
639  mrb_gc_free_str(mrb, (struct RString*)obj);
640  break;
641 
642  case MRB_TT_RANGE:
643  mrb_free(mrb, ((struct RRange*)obj)->edges);
644  break;
645 
646  case MRB_TT_DATA:
647  {
648  struct RData *d = (struct RData*)obj;
649  if (d->type && d->type->dfree) {
650  d->type->dfree(mrb, d->data);
651  }
652  mrb_gc_free_iv(mrb, (struct RObject*)obj);
653  }
654  break;
655 
656  default:
657  break;
658  }
659  obj->tt = MRB_TT_FREE;
660 }
661 
662 static void
663 root_scan_phase(mrb_state *mrb)
664 {
665  size_t i, e, j;
666 
667  if (!is_minor_gc(mrb)) {
668  mrb->gray_list = NULL;
669  mrb->atomic_gray_list = NULL;
670  }
671 
672  mrb_gc_mark_gv(mrb);
673  /* mark arena */
674  for (i=0,e=mrb->arena_idx; i<e; i++) {
675  mrb_gc_mark(mrb, mrb->arena[i]);
676  }
677  /* mark class hierarchy */
678  mrb_gc_mark(mrb, (struct RBasic*)mrb->object_class);
679  /* mark top_self */
680  mrb_gc_mark(mrb, (struct RBasic*)mrb->top_self);
681  /* mark exception */
682  mrb_gc_mark(mrb, (struct RBasic*)mrb->exc);
683 
684  mark_context(mrb, mrb->root_c);
685  if (mrb->root_c != mrb->c) {
686  mark_context(mrb, mrb->c);
687  }
688 
689  /* mark irep pool */
690  if (mrb->irep) {
691  size_t len = mrb->irep_len;
692  if (len > mrb->irep_capa) len = mrb->irep_capa;
693  for (i=0; i<len; i++) {
694  mrb_irep *irep = mrb->irep[i];
695  if (!irep) continue;
696  for (j=0; j<irep->plen; j++) {
697  mrb_gc_mark_value(mrb, irep->pool[j]);
698  }
699  }
700  }
701 }
702 
703 static size_t
704 gc_gray_mark(mrb_state *mrb, struct RBasic *obj)
705 {
706  size_t children = 0;
707 
708  gc_mark_children(mrb, obj);
709 
710  switch (obj->tt) {
711  case MRB_TT_ICLASS:
712  children++;
713  break;
714 
715  case MRB_TT_CLASS:
716  case MRB_TT_SCLASS:
717  case MRB_TT_MODULE:
718  {
719  struct RClass *c = (struct RClass*)obj;
720 
721  children += mrb_gc_mark_iv_size(mrb, (struct RObject*)obj);
722  children += mrb_gc_mark_mt_size(mrb, c);
723  children++;
724  }
725  break;
726 
727  case MRB_TT_OBJECT:
728  case MRB_TT_DATA:
729  children += mrb_gc_mark_iv_size(mrb, (struct RObject*)obj);
730  break;
731 
732  case MRB_TT_ENV:
733  children += (int)obj->flags;
734  break;
735 
736  case MRB_TT_FIBER:
737  {
738  struct mrb_context *c = ((struct RFiber*)obj)->cxt;
739  size_t i;
740  mrb_callinfo *ci;
741 
742  /* mark stack */
743  i = c->stack - c->stbase;
744  if (c->ci) i += c->ci->nregs;
745  if (c->stbase + i > c->stend) i = c->stend - c->stbase;
746  children += i;
747 
748  /* mark ensure stack */
749  children += (c->ci) ? c->ci->eidx : 0;
750 
751  /* mark closure */
752  if (c->cibase) {
753  for (i=0, ci = c->cibase; ci <= c->ci; i++, ci++)
754  ;
755  }
756  children += i;
757  }
758  break;
759 
760  case MRB_TT_ARRAY:
761  {
762  struct RArray *a = (struct RArray*)obj;
763  children += a->len;
764  }
765  break;
766 
767  case MRB_TT_HASH:
768  children += mrb_gc_mark_iv_size(mrb, (struct RObject*)obj);
769  children += mrb_gc_mark_hash_size(mrb, (struct RHash*)obj);
770  break;
771 
772  case MRB_TT_PROC:
773  case MRB_TT_RANGE:
774  children+=2;
775  break;
776 
777  default:
778  break;
779  }
780  return children;
781 }
782 
783 
784 static void
785 gc_mark_gray_list(mrb_state *mrb) {
786  while (mrb->gray_list) {
787  if (is_gray(mrb->gray_list))
788  gc_mark_children(mrb, mrb->gray_list);
789  else
790  mrb->gray_list = mrb->gray_list->gcnext;
791  }
792 }
793 
794 
795 static size_t
796 incremental_marking_phase(mrb_state *mrb, size_t limit)
797 {
798  size_t tried_marks = 0;
799 
800  while (mrb->gray_list && tried_marks < limit) {
801  tried_marks += gc_gray_mark(mrb, mrb->gray_list);
802  }
803 
804  return tried_marks;
805 }
806 
807 static void
808 final_marking_phase(mrb_state *mrb)
809 {
810  mark_context_stack(mrb, mrb->root_c);
811  gc_mark_gray_list(mrb);
812  mrb_assert(mrb->gray_list == NULL);
813  mrb->gray_list = mrb->atomic_gray_list;
814  mrb->atomic_gray_list = NULL;
815  gc_mark_gray_list(mrb);
816  mrb_assert(mrb->gray_list == NULL);
817 }
818 
819 static void
820 prepare_incremental_sweep(mrb_state *mrb)
821 {
822  mrb->gc_state = GC_STATE_SWEEP;
823  mrb->sweeps = mrb->heaps;
824  mrb->gc_live_after_mark = mrb->live;
825 }
826 
827 static size_t
828 incremental_sweep_phase(mrb_state *mrb, size_t limit)
829 {
830  struct heap_page *page = mrb->sweeps;
831  size_t tried_sweep = 0;
832 
833  while (page && (tried_sweep < limit)) {
834  RVALUE *p = page->objects;
835  RVALUE *e = p + MRB_HEAP_PAGE_SIZE;
836  size_t freed = 0;
837  int dead_slot = 1;
838  int full = (page->freelist == NULL);
839 
840  if (is_minor_gc(mrb) && page->old) {
841  /* skip a slot which doesn't contain any young object */
842  p = e;
843  dead_slot = 0;
844  }
845  while (p<e) {
846  if (is_dead(mrb, &p->as.basic)) {
847  if (p->as.basic.tt != MRB_TT_FREE) {
848  obj_free(mrb, &p->as.basic);
849  p->as.free.next = page->freelist;
850  page->freelist = (struct RBasic*)p;
851  freed++;
852  }
853  }
854  else {
855  if (!is_generational(mrb))
856  paint_partial_white(mrb, &p->as.basic); /* next gc target */
857  dead_slot = 0;
858  }
859  p++;
860  }
861 
862  /* free dead slot */
863  if (dead_slot && freed < MRB_HEAP_PAGE_SIZE) {
864  struct heap_page *next = page->next;
865 
866  unlink_heap_page(mrb, page);
867  unlink_free_heap_page(mrb, page);
868  mrb_free(mrb, page);
869  page = next;
870  }
871  else {
872  if (full && freed > 0) {
873  link_free_heap_page(mrb, page);
874  }
875  if (page->freelist == NULL && is_minor_gc(mrb))
876  page->old = TRUE;
877  else
878  page->old = FALSE;
879  page = page->next;
880  }
881  tried_sweep += MRB_HEAP_PAGE_SIZE;
882  mrb->live -= freed;
883  mrb->gc_live_after_mark -= freed;
884  }
885  mrb->sweeps = page;
886  return tried_sweep;
887 }
888 
889 static size_t
890 incremental_gc(mrb_state *mrb, size_t limit)
891 {
892  switch (mrb->gc_state) {
893  case GC_STATE_NONE:
894  root_scan_phase(mrb);
895  mrb->gc_state = GC_STATE_MARK;
896  flip_white_part(mrb);
897  return 0;
898  case GC_STATE_MARK:
899  if (mrb->gray_list) {
900  return incremental_marking_phase(mrb, limit);
901  }
902  else {
903  final_marking_phase(mrb);
904  prepare_incremental_sweep(mrb);
905  return 0;
906  }
907  case GC_STATE_SWEEP: {
908  size_t tried_sweep = 0;
909  tried_sweep = incremental_sweep_phase(mrb, limit);
910  if (tried_sweep == 0)
911  mrb->gc_state = GC_STATE_NONE;
912  return tried_sweep;
913  }
914  default:
915  /* unknown state */
916  mrb_assert(0);
917  return 0;
918  }
919 }
920 
921 static void
922 incremental_gc_until(mrb_state *mrb, enum gc_state to_state)
923 {
924  do {
925  incremental_gc(mrb, ~0);
926  } while (mrb->gc_state != to_state);
927 }
928 
929 static void
930 incremental_gc_step(mrb_state *mrb)
931 {
932  size_t limit = 0, result = 0;
933  limit = (GC_STEP_SIZE/100) * mrb->gc_step_ratio;
934  while (result < limit) {
935  result += incremental_gc(mrb, limit);
936  if (mrb->gc_state == GC_STATE_NONE)
937  break;
938  }
939 
940  mrb->gc_threshold = mrb->live + GC_STEP_SIZE;
941 }
942 
943 static void
944 clear_all_old(mrb_state *mrb)
945 {
946  size_t origin_mode = mrb->is_generational_gc_mode;
947 
949  if (is_major_gc(mrb)) {
950  /* finish the half baked GC */
951  incremental_gc_until(mrb, GC_STATE_NONE);
952  }
953 
954  /* Sweep the dead objects, then reset all the live objects
955  * (including all the old objects, of course) to white. */
957  prepare_incremental_sweep(mrb);
958  incremental_gc_until(mrb, GC_STATE_NONE);
959  mrb->is_generational_gc_mode = origin_mode;
960 
961  /* The gray objects has already been painted as white */
962  mrb->atomic_gray_list = mrb->gray_list = NULL;
963 }
964 
965 void
967 {
968  if (mrb->gc_disabled) return;
969 
970  GC_INVOKE_TIME_REPORT("mrb_incremental_gc()");
972 
973  if (is_minor_gc(mrb)) {
974  incremental_gc_until(mrb, GC_STATE_NONE);
975  }
976  else {
977  incremental_gc_step(mrb);
978  }
979 
980  if (mrb->gc_state == GC_STATE_NONE) {
981  mrb_assert(mrb->live >= mrb->gc_live_after_mark);
982  mrb->gc_threshold = (mrb->gc_live_after_mark/100) * mrb->gc_interval_ratio;
983  if (mrb->gc_threshold < GC_STEP_SIZE) {
984  mrb->gc_threshold = GC_STEP_SIZE;
985  }
986 
987  if (is_major_gc(mrb)) {
989  mrb->gc_full = FALSE;
990  }
991  else if (is_minor_gc(mrb)) {
992  if (mrb->live > mrb->majorgc_old_threshold) {
993  clear_all_old(mrb);
994  mrb->gc_full = TRUE;
995  }
996  }
997  }
998 
1000 }
1001 
1002 /* Perform a full gc cycle */
1003 void
1005 {
1006  if (mrb->gc_disabled) return;
1007  GC_INVOKE_TIME_REPORT("mrb_full_gc()");
1008  GC_TIME_START;
1009 
1010  if (is_generational(mrb)) {
1011  /* clear all the old objects back to young */
1012  clear_all_old(mrb);
1013  mrb->gc_full = TRUE;
1014  }
1015  else if (mrb->gc_state != GC_STATE_NONE) {
1016  /* finish half baked GC cycle */
1017  incremental_gc_until(mrb, GC_STATE_NONE);
1018  }
1019 
1020  incremental_gc_until(mrb, GC_STATE_NONE);
1021  mrb->gc_threshold = (mrb->gc_live_after_mark/100) * mrb->gc_interval_ratio;
1022 
1023  if (is_generational(mrb)) {
1025  mrb->gc_full = FALSE;
1026  }
1027 
1029 }
1030 
1031 void
1033 {
1034  mrb_full_gc(mrb);
1035 }
1036 
1037 int
1039 {
1040  return mrb->arena_idx;
1041 }
1042 
1043 void
1045 {
1046  mrb->arena_idx = idx;
1047 }
1048 
1049 /*
1050  * Field write barrier
1051  * Paint obj(Black) -> value(White) to obj(Black) -> value(Gray).
1052  */
1053 
1054 void
1055 mrb_field_write_barrier(mrb_state *mrb, struct RBasic *obj, struct RBasic *value)
1056 {
1057  if (!is_black(obj)) return;
1058  if (!is_white(value)) return;
1059 
1060  mrb_assert(!is_dead(mrb, value) && !is_dead(mrb, obj));
1062 
1063  if (is_generational(mrb) || mrb->gc_state == GC_STATE_MARK) {
1064  add_gray_list(mrb, value);
1065  }
1066  else {
1068  paint_partial_white(mrb, obj); /* for never write barriers */
1069  }
1070 }
1071 
1072 /*
1073  * Write barrier
1074  * Paint obj(Black) to obj(Gray).
1075  *
1076  * The object that is painted gray will be traversed atomically in final
1077  * mark phase. So you use this write barrier if it's frequency written spot.
1078  * e.g. Set element on Array.
1079  */
1080 
1081 void
1083 {
1084  if (!is_black(obj)) return;
1085 
1086  mrb_assert(!is_dead(mrb, obj));
1088  paint_gray(obj);
1089  obj->gcnext = mrb->atomic_gray_list;
1090  mrb->atomic_gray_list = obj;
1091 }
1092 
1093 /*
1094  * call-seq:
1095  * GC.start -> nil
1096  *
1097  * Initiates full garbage collection.
1098  *
1099  */
1100 
1101 static mrb_value
1102 gc_start(mrb_state *mrb, mrb_value obj)
1103 {
1104  mrb_full_gc(mrb);
1105  return mrb_nil_value();
1106 }
1107 
1108 /*
1109  * call-seq:
1110  * GC.enable -> true or false
1111  *
1112  * Enables garbage collection, returning <code>true</code> if garbage
1113  * collection was previously disabled.
1114  *
1115  * GC.disable #=> false
1116  * GC.enable #=> true
1117  * GC.enable #=> false
1118  *
1119  */
1120 
1121 static mrb_value
1122 gc_enable(mrb_state *mrb, mrb_value obj)
1123 {
1124  int old = mrb->gc_disabled;
1125 
1126  mrb->gc_disabled = FALSE;
1127 
1128  return mrb_bool_value(old);
1129 }
1130 
1131 /*
1132  * call-seq:
1133  * GC.disable -> true or false
1134  *
1135  * Disables garbage collection, returning <code>true</code> if garbage
1136  * collection was already disabled.
1137  *
1138  * GC.disable #=> false
1139  * GC.disable #=> true
1140  *
1141  */
1142 
1143 static mrb_value
1144 gc_disable(mrb_state *mrb, mrb_value obj)
1145 {
1146  int old = mrb->gc_disabled;
1147 
1148  mrb->gc_disabled = TRUE;
1149 
1150  return mrb_bool_value(old);
1151 }
1152 
1153 /*
1154  * call-seq:
1155  * GC.interval_ratio -> fixnum
1156  *
1157  * Returns ratio of GC interval. Default value is 200(%).
1158  *
1159  */
1160 
1161 static mrb_value
1162 gc_interval_ratio_get(mrb_state *mrb, mrb_value obj)
1163 {
1164  return mrb_fixnum_value(mrb->gc_interval_ratio);
1165 }
1166 
1167 /*
1168  * call-seq:
1169  * GC.interval_ratio = fixnum -> nil
1170  *
1171  * Updates ratio of GC interval. Default value is 200(%).
1172  * GC start as soon as after end all step of GC if you set 100(%).
1173  *
1174  */
1175 
1176 static mrb_value
1177 gc_interval_ratio_set(mrb_state *mrb, mrb_value obj)
1178 {
1179  mrb_int ratio;
1180 
1181  mrb_get_args(mrb, "i", &ratio);
1182  mrb->gc_interval_ratio = ratio;
1183  return mrb_nil_value();
1184 }
1185 
1186 /*
1187  * call-seq:
1188  * GC.step_ratio -> fixnum
1189  *
1190  * Returns step span ratio of Incremental GC. Default value is 200(%).
1191  *
1192  */
1193 
1194 static mrb_value
1195 gc_step_ratio_get(mrb_state *mrb, mrb_value obj)
1196 {
1197  return mrb_fixnum_value(mrb->gc_step_ratio);
1198 }
1199 
1200 /*
1201  * call-seq:
1202  * GC.step_ratio = fixnum -> nil
1203  *
1204  * Updates step span ratio of Incremental GC. Default value is 200(%).
1205  * 1 step of incrementalGC becomes long if a rate is big.
1206  *
1207  */
1208 
1209 static mrb_value
1210 gc_step_ratio_set(mrb_state *mrb, mrb_value obj)
1211 {
1212  mrb_int ratio;
1213 
1214  mrb_get_args(mrb, "i", &ratio);
1215  mrb->gc_step_ratio = ratio;
1216  return mrb_nil_value();
1217 }
1218 
1219 static void
1220 change_gen_gc_mode(mrb_state *mrb, mrb_int enable)
1221 {
1222  if (is_generational(mrb) && !enable) {
1223  clear_all_old(mrb);
1225  mrb->gc_full = FALSE;
1226  }
1227  else if (!is_generational(mrb) && enable) {
1228  incremental_gc_until(mrb, GC_STATE_NONE);
1230  mrb->gc_full = FALSE;
1231  }
1232  mrb->is_generational_gc_mode = enable;
1233 }
1234 
1235 /*
1236  * call-seq:
1237  * GC.generational_mode -> true or false
1238  *
1239  * Returns generational or normal gc mode.
1240  *
1241  */
1242 
1243 static mrb_value
1244 gc_generational_mode_get(mrb_state *mrb, mrb_value self)
1245 {
1246  return mrb_bool_value(mrb->is_generational_gc_mode);
1247 }
1248 
1249 /*
1250  * call-seq:
1251  * GC.generational_mode = true or false -> true or false
1252  *
1253  * Changes to generational or normal gc mode.
1254  *
1255  */
1256 
1257 static mrb_value
1258 gc_generational_mode_set(mrb_state *mrb, mrb_value self)
1259 {
1260  mrb_bool enable;
1261 
1262  mrb_get_args(mrb, "b", &enable);
1263  if (mrb->is_generational_gc_mode != enable)
1264  change_gen_gc_mode(mrb, enable);
1265 
1266  return mrb_bool_value(enable);
1267 }
1268 
1269 void
1271 {
1272  struct heap_page* page = mrb->heaps;
1273 
1274  while (page != NULL) {
1275  RVALUE *p, *pend;
1276 
1277  p = page->objects;
1278  pend = p + MRB_HEAP_PAGE_SIZE;
1279  for (;p < pend; p++) {
1280  (*callback)(mrb, &p->as.basic, data);
1281  }
1282 
1283  page = page->next;
1284  }
1285 }
1286 
1287 #ifdef GC_TEST
1288 #ifdef GC_DEBUG
1289 static mrb_value gc_test(mrb_state *, mrb_value);
1290 #endif
1291 #endif
1292 
1293 void
1295 {
1296  struct RClass *gc;
1297 
1298  gc = mrb_define_module(mrb, "GC");
1299 
1300  mrb_define_class_method(mrb, gc, "start", gc_start, MRB_ARGS_NONE());
1301  mrb_define_class_method(mrb, gc, "enable", gc_enable, MRB_ARGS_NONE());
1302  mrb_define_class_method(mrb, gc, "disable", gc_disable, MRB_ARGS_NONE());
1303  mrb_define_class_method(mrb, gc, "interval_ratio", gc_interval_ratio_get, MRB_ARGS_NONE());
1304  mrb_define_class_method(mrb, gc, "interval_ratio=", gc_interval_ratio_set, MRB_ARGS_REQ(1));
1305  mrb_define_class_method(mrb, gc, "step_ratio", gc_step_ratio_get, MRB_ARGS_NONE());
1306  mrb_define_class_method(mrb, gc, "step_ratio=", gc_step_ratio_set, MRB_ARGS_REQ(1));
1307  mrb_define_class_method(mrb, gc, "generational_mode=", gc_generational_mode_set, MRB_ARGS_REQ(1));
1308  mrb_define_class_method(mrb, gc, "generational_mode", gc_generational_mode_get, MRB_ARGS_NONE());
1309 #ifdef GC_TEST
1310 #ifdef GC_DEBUG
1311  mrb_define_class_method(mrb, gc, "test", gc_test, MRB_ARGS_NONE());
1312 #endif
1313 #endif
1314 }
1315 
1316 #ifdef GC_TEST
1317 #ifdef GC_DEBUG
/*
 * Unit test for mrb_field_write_barrier().
 *
 * The barrier fires when a black owner object stores a reference to a
 * white value.  Each section below sets up colors and gc_state by hand
 * and asserts the barrier's effect:
 *  - MARK phase: the white value must be repainted gray;
 *  - SWEEP phase: both objects must end up carrying the current white
 *    part, so neither is collected by the sweep in progress;
 *  - "fail" cases: the barrier must be a no-op when the owner is not
 *    black, or when the value is already gray.
 * A final scoped section repeats the MARK case through the
 * mrb_field_write_barrier_value() variant that takes an mrb_value.
 */
void
test_mrb_field_write_barrier(void)
{
  mrb_state *mrb = mrb_open();
  struct RBasic *obj, *value;

  puts("test_mrb_field_write_barrier");
  /* owner = fresh array painted black; value = fresh string painted
   * with the current white part */
  obj = mrb_basic_ptr(mrb_ary_new(mrb));
  value = mrb_basic_ptr(mrb_str_new_cstr(mrb, "value"));
  paint_black(obj);
  paint_partial_white(mrb,value);


  puts(" in GC_STATE_MARK");
  mrb->gc_state = GC_STATE_MARK;
  mrb_field_write_barrier(mrb, obj, value);

  /* during marking the barrier re-grays the white value */
  mrb_assert(is_gray(value));


  puts(" in GC_STATE_SWEEP");
  paint_partial_white(mrb,value);
  mrb->gc_state = GC_STATE_SWEEP;
  mrb_field_write_barrier(mrb, obj, value);

  /* during sweeping both objects are painted the current white so the
   * ongoing sweep does not free them */
  mrb_assert(obj->color & mrb->current_white_part);
  mrb_assert(value->color & mrb->current_white_part);


  puts(" fail with black");
  /* owner is white (not black) -> barrier must do nothing */
  mrb->gc_state = GC_STATE_MARK;
  paint_white(obj);
  paint_partial_white(mrb,value);
  mrb_field_write_barrier(mrb, obj, value);

  mrb_assert(obj->color & mrb->current_white_part);


  puts(" fail with gray");
  /* value is already gray -> barrier must leave it gray */
  mrb->gc_state = GC_STATE_MARK;
  paint_black(obj);
  paint_gray(value);
  mrb_field_write_barrier(mrb, obj, value);

  mrb_assert(is_gray(value));


  {
    puts("test_mrb_field_write_barrier_value");
    obj = mrb_basic_ptr(mrb_ary_new(mrb));
    /* shadows the outer `value`: this one is a boxed mrb_value for the
     * _value variant of the barrier */
    mrb_value value = mrb_str_new_cstr(mrb, "value");
    paint_black(obj);
    paint_partial_white(mrb, mrb_basic_ptr(value));

    mrb->gc_state = GC_STATE_MARK;
    mrb_field_write_barrier_value(mrb, obj, value);

    mrb_assert(is_gray(mrb_basic_ptr(value)));
  }

  mrb_close(mrb);
}
1381 
/*
 * Unit test for mrb_write_barrier().
 *
 * This barrier re-grays a black object that has been mutated during
 * marking.  Asserts that a black object is painted gray and pushed
 * onto mrb->atomic_gray_list, and that an already-gray object is left
 * unchanged (the "fail" case).
 */
void
test_mrb_write_barrier(void)
{
  mrb_state *mrb = mrb_open();
  struct RBasic *obj;

  puts("test_mrb_write_barrier");
  obj = mrb_basic_ptr(mrb_ary_new(mrb));
  paint_black(obj);

  puts(" in GC_STATE_MARK");
  mrb->gc_state = GC_STATE_MARK;
  mrb_write_barrier(mrb, obj);

  /* black object is demoted to gray and queued for atomic re-marking */
  mrb_assert(is_gray(obj));
  mrb_assert(mrb->atomic_gray_list == obj);


  puts(" fail with gray");
  /* already gray -> barrier must be a no-op */
  paint_gray(obj);
  mrb_write_barrier(mrb, obj);

  mrb_assert(is_gray(obj));

  mrb_close(mrb);
}
1408 
/*
 * Unit test for add_gray_list().
 *
 * Verifies that the gray list behaves as a LIFO stack linked through
 * gcnext: after pushing obj1 then obj2, the head is obj2, its gcnext
 * is obj1, and both objects are painted gray.  Generational mode is
 * disabled first so the plain incremental gray list is in effect.
 */
void
test_add_gray_list(void)
{
  mrb_state *mrb = mrb_open();
  struct RBasic *obj1, *obj2;

  puts("test_add_gray_list");
  change_gen_gc_mode(mrb, FALSE);
  mrb_assert(mrb->gray_list == NULL);
  obj1 = mrb_basic_ptr(mrb_str_new_cstr(mrb, "test"));
  add_gray_list(mrb, obj1);
  mrb_assert(mrb->gray_list == obj1);
  mrb_assert(is_gray(obj1));

  obj2 = mrb_basic_ptr(mrb_str_new_cstr(mrb, "test"));
  add_gray_list(mrb, obj2);
  /* pushed at the head; previous head hangs off gcnext */
  mrb_assert(mrb->gray_list == obj2);
  mrb_assert(mrb->gray_list->gcnext == obj1);
  mrb_assert(is_gray(obj2));

  mrb_close(mrb);
}
1431 
/*
 * Unit test for gc_gray_mark().
 *
 * gc_gray_mark() blackens one gray object and returns the number of
 * children it grayed in the process.  Two object kinds are checked:
 *  - MRB_TT_CLASS: marking the (well-populated) object_class must
 *    blacken it and gray more than one child;
 *  - MRB_TT_ARRAY: an array holding a single white string must gray
 *    exactly that one element.
 */
void
test_gc_gray_mark(void)
{
  mrb_state *mrb = mrb_open();
  mrb_value obj_v, value_v;
  struct RBasic *obj;
  size_t gray_num = 0;

  puts("test_gc_gray_mark");

  puts(" in MRB_TT_CLASS");
  obj = (struct RBasic*)mrb->object_class;
  paint_gray(obj);
  gray_num = gc_gray_mark(mrb, obj);
  mrb_assert(is_black(obj));
  mrb_assert(gray_num > 1);

  puts(" in MRB_TT_ARRAY");
  obj_v = mrb_ary_new(mrb);
  value_v = mrb_str_new_cstr(mrb, "test");
  paint_gray(mrb_basic_ptr(obj_v));
  paint_partial_white(mrb, mrb_basic_ptr(value_v));
  mrb_ary_push(mrb, obj_v, value_v);
  gray_num = gc_gray_mark(mrb, mrb_basic_ptr(obj_v));
  /* NOTE(review): an assertion (likely is_black() on obj_v) appears to
   * have been lost by the doc extraction here — confirm against
   * upstream mruby gc.c. */
  mrb_assert(is_gray(mrb_basic_ptr(value_v)));
  mrb_assert(gray_num == 1);

  mrb_close(mrb);
}
1462 
/*
 * Unit test driving the incremental collector through a full cycle by
 * hand, then through a generational minor/major transition.
 *
 * First part (incremental mode): step the state machine with an
 * effectively unlimited budget, walk every heap page counting black
 * (live) slots, then walk the first page's freelist counting freed
 * slots; mrb->live must equal the black count and also equal total
 * slots minus freed slots.
 *
 * Second part (generational mode): after switching modes, force a
 * major GC by zeroing majorgc_old_threshold and check the gc_full
 * flag flips on for the major cycle and off once it completes.
 *
 * NOTE(review): several mrb_assert() lines checking mrb->gc_state
 * between the incremental_gc() calls appear to have been lost by the
 * doc extraction (source line numbers jump) — confirm against
 * upstream mruby gc.c.
 */
void
test_incremental_gc(void)
{
  mrb_state *mrb = mrb_open();
  /* max = ~0 converts to SIZE_MAX: each step may do unlimited work */
  size_t max = ~0, live = 0, total = 0, freed = 0;
  RVALUE *free;  /* freelist cursor; shadows stdlib free() in this scope */
  struct heap_page *page;

  puts("test_incremental_gc");
  change_gen_gc_mode(mrb, FALSE);

  puts(" in mrb_full_gc");
  mrb_full_gc(mrb);

  puts(" in GC_STATE_NONE");
  incremental_gc(mrb, max);
  puts(" in GC_STATE_MARK");
  incremental_gc_until(mrb, GC_STATE_SWEEP);

  puts(" in GC_STATE_SWEEP");
  /* count live (black) slots across all heap pages; a gray-but-alive
   * object at this point would indicate a marking bug, so print it */
  page = mrb->heaps;
  while (page) {
    RVALUE *p = page->objects;
    RVALUE *e = p + MRB_HEAP_PAGE_SIZE;
    while (p<e) {
      if (is_black(&p->as.basic)) {
        live++;
      }
      if (is_gray(&p->as.basic) && !is_dead(mrb, &p->as.basic)) {
        printf("%p\n", &p->as.basic);
      }
      p++;
    }
    page = page->next;
    total += MRB_HEAP_PAGE_SIZE;
  }

  mrb_assert(mrb->gray_list == NULL);

  incremental_gc(mrb, max);

  incremental_gc(mrb, max);

  /* count freed slots on the first page's freelist */
  free = (RVALUE*)mrb->heaps->freelist;
  while (free) {
    freed++;
    free = (RVALUE*)free->as.free.next;
  }

  /* bookkeeping must agree with the manual counts */
  mrb_assert(mrb->live == live);
  mrb_assert(mrb->live == total-freed);

  puts("test_incremental_gc(gen)");
  incremental_gc_until(mrb, GC_STATE_SWEEP);
  change_gen_gc_mode(mrb, TRUE);

  mrb_assert(mrb->gc_full == FALSE);

  puts(" in minor");
  mrb_assert(is_minor_gc(mrb));
  /* zero threshold forces the next cycle to be a major (full) GC */
  mrb->majorgc_old_threshold = 0;
  mrb_incremental_gc(mrb);
  mrb_assert(mrb->gc_full == TRUE);

  puts(" in major");
  mrb_assert(is_major_gc(mrb));
  /* run the major GC to completion; gc_full resets afterwards */
  do {
    mrb_incremental_gc(mrb);
  } while (mrb->gc_state != GC_STATE_NONE);
  mrb_assert(mrb->gc_full == FALSE);

  mrb_close(mrb);
}
1544 
/*
 * Unit test for incremental_sweep_phase().
 *
 * Adds a second (empty) heap page, points the sweep cursor at the
 * heap list, and sweeps with a budget large enough to cover all
 * pages.  The empty page must be released, leaving a single page
 * that is also the head of the free-page list.
 */
void
test_incremental_sweep_phase(void)
{
  mrb_state *mrb = mrb_open();

  puts("test_incremental_sweep_phase");

  add_heap(mrb);
  mrb->sweeps = mrb->heaps;

  /* exactly two pages before sweeping, both with free slots */
  mrb_assert(mrb->heaps->next->next == NULL);
  mrb_assert(mrb->free_heaps->next->next == NULL);
  incremental_sweep_phase(mrb, MRB_HEAP_PAGE_SIZE*3);

  /* the fully-empty page was unlinked and freed */
  mrb_assert(mrb->heaps->next == NULL);
  mrb_assert(mrb->heaps == mrb->free_heaps);

  mrb_close(mrb);
}
1564 
/*
 * Ruby-level entry point (GC.test, GC_TEST+GC_DEBUG builds only):
 * runs every internal GC unit test in sequence and returns nil.
 * Each test opens and closes its own mrb_state, so `mrb`/`self`
 * are unused here.
 */
static mrb_value
gc_test(mrb_state *mrb, mrb_value self)
{
  test_mrb_field_write_barrier();
  test_mrb_write_barrier();
  test_add_gray_list();
  test_gc_gray_mark();
  test_incremental_gc();
  test_incremental_sweep_phase();
  return mrb_nil_value();
}
1576 #endif
1577 #endif