@@ -480,6 +480,7 @@ typedef enum {
     GPR_FLAG_HAVE_FINALIZE     = 0x4000,
     GPR_FLAG_IMMEDIATE_MARK    = 0x8000,
     GPR_FLAG_FULL_MARK        = 0x10000,
+    GPR_FLAG_COMPACT          = 0x20000,
 
     GPR_DEFAULT_REASON =
         (GPR_FLAG_FULL_MARK | GPR_FLAG_IMMEDIATE_MARK |
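The collector encodes why a GC was triggered as a bitmask of these GPR_FLAG_* values, and the new GPR_FLAG_COMPACT bit simply rides along in that word; gc_start later copies it into objspace->flags.compact (see the hunks below). A minimal standalone sketch of that pattern, using invented stand-in names (FLAG_FULL_MARK, FLAG_COMPACT) rather than the real gc.c enum:

```c
#include <stdio.h>

/* Simplified stand-ins for the gc.c reason flags shown above. */
enum {
    FLAG_FULL_MARK = 0x10000,
    FLAG_COMPACT   = 0x20000    /* the new bit added by this change */
};

int
main(void)
{
    unsigned reason = FLAG_FULL_MARK;

    /* A caller requesting compaction just ORs the extra bit in... */
    reason |= FLAG_COMPACT;

    /* ...and the collector tests it the same way gc_start does below. */
    unsigned compact = !!(reason & FLAG_COMPACT);
    printf("compact requested: %u\n", compact);   /* prints 1 */
    return 0;
}
```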
@@ -634,6 +635,8 @@ typedef struct rb_heap_struct {
     struct heap_page *using_page;
     struct list_head pages;
     struct heap_page *sweeping_page; /* iterator for .pages */
+    struct heap_page *compact_cursor;
+    VALUE moved_list;
 #if GC_ENABLE_INCREMENTAL_MARK
     struct heap_page *pooled_pages;
 #endif
@@ -667,6 +670,7 @@ typedef struct rb_objspace {
     unsigned int gc_stressful: 1;
     unsigned int has_hook: 1;
     unsigned int during_minor_gc : 1;
+    unsigned int compact : 1;
 #if GC_ENABLE_INCREMENTAL_MARK
     unsigned int during_incremental_marking : 1;
 #endif
@@ -4178,17 +4182,71 @@ gc_setup_mark_bits(struct heap_page *page)
     memcpy(&page->mark_bits[0], &page->uncollectible_bits[0], HEAP_PAGE_BITMAP_SIZE);
 }
 
+static int gc_is_moveable_obj(rb_objspace_t *objspace, VALUE obj);
+static VALUE gc_move(rb_objspace_t *objspace, VALUE scan, VALUE free, VALUE moved_list);
+
+static short
+try_move(rb_objspace_t *objspace, rb_heap_t *heap, struct heap_page *sweep_page, VALUE vp)
+{
+    struct heap_page *cursor = heap->compact_cursor;
+
+    while (1) {
+        bits_t *mark_bits = cursor->mark_bits;
+        RVALUE *p = cursor->start;
+        RVALUE *offset = p - NUM_IN_PAGE(p);
+
+        /* Find an object to move and move it. Movable objects must be
+         * marked, so we iterate using the marking bitmap */
+        for (size_t i = 0; i < HEAP_PAGE_BITMAP_LIMIT; i++) {
+            bits_t bits = mark_bits[i];
+
+            if (bits) {
+                p = offset + i * BITS_BITLENGTH;
+
+                do {
+                    if (bits & 1) {
+                        if (gc_is_moveable_obj(objspace, (VALUE)p)) {
+                            heap->moved_list = gc_move(objspace, (VALUE)p, vp, heap->moved_list);
+                            return 1;
+                        }
+                    }
+                    p++;
+                    bits >>= 1;
+                } while (bits);
+            }
+        }
+
+        struct heap_page *next;
+
+        next = list_prev(&heap->pages, cursor, page_node);
+
+        // Cursors have met, lets quit
+        if (next == sweep_page) {
+            break;
+        } else {
+            heap->compact_cursor = next;
+            cursor = next;
+        }
+    }
+
+    return 0;
+}
+
 static inline int
 gc_page_sweep(rb_objspace_t *objspace, rb_heap_t *heap, struct heap_page *sweep_page)
 {
     int i;
-    int empty_slots = 0, freed_slots = 0, final_slots = 0;
+    int empty_slots = 0, freed_slots = 0, final_slots = 0, moved_slots = 0;
     RVALUE *p, *pend,*offset;
     bits_t *bits, bitset;
 
     gc_report(2, objspace, "page_sweep: start.\n");
 
     sweep_page->flags.before_sweep = FALSE;
+    if (heap->compact_cursor && sweep_page == heap->compact_cursor) {
+        /* The compaction cursor and sweep page met, so we need to quit compacting */
+        heap->compact_cursor = NULL;
+    }
 
     p = sweep_page->start; pend = p + sweep_page->total_slots;
     offset = p - NUM_IN_PAGE(p);
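try_move is the heart of the compaction step folded into sweeping: the sweep proceeds forward through the page list while compact_cursor starts at the last page and walks backward, and each dead slot found by the sweeper (vp) is refilled with a movable marked object found under the backward cursor, until the two cursors meet. The sketch below is a deliberately simplified, self-contained model of that two-finger idea over a flat array of slots; the names (Slot, two_finger_compact, free_i, scan_i) are invented for illustration, and the real code works on mark bitmaps and heap pages rather than indices:

```c
#include <stdio.h>
#include <stdbool.h>

/* Toy model of two-finger compaction over a flat array of slots.
 * In gc.c the "fingers" are page cursors (the sweep moving forward,
 * compact_cursor moving backward), not plain array indices. */
typedef struct { bool live; int payload; } Slot;

static void
two_finger_compact(Slot *slots, int len)
{
    int free_i = 0;          /* forward cursor: looks for free slots    */
    int scan_i = len - 1;    /* backward cursor: looks for live objects */

    while (free_i < scan_i) {
        if (slots[free_i].live) { free_i++; continue; }   /* slot already occupied */
        if (!slots[scan_i].live) { scan_i--; continue; }  /* nothing to move here  */

        /* Move the live object down into the free slot; the vacated slot
         * would become a T_MOVED forwarding cell in the real collector. */
        slots[free_i] = slots[scan_i];
        slots[scan_i].live = false;
    }
}

int
main(void)
{
    Slot heap[8] = {
        {false,0},{true,1},{false,0},{true,2},{false,0},{true,3},{false,0},{true,4}
    };
    two_finger_compact(heap, 8);
    for (int i = 0; i < 8; i++)
        printf("%d:%s ", i, heap[i].live ? "live" : "free");
    printf("\n");   /* live objects end up packed at the front */
    return 0;
}
```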
@@ -4220,20 +4278,51 @@ gc_page_sweep(rb_objspace_t *objspace, rb_heap_t *heap, struct heap_page *sweep_
             }
             else {
                 (void)VALGRIND_MAKE_MEM_UNDEFINED((void*)p, sizeof(RVALUE));
-                heap_page_add_freeobj(objspace, sweep_page, vp);
+
+                if (heap->compact_cursor) {
+                    /* If try_move couldn't compact anything, it means
+                     * the cursors have met and there are no objects left that
+                     * /can/ be compacted, so we need to quit. */
+                    if (try_move(objspace, heap, sweep_page, vp)) {
+                        moved_slots++;
+                    } else {
+                        heap->compact_cursor = NULL;
+                        heap_page_add_freeobj(objspace, sweep_page, vp);
+                    }
+                } else {
+                    heap_page_add_freeobj(objspace, sweep_page, vp);
+                }
+
                 gc_report(3, objspace, "page_sweep: %s is added to freelist\n", obj_info(vp));
-                freed_slots++;
-                asan_poison_object(vp);
+                freed_slots++;
             }
             break;
         }
 
           /* minor cases */
+        case T_MOVED:
+            if (!objspace->flags.during_compacting) {
+                /* When compaction is combined with sweeping, some of the swept pages
+                 * will have T_MOVED objects on them. These need to be kept alive
+                 * until references are finished updating. Once references are finished
+                 * updating, `gc_unlink_moved_list` will clear the T_MOVED slots
+                 * and release them back to the heap. If there are T_MOVED slots
+                 * in the heap and we're _not_ compacting, then it's a bug. */
+                rb_bug("T_MOVED shouldn't be on the heap unless compacting\n");
+            }
+            break;
         case T_ZOMBIE:
             /* already counted */
             break;
         case T_NONE:
-            empty_slots++; /* already freed */
+            if (heap->compact_cursor) {
+                if (try_move(objspace, heap, sweep_page, vp)) {
+                    moved_slots++;
+                } else {
+                    heap->compact_cursor = NULL;
+                }
+            }
+            empty_slots++; /* already freed */
            break;
         }
     }
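The new T_MOVED case documents the other half of the protocol: gc_move (its body is not in this hunk) leaves a forwarding cell behind in the evacuated slot and, judging by its moved_list parameter and return value, threads that cell onto heap->moved_list so gc_unlink_moved_list can clear it once every reference has been rewritten. Below is a hedged, standalone sketch of that forwarding-pointer idea; the struct layout and helper names (obj, moved_to, moved_next, move_obj, update_ref) are invented for illustration and are not Ruby's actual RVALUE:

```c
#include <stdio.h>
#include <stddef.h>

/* Simplified object slot: either holds data, or acts as a forwarding
 * cell (moved_to != NULL) after its contents were evacuated. */
typedef struct obj {
    struct obj *moved_to;    /* forwarding address, NULL if not moved */
    struct obj *moved_next;  /* intrusive link for the moved list     */
    int         value;
} obj;

/* Evacuate `from` into `to`, leave a forwarding cell behind, and push
 * the old slot onto the moved list so it can be reclaimed later. */
static obj *
move_obj(obj *from, obj *to, obj *moved_list)
{
    to->value        = from->value;
    from->moved_to   = to;
    from->moved_next = moved_list;
    return from;                       /* new head of the moved list */
}

/* Reference updating: chase the forwarding pointer once. */
static obj *
update_ref(obj *ref)
{
    return ref->moved_to ? ref->moved_to : ref;
}

int
main(void)
{
    obj old_slot = { NULL, NULL, 42 }, new_slot = { NULL, NULL, 0 };
    obj *moved_list = NULL;
    obj *ref = &old_slot;              /* some pointer into the heap */

    moved_list = move_obj(&old_slot, &new_slot, moved_list);
    ref = update_ref(ref);             /* rewrite the stale reference */

    printf("%d\n", ref->value);        /* prints 42, via the new slot */
    /* only now may the cells on moved_list be released back to the heap */
    return 0;
}
```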
@@ -4257,7 +4346,7 @@ gc_page_sweep(rb_objspace_t *objspace, rb_heap_t *heap, struct heap_page *sweep_
                    (int)sweep_page->total_slots,
                    freed_slots, empty_slots, final_slots);
 
-    sweep_page->free_slots = freed_slots + empty_slots;
+    sweep_page->free_slots = (freed_slots + empty_slots) - moved_slots;
     objspace->profile.total_freed_objects += freed_slots;
     heap_pages_final_slots += final_slots;
     sweep_page->final_slots += final_slots;
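With compaction active, a slot that was reclaimed but immediately reused as a move target is no longer free, hence the subtraction. As an illustrative example, a page where sweeping freed 10 slots and found 5 already-empty ones, of which 4 were consumed by moved objects, reports (10 + 5) - 4 = 11 free slots; the return value of gc_page_sweep in the next hunk is adjusted the same way.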
@@ -4271,7 +4360,7 @@ gc_page_sweep(rb_objspace_t *objspace, rb_heap_t *heap, struct heap_page *sweep_
 
     gc_report(2, objspace, "page_sweep: end.\n");
 
-    return freed_slots + empty_slots;
+    return (freed_slots + empty_slots) - moved_slots;
 }
 
 /* allocate additional minimum page to work */
@@ -4456,6 +4545,29 @@ gc_sweep_continue(rb_objspace_t *objspace, rb_heap_t *heap)
     gc_exit(objspace, "sweep_continue");
 }
 
+static void
+gc_compact_start(rb_objspace_t *objspace, rb_heap_t *heap)
+{
+    heap->compact_cursor = list_tail(&heap->pages, struct heap_page, page_node);
+    objspace->profile.compact_count++;
+    heap->moved_list = Qfalse;
+}
+
+static void gc_update_references(rb_objspace_t *objspace);
+static void gc_unlink_moved_list(rb_objspace_t *objspace, VALUE moved_list_head);
+
+static void
+gc_compact_finish(rb_objspace_t *objspace, rb_heap_t *heap)
+{
+    heap->compact_cursor = NULL;
+    gc_update_references(objspace);
+    rb_clear_constant_cache();
+    gc_unlink_moved_list(objspace, heap->moved_list);
+    heap->moved_list = Qfalse;
+    objspace->flags.compact = 0;
+    objspace->flags.during_compacting = FALSE;
+}
+
 static void
 gc_sweep(rb_objspace_t *objspace)
 {
@@ -4468,7 +4580,13 @@ gc_sweep(rb_objspace_t *objspace)
     gc_prof_sweep_timer_start(objspace);
 #endif
     gc_sweep_start(objspace);
+    if (objspace->flags.compact) {
+        gc_compact_start(objspace, heap_eden);
+    }
     gc_sweep_rest(objspace);
+    if (objspace->flags.compact) {
+        gc_compact_finish(objspace, heap_eden);
+    }
 #if !GC_ENABLE_LAZY_SWEEP
     gc_prof_sweep_timer_stop(objspace);
 #endif
@@ -7283,6 +7401,8 @@ gc_start(rb_objspace_t *objspace, int reason)
 
     /* reason may be clobbered, later, so keep set immediate_sweep here */
     objspace->flags.immediate_sweep = !!((unsigned)reason & GPR_FLAG_IMMEDIATE_SWEEP);
+    objspace->flags.compact = !!((unsigned)reason & GPR_FLAG_COMPACT);
+    objspace->flags.during_compacting = TRUE;
 
     if (!heap_allocated_pages) return FALSE; /* heap is not ready */
     if (!(reason & GPR_FLAG_METHOD) && !ready_to_gc(objspace)) return TRUE; /* GC is not allowed */
@@ -7544,17 +7664,22 @@ garbage_collect_with_gvl(rb_objspace_t *objspace, int reason)
 }
 
 static VALUE
-gc_start_internal(rb_execution_context_t *ec, VALUE self, VALUE full_mark, VALUE immediate_mark, VALUE immediate_sweep)
+gc_start_internal(rb_execution_context_t *ec, VALUE self, VALUE full_mark, VALUE immediate_mark, VALUE immediate_sweep, VALUE compact)
 {
     rb_objspace_t *objspace = &rb_objspace;
     int reason = GPR_FLAG_FULL_MARK |
                  GPR_FLAG_IMMEDIATE_MARK |
                  GPR_FLAG_IMMEDIATE_SWEEP |
                  GPR_FLAG_METHOD;
 
-    if (!RTEST(full_mark))       reason &= ~GPR_FLAG_FULL_MARK;
-    if (!RTEST(immediate_mark))  reason &= ~GPR_FLAG_IMMEDIATE_MARK;
-    if (!RTEST(immediate_sweep)) reason &= ~GPR_FLAG_IMMEDIATE_SWEEP;
+    /* For now, compact implies full mark / sweep, so ignore other flags */
+    if (RTEST(compact)) {
+        reason |= GPR_FLAG_COMPACT;
+    } else {
+        if (!RTEST(full_mark))       reason &= ~GPR_FLAG_FULL_MARK;
+        if (!RTEST(immediate_mark))  reason &= ~GPR_FLAG_IMMEDIATE_MARK;
+        if (!RTEST(immediate_sweep)) reason &= ~GPR_FLAG_IMMEDIATE_SWEEP;
+    }
 
     garbage_collect(objspace, reason);
     gc_finalize_deferred(objspace);