@@ -48,8 +48,8 @@ redblack_left(redblack_node_t *node)
         return LEAF;
     }
     else {
-        RUBY_ASSERT(node->l < GET_SHAPE_TREE()->cache_size);
-        redblack_node_t *left = &GET_SHAPE_TREE()->shape_cache[node->l - 1];
+        RUBY_ASSERT(node->l < rb_shape_tree->cache_size);
+        redblack_node_t *left = &rb_shape_tree->shape_cache[node->l - 1];
         return left;
     }
 }
@@ -61,8 +61,8 @@ redblack_right(redblack_node_t *node)
         return LEAF;
     }
     else {
-        RUBY_ASSERT(node->r < GET_SHAPE_TREE()->cache_size);
-        redblack_node_t *right = &GET_SHAPE_TREE()->shape_cache[node->r - 1];
+        RUBY_ASSERT(node->r < rb_shape_tree->cache_size);
+        redblack_node_t *right = &rb_shape_tree->shape_cache[node->r - 1];
         return right;
     }
 }
@@ -120,7 +120,7 @@ redblack_id_for(redblack_node_t *node)
         return 0;
     }
     else {
-        redblack_node_t *redblack_nodes = GET_SHAPE_TREE()->shape_cache;
+        redblack_node_t *redblack_nodes = rb_shape_tree->shape_cache;
         redblack_id_t id = (redblack_id_t)(node - redblack_nodes);
         return id + 1;
     }
@@ -129,16 +129,16 @@ redblack_id_for(redblack_node_t *node)
 static redblack_node_t *
 redblack_new(char color, ID key, rb_shape_t *value, redblack_node_t *left, redblack_node_t *right)
 {
-    if (GET_SHAPE_TREE()->cache_size + 1 >= REDBLACK_CACHE_SIZE) {
+    if (rb_shape_tree->cache_size + 1 >= REDBLACK_CACHE_SIZE) {
         // We're out of cache, just quit
         return LEAF;
     }

     RUBY_ASSERT(left == LEAF || left->key < key);
     RUBY_ASSERT(right == LEAF || right->key > key);

-    redblack_node_t *redblack_nodes = GET_SHAPE_TREE()->shape_cache;
-    redblack_node_t *node = &redblack_nodes[(GET_SHAPE_TREE()->cache_size)++];
+    redblack_node_t *redblack_nodes = rb_shape_tree->shape_cache;
+    redblack_node_t *node = &redblack_nodes[(rb_shape_tree->cache_size)++];
     node->key = key;
     node->value = (rb_shape_t *)((uintptr_t)value | color);
     node->l = redblack_id_for(left);
@@ -288,20 +288,20 @@ redblack_insert(redblack_node_t *tree, ID key, rb_shape_t *value)
 }
 #endif

-rb_shape_tree_t *rb_shape_tree_ptr = NULL;
+rb_shape_tree_t *rb_shape_tree = NULL; // extern
 static VALUE shape_tree_obj = Qfalse;

 rb_shape_t *
 rb_shape_get_root_shape(void)
 {
-    return GET_SHAPE_TREE()->root_shape;
+    return rb_shape_tree->root_shape;
 }

 static void
 shape_tree_mark(void *data)
 {
     rb_shape_t *cursor = rb_shape_get_root_shape();
-    rb_shape_t *end = RSHAPE(GET_SHAPE_TREE()->next_shape_id - 1);
+    rb_shape_t *end = RSHAPE(rb_shape_tree->next_shape_id - 1);
     while (cursor < end) {
         if (cursor->edges && !SINGLE_CHILD_P(cursor->edges)) {
             rb_gc_mark_movable(cursor->edges);
@@ -314,7 +314,7 @@ static void
 shape_tree_compact(void *data)
 {
     rb_shape_t *cursor = rb_shape_get_root_shape();
-    rb_shape_t *end = RSHAPE(GET_SHAPE_TREE()->next_shape_id - 1);
+    rb_shape_t *end = RSHAPE(rb_shape_tree->next_shape_id - 1);
     while (cursor < end) {
         if (cursor->edges && !SINGLE_CHILD_P(cursor->edges)) {
             cursor->edges = rb_gc_location(cursor->edges);
@@ -326,7 +326,7 @@ shape_tree_compact(void *data)
 static size_t
 shape_tree_memsize(const void *data)
 {
-    return GET_SHAPE_TREE()->cache_size * sizeof(redblack_node_t);
+    return rb_shape_tree->cache_size * sizeof(redblack_node_t);
 }

 static const rb_data_type_t shape_tree_type = {
@@ -349,14 +349,14 @@ static inline shape_id_t
 raw_shape_id(rb_shape_t *shape)
 {
     RUBY_ASSERT(shape);
-    return (shape_id_t)(shape - GET_SHAPE_TREE()->shape_list);
+    return (shape_id_t)(shape - rb_shape_tree->shape_list);
 }

 static inline shape_id_t
 shape_id(rb_shape_t *shape, shape_id_t previous_shape_id)
 {
     RUBY_ASSERT(shape);
-    shape_id_t raw_id = (shape_id_t)(shape - GET_SHAPE_TREE()->shape_list);
+    shape_id_t raw_id = (shape_id_t)(shape - rb_shape_tree->shape_list);
     return raw_id | (previous_shape_id & SHAPE_ID_FLAGS_MASK);
 }

@@ -373,7 +373,7 @@ rb_shape_each_shape_id(each_shape_callback callback, void *data)
 {
     rb_shape_t *start = rb_shape_get_root_shape();
     rb_shape_t *cursor = start;
-    rb_shape_t *end = RSHAPE(GET_SHAPE_TREE()->next_shape_id);
+    rb_shape_t *end = RSHAPE(rb_shape_tree->next_shape_id);
     while (cursor < end) {
         callback((shape_id_t)(cursor - start), data);
         cursor += 1;
@@ -383,10 +383,7 @@ rb_shape_each_shape_id(each_shape_callback callback, void *data)
 RUBY_FUNC_EXPORTED rb_shape_t *
 rb_shape_lookup(shape_id_t shape_id)
 {
-    uint32_t offset = (shape_id & SHAPE_ID_OFFSET_MASK);
-    RUBY_ASSERT(offset != INVALID_SHAPE_ID);
-
-    return &GET_SHAPE_TREE()->shape_list[offset];
+    return RSHAPE(shape_id);
 }

 RUBY_FUNC_EXPORTED shape_id_t
@@ -416,14 +413,14 @@ rb_shape_depth(shape_id_t shape_id)
 static rb_shape_t *
 shape_alloc(void)
 {
-    shape_id_t shape_id = (shape_id_t)RUBY_ATOMIC_FETCH_ADD(GET_SHAPE_TREE()->next_shape_id, 1);
+    shape_id_t shape_id = (shape_id_t)RUBY_ATOMIC_FETCH_ADD(rb_shape_tree->next_shape_id, 1);

     if (shape_id == (MAX_SHAPE_ID + 1)) {
         // TODO: Make an OutOfShapesError ??
         rb_bug("Out of shapes");
     }

-    return &GET_SHAPE_TREE()->shape_list[shape_id];
+    return &rb_shape_tree->shape_list[shape_id];
 }

 static rb_shape_t *
@@ -487,7 +484,7 @@ redblack_cache_ancestors(rb_shape_t *shape)
 static attr_index_t
 shape_grow_capa(attr_index_t current_capa)
 {
-    const attr_index_t *capacities = GET_SHAPE_TREE()->capacities;
+    const attr_index_t *capacities = rb_shape_tree->capacities;

     // First try to use the next size that will be embeddable in a larger object slot.
     attr_index_t capa;
@@ -566,7 +563,7 @@ get_next_shape_internal_atomic(rb_shape_t *shape, ID id, enum shape_type shape_t
         if (!res) {
             // If we're not allowed to create a new variation, of if we're out of shapes
             // we return TOO_COMPLEX_SHAPE.
-            if (!new_variations_allowed || GET_SHAPE_TREE()->next_shape_id > MAX_SHAPE_ID) {
+            if (!new_variations_allowed || rb_shape_tree->next_shape_id > MAX_SHAPE_ID) {
                 res = NULL;
             }
             else {
@@ -642,7 +639,7 @@ get_next_shape_internal(rb_shape_t *shape, ID id, enum shape_type shape_type, bo
         if (!res) {
             // If we're not allowed to create a new variation, of if we're out of shapes
             // we return TOO_COMPLEX_SHAPE.
-            if (!new_variations_allowed || GET_SHAPE_TREE()->next_shape_id > MAX_SHAPE_ID) {
+            if (!new_variations_allowed || rb_shape_tree->next_shape_id > MAX_SHAPE_ID) {
                 res = NULL;
             }
             else {
@@ -1239,7 +1236,7 @@ rb_shape_verify_consistency(VALUE obj, shape_id_t shape_id)

     uint8_t flags_heap_index = rb_shape_heap_index(shape_id);
     if (RB_TYPE_P(obj, T_OBJECT)) {
-        size_t shape_id_slot_size = GET_SHAPE_TREE()->capacities[flags_heap_index - 1] * sizeof(VALUE) + sizeof(struct RBasic);
+        size_t shape_id_slot_size = rb_shape_tree->capacities[flags_heap_index - 1] * sizeof(VALUE) + sizeof(struct RBasic);
         size_t actual_slot_size = rb_gc_obj_slot_size(obj);

         if (shape_id_slot_size != actual_slot_size) {
@@ -1389,15 +1386,15 @@ rb_shape_root_shape(VALUE self)
 static VALUE
 rb_shape_shapes_available(VALUE self)
 {
-    return INT2NUM(MAX_SHAPE_ID - (GET_SHAPE_TREE()->next_shape_id - 1));
+    return INT2NUM(MAX_SHAPE_ID - (rb_shape_tree->next_shape_id - 1));
 }

 static VALUE
 rb_shape_exhaust(int argc, VALUE *argv, VALUE self)
 {
     rb_check_arity(argc, 0, 1);
     int offset = argc == 1 ? NUM2INT(argv[0]) : 0;
-    GET_SHAPE_TREE()->next_shape_id = MAX_SHAPE_ID - offset + 1;
+    rb_shape_tree->next_shape_id = MAX_SHAPE_ID - offset + 1;
     return Qnil;
 }

@@ -1453,7 +1450,7 @@ static VALUE
 rb_shape_find_by_id(VALUE mod, VALUE id)
 {
     shape_id_t shape_id = NUM2UINT(id);
-    if (shape_id >= GET_SHAPE_TREE()->next_shape_id) {
+    if (shape_id >= rb_shape_tree->next_shape_id) {
         rb_raise(rb_eArgError, "Shape ID %d is out of bounds\n", shape_id);
     }
     return shape_id_t_to_rb_cShape(shape_id);
@@ -1467,7 +1464,7 @@ rb_shape_find_by_id(VALUE mod, VALUE id)
 void
 Init_default_shapes(void)
 {
-    rb_shape_tree_ptr = xcalloc(1, sizeof(rb_shape_tree_t));
+    rb_shape_tree = xcalloc(1, sizeof(rb_shape_tree_t));

     size_t *heap_sizes = rb_gc_heap_sizes();
     size_t heaps_count = 0;
@@ -1480,23 +1477,23 @@ Init_default_shapes(void)
     for (index = 0; index < heaps_count; index++) {
         capacities[index] = (heap_sizes[index] - sizeof(struct RBasic)) / sizeof(VALUE);
     }
-    GET_SHAPE_TREE()->capacities = capacities;
+    rb_shape_tree->capacities = capacities;

 #ifdef HAVE_MMAP
     size_t shape_list_mmap_size = rb_size_mul_or_raise(SHAPE_BUFFER_SIZE, sizeof(rb_shape_t), rb_eRuntimeError);
-    rb_shape_tree_ptr->shape_list = (rb_shape_t *)mmap(NULL, shape_list_mmap_size,
+    rb_shape_tree->shape_list = (rb_shape_t *)mmap(NULL, shape_list_mmap_size,
                                                        PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
-    if (GET_SHAPE_TREE()->shape_list == MAP_FAILED) {
-        GET_SHAPE_TREE()->shape_list = 0;
+    if (rb_shape_tree->shape_list == MAP_FAILED) {
+        rb_shape_tree->shape_list = 0;
     }
     else {
-        ruby_annotate_mmap(rb_shape_tree_ptr->shape_list, shape_list_mmap_size, "Ruby:Init_default_shapes:shape_list");
+        ruby_annotate_mmap(rb_shape_tree->shape_list, shape_list_mmap_size, "Ruby:Init_default_shapes:shape_list");
     }
 #else
-    GET_SHAPE_TREE()->shape_list = xcalloc(SHAPE_BUFFER_SIZE, sizeof(rb_shape_t));
+    rb_shape_tree->shape_list = xcalloc(SHAPE_BUFFER_SIZE, sizeof(rb_shape_t));
 #endif

-    if (!GET_SHAPE_TREE()->shape_list) {
+    if (!rb_shape_tree->shape_list) {
         rb_memerror();
     }

@@ -1506,19 +1503,19 @@ Init_default_shapes(void)

 #ifdef HAVE_MMAP
     size_t shape_cache_mmap_size = rb_size_mul_or_raise(REDBLACK_CACHE_SIZE, sizeof(redblack_node_t), rb_eRuntimeError);
-    rb_shape_tree_ptr->shape_cache = (redblack_node_t *)mmap(NULL, shape_cache_mmap_size,
+    rb_shape_tree->shape_cache = (redblack_node_t *)mmap(NULL, shape_cache_mmap_size,
                                                              PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
-    rb_shape_tree_ptr->cache_size = 0;
+    rb_shape_tree->cache_size = 0;

     // If mmap fails, then give up on the redblack tree cache.
     // We set the cache size such that the redblack node allocators think
     // the cache is full.
-    if (GET_SHAPE_TREE()->shape_cache == MAP_FAILED) {
-        GET_SHAPE_TREE()->shape_cache = 0;
-        GET_SHAPE_TREE()->cache_size = REDBLACK_CACHE_SIZE;
+    if (rb_shape_tree->shape_cache == MAP_FAILED) {
+        rb_shape_tree->shape_cache = 0;
+        rb_shape_tree->cache_size = REDBLACK_CACHE_SIZE;
     }
     else {
-        ruby_annotate_mmap(rb_shape_tree_ptr->shape_cache, shape_cache_mmap_size, "Ruby:Init_default_shapes:shape_cache");
+        ruby_annotate_mmap(rb_shape_tree->shape_cache, shape_cache_mmap_size, "Ruby:Init_default_shapes:shape_cache");
     }
 #endif

@@ -1529,8 +1526,8 @@ Init_default_shapes(void)
     rb_shape_t *root = rb_shape_alloc_with_parent_id(0, INVALID_SHAPE_ID);
     root->capacity = 0;
     root->type = SHAPE_ROOT;
-    GET_SHAPE_TREE()->root_shape = root;
-    RUBY_ASSERT(raw_shape_id(GET_SHAPE_TREE()->root_shape) == ROOT_SHAPE_ID);
+    rb_shape_tree->root_shape = root;
+    RUBY_ASSERT(raw_shape_id(rb_shape_tree->root_shape) == ROOT_SHAPE_ID);

     rb_shape_t *root_with_obj_id = rb_shape_alloc_with_parent_id(0, ROOT_SHAPE_ID);
     root_with_obj_id->type = SHAPE_OBJ_ID;
@@ -1542,8 +1539,8 @@ Init_default_shapes(void)
 void
 rb_shape_free_all(void)
 {
-    xfree((void *)GET_SHAPE_TREE()->capacities);
-    xfree(GET_SHAPE_TREE());
+    xfree((void *)rb_shape_tree->capacities);
+    xfree(rb_shape_tree);
 }

 void
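Note: every hunk above makes the same mechanical substitution, dropping the GET_SHAPE_TREE() accessor in favor of the rb_shape_tree global that shape.c now defines directly. A minimal sketch of the before/after access pattern, assuming GET_SHAPE_TREE() was a trivial macro over the old rb_shape_tree_ptr global (the real definitions live in shape.h and may differ):

    /* Before (assumed): callers went through a macro indirection. */
    extern rb_shape_tree_t *rb_shape_tree_ptr;
    #define GET_SHAPE_TREE() (rb_shape_tree_ptr)
    shape_id_t next = GET_SHAPE_TREE()->next_shape_id;

    /* After: callers dereference the extern pointer directly. */
    extern rb_shape_tree_t *rb_shape_tree;
    shape_id_t next = rb_shape_tree->next_shape_id;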