@@ -48,8 +48,8 @@ redblack_left(redblack_node_t *node)
         return LEAF;
     }
     else {
-        RUBY_ASSERT(node->l < rb_shape_tree->cache_size);
-        redblack_node_t *left = &rb_shape_tree->shape_cache[node->l - 1];
+        RUBY_ASSERT(node->l < rb_shape_tree.cache_size);
+        redblack_node_t *left = &rb_shape_tree.shape_cache[node->l - 1];
         return left;
     }
 }
@@ -61,8 +61,8 @@ redblack_right(redblack_node_t *node)
         return LEAF;
     }
     else {
-        RUBY_ASSERT(node->r < rb_shape_tree->cache_size);
-        redblack_node_t *right = &rb_shape_tree->shape_cache[node->r - 1];
+        RUBY_ASSERT(node->r < rb_shape_tree.cache_size);
+        redblack_node_t *right = &rb_shape_tree.shape_cache[node->r - 1];
         return right;
     }
 }
@@ -120,7 +120,7 @@ redblack_id_for(redblack_node_t *node)
         return 0;
     }
     else {
-        redblack_node_t *redblack_nodes = rb_shape_tree->shape_cache;
+        redblack_node_t *redblack_nodes = rb_shape_tree.shape_cache;
         redblack_id_t id = (redblack_id_t)(node - redblack_nodes);
         return id + 1;
     }
@@ -129,16 +129,16 @@ redblack_id_for(redblack_node_t *node)
 static redblack_node_t *
 redblack_new(char color, ID key, rb_shape_t *value, redblack_node_t *left, redblack_node_t *right)
 {
-    if (rb_shape_tree->cache_size + 1 >= REDBLACK_CACHE_SIZE) {
+    if (rb_shape_tree.cache_size + 1 >= REDBLACK_CACHE_SIZE) {
         // We're out of cache, just quit
         return LEAF;
     }

     RUBY_ASSERT(left == LEAF || left->key < key);
     RUBY_ASSERT(right == LEAF || right->key > key);

-    redblack_node_t *redblack_nodes = rb_shape_tree->shape_cache;
-    redblack_node_t *node = &redblack_nodes[(rb_shape_tree->cache_size)++];
+    redblack_node_t *redblack_nodes = rb_shape_tree.shape_cache;
+    redblack_node_t *node = &redblack_nodes[(rb_shape_tree.cache_size)++];
     node->key = key;
     node->value = (rb_shape_t *)((uintptr_t)value | color);
     node->l = redblack_id_for(left);
@@ -288,20 +288,20 @@ redblack_insert(redblack_node_t *tree, ID key, rb_shape_t *value)
 }
 #endif

-rb_shape_tree_t *rb_shape_tree = NULL;
+rb_shape_tree_t rb_shape_tree = { 0 };
 static VALUE shape_tree_obj = Qfalse;

 rb_shape_t *
 rb_shape_get_root_shape(void)
 {
-    return rb_shape_tree->root_shape;
+    return rb_shape_tree.root_shape;
 }

 static void
 shape_tree_mark(void *data)
 {
     rb_shape_t *cursor = rb_shape_get_root_shape();
-    rb_shape_t *end = RSHAPE(rb_shape_tree->next_shape_id - 1);
+    rb_shape_t *end = RSHAPE(rb_shape_tree.next_shape_id - 1);
     while (cursor < end) {
         if (cursor->edges && !SINGLE_CHILD_P(cursor->edges)) {
             rb_gc_mark_movable(cursor->edges);
@@ -314,7 +314,7 @@ static void
 shape_tree_compact(void *data)
 {
     rb_shape_t *cursor = rb_shape_get_root_shape();
-    rb_shape_t *end = RSHAPE(rb_shape_tree->next_shape_id - 1);
+    rb_shape_t *end = RSHAPE(rb_shape_tree.next_shape_id - 1);
     while (cursor < end) {
         if (cursor->edges && !SINGLE_CHILD_P(cursor->edges)) {
             cursor->edges = rb_gc_location(cursor->edges);
@@ -326,7 +326,7 @@ shape_tree_compact(void *data)
 static size_t
 shape_tree_memsize(const void *data)
 {
-    return rb_shape_tree->cache_size * sizeof(redblack_node_t);
+    return rb_shape_tree.cache_size * sizeof(redblack_node_t);
 }

 static const rb_data_type_t shape_tree_type = {
@@ -349,14 +349,14 @@ static inline shape_id_t
 raw_shape_id(rb_shape_t *shape)
 {
     RUBY_ASSERT(shape);
-    return (shape_id_t)(shape - rb_shape_tree->shape_list);
+    return (shape_id_t)(shape - rb_shape_tree.shape_list);
 }

 static inline shape_id_t
 shape_id(rb_shape_t *shape, shape_id_t previous_shape_id)
 {
     RUBY_ASSERT(shape);
-    shape_id_t raw_id = (shape_id_t)(shape - rb_shape_tree->shape_list);
+    shape_id_t raw_id = (shape_id_t)(shape - rb_shape_tree.shape_list);
     return raw_id | (previous_shape_id & SHAPE_ID_FLAGS_MASK);
 }

@@ -373,7 +373,7 @@ rb_shape_each_shape_id(each_shape_callback callback, void *data)
 {
     rb_shape_t *start = rb_shape_get_root_shape();
     rb_shape_t *cursor = start;
-    rb_shape_t *end = RSHAPE(rb_shape_tree->next_shape_id);
+    rb_shape_t *end = RSHAPE(rb_shape_tree.next_shape_id);
     while (cursor < end) {
         callback((shape_id_t)(cursor - start), data);
         cursor += 1;
@@ -414,14 +414,14 @@ rb_shape_depth(shape_id_t shape_id)
 static rb_shape_t *
 shape_alloc(void)
 {
-    shape_id_t shape_id = (shape_id_t)RUBY_ATOMIC_FETCH_ADD(rb_shape_tree->next_shape_id, 1);
+    shape_id_t shape_id = (shape_id_t)RUBY_ATOMIC_FETCH_ADD(rb_shape_tree.next_shape_id, 1);

     if (shape_id == (MAX_SHAPE_ID + 1)) {
         // TODO: Make an OutOfShapesError ??
         rb_bug("Out of shapes");
     }

-    return &rb_shape_tree->shape_list[shape_id];
+    return &rb_shape_tree.shape_list[shape_id];
 }

 static rb_shape_t *
@@ -485,7 +485,7 @@ redblack_cache_ancestors(rb_shape_t *shape)
 static attr_index_t
 shape_grow_capa(attr_index_t current_capa)
 {
-    const attr_index_t *capacities = rb_shape_tree->capacities;
+    const attr_index_t *capacities = rb_shape_tree.capacities;

     // First try to use the next size that will be embeddable in a larger object slot.
     attr_index_t capa;
@@ -564,7 +564,7 @@ get_next_shape_internal_atomic(rb_shape_t *shape, ID id, enum shape_type shape_t
     if (!res) {
         // If we're not allowed to create a new variation, of if we're out of shapes
         // we return TOO_COMPLEX_SHAPE.
-        if (!new_variations_allowed || rb_shape_tree->next_shape_id > MAX_SHAPE_ID) {
+        if (!new_variations_allowed || rb_shape_tree.next_shape_id > MAX_SHAPE_ID) {
             res = NULL;
         }
         else {
@@ -640,7 +640,7 @@ get_next_shape_internal(rb_shape_t *shape, ID id, enum shape_type shape_type, bo
     if (!res) {
         // If we're not allowed to create a new variation, of if we're out of shapes
         // we return TOO_COMPLEX_SHAPE.
-        if (!new_variations_allowed || rb_shape_tree->next_shape_id > MAX_SHAPE_ID) {
+        if (!new_variations_allowed || rb_shape_tree.next_shape_id > MAX_SHAPE_ID) {
            res = NULL;
        }
        else {
@@ -1238,7 +1238,7 @@ rb_shape_verify_consistency(VALUE obj, shape_id_t shape_id)

     uint8_t flags_heap_index = rb_shape_heap_index(shape_id);
     if (RB_TYPE_P(obj, T_OBJECT)) {
-        size_t shape_id_slot_size = rb_shape_tree->capacities[flags_heap_index - 1] * sizeof(VALUE) + sizeof(struct RBasic);
+        size_t shape_id_slot_size = rb_shape_tree.capacities[flags_heap_index - 1] * sizeof(VALUE) + sizeof(struct RBasic);
         size_t actual_slot_size = rb_gc_obj_slot_size(obj);

         if (shape_id_slot_size != actual_slot_size) {
@@ -1388,15 +1388,15 @@ rb_shape_root_shape(VALUE self)
 static VALUE
 rb_shape_shapes_available(VALUE self)
 {
-    return INT2NUM(MAX_SHAPE_ID - (rb_shape_tree->next_shape_id - 1));
+    return INT2NUM(MAX_SHAPE_ID - (rb_shape_tree.next_shape_id - 1));
 }

 static VALUE
 rb_shape_exhaust(int argc, VALUE *argv, VALUE self)
 {
     rb_check_arity(argc, 0, 1);
     int offset = argc == 1 ? NUM2INT(argv[0]) : 0;
-    rb_shape_tree->next_shape_id = MAX_SHAPE_ID - offset + 1;
+    rb_shape_tree.next_shape_id = MAX_SHAPE_ID - offset + 1;
     return Qnil;
 }

@@ -1452,7 +1452,7 @@ static VALUE
 rb_shape_find_by_id(VALUE mod, VALUE id)
 {
     shape_id_t shape_id = NUM2UINT(id);
-    if (shape_id >= rb_shape_tree->next_shape_id) {
+    if (shape_id >= rb_shape_tree.next_shape_id) {
         rb_raise(rb_eArgError, "Shape ID %d is out of bounds\n", shape_id);
     }
     return shape_id_t_to_rb_cShape(shape_id);
@@ -1466,8 +1466,6 @@ rb_shape_find_by_id(VALUE mod, VALUE id)
 void
 Init_default_shapes(void)
 {
-    rb_shape_tree = xcalloc(1, sizeof(rb_shape_tree_t));
-
     size_t *heap_sizes = rb_gc_heap_sizes();
     size_t heaps_count = 0;
     while (heap_sizes[heaps_count]) {
@@ -1479,23 +1477,23 @@ Init_default_shapes(void)
     for (index = 0; index < heaps_count; index++) {
         capacities[index] = (heap_sizes[index] - sizeof(struct RBasic)) / sizeof(VALUE);
     }
-    rb_shape_tree->capacities = capacities;
+    rb_shape_tree.capacities = capacities;

 #ifdef HAVE_MMAP
     size_t shape_list_mmap_size = rb_size_mul_or_raise(SHAPE_BUFFER_SIZE, sizeof(rb_shape_t), rb_eRuntimeError);
-    rb_shape_tree->shape_list = (rb_shape_t *)mmap(NULL, shape_list_mmap_size,
+    rb_shape_tree.shape_list = (rb_shape_t *)mmap(NULL, shape_list_mmap_size,
                                                    PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
-    if (rb_shape_tree->shape_list == MAP_FAILED) {
-        rb_shape_tree->shape_list = 0;
+    if (rb_shape_tree.shape_list == MAP_FAILED) {
+        rb_shape_tree.shape_list = 0;
     }
     else {
-        ruby_annotate_mmap(rb_shape_tree->shape_list, shape_list_mmap_size, "Ruby:Init_default_shapes:shape_list");
+        ruby_annotate_mmap(rb_shape_tree.shape_list, shape_list_mmap_size, "Ruby:Init_default_shapes:shape_list");
     }
 #else
-    rb_shape_tree->shape_list = xcalloc(SHAPE_BUFFER_SIZE, sizeof(rb_shape_t));
+    rb_shape_tree.shape_list = xcalloc(SHAPE_BUFFER_SIZE, sizeof(rb_shape_t));
 #endif

-    if (!rb_shape_tree->shape_list) {
+    if (!rb_shape_tree.shape_list) {
         rb_memerror();
     }
@@ -1505,19 +1503,19 @@ Init_default_shapes(void)

 #ifdef HAVE_MMAP
     size_t shape_cache_mmap_size = rb_size_mul_or_raise(REDBLACK_CACHE_SIZE, sizeof(redblack_node_t), rb_eRuntimeError);
-    rb_shape_tree->shape_cache = (redblack_node_t *)mmap(NULL, shape_cache_mmap_size,
+    rb_shape_tree.shape_cache = (redblack_node_t *)mmap(NULL, shape_cache_mmap_size,
                                                          PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
-    rb_shape_tree->cache_size = 0;
+    rb_shape_tree.cache_size = 0;

     // If mmap fails, then give up on the redblack tree cache.
     // We set the cache size such that the redblack node allocators think
     // the cache is full.
-    if (rb_shape_tree->shape_cache == MAP_FAILED) {
-        rb_shape_tree->shape_cache = 0;
-        rb_shape_tree->cache_size = REDBLACK_CACHE_SIZE;
+    if (rb_shape_tree.shape_cache == MAP_FAILED) {
+        rb_shape_tree.shape_cache = 0;
+        rb_shape_tree.cache_size = REDBLACK_CACHE_SIZE;
     }
     else {
-        ruby_annotate_mmap(rb_shape_tree->shape_cache, shape_cache_mmap_size, "Ruby:Init_default_shapes:shape_cache");
+        ruby_annotate_mmap(rb_shape_tree.shape_cache, shape_cache_mmap_size, "Ruby:Init_default_shapes:shape_cache");
     }
 #endif

@@ -1528,8 +1526,8 @@ Init_default_shapes(void)
     rb_shape_t *root = rb_shape_alloc_with_parent_id(0, INVALID_SHAPE_ID);
     root->capacity = 0;
     root->type = SHAPE_ROOT;
-    rb_shape_tree->root_shape = root;
-    RUBY_ASSERT(raw_shape_id(rb_shape_tree->root_shape) == ROOT_SHAPE_ID);
+    rb_shape_tree.root_shape = root;
+    RUBY_ASSERT(raw_shape_id(rb_shape_tree.root_shape) == ROOT_SHAPE_ID);

     rb_shape_t *root_with_obj_id = rb_shape_alloc_with_parent_id(0, ROOT_SHAPE_ID);
     root_with_obj_id->type = SHAPE_OBJ_ID;
@@ -1541,8 +1539,7 @@ Init_default_shapes(void)
 void
 rb_shape_free_all(void)
 {
-    xfree((void *)rb_shape_tree->capacities);
-    xfree(rb_shape_tree);
+    xfree((void *)rb_shape_tree.capacities);
 }

 void