8000 shape.c: Implement a lock-free version of get_next_shape_internal · ruby/ruby@8246a67 · GitHub
[go: up one dir, main page]

Skip to content

Commit 8246a67

Browse files
byroot and etiennebarrie committed
shape.c: Implement a lock-free version of get_next_shape_internal
Whenever we run into an inline cache miss when we try to set an ivar, we may need to take the global lock, just to be able to look up inside `shape->edges`.

To solve that, when we're in multi-ractor mode, we can treat the `shape->edges` as immutable. When we need to add a new edge, we first copy the table, and then replace it with CAS. This increases memory allocations; however, we expect that creating new transitions becomes increasingly rare over time.

```ruby
class A
  def initialize(bool)
    @a = 1
    if bool
      @b = 2
    else
      @c = 3
    end
  end

  def test
    @d = 4
  end
end

def bench(iterations)
  i = iterations
  while i > 0
    A.new(true).test
    A.new(false).test
    i -= 1
  end
end

if ARGV.first == "ractor"
  ractors = 8.times.map do
    Ractor.new do
      bench(20_000_000 / 8)
    end
  end
  ractors.each(&:take)
else
  bench(20_000_000)
end
```

The above benchmark takes 27 seconds in Ractor mode on Ruby 3.4, and only 1.7s with this branch.

Co-Authored-By: Étienne Barrié <etienne.barrie@gmail.com>
1 parent f483bef commit 8246a67

File tree

8 files changed

+305
-106
lines changed

8 files changed

+305
-106
lines changed

id_table.c

Lines changed: 101 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -80,9 +80,10 @@ round_capa(int capa)
8080
return (capa + 1) << 2;
8181
}
8282

83-
static struct rb_id_table *
84-
rb_id_table_init(struct rb_id_table *tbl, int capa)
83+
struct rb_id_table *
84+
rb_id_table_init(struct rb_id_table *tbl, size_t s_capa)
8585
{
86+
int capa = (int)s_capa;
8687
MEMZERO(tbl, struct rb_id_table, 1);
8788
if (capa > 0) {
8889
capa = round_capa(capa);
@@ -96,7 +97,13 @@ struct rb_id_table *
9697
rb_id_table_create(size_t capa)
9798
{
9899
struct rb_id_table *tbl = ALLOC(struct rb_id_table);
99-
return rb_id_table_init(tbl, (int)capa);
100+
return rb_id_table_init(tbl, capa);
101+
}
102+
103+
void
104+
rb_id_table_free_items(struct rb_id_table *tbl)
105+
{
106+
xfree(tbl->items);
100107
}
101108

102109
void
@@ -324,3 +331,94 @@ rb_id_table_foreach_values_with_replace(struct rb_id_table *tbl, rb_id_table_for
324331
}
325332
}
326333

/* dfree callback for managed_id_table_type: the rb_id_table struct is
 * embedded in the TypedData object, so only the items array is freed here. */
static void
managed_id_table_free(void *data)
{
    rb_id_table_free_items((struct rb_id_table *)data);
}
340+
341+
static size_t
342+
managed_id_table_memsize(const void *data)
343+
{
344+
const struct rb_id_table *tbl = (const struct rb_id_table *)data;
345+
return rb_id_table_memsize(tbl) - sizeof(struct rb_id_table);
346+
}
347+
348+
static const rb_data_type_t managed_id_table_type = {
349+
.wrap_struct_name = "VM/managed_id_table",
350+
.function = {
351+
.dmark = NULL, // Nothing to mark
352+
.dfree = (RUBY_DATA_FUNC)managed_id_table_free,
353+
.dsize = managed_id_table_memsize,
354+
},
355+
.flags = RUBY_TYPED_FREE_IMMEDIATELY | RUBY_TYPED_WB_PROTECTED | RUBY_TYPED_EMBEDDABLE,
356+
};
357+
358+
static inline struct rb_id_table *
359+
managed_id_table_ptr(VALUE obj)
360+
{
361+
return RTYPEDDATA_GET_DATA(obj);
362+
}
363+
364+
VALUE
365+
rb_managed_id_table_new(size_t capa)
366+
{
367+
struct rb_id_table *tbl;
368+
VALUE obj = TypedData_Make_Struct(0, struct rb_id_table, &managed_id_table_type, tbl);
369+
rb_id_table_init(tbl, capa);
370+
return obj;
371+
}
372+
373+
static enum rb_id_table_iterator_result
374+
managed_id_table_dup_i(ID id, VALUE val, void *data)
375+
{
376+
struct rb_id_table *new_tbl = (struct rb_id_table *)data;
377+
rb_id_table_insert(new_tbl, id, val);
378+
return ID_TABLE_CONTINUE;
379+
}
380+
381+
VALUE
382+
rb_managed_id_table_dup(VALUE old_table)
383+
{
384+
RUBY_ASSERT(rb_typeddata_inherited_p(RTYPEDDATA_TYPE(old_table), &managed_id_table_type));
385+
386+
struct rb_id_table *new_tbl;
387+
VALUE obj = TypedData_Make_Struct(0, struct rb_id_table, &managed_id_table_type, new_tbl);
388+
struct rb_id_table *old_tbl = RTYPEDDATA_GET_DATA(old_table);
389+
rb_id_table_init(new_tbl, old_tbl->num + 1);
390+
rb_id_table_foreach(old_tbl, managed_id_table_dup_i, new_tbl);
391+
return obj;
392+
}
393+
394+
int
395+
rb_managed_id_table_lookup(VALUE table, ID id, VALUE *valp)
396+
{
397+
RUBY_ASSERT(rb_typeddata_inherited_p(RTYPEDDATA_TYPE(table), &managed_id_table_type));
398+
399+
return rb_id_table_lookup(RTYPEDDATA_GET_DATA(table), id, valp);
400+
}
401+
402+
int
403+
rb_managed_id_table_insert(VALUE table, ID id, VALUE val)
404+
{
405+
RUBY_ASSERT(rb_typeddata_inherited_p(RTYPEDDATA_TYPE(table), &managed_id_table_type));
406+
407+
return rb_id_table_insert(RTYPEDDATA_GET_DATA(table), id, val);
408+
}
409+
410+
size_t
411+
rb_managed_id_table_size(VALUE table)
412+
{
413+
RUBY_ASSERT(rb_typeddata_inherited_p(RTYPEDDATA_TYPE(table), &managed_id_table_type));
414+
415+
return rb_id_table_size(RTYPEDDATA_GET_DATA(table));
416+
}
417+
418+
void
419+
rb_managed_id_table_foreach(VALUE table, rb_id_table_foreach_func_t *func, void *data)
420+
{
421+
RUBY_ASSERT(rb_typeddata_inherited_p(RTYPEDDATA_TYPE(table), &managed_id_table_type));
422+
423+
rb_id_table_foreach(RTYPEDDATA_GET_DATA(table), func, data);
424+
}

id_table.h

Lines changed: 10 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -16,7 +16,10 @@ enum rb_id_table_iterator_result {
1616
};
1717

1818
struct rb_id_table *rb_id_table_create(size_t size);
19+
struct rb_id_table *rb_id_table_init(struct rb_id_table *tbl, size_t capa);
20+
1921
void rb_id_table_free(struct rb_id_table *tbl);
22+
void rb_id_table_free_items(struct rb_id_table *tbl);
2023
void rb_id_table_clear(struct rb_id_table *tbl);
2124

2225
size_t rb_id_table_memsize(const struct rb_id_table *tbl);
@@ -32,6 +35,13 @@ void rb_id_table_foreach(struct rb_id_table *tbl, rb_id_table_foreach_func_t *fu
3235
void rb_id_table_foreach_values(struct rb_id_table *tbl, rb_id_table_foreach_values_func_t *func, void *data);
3336
void rb_id_table_foreach_values_with_replace(struct rb_id_table *tbl, rb_id_table_foreach_values_func_t *func, rb_id_table_update_value_callback_func_t *replace, void *data);
3437

38+
VALUE rb_managed_id_table_new(size_t capa);
39+
VALUE rb_managed_id_table_dup(VALUE table);
40+
int rb_managed_id_table_insert(VALUE table, ID id, VALUE val);
41+
int rb_managed_id_table_lookup(VALUE table, ID id, VALUE *valp);
42+
size_t rb_managed_id_table_size(VALUE table);
43+
void rb_managed_id_table_foreach(VALUE table, rb_id_table_foreach_func_t *func, void *data);
44+
3545
RUBY_SYMBOL_EXPORT_BEGIN
3646
size_t rb_id_table_size(const struct rb_id_table *tbl);
3747
RUBY_SYMBOL_EXPORT_END

include/ruby/internal/core/rtypeddata.h

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -471,8 +471,7 @@ RBIMPL_SYMBOL_EXPORT_END()
471471
/**
472472
* Identical to #TypedData_Wrap_Struct, except it allocates a new data region
473473
* internally instead of taking an existing one. The allocation is done using
474-
* ruby_calloc(). Hence it makes no sense for `data_type->function.dfree` to
475-
* be anything other than ::RUBY_TYPED_DEFAULT_FREE.
474+
* ruby_calloc().
476475
*
477476
* @param klass Ruby level class of the object.
478477
* @param type Type name of the C struct.

0 commit comments

Comments
 (0)
0