diff --git a/compar.c b/compar.c
index c577e4741ffa13..eda7e83824a4e0 100644
--- a/compar.c
+++ b/compar.c
@@ -141,10 +141,15 @@ cmp_lt(VALUE x, VALUE y)
/*
* call-seq:
- * obj <= other -> true or false
+ * self <= other -> true or false
+ *
+ * Returns whether +self+ is "less than or equal to" +other+;
+ * equivalent to (self <=> other) <= 0:
+ *
+ * 'foo' <= 'foo' # => true
+ * 'foo' <= 'food' # => true
+ * 'food' <= 'foo' # => false
*
- * Compares two objects based on the receiver's <=>
- * method, returning true if it returns a value less than or equal to 0.
*/
static VALUE
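
Note: the rewritten Comparable#<= documentation above leans on the equivalence
with (self <=> other) <= 0. As a hedged illustration only (a sketch, not the
actual cmp_le() body in compar.c), that equivalence can be spelled out with
public C-API calls; rb_cmpint() raises when <=> returns nil, which covers the
incomparable case:

    #include <ruby.h>

    /* Sketch only: spells out the documented equivalence, not CRuby's code. */
    static VALUE
    cmp_le_sketch(VALUE x, VALUE y)
    {
        VALUE c = rb_funcall(x, rb_intern("<=>"), 1, y);
        return rb_cmpint(c, x, y) <= 0 ? Qtrue : Qfalse;
    }
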
diff --git a/gc/mmtk/mmtk.c b/gc/mmtk/mmtk.c
index e1678dcf6ab0b4..131aaf38b0fd27 100644
--- a/gc/mmtk/mmtk.c
+++ b/gc/mmtk/mmtk.c
@@ -462,6 +462,13 @@ void rb_gc_impl_set_params(void *objspace_ptr) { }
static VALUE gc_verify_internal_consistency(VALUE self) { return Qnil; }
+#define MMTK_HEAP_COUNT 5
+#define MMTK_MAX_OBJ_SIZE 640
+
+static size_t heap_sizes[MMTK_HEAP_COUNT + 1] = {
+ 40, 80, 160, 320, MMTK_MAX_OBJ_SIZE, 0
+};
+
void
rb_gc_impl_init(void)
{
@@ -469,7 +476,7 @@ rb_gc_impl_init(void)
rb_hash_aset(gc_constants, ID2SYM(rb_intern("BASE_SLOT_SIZE")), SIZET2NUM(sizeof(VALUE) * 5));
rb_hash_aset(gc_constants, ID2SYM(rb_intern("RBASIC_SIZE")), SIZET2NUM(sizeof(struct RBasic)));
rb_hash_aset(gc_constants, ID2SYM(rb_intern("RVALUE_OVERHEAD")), INT2NUM(0));
- rb_hash_aset(gc_constants, ID2SYM(rb_intern("RVARGC_MAX_ALLOCATE_SIZE")), LONG2FIX(640));
+ rb_hash_aset(gc_constants, ID2SYM(rb_intern("RVARGC_MAX_ALLOCATE_SIZE")), LONG2FIX(MMTK_MAX_OBJ_SIZE));
// Pretend we have 5 size pools
rb_hash_aset(gc_constants, ID2SYM(rb_intern("SIZE_POOL_COUNT")), LONG2FIX(5));
OBJ_FREEZE(gc_constants);
@@ -485,10 +492,6 @@ rb_gc_impl_init(void)
rb_define_singleton_method(rb_mGC, "verify_compaction_references", rb_f_notimplement, -1);
}
-static size_t heap_sizes[6] = {
- 40, 80, 160, 320, 640, 0
-};
-
size_t *
rb_gc_impl_heap_sizes(void *objspace_ptr)
{
@@ -609,8 +612,8 @@ rb_gc_impl_new_obj(void *objspace_ptr, void *cache_ptr, VALUE klass, VALUE flags
struct objspace *objspace = objspace_ptr;
struct MMTk_ractor_cache *ractor_cache = cache_ptr;
- if (alloc_size > 640) rb_bug("too big");
- for (int i = 0; i < 5; i++) {
+ if (alloc_size > MMTK_MAX_OBJ_SIZE) rb_bug("too big");
+ for (int i = 0; i < MMTK_HEAP_COUNT; i++) {
if (alloc_size == heap_sizes[i]) break;
if (alloc_size < heap_sizes[i]) {
alloc_size = heap_sizes[i];
@@ -658,7 +661,7 @@ rb_gc_impl_heap_id_for_size(void *objspace_ptr, size_t size)
bool
rb_gc_impl_size_allocatable_p(size_t size)
{
- return size <= 640;
+ return size <= MMTK_MAX_OBJ_SIZE;
}
// Malloc
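
Note: the mmtk.c hunks replace the hard-coded 640 and 5 with MMTK_MAX_OBJ_SIZE
and MMTK_HEAP_COUNT and hoist heap_sizes above rb_gc_impl_init(), where the
constants are first needed. The allocation path rounds each request up to the
next size class; the standalone sketch below (names mirror the patch, the MMTk
allocator calls themselves are omitted) shows that rounding:

    #include <assert.h>
    #include <stddef.h>

    #define MMTK_HEAP_COUNT   5
    #define MMTK_MAX_OBJ_SIZE 640

    /* Same size classes as the patch: five pools plus a 0 terminator. */
    static size_t heap_sizes[MMTK_HEAP_COUNT + 1] = {
        40, 80, 160, 320, MMTK_MAX_OBJ_SIZE, 0
    };

    /* Sketch of the rounding in rb_gc_impl_new_obj(): bump a request up to
     * the smallest size class that can hold it, e.g. 48 -> 80, 320 -> 320. */
    static size_t
    round_up_to_size_class(size_t alloc_size)
    {
        assert(alloc_size <= MMTK_MAX_OBJ_SIZE);  /* the patch calls rb_bug() instead */
        for (int i = 0; i < MMTK_HEAP_COUNT; i++) {
            if (alloc_size <= heap_sizes[i]) return heap_sizes[i];
        }
        return MMTK_MAX_OBJ_SIZE;                 /* unreachable given the assert */
    }

With the same bound, rb_gc_impl_size_allocatable_p() reduces to the
size <= MMTK_MAX_OBJ_SIZE check shown in the last mmtk.c hunk.
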
diff --git a/hash.c b/hash.c
index 9e1555518ec037..3669f55d5024d0 100644
--- a/hash.c
+++ b/hash.c
@@ -4888,10 +4888,9 @@ hash_le(VALUE hash1, VALUE hash2)
/*
* call-seq:
- * self <= other_hash -> true or false
+ * self <= other -> true or false
*
- * Returns +true+ if the entries of +self+ are a subset of the entries of +other_hash+,
- * +false+ otherwise:
+ * Returns whether the entries of +self+ are a subset of the entries of +other+:
*
* h0 = {foo: 0, bar: 1}
* h1 = {foo: 0, bar: 1, baz: 2}
diff --git a/numeric.c b/numeric.c
index 3e770ceed3bc88..a71060c55db252 100644
--- a/numeric.c
+++ b/numeric.c
@@ -1738,7 +1738,8 @@ flo_lt(VALUE x, VALUE y)
* call-seq:
* self <= other -> true or false
*
- * Returns +true+ if +self+ is numerically less than or equal to +other+:
+ * Returns whether the value of +self+ is less than or equal to the value of +other+;
+ * +other+ must be numeric, but not a Complex:
*
* 2.0 <= 3 # => true
* 2.0 <= 3.0 # => true
@@ -5081,10 +5082,10 @@ fix_le(VALUE x, VALUE y)
/*
* call-seq:
- * self <= real -> true or false
+ * self <= other -> true or false
*
- * Returns +true+ if the value of +self+ is less than or equal to
- * that of +other+:
+ * Returns whether the value of +self+ is less than or equal to the value of +other+;
+ * +other+ must be numeric, but not a Complex:
*
* 1 <= 0 # => false
* 1 <= 1 # => true
diff --git a/string.c b/string.c
index 9819a5910fa76e..b11b441ac58536 100644
--- a/string.c
+++ b/string.c
@@ -4294,23 +4294,26 @@ rb_str_eql(VALUE str1, VALUE str2)
/*
* call-seq:
- * self <=> other_string -> -1, 0, 1, or nil
+ * self <=> other -> -1, 0, 1, or nil
*
- * Compares +self+ and +other_string+, returning:
+ * Compares +self+ and +other+,
+ * evaluating their _contents_, not their _lengths_.
*
- * - -1 if +other_string+ is larger.
- * - 0 if the two are equal.
- * - 1 if +other_string+ is smaller.
- * - +nil+ if the two are incomparable.
+ * Returns:
+ *
+ * - +-1+, if +self+ is smaller.
+ * - +0+, if the two are equal.
+ * - +1+, if +self+ is larger.
+ * - +nil+, if the two are incomparable.
*
* Examples:
*
- * 'foo' <=> 'foo' # => 0
- * 'foo' <=> 'food' # => -1
- * 'food' <=> 'foo' # => 1
- * 'FOO' <=> 'foo' # => -1
- * 'foo' <=> 'FOO' # => 1
- * 'foo' <=> 1 # => nil
+ * 'a' <=> 'b' # => -1
+ * 'a' <=> 'ab' # => -1
+ * 'a' <=> 'a' # => 0
+ * 'b' <=> 'a' # => 1
+ * 'ab' <=> 'a' # => 1
+ * 'a' <=> :a # => nil
*
* Related: see {Comparing}[rdoc-ref:String@Comparing].
*/
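
Note: the String#<=> rewrite stresses contents over lengths; length only
decides when one string is a proper prefix of the other ('a' <=> 'ab' is -1,
but 'b' <=> 'ab' is 1 even though 'b' is shorter). A hedged byte-level sketch
of that rule (not CRuby's rb_str_cmp(), which also has to account for
encodings):

    #include <string.h>

    /* Sketch only: lexicographic comparison matching the documented behavior.
     * Bytes of the common prefix decide first; length breaks a tie only when
     * one string is a prefix of the other. */
    static int
    str_cmp_sketch(const char *a, size_t alen, const char *b, size_t blen)
    {
        size_t min = alen < blen ? alen : blen;
        int r = memcmp(a, b, min);              /* contents first */
        if (r != 0) return r < 0 ? -1 : 1;
        if (alen == blen) return 0;             /* same bytes, same length */
        return alen < blen ? -1 : 1;            /* shorter proper prefix sorts first */
    }
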
diff --git a/test/fiber/test_scheduler.rb b/test/fiber/test_scheduler.rb
index 4a8b4ee62d8e49..c20fe86ff4531d 100644
--- a/test/fiber/test_scheduler.rb
+++ b/test/fiber/test_scheduler.rb
@@ -287,33 +287,36 @@ def test_post_fork_fiber_blocking
end
def test_io_write_on_flush
- fn = File.join(Dir.tmpdir, "ruby_test_io_write_on_flush_#{SecureRandom.hex}")
- write_fd = nil
- io_ops = nil
- thread = Thread.new do
- scheduler = IOScheduler.new
- Fiber.set_scheduler scheduler
+ omit "skip this test because it makes CI fragile"
+ begin
+ fn = File.join(Dir.tmpdir, "ruby_test_io_write_on_flush_#{SecureRandom.hex}")
+ write_fd = nil
+ io_ops = nil
+ thread = Thread.new do
+ scheduler = IOScheduler.new
+ Fiber.set_scheduler scheduler
- Fiber.schedule do
- File.open(fn, 'w+') do |f|
- write_fd = f.fileno
- f << 'foo'
- f.flush
- f << 'bar'
+ Fiber.schedule do
+ File.open(fn, 'w+') do |f|
+ write_fd = f.fileno
+ f << 'foo'
+ f.flush
+ f << 'bar'
+ end
end
+ io_ops = scheduler.__io_ops__
end
- io_ops = scheduler.__io_ops__
- end
- thread.join
- assert_equal [
- [:io_write, write_fd, 'foo'],
- [:io_write, write_fd, 'bar']
- ], io_ops
+ thread.join
+ assert_equal [
+ [:io_write, write_fd, 'foo'],
+ [:io_write, write_fd, 'bar']
+ ], io_ops
- assert_equal 'foobar', IO.read(fn)
- ensure
- thread.kill rescue nil
- FileUtils.rm_f(fn)
+ assert_equal 'foobar', IO.read(fn)
+ ensure
+ thread.kill rescue nil
+ FileUtils.rm_f(fn)
+ end
end
def test_io_read_error
diff --git a/vm_method.c b/vm_method.c
index 2a6323e59300b5..2b3ac74d573434 100644
--- a/vm_method.c
+++ b/vm_method.c
@@ -149,10 +149,21 @@ vm_cc_table_dup_i(ID key, VALUE old_ccs_ptr, void *data)
{
VALUE new_table = (VALUE)data;
struct rb_class_cc_entries *old_ccs = (struct rb_class_cc_entries *)old_ccs_ptr;
+
+ if (METHOD_ENTRY_INVALIDATED(old_ccs->cme)) {
+ // Invalidated CME. This entry will be removed from the old table on
+ // the next GC mark, so it's unsafe (and undesirable) to copy it.
+ return ID_TABLE_CONTINUE;
+ }
+
size_t memsize = vm_ccs_alloc_size(old_ccs->capa);
struct rb_class_cc_entries *new_ccs = ruby_xcalloc(1, memsize);
rb_managed_id_table_insert(new_table, key, (VALUE)new_ccs);
+ // We hold the VM lock, so invalidation should not have happened between
+ // our earlier invalidation check and now.
+ VM_ASSERT(!METHOD_ENTRY_INVALIDATED(old_ccs->cme));
+
memcpy(new_ccs, old_ccs, memsize);
#if VM_CHECK_MODE > 0
@@ -169,6 +180,7 @@ vm_cc_table_dup_i(ID key, VALUE old_ccs_ptr, void *data)
VALUE
rb_vm_cc_table_dup(VALUE old_table)
{
+ ASSERT_vm_locking();
VALUE new_table = rb_vm_cc_table_create(rb_managed_id_table_size(old_table));
rb_managed_id_table_foreach(old_table, vm_cc_table_dup_i, (void *)new_table);
return new_table;
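
Note: the vm_method.c change skips entries whose CME is already invalidated
while duplicating the CC table, and the new ASSERT_vm_locking() documents that
the whole copy runs under the VM lock, so an entry cannot become invalidated
between the check and the memcpy. A standalone, hedged sketch of that
check-skip-copy pattern (all names below are hypothetical, not CRuby's
managed-id-table API):

    #include <stdbool.h>
    #include <stdlib.h>
    #include <string.h>

    /* Hypothetical stand-in for struct rb_class_cc_entries. */
    struct entry {
        bool invalidated;
        char payload[64];
    };

    /* Copy live entries of src into freshly allocated slots of dst, skipping
     * anything already invalidated (the ID_TABLE_CONTINUE early return above).
     * The caller is assumed to hold the lock that serializes invalidation, so
     * an entry cannot flip between the check and the copy; the VM_ASSERT in
     * the patch re-checks exactly that. Returns how many entries were copied. */
    static size_t
    dup_live_entries(struct entry *const src[], size_t n, struct entry *dst[])
    {
        size_t copied = 0;
        for (size_t i = 0; i < n; i++) {
            if (src[i]->invalidated) continue;    /* stale: do not carry it over */
            struct entry *e = calloc(1, sizeof(*e));
            if (!e) abort();
            memcpy(e, src[i], sizeof(*e));
            dst[copied++] = e;
        }
        return copied;
    }
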