From 80ff7cd35ad35e6518b539f4eb2517928c2f8945 Mon Sep 17 00:00:00 2001 From: Ian Lance Taylor Date: Tue, 27 Aug 2024 10:19:17 -0700 Subject: [PATCH 1/9] [release-branch.go1.23] cmd/cgo: correct padding required by alignment If the aligned offset isn't sufficient for the field offset, we were padding based on the aligned offset. We need to pad based on the original offset instead. Also set the Go alignment correctly for int128. We were defaulting to the maximum alignment, but since we translate int128 into an array of uint8 the correct Go alignment is 1. For #69086 Fixes #69219 Change-Id: I23ce583335c81beac2ac51f7f9336ac97ccebf09 Reviewed-on: https://go-review.googlesource.com/c/go/+/608815 Reviewed-by: Damien Neil LUCI-TryBot-Result: Go LUCI Reviewed-by: Cherry Mui Auto-Submit: Ian Lance Taylor (cherry picked from commit c2098929056481d0dc09f5f42b8959f4db8878f2) Reviewed-on: https://go-review.googlesource.com/c/go/+/611296 Reviewed-by: Ian Lance Taylor Auto-Submit: Ian Lance Taylor Commit-Queue: Ian Lance Taylor --- src/cmd/cgo/gcc.go | 23 ++++++++++++------ src/cmd/cgo/internal/test/cgo_test.go | 1 + src/cmd/cgo/internal/test/test.go | 34 +++++++++++++++++++++++++++ 3 files changed, 51 insertions(+), 7 deletions(-) diff --git a/src/cmd/cgo/gcc.go b/src/cmd/cgo/gcc.go index 6c23e59adf19eb..be93c4a24bb566 100644 --- a/src/cmd/cgo/gcc.go +++ b/src/cmd/cgo/gcc.go @@ -2579,6 +2579,11 @@ func (c *typeConv) loadType(dtype dwarf.Type, pos token.Pos, parent string) *Typ if dt.BitSize > 0 { fatalf("%s: unexpected: %d-bit int type - %s", lineno(pos), dt.BitSize, dtype) } + + if t.Align = t.Size; t.Align >= c.ptrSize { + t.Align = c.ptrSize + } + switch t.Size { default: fatalf("%s: unexpected: %d-byte int type - %s", lineno(pos), t.Size, dtype) @@ -2595,9 +2600,8 @@ func (c *typeConv) loadType(dtype dwarf.Type, pos token.Pos, parent string) *Typ Len: c.intExpr(t.Size), Elt: c.uint8, } - } - if t.Align = t.Size; t.Align >= c.ptrSize { - t.Align = c.ptrSize + // t.Align is the alignment of the Go type. + t.Align = 1 } case *dwarf.PtrType: @@ -2826,6 +2830,11 @@ func (c *typeConv) loadType(dtype dwarf.Type, pos token.Pos, parent string) *Typ if dt.BitSize > 0 { fatalf("%s: unexpected: %d-bit uint type - %s", lineno(pos), dt.BitSize, dtype) } + + if t.Align = t.Size; t.Align >= c.ptrSize { + t.Align = c.ptrSize + } + switch t.Size { default: fatalf("%s: unexpected: %d-byte uint type - %s", lineno(pos), t.Size, dtype) @@ -2842,9 +2851,8 @@ func (c *typeConv) loadType(dtype dwarf.Type, pos token.Pos, parent string) *Typ Len: c.intExpr(t.Size), Elt: c.uint8, } - } - if t.Align = t.Size; t.Align >= c.ptrSize { - t.Align = c.ptrSize + // t.Align is the alignment of the Go type. + t.Align = 1 } case *dwarf.VoidType: @@ -3110,10 +3118,11 @@ func (c *typeConv) Struct(dt *dwarf.StructType, pos token.Pos) (expr *ast.Struct } // Round off up to talign, assumed to be a power of 2. 
+ origOff := off off = (off + talign - 1) &^ (talign - 1) if f.ByteOffset > off { - fld, sizes = c.pad(fld, sizes, f.ByteOffset-off) + fld, sizes = c.pad(fld, sizes, f.ByteOffset-origOff) off = f.ByteOffset } if f.ByteOffset < off { diff --git a/src/cmd/cgo/internal/test/cgo_test.go b/src/cmd/cgo/internal/test/cgo_test.go index 5e02888b3dddd9..5393552e07a4d1 100644 --- a/src/cmd/cgo/internal/test/cgo_test.go +++ b/src/cmd/cgo/internal/test/cgo_test.go @@ -70,6 +70,7 @@ func Test31891(t *testing.T) { test31891(t) } func Test42018(t *testing.T) { test42018(t) } func Test45451(t *testing.T) { test45451(t) } func Test49633(t *testing.T) { test49633(t) } +func Test69086(t *testing.T) { test69086(t) } func TestAlign(t *testing.T) { testAlign(t) } func TestAtol(t *testing.T) { testAtol(t) } func TestBlocking(t *testing.T) { testBlocking(t) } diff --git a/src/cmd/cgo/internal/test/test.go b/src/cmd/cgo/internal/test/test.go index 374689631d77ab..362be79a737bee 100644 --- a/src/cmd/cgo/internal/test/test.go +++ b/src/cmd/cgo/internal/test/test.go @@ -940,6 +940,19 @@ typedef struct { } issue67517struct; static void issue67517(issue67517struct* p) {} +// Issue 69086. +// GCC added the __int128 type in GCC 4.6, released in 2011. +typedef struct { + int a; +#ifdef __SIZEOF_INT128__ + unsigned __int128 b; +#else + uint64_t b; +#endif + unsigned char c; +} issue69086struct; +static int issue690861(issue69086struct* p) { p->b = 1234; return p->c; } +static int issue690862(unsigned long ul1, unsigned long ul2, unsigned int u, issue69086struct s) { return (int)(s.b); } */ import "C" @@ -2349,3 +2362,24 @@ func issue67517() { b: nil, }) } + +// Issue 69086. +func test69086(t *testing.T) { + var s C.issue69086struct + + typ := reflect.TypeOf(s) + for i := 0; i < typ.NumField(); i++ { + f := typ.Field(i) + t.Logf("field %d: name %s size %d align %d offset %d", i, f.Name, f.Type.Size(), f.Type.Align(), f.Offset) + } + + s.c = 1 + got := C.issue690861(&s) + if got != 1 { + t.Errorf("field: got %d, want 1", got) + } + got = C.issue690862(1, 2, 3, s) + if got != 1234 { + t.Errorf("call: got %d, want 1234", got) + } +} From a886959aa2fb1115096a937d3d8a2e921388752f Mon Sep 17 00:00:00 2001 From: Keith Randall Date: Thu, 29 Aug 2024 15:08:33 -0700 Subject: [PATCH 2/9] [release-branch.go1.23] runtime: size maps.Clone destination bucket array safely In rare situations, like during same-sized grows, the source map for maps.Clone may be overloaded (has more than 6.5 entries per bucket). This causes the runtime to allocate a larger bucket array for the destination map than for the source map. The maps.Clone code walks off the end of the source array if it is smaller than the destination array. This is a pretty simple fix, ensuring that the destination bucket array is never longer than the source bucket array. Maybe a better fix is to make the Clone code handle shorter source arrays correctly, but this fix is deliberately simple to reduce the risk of backporting this fix. 
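To make the overload scenario concrete, here is a condensed sketch
distilled from the regression test added below
(test/fixedbugs/issue69110.go). The churn counts are illustrative, and
whether a same-size grow really happens depends on runtime internals,
which is why the actual test detects it via a linkname hook instead of
assuming it:

	package main

	import "maps"

	func main() {
		// Fill a 4-bucket map to its load-factor limit (26 = 4*13/2),
		// then churn with delete/insert to accumulate overflow buckets
		// and, with luck, trigger a same-size grow.
		m := map[int]int{}
		for i := 0; i < 26; i++ {
			m[i] = i
		}
		for i := 26; i < 1026; i++ {
			delete(m, i-26)
			m[i] = i
		}
		// One more insert while the same-size grow is in progress pushes
		// the map over the load factor (we can't grow while growing).
		m[-1] = -1
		// Pre-fix, the clone could allocate a larger bucket array than
		// the source has and walk off the end of the source's array.
		_ = maps.Clone(m)
	}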
Fixes #69156 Change-Id: I824c93d1db690999f25a3c43b2816fc28ace7509 Reviewed-on: https://go-review.googlesource.com/c/go/+/610377 Reviewed-by: Dmitri Shuralyov Reviewed-by: Cuong Manh Le LUCI-TryBot-Result: Go LUCI Auto-Submit: Dmitri Shuralyov Reviewed-by: Keith Randall --- src/runtime/map.go | 16 +++++++++- test/fixedbugs/issue69110.go | 57 ++++++++++++++++++++++++++++++++++++ 2 files changed, 72 insertions(+), 1 deletion(-) create mode 100644 test/fixedbugs/issue69110.go diff --git a/src/runtime/map.go b/src/runtime/map.go index 112084f5a74091..52d56fb57a4dc6 100644 --- a/src/runtime/map.go +++ b/src/runtime/map.go @@ -1209,6 +1209,11 @@ func (h *hmap) sameSizeGrow() bool { return h.flags&sameSizeGrow != 0 } +//go:linkname sameSizeGrowForIssue69110Test +func sameSizeGrowForIssue69110Test(h *hmap) bool { + return h.sameSizeGrow() +} + // noldbuckets calculates the number of buckets prior to the current map growth. func (h *hmap) noldbuckets() uintptr { oldB := h.B @@ -1668,7 +1673,16 @@ func moveToBmap(t *maptype, h *hmap, dst *bmap, pos int, src *bmap) (*bmap, int) } func mapclone2(t *maptype, src *hmap) *hmap { - dst := makemap(t, src.count, nil) + hint := src.count + if overLoadFactor(hint, src.B) { + // Note: in rare cases (e.g. during a same-sized grow) the map + // can be overloaded. Make sure we don't allocate a destination + // bucket array larger than the source bucket array. + // This will cause the cloned map to be overloaded also, + // but that's better than crashing. See issue 69110. + hint = int(loadFactorNum * (bucketShift(src.B) / loadFactorDen)) + } + dst := makemap(t, hint, nil) dst.hash0 = src.hash0 dst.nevacuate = 0 // flags do not need to be copied here, just like a new map has no flags. diff --git a/test/fixedbugs/issue69110.go b/test/fixedbugs/issue69110.go new file mode 100644 index 00000000000000..71a4bcac31a16e --- /dev/null +++ b/test/fixedbugs/issue69110.go @@ -0,0 +1,57 @@ +// run + +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "maps" + _ "unsafe" +) + +func main() { + for i := 0; i < 100; i++ { + f() + } +} + +const NB = 4 + +func f() { + // Make a map with NB buckets, at max capacity. + // 6.5 entries/bucket. + ne := NB * 13 / 2 + m := map[int]int{} + for i := 0; i < ne; i++ { + m[i] = i + } + + // delete/insert a lot, to hopefully get lots of overflow buckets + // and trigger a same-size grow. + ssg := false + for i := ne; i < ne+1000; i++ { + delete(m, i-ne) + m[i] = i + if sameSizeGrow(m) { + ssg = true + break + } + } + if !ssg { + return + } + + // Insert 1 more entry, which would ordinarily trigger a growth. + // We can't grow while growing, so we instead go over our + // target capacity. + m[-1] = -1 + + // Cloning in this state will make a map with a destination bucket + // array twice the size of the source. + _ = maps.Clone(m) +} + +//go:linkname sameSizeGrow runtime.sameSizeGrowForIssue69110Test +func sameSizeGrow(m map[int]int) bool From 82575f76b8473effd6aff0a8690582820380d4d4 Mon Sep 17 00:00:00 2001 From: Michael Anthony Knyszek Date: Wed, 4 Sep 2024 03:08:26 +0000 Subject: [PATCH 3/9] [release-branch.go1.23] internal/weak: shade pointer in weak-to-strong conversion There's a bug in the weak-to-strong conversion in that creating the *only* strong pointer to some weakly-held object during the mark phase may result in that object not being properly marked. 
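Concretely, the dangerous interleaving looks like the following sketch.
It uses the internal/weak API exercised by the new test (internal/weak
is std-internal, so this is a schematic fragment rather than a runnable
program, and the phase comments describe one bad schedule rather than
anything the code can force):

	obj := new(T)
	wp := weak.Make(obj)
	obj = nil             // only the weak reference remains
	// ... a GC mark phase begins; the object is still white ...
	strong := wp.Strong() // creates the *only* strong pointer
	// strong lands in a stack frame that may already be blackened, so
	// without shading it the marker never visits the object, and a
	// following cycle can free memory that strong still points to.
	_ = strong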
The exact mechanism for this is that the new strong pointer will always point to a white object (because it was only weakly referenced up until this point) and it can then be stored in a blackened stack, hiding it from the garbage collector. This "hide a white pointer in the stack" problem is pretty much exactly what the Yuasa part of the hybrid write barrier is trying to catch, so we need to do the same thing the write barrier would do: shade the pointer. Added a test and confirmed that it fails with high probability if the pointer shading is missing. For #69210. Fixes #69240. Change-Id: Iaae64ae95ea7e975c2f2c3d4d1960e74e1bd1c3f Reviewed-on: https://go-review.googlesource.com/c/go/+/610396 LUCI-TryBot-Result: Go LUCI Reviewed-by: Cherry Mui Reviewed-by: Michael Pratt Auto-Submit: Michael Knyszek (cherry picked from commit 79fd633632cdbaf9ca38f7559e5abb5c07fbbd9d) Reviewed-on: https://go-review.googlesource.com/c/go/+/610696 Auto-Submit: Dmitri Shuralyov --- src/internal/weak/pointer_test.go | 82 +++++++++++++++++++++++++++++++ src/runtime/mheap.go | 27 +++++++++- 2 files changed, 108 insertions(+), 1 deletion(-) diff --git a/src/internal/weak/pointer_test.go b/src/internal/weak/pointer_test.go index e143749230f0a5..5a861bb9ca39d7 100644 --- a/src/internal/weak/pointer_test.go +++ b/src/internal/weak/pointer_test.go @@ -5,9 +5,12 @@ package weak_test import ( + "context" "internal/weak" "runtime" + "sync" "testing" + "time" ) type T struct { @@ -128,3 +131,82 @@ func TestPointerFinalizer(t *testing.T) { t.Errorf("weak pointer is non-nil even after finalization: %v", wt) } } + +// Regression test for issue 69210. +// +// Weak-to-strong conversions must shade the new strong pointer, otherwise +// that might be creating the only strong pointer to a white object which +// is hidden in a blackened stack. +// +// Never fails if correct, fails with some high probability if incorrect. +func TestIssue69210(t *testing.T) { + if testing.Short() { + t.Skip("this is a stress test that takes seconds to run on its own") + } + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) + defer cancel() + + // What we're trying to do is manufacture the conditions under which this + // bug happens. Specifically, we want: + // + // 1. To create a whole bunch of objects that are only weakly-pointed-to, + // 2. To call Strong while the GC is in the mark phase, + // 3. The new strong pointer to be missed by the GC, + // 4. The following GC cycle to mark a free object. + // + // Unfortunately, (2) and (3) are hard to control, but we can increase + // the likelihood by having several goroutines do (1) at once while + // another goroutine constantly keeps us in the GC with runtime.GC. + // Like throwing darts at a dart board until they land just right. + // We can increase the likelihood of (4) by adding some delay after + // creating the strong pointer, but only if it's non-nil. If it's nil, + // that means it was already collected in which case there's no chance + // of triggering the bug, so we want to retry as fast as possible. + // Our heap here is tiny, so the GCs will go by fast. + // + // As of 2024-09-03, removing the line that shades pointers during + // the weak-to-strong conversion causes this test to fail about 50% + // of the time. 
+ + var wg sync.WaitGroup + wg.Add(1) + go func() { + defer wg.Done() + for { + runtime.GC() + + select { + case <-ctx.Done(): + return + default: + } + } + }() + for range max(runtime.GOMAXPROCS(-1)-1, 1) { + wg.Add(1) + go func() { + defer wg.Done() + for { + for range 5 { + bt := new(T) + wt := weak.Make(bt) + bt = nil + time.Sleep(1 * time.Millisecond) + bt = wt.Strong() + if bt != nil { + time.Sleep(4 * time.Millisecond) + bt.t = bt + bt.a = 12 + } + runtime.KeepAlive(bt) + } + select { + case <-ctx.Done(): + return + default: + } + } + }() + } + wg.Wait() +} diff --git a/src/runtime/mheap.go b/src/runtime/mheap.go index 35fd08af50c3c1..a91055387ef35b 100644 --- a/src/runtime/mheap.go +++ b/src/runtime/mheap.go @@ -2073,7 +2073,22 @@ func internal_weak_runtime_makeStrongFromWeak(u unsafe.Pointer) unsafe.Pointer { // Even if we just swept some random span that doesn't contain this object, because // this object is long dead and its memory has since been reused, we'll just observe nil. ptr := unsafe.Pointer(handle.Load()) + + // This is responsible for maintaining the same GC-related + // invariants as the Yuasa part of the write barrier. During + // the mark phase, it's possible that we just created the only + // valid pointer to the object pointed to by ptr. If it's only + // ever referenced from our stack, and our stack is blackened + // already, we could fail to mark it. So, mark it now. + if gcphase != _GCoff { + shade(uintptr(ptr)) + } releasem(mp) + + // Explicitly keep ptr alive. This seems unnecessary since we return ptr, + // but let's be explicit since it's important we keep ptr alive across the + // call to shade. + KeepAlive(ptr) return ptr } @@ -2081,6 +2096,9 @@ func internal_weak_runtime_makeStrongFromWeak(u unsafe.Pointer) unsafe.Pointer { func getOrAddWeakHandle(p unsafe.Pointer) *atomic.Uintptr { // First try to retrieve without allocating. if handle := getWeakHandle(p); handle != nil { + // Keep p alive for the duration of the function to ensure + // that it cannot die while we're trying to do this. + KeepAlive(p) return handle } @@ -2105,6 +2123,10 @@ func getOrAddWeakHandle(p unsafe.Pointer) *atomic.Uintptr { scanblock(uintptr(unsafe.Pointer(&s.handle)), goarch.PtrSize, &oneptrmask[0], gcw, nil) releasem(mp) } + + // Keep p alive for the duration of the function to ensure + // that it cannot die while we're trying to do this. + KeepAlive(p) return s.handle } @@ -2124,7 +2146,7 @@ func getOrAddWeakHandle(p unsafe.Pointer) *atomic.Uintptr { } // Keep p alive for the duration of the function to ensure - // that it cannot die while we're trying to this. + // that it cannot die while we're trying to do this. KeepAlive(p) return handle } @@ -2154,6 +2176,9 @@ func getWeakHandle(p unsafe.Pointer) *atomic.Uintptr { unlock(&span.speciallock) releasem(mp) + // Keep p alive for the duration of the function to ensure + // that it cannot die while we're trying to do this. + KeepAlive(p) return handle } From e6598e7baafa5650f82b9575c053a52c2601bf8f Mon Sep 17 00:00:00 2001 From: Wei Fu Date: Thu, 22 Aug 2024 16:22:53 +0800 Subject: [PATCH 4/9] [release-branch.go1.23] os: dup pidfd if caller sets PidFD manually For #68984. Fixes #69119. 
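The scenario being fixed, as a minimal sketch mirroring the
TestStartProcessWithPidfd test added below (error handling elided; exe
stands in for any executable path):

	var pidfd int
	p, _ := os.StartProcess(exe, []string{exe}, &os.ProcAttr{
		Sys: &syscall.SysProcAttr{PidFD: &pidfd},
	})
	// Both the caller (via pidfd) and p now need the child's pidfd.
	// Previously, os adopted the caller's descriptor as the Process
	// handle, so p.Wait or p.Release would close pidfd out from under
	// the caller; with this change os works on a duplicate instead, and
	// pidfd stays valid until the caller closes it.
	p.Wait()
	syscall.Close(pidfd)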
Change-Id: I16d25777cb38a337cd4204a8147eaf866c3df9e1 Reviewed-on: https://go-review.googlesource.com/c/go/+/607695 Reviewed-by: Kirill Kolyshkin Reviewed-by: Michael Pratt LUCI-TryBot-Result: Go LUCI Commit-Queue: Ian Lance Taylor Reviewed-by: David Chase Auto-Submit: Ian Lance Taylor (cherry picked from commit 239666cd7343d46c40a5b929c8bec8b532dbe83f) Reviewed-on: https://go-review.googlesource.com/c/go/+/611415 Auto-Submit: Dmitri Shuralyov Reviewed-by: Dmitri Shuralyov TryBot-Bypass: Dmitri Shuralyov --- src/os/exec_posix.go | 5 +++-- src/os/pidfd_linux.go | 27 ++++++++++++++++++++------- src/os/pidfd_linux_test.go | 32 ++++++++++++++++++++++++++++++++ src/os/pidfd_other.go | 6 +++--- 4 files changed, 58 insertions(+), 12 deletions(-) diff --git a/src/os/exec_posix.go b/src/os/exec_posix.go index cba2e151673aba..ff51247d56b72d 100644 --- a/src/os/exec_posix.go +++ b/src/os/exec_posix.go @@ -35,10 +35,11 @@ func startProcess(name string, argv []string, attr *ProcAttr) (p *Process, err e } } + attrSys, shouldDupPidfd := ensurePidfd(attr.Sys) sysattr := &syscall.ProcAttr{ Dir: attr.Dir, Env: attr.Env, - Sys: ensurePidfd(attr.Sys), + Sys: attrSys, } if sysattr.Env == nil { sysattr.Env, err = execenv.Default(sysattr.Sys) @@ -63,7 +64,7 @@ func startProcess(name string, argv []string, attr *ProcAttr) (p *Process, err e // For Windows, syscall.StartProcess above already returned a process handle. if runtime.GOOS != "windows" { var ok bool - h, ok = getPidfd(sysattr.Sys) + h, ok = getPidfd(sysattr.Sys, shouldDupPidfd) if !ok { return newPIDProcess(pid), nil } diff --git a/src/os/pidfd_linux.go b/src/os/pidfd_linux.go index 0404c4ff64b72e..545cfe9613b8b4 100644 --- a/src/os/pidfd_linux.go +++ b/src/os/pidfd_linux.go @@ -19,9 +19,12 @@ import ( "unsafe" ) -func ensurePidfd(sysAttr *syscall.SysProcAttr) *syscall.SysProcAttr { +// ensurePidfd initializes the PidFD field in sysAttr if it is not already set. +// It returns the original or modified SysProcAttr struct and a flag indicating +// whether the PidFD should be duplicated before using. +func ensurePidfd(sysAttr *syscall.SysProcAttr) (*syscall.SysProcAttr, bool) { if !pidfdWorks() { - return sysAttr + return sysAttr, false } var pidfd int @@ -29,23 +32,33 @@ func ensurePidfd(sysAttr *syscall.SysProcAttr) *syscall.SysProcAttr { if sysAttr == nil { return &syscall.SysProcAttr{ PidFD: &pidfd, - } + }, false } if sysAttr.PidFD == nil { newSys := *sysAttr // copy newSys.PidFD = &pidfd - return &newSys + return &newSys, false } - return sysAttr + return sysAttr, true } -func getPidfd(sysAttr *syscall.SysProcAttr) (uintptr, bool) { +// getPidfd returns the value of sysAttr.PidFD (or its duplicate if needDup is +// set) and a flag indicating whether the value can be used. 
+func getPidfd(sysAttr *syscall.SysProcAttr, needDup bool) (uintptr, bool) {
 	if !pidfdWorks() {
 		return 0, false
 	}
-	return uintptr(*sysAttr.PidFD), true
+	h := *sysAttr.PidFD
+	if needDup {
+		dupH, e := unix.Fcntl(h, syscall.F_DUPFD_CLOEXEC, 0)
+		if e != nil {
+			return 0, false
+		}
+		h = dupH
+	}
+	return uintptr(h), true
 }
 
 func pidfdFind(pid int) (uintptr, error) {
diff --git a/src/os/pidfd_linux_test.go b/src/os/pidfd_linux_test.go
index 837593706bae8e..fa0877037baadc 100644
--- a/src/os/pidfd_linux_test.go
+++ b/src/os/pidfd_linux_test.go
@@ -6,6 +6,7 @@ package os_test
 
 import (
 	"errors"
+	"internal/syscall/unix"
 	"internal/testenv"
 	"os"
 	"syscall"
@@ -57,3 +58,34 @@ func TestFindProcessViaPidfd(t *testing.T) {
 		t.Fatalf("Release: got %v, want <nil>", err)
 	}
 }
+
+func TestStartProcessWithPidfd(t *testing.T) {
+	testenv.MustHaveGoBuild(t)
+	t.Parallel()
+
+	if err := os.CheckPidfdOnce(); err != nil {
+		// Non-pidfd code paths tested in exec_unix_test.go.
+		t.Skipf("skipping: pidfd not available: %v", err)
+	}
+
+	var pidfd int
+	p, err := os.StartProcess(testenv.GoToolPath(t), []string{"go"}, &os.ProcAttr{
+		Sys: &syscall.SysProcAttr{
+			PidFD: &pidfd,
+		},
+	})
+	if err != nil {
+		t.Fatalf("starting test process: %v", err)
+	}
+	defer syscall.Close(pidfd)
+
+	if _, err := p.Wait(); err != nil {
+		t.Fatalf("Wait: got %v, want <nil>", err)
+	}
+
+	// Check the pidfd is still valid
+	err = unix.PidFDSendSignal(uintptr(pidfd), syscall.Signal(0))
+	if !errors.Is(err, syscall.ESRCH) {
+		t.Errorf("SendSignal: got %v, want %v", err, syscall.ESRCH)
+	}
+}
diff --git a/src/os/pidfd_other.go b/src/os/pidfd_other.go
index dda4bd0feccae6..ba9cbcb93830c0 100644
--- a/src/os/pidfd_other.go
+++ b/src/os/pidfd_other.go
@@ -8,11 +8,11 @@ package os
 
 import "syscall"
 
-func ensurePidfd(sysAttr *syscall.SysProcAttr) *syscall.SysProcAttr {
-	return sysAttr
+func ensurePidfd(sysAttr *syscall.SysProcAttr) (*syscall.SysProcAttr, bool) {
+	return sysAttr, false
 }
 
-func getPidfd(_ *syscall.SysProcAttr) (uintptr, bool) {
+func getPidfd(_ *syscall.SysProcAttr, _ bool) (uintptr, bool) {
 	return 0, false
 }

From a74951c5af5498db5d4be0c14dcaa45fb452e23a Mon Sep 17 00:00:00 2001
From: Michael Anthony Knyszek
Date: Wed, 4 Sep 2024 16:46:33 +0000
Subject: [PATCH 5/9] [release-branch.go1.23] unique: don't retain uncloned input as key

Currently the unique package tries to clone strings that get stored in
its internal map to avoid retaining large strings. However, this falls
over entirely due to the fact that the original string is *still*
stored in the map as a key. Whoops. Fix this by storing the cloned
value in the map instead.

This change also adds a test which fails without this change.

For #69370.
Fixes #69383.

Change-Id: I1a6bb68ed79b869ea12ab6be061a5ae4b4377ddb
Reviewed-on: https://go-review.googlesource.com/c/go/+/610738
Reviewed-by: Michael Pratt
Auto-Submit: Michael Knyszek
LUCI-TryBot-Result: Go LUCI
(cherry picked from commit 21ac23a96f204dfb558a8d3071380c1d105a93ba)
Reviewed-on: https://go-review.googlesource.com/c/go/+/612295
Auto-Submit: Tim King
---
 src/unique/handle.go      |  7 ++++---
 src/unique/handle_test.go | 22 ++++++++++++++++++++++
 2 files changed, 26 insertions(+), 3 deletions(-)

diff --git a/src/unique/handle.go b/src/unique/handle.go
index 96d8fedb0cabe6..abc620f60fe14e 100644
--- a/src/unique/handle.go
+++ b/src/unique/handle.go
@@ -50,13 +50,13 @@ func Make[T comparable](value T) Handle[T] {
 		toInsert     *T // Keep this around to keep it alive.
toInsertWeak weak.Pointer[T] ) - newValue := func() weak.Pointer[T] { + newValue := func() (T, weak.Pointer[T]) { if toInsert == nil { toInsert = new(T) *toInsert = clone(value, &m.cloneSeq) toInsertWeak = weak.Make(toInsert) } - return toInsertWeak + return *toInsert, toInsertWeak } var ptr *T for { @@ -64,7 +64,8 @@ func Make[T comparable](value T) Handle[T] { wp, ok := m.Load(value) if !ok { // Try to insert a new value into the map. - wp, _ = m.LoadOrStore(value, newValue()) + k, v := newValue() + wp, _ = m.LoadOrStore(k, v) } // Now that we're sure there's a value in the map, let's // try to get the pointer we need out of it. diff --git a/src/unique/handle_test.go b/src/unique/handle_test.go index b031bbf6852c6b..dd4b01ef79900b 100644 --- a/src/unique/handle_test.go +++ b/src/unique/handle_test.go @@ -9,7 +9,10 @@ import ( "internal/abi" "reflect" "runtime" + "strings" "testing" + "time" + "unsafe" ) // Set up special types. Because the internal maps are sharded by type, @@ -110,3 +113,22 @@ func checkMapsFor[T comparable](t *testing.T, value T) { } t.Errorf("failed to drain internal maps of %v", value) } + +func TestMakeClonesStrings(t *testing.T) { + s := strings.Clone("abcdefghijklmnopqrstuvwxyz") // N.B. Must be big enough to not be tiny-allocated. + ran := make(chan bool) + runtime.SetFinalizer(unsafe.StringData(s), func(_ *byte) { + ran <- true + }) + h := Make(s) + + // Clean up s (hopefully) and run the finalizer. + runtime.GC() + + select { + case <-time.After(1 * time.Second): + t.Fatal("string was improperly retained") + case <-ran: + } + runtime.KeepAlive(h) +} From c8c6f9abfbbd2f949285a527036a3d0dbd991e74 Mon Sep 17 00:00:00 2001 From: Ian Lance Taylor Date: Fri, 6 Sep 2024 12:19:01 -0700 Subject: [PATCH 6/9] [release-branch.go1.23] syscall: on exec failure, close pidfd For #69284 Fixes #69402 Change-Id: I6350209302778ba5e44fa03d0b9e680d2b4ec192 Reviewed-on: https://go-review.googlesource.com/c/go/+/611495 LUCI-TryBot-Result: Go LUCI Reviewed-by: roger peppe Reviewed-by: Tim King Auto-Submit: Ian Lance Taylor Reviewed-by: Dmitri Shuralyov (cherry picked from commit 8926ca9c5ec3ea0b51e413e87f737aeb1422ea48) Reviewed-on: https://go-review.googlesource.com/c/go/+/613616 Reviewed-by: Ian Lance Taylor Auto-Submit: Ian Lance Taylor Reviewed-by: Tobias Klauser --- src/os/pidfd_linux_test.go | 60 +++++++++++++++++++++++++++++++++++++ src/syscall/exec_bsd.go | 5 ++++ src/syscall/exec_freebsd.go | 5 ++++ src/syscall/exec_libc.go | 5 ++++ src/syscall/exec_libc2.go | 5 ++++ src/syscall/exec_linux.go | 8 +++++ src/syscall/exec_unix.go | 4 +++ 7 files changed, 92 insertions(+) diff --git a/src/os/pidfd_linux_test.go b/src/os/pidfd_linux_test.go index fa0877037baadc..c1f41d02d66c73 100644 --- a/src/os/pidfd_linux_test.go +++ b/src/os/pidfd_linux_test.go @@ -9,6 +9,7 @@ import ( "internal/syscall/unix" "internal/testenv" "os" + "os/exec" "syscall" "testing" ) @@ -89,3 +90,62 @@ func TestStartProcessWithPidfd(t *testing.T) { t.Errorf("SendSignal: got %v, want %v", err, syscall.ESRCH) } } + +// Issue #69284 +func TestPidfdLeak(t *testing.T) { + testenv.MustHaveExec(t) + exe, err := os.Executable() + if err != nil { + t.Fatal(err) + } + + // Find the next 10 descriptors. + // We need to get more than one descriptor in practice; + // the pidfd winds up not being the next descriptor. 
+ const count = 10 + want := make([]int, count) + for i := range count { + var err error + want[i], err = syscall.Open(exe, syscall.O_RDONLY, 0) + if err != nil { + t.Fatal(err) + } + } + + // Close the descriptors. + for _, d := range want { + syscall.Close(d) + } + + // Start a process 10 times. + for range 10 { + // For testing purposes this has to be an absolute path. + // Otherwise we will fail finding the executable + // and won't start a process at all. + cmd := exec.Command("/noSuchExecutable") + cmd.Run() + } + + // Open the next 10 descriptors again. + got := make([]int, count) + for i := range count { + var err error + got[i], err = syscall.Open(exe, syscall.O_RDONLY, 0) + if err != nil { + t.Fatal(err) + } + } + + // Close the descriptors + for _, d := range got { + syscall.Close(d) + } + + t.Logf("got %v", got) + t.Logf("want %v", want) + + // Allow some slack for runtime epoll descriptors and the like. + if got[count-1] > want[count-1]+5 { + t.Errorf("got descriptor %d, want %d", got[count-1], want[count-1]) + } +} diff --git a/src/syscall/exec_bsd.go b/src/syscall/exec_bsd.go index 149cc2f11c128c..bbdab46de48c03 100644 --- a/src/syscall/exec_bsd.go +++ b/src/syscall/exec_bsd.go @@ -293,3 +293,8 @@ childerror: RawSyscall(SYS_EXIT, 253, 0, 0) } } + +// forkAndExecFailureCleanup cleans up after an exec failure. +func forkAndExecFailureCleanup(attr *ProcAttr, sys *SysProcAttr) { + // Nothing to do. +} diff --git a/src/syscall/exec_freebsd.go b/src/syscall/exec_freebsd.go index 3226cb88cd999a..686fd23bef435d 100644 --- a/src/syscall/exec_freebsd.go +++ b/src/syscall/exec_freebsd.go @@ -317,3 +317,8 @@ childerror: RawSyscall(SYS_EXIT, 253, 0, 0) } } + +// forkAndExecFailureCleanup cleans up after an exec failure. +func forkAndExecFailureCleanup(attr *ProcAttr, sys *SysProcAttr) { + // Nothing to do. +} diff --git a/src/syscall/exec_libc.go b/src/syscall/exec_libc.go index 768e8c131c1323..0e886508737d1e 100644 --- a/src/syscall/exec_libc.go +++ b/src/syscall/exec_libc.go @@ -314,6 +314,11 @@ childerror: } } +// forkAndExecFailureCleanup cleans up after an exec failure. +func forkAndExecFailureCleanup(attr *ProcAttr, sys *SysProcAttr) { + // Nothing to do. +} + func ioctlPtr(fd, req uintptr, arg unsafe.Pointer) (err Errno) { return ioctl(fd, req, uintptr(arg)) } diff --git a/src/syscall/exec_libc2.go b/src/syscall/exec_libc2.go index 7a6750084486cf..a0579627a300bf 100644 --- a/src/syscall/exec_libc2.go +++ b/src/syscall/exec_libc2.go @@ -289,3 +289,8 @@ childerror: rawSyscall(abi.FuncPCABI0(libc_exit_trampoline), 253, 0, 0) } } + +// forkAndExecFailureCleanup cleans up after an exec failure. +func forkAndExecFailureCleanup(attr *ProcAttr, sys *SysProcAttr) { + // Nothing to do. +} diff --git a/src/syscall/exec_linux.go b/src/syscall/exec_linux.go index e4b9ce1bf47da3..26844121910e8f 100644 --- a/src/syscall/exec_linux.go +++ b/src/syscall/exec_linux.go @@ -735,3 +735,11 @@ func writeUidGidMappings(pid int, sys *SysProcAttr) error { return nil } + +// forkAndExecFailureCleanup cleans up after an exec failure. 
+func forkAndExecFailureCleanup(attr *ProcAttr, sys *SysProcAttr) {
+	if sys.PidFD != nil && *sys.PidFD != -1 {
+		Close(*sys.PidFD)
+		*sys.PidFD = -1
+	}
+}
diff --git a/src/syscall/exec_unix.go b/src/syscall/exec_unix.go
index 1b90aa7e72e0ed..4747fa075834af 100644
--- a/src/syscall/exec_unix.go
+++ b/src/syscall/exec_unix.go
@@ -237,6 +237,10 @@ func forkExec(argv0 string, argv []string, attr *ProcAttr) (pid int, err error)
 		for err1 == EINTR {
 			_, err1 = Wait4(pid, &wstatus, 0, nil)
 		}
+
+		// OS-specific cleanup on failure.
+		forkAndExecFailureCleanup(attr, sys)
+
 		return 0, err
 	}

From fbddfae62f19b5f04555aa593970ac4c6f5a38e5 Mon Sep 17 00:00:00 2001
From: Cuong Manh Le
Date: Wed, 18 Sep 2024 22:39:05 +0700
Subject: [PATCH 7/9] [release-branch.go1.23] cmd/compile: fix wrong escape analysis for rangefunc

CL 584596 added a "-range" suffix to the name of the closure generated
for a rangefunc loop body. However, this breaks the condition that
escape analysis uses for checking whether a closure is contained within
a function, which is "F.funcN" for outer function "F" and closure
"funcN". Fix this by also accepting the new "-rangeN" suffix in that
condition.

Updates #69434
Fixes #69511

Change-Id: I411de8f63b69a6514a9e9504d49d62e00ce4115d
Reviewed-on: https://go-review.googlesource.com/c/go/+/614096
Reviewed-by: David Chase
Reviewed-by: Cherry Mui
LUCI-TryBot-Result: Go LUCI
Auto-Submit: Cuong Manh Le
Reviewed-on: https://go-review.googlesource.com/c/go/+/614195
---
 src/cmd/compile/internal/escape/solve.go |   4 +-
 test/fixedbugs/issue69434.go             | 173 +++++++++++++++++++++++
 test/fixedbugs/issue69507.go             | 133 +++++++++++++++++
 3 files changed, 308 insertions(+), 2 deletions(-)
 create mode 100644 test/fixedbugs/issue69434.go
 create mode 100644 test/fixedbugs/issue69507.go

diff --git a/src/cmd/compile/internal/escape/solve.go b/src/cmd/compile/internal/escape/solve.go
index 2675a16a241fe3..ef17bc48ef2342 100644
--- a/src/cmd/compile/internal/escape/solve.go
+++ b/src/cmd/compile/internal/escape/solve.go
@@ -318,9 +318,9 @@ func containsClosure(f, c *ir.Func) bool {
 		return false
 	}
 
-	// Closures within function Foo are named like "Foo.funcN..."
+	// Closures within function Foo are named like "Foo.funcN..." or "Foo-rangeN".
 	// TODO(mdempsky): Better way to recognize this.
 	fn := f.Sym().Name
 	cn := c.Sym().Name
-	return len(cn) > len(fn) && cn[:len(fn)] == fn && cn[len(fn)] == '.'
+	return len(cn) > len(fn) && cn[:len(fn)] == fn && (cn[len(fn)] == '.' || cn[len(fn)] == '-')
 }
diff --git a/test/fixedbugs/issue69434.go b/test/fixedbugs/issue69434.go
new file mode 100644
index 00000000000000..682046601960da
--- /dev/null
+++ b/test/fixedbugs/issue69434.go
@@ -0,0 +1,173 @@
+// run
+
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+ +package main + +import ( + "bufio" + "fmt" + "io" + "iter" + "math/rand" + "os" + "strings" + "unicode" +) + +// WordReader is the struct that implements io.Reader +type WordReader struct { + scanner *bufio.Scanner +} + +// NewWordReader creates a new WordReader from an io.Reader +func NewWordReader(r io.Reader) *WordReader { + scanner := bufio.NewScanner(r) + scanner.Split(bufio.ScanWords) + return &WordReader{ + scanner: scanner, + } +} + +// Read reads data from the input stream and returns a single lowercase word at a time +func (wr *WordReader) Read(p []byte) (n int, err error) { + if !wr.scanner.Scan() { + if err := wr.scanner.Err(); err != nil { + return 0, err + } + return 0, io.EOF + } + word := wr.scanner.Text() + cleanedWord := removeNonAlphabetic(word) + if len(cleanedWord) == 0 { + return wr.Read(p) + } + n = copy(p, []byte(cleanedWord)) + return n, nil +} + +// All returns an iterator allowing the caller to iterate over the WordReader using for/range. +func (wr *WordReader) All() iter.Seq[string] { + word := make([]byte, 1024) + return func(yield func(string) bool) { + var err error + var n int + for n, err = wr.Read(word); err == nil; n, err = wr.Read(word) { + if !yield(string(word[:n])) { + return + } + } + if err != io.EOF { + fmt.Fprintf(os.Stderr, "error reading word: %v\n", err) + } + } +} + +// removeNonAlphabetic removes non-alphabetic characters from a word using strings.Map +func removeNonAlphabetic(word string) string { + return strings.Map(func(r rune) rune { + if unicode.IsLetter(r) { + return unicode.ToLower(r) + } + return -1 + }, word) +} + +// ProbabilisticSkipper determines if an item should be retained with probability 1/(1<>= 1 + pr.counter-- + if pr.counter == 0 { + pr.refreshCounter() + } + return remove +} + +// EstimateUniqueWordsIter estimates the number of unique words using a probabilistic counting method +func EstimateUniqueWordsIter(reader io.Reader, memorySize int) int { + wordReader := NewWordReader(reader) + words := make(map[string]struct{}, memorySize) + + rounds := 0 + roundRemover := NewProbabilisticSkipper(1) + wordSkipper := NewProbabilisticSkipper(rounds) + wordSkipper.check(rounds) + + for word := range wordReader.All() { + wordSkipper.check(rounds) + if wordSkipper.ShouldSkip() { + delete(words, word) + } else { + words[word] = struct{}{} + + if len(words) >= memorySize { + rounds++ + + wordSkipper = NewProbabilisticSkipper(rounds) + for w := range words { + if roundRemover.ShouldSkip() { + delete(words, w) + } + } + } + } + wordSkipper.check(rounds) + } + + if len(words) == 0 { + return 0 + } + + invProbability := 1 << rounds + estimatedUniqueWords := len(words) * invProbability + return estimatedUniqueWords +} + +func main() { + input := "Hello, world! This is a test. Hello, world, hello!" + expectedUniqueWords := 6 // "hello", "world", "this", "is", "a", "test" (but "hello" and "world" are repeated) + memorySize := 6 + + reader := strings.NewReader(input) + estimatedUniqueWords := EstimateUniqueWordsIter(reader, memorySize) + if estimatedUniqueWords != expectedUniqueWords { + // ... + } +} diff --git a/test/fixedbugs/issue69507.go b/test/fixedbugs/issue69507.go new file mode 100644 index 00000000000000..fc300c848ee62f --- /dev/null +++ b/test/fixedbugs/issue69507.go @@ -0,0 +1,133 @@ +// run + +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package main + +func main() { + err := run() + if err != nil { + panic(err) + } +} + +func run() error { + methods := "AB" + + type node struct { + tag string + choices []string + } + all := []node{ + {"000", permutations(methods)}, + } + + next := 1 + for len(all) > 0 { + cur := all[0] + k := copy(all, all[1:]) + all = all[:k] + + if len(cur.choices) == 1 { + continue + } + + var bestM map[byte][]string + bMax := len(cur.choices) + 1 + bMin := -1 + for sel := range selections(methods) { + m := make(map[byte][]string) + for _, order := range cur.choices { + x := findFirstMatch(order, sel) + m[x] = append(m[x], order) + } + + min := len(cur.choices) + 1 + max := -1 + for _, v := range m { + if len(v) < min { + min = len(v) + } + if len(v) > max { + max = len(v) + } + } + if max < bMax || (max == bMax && min > bMin) { + bestM = m + bMin = min + bMax = max + } + } + + if bMax == len(cur.choices) { + continue + } + + cc := Keys(bestM) + for c := range cc { + choices := bestM[c] + next++ + + switch c { + case 'A': + case 'B': + default: + panic("unexpected selector type " + string(c)) + } + all = append(all, node{"", choices}) + } + } + return nil +} + +func permutations(s string) []string { + if len(s) <= 1 { + return []string{s} + } + + var result []string + for i, char := range s { + rest := s[:i] + s[i+1:] + for _, perm := range permutations(rest) { + result = append(result, string(char)+perm) + } + } + return result +} + +type Seq[V any] func(yield func(V) bool) + +func selections(s string) Seq[string] { + return func(yield func(string) bool) { + for bits := 1; bits < 1< Date: Fri, 6 Sep 2024 17:19:34 -0700 Subject: [PATCH 8/9] [release-branch.go1.23] runtime: if stop/reset races with running timer, return correct result The timer code is careful to ensure that if stop/reset is called while a timer is being run, we cancel the run. However, the code failed to ensure that in that case stop/reset returned true, meaning that the timer had been stopped. In the racing case stop/reset could see that t.when had been set to zero, and return false, even though the timer had not and never would fire. Fix this by tracking whether a timer run is in progress, and using that to reliably detect that the run was cancelled, meaning that stop/reset should return true. For #69312 Fixes #69333 Change-Id: I78e870063eb96650638f12c056e32c931417c84a Reviewed-on: https://go-review.googlesource.com/c/go/+/611496 Reviewed-by: David Chase Reviewed-by: Cuong Manh Le Reviewed-by: Michael Knyszek LUCI-TryBot-Result: Go LUCI Auto-Submit: Ian Lance Taylor (cherry picked from commit 2ebaff4890596ed6064e2dcbbe5e68bc93bed882) Reviewed-on: https://go-review.googlesource.com/c/go/+/616096 Reviewed-by: Ian Lance Taylor Commit-Queue: Ian Lance Taylor Auto-Submit: Ian Lance Taylor --- src/runtime/time.go | 82 +++++++++++++++++++++++++++++++++++++++--- src/time/sleep_test.go | 62 ++++++++++++++++++++++++++++++++ 2 files changed, 139 insertions(+), 5 deletions(-) diff --git a/src/runtime/time.go b/src/runtime/time.go index fc664f49eb8d7c..b43cf9589bf90b 100644 --- a/src/runtime/time.go +++ b/src/runtime/time.go @@ -26,10 +26,40 @@ type timer struct { // mu protects reads and writes to all fields, with exceptions noted below. 
mu mutex - astate atomic.Uint8 // atomic copy of state bits at last unlock - state uint8 // state bits - isChan bool // timer has a channel; immutable; can be read without lock - blocked uint32 // number of goroutines blocked on timer's channel + astate atomic.Uint8 // atomic copy of state bits at last unlock + state uint8 // state bits + isChan bool // timer has a channel; immutable; can be read without lock + + // isSending is used to handle races between running a + // channel timer and stopping or resetting the timer. + // It is used only for channel timers (t.isChan == true). + // The lowest zero bit is set when about to send a value on the channel, + // and cleared after sending the value. + // The stop/reset code uses this to detect whether it + // stopped the channel send. + // + // An isSending bit is set only when t.mu is held. + // An isSending bit is cleared only when t.sendLock is held. + // isSending is read only when both t.mu and t.sendLock are held. + // + // Setting and clearing Uint8 bits handles the case of + // a timer that is reset concurrently with unlockAndRun. + // If the reset timer runs immediately, we can wind up with + // concurrent calls to unlockAndRun for the same timer. + // Using matched bit set and clear in unlockAndRun + // ensures that the value doesn't get temporarily out of sync. + // + // We use a uint8 to keep the timer struct small. + // This means that we can only support up to 8 concurrent + // runs of a timer, where a concurrent run can only occur if + // we start a run, unlock the timer, the timer is reset to a new + // value (or the ticker fires again), it is ready to run, + // and it is actually run, all before the first run completes. + // Since completing a run is fast, even 2 concurrent timer runs are + // nearly impossible, so this should be safe in practice. + isSending atomic.Uint8 + + blocked uint32 // number of goroutines blocked on timer's channel // Timer wakes up at when, and then at when+period, ... (period > 0 only) // each time calling f(arg, seq, delay) in the timer goroutine, so f must be @@ -431,6 +461,15 @@ func (t *timer) stop() bool { // Stop any future sends with stale values. // See timer.unlockAndRun. t.seq++ + + // If there is currently a send in progress, + // incrementing seq is going to prevent that + // send from actually happening. That means + // that we should return true: the timer was + // stopped, even though t.when may be zero. + if t.isSending.Load() > 0 { + pending = true + } } t.unlock() if !async && t.isChan { @@ -525,6 +564,15 @@ func (t *timer) modify(when, period int64, f func(arg any, seq uintptr, delay in // Stop any future sends with stale values. // See timer.unlockAndRun. t.seq++ + + // If there is currently a send in progress, + // incrementing seq is going to prevent that + // send from actually happening. That means + // that we should return true: the timer was + // stopped, even though t.when may be zero. + if t.isSending.Load() > 0 { + pending = true + } } t.unlock() if !async && t.isChan { @@ -1013,6 +1061,24 @@ func (t *timer) unlockAndRun(now int64) { } t.updateHeap() } + + async := debug.asynctimerchan.Load() != 0 + var isSendingClear uint8 + if !async && t.isChan { + // Tell Stop/Reset that we are sending a value. + // Set the lowest zero bit. + // We do this awkward step because atomic.Uint8 + // doesn't support Add or CompareAndSwap. + // We only set bits with t locked. 
+ v := t.isSending.Load() + i := sys.TrailingZeros8(^v) + if i == 8 { + throw("too many concurrent timer firings") + } + isSendingClear = 1 << i + t.isSending.Or(isSendingClear) + } + t.unlock() if raceenabled { @@ -1028,7 +1094,6 @@ func (t *timer) unlockAndRun(now int64) { ts.unlock() } - async := debug.asynctimerchan.Load() != 0 if !async && t.isChan { // For a timer channel, we want to make sure that no stale sends // happen after a t.stop or t.modify, but we cannot hold t.mu @@ -1044,6 +1109,10 @@ func (t *timer) unlockAndRun(now int64) { // and double-check that t.seq is still the seq value we saw above. // If not, the timer has been updated and we should skip the send. // We skip the send by reassigning f to a no-op function. + // + // The isSending field tells t.stop or t.modify that we have + // started to send the value. That lets them correctly return + // true meaning that no value was sent. lock(&t.sendLock) if t.seq != seq { f = func(any, uintptr, int64) {} @@ -1053,6 +1122,9 @@ func (t *timer) unlockAndRun(now int64) { f(arg, seq, delay) if !async && t.isChan { + // We are no longer sending a value. + t.isSending.And(^isSendingClear) + unlock(&t.sendLock) } diff --git a/src/time/sleep_test.go b/src/time/sleep_test.go index 29f56ef7520baa..5357ed23c8e352 100644 --- a/src/time/sleep_test.go +++ b/src/time/sleep_test.go @@ -785,6 +785,68 @@ func TestAdjustTimers(t *testing.T) { } } +func TestStopResult(t *testing.T) { + testStopResetResult(t, true) +} + +func TestResetResult(t *testing.T) { + testStopResetResult(t, false) +} + +// Test that when racing between running a timer and stopping a timer Stop +// consistently indicates whether a value can be read from the channel. +// Issue #69312. +func testStopResetResult(t *testing.T, testStop bool) { + for _, name := range []string{"0", "1", "2"} { + t.Run("asynctimerchan="+name, func(t *testing.T) { + testStopResetResultGODEBUG(t, testStop, name) + }) + } +} + +func testStopResetResultGODEBUG(t *testing.T, testStop bool, godebug string) { + t.Setenv("GODEBUG", "asynctimerchan="+godebug) + + stopOrReset := func(timer *Timer) bool { + if testStop { + return timer.Stop() + } else { + return timer.Reset(1 * Hour) + } + } + + start := make(chan struct{}) + var wg sync.WaitGroup + const N = 1000 + wg.Add(N) + for range N { + go func() { + defer wg.Done() + <-start + for j := 0; j < 100; j++ { + timer1 := NewTimer(1 * Millisecond) + timer2 := NewTimer(1 * Millisecond) + select { + case <-timer1.C: + if !stopOrReset(timer2) { + // The test fails if this + // channel read times out. + <-timer2.C + } + case <-timer2.C: + if !stopOrReset(timer1) { + // The test fails if this + // channel read times out. + <-timer1.C + } + } + } + }() + } + close(start) + wg.Wait() +} + // Benchmark timer latency when the thread that creates the timer is busy with // other work and the timers must be serviced by other threads. 
// https://golang.org/issue/38860 From ed07b321aef7632f956ce991dd10fdd7e1abd827 Mon Sep 17 00:00:00 2001 From: Gopher Robot Date: Tue, 1 Oct 2024 16:56:29 +0000 Subject: [PATCH 9/9] [release-branch.go1.23] go1.23.2 Change-Id: I904d2e951796dd4142d6e9de4a55af07852bca51 Reviewed-on: https://go-review.googlesource.com/c/go/+/617019 LUCI-TryBot-Result: Go LUCI Reviewed-by: Michael Knyszek Auto-Submit: Gopher Robot Reviewed-by: David Chase --- VERSION | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/VERSION b/VERSION index c0bacc9e28f531..7c5b4094049322 100644 --- a/VERSION +++ b/VERSION @@ -1,2 +1,2 @@ -go1.23.1 -time 2024-08-29T20:56:24Z +go1.23.2 +time 2024-09-28T01:34:15Z
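A closing usage note on the stop/reset fix in PATCH 8/9: the guarantee
it repairs is the classic drain idiom, which testStopResetResult above
exercises under every asynctimerchan setting. A sketch, under the
asynctimerchan=1 (pre-Go 1.23) semantics in which draining is required:

	t := time.NewTimer(time.Millisecond)
	// ... t may be in the middle of firing here ...
	if !t.Stop() {
		// Stop reported false, promising that a value was (or will
		// be) delivered on t.C, so drain it. Before the fix, a Stop
		// racing with the run could cancel the send and still return
		// false, leaving this receive blocked forever.
		<-t.C
	}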