add a test for releasing · coder/coder@cb6ff4a · GitHub

Commit cb6ff4a

add a test for releasing
1 parent a7187b2 commit cb6ff4a

File tree

1 file changed: +41 -4 lines changed

coderd/files/cache_internal_test.go

Lines changed: 41 additions & 4 deletions
@@ -10,19 +10,20 @@ import (
 
 	"github.com/coder/coder/v2/testutil"
 	"github.com/google/uuid"
-	"github.com/spf13/afero"
 	"github.com/stretchr/testify/require"
 	"golang.org/x/sync/errgroup"
 )
 
-func TestTryAgain(t *testing.T) {
+func TestConcurrency(t *testing.T) {
+	t.Parallel()
+
 	var fetches atomic.Int64
 	c := newTestCache(func(_ context.Context, _ uuid.UUID) (fs.FS, error) {
 		fetches.Add(1)
 		// Wait long enough before returning to make sure that all of the goroutines
 		// will be waiting in line, ensuring that no one duplicated a fetch.
 		time.Sleep(testutil.IntervalMedium)
-		return afero.NewIOFS(afero.NewMemMapFs()), nil
+		return nil, nil
 	})
 
 	batches := 1000
batches := 1000
@@ -32,7 +33,8 @@ func TestTryAgain(t *testing.T) {
 	}
 
 	// Call Acquire with a unique ID per batch, many times per batch, with many
-	// batches all in parallel. This gives us
+	// batches all in parallel. This is pretty much the worst-case scenario:
+	// thousands of concurrent reads, with both warm and cold loads happening.
 	batchSize := 10
 	for _, g := range groups {
 		id := uuid.New()
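The fan-out body and the final wait fall outside this hunk. A sketch of what the comment describes, assuming groups is a slice of *errgroup.Group and reusing the surrounding test's c, batches, and fetches (a reconstruction, not the verbatim test code):

// One errgroup per batch, one fresh ID per batch, and batchSize
// goroutines per group all acquiring the same ID at once.
groups := make([]*errgroup.Group, 0, batches)
for range batches {
	groups = append(groups, &errgroup.Group{})
}

batchSize := 10
for _, g := range groups {
	id := uuid.New()
	for range batchSize {
		g.Go(func() error {
			// Every goroutine in the batch contends for the same ID;
			// only the first cold Acquire should trigger a fetch.
			_, err := c.Acquire(t.Context(), id)
			return err
		})
	}
}

// Wait for every batch before the fetch-count assertion shown in the
// next hunk.
for _, g := range groups {
	require.NoError(t, g.Wait())
}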
@@ -50,6 +52,41 @@ func TestTryAgain(t *testing.T) {
 	require.Equal(t, int64(batches), fetches.Load())
 }
 
+func TestRelease(t *testing.T) {
+	t.Parallel()
+
+	c := newTestCache(func(_ context.Context, _ uuid.UUID) (fs.FS, error) {
+		return nil, nil
+	})
+
+	batches := 100
+	ids := make([]uuid.UUID, 0, batches)
+	for range batches {
+		ids = append(ids, uuid.New())
+	}
+
+	// Acquire a bunch of references
+	batchSize := 10
+	for _, id := range ids {
+		for range batchSize {
+			c.Acquire(t.Context(), id)
+		}
+	}
+
+	// Make sure cache is fully loaded
+	require.Equal(t, len(c.data), batches)
+
+	// Now release all of the references
+	for _, id := range ids {
+		for range batchSize {
+			c.Release(id)
+		}
+	}
+
+	// ...and make sure that the cache has emptied itself.
+	require.Equal(t, len(c.data), 0)
+}
+
 func newTestCache(fetcher func(context.Context, uuid.UUID) (fs.FS, error)) Cache {
 	return Cache{
 		lock: sync.Mutex{},
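TestRelease pins down the eviction contract: once every Acquire has been matched by a Release, the entry must vanish from c.data. Continuing the hypothetical sketch above, a refcounted Release that evicts at zero would satisfy both assertions:

// Release drops one reference to id; when the last holder lets go,
// the entry is evicted, which is why TestRelease can assert that
// len(c.data) drains back to zero. (Hypothetical sketch, continuing
// lazyCache above; not the actual coderd/files implementation.)
func (c *lazyCache) Release(id uuid.UUID) {
	c.lock.Lock()
	defer c.lock.Unlock()

	e, ok := c.data[id]
	if !ok {
		return
	}
	e.refs--
	if e.refs <= 0 {
		delete(c.data, id)
	}
}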
