feat: add filecache prometheus metrics by Emyrk · Pull Request #18089 · coder/coder · GitHub

feat: add filecache prometheus metrics #18089

Merged · 8 commits · May 30, 2025
Changes from 1 commit
address PR comments
Emyrk committed May 30, 2025
commit e54e48a5ca3f94f2ee07bb52c40df31ac4ef1349
31 changes: 18 additions & 13 deletions coderd/files/cache.go
@@ -19,14 +19,17 @@ import (
 // NewFromStore returns a file cache that will fetch files from the provided
 // database.
 func NewFromStore(store database.Store, registerer prometheus.Registerer) *Cache {
-	fetch := func(ctx context.Context, fileID uuid.UUID) (fs.FS, int64, error) {
+	fetch := func(ctx context.Context, fileID uuid.UUID) (cacheEntryValue, error) {
 		file, err := store.GetFileByID(ctx, fileID)
 		if err != nil {
-			return nil, 0, xerrors.Errorf("failed to read file from database: %w", err)
+			return cacheEntryValue{}, xerrors.Errorf("failed to read file from database: %w", err)
 		}

 		content := bytes.NewBuffer(file.Data)
-		return archivefs.FromTarReader(content), int64(content.Len()), nil
+		return cacheEntryValue{
+			FS:   archivefs.FromTarReader(content),
+			size: int64(content.Len()),
+		}, nil
 	}

 	return New(fetch, registerer)
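For orientation, here is a minimal, hypothetical caller sketch (not part of this commit): NewFromStore, Acquire, and Release are the functions visible in the diff, while the import paths, the openTemplateFile wrapper, and the "main.tf" lookup are illustrative assumptions.

// Hypothetical usage sketch; only NewFromStore, Acquire and Release come from this PR.
package example

import (
	"context"

	"github.com/google/uuid"
	"github.com/prometheus/client_golang/prometheus"

	"github.com/coder/coder/v2/coderd/database"
	"github.com/coder/coder/v2/coderd/files"
)

func openTemplateFile(ctx context.Context, db database.Store, fileID uuid.UUID) error {
	// The registerer is threaded through so the cache can expose its metrics;
	// a throwaway registry keeps the sketch self-contained.
	cache := files.NewFromStore(db, prometheus.NewRegistry())

	// Acquire returns the tar-backed fs.FS for the file; Release must be called
	// once the caller is done so reference counts and cache size stay accurate.
	fsys, err := cache.Acquire(ctx, fileID)
	if err != nil {
		return err
	}
	defer cache.Release(fileID)

	// "main.tf" is an arbitrary example path inside the archive.
	f, err := fsys.Open("main.tf")
	if err != nil {
		return err
	}
	return f.Close()
}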
@@ -100,6 +103,10 @@ type Cache struct {
 	fetcher

 	// metrics
+	cacheMetrics
+}
+
+type cacheMetrics struct {
 	currentOpenFileReferences prometheus.Gauge
 	totalOpenFileReferences   prometheus.Counter

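The constructors for these collectors live in New() and are not shown in this commit's hunks. Purely as a sketch of the wiring, registering the two reference-count metrics visible here against the passed-in Registerer could look like the code below (in package files, with promauto imported from github.com/prometheus/client_golang/prometheus/promauto); the namespace, metric names, and help strings are placeholders, not the ones coderd actually registers.

// Sketch only: covers just the two fields visible in this hunk; the real
// names and help text are defined elsewhere in the package.
func newCacheMetrics(registerer prometheus.Registerer) cacheMetrics {
	f := promauto.With(registerer)
	return cacheMetrics{
		currentOpenFileReferences: f.NewGauge(prometheus.GaugeOpts{
			Namespace: "coderd",     // placeholder
			Subsystem: "file_cache", // placeholder
			Name:      "open_file_refs_current",
			Help:      "Number of file references currently held open.",
		}),
		totalOpenFileReferences: f.NewCounter(prometheus.CounterOpts{
			Namespace: "coderd",     // placeholder
			Subsystem: "file_cache", // placeholder
			Name:      "open_file_refs_total",
			Help:      "Total number of file references opened since start.",
		}),
	}
}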

@@ -111,7 +118,7 @@ type Cache struct {
 }

 type cacheEntryValue struct {
-	dir fs.FS
+	fs.FS
 	size int64
 }

@@ -121,7 +128,7 @@ type cacheEntry struct {
 	value *lazy.ValueWithError[cacheEntryValue]
 }

-type fetcher func(context.Context, uuid.UUID) (dir fs.FS, size int64, err error)
+type fetcher func(context.Context, uuid.UUID) (cacheEntryValue, error)

 // Acquire will load the fs.FS for the given file. It guarantees that parallel
 // calls for the same fileID will only result in one fetch, and that parallel
@@ -136,8 +143,9 @@ func (c *Cache) Acquire(ctx context.Context, fileID uuid.UUID) (fs.FS, error) {
 	it, err := c.prepare(ctx, fileID).Load()
 	if err != nil {
 		c.Release(fileID)
+		return nil, err
 	}
-	return it.dir, err
+	return it.FS, err
 }

 func (c *Cache) prepare(ctx context.Context, fileID uuid.UUID) *lazy.ValueWithError[cacheEntryValue] {
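The added return nil, err is the substantive fix in this hunk: a failed Load no longer falls through and returns the FS of a zero-valued entry. A hypothetical internal-package test sketch for that error path (not in this commit; it reuses the imports already in cache_internal_test.go plus golang.org/x/xerrors and testify's require):

func TestAcquireFetchError(t *testing.T) {
	t.Parallel()

	// A fetcher that always fails; the error should surface from Acquire and
	// the returned fs.FS should be nil.
	c := New(func(_ context.Context, _ uuid.UUID) (cacheEntryValue, error) {
		return cacheEntryValue{}, xerrors.New("boom")
	}, prometheus.NewRegistry())

	fsys, err := c.Acquire(context.Background(), uuid.New())
	require.Error(t, err)
	require.Nil(t, fsys)
}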
@@ -147,18 +155,15 @@ func (c *Cache) prepare(ctx context.Context, fileID uuid.UUID) *lazy.ValueWithEr
 	entry, ok := c.data[fileID]
 	if !ok {
 		value := lazy.NewWithError(func() (cacheEntryValue, error) {
-			dir, size, err := c.fetcher(ctx, fileID)
+			val, err := c.fetcher(ctx, fileID)

 			// Always add to the cache size the bytes of the file loaded.
 			if err == nil {
-				c.currentCacheSize.Add(float64(size))
-				c.totalCacheSize.Add(float64(size))
+				c.currentCacheSize.Add(float64(val.size))
+				c.totalCacheSize.Add(float64(val.size))
 			}

-			return cacheEntryValue{
-				dir:  dir,
-				size: size,
-			}, err
+			return val, err
 		})

 		entry = &cacheEntry{
12 changes: 7 additions & 5 deletions coderd/files/cache_internal_test.go
@@ -2,7 +2,6 @@ package files

 import (
 	"context"
-	"io/fs"
 	"sync/atomic"
 	"testing"
 	"time"
@@ -28,12 +27,12 @@ func TestConcurrency(t *testing.T) {
 	emptyFS := afero.NewIOFS(afero.NewReadOnlyFs(afero.NewMemMapFs()))
 	var fetches atomic.Int64
 	reg := prometheus.NewRegistry()
-	c := New(func(_ context.Context, _ uuid.UUID) (fs.FS, int64, error) {
+	c := New(func(_ context.Context, _ uuid.UUID) (cacheEntryValue, error) {
 		fetches.Add(1)
 		// Wait long enough before returning to make sure that all of the goroutines
 		// will be waiting in line, ensuring that no one duplicated a fetch.
 		time.Sleep(testutil.IntervalMedium)
-		return emptyFS, fileSize, nil
+		return cacheEntryValue{FS: emptyFS, size: fileSize}, nil
 	}, reg)

 	batches := 1000
@@ -79,8 +78,11 @@ func TestRelease(t *testing.T) {
 	const fileSize = 10
 	emptyFS := afero.NewIOFS(afero.NewReadOnlyFs(afero.NewMemMapFs()))
 	reg := prometheus.NewRegistry()
-	c := New(func(_ context.Context, _ uuid.UUID) (fs.FS, int64, error) {
-		return emptyFS, fileSize, nil
+	c := New(func(_ context.Context, _ uuid.UUID) (cacheEntryValue, error) {
+		return cacheEntryValue{
+			FS:   emptyFS,
+			size: fileSize,
+		}, nil
 	}, reg)

 	batches := 100
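Because these tests live in package files, the unexported collectors are directly reachable, so the size accounting added in prepare() could also be asserted with prometheus's testutil helpers. A hedged sketch (not part of this commit), assuming a single Acquire of a fileSize-byte file has completed and promtest aliases github.com/prometheus/client_golang/prometheus/testutil:

// Sketch only: ToFloat64 reads the current value of a single collector.
require.Equal(t, float64(fileSize), promtest.ToFloat64(c.currentCacheSize))
require.Equal(t, float64(fileSize), promtest.ToFloat64(c.totalCacheSize))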