diff --git a/coderd/agentapi/stats_test.go b/coderd/agentapi/stats_test.go
index 57534208be110..49314aa6e9f5b 100644
--- a/coderd/agentapi/stats_test.go
+++ b/coderd/agentapi/stats_test.go
@@ -69,6 +69,11 @@ func TestUpdateStates(t *testing.T) {
 			}
 			batcher                    = &workspacestatstest.StatsBatcher{}
 			updateAgentMetricsFnCalled = false
+			tickCh                     = make(chan time.Time)
+			flushCh                    = make(chan int, 1)
+			wut                        = workspacestats.NewTracker(dbM,
+				workspacestats.TrackerWithTickFlush(tickCh, flushCh),
+			)
 
 			req = &agentproto.UpdateStatsRequest{
 				Stats: &agentproto.Stats{
@@ -108,6 +113,7 @@ func TestUpdateStates(t *testing.T) {
 				Database:              dbM,
 				Pubsub:                ps,
 				StatsBatcher:          batcher,
+				UsageTracker:          wut,
 				TemplateScheduleStore: templateScheduleStorePtr(templateScheduleStore),
 				UpdateAgentMetricsFn: func(ctx context.Context, labels prometheusmetrics.AgentMetricLabels, metrics []*agentproto.Stats_Metric) {
 					updateAgentMetricsFnCalled = true
@@ -125,6 +131,7 @@ func TestUpdateStates(t *testing.T) {
 				return now
 			},
 		}
+		defer wut.Close()
 
 		// Workspace gets fetched.
 		dbM.EXPECT().GetWorkspaceByAgentID(gomock.Any(), agent.ID).Return(database.GetWorkspaceByAgentIDRow{
@@ -132,6 +139,9 @@ func TestUpdateStates(t *testing.T) {
 			TemplateName: template.Name,
 		}, nil)
 
+		// User gets fetched to hit the UpdateAgentMetricsFn.
+		dbM.EXPECT().GetUserByID(gomock.Any(), user.ID).Return(user, nil)
+
 		// We expect an activity bump because ConnectionCount > 0.
 		dbM.EXPECT().ActivityBumpWorkspace(gomock.Any(), database.ActivityBumpWorkspaceParams{
 			WorkspaceID: workspace.ID,
@@ -139,14 +149,11 @@ func TestUpdateStates(t *testing.T) {
 		}).Return(nil)
 
 		// Workspace last used at gets bumped.
-		dbM.EXPECT().UpdateWorkspaceLastUsedAt(gomock.Any(), database.UpdateWorkspaceLastUsedAtParams{
-			ID:         workspace.ID,
+		dbM.EXPECT().BatchUpdateWorkspaceLastUsedAt(gomock.Any(), database.BatchUpdateWorkspaceLastUsedAtParams{
+			IDs:        []uuid.UUID{workspace.ID},
 			LastUsedAt: now,
 		}).Return(nil)
 
-		// User gets fetched to hit the UpdateAgentMetricsFn.
-		dbM.EXPECT().GetUserByID(gomock.Any(), user.ID).Return(user, nil)
-
 		// Ensure that pubsub notifications are sent.
 		notifyDescription := make(chan []byte)
 		ps.Subscribe(codersdk.WorkspaceNotifyChannel(workspace.ID), func(_ context.Context, description []byte) {
@@ -161,6 +168,10 @@ func TestUpdateStates(t *testing.T) {
 			ReportInterval: durationpb.New(10 * time.Second),
 		}, resp)
 
+		tickCh <- now
+		count := <-flushCh
+		require.Equal(t, 1, count, "expected one flush with one id")
+
 		batcher.Mu.Lock()
 		defer batcher.Mu.Unlock()
 		require.Equal(t, int64(1), batcher.Called)
@@ -213,6 +224,7 @@ func TestUpdateStates(t *testing.T) {
 			StatsReporter: workspacestats.NewReporter(workspacestats.ReporterOptions{
 				Database:              dbM,
 				Pubsub:                ps,
+				UsageTracker:          workspacestats.NewTracker(dbM),
 				StatsBatcher:          batcher,
 				TemplateScheduleStore: templateScheduleStorePtr(templateScheduleStore),
 				// Ignored when nil.
@@ -230,12 +242,6 @@ func TestUpdateStates(t *testing.T) {
 			TemplateName: template.Name,
 		}, nil)
 
-		// Workspace last used at gets bumped.
-		dbM.EXPECT().UpdateWorkspaceLastUsedAt(gomock.Any(), database.UpdateWorkspaceLastUsedAtParams{
-			ID:         workspace.ID,
-			LastUsedAt: now,
-		}).Return(nil)
-
 		_, err := api.UpdateStats(context.Background(), req)
 		require.NoError(t, err)
 	})
@@ -311,6 +317,11 @@ func TestUpdateStates(t *testing.T) {
 			}
 			batcher                    = &workspacestatstest.StatsBatcher{}
 			updateAgentMetricsFnCalled = false
+			tickCh                     = make(chan time.Time)
+			flushCh                    = make(chan int, 1)
+			wut                        = workspacestats.NewTracker(dbM,
+				workspacestats.TrackerWithTickFlush(tickCh, flushCh),
+			)
 
 			req = &agentproto.UpdateStatsRequest{
 				Stats: &agentproto.Stats{
@@ -330,6 +341,7 @@ func TestUpdateStates(t *testing.T) {
 			StatsReporter: workspacestats.NewReporter(workspacestats.ReporterOptions{
 				Database:              dbM,
 				Pubsub:                ps,
+				UsageTracker:          wut,
 				StatsBatcher:          batcher,
 				TemplateScheduleStore: templateScheduleStorePtr(templateScheduleStore),
 				UpdateAgentMetricsFn: func(ctx context.Context, labels prometheusmetrics.AgentMetricLabels, metrics []*agentproto.Stats_Metric) {
@@ -348,6 +360,7 @@ func TestUpdateStates(t *testing.T) {
 				return now
 			},
 		}
+		defer wut.Close()
 
 		// Workspace gets fetched.
 		dbM.EXPECT().GetWorkspaceByAgentID(gomock.Any(), agent.ID).Return(database.GetWorkspaceByAgentIDRow{
@@ -363,9 +376,9 @@ func TestUpdateStates(t *testing.T) {
 		}).Return(nil)
 
 		// Workspace last used at gets bumped.
-		dbM.EXPECT().UpdateWorkspaceLastUsedAt(gomock.Any(), database.UpdateWorkspaceLastUsedAtParams{
-			ID:         workspace.ID,
-			LastUsedAt: now,
+		dbM.EXPECT().BatchUpdateWorkspaceLastUsedAt(gomock.Any(), database.BatchUpdateWorkspaceLastUsedAtParams{
+			IDs:        []uuid.UUID{workspace.ID},
+			LastUsedAt: now.UTC(),
 		}).Return(nil)
 
 		// User gets fetched to hit the UpdateAgentMetricsFn.
@@ -377,6 +390,10 @@ func TestUpdateStates(t *testing.T) {
 			ReportInterval: durationpb.New(15 * time.Second),
 		}, resp)
 
+		tickCh <- now
+		count := <-flushCh
+		require.Equal(t, 1, count, "expected one flush with one id")
+
 		require.True(t, updateAgentMetricsFnCalled)
 	})
@@ -400,6 +417,11 @@ func TestUpdateStates(t *testing.T) {
 			}
 			batcher                    = &workspacestatstest.StatsBatcher{}
 			updateAgentMetricsFnCalled = false
+			tickCh                     = make(chan time.Time)
+			flushCh                    = make(chan int, 1)
+			wut                        = workspacestats.NewTracker(dbM,
+				workspacestats.TrackerWithTickFlush(tickCh, flushCh),
+			)
 
 			req = &agentproto.UpdateStatsRequest{
 				Stats: &agentproto.Stats{
@@ -430,6 +452,7 @@ func TestUpdateStates(t *testing.T) {
 				},
 			}
 		)
+		defer wut.Close()
 		api := agentapi.StatsAPI{
 			AgentFn: func(context.Context) (database.WorkspaceAgent, error) {
 				return agent, nil
@@ -439,6 +462,7 @@ func TestUpdateStates(t *testing.T) {
 				Database:              dbM,
 				Pubsub:                ps,
 				StatsBatcher:          batcher,
+				UsageTracker:          wut,
 				TemplateScheduleStore: templateScheduleStorePtr(templateScheduleStore),
 				UpdateAgentMetricsFn: func(ctx context.Context, labels prometheusmetrics.AgentMetricLabels, metrics []*agentproto.Stats_Metric) {
 					updateAgentMetricsFnCalled = true
@@ -473,8 +497,8 @@ func TestUpdateStates(t *testing.T) {
 		}).Return(nil)
 
 		// Workspace last used at gets bumped.
-		dbM.EXPECT().UpdateWorkspaceLastUsedAt(gomock.Any(), database.UpdateWorkspaceLastUsedAtParams{
-			ID:         workspace.ID,
+		dbM.EXPECT().BatchUpdateWorkspaceLastUsedAt(gomock.Any(), database.BatchUpdateWorkspaceLastUsedAtParams{
+			IDs:        []uuid.UUID{workspace.ID},
 			LastUsedAt: now,
 		}).Return(nil)
 
@@ -495,6 +519,10 @@ func TestUpdateStates(t *testing.T) {
 			ReportInterval: durationpb.New(10 * time.Second),
 		}, resp)
 
+		tickCh <- now
+		count := <-flushCh
+		require.Equal(t, 1, count, "expected one flush with one id")
+
 		batcher.Mu.Lock()
 		defer batcher.Mu.Unlock()
 		require.EqualValues(t, 1, batcher.Called)
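The tests above inject tick and flush channels into workspacestats.NewTracker via TrackerWithTickFlush so the test, rather than a wall-clock ticker, decides when the batched last_used_at flush happens and can observe how many IDs it flushed. Below is a minimal, self-contained sketch of that tick/flush-channel pattern; it is illustrative only and not the actual Tracker implementation (the real one flushes workspace UUIDs to the database).

package main

import (
	"fmt"
	"time"
)

type tracker struct {
	ids     map[string]struct{}
	tickCh  <-chan time.Time
	flushCh chan<- int
	addCh   chan string
	doneCh  chan struct{}
}

func newTracker(tickCh <-chan time.Time, flushCh chan<- int) *tracker {
	tr := &tracker{
		ids:     map[string]struct{}{},
		tickCh:  tickCh,
		flushCh: flushCh,
		addCh:   make(chan string),
		doneCh:  make(chan struct{}),
	}
	go tr.loop()
	return tr
}

// Add queues an ID for the next flush.
func (tr *tracker) Add(id string) { tr.addCh <- id }

// Close stops the background loop.
func (tr *tracker) Close() { close(tr.doneCh) }

func (tr *tracker) loop() {
	for {
		select {
		case id := <-tr.addCh:
			tr.ids[id] = struct{}{}
		case <-tr.tickCh:
			// A real implementation would batch-update last_used_at here.
			count := len(tr.ids)
			tr.ids = map[string]struct{}{}
			tr.flushCh <- count // let the test observe how many IDs were flushed
		case <-tr.doneCh:
			return
		}
	}
}

func main() {
	tickCh := make(chan time.Time)
	flushCh := make(chan int, 1)
	tr := newTracker(tickCh, flushCh)
	defer tr.Close()

	tr.Add("workspace-1")

	tickCh <- time.Now() // the caller owns the tick instead of a wall clock
	fmt.Println(<-flushCh) // 1
}

Because the tick is sent synchronously, the assertion on flushCh cannot race with the flush, which is why the tests can use require.Equal immediately after the send.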
diff --git a/coderd/httpapi/websocket.go b/coderd/httpapi/websocket.go
index 629dcac8131f3..2d6f131fd5aa3 100644
--- a/coderd/httpapi/websocket.go
+++ b/coderd/httpapi/websocket.go
@@ -2,8 +2,10 @@ package httpapi
 
 import (
 	"context"
+	"errors"
 	"time"
 
+	"golang.org/x/xerrors"
 	"nhooyr.io/websocket"
 
 	"cdr.dev/slog"
@@ -31,7 +33,8 @@ func Heartbeat(ctx context.Context, conn *websocket.Conn) {
 // Heartbeat loops to ping a WebSocket to keep it alive. It calls `exit` on ping
 // failure.
 func HeartbeatClose(ctx context.Context, logger slog.Logger, exit func(), conn *websocket.Conn) {
-	ticker := time.NewTicker(15 * time.Second)
+	interval := 15 * time.Second
+	ticker := time.NewTicker(interval)
 	defer ticker.Stop()
 
 	for {
@@ -40,12 +43,26 @@ func HeartbeatClose(ctx context.Context, logger slog.Logger, exit func(), conn *
 			return
 		case <-ticker.C:
 		}
-		err := conn.Ping(ctx)
+		err := pingWithTimeout(ctx, conn, interval)
 		if err != nil {
+			// context.DeadlineExceeded is expected when the client disconnects without sending a close frame
+			if !errors.Is(err, context.DeadlineExceeded) {
+				logger.Error(ctx, "failed to heartbeat ping", slog.Error(err))
+			}
 			_ = conn.Close(websocket.StatusGoingAway, "Ping failed")
-			logger.Info(ctx, "failed to heartbeat ping", slog.Error(err))
 			exit()
 			return
 		}
 	}
 }
+
+func pingWithTimeout(ctx context.Context, conn *websocket.Conn, timeout time.Duration) error {
+	ctx, cancel := context.WithTimeout(ctx, timeout)
+	defer cancel()
+	err := conn.Ping(ctx)
+	if err != nil {
+		return xerrors.Errorf("failed to ping: %w", err)
+	}
+
+	return nil
+}
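The pingWithTimeout helper above bounds each heartbeat ping with the ticker interval so a peer that vanished without a close frame cannot block the loop forever, and a resulting context.DeadlineExceeded is treated as an expected disconnect rather than something to log as an error. A generic, runnable sketch of the same idea with a stand-in ping function (not the nhooyr.io/websocket API) follows.

package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// pingWithTimeout bounds a blocking ping with a deadline derived from the
// heartbeat interval, wrapping any failure so callers can inspect it.
func pingWithTimeout(ctx context.Context, ping func(context.Context) error, timeout time.Duration) error {
	ctx, cancel := context.WithTimeout(ctx, timeout)
	defer cancel()
	if err := ping(ctx); err != nil {
		return fmt.Errorf("failed to ping: %w", err)
	}
	return nil
}

func main() {
	// Simulated dead peer: never answers, only unblocks when the context expires.
	deadPeer := func(ctx context.Context) error {
		<-ctx.Done()
		return ctx.Err()
	}

	err := pingWithTimeout(context.Background(), deadPeer, 50*time.Millisecond)
	if errors.Is(err, context.DeadlineExceeded) {
		fmt.Println("peer went away without a close frame; closing quietly")
	}
}

Because the error is wrapped with %w, errors.Is still matches context.DeadlineExceeded, which is the same check HeartbeatClose performs before deciding whether to log.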
diff --git a/coderd/insights_test.go b/coderd/insights_test.go
index 06fe8d46ca5ac..bf8aa4bc44506 100644
--- a/coderd/insights_test.go
+++ b/coderd/insights_test.go
@@ -700,14 +700,13 @@ func TestTemplateInsights_Golden(t *testing.T) {
 					connectionCount = 0
 				}
 				for createdAt.Before(stat.endedAt) {
-					err = batcher.Add(createdAt, workspace.agentID, workspace.template.id, workspace.user.(*testUser).sdk.ID, workspace.id, &agentproto.Stats{
+					batcher.Add(createdAt, workspace.agentID, workspace.template.id, workspace.user.(*testUser).sdk.ID, workspace.id, &agentproto.Stats{
 						ConnectionCount:             connectionCount,
 						SessionCountVscode:          stat.sessionCountVSCode,
 						SessionCountJetbrains:       stat.sessionCountJetBrains,
 						SessionCountReconnectingPty: stat.sessionCountReconnectingPTY,
 						SessionCountSsh:             stat.sessionCountSSH,
 					}, false)
-					require.NoError(t, err, "want no error inserting agent stats")
 					createdAt = createdAt.Add(30 * time.Second)
 				}
 			}
@@ -1599,14 +1598,13 @@ func TestUserActivityInsights_Golden(t *testing.T) {
 					connectionCount = 0
 				}
 				for createdAt.Before(stat.endedAt) {
-					err = batcher.Add(createdAt, workspace.agentID, workspace.template.id, workspace.user.(*testUser).sdk.ID, workspace.id, &agentproto.Stats{
+					batcher.Add(createdAt, workspace.agentID, workspace.template.id, workspace.user.(*testUser).sdk.ID, workspace.id, &agentproto.Stats{
 						ConnectionCount:             connectionCount,
						SessionCountVscode:          stat.sessionCountVSCode,
						SessionCountJetbrains:       stat.sessionCountJetBrains,
						SessionCountReconnectingPty: stat.sessionCountReconnectingPTY,
						SessionCountSsh:             stat.sessionCountSSH,
 					}, false)
-					require.NoError(t, err, "want no error inserting agent stats")
 					createdAt = createdAt.Add(30 * time.Second)
 				}
 			}
diff --git a/coderd/workspaceagentsrpc_test.go b/coderd/workspaceagentsrpc_test.go
index ca8f334d4e766..df57442462e2f 100644
--- a/coderd/workspaceagentsrpc_test.go
+++ b/coderd/workspaceagentsrpc_test.go
@@ -3,6 +3,7 @@ package coderd_test
 import (
 	"context"
 	"testing"
+	"time"
 
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
@@ -11,6 +12,7 @@ import (
 	"github.com/coder/coder/v2/coderd/coderdtest"
 	"github.com/coder/coder/v2/coderd/database"
 	"github.com/coder/coder/v2/coderd/database/dbfake"
+	"github.com/coder/coder/v2/coderd/database/dbtime"
 	"github.com/coder/coder/v2/codersdk/agentsdk"
 	"github.com/coder/coder/v2/provisionersdk/proto"
 	"github.com/coder/coder/v2/testutil"
@@ -20,7 +22,12 @@ import (
 func TestWorkspaceAgentReportStats(t *testing.T) {
 	t.Parallel()
 
-	client, db := coderdtest.NewWithDatabase(t, nil)
+	tickCh := make(chan time.Time)
+	flushCh := make(chan int, 1)
+	client, db := coderdtest.NewWithDatabase(t, &coderdtest.Options{
+		WorkspaceUsageTrackerFlush: flushCh,
+		WorkspaceUsageTrackerTick:  tickCh,
+	})
 	user := coderdtest.CreateFirstUser(t, client)
 	r := dbfake.WorkspaceBuild(t, db, database.Workspace{
 		OrganizationID: user.OrganizationID,
@@ -53,6 +60,10 @@ func TestWorkspaceAgentReportStats(t *testing.T) {
 	})
 	require.NoError(t, err)
 
+	tickCh <- dbtime.Now()
+	count := <-flushCh
+	require.Equal(t, 1, count, "expected one flush with one id")
+
 	newWorkspace, err := client.Workspace(context.Background(), r.Workspace.ID)
 	require.NoError(t, err)
diff --git a/coderd/workspaceapps/proxy.go b/coderd/workspaceapps/proxy.go
index 69f1aadca49b2..c6cd01395db5c 100644
--- a/coderd/workspaceapps/proxy.go
+++ b/coderd/workspaceapps/proxy.go
@@ -593,7 +593,6 @@ func (s *Server) proxyWorkspaceApp(rw http.ResponseWriter, r *http.Request, appT
 	tracing.EndHTTPSpan(r, http.StatusOK, trace.SpanFromContext(ctx))
 
 	report := newStatsReportFromSignedToken(appToken)
-	s.collectStats(report)
 	defer func() {
 		// We must use defer here because ServeHTTP may panic.
 		report.SessionEndedAt = dbtime.Now()
@@ -614,7 +613,8 @@ func (s *Server) proxyWorkspaceApp(rw http.ResponseWriter, r *http.Request, appT
 // @Success 101
 // @Router /workspaceagents/{workspaceagent}/pty [get]
 func (s *Server) workspaceAgentPTY(rw http.ResponseWriter, r *http.Request) {
-	ctx := r.Context()
+	ctx, cancel := context.WithCancel(r.Context())
+	defer cancel()
 
 	s.websocketWaitMutex.Lock()
 	s.websocketWaitGroup.Add(1)
@@ -670,12 +670,11 @@ func (s *Server) workspaceAgentPTY(rw http.ResponseWriter, r *http.Request) {
 		})
 		return
 	}
+	go httpapi.HeartbeatClose(ctx, s.Logger, cancel, conn)
 
 	ctx, wsNetConn := WebsocketNetConn(ctx, conn, websocket.MessageBinary)
 	defer wsNetConn.Close() // Also closes conn.
 
-	go httpapi.Heartbeat(ctx, conn)
-
 	agentConn, release, err := s.AgentProvider.AgentConn(ctx, appToken.AgentID)
 	if err != nil {
 		log.Debug(ctx, "dial workspace agent", slog.Error(err))
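workspaceAgentPTY now derives a cancellable context from the request and hands cancel to httpapi.HeartbeatClose, so a failed ping tears down everything started from that context instead of leaving the PTY stream hanging. Below is a small runnable sketch of that wiring; heartbeatClose and the fake ping are stand-ins for illustration, not coder's httpapi functions.

package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// heartbeatClose pings on a ticker until a ping fails, then calls exit once.
func heartbeatClose(ctx context.Context, ping func(context.Context) error, exit func()) {
	ticker := time.NewTicker(20 * time.Millisecond)
	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			return
		case <-ticker.C:
		}
		if err := ping(ctx); err != nil {
			exit() // cancel the request-scoped context
			return
		}
	}
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// Simulated peer that stops answering after 50ms.
	failAfter := time.Now().Add(50 * time.Millisecond)
	ping := func(context.Context) error {
		if time.Now().After(failAfter) {
			return errors.New("peer gone")
		}
		return nil
	}

	go heartbeatClose(ctx, ping, cancel)

	<-ctx.Done() // everything derived from ctx unwinds here
	fmt.Println("request context cancelled after heartbeat failure")
}

Passing cancel as the exit callback is what makes the handler's deferred cleanup and any in-flight agent dials observe the disconnect promptly.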
diff --git a/coderd/workspacestats/batcher.go b/coderd/workspacestats/batcher.go
index 1f14c5cec5a17..46efc69170562 100644
--- a/coderd/workspacestats/batcher.go
+++ b/coderd/workspacestats/batcher.go
@@ -25,7 +25,7 @@ const (
 )
 
 type Batcher interface {
-	Add(now time.Time, agentID uuid.UUID, templateID uuid.UUID, userID uuid.UUID, workspaceID uuid.UUID, st *agentproto.Stats, usage bool) error
+	Add(now time.Time, agentID uuid.UUID, templateID uuid.UUID, userID uuid.UUID, workspaceID uuid.UUID, st *agentproto.Stats, usage bool)
 }
 
 // DBBatcher holds a buffer of agent stats and periodically flushes them to
@@ -139,7 +139,7 @@ func (b *DBBatcher) Add(
 	workspaceID uuid.UUID,
 	st *agentproto.Stats,
 	usage bool,
-) error {
+) {
 	b.mu.Lock()
 	defer b.mu.Unlock()
 
@@ -176,7 +176,6 @@ func (b *DBBatcher) Add(
 		b.flushLever <- struct{}{}
 		b.flushForced.Store(true)
 	}
-	return nil
 }
 
 // Run runs the batcher.
diff --git a/coderd/workspacestats/batcher_internal_test.go b/coderd/workspacestats/batcher_internal_test.go
index 2f7a25b152127..1d96789d57376 100644
--- a/coderd/workspacestats/batcher_internal_test.go
+++ b/coderd/workspacestats/batcher_internal_test.go
@@ -63,7 +63,7 @@ func TestBatchStats(t *testing.T) {
 	// Given: a single data point is added for workspace
 	t2 := t1.Add(time.Second)
 	t.Logf("inserting 1 stat")
-	require.NoError(t, b.Add(t2.Add(time.Millisecond), deps1.Agent.ID, deps1.User.ID, deps1.Template.ID, deps1.Workspace.ID, randStats(t), false))
+	b.Add(t2.Add(time.Millisecond), deps1.Agent.ID, deps1.User.ID, deps1.Template.ID, deps1.Workspace.ID, randStats(t), false)
 
 	// When: it becomes time to report stats
 	// Signal a tick and wait for a flush to complete.
@@ -87,9 +87,9 @@ func TestBatchStats(t *testing.T) {
 		t.Logf("inserting %d stats", defaultBufferSize)
 		for i := 0; i < defaultBufferSize; i++ {
 			if i%2 == 0 {
-				require.NoError(t, b.Add(t3.Add(time.Millisecond), deps1.Agent.ID, deps1.User.ID, deps1.Template.ID, deps1.Workspace.ID, randStats(t), false))
+				b.Add(t3.Add(time.Millisecond), deps1.Agent.ID, deps1.User.ID, deps1.Template.ID, deps1.Workspace.ID, randStats(t), false)
 			} else {
-				require.NoError(t, b.Add(t3.Add(time.Millisecond), deps2.Agent.ID, deps2.User.ID, deps2.Template.ID, deps2.Workspace.ID, randStats(t), false))
+				b.Add(t3.Add(time.Millisecond), deps2.Agent.ID, deps2.User.ID, deps2.Template.ID, deps2.Workspace.ID, randStats(t), false)
 			}
 		}
 	}()
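Batcher.Add can drop its error return because, as the diff shows, it only appends to an in-memory buffer under a mutex and nudges the flush loop when the buffer fills; the actual database write happens later in Run. A minimal sketch of that fire-and-forget shape is below; the types are illustrative, not the DBBatcher internals.

package main

import (
	"fmt"
	"sync"
)

type batcher struct {
	mu         sync.Mutex
	buf        []int
	limit      int
	flushLever chan struct{}
}

// Add buffers a stat; it has nothing that can fail, so it returns nothing.
func (b *batcher) Add(stat int) {
	b.mu.Lock()
	defer b.mu.Unlock()
	b.buf = append(b.buf, stat)
	if len(b.buf) >= b.limit {
		// Nudge the run loop to flush early; never an error for the caller.
		select {
		case b.flushLever <- struct{}{}:
		default: // a flush request is already pending
		}
	}
}

func main() {
	b := &batcher{limit: 2, flushLever: make(chan struct{}, 1)}
	b.Add(1)
	b.Add(2)
	fmt.Println(len(b.buf), len(b.flushLever)) // 2 1
}

Moving the only fallible work into the flush path is what lets the callers above (the reporter and the insights tests) drop their require.NoError checks without losing coverage.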
diff --git a/coderd/workspacestats/reporter.go b/coderd/workspacestats/reporter.go
index fecfd1b1eda92..6bb1b2dea4028 100644
--- a/coderd/workspacestats/reporter.go
+++ b/coderd/workspacestats/reporter.go
@@ -6,7 +6,6 @@ import (
 	"time"
 
 	"github.com/google/uuid"
-	"golang.org/x/sync/errgroup"
 	"golang.org/x/xerrors"
 
 	"cdr.dev/slog"
@@ -119,69 +118,57 @@ func (r *Reporter) ReportAppStats(ctx context.Context, stats []workspaceapps.Sta
 }
 
 func (r *Reporter) ReportAgentStats(ctx context.Context, now time.Time, workspace database.Workspace, workspaceAgent database.WorkspaceAgent, templateName string, stats *agentproto.Stats, usage bool) error {
-	if stats.ConnectionCount > 0 {
-		var nextAutostart time.Time
-		if workspace.AutostartSchedule.String != "" {
-			templateSchedule, err := (*(r.opts.TemplateScheduleStore.Load())).Get(ctx, r.opts.Database, workspace.TemplateID)
-			// If the template schedule fails to load, just default to bumping
-			// without the next transition and log it.
-			if err != nil {
-				r.opts.Logger.Error(ctx, "failed to load template schedule bumping activity, defaulting to bumping by 60min",
-					slog.F("workspace_id", workspace.ID),
-					slog.F("template_id", workspace.TemplateID),
-					slog.Error(err),
-				)
-			} else {
-				next, allowed := schedule.NextAutostart(now, workspace.AutostartSchedule.String, templateSchedule)
-				if allowed {
-					nextAutostart = next
-				}
-			}
-		}
-		ActivityBumpWorkspace(ctx, r.opts.Logger.Named("activity_bump"), r.opts.Database, workspace.ID, nextAutostart)
-	}
+	// update agent stats
+	r.opts.StatsBatcher.Add(now, workspaceAgent.ID, workspace.TemplateID, workspace.OwnerID, workspace.ID, stats, usage)
 
-	var errGroup errgroup.Group
-	errGroup.Go(func() error {
-		err := r.opts.StatsBatcher.Add(now, workspaceAgent.ID, workspace.TemplateID, workspace.OwnerID, workspace.ID, stats, usage)
+	// update prometheus metrics
+	if r.opts.UpdateAgentMetricsFn != nil {
+		user, err := r.opts.Database.GetUserByID(ctx, workspace.OwnerID)
 		if err != nil {
-			r.opts.Logger.Error(ctx, "add agent stats to batcher", slog.Error(err))
-			return xerrors.Errorf("insert workspace agent stats batch: %w", err)
+			return xerrors.Errorf("get user: %w", err)
 		}
+
+		r.opts.UpdateAgentMetricsFn(ctx, prometheusmetrics.AgentMetricLabels{
+			Username:      user.Username,
+			WorkspaceName: workspace.Name,
+			AgentName:     workspaceAgent.Name,
+			TemplateName:  templateName,
+		}, stats.Metrics)
+	}
+
+	// if no active connections we do not bump activity
+	if stats.ConnectionCount == 0 {
 		return nil
-	})
-	errGroup.Go(func() error {
-		err := r.opts.Database.UpdateWorkspaceLastUsedAt(ctx, database.UpdateWorkspaceLastUsedAtParams{
-			ID:         workspace.ID,
-			LastUsedAt: now,
-		})
+	}
+
+	// check next autostart
+	var nextAutostart time.Time
+	if workspace.AutostartSchedule.String != "" {
+		templateSchedule, err := (*(r.opts.TemplateScheduleStore.Load())).Get(ctx, r.opts.Database, workspace.TemplateID)
+		// If the template schedule fails to load, just default to bumping
+		// without the next transition and log it.
 		if err != nil {
-			return xerrors.Errorf("update workspace LastUsedAt: %w", err)
-		}
-		return nil
-	})
-	if r.opts.UpdateAgentMetricsFn != nil {
-		errGroup.Go(func() error {
-			user, err := r.opts.Database.GetUserByID(ctx, workspace.OwnerID)
-			if err != nil {
-				return xerrors.Errorf("get user: %w", err)
+			r.opts.Logger.Error(ctx, "failed to load template schedule bumping activity, defaulting to bumping by 60min",
+				slog.F("workspace_id", workspace.ID),
+				slog.F("template_id", workspace.TemplateID),
+				slog.Error(err),
+			)
+		} else {
+			next, allowed := schedule.NextAutostart(now, workspace.AutostartSchedule.String, templateSchedule)
+			if allowed {
+				nextAutostart = next
 			}
-
-			r.opts.UpdateAgentMetricsFn(ctx, prometheusmetrics.AgentMetricLabels{
-				Username:      user.Username,
-				WorkspaceName: workspace.Name,
-				AgentName:     workspaceAgent.Name,
-				TemplateName:  templateName,
-			}, stats.Metrics)
-			return nil
-		})
-	}
-	err := errGroup.Wait()
-	if err != nil {
-		return xerrors.Errorf("update stats in database: %w", err)
+		}
 	}
 
-	err = r.opts.Pubsub.Publish(codersdk.WorkspaceNotifyChannel(workspace.ID), []byte{})
+	// bump workspace activity
+	ActivityBumpWorkspace(ctx, r.opts.Logger.Named("activity_bump"), r.opts.Database, workspace.ID, nextAutostart)
+
+	// bump workspace last_used_at
+	r.opts.UsageTracker.Add(workspace.ID)
+
+	// notify workspace update
+	err := r.opts.Pubsub.Publish(codersdk.WorkspaceNotifyChannel(workspace.ID), []byte{})
 	if err != nil {
 		r.opts.Logger.Warn(ctx, "failed to publish workspace agent stats",
 			slog.F("workspace_id", workspace.ID), slog.Error(err))
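With the errgroup gone, ReportAgentStats runs as a single sequential flow: stats are always batched and Prometheus metrics updated, while the activity bump, the UsageTracker.Add call (which replaces the per-request UpdateWorkspaceLastUsedAt write), and the pubsub notification are skipped for agents with no active connections. Below is a condensed sketch of that control flow using illustrative stand-in types, not coder's.

package main

import "fmt"

type stats struct{ ConnectionCount int64 }

type deps struct {
	batchAdd      func(stats)
	updateMetrics func()
	bumpActivity  func()
	trackUsage    func()
	notify        func() error
}

// reportAgentStats mirrors the reordered flow: batching and metrics always
// run; activity, usage tracking, and notification only run for active agents.
func reportAgentStats(d deps, st stats) error {
	d.batchAdd(st)
	d.updateMetrics()

	if st.ConnectionCount == 0 {
		return nil // idle agent: no bump, no last_used_at, no notification
	}

	d.bumpActivity() // extend the autostop deadline
	d.trackUsage()   // queue the workspace for the batched last_used_at update
	return d.notify()
}

func main() {
	d := deps{
		batchAdd:      func(stats) { fmt.Println("stats batched") },
		updateMetrics: func() { fmt.Println("metrics updated") },
		bumpActivity:  func() { fmt.Println("activity bumped") },
		trackUsage:    func() { fmt.Println("usage tracked") },
		notify:        func() error { fmt.Println("workspace notified"); return nil },
	}
	_ = reportAgentStats(d, stats{ConnectionCount: 1})
	_ = reportAgentStats(d, stats{ConnectionCount: 0}) // stops after metrics
}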
diff --git a/coderd/workspacestats/tracker.go b/coderd/workspacestats/tracker.go
index 33532247b36e0..f55edde3b57e6 100644
--- a/coderd/workspacestats/tracker.go
+++ b/coderd/workspacestats/tracker.go
@@ -130,7 +130,6 @@ func (tr *UsageTracker) flush(now time.Time) {
 	authCtx := dbauthz.AsSystemRestricted(ctx)
 	tr.flushLock.Lock()
 	defer tr.flushLock.Unlock()
-	// nolint:gocritic // (#13146) Will be moved soon as part of refactor.
 	if err := tr.s.BatchUpdateWorkspaceLastUsedAt(authCtx, database.BatchUpdateWorkspaceLastUsedAtParams{
 		LastUsedAt: now,
 		IDs:        ids,
diff --git a/coderd/workspacestats/workspacestatstest/batcher.go b/coderd/workspacestats/workspacestatstest/batcher.go
index 2f5dd7d13aa0a..592e244518790 100644
--- a/coderd/workspacestats/workspacestatstest/batcher.go
+++ b/coderd/workspacestats/workspacestatstest/batcher.go
@@ -25,7 +25,7 @@ type StatsBatcher struct {
 
 var _ workspacestats.Batcher = &StatsBatcher{}
 
-func (b *StatsBatcher) Add(now time.Time, agentID uuid.UUID, templateID uuid.UUID, userID uuid.UUID, workspaceID uuid.UUID, st *agentproto.Stats, usage bool) error {
+func (b *StatsBatcher) Add(now time.Time, agentID uuid.UUID, templateID uuid.UUID, userID uuid.UUID, workspaceID uuid.UUID, st *agentproto.Stats, usage bool) {
 	b.Mu.Lock()
 	defer b.Mu.Unlock()
 	b.Called++
@@ -36,5 +36,4 @@ func (b *StatsBatcher) Add(now time.Time, agentID uuid.UUID, templateID uuid.UUI
 	b.LastWorkspaceID = workspaceID
 	b.LastStats = st
 	b.LastUsage = usage
-	return nil
 }
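workspacestatstest.StatsBatcher is the fake that had to change along with the interface: it records the last call under a mutex and keeps a compile-time assertion that it still satisfies workspacestats.Batcher, so any future signature drift fails the build rather than a test. A trimmed-down sketch of that test-double pattern follows; the Batcher interface here is a stand-in, not the real one.

package main

import (
	"fmt"
	"sync"
	"time"
)

type Batcher interface {
	Add(now time.Time, workspaceID string, connections int64)
}

// StatsBatcher is a fake recorder for tests.
type StatsBatcher struct {
	Mu              sync.Mutex
	Called          int64
	LastTime        time.Time
	LastWorkspaceID string
	LastConnections int64
}

// Compile-time check: if the interface changes (as in this diff), the fake
// stops compiling until it is updated to match.
var _ Batcher = (*StatsBatcher)(nil)

func (b *StatsBatcher) Add(now time.Time, workspaceID string, connections int64) {
	b.Mu.Lock()
	defer b.Mu.Unlock()
	b.Called++
	b.LastTime = now
	b.LastWorkspaceID = workspaceID
	b.LastConnections = connections
}

func main() {
	var b Batcher = &StatsBatcher{}
	b.Add(time.Now(), "ws-1", 3)
	fmt.Println(b.(*StatsBatcher).Called) // 1
}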