diff --git a/cli/server.go b/cli/server.go
index 589937001e04b..45d8d7dd471bd 100644
--- a/cli/server.go
+++ b/cli/server.go
@@ -1012,7 +1012,8 @@ func (r *RootCmd) Server(newAPI func(context.Context, *coderd.Options) (*coderd.
autobuildTicker := time.NewTicker(vals.AutobuildPollInterval.Value())
defer autobuildTicker.Stop()
- autobuildExecutor := autobuild.NewExecutor(ctx, options.Database, coderAPI.TemplateScheduleStore, logger, autobuildTicker.C)
+ autobuildExecutor := autobuild.NewExecutor(
+ ctx, options.Database, options.Pubsub, coderAPI.TemplateScheduleStore, logger, autobuildTicker.C)
autobuildExecutor.Run()
hangDetectorTicker := time.NewTicker(vals.JobHangDetectorInterval.Value())
@@ -1378,16 +1379,12 @@ func newProvisionerDaemon(
connector[string(database.ProvisionerTypeTerraform)] = sdkproto.NewDRPCProvisionerClient(terraformClient)
}
- debounce := time.Second
return provisionerd.New(func(ctx context.Context) (proto.DRPCProvisionerDaemonClient, error) {
-		// This debounces calls to listen every second. Read the comment
-		// in provisionerdserver.go to learn more!
- return coderAPI.CreateInMemoryProvisionerDaemon(ctx, debounce)
+ return coderAPI.CreateInMemoryProvisionerDaemon(ctx)
}, &provisionerd.Options{
Logger: logger.Named("provisionerd"),
- JobPollInterval: cfg.Provisioner.DaemonPollInterval.Value(),
- JobPollJitter: cfg.Provisioner.DaemonPollJitter.Value(),
- JobPollDebounce: debounce,
UpdateInterval: time.Second,
ForceCancelInterval: cfg.Provisioner.ForceCancelInterval.Value(),
Connector: connector,
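
With acquisition now push-based over pubsub, the poll knobs (JobPollInterval, JobPollJitter, JobPollDebounce) disappear from provisionerd.Options entirely. A condensed sketch of the call site after this change, using the same names as the hunk above with error handling elided:

```go
// Sketch of the trimmed wiring: no poll settings remain, because provisioner
// daemons are now notified of new jobs over pubsub instead of polling.
srv := provisionerd.New(func(ctx context.Context) (proto.DRPCProvisionerDaemonClient, error) {
	return coderAPI.CreateInMemoryProvisionerDaemon(ctx)
}, &provisionerd.Options{
	Logger:              logger.Named("provisionerd"),
	UpdateInterval:      time.Second,
	ForceCancelInterval: cfg.Provisioner.ForceCancelInterval.Value(),
	Connector:           connector,
})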
diff --git a/cli/testdata/coder_server_--help.golden b/cli/testdata/coder_server_--help.golden
index 5bdd654d1d119..cb389b93a5ec4 100644
--- a/cli/testdata/coder_server_--help.golden
+++ b/cli/testdata/coder_server_--help.golden
@@ -393,10 +393,10 @@ updating, and deleting workspace resources.
Time to force cancel provisioning tasks that are stuck.
--provisioner-daemon-poll-interval duration, $CODER_PROVISIONER_DAEMON_POLL_INTERVAL (default: 1s)
- Time to wait before polling for a new job.
+ Deprecated and ignored.
--provisioner-daemon-poll-jitter duration, $CODER_PROVISIONER_DAEMON_POLL_JITTER (default: 100ms)
- Random jitter added to the poll interval.
+ Deprecated and ignored.
--provisioner-daemon-psk string, $CODER_PROVISIONER_DAEMON_PSK
Pre-shared key to authenticate external provisioner daemons to Coder
diff --git a/cli/testdata/server-config.yaml.golden b/cli/testdata/server-config.yaml.golden
index 78ec76dfa8d04..689004ba5be57 100644
--- a/cli/testdata/server-config.yaml.golden
+++ b/cli/testdata/server-config.yaml.golden
@@ -348,10 +348,10 @@ provisioning:
# tests.
# (default: false, type: bool)
daemonsEcho: false
- # Time to wait before polling for a new job.
+ # Deprecated and ignored.
# (default: 1s, type: duration)
daemonPollInterval: 1s
- # Random jitter added to the poll interval.
+ # Deprecated and ignored.
# (default: 100ms, type: duration)
daemonPollJitter: 100ms
# Time to force cancel provisioning tasks that are stuck.
diff --git a/coderd/activitybump_internal_test.go b/coderd/activitybump_internal_test.go
index 8c77ae2a11dd1..c561c7664f0ce 100644
--- a/coderd/activitybump_internal_test.go
+++ b/coderd/activitybump_internal_test.go
@@ -134,7 +134,7 @@ func Test_ActivityBumpWorkspace(t *testing.T) {
TemplateID: template.ID,
Ttl: sql.NullInt64{Valid: true, Int64: int64(tt.workspaceTTL)},
})
- job = dbgen.ProvisionerJob(t, db, database.ProvisionerJob{
+ job = dbgen.ProvisionerJob(t, db, nil, database.ProvisionerJob{
OrganizationID: org.ID,
CompletedAt: tt.jobCompletedAt,
})
@@ -225,7 +225,7 @@ func Test_ActivityBumpWorkspace(t *testing.T) {
func insertPrevWorkspaceBuild(t *testing.T, db database.Store, orgID, tvID, workspaceID uuid.UUID, transition database.WorkspaceTransition, buildNumber int32) {
t.Helper()
- job := dbgen.ProvisionerJob(t, db, database.ProvisionerJob{
+ job := dbgen.ProvisionerJob(t, db, nil, database.ProvisionerJob{
OrganizationID: orgID,
})
_ = dbgen.WorkspaceResource(t, db, database.WorkspaceResource{
diff --git a/coderd/autobuild/lifecycle_executor.go b/coderd/autobuild/lifecycle_executor.go
index 2b3b3195f8387..294db0034447b 100644
--- a/coderd/autobuild/lifecycle_executor.go
+++ b/coderd/autobuild/lifecycle_executor.go
@@ -16,6 +16,8 @@ import (
"github.com/coder/coder/v2/coderd/database/db2sdk"
"github.com/coder/coder/v2/coderd/database/dbauthz"
"github.com/coder/coder/v2/coderd/database/dbtime"
+ "github.com/coder/coder/v2/coderd/database/provisionerjobs"
+ "github.com/coder/coder/v2/coderd/database/pubsub"
"github.com/coder/coder/v2/coderd/schedule"
"github.com/coder/coder/v2/coderd/schedule/cron"
"github.com/coder/coder/v2/coderd/wsbuilder"
@@ -26,6 +28,7 @@ import (
type Executor struct {
ctx context.Context
db database.Store
+ ps pubsub.Pubsub
templateScheduleStore *atomic.Pointer[schedule.TemplateScheduleStore]
log slog.Logger
tick <-chan time.Time
@@ -40,11 +43,12 @@ type Stats struct {
}
// NewExecutor returns a new autobuild executor.
-func NewExecutor(ctx context.Context, db database.Store, tss *atomic.Pointer[schedule.TemplateScheduleStore], log slog.Logger, tick <-chan time.Time) *Executor {
+func NewExecutor(ctx context.Context, db database.Store, ps pubsub.Pubsub, tss *atomic.Pointer[schedule.TemplateScheduleStore], log slog.Logger, tick <-chan time.Time) *Executor {
le := &Executor{
//nolint:gocritic // Autostart has a limited set of permissions.
ctx: dbauthz.AsAutostart(ctx),
db: db,
+ ps: ps,
templateScheduleStore: tss,
tick: tick,
log: log.Named("autobuild"),
@@ -129,6 +133,7 @@ func (e *Executor) runOnce(t time.Time) Stats {
log := e.log.With(slog.F("workspace_id", wsID))
eg.Go(func() error {
+ var job *database.ProvisionerJob
err := e.db.InTx(func(tx database.Store) error {
// Re-check eligibility since the first check was outside the
// transaction and the workspace settings may have changed.
@@ -168,7 +173,8 @@ func (e *Executor) runOnce(t time.Time) Stats {
SetLastWorkspaceBuildJobInTx(&latestJob).
Reason(reason)
- if _, _, err := builder.Build(e.ctx, tx, nil); err != nil {
+ _, job, err = builder.Build(e.ctx, tx, nil)
+ if err != nil {
log.Error(e.ctx, "unable to transition workspace",
slog.F("transition", nextTransition),
slog.Error(err),
@@ -230,6 +236,17 @@ func (e *Executor) runOnce(t time.Time) Stats {
if err != nil {
log.Error(e.ctx, "workspace scheduling failed", slog.Error(err))
}
+ if job != nil && err == nil {
+ // Note that we can't refactor such that posting the job happens inside wsbuilder because it's called
+ // with an outer transaction like this, and we need to make sure the outer transaction commits before
+ // posting the job. If we post before the transaction commits, provisionerd might try to acquire the
+ // job, fail, and then sit idle instead of picking up the job.
+ err = provisionerjobs.PostJob(e.ps, *job)
+ if err != nil {
+ // Client probably doesn't care about this error, so just log it.
+ log.Error(e.ctx, "failed to post provisioner job to pubsub", slog.Error(err))
+ }
+ }
return nil
})
}
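
The comment in the hunk above carries the key ordering constraint of this change: the build row must be committed before the job is posted, or a daemon can race the transaction, fail to acquire, and sit idle. Reduced to its shape (Build's signature as used in this diff; the InTx options argument is assumed nil, as elsewhere in this codebase):

```go
// Condensed from the hunk above: commit first, publish second.
var job *database.ProvisionerJob
err := e.db.InTx(func(tx database.Store) error {
	_, j, err := builder.Build(e.ctx, tx, nil) // job row inserted inside the tx
	job = j
	return err
}, nil)
if err == nil && job != nil {
	// InTx has returned, so the insert is committed and daemons that react
	// to the publish will find the row. A lost publish is tolerable: the
	// Acquirer keeps a 30s backup poll (see acquirer.go below).
	if perr := provisionerjobs.PostJob(e.ps, *job); perr != nil {
		log.Error(e.ctx, "failed to post provisioner job to pubsub", slog.Error(perr))
	}
}
```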
diff --git a/coderd/batchstats/batcher_internal_test.go b/coderd/batchstats/batcher_internal_test.go
index 29529527a2af5..f9bc9e13726fa 100644
--- a/coderd/batchstats/batcher_internal_test.go
+++ b/coderd/batchstats/batcher_internal_test.go
@@ -14,6 +14,7 @@ import (
"github.com/coder/coder/v2/coderd/database/dbgen"
"github.com/coder/coder/v2/coderd/database/dbtestutil"
"github.com/coder/coder/v2/coderd/database/dbtime"
+ "github.com/coder/coder/v2/coderd/database/pubsub"
"github.com/coder/coder/v2/coderd/rbac"
"github.com/coder/coder/v2/codersdk/agentsdk"
"github.com/coder/coder/v2/cryptorand"
@@ -26,11 +27,11 @@ func TestBatchStats(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
t.Cleanup(cancel)
log := slogtest.Make(t, &slogtest.Options{IgnoreErrors: true}).Leveled(slog.LevelDebug)
- store, _ := dbtestutil.NewDB(t)
+ store, ps := dbtestutil.NewDB(t)
// Set up some test dependencies.
- deps1 := setupDeps(t, store)
- deps2 := setupDeps(t, store)
+ deps1 := setupDeps(t, store, ps)
+ deps2 := setupDeps(t, store, ps)
tick := make(chan time.Time)
flushed := make(chan int, 1)
@@ -168,7 +169,7 @@ type deps struct {
// It creates an organization, user, template, workspace, and agent
// along with all the other miscellaneous plumbing required to link
// them together.
-func setupDeps(t *testing.T, store database.Store) deps {
+func setupDeps(t *testing.T, store database.Store, ps pubsub.Pubsub) deps {
t.Helper()
org := dbgen.Organization(t, store, database.Organization{})
@@ -194,7 +195,7 @@ func setupDeps(t *testing.T, store database.Store) deps {
OrganizationID: org.ID,
LastUsedAt: time.Now().Add(-time.Hour),
})
- pj := dbgen.ProvisionerJob(t, store, database.ProvisionerJob{
+ pj := dbgen.ProvisionerJob(t, store, ps, database.ProvisionerJob{
InitiatorID: user.ID,
OrganizationID: org.ID,
})
diff --git a/coderd/coderd.go b/coderd/coderd.go
index 52c7740de481d..ccbb0fc703959 100644
--- a/coderd/coderd.go
+++ b/coderd/coderd.go
@@ -4,7 +4,6 @@ import (
"context"
"crypto/tls"
"crypto/x509"
- "encoding/json"
"flag"
"fmt"
"io"
@@ -366,6 +365,11 @@ func New(options *Options) *API {
UserQuietHoursScheduleStore: options.UserQuietHoursScheduleStore,
Experiments: experiments,
healthCheckGroup: &singleflight.Group[string, *healthcheck.Report]{},
+ Acquirer: provisionerdserver.NewAcquirer(
+ ctx,
+ options.Logger.Named("acquirer"),
+ options.Database,
+ options.Pubsub),
}
if options.UpdateCheckOptions != nil {
api.updateChecker = updatecheck.New(
@@ -1016,6 +1020,8 @@ type API struct {
healthCheckCache atomic.Pointer[healthcheck.Report]
statsBatcher *batchstats.Batcher
+
+ Acquirer *provisionerdserver.Acquirer
}
// Close waits for all WebSocket connections to drain before returning.
@@ -1067,7 +1073,7 @@ func compressHandler(h http.Handler) http.Handler {
// CreateInMemoryProvisionerDaemon is an in-memory connection to a provisionerd.
// Useful when starting coderd and provisionerd in the same process.
-func (api *API) CreateInMemoryProvisionerDaemon(ctx context.Context, debounce time.Duration) (client proto.DRPCProvisionerDaemonClient, err error) {
+func (api *API) CreateInMemoryProvisionerDaemon(ctx context.Context) (client proto.DRPCProvisionerDaemonClient, err error) {
tracer := api.TracerProvider.Tracer(tracing.TracerName)
clientSession, serverSession := provisionersdk.MemTransportPipe()
defer func() {
@@ -1077,11 +1083,8 @@ func (api *API) CreateInMemoryProvisionerDaemon(ctx context.Context, debounce ti
}
}()
- tags, err := json.Marshal(database.StringMap{
+ tags := provisionerdserver.Tags{
provisionerdserver.TagScope: provisionerdserver.ScopeOrganization,
- })
- if err != nil {
- return nil, xerrors.Errorf("marshal tags: %w", err)
}
mux := drpcmux.New()
@@ -1098,6 +1101,7 @@ func (api *API) CreateInMemoryProvisionerDaemon(ctx context.Context, debounce ti
tags,
api.Database,
api.Pubsub,
+ api.Acquirer,
api.Telemetry,
tracer,
&api.QuotaCommitter,
@@ -1105,7 +1109,6 @@ func (api *API) CreateInMemoryProvisionerDaemon(ctx context.Context, debounce ti
api.TemplateScheduleStore,
api.UserQuietHoursScheduleStore,
api.DeploymentValues,
- debounce,
provisionerdserver.Options{
OIDCConfig: api.OIDCConfig,
GitAuthConfigs: api.GitAuthConfigs,
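
Tags is now a first-class type rather than pre-marshaled JSON: the map literal above and the tags.Valid() check in NewServer (later in this diff) imply it is a string map with its own validation. Its definition is not part of this diff; the following is an assumed sketch, not the actual declaration:

```go
// Assumption: the shape of provisionerdserver.Tags, inferred from the map
// literal above and the Valid() call in NewServer below. The real Valid()
// is not shown in this diff; this stub only illustrates the call shape.
type Tags map[string]string

func (t Tags) Valid() error {
	return nil // the real implementation performs validation not shown here
}
```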
diff --git a/coderd/coderdtest/coderdtest.go b/coderd/coderdtest/coderdtest.go
index f1780058ac97b..5a00a679d808e 100644
--- a/coderd/coderdtest/coderdtest.go
+++ b/coderd/coderdtest/coderdtest.go
@@ -266,6 +266,7 @@ func NewOptions(t testing.TB, options *Options) (func(http.Handler), context.Can
lifecycleExecutor := autobuild.NewExecutor(
ctx,
options.Database,
+ options.Pubsub,
&templateScheduleStore,
slogtest.Make(t, nil).Named("autobuild.executor").Leveled(slog.LevelDebug),
options.AutobuildTicker,
@@ -453,6 +454,30 @@ func NewWithAPI(t testing.TB, options *Options) (*codersdk.Client, io.Closer, *c
return client, provisionerCloser, coderAPI
}
+// provisionerdCloser wraps a provisioner daemon as an io.Closer whose Close may safely be called multiple times.
+type provisionerdCloser struct {
+ mu sync.Mutex
+ closed bool
+ d *provisionerd.Server
+}
+
+func (c *provisionerdCloser) Close() error {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ if c.closed {
+ return nil
+ }
+ c.closed = true
+ ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitShort)
+ defer cancel()
+ shutdownErr := c.d.Shutdown(ctx)
+ closeErr := c.d.Close()
+ if shutdownErr != nil {
+ return shutdownErr
+ }
+ return closeErr
+}
+
// NewProvisionerDaemon launches a provisionerd instance configured to work
// well with coderd testing. It registers the "echo" provisioner for
// quick testing.
@@ -482,17 +507,17 @@ func NewProvisionerDaemon(t testing.TB, coderAPI *coderd.API) io.Closer {
assert.NoError(t, err)
}()
- closer := provisionerd.New(func(ctx context.Context) (provisionerdproto.DRPCProvisionerDaemonClient, error) {
- return coderAPI.CreateInMemoryProvisionerDaemon(ctx, 0)
+ daemon := provisionerd.New(func(ctx context.Context) (provisionerdproto.DRPCProvisionerDaemonClient, error) {
+ return coderAPI.CreateInMemoryProvisionerDaemon(ctx)
}, &provisionerd.Options{
Logger: coderAPI.Logger.Named("provisionerd").Leveled(slog.LevelDebug),
- JobPollInterval: 50 * time.Millisecond,
UpdateInterval: 250 * time.Millisecond,
ForceCancelInterval: time.Second,
Connector: provisionerd.LocalProvisioners{
string(database.ProvisionerTypeEcho): sdkproto.NewDRPCProvisionerClient(echoClient),
},
})
+ closer := &provisionerdCloser{d: daemon}
t.Cleanup(func() {
_ = closer.Close()
})
@@ -518,7 +543,7 @@ func NewExternalProvisionerDaemon(t *testing.T, client *codersdk.Client, org uui
assert.NoError(t, err)
}()
- closer := provisionerd.New(func(ctx context.Context) (provisionerdproto.DRPCProvisionerDaemonClient, error) {
+ daemon := provisionerd.New(func(ctx context.Context) (provisionerdproto.DRPCProvisionerDaemonClient, error) {
return client.ServeProvisionerDaemon(ctx, codersdk.ServeProvisionerDaemonRequest{
Organization: org,
Provisioners: []codersdk.ProvisionerType{codersdk.ProvisionerTypeEcho},
@@ -526,13 +551,13 @@ func NewExternalProvisionerDaemon(t *testing.T, client *codersdk.Client, org uui
})
}, &provisionerd.Options{
Logger: slogtest.Make(t, nil).Named("provisionerd").Leveled(slog.LevelDebug),
- JobPollInterval: 50 * time.Millisecond,
UpdateInterval: 250 * time.Millisecond,
ForceCancelInterval: time.Second,
Connector: provisionerd.LocalProvisioners{
string(database.ProvisionerTypeEcho): sdkproto.NewDRPCProvisionerClient(echoClient),
},
})
+ closer := &provisionerdCloser{d: daemon}
t.Cleanup(func() {
_ = closer.Close()
})
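
Both constructors now register the same provisionerdCloser in t.Cleanup that they hand back to the caller, so the closer's guarded Close is what keeps an explicit close in a test body from colliding with the cleanup. For example:

```go
// Usage: an explicit Close in the test body is safe; the t.Cleanup call that
// follows sees c.closed == true and returns nil without re-running
// Shutdown/Close on the daemon.
closer := coderdtest.NewProvisionerDaemon(t, coderAPI)
require.NoError(t, closer.Close()) // first call shuts the daemon down
require.NoError(t, closer.Close()) // second call is a no-op
```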
diff --git a/coderd/database/dbauthz/dbauthz_test.go b/coderd/database/dbauthz/dbauthz_test.go
index 5dab130857a39..66fb104be82b6 100644
--- a/coderd/database/dbauthz/dbauthz_test.go
+++ b/coderd/database/dbauthz/dbauthz_test.go
@@ -344,14 +344,14 @@ func (s *MethodTestSuite) TestGroup() {
func (s *MethodTestSuite) TestProvsionerJob() {
s.Run("Build/GetProvisionerJobByID", s.Subtest(func(db database.Store, check *expects) {
w := dbgen.Workspace(s.T(), db, database.Workspace{})
- j := dbgen.ProvisionerJob(s.T(), db, database.ProvisionerJob{
+ j := dbgen.ProvisionerJob(s.T(), db, nil, database.ProvisionerJob{
Type: database.ProvisionerJobTypeWorkspaceBuild,
})
_ = dbgen.WorkspaceBuild(s.T(), db, database.WorkspaceBuild{JobID: j.ID, WorkspaceID: w.ID})
check.Args(j.ID).Asserts(w, rbac.ActionRead).Returns(j)
}))
s.Run("TemplateVersion/GetProvisionerJobByID", s.Subtest(func(db database.Store, check *expects) {
- j := dbgen.ProvisionerJob(s.T(), db, database.ProvisionerJob{
+ j := dbgen.ProvisionerJob(s.T(), db, nil, database.ProvisionerJob{
Type: database.ProvisionerJobTypeTemplateVersionImport,
})
tpl := dbgen.Template(s.T(), db, database.Template{})
@@ -366,7 +366,7 @@ func (s *MethodTestSuite) TestProvsionerJob() {
v := dbgen.TemplateVersion(s.T(), db, database.TemplateVersion{
TemplateID: uuid.NullUUID{UUID: tpl.ID, Valid: true},
})
- j := dbgen.ProvisionerJob(s.T(), db, database.ProvisionerJob{
+ j := dbgen.ProvisionerJob(s.T(), db, nil, database.ProvisionerJob{
Type: database.ProvisionerJobTypeTemplateVersionDryRun,
Input: must(json.Marshal(struct {
TemplateVersionID uuid.UUID `json:"template_version_id"`
@@ -377,7 +377,7 @@ func (s *MethodTestSuite) TestProvsionerJob() {
s.Run("Build/UpdateProvisionerJobWithCancelByID", s.Subtest(func(db database.Store, check *expects) {
tpl := dbgen.Template(s.T(), db, database.Template{AllowUserCancelWorkspaceJobs: true})
w := dbgen.Workspace(s.T(), db, database.Workspace{TemplateID: tpl.ID})
- j := dbgen.ProvisionerJob(s.T(), db, database.ProvisionerJob{
+ j := dbgen.ProvisionerJob(s.T(), db, nil, database.ProvisionerJob{
Type: database.ProvisionerJobTypeWorkspaceBuild,
})
_ = dbgen.WorkspaceBuild(s.T(), db, database.WorkspaceBuild{JobID: j.ID, WorkspaceID: w.ID})
@@ -386,14 +386,14 @@ func (s *MethodTestSuite) TestProvsionerJob() {
s.Run("BuildFalseCancel/UpdateProvisionerJobWithCancelByID", s.Subtest(func(db database.Store, check *expects) {
tpl := dbgen.Template(s.T(), db, database.Template{AllowUserCancelWorkspaceJobs: false})
w := dbgen.Workspace(s.T(), db, database.Workspace{TemplateID: tpl.ID})
- j := dbgen.ProvisionerJob(s.T(), db, database.ProvisionerJob{
+ j := dbgen.ProvisionerJob(s.T(), db, nil, database.ProvisionerJob{
Type: database.ProvisionerJobTypeWorkspaceBuild,
})
_ = dbgen.WorkspaceBuild(s.T(), db, database.WorkspaceBuild{JobID: j.ID, WorkspaceID: w.ID})
check.Args(database.UpdateProvisionerJobWithCancelByIDParams{ID: j.ID}).Asserts(w, rbac.ActionUpdate).Returns()
}))
s.Run("TemplateVersion/UpdateProvisionerJobWithCancelByID", s.Subtest(func(db database.Store, check *expects) {
- j := dbgen.ProvisionerJob(s.T(), db, database.ProvisionerJob{
+ j := dbgen.ProvisionerJob(s.T(), db, nil, database.ProvisionerJob{
Type: database.ProvisionerJobTypeTemplateVersionImport,
})
tpl := dbgen.Template(s.T(), db, database.Template{})
@@ -405,7 +405,7 @@ func (s *MethodTestSuite) TestProvsionerJob() {
Asserts(v.RBACObject(tpl), []rbac.Action{rbac.ActionRead, rbac.ActionUpdate}).Returns()
}))
s.Run("TemplateVersionNoTemplate/UpdateProvisionerJobWithCancelByID", s.Subtest(func(db database.Store, check *expects) {
- j := dbgen.ProvisionerJob(s.T(), db, database.ProvisionerJob{
+ j := dbgen.ProvisionerJob(s.T(), db, nil, database.ProvisionerJob{
Type: database.ProvisionerJobTypeTemplateVersionImport,
})
v := dbgen.TemplateVersion(s.T(), db, database.TemplateVersion{
@@ -420,7 +420,7 @@ func (s *MethodTestSuite) TestProvsionerJob() {
v := dbgen.TemplateVersion(s.T(), db, database.TemplateVersion{
TemplateID: uuid.NullUUID{UUID: tpl.ID, Valid: true},
})
- j := dbgen.ProvisionerJob(s.T(), db, database.ProvisionerJob{
+ j := dbgen.ProvisionerJob(s.T(), db, nil, database.ProvisionerJob{
Type: database.ProvisionerJobTypeTemplateVersionDryRun,
Input: must(json.Marshal(struct {
TemplateVersionID uuid.UUID `json:"template_version_id"`
@@ -430,13 +430,13 @@ func (s *MethodTestSuite) TestProvsionerJob() {
Asserts(v.RBACObject(tpl), []rbac.Action{rbac.ActionRead, rbac.ActionUpdate}).Returns()
}))
s.Run("GetProvisionerJobsByIDs", s.Subtest(func(db database.Store, check *expects) {
- a := dbgen.ProvisionerJob(s.T(), db, database.ProvisionerJob{})
- b := dbgen.ProvisionerJob(s.T(), db, database.ProvisionerJob{})
+ a := dbgen.ProvisionerJob(s.T(), db, nil, database.ProvisionerJob{})
+ b := dbgen.ProvisionerJob(s.T(), db, nil, database.ProvisionerJob{})
check.Args([]uuid.UUID{a.ID, b.ID}).Asserts().Returns(slice.New(a, b))
}))
s.Run("GetProvisionerLogsAfterID", s.Subtest(func(db database.Store, check *expects) {
w := dbgen.Workspace(s.T(), db, database.Workspace{})
- j := dbgen.ProvisionerJob(s.T(), db, database.ProvisionerJob{
+ j := dbgen.ProvisionerJob(s.T(), db, nil, database.ProvisionerJob{
Type: database.ProvisionerJobTypeWorkspaceBuild,
})
_ = dbgen.WorkspaceBuild(s.T(), db, database.WorkspaceBuild{JobID: j.ID, WorkspaceID: w.ID})
@@ -1151,20 +1151,20 @@ func (s *MethodTestSuite) TestWorkspace() {
s.Run("GetWorkspaceResourceByID", s.Subtest(func(db database.Store, check *expects) {
ws := dbgen.Workspace(s.T(), db, database.Workspace{})
build := dbgen.WorkspaceBuild(s.T(), db, database.WorkspaceBuild{WorkspaceID: ws.ID, JobID: uuid.New()})
- _ = dbgen.ProvisionerJob(s.T(), db, database.ProvisionerJob{ID: build.JobID, Type: database.ProvisionerJobTypeWorkspaceBuild})
+ _ = dbgen.ProvisionerJob(s.T(), db, nil, database.ProvisionerJob{ID: build.JobID, Type: database.ProvisionerJobTypeWorkspaceBuild})
res := dbgen.WorkspaceResource(s.T(), db, database.WorkspaceResource{JobID: build.JobID})
check.Args(res.ID).Asserts(ws, rbac.ActionRead).Returns(res)
}))
s.Run("Build/GetWorkspaceResourcesByJobID", s.Subtest(func(db database.Store, check *expects) {
ws := dbgen.Workspace(s.T(), db, database.Workspace{})
build := dbgen.WorkspaceBuild(s.T(), db, database.WorkspaceBuild{WorkspaceID: ws.ID, JobID: uuid.New()})
- job := dbgen.ProvisionerJob(s.T(), db, database.ProvisionerJob{ID: build.JobID, Type: database.ProvisionerJobTypeWorkspaceBuild})
+ job := dbgen.ProvisionerJob(s.T(), db, nil, database.ProvisionerJob{ID: build.JobID, Type: database.ProvisionerJobTypeWorkspaceBuild})
check.Args(job.ID).Asserts(ws, rbac.ActionRead).Returns([]database.WorkspaceResource{})
}))
s.Run("Template/GetWorkspaceResourcesByJobID", s.Subtest(func(db database.Store, check *expects) {
tpl := dbgen.Template(s.T(), db, database.Template{})
v := dbgen.TemplateVersion(s.T(), db, database.TemplateVersion{TemplateID: uuid.NullUUID{UUID: tpl.ID, Valid: true}, JobID: uuid.New()})
- job := dbgen.ProvisionerJob(s.T(), db, database.ProvisionerJob{ID: v.JobID, Type: database.ProvisionerJobTypeTemplateVersionImport})
+ job := dbgen.ProvisionerJob(s.T(), db, nil, database.ProvisionerJob{ID: v.JobID, Type: database.ProvisionerJobTypeTemplateVersionImport})
check.Args(job.ID).Asserts(v.RBACObject(tpl), []rbac.Action{rbac.ActionRead, rbac.ActionRead}).Returns([]database.WorkspaceResource{})
}))
s.Run("InsertWorkspace", s.Subtest(func(db database.Store, check *expects) {
@@ -1411,7 +1411,7 @@ func (s *MethodTestSuite) TestSystemFunctions() {
}))
s.Run("GetProvisionerJobsCreatedAfter", s.Subtest(func(db database.Store, check *expects) {
// TODO: add provisioner job resource type
- _ = dbgen.ProvisionerJob(s.T(), db, database.ProvisionerJob{CreatedAt: time.Now().Add(-time.Hour)})
+ _ = dbgen.ProvisionerJob(s.T(), db, nil, database.ProvisionerJob{CreatedAt: time.Now().Add(-time.Hour)})
check.Args(time.Now()).Asserts( /*rbac.ResourceSystem, rbac.ActionRead*/ )
}))
s.Run("GetTemplateVersionsByIDs", s.Subtest(func(db database.Store, check *expects) {
@@ -1450,11 +1450,11 @@ func (s *MethodTestSuite) TestSystemFunctions() {
s.Run("GetWorkspaceResourcesByJobIDs", s.Subtest(func(db database.Store, check *expects) {
tpl := dbgen.Template(s.T(), db, database.Template{})
v := dbgen.TemplateVersion(s.T(), db, database.TemplateVersion{TemplateID: uuid.NullUUID{UUID: tpl.ID, Valid: true}, JobID: uuid.New()})
- tJob := dbgen.ProvisionerJob(s.T(), db, database.ProvisionerJob{ID: v.JobID, Type: database.ProvisionerJobTypeTemplateVersionImport})
+ tJob := dbgen.ProvisionerJob(s.T(), db, nil, database.ProvisionerJob{ID: v.JobID, Type: database.ProvisionerJobTypeTemplateVersionImport})
ws := dbgen.Workspace(s.T(), db, database.Workspace{})
build := dbgen.WorkspaceBuild(s.T(), db, database.WorkspaceBuild{WorkspaceID: ws.ID, JobID: uuid.New()})
- wJob := dbgen.ProvisionerJob(s.T(), db, database.ProvisionerJob{ID: build.JobID, Type: database.ProvisionerJobTypeWorkspaceBuild})
+ wJob := dbgen.ProvisionerJob(s.T(), db, nil, database.ProvisionerJob{ID: build.JobID, Type: database.ProvisionerJobTypeWorkspaceBuild})
check.Args([]uuid.UUID{tJob.ID, wJob.ID}).
Asserts(rbac.ResourceSystem, rbac.ActionRead).
Returns([]database.WorkspaceResource{})
@@ -1462,7 +1462,7 @@ func (s *MethodTestSuite) TestSystemFunctions() {
s.Run("GetWorkspaceResourceMetadataByResourceIDs", s.Subtest(func(db database.Store, check *expects) {
ws := dbgen.Workspace(s.T(), db, database.Workspace{})
build := dbgen.WorkspaceBuild(s.T(), db, database.WorkspaceBuild{WorkspaceID: ws.ID, JobID: uuid.New()})
- _ = dbgen.ProvisionerJob(s.T(), db, database.ProvisionerJob{ID: build.JobID, Type: database.ProvisionerJobTypeWorkspaceBuild})
+ _ = dbgen.ProvisionerJob(s.T(), db, nil, database.ProvisionerJob{ID: build.JobID, Type: database.ProvisionerJobTypeWorkspaceBuild})
a := dbgen.WorkspaceResource(s.T(), db, database.WorkspaceResource{JobID: build.JobID})
b := dbgen.WorkspaceResource(s.T(), db, database.WorkspaceResource{JobID: build.JobID})
check.Args([]uuid.UUID{a.ID, b.ID}).
@@ -1479,8 +1479,8 @@ func (s *MethodTestSuite) TestSystemFunctions() {
}))
s.Run("GetProvisionerJobsByIDs", s.Subtest(func(db database.Store, check *expects) {
// TODO: add a ProvisionerJob resource type
- a := dbgen.ProvisionerJob(s.T(), db, database.ProvisionerJob{})
- b := dbgen.ProvisionerJob(s.T(), db, database.ProvisionerJob{})
+ a := dbgen.ProvisionerJob(s.T(), db, nil, database.ProvisionerJob{})
+ b := dbgen.ProvisionerJob(s.T(), db, nil, database.ProvisionerJob{})
check.Args([]uuid.UUID{a.ID, b.ID}).
Asserts( /*rbac.ResourceSystem, rbac.ActionRead*/ ).
Returns(slice.New(a, b))
@@ -1514,7 +1514,7 @@ func (s *MethodTestSuite) TestSystemFunctions() {
}))
s.Run("AcquireProvisionerJob", s.Subtest(func(db database.Store, check *expects) {
// TODO: we need to create a ProvisionerJob resource
- j := dbgen.ProvisionerJob(s.T(), db, database.ProvisionerJob{
+ j := dbgen.ProvisionerJob(s.T(), db, nil, database.ProvisionerJob{
StartedAt: sql.NullTime{Valid: false},
})
check.Args(database.AcquireProvisionerJobParams{Types: []database.ProvisionerType{j.Provisioner}, Tags: must(json.Marshal(j.Tags))}).
@@ -1522,14 +1522,14 @@ func (s *MethodTestSuite) TestSystemFunctions() {
}))
s.Run("UpdateProvisionerJobWithCompleteByID", s.Subtest(func(db database.Store, check *expects) {
// TODO: we need to create a ProvisionerJob resource
- j := dbgen.ProvisionerJob(s.T(), db, database.ProvisionerJob{})
+ j := dbgen.ProvisionerJob(s.T(), db, nil, database.ProvisionerJob{})
check.Args(database.UpdateProvisionerJobWithCompleteByIDParams{
ID: j.ID,
}).Asserts( /*rbac.ResourceSystem, rbac.ActionUpdate*/ )
}))
s.Run("UpdateProvisionerJobByID", s.Subtest(func(db database.Store, check *expects) {
// TODO: we need to create a ProvisionerJob resource
- j := dbgen.ProvisionerJob(s.T(), db, database.ProvisionerJob{})
+ j := dbgen.ProvisionerJob(s.T(), db, nil, database.ProvisionerJob{})
check.Args(database.UpdateProvisionerJobByIDParams{
ID: j.ID,
UpdatedAt: time.Now(),
@@ -1546,7 +1546,7 @@ func (s *MethodTestSuite) TestSystemFunctions() {
}))
s.Run("InsertProvisionerJobLogs", s.Subtest(func(db database.Store, check *expects) {
// TODO: we need to create a ProvisionerJob resource
- j := dbgen.ProvisionerJob(s.T(), db, database.ProvisionerJob{})
+ j := dbgen.ProvisionerJob(s.T(), db, nil, database.ProvisionerJob{})
check.Args(database.InsertProvisionerJobLogsParams{
JobID: j.ID,
}).Asserts( /*rbac.ResourceSystem, rbac.ActionCreate*/ )
diff --git a/coderd/database/dbgen/dbgen.go b/coderd/database/dbgen/dbgen.go
index 56f85d10ef476..cb2b2187bd747 100644
--- a/coderd/database/dbgen/dbgen.go
+++ b/coderd/database/dbgen/dbgen.go
@@ -19,6 +19,8 @@ import (
"github.com/coder/coder/v2/coderd/database"
"github.com/coder/coder/v2/coderd/database/dbauthz"
"github.com/coder/coder/v2/coderd/database/dbtime"
+ "github.com/coder/coder/v2/coderd/database/provisionerjobs"
+ "github.com/coder/coder/v2/coderd/database/pubsub"
"github.com/coder/coder/v2/coderd/rbac"
"github.com/coder/coder/v2/cryptorand"
)
@@ -315,8 +317,9 @@ func GroupMember(t testing.TB, db database.Store, orig database.GroupMember) dat
return member
}
-// ProvisionerJob is a bit more involved to get the values such as "completedAt", "startedAt", "cancelledAt" set.
-func ProvisionerJob(t testing.TB, db database.Store, orig database.ProvisionerJob) database.ProvisionerJob {
+// ProvisionerJob is a bit more involved to get the values such as "completedAt", "startedAt", "cancelledAt" set. ps
+// can be set to nil if you are SURE that your test does not require a provisioner daemon to acquire the job.
+func ProvisionerJob(t testing.TB, db database.Store, ps pubsub.Pubsub, orig database.ProvisionerJob) database.ProvisionerJob {
id := takeFirst(orig.ID, uuid.New())
// Always set some tags to prevent Acquire from grabbing jobs it should not.
if !orig.StartedAt.Time.IsZero() {
@@ -341,7 +344,10 @@ func ProvisionerJob(t testing.TB, db database.Store, orig database.ProvisionerJo
Tags: orig.Tags,
})
require.NoError(t, err, "insert job")
-
+ if ps != nil {
+ err = provisionerjobs.PostJob(ps, job)
+ require.NoError(t, err, "post job to pubsub")
+ }
if !orig.StartedAt.Time.IsZero() {
job, err = db.AcquireProvisionerJob(genCtx, database.AcquireProvisionerJobParams{
StartedAt: orig.StartedAt,
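
The rule for call sites follows from this: pass nil when the test only needs the row, and pass the real pubsub when a provisioner daemon is expected to acquire the job (otherwise the daemon never hears about it). Both patterns appear in the test updates in this diff:

```go
// No daemon involved: the job row alone is enough, so ps may be nil.
job := dbgen.ProvisionerJob(t, db, nil, database.ProvisionerJob{
	OrganizationID: org.ID,
})

// A daemon must acquire this job: pass the pubsub so PostJob notifies it.
store, ps := dbtestutil.NewDB(t)
pj := dbgen.ProvisionerJob(t, store, ps, database.ProvisionerJob{
	OrganizationID: org.ID,
})
```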
diff --git a/coderd/database/dbgen/dbgen_test.go b/coderd/database/dbgen/dbgen_test.go
index de403d23f49c0..9fc7a5f427f8b 100644
--- a/coderd/database/dbgen/dbgen_test.go
+++ b/coderd/database/dbgen/dbgen_test.go
@@ -86,7 +86,7 @@ func TestGenerator(t *testing.T) {
t.Run("Job", func(t *testing.T) {
t.Parallel()
db := dbfake.New()
- exp := dbgen.ProvisionerJob(t, db, database.ProvisionerJob{})
+ exp := dbgen.ProvisionerJob(t, db, nil, database.ProvisionerJob{})
require.Equal(t, exp, must(db.GetProvisionerJobByID(context.Background(), exp.ID)))
})
diff --git a/coderd/database/provisionerjobs/provisionerjobs.go b/coderd/database/provisionerjobs/provisionerjobs.go
new file mode 100644
index 0000000000000..6ee5bee495421
--- /dev/null
+++ b/coderd/database/provisionerjobs/provisionerjobs.go
@@ -0,0 +1,29 @@
+package provisionerjobs
+
+import (
+ "encoding/json"
+
+ "golang.org/x/xerrors"
+
+ "github.com/coder/coder/v2/coderd/database"
+ "github.com/coder/coder/v2/coderd/database/pubsub"
+)
+
+const EventJobPosted = "provisioner_job_posted"
+
+type JobPosting struct {
+ ProvisionerType database.ProvisionerType `json:"type"`
+ Tags map[string]string `json:"tags"`
+}
+
+func PostJob(ps pubsub.Pubsub, job database.ProvisionerJob) error {
+ msg, err := json.Marshal(JobPosting{
+ ProvisionerType: job.Provisioner,
+ Tags: job.Tags,
+ })
+ if err != nil {
+ return xerrors.Errorf("marshal job posting: %w", err)
+ }
+	return ps.Publish(EventJobPosted, msg)
+}
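
The consuming side of this event is the Acquirer (see the acquirer.go hunk below), which subscribes to EventJobPosted and unmarshals each payload back into a JobPosting before matching it against waiting daemons. Condensed from that code:

```go
// Condensed subscriber, matching the Acquirer's jobPosted handler below.
cancelSub, err := ps.SubscribeWithErr(provisionerjobs.EventJobPosted,
	func(ctx context.Context, message []byte, err error) {
		if err != nil {
			return // pubsub-level error; the Acquirer logs it and relies on its backup poll
		}
		var posting provisionerjobs.JobPosting
		if err := json.Unmarshal(message, &posting); err != nil {
			return // unparseable posting; logged and dropped in the real code
		}
		// Wake any waiting daemon whose provisioner type and tags match posting.
	})
if err != nil {
	return err
}
defer cancelSub()
```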
diff --git a/coderd/database/querier_test.go b/coderd/database/querier_test.go
index 2e3ceeb95b644..9184117aa2896 100644
--- a/coderd/database/querier_test.go
+++ b/coderd/database/querier_test.go
@@ -103,7 +103,7 @@ func TestInsertWorkspaceAgentLogs(t *testing.T) {
require.NoError(t, err)
db := database.New(sqlDB)
org := dbgen.Organization(t, db, database.Organization{})
- job := dbgen.ProvisionerJob(t, db, database.ProvisionerJob{
+ job := dbgen.ProvisionerJob(t, db, nil, database.ProvisionerJob{
OrganizationID: org.ID,
})
resource := dbgen.WorkspaceResource(t, db, database.WorkspaceResource{
@@ -335,7 +335,7 @@ func TestQueuePosition(t *testing.T) {
jobs := []database.ProvisionerJob{}
jobIDs := []uuid.UUID{}
for i := 0; i < jobCount; i++ {
- job := dbgen.ProvisionerJob(t, db, database.ProvisionerJob{
+ job := dbgen.ProvisionerJob(t, db, nil, database.ProvisionerJob{
OrganizationID: org.ID,
Tags: database.StringMap{},
})
diff --git a/coderd/httpmw/workspaceagent_test.go b/coderd/httpmw/workspaceagent_test.go
index 126526e963199..57885406289ae 100644
--- a/coderd/httpmw/workspaceagent_test.go
+++ b/coderd/httpmw/workspaceagent_test.go
@@ -83,7 +83,7 @@ func setup(t testing.TB, db database.Store, authToken uuid.UUID, mw func(http.Ha
OrganizationID: org.ID,
TemplateID: template.ID,
})
- job := dbgen.ProvisionerJob(t, db, database.ProvisionerJob{
+ job := dbgen.ProvisionerJob(t, db, nil, database.ProvisionerJob{
OrganizationID: org.ID,
})
resource := dbgen.WorkspaceResource(t, db, database.WorkspaceResource{
diff --git a/coderd/httpmw/workspaceagentparam_test.go b/coderd/httpmw/workspaceagentparam_test.go
index 233b5d0d8b570..0ac2bb9eb01b9 100644
--- a/coderd/httpmw/workspaceagentparam_test.go
+++ b/coderd/httpmw/workspaceagentparam_test.go
@@ -34,7 +34,7 @@ func TestWorkspaceAgentParam(t *testing.T) {
Transition: database.WorkspaceTransitionStart,
Reason: database.BuildReasonInitiator,
})
- job = dbgen.ProvisionerJob(t, db, database.ProvisionerJob{
+ job = dbgen.ProvisionerJob(t, db, nil, database.ProvisionerJob{
ID: build.JobID,
Type: database.ProvisionerJobTypeWorkspaceBuild,
Provisioner: database.ProvisionerTypeEcho,
diff --git a/coderd/httpmw/workspaceparam_test.go b/coderd/httpmw/workspaceparam_test.go
index d3360cbe6e313..2ce079d3b3438 100644
--- a/coderd/httpmw/workspaceparam_test.go
+++ b/coderd/httpmw/workspaceparam_test.go
@@ -363,7 +363,7 @@ func setupWorkspaceWithAgents(t testing.TB, cfg setupConfig) (database.Store, *h
Transition: database.WorkspaceTransitionStart,
Reason: database.BuildReasonInitiator,
})
- job = dbgen.ProvisionerJob(t, db, database.ProvisionerJob{
+ job = dbgen.ProvisionerJob(t, db, nil, database.ProvisionerJob{
ID: build.JobID,
Type: database.ProvisionerJobTypeWorkspaceBuild,
Provisioner: database.ProvisionerTypeEcho,
diff --git a/coderd/httpmw/workspaceresourceparam_test.go b/coderd/httpmw/workspaceresourceparam_test.go
index 61c4d77fbf3da..e61e4016cb261 100644
--- a/coderd/httpmw/workspaceresourceparam_test.go
+++ b/coderd/httpmw/workspaceresourceparam_test.go
@@ -21,7 +21,7 @@ func TestWorkspaceResourceParam(t *testing.T) {
setup := func(t *testing.T, db database.Store, jobType database.ProvisionerJobType) (*http.Request, database.WorkspaceResource) {
r := httptest.NewRequest("GET", "/", nil)
- job := dbgen.ProvisionerJob(t, db, database.ProvisionerJob{
+ job := dbgen.ProvisionerJob(t, db, nil, database.ProvisionerJob{
Type: jobType,
Provisioner: database.ProvisionerTypeEcho,
StorageMethod: database.ProvisionerStorageMethodFile,
diff --git a/coderd/provisionerdserver/acquirer.go b/coderd/provisionerdserver/acquirer.go
index 7fb759faa2612..c9a43d660b671 100644
--- a/coderd/provisionerdserver/acquirer.go
+++ b/coderd/provisionerdserver/acquirer.go
@@ -15,14 +15,13 @@ import (
"cdr.dev/slog"
"github.com/coder/coder/v2/coderd/database"
- "github.com/coder/coder/v2/coderd/database/dbauthz"
"github.com/coder/coder/v2/coderd/database/dbtime"
+ "github.com/coder/coder/v2/coderd/database/provisionerjobs"
"github.com/coder/coder/v2/coderd/database/pubsub"
)
const (
- EventJobPosted = "provisioner_job_posted"
- dbMaxBackoff = 10 * time.Second
+ dbMaxBackoff = 10 * time.Second
	// backupPollDuration is the period of the backup polling described in the Acquirer comment
backupPollDuration = 30 * time.Second
)
@@ -106,8 +105,6 @@ func (a *Acquirer) AcquireJob(
}
// buffer of 1 so that cancel doesn't deadlock while writing to the channel
clearance := make(chan struct{}, 1)
- //nolint:gocritic // Provisionerd has specific authz rules.
- principal := dbauthz.AsProvisionerd(ctx)
for {
a.want(pt, tags, clearance)
select {
@@ -122,7 +119,7 @@ func (a *Acquirer) AcquireJob(
return database.ProvisionerJob{}, err
case <-clearance:
logger.Debug(ctx, "got clearance to call database")
- job, err := a.store.AcquireProvisionerJob(principal, database.AcquireProvisionerJobParams{
+ job, err := a.store.AcquireProvisionerJob(ctx, database.AcquireProvisionerJobParams{
StartedAt: sql.NullTime{
Time: dbtime.Now(),
Valid: true,
@@ -298,7 +295,7 @@ func (a *Acquirer) subscribe() {
bkoff := backoff.WithContext(eb, a.ctx)
var cancel context.CancelFunc
err := backoff.Retry(func() error {
- cancelFn, err := a.ps.SubscribeWithErr(EventJobPosted, a.jobPosted)
+ cancelFn, err := a.ps.SubscribeWithErr(provisionerjobs.EventJobPosted, a.jobPosted)
if err != nil {
a.logger.Warn(a.ctx, "failed to subscribe to job postings", slog.Error(err))
return err
@@ -335,7 +332,7 @@ func (a *Acquirer) jobPosted(ctx context.Context, message []byte, err error) {
a.logger.Warn(a.ctx, "unhandled pubsub error", slog.Error(err))
return
}
- posting := JobPosting{}
+ posting := provisionerjobs.JobPosting{}
err = json.Unmarshal(message, &posting)
if err != nil {
a.logger.Error(a.ctx, "unable to parse job posting",
@@ -457,7 +454,7 @@ type domain struct {
acquirees map[chan<- struct{}]*acquiree
}
-func (d domain) contains(p JobPosting) bool {
+func (d domain) contains(p provisionerjobs.JobPosting) bool {
if !slices.Contains(d.pt, p.ProvisionerType) {
return false
}
@@ -485,8 +482,3 @@ func (d domain) poll(dur time.Duration) {
}
}
}
-
-type JobPosting struct {
- ProvisionerType database.ProvisionerType `json:"type"`
- Tags map[string]string `json:"tags"`
-}
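
With the event constant and JobPosting moved out, the Acquirer is purely a consumer of the provisionerjobs package. Its AcquireJob blocks until a matching job exists, so callers that must not wait forever bound it with a context deadline; the deprecated long-poll path in provisionerdserver.go (next file, 5s by default) is exactly this pattern. A condensed usage sketch, with variable names assumed from that code:

```go
// Bounded acquire, condensed from the deprecated AcquireJob RPC below.
acqCtx, acqCancel := context.WithTimeout(ctx, 5*time.Second)
defer acqCancel()
job, err := acquirer.AcquireJob(acqCtx, daemonID, provisioners, tags)
if xerrors.Is(err, context.DeadlineExceeded) {
	// No job appeared within the window: report "nothing to do", not an error.
	return &proto.AcquiredJob{}, nil
}
if err != nil {
	return nil, xerrors.Errorf("acquire job: %w", err)
}
// job is now locked in the database for this worker.
```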
diff --git a/coderd/provisionerdserver/acquirer_test.go b/coderd/provisionerdserver/acquirer_test.go
index 6d72da5f7ffe4..7036df817b264 100644
--- a/coderd/provisionerdserver/acquirer_test.go
+++ b/coderd/provisionerdserver/acquirer_test.go
@@ -18,6 +18,7 @@ import (
"cdr.dev/slog/sloggers/slogtest"
"github.com/coder/coder/v2/coderd/database"
"github.com/coder/coder/v2/coderd/database/dbfake"
+ "github.com/coder/coder/v2/coderd/database/provisionerjobs"
"github.com/coder/coder/v2/coderd/database/pubsub"
"github.com/coder/coder/v2/coderd/provisionerdserver"
"github.com/coder/coder/v2/testutil"
@@ -316,12 +317,12 @@ func TestAcquirer_UnblockOnCancel(t *testing.T) {
func postJob(t *testing.T, ps pubsub.Pubsub, pt database.ProvisionerType, tags provisionerdserver.Tags) {
t.Helper()
- msg, err := json.Marshal(provisionerdserver.JobPosting{
+ msg, err := json.Marshal(provisionerjobs.JobPosting{
ProvisionerType: pt,
Tags: tags,
})
require.NoError(t, err)
- err = ps.Publish(provisionerdserver.EventJobPosted, msg)
+ err = ps.Publish(provisionerjobs.EventJobPosted, msg)
require.NoError(t, err)
}
diff --git a/coderd/provisionerdserver/provisionerdserver.go b/coderd/provisionerdserver/provisionerdserver.go
index b324b7d5bca04..6acb807022223 100644
--- a/coderd/provisionerdserver/provisionerdserver.go
+++ b/coderd/provisionerdserver/provisionerdserver.go
@@ -11,7 +11,6 @@ import (
"reflect"
"strconv"
"strings"
- "sync"
"sync/atomic"
"time"
@@ -44,16 +43,18 @@ import (
sdkproto "github.com/coder/coder/v2/provisionersdk/proto"
)
-var (
- lastAcquire time.Time
- lastAcquireMutex sync.RWMutex
-)
+// DefaultAcquireJobLongPollDur is the time the (deprecated) AcquireJob RPC waits to try to obtain a job before
+// canceling and returning an empty job.
+const DefaultAcquireJobLongPollDur = time.Second * 5
type Options struct {
OIDCConfig httpmw.OAuth2Config
GitAuthConfigs []*gitauth.Config
// TimeNowFn is only used in tests
TimeNowFn func() time.Time
+
+ // AcquireJobLongPollDur is used in tests
+ AcquireJobLongPollDur time.Duration
}
type server struct {
@@ -62,9 +63,10 @@ type server struct {
Logger slog.Logger
Provisioners []database.ProvisionerType
GitAuthConfigs []*gitauth.Config
- Tags json.RawMessage
+ Tags Tags
Database database.Store
Pubsub pubsub.Pubsub
+ Acquirer *Acquirer
Telemetry telemetry.Reporter
Tracer trace.Tracer
QuotaCommitter *atomic.Pointer[proto.QuotaCommitter]
@@ -73,10 +75,11 @@ type server struct {
UserQuietHoursScheduleStore *atomic.Pointer[schedule.UserQuietHoursScheduleStore]
DeploymentValues *codersdk.DeploymentValues
- AcquireJobDebounce time.Duration
- OIDCConfig httpmw.OAuth2Config
+ OIDCConfig httpmw.OAuth2Config
TimeNowFn func() time.Time
+
+ acquireJobLongPollDur time.Duration
}
// We use the null byte (0x00) in generating a canonical map key for tags, so
@@ -108,9 +111,10 @@ func NewServer(
id uuid.UUID,
logger slog.Logger,
provisioners []database.ProvisionerType,
- tags json.RawMessage,
+ tags Tags,
db database.Store,
ps pubsub.Pubsub,
+ acquirer *Acquirer,
tel telemetry.Reporter,
tracer trace.Tracer,
quotaCommitter *atomic.Pointer[proto.QuotaCommitter],
@@ -118,7 +122,6 @@ func NewServer(
templateScheduleStore *atomic.Pointer[schedule.TemplateScheduleStore],
userQuietHoursScheduleStore *atomic.Pointer[schedule.UserQuietHoursScheduleStore],
deploymentValues *codersdk.DeploymentValues,
- acquireJobDebounce time.Duration,
options Options,
) (proto.DRPCProvisionerDaemonServer, error) {
// Panic early if pointers are nil
@@ -137,6 +140,18 @@ func NewServer(
if deploymentValues == nil {
return nil, xerrors.New("deploymentValues is nil")
}
+ if acquirer == nil {
+ return nil, xerrors.New("acquirer is nil")
+ }
+ if tags == nil {
+ return nil, xerrors.Errorf("tags is nil")
+ }
+ if err := tags.Valid(); err != nil {
+ return nil, xerrors.Errorf("invalid tags: %w", err)
+ }
+ if options.AcquireJobLongPollDur == 0 {
+ options.AcquireJobLongPollDur = DefaultAcquireJobLongPollDur
+ }
return &server{
AccessURL: accessURL,
ID: id,
@@ -146,6 +161,7 @@ func NewServer(
Tags: tags,
Database: db,
Pubsub: ps,
+ Acquirer: acquirer,
Telemetry: tel,
Tracer: tracer,
QuotaCommitter: quotaCommitter,
@@ -153,9 +169,9 @@ func NewServer(
TemplateScheduleStore: templateScheduleStore,
UserQuietHoursScheduleStore: userQuietHoursScheduleStore,
DeploymentValues: deploymentValues,
- AcquireJobDebounce: acquireJobDebounce,
OIDCConfig: options.OIDCConfig,
TimeNowFn: options.TimeNowFn,
+ acquireJobLongPollDur: options.AcquireJobLongPollDur,
}, nil
}
@@ -169,50 +185,119 @@ func (s *server) timeNow() time.Time {
}
// AcquireJob queries the database to lock a job.
+//
+// Deprecated: This method is only available for back-level provisioner daemons.
func (s *server) AcquireJob(ctx context.Context, _ *proto.Empty) (*proto.AcquiredJob, error) {
//nolint:gocritic // Provisionerd has specific authz rules.
ctx = dbauthz.AsProvisionerd(ctx)
- // This prevents loads of provisioner daemons from consistently
- // querying the database when no jobs are available.
- //
- // The debounce only occurs when no job is returned, so if loads of
- // jobs are added at once, they will start after at most this duration.
- lastAcquireMutex.RLock()
- if !lastAcquire.IsZero() && time.Since(lastAcquire) < s.AcquireJobDebounce {
- s.Logger.Debug(ctx, "debounce acquire job", slog.F("debounce", s.AcquireJobDebounce), slog.F("last_acquire", lastAcquire))
- lastAcquireMutex.RUnlock()
- return &proto.AcquiredJob{}, nil
- }
- lastAcquireMutex.RUnlock()
- // This marks the job as locked in the database.
- job, err := s.Database.AcquireProvisionerJob(ctx, database.AcquireProvisionerJobParams{
- StartedAt: sql.NullTime{
- Time: dbtime.Now(),
- Valid: true,
- },
- WorkerID: uuid.NullUUID{
- UUID: s.ID,
- Valid: true,
- },
- Types: s.Provisioners,
- Tags: s.Tags,
- })
- if errors.Is(err, sql.ErrNoRows) {
- // The provisioner daemon assumes no jobs are available if
- // an empty struct is returned.
- lastAcquireMutex.Lock()
- lastAcquire = dbtime.Now()
- lastAcquireMutex.Unlock()
+ // Since AcquireJob blocks until a job is available, we set a long (5s by default) timeout. This allows back-level
+ // provisioner daemons to gracefully shut down within a few seconds, but keeps them from rapidly polling the
+ // database.
+ acqCtx, acqCancel := context.WithTimeout(ctx, s.acquireJobLongPollDur)
+ defer acqCancel()
+ job, err := s.Acquirer.AcquireJob(acqCtx, s.ID, s.Provisioners, s.Tags)
+ if xerrors.Is(err, context.DeadlineExceeded) {
+		s.Logger.Debug(ctx, "acquire job long poll timed out with no job")
return &proto.AcquiredJob{}, nil
}
if err != nil {
return nil, xerrors.Errorf("acquire job: %w", err)
}
s.Logger.Debug(ctx, "locked job from database", slog.F("job_id", job.ID))
+ return s.acquireProtoJob(ctx, job)
+}
+
+type jobAndErr struct {
+ job database.ProvisionerJob
+ err error
+}
+
+// AcquireJobWithCancel waits to lock a job in the database, and can be interrupted by the
+// client sending a (single) cancel message on the stream.
+func (s *server) AcquireJobWithCancel(stream proto.DRPCProvisionerDaemon_AcquireJobWithCancelStream) (retErr error) {
+ //nolint:gocritic // Provisionerd has specific authz rules.
+ streamCtx := dbauthz.AsProvisionerd(stream.Context())
+ defer func() {
+ closeErr := stream.Close()
+ s.Logger.Debug(streamCtx, "closed stream", slog.Error(closeErr))
+ if retErr == nil {
+ retErr = closeErr
+ }
+ }()
+ acqCtx, acqCancel := context.WithCancel(streamCtx)
+ defer acqCancel()
+ recvCh := make(chan error, 1)
+ go func() {
+ _, err := stream.Recv() // cancel is the only message
+ recvCh <- err
+ }()
+ jec := make(chan jobAndErr, 1)
+ go func() {
+ job, err := s.Acquirer.AcquireJob(acqCtx, s.ID, s.Provisioners, s.Tags)
+ jec <- jobAndErr{job: job, err: err}
+ }()
+ var recvErr error
+ var je jobAndErr
+ select {
+ case recvErr = <-recvCh:
+ acqCancel()
+ je = <-jec
+ case je = <-jec:
+ }
+ if xerrors.Is(je.err, context.Canceled) {
+ s.Logger.Debug(streamCtx, "successful cancel")
+ err := stream.Send(&proto.AcquiredJob{})
+ if err != nil {
+ // often this is just because the other side hangs up and doesn't wait for the cancel, so log at INFO
+ s.Logger.Info(streamCtx, "failed to send empty job", slog.Error(err))
+ return err
+ }
+ return nil
+ }
+ if je.err != nil {
+ return xerrors.Errorf("acquire job: %w", je.err)
+ }
+ logger := s.Logger.With(slog.F("job_id", je.job.ID))
+ logger.Debug(streamCtx, "locked job from database")
+ if recvErr != nil {
+ logger.Error(streamCtx, "recv error and failed to cancel acquire job", slog.Error(recvErr))
+ // Well, this is awkward. We hit an error receiving from the stream, but didn't cancel before we locked a job
+ // in the database. We need to mark this job as failed so the end user can retry if they want to.
+ err := s.Database.UpdateProvisionerJobWithCompleteByID(
+ context.Background(),
+ database.UpdateProvisionerJobWithCompleteByIDParams{
+ ID: je.job.ID,
+ CompletedAt: sql.NullTime{
+ Time: dbtime.Now(),
+ Valid: true,
+ },
+ Error: sql.NullString{
+ String: "connection to provisioner daemon broken",
+ Valid: true,
+ },
+ })
+ if err != nil {
+ logger.Error(streamCtx, "error updating failed job", slog.Error(err))
+ }
+ return recvErr
+ }
+
+ pj, err := s.acquireProtoJob(streamCtx, je.job)
+ if err != nil {
+ return err
+ }
+ err = stream.Send(pj)
+ if err != nil {
+ s.Logger.Error(streamCtx, "failed to send job", slog.Error(err))
+ return err
+ }
+ return nil
+}
+
+func (s *server) acquireProtoJob(ctx context.Context, job database.ProvisionerJob) (*proto.AcquiredJob, error) {
// Marks the acquired job as failed with the error message provided.
failJob := func(errorMessage string) error {
- err = s.Database.UpdateProvisionerJobWithCompleteByID(ctx, database.UpdateProvisionerJobWithCompleteByIDParams{
+ err := s.Database.UpdateProvisionerJobWithCompleteByID(ctx, database.UpdateProvisionerJobWithCompleteByIDParams{
ID: job.ID,
CompletedAt: sql.NullTime{
Time: dbtime.Now(),
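
AcquireJobWithCancel is built around a race between two goroutines: one blocked in stream.Recv waiting for the client's only expected message (cancel), one blocked in Acquirer.AcquireJob. Whichever channel fires first wins, and the receive branch always drains jec after canceling, so a job locked in the window between cancel and acquire is failed explicitly rather than silently leaked. The shape of the race, with names taken from the hunk above:

```go
// Reduced shape of the cancel race in AcquireJobWithCancel above. Both
// channels are buffered so neither goroutine blocks after the select.
recvCh := make(chan error, 1)
go func() {
	_, err := stream.Recv() // the client's cancel is the only expected message
	recvCh <- err
}()
jec := make(chan jobAndErr, 1)
go func() {
	job, err := acquirer.AcquireJob(acqCtx, id, provisioners, tags)
	jec <- jobAndErr{job: job, err: err}
}()
var (
	recvErr error
	je      jobAndErr
)
select {
case recvErr = <-recvCh:
	acqCancel() // unblock the acquire goroutine...
	je = <-jec  // ...and drain it, so a just-locked job is handled, not lost
case je = <-jec:
	// A job (or error) arrived before any cancel; recvErr stays nil unless
	// the stream also failed, which is checked after acquisition.
}
```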
diff --git a/coderd/provisionerdserver/provisionerdserver_test.go b/coderd/provisionerdserver/provisionerdserver_test.go
index 50c621235b493..871024f41c691 100644
--- a/coderd/provisionerdserver/provisionerdserver_test.go
+++ b/coderd/provisionerdserver/provisionerdserver_test.go
@@ -4,12 +4,19 @@ import (
"context"
"database/sql"
"encoding/json"
+ "io"
"net/url"
"strings"
+ "sync"
"sync/atomic"
"testing"
"time"
+ "golang.org/x/xerrors"
+ "storj.io/drpc"
+
+ "cdr.dev/slog"
+
"github.com/google/uuid"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
@@ -57,400 +64,411 @@ func testUserQuietHoursScheduleStore() *atomic.Pointer[schedule.UserQuietHoursSc
return ptr
}
-func TestAcquireJob(t *testing.T) {
+func TestAcquireJob_LongPoll(t *testing.T) {
t.Parallel()
- t.Run("Debounce", func(t *testing.T) {
- t.Parallel()
- db := dbfake.New()
- ps := pubsub.NewInMemory()
- srv, err := provisionerdserver.NewServer(
- &url.URL{},
- uuid.New(),
- slogtest.Make(t, nil),
- []database.ProvisionerType{database.ProvisionerTypeEcho},
- nil,
- db,
- ps,
- telemetry.NewNoop(),
- trace.NewNoopTracerProvider().Tracer("noop"),
- &atomic.Pointer[proto.QuotaCommitter]{},
- mockAuditor(),
- testTemplateScheduleStore(),
- testUserQuietHoursScheduleStore(),
- &codersdk.DeploymentValues{},
- time.Hour,
- provisionerdserver.Options{},
- )
- require.NoError(t, err)
- job, err := srv.AcquireJob(context.Background(), nil)
- require.NoError(t, err)
- require.Equal(t, &proto.AcquiredJob{}, job)
- _, err = db.InsertProvisionerJob(context.Background(), database.InsertProvisionerJobParams{
- ID: uuid.New(),
- InitiatorID: uuid.New(),
- Provisioner: database.ProvisionerTypeEcho,
- StorageMethod: database.ProvisionerStorageMethodFile,
- Type: database.ProvisionerJobTypeTemplateVersionDryRun,
- })
- require.NoError(t, err)
- job, err = srv.AcquireJob(context.Background(), nil)
- require.NoError(t, err)
- require.Equal(t, &proto.AcquiredJob{}, job)
- })
- t.Run("NoJobs", func(t *testing.T) {
- t.Parallel()
- srv, _, _ := setup(t, false, nil)
- job, err := srv.AcquireJob(context.Background(), nil)
- require.NoError(t, err)
- require.Equal(t, &proto.AcquiredJob{}, job)
- })
- t.Run("InitiatorNotFound", func(t *testing.T) {
- t.Parallel()
- srv, db, _ := setup(t, false, nil)
- _, err := db.InsertProvisionerJob(context.Background(), database.InsertProvisionerJobParams{
- ID: uuid.New(),
- InitiatorID: uuid.New(),
- Provisioner: database.ProvisionerTypeEcho,
- StorageMethod: database.ProvisionerStorageMethodFile,
- Type: database.ProvisionerJobTypeTemplateVersionDryRun,
- })
- require.NoError(t, err)
- _, err = srv.AcquireJob(context.Background(), nil)
- require.ErrorContains(t, err, "sql: no rows in result set")
- })
- t.Run("WorkspaceBuildJob", func(t *testing.T) {
- t.Parallel()
- // Set the max session token lifetime so we can assert we
- // create an API key with an expiration within the bounds of the
- // deployment config.
- dv := &codersdk.DeploymentValues{MaxTokenLifetime: clibase.Duration(time.Hour)}
- gitAuthProvider := "github"
- srv, db, ps := setup(t, false, &overrides{
- deploymentValues: dv,
- gitAuthConfigs: []*gitauth.Config{{
- ID: gitAuthProvider,
- OAuth2Config: &testutil.OAuth2Config{},
- }},
- })
- ctx := context.Background()
-
- user := dbgen.User(t, db, database.User{})
- link := dbgen.UserLink(t, db, database.UserLink{
- LoginType: database.LoginTypeOIDC,
- UserID: user.ID,
- OAuthExpiry: dbtime.Now().Add(time.Hour),
- OAuthAccessToken: "access-token",
- })
- dbgen.GitAuthLink(t, db, database.GitAuthLink{
- ProviderID: gitAuthProvider,
- UserID: user.ID,
- })
- template := dbgen.Template(t, db, database.Template{
- Name: "template",
- Provisioner: database.ProvisionerTypeEcho,
- })
- file := dbgen.File(t, db, database.File{CreatedBy: user.ID})
- versionFile := dbgen.File(t, db, database.File{CreatedBy: user.ID})
- version := dbgen.TemplateVersion(t, db, database.TemplateVersion{
- TemplateID: uuid.NullUUID{
- UUID: template.ID,
- Valid: true,
- },
- JobID: uuid.New(),
- })
- err := db.UpdateTemplateVersionGitAuthProvidersByJobID(ctx, database.UpdateTemplateVersionGitAuthProvidersByJobIDParams{
- JobID: version.JobID,
- GitAuthProviders: []string{gitAuthProvider},
- UpdatedAt: dbtime.Now(),
- })
+ srv, _, _ := setup(t, false, &overrides{acquireJobLongPollDuration: time.Microsecond})
+ job, err := srv.AcquireJob(context.Background(), nil)
+ require.NoError(t, err)
+ require.Equal(t, &proto.AcquiredJob{}, job)
+}
+
+func TestAcquireJobWithCancel_Cancel(t *testing.T) {
+ t.Parallel()
+ srv, _, _ := setup(t, false, nil)
+ ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitShort)
+ defer cancel()
+ fs := newFakeStream(ctx)
+ errCh := make(chan error)
+ go func() {
+ errCh <- srv.AcquireJobWithCancel(fs)
+ }()
+ fs.cancel()
+ select {
+ case <-ctx.Done():
+ t.Fatal("timed out waiting for AcquireJobWithCancel")
+ case err := <-errCh:
require.NoError(t, err)
- // Import version job
- _ = dbgen.ProvisionerJob(t, db, database.ProvisionerJob{
- ID: version.JobID,
- InitiatorID: user.ID,
- FileID: versionFile.ID,
- Provisioner: database.ProvisionerTypeEcho,
- StorageMethod: database.ProvisionerStorageMethodFile,
- Type: database.ProvisionerJobTypeTemplateVersionImport,
- Input: must(json.Marshal(provisionerdserver.TemplateVersionImportJob{
- TemplateVersionID: version.ID,
- UserVariableValues: []codersdk.VariableValue{
- {Name: "second", Value: "bah"},
- },
- })),
- })
- _ = dbgen.TemplateVersionVariable(t, db, database.TemplateVersionVariable{
- TemplateVersionID: version.ID,
- Name: "first",
- Value: "first_value",
- DefaultValue: "default_value",
- Sensitive: true,
- })
- _ = dbgen.TemplateVersionVariable(t, db, database.TemplateVersionVariable{
- TemplateVersionID: version.ID,
- Name: "second",
- Value: "second_value",
- DefaultValue: "default_value",
- Required: true,
- Sensitive: false,
- })
- workspace := dbgen.Workspace(t, db, database.Workspace{
- TemplateID: template.ID,
- OwnerID: user.ID,
- })
- build := dbgen.WorkspaceBuild(t, db, database.WorkspaceBuild{
- WorkspaceID: workspace.ID,
- BuildNumber: 1,
- JobID: uuid.New(),
- TemplateVersionID: version.ID,
- Transition: database.WorkspaceTransitionStart,
- Reason: database.BuildReasonInitiator,
- })
- _ = dbgen.ProvisionerJob(t, db, database.ProvisionerJob{
- ID: build.ID,
- InitiatorID: user.ID,
- Provisioner: database.ProvisionerTypeEcho,
- StorageMethod: database.ProvisionerStorageMethodFile,
- FileID: file.ID,
- Type: database.ProvisionerJobTypeWorkspaceBuild,
- Input: must(json.Marshal(provisionerdserver.WorkspaceProvisionJob{
- WorkspaceBuildID: build.ID,
- })),
- })
+ }
+ job, err := fs.waitForJob()
+ require.NoError(t, err)
+ require.NotNil(t, job)
+ require.Equal(t, "", job.JobId)
+}
+
+func TestAcquireJob(t *testing.T) {
+ t.Parallel()
- startPublished := make(chan struct{})
- var closed bool
- closeStartSubscribe, err := ps.Subscribe(codersdk.WorkspaceNotifyChannel(workspace.ID), func(_ context.Context, _ []byte) {
- if !closed {
- close(startPublished)
- closed = true
+ // These test acquiring a single job without canceling, and tests both AcquireJob (deprecated) and
+ // AcquireJobWithCancel as the way to get the job.
+ cases := []struct {
+ name string
+ acquire func(context.Context, proto.DRPCProvisionerDaemonServer) (*proto.AcquiredJob, error)
+ }{
+ {name: "Deprecated", acquire: func(ctx context.Context, srv proto.DRPCProvisionerDaemonServer) (*proto.AcquiredJob, error) {
+ return srv.AcquireJob(ctx, nil)
+ }},
+ {name: "WithCancel", acquire: func(ctx context.Context, srv proto.DRPCProvisionerDaemonServer) (*proto.AcquiredJob, error) {
+ fs := newFakeStream(ctx)
+ err := srv.AcquireJobWithCancel(fs)
+ if err != nil {
+ return nil, err
}
- })
- require.NoError(t, err)
- defer closeStartSubscribe()
+ return fs.waitForJob()
+ }},
+ }
+ for _, tc := range cases {
+ tc := tc
+ t.Run(tc.name+"_InitiatorNotFound", func(t *testing.T) {
+ t.Parallel()
+ srv, db, _ := setup(t, false, nil)
+ ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitShort)
+ defer cancel()
+ _, err := db.InsertProvisionerJob(context.Background(), database.InsertProvisionerJobParams{
+ ID: uuid.New(),
+ InitiatorID: uuid.New(),
+ Provisioner: database.ProvisionerTypeEcho,
+ StorageMethod: database.ProvisionerStorageMethodFile,
+ Type: database.ProvisionerJobTypeTemplateVersionDryRun,
+ })
+ require.NoError(t, err)
+ _, err = tc.acquire(ctx, srv)
+ require.ErrorContains(t, err, "sql: no rows in result set")
+ })
+ t.Run(tc.name+"_WorkspaceBuildJob", func(t *testing.T) {
+ t.Parallel()
+ // Set the max session token lifetime so we can assert we
+ // create an API key with an expiration within the bounds of the
+ // deployment config.
+ dv := &codersdk.DeploymentValues{MaxTokenLifetime: clibase.Duration(time.Hour)}
+ gitAuthProvider := "github"
+ srv, db, ps := setup(t, false, &overrides{
+ deploymentValues: dv,
+ gitAuthConfigs: []*gitauth.Config{{
+ ID: gitAuthProvider,
+ OAuth2Config: &testutil.OAuth2Config{},
+ }},
+ })
+ ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitShort)
+ defer cancel()
- var job *proto.AcquiredJob
+ user := dbgen.User(t, db, database.User{})
+ link := dbgen.UserLink(t, db, database.UserLink{
+ LoginType: database.LoginTypeOIDC,
+ UserID: user.ID,
+ OAuthExpiry: dbtime.Now().Add(time.Hour),
+ OAuthAccessToken: "access-token",
+ })
+ dbgen.GitAuthLink(t, db, database.GitAuthLink{
+ ProviderID: gitAuthProvider,
+ UserID: user.ID,
+ })
+ template := dbgen.Template(t, db, database.Template{
+ Name: "template",
+ Provisioner: database.ProvisionerTypeEcho,
+ })
+ file := dbgen.File(t, db, database.File{CreatedBy: user.ID})
+ versionFile := dbgen.File(t, db, database.File{CreatedBy: user.ID})
+ version := dbgen.TemplateVersion(t, db, database.TemplateVersion{
+ TemplateID: uuid.NullUUID{
+ UUID: template.ID,
+ Valid: true,
+ },
+ JobID: uuid.New(),
+ })
+ err := db.UpdateTemplateVersionGitAuthProvidersByJobID(ctx, database.UpdateTemplateVersionGitAuthProvidersByJobIDParams{
+ JobID: version.JobID,
+ GitAuthProviders: []string{gitAuthProvider},
+ UpdatedAt: dbtime.Now(),
+ })
+ require.NoError(t, err)
+ // Import version job
+ _ = dbgen.ProvisionerJob(t, db, ps, database.ProvisionerJob{
+ ID: version.JobID,
+ InitiatorID: user.ID,
+ FileID: versionFile.ID,
+ Provisioner: database.ProvisionerTypeEcho,
+ StorageMethod: database.ProvisionerStorageMethodFile,
+ Type: database.ProvisionerJobTypeTemplateVersionImport,
+ Input: must(json.Marshal(provisionerdserver.TemplateVersionImportJob{
+ TemplateVersionID: version.ID,
+ UserVariableValues: []codersdk.VariableValue{
+ {Name: "second", Value: "bah"},
+ },
+ })),
+ })
+ _ = dbgen.TemplateVersionVariable(t, db, database.TemplateVersionVariable{
+ TemplateVersionID: version.ID,
+ Name: "first",
+ Value: "first_value",
+ DefaultValue: "default_value",
+ Sensitive: true,
+ })
+ _ = dbgen.TemplateVersionVariable(t, db, database.TemplateVersionVariable{
+ TemplateVersionID: version.ID,
+ Name: "second",
+ Value: "second_value",
+ DefaultValue: "default_value",
+ Required: true,
+ Sensitive: false,
+ })
+ workspace := dbgen.Workspace(t, db, database.Workspace{
+ TemplateID: template.ID,
+ OwnerID: user.ID,
+ })
+ build := dbgen.WorkspaceBuild(t, db, database.WorkspaceBuild{
+ WorkspaceID: workspace.ID,
+ BuildNumber: 1,
+ JobID: uuid.New(),
+ TemplateVersionID: version.ID,
+ Transition: database.WorkspaceTransitionStart,
+ Reason: database.BuildReasonInitiator,
+ })
+ _ = dbgen.ProvisionerJob(t, db, ps, database.ProvisionerJob{
+ ID: build.ID,
+ InitiatorID: user.ID,
+ Provisioner: database.ProvisionerTypeEcho,
+ StorageMethod: database.ProvisionerStorageMethodFile,
+ FileID: file.ID,
+ Type: database.ProvisionerJobTypeWorkspaceBuild,
+ Input: must(json.Marshal(provisionerdserver.WorkspaceProvisionJob{
+ WorkspaceBuildID: build.ID,
+ })),
+ })
- for {
- // Grab jobs until we find the workspace build job. There is also
- // an import version job that we need to ignore.
- job, err = srv.AcquireJob(ctx, nil)
+ startPublished := make(chan struct{})
+ var closed bool
+ closeStartSubscribe, err := ps.Subscribe(codersdk.WorkspaceNotifyChannel(workspace.ID), func(_ context.Context, _ []byte) {
+ if !closed {
+ close(startPublished)
+ closed = true
+ }
+ })
require.NoError(t, err)
- if _, ok := job.Type.(*proto.AcquiredJob_WorkspaceBuild_); ok {
- break
+ defer closeStartSubscribe()
+
+ var job *proto.AcquiredJob
+
+ for {
+ // Grab jobs until we find the workspace build job. There is also
+ // an import version job that we need to ignore.
+ job, err = tc.acquire(ctx, srv)
+ require.NoError(t, err)
+ if _, ok := job.Type.(*proto.AcquiredJob_WorkspaceBuild_); ok {
+ break
+ }
}
- }
- <-startPublished
+ <-startPublished
- got, err := json.Marshal(job.Type)
- require.NoError(t, err)
+ got, err := json.Marshal(job.Type)
+ require.NoError(t, err)
- // Validate that a session token is generated during the job.
- sessionToken := job.Type.(*proto.AcquiredJob_WorkspaceBuild_).WorkspaceBuild.Metadata.WorkspaceOwnerSessionToken
- require.NotEmpty(t, sessionToken)
- toks := strings.Split(sessionToken, "-")
- require.Len(t, toks, 2, "invalid api key")
- key, err := db.GetAPIKeyByID(ctx, toks[0])
- require.NoError(t, err)
- require.Equal(t, int64(dv.MaxTokenLifetime.Value().Seconds()), key.LifetimeSeconds)
- require.WithinDuration(t, time.Now().Add(dv.MaxTokenLifetime.Value()), key.ExpiresAt, time.Minute)
-
- want, err := json.Marshal(&proto.AcquiredJob_WorkspaceBuild_{
- WorkspaceBuild: &proto.AcquiredJob_WorkspaceBuild{
- WorkspaceBuildId: build.ID.String(),
- WorkspaceName: workspace.Name,
- VariableValues: []*sdkproto.VariableValue{
- {
- Name: "first",
- Value: "first_value",
- Sensitive: true,
+ // Validate that a session token is generated during the job.
+ sessionToken := job.Type.(*proto.AcquiredJob_WorkspaceBuild_).WorkspaceBuild.Metadata.WorkspaceOwnerSessionToken
+ require.NotEmpty(t, sessionToken)
+ toks := strings.Split(sessionToken, "-")
+ require.Len(t, toks, 2, "invalid api key")
+ key, err := db.GetAPIKeyByID(ctx, toks[0])
+ require.NoError(t, err)
+ require.Equal(t, int64(dv.MaxTokenLifetime.Value().Seconds()), key.LifetimeSeconds)
+ require.WithinDuration(t, time.Now().Add(dv.MaxTokenLifetime.Value()), key.ExpiresAt, time.Minute)
+
+ want, err := json.Marshal(&proto.AcquiredJob_WorkspaceBuild_{
+ WorkspaceBuild: &proto.AcquiredJob_WorkspaceBuild{
+ WorkspaceBuildId: build.ID.String(),
+ WorkspaceName: workspace.Name,
+ VariableValues: []*sdkproto.VariableValue{
+ {
+ Name: "first",
+ Value: "first_value",
+ Sensitive: true,
+ },
+ {
+ Name: "second",
+ Value: "second_value",
+ },
},
- {
- Name: "second",
- Value: "second_value",
+ GitAuthProviders: []*sdkproto.GitAuthProvider{{
+ Id: gitAuthProvider,
+ AccessToken: "access_token",
+ }},
+ Metadata: &sdkproto.Metadata{
+ CoderUrl: (&url.URL{}).String(),
+ WorkspaceTransition: sdkproto.WorkspaceTransition_START,
+ WorkspaceName: workspace.Name,
+ WorkspaceOwner: user.Username,
+ WorkspaceOwnerEmail: user.Email,
+ WorkspaceOwnerOidcAccessToken: link.OAuthAccessToken,
+ WorkspaceId: workspace.ID.String(),
+ WorkspaceOwnerId: user.ID.String(),
+ TemplateId: template.ID.String(),
+ TemplateName: template.Name,
+ TemplateVersion: version.Name,
+ WorkspaceOwnerSessionToken: sessionToken,
},
},
- GitAuthProviders: []*sdkproto.GitAuthProvider{{
- Id: gitAuthProvider,
- AccessToken: "access_token",
- }},
- Metadata: &sdkproto.Metadata{
- CoderUrl: (&url.URL{}).String(),
- WorkspaceTransition: sdkproto.WorkspaceTransition_START,
- WorkspaceName: workspace.Name,
- WorkspaceOwner: user.Username,
- WorkspaceOwnerEmail: user.Email,
- WorkspaceOwnerOidcAccessToken: link.OAuthAccessToken,
- WorkspaceId: workspace.ID.String(),
- WorkspaceOwnerId: user.ID.String(),
- TemplateId: template.ID.String(),
- TemplateName: template.Name,
- TemplateVersion: version.Name,
- WorkspaceOwnerSessionToken: sessionToken,
- },
- },
- })
- require.NoError(t, err)
-
- require.JSONEq(t, string(want), string(got))
-
- // Assert that we delete the session token whenever
- // a stop is issued.
- stopbuild := dbgen.WorkspaceBuild(t, db, database.WorkspaceBuild{
- WorkspaceID: workspace.ID,
- BuildNumber: 2,
- JobID: uuid.New(),
- TemplateVersionID: version.ID,
- Transition: database.WorkspaceTransitionStop,
- Reason: database.BuildReasonInitiator,
- })
- _ = dbgen.ProvisionerJob(t, db, database.ProvisionerJob{
- ID: stopbuild.ID,
- InitiatorID: user.ID,
- Provisioner: database.ProvisionerTypeEcho,
- StorageMethod: database.ProvisionerStorageMethodFile,
- FileID: file.ID,
- Type: database.ProvisionerJobTypeWorkspaceBuild,
- Input: must(json.Marshal(provisionerdserver.WorkspaceProvisionJob{
- WorkspaceBuildID: stopbuild.ID,
- })),
- })
-
- stopPublished := make(chan struct{})
- closeStopSubscribe, err := ps.Subscribe(codersdk.WorkspaceNotifyChannel(workspace.ID), func(_ context.Context, _ []byte) {
- close(stopPublished)
- })
- require.NoError(t, err)
- defer closeStopSubscribe()
-
- // Grab jobs until we find the workspace build job. There is also
- // an import version job that we need to ignore.
- job, err = srv.AcquireJob(ctx, nil)
- require.NoError(t, err)
- _, ok := job.Type.(*proto.AcquiredJob_WorkspaceBuild_)
- require.True(t, ok, "acquired job not a workspace build?")
+ })
+ require.NoError(t, err)
- <-stopPublished
+ require.JSONEq(t, string(want), string(got))
- // Validate that a session token is deleted during a stop job.
- sessionToken = job.Type.(*proto.AcquiredJob_WorkspaceBuild_).WorkspaceBuild.Metadata.WorkspaceOwnerSessionToken
- require.Empty(t, sessionToken)
- _, err = db.GetAPIKeyByID(ctx, key.ID)
- require.ErrorIs(t, err, sql.ErrNoRows)
- })
-
- t.Run("TemplateVersionDryRun", func(t *testing.T) {
- t.Parallel()
- srv, db, _ := setup(t, false, nil)
- ctx := context.Background()
-
- user := dbgen.User(t, db, database.User{})
- version := dbgen.TemplateVersion(t, db, database.TemplateVersion{})
- file := dbgen.File(t, db, database.File{CreatedBy: user.ID})
- _ = dbgen.ProvisionerJob(t, db, database.ProvisionerJob{
- InitiatorID: user.ID,
- Provisioner: database.ProvisionerTypeEcho,
- StorageMethod: database.ProvisionerStorageMethodFile,
- FileID: file.ID,
- Type: database.ProvisionerJobTypeTemplateVersionDryRun,
- Input: must(json.Marshal(provisionerdserver.TemplateVersionDryRunJob{
+ // Assert that we delete the session token whenever
+ // a stop is issued.
+ stopbuild := dbgen.WorkspaceBuild(t, db, database.WorkspaceBuild{
+ WorkspaceID: workspace.ID,
+ BuildNumber: 2,
+ JobID: uuid.New(),
TemplateVersionID: version.ID,
- WorkspaceName: "testing",
- })),
- })
+ Transition: database.WorkspaceTransitionStop,
+ Reason: database.BuildReasonInitiator,
+ })
+ _ = dbgen.ProvisionerJob(t, db, ps, database.ProvisionerJob{
+ ID: stopbuild.ID,
+ InitiatorID: user.ID,
+ Provisioner: database.ProvisionerTypeEcho,
+ StorageMethod: database.ProvisionerStorageMethodFile,
+ FileID: file.ID,
+ Type: database.ProvisionerJobTypeWorkspaceBuild,
+ Input: must(json.Marshal(provisionerdserver.WorkspaceProvisionJob{
+ WorkspaceBuildID: stopbuild.ID,
+ })),
+ })
- job, err := srv.AcquireJob(ctx, nil)
- require.NoError(t, err)
+ stopPublished := make(chan struct{})
+ closeStopSubscribe, err := ps.Subscribe(codersdk.WorkspaceNotifyChannel(workspace.ID), func(_ context.Context, _ []byte) {
+ close(stopPublished)
+ })
+ require.NoError(t, err)
+ defer closeStopSubscribe()
- got, err := json.Marshal(job.Type)
- require.NoError(t, err)
+ // Acquire the stop build job. The import and start build jobs
+ // were already consumed above, so this is the only one pending.
+ job, err = tc.acquire(ctx, srv)
+ require.NoError(t, err)
+ _, ok := job.Type.(*proto.AcquiredJob_WorkspaceBuild_)
+ require.True(t, ok, "acquired job not a workspace build?")
+
+ <-stopPublished
+
+ // Validate that a session token is deleted during a stop job.
+ sessionToken = job.Type.(*proto.AcquiredJob_WorkspaceBuild_).WorkspaceBuild.Metadata.WorkspaceOwnerSessionToken
+ require.Empty(t, sessionToken)
+ _, err = db.GetAPIKeyByID(ctx, key.ID)
+ require.ErrorIs(t, err, sql.ErrNoRows)
+ })
+
+ t.Run(tc.name+"_TemplateVersionDryRun", func(t *testing.T) {
+ t.Parallel()
+ srv, db, ps := setup(t, false, nil)
+ ctx := context.Background()
+
+ user := dbgen.User(t, db, database.User{})
+ version := dbgen.TemplateVersion(t, db, database.TemplateVersion{})
+ file := dbgen.File(t, db, database.File{CreatedBy: user.ID})
+ _ = dbgen.ProvisionerJob(t, db, ps, database.ProvisionerJob{
+ InitiatorID: user.ID,
+ Provisioner: database.ProvisionerTypeEcho,
+ StorageMethod: database.ProvisionerStorageMethodFile,
+ FileID: file.ID,
+ Type: database.ProvisionerJobTypeTemplateVersionDryRun,
+ Input: must(json.Marshal(provisionerdserver.TemplateVersionDryRunJob{
+ TemplateVersionID: version.ID,
+ WorkspaceName: "testing",
+ })),
+ })
- want, err := json.Marshal(&proto.AcquiredJob_TemplateDryRun_{
- TemplateDryRun: &proto.AcquiredJob_TemplateDryRun{
- Metadata: &sdkproto.Metadata{
- CoderUrl: (&url.URL{}).String(),
- WorkspaceName: "testing",
- },
- },
- })
- require.NoError(t, err)
- require.JSONEq(t, string(want), string(got))
- })
- t.Run("TemplateVersionImport", func(t *testing.T) {
- t.Parallel()
- srv, db, _ := setup(t, false, nil)
- ctx := context.Background()
+ job, err := tc.acquire(ctx, srv)
+ require.NoError(t, err)
- user := dbgen.User(t, db, database.User{})
- file := dbgen.File(t, db, database.File{CreatedBy: user.ID})
- _ = dbgen.ProvisionerJob(t, db, database.ProvisionerJob{
- FileID: file.ID,
- InitiatorID: user.ID,
- Provisioner: database.ProvisionerTypeEcho,
- StorageMethod: database.ProvisionerStorageMethodFile,
- Type: database.ProvisionerJobTypeTemplateVersionImport,
- })
+ got, err := json.Marshal(job.Type)
+ require.NoError(t, err)
- job, err := srv.AcquireJob(ctx, nil)
- require.NoError(t, err)
+ want, err := json.Marshal(&proto.AcquiredJob_TemplateDryRun_{
+ TemplateDryRun: &proto.AcquiredJob_TemplateDryRun{
+ Metadata: &sdkproto.Metadata{
+ CoderUrl: (&url.URL{}).String(),
+ WorkspaceName: "testing",
+ },
+ },
+ })
+ require.NoError(t, err)
+ require.JSONEq(t, string(want), string(got))
+ })
+ t.Run(tc.name+"_TemplateVersionImport", func(t *testing.T) {
+ t.Parallel()
+ srv, db, ps := setup(t, false, nil)
+ ctx := context.Background()
+
+ user := dbgen.User(t, db, database.User{})
+ file := dbgen.File(t, db, database.File{CreatedBy: user.ID})
+ _ = dbgen.ProvisionerJob(t, db, ps, database.ProvisionerJob{
+ FileID: file.ID,
+ InitiatorID: user.ID,
+ Provisioner: database.ProvisionerTypeEcho,
+ StorageMethod: database.ProvisionerStorageMethodFile,
+ Type: database.ProvisionerJobTypeTemplateVersionImport,
+ })
- got, err := json.Marshal(job.Type)
- require.NoError(t, err)
+ job, err := tc.acquire(ctx, srv)
+ require.NoError(t, err)
- want, err := json.Marshal(&proto.AcquiredJob_TemplateImport_{
- TemplateImport: &proto.AcquiredJob_TemplateImport{
- Metadata: &sdkproto.Metadata{
- CoderUrl: (&url.URL{}).String(),
- },
- },
- })
- require.NoError(t, err)
- require.JSONEq(t, string(want), string(got))
- })
- t.Run("TemplateVersionImportWithUserVariable", func(t *testing.T) {
- t.Parallel()
- srv, db, _ := setup(t, false, nil)
+ got, err := json.Marshal(job.Type)
+ require.NoError(t, err)
- user := dbgen.User(t, db, database.User{})
- version := dbgen.TemplateVersion(t, db, database.TemplateVersion{})
- file := dbgen.File(t, db, database.File{CreatedBy: user.ID})
- _ = dbgen.ProvisionerJob(t, db, database.ProvisionerJob{
- FileID: file.ID,
- InitiatorID: user.ID,
- Provisioner: database.ProvisionerTypeEcho,
- StorageMethod: database.ProvisionerStorageMethodFile,
- Type: database.ProvisionerJobTypeTemplateVersionImport,
- Input: must(json.Marshal(provisionerdserver.TemplateVersionImportJob{
- TemplateVersionID: version.ID,
- UserVariableValues: []codersdk.VariableValue{
- {Name: "first", Value: "first_value"},
+ want, err := json.Marshal(&proto.AcquiredJob_TemplateImport_{
+ TemplateImport: &proto.AcquiredJob_TemplateImport{
+ Metadata: &sdkproto.Metadata{
+ CoderUrl: (&url.URL{}).String(),
+ },
},
- })),
- })
+ })
+ require.NoError(t, err)
+ require.JSONEq(t, string(want), string(got))
+ })
+ t.Run(tc.name+"_TemplateVersionImportWithUserVariable", func(t *testing.T) {
+ t.Parallel()
+ srv, db, ps := setup(t, false, nil)
+
+ user := dbgen.User(t, db, database.User{})
+ version := dbgen.TemplateVersion(t, db, database.TemplateVersion{})
+ file := dbgen.File(t, db, database.File{CreatedBy: user.ID})
+ _ = dbgen.ProvisionerJob(t, db, ps, database.ProvisionerJob{
+ FileID: file.ID,
+ InitiatorID: user.ID,
+ Provisioner: database.ProvisionerTypeEcho,
+ StorageMethod: database.ProvisionerStorageMethodFile,
+ Type: database.ProvisionerJobTypeTemplateVersionImport,
+ Input: must(json.Marshal(provisionerdserver.TemplateVersionImportJob{
+ TemplateVersionID: version.ID,
+ UserVariableValues: []codersdk.VariableValue{
+ {Name: "first", Value: "first_value"},
+ },
+ })),
+ })
- ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitShort)
- defer cancel()
+ ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitShort)
+ defer cancel()
- job, err := srv.AcquireJob(ctx, nil)
- require.NoError(t, err)
+ job, err := tc.acquire(ctx, srv)
+ require.NoError(t, err)
- got, err := json.Marshal(job.Type)
- require.NoError(t, err)
+ got, err := json.Marshal(job.Type)
+ require.NoError(t, err)
- want, err := json.Marshal(&proto.AcquiredJob_TemplateImport_{
- TemplateImport: &proto.AcquiredJob_TemplateImport{
- UserVariableValues: []*sdkproto.VariableValue{
- {Name: "first", Sensitive: true, Value: "first_value"},
- },
- Metadata: &sdkproto.Metadata{
- CoderUrl: (&url.URL{}).String(),
+ want, err := json.Marshal(&proto.AcquiredJob_TemplateImport_{
+ TemplateImport: &proto.AcquiredJob_TemplateImport{
+ UserVariableValues: []*sdkproto.VariableValue{
+ {Name: "first", Sensitive: true, Value: "first_value"},
+ },
+ Metadata: &sdkproto.Metadata{
+ CoderUrl: (&url.URL{}).String(),
+ },
},
- },
+ })
+ require.NoError(t, err)
+ require.JSONEq(t, string(want), string(got))
})
- require.NoError(t, err)
- require.JSONEq(t, string(want), string(got))
- })
+ }
}
func TestUpdateJob(t *testing.T) {
@@ -1142,7 +1160,7 @@ func TestCompleteJob(t *testing.T) {
Transition: c.transition,
Reason: database.BuildReasonInitiator,
})
- job := dbgen.ProvisionerJob(t, db, database.ProvisionerJob{
+ job := dbgen.ProvisionerJob(t, db, ps, database.ProvisionerJob{
FileID: file.ID,
Type: database.ProvisionerJobTypeWorkspaceBuild,
Input: must(json.Marshal(provisionerdserver.WorkspaceProvisionJob{
@@ -1390,7 +1408,7 @@ func TestCompleteJob(t *testing.T) {
Transition: c.transition,
Reason: database.BuildReasonInitiator,
})
- job := dbgen.ProvisionerJob(t, db, database.ProvisionerJob{
+ job := dbgen.ProvisionerJob(t, db, ps, database.ProvisionerJob{
FileID: file.ID,
Type: database.ProvisionerJobTypeWorkspaceBuild,
Input: must(json.Marshal(provisionerdserver.WorkspaceProvisionJob{
@@ -1662,10 +1680,14 @@ type overrides struct {
templateScheduleStore *atomic.Pointer[schedule.TemplateScheduleStore]
userQuietHoursScheduleStore *atomic.Pointer[schedule.UserQuietHoursScheduleStore]
timeNowFn func() time.Time
+ acquireJobLongPollDuration time.Duration
}
func setup(t *testing.T, ignoreLogErrors bool, ov *overrides) (proto.DRPCProvisionerDaemonServer, database.Store, pubsub.Pubsub) {
t.Helper()
+ ctx, cancel := context.WithCancel(context.Background())
+ t.Cleanup(cancel)
+ logger := slogtest.Make(t, nil).Leveled(slog.LevelDebug)
db := dbfake.New()
ps := pubsub.NewInMemory()
deploymentValues := &codersdk.DeploymentValues{}
@@ -1674,6 +1696,7 @@ func setup(t *testing.T, ignoreLogErrors bool, ov *overrides) (proto.DRPCProvisi
tss := testTemplateScheduleStore()
uqhss := testUserQuietHoursScheduleStore()
var timeNowFn func() time.Time
+ pollDur := time.Duration(0)
if ov != nil {
if ov.deploymentValues != nil {
deploymentValues = ov.deploymentValues
@@ -1705,6 +1728,7 @@ func setup(t *testing.T, ignoreLogErrors bool, ov *overrides) (proto.DRPCProvisi
if ov.timeNowFn != nil {
timeNowFn = ov.timeNowFn
}
+ pollDur = ov.acquireJobLongPollDuration
}
srv, err := provisionerdserver.NewServer(
@@ -1712,9 +1736,10 @@ func setup(t *testing.T, ignoreLogErrors bool, ov *overrides) (proto.DRPCProvisi
srvID,
slogtest.Make(t, &slogtest.Options{IgnoreErrors: ignoreLogErrors}),
[]database.ProvisionerType{database.ProvisionerTypeEcho},
- nil,
+ provisionerdserver.Tags{},
db,
ps,
+ provisionerdserver.NewAcquirer(ctx, logger.Named("acquirer"), db, ps),
telemetry.NewNoop(),
trace.NewNoopTracerProvider().Tracer("noop"),
&atomic.Pointer[proto.QuotaCommitter]{},
@@ -1722,12 +1747,11 @@ func setup(t *testing.T, ignoreLogErrors bool, ov *overrides) (proto.DRPCProvisi
tss,
uqhss,
deploymentValues,
- // Negative values cause the debounce to never kick in.
- -time.Minute,
provisionerdserver.Options{
- GitAuthConfigs: gitAuthConfigs,
- TimeNowFn: timeNowFn,
- OIDCConfig: &oauth2.Config{},
+ GitAuthConfigs: gitAuthConfigs,
+ TimeNowFn: timeNowFn,
+ OIDCConfig: &oauth2.Config{},
+ AcquireJobLongPollDur: pollDur,
},
)
require.NoError(t, err)
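Aside: the Acquirer passed to NewServer above is what replaces client-side polling — the server now long-polls the database and wakes early when a job is posted on pubsub, which is also why AcquireJobLongPollDur exists as a test override. A minimal sketch of that wait loop, assuming a jobPosted channel fed by a pubsub subscription (the real provisionerdserver.Acquirer has a richer API and multiplexes waiters by provisioner type and tags):

```go
import (
	"context"
	"database/sql"
	"errors"
	"time"

	"github.com/coder/coder/v2/coderd/database"
)

// Sketch only, under the assumptions stated above. The real Acquirer
// additionally returns an empty acquisition when the long poll expires.
func acquireWithWakeups(
	ctx context.Context,
	db database.Store,
	jobPosted <-chan struct{}, // assumed: signaled whenever a job is posted
	longPollDur time.Duration,
	params database.AcquireProvisionerJobParams,
) (database.ProvisionerJob, error) {
	for {
		job, err := db.AcquireProvisionerJob(ctx, params)
		if err == nil {
			return job, nil
		}
		if !errors.Is(err, sql.ErrNoRows) {
			return database.ProvisionerJob{}, err
		}
		// No pending job: wait for a posting, the long-poll deadline,
		// or cancellation, then try again.
		timer := time.NewTimer(longPollDur)
		select {
		case <-ctx.Done():
			timer.Stop()
			return database.ProvisionerJob{}, ctx.Err()
		case <-jobPosted:
		case <-timer.C:
		}
		timer.Stop()
	}
}
```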
@@ -1740,3 +1764,95 @@ func must[T any](value T, err error) T {
}
return value
}
+
+var (
+ errUnimplemented = xerrors.New("unimplemented")
+ errClosed = xerrors.New("closed")
+)
+
+type fakeStream struct {
+ ctx context.Context
+ c *sync.Cond
+ closed bool
+ canceled bool
+ sendCalled bool
+ job *proto.AcquiredJob
+}
+
+func newFakeStream(ctx context.Context) *fakeStream {
+ return &fakeStream{
+ ctx: ctx,
+ c: sync.NewCond(&sync.Mutex{}),
+ }
+}
+
+func (s *fakeStream) Send(j *proto.AcquiredJob) error {
+ s.c.L.Lock()
+ defer s.c.L.Unlock()
+ s.sendCalled = true
+ s.job = j
+ s.c.Broadcast()
+ return nil
+}
+
+func (s *fakeStream) Recv() (*proto.CancelAcquire, error) {
+ s.c.L.Lock()
+ defer s.c.L.Unlock()
+ for !(s.canceled || s.closed) {
+ s.c.Wait()
+ }
+ if s.canceled {
+ return &proto.CancelAcquire{}, nil
+ }
+ return nil, io.EOF
+}
+
+// Context returns the context associated with the stream. It is canceled
+// when the Stream is closed and no more messages will ever be sent or
+// received on it.
+func (s *fakeStream) Context() context.Context {
+ return s.ctx
+}
+
+// MsgSend sends the Message to the remote.
+func (*fakeStream) MsgSend(drpc.Message, drpc.Encoding) error {
+ return errUnimplemented
+}
+
+// MsgRecv receives a Message from the remote.
+func (*fakeStream) MsgRecv(drpc.Message, drpc.Encoding) error {
+ return errUnimplemented
+}
+
+// CloseSend signals to the remote that we will no longer send any messages.
+func (*fakeStream) CloseSend() error {
+ return errUnimplemented
+}
+
+// Close closes the stream.
+func (s *fakeStream) Close() error {
+ s.c.L.Lock()
+ defer s.c.L.Unlock()
+ s.closed = true
+ s.c.Broadcast()
+ return nil
+}
+
+func (s *fakeStream) waitForJob() (*proto.AcquiredJob, error) {
+ s.c.L.Lock()
+ defer s.c.L.Unlock()
+ for !(s.sendCalled || s.closed) {
+ s.c.Wait()
+ }
+ if s.sendCalled {
+ return s.job, nil
+ }
+ return nil, errClosed
+}
+
+func (s *fakeStream) cancel() {
+ s.c.L.Lock()
+ defer s.c.L.Unlock()
+ s.canceled = true
+ s.c.Broadcast()
+}
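fakeStream is the DRPC stream handed to AcquireJobWithCancel in the table-driven cases above: Send records the job, Recv blocks until cancel() or Close(), and the remaining methods satisfy drpc.Stream. A hypothetical test of the cancel path under those semantics (not part of this diff; it assumes the server replies with an empty AcquiredJob when the daemon withdraws its request):

```go
// Hypothetical cancel-path test, assuming the server responds to a
// CancelAcquire by sending an empty AcquiredJob instead of blocking.
func TestAcquireJobWithCancel_Canceled(t *testing.T) {
	t.Parallel()
	srv, _, _ := setup(t, false, &overrides{
		// Long enough that a prompt return must come from cancelation.
		acquireJobLongPollDuration: testutil.WaitShort,
	})
	ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitShort)
	defer cancel()

	fs := newFakeStream(ctx)
	fs.cancel() // Recv now returns &proto.CancelAcquire{} immediately.

	// With no jobs queued, the server should return without waiting out
	// the long-poll duration.
	require.NoError(t, srv.AcquireJobWithCancel(fs))
	job, err := fs.waitForJob()
	require.NoError(t, err)
	require.Empty(t, job.GetJobId())
}
```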
diff --git a/coderd/telemetry/telemetry_test.go b/coderd/telemetry/telemetry_test.go
index cd1561ea9abfe..cec216564b99b 100644
--- a/coderd/telemetry/telemetry_test.go
+++ b/coderd/telemetry/telemetry_test.go
@@ -40,7 +40,7 @@ func TestTelemetry(t *testing.T) {
ctx := testutil.Context(t, testutil.WaitMedium)
_, _ = dbgen.APIKey(t, db, database.APIKey{})
- _ = dbgen.ProvisionerJob(t, db, database.ProvisionerJob{
+ _ = dbgen.ProvisionerJob(t, db, nil, database.ProvisionerJob{
Provisioner: database.ProvisionerTypeTerraform,
StorageMethod: database.ProvisionerStorageMethodFile,
Type: database.ProvisionerJobTypeTemplateVersionDryRun,
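dbgen.ProvisionerJob now threads a pubsub through; tests that never acquire the job (like this telemetry test) pass nil. A plausible reading of the updated helper, assuming it simply chains the insert with the PostJob call added in templateversions.go below (field defaulting elided; the real dbgen helper is authoritative):

```go
// Assumed shape of the updated dbgen helper; the real one also fills
// defaults for unset fields. ps may be nil when no daemon will acquire.
func ProvisionerJob(t testing.TB, db database.Store, ps pubsub.Pubsub, seed database.ProvisionerJob) database.ProvisionerJob {
	t.Helper()
	job, err := db.InsertProvisionerJob(context.Background(), database.InsertProvisionerJobParams{
		ID:             seed.ID,
		OrganizationID: seed.OrganizationID,
		InitiatorID:    seed.InitiatorID,
		Provisioner:    seed.Provisioner,
		StorageMethod:  seed.StorageMethod,
		FileID:         seed.FileID,
		Type:           seed.Type,
		Input:          seed.Input,
		Tags:           seed.Tags,
	})
	require.NoError(t, err)
	if ps != nil {
		require.NoError(t, provisionerjobs.PostJob(ps, job))
	}
	return job
}
```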
diff --git a/coderd/templateversions.go b/coderd/templateversions.go
index 52dc8c2b12a5c..364c80814bde6 100644
--- a/coderd/templateversions.go
+++ b/coderd/templateversions.go
@@ -21,6 +21,7 @@ import (
"github.com/coder/coder/v2/coderd/audit"
"github.com/coder/coder/v2/coderd/database"
"github.com/coder/coder/v2/coderd/database/dbtime"
+ "github.com/coder/coder/v2/coderd/database/provisionerjobs"
"github.com/coder/coder/v2/coderd/gitauth"
"github.com/coder/coder/v2/coderd/httpapi"
"github.com/coder/coder/v2/coderd/httpmw"
@@ -502,6 +503,11 @@ func (api *API) postTemplateVersionDryRun(rw http.ResponseWriter, r *http.Reques
})
return
}
+ err = provisionerjobs.PostJob(api.Pubsub, provisionerJob)
+ if err != nil {
+ // Client probably doesn't care about this error, so just log it.
+ api.Logger.Error(ctx, "failed to post provisioner job to pubsub", slog.Error(err))
+ }
httpapi.Write(ctx, rw, http.StatusCreated, convertProvisionerJob(database.GetProvisionerJobsByIDsWithQueuePositionRow{
ProvisionerJob: provisionerJob,
@@ -1289,6 +1295,11 @@ func (api *API) postTemplateVersionsByOrganization(rw http.ResponseWriter, r *ht
return
}
aReq.New = templateVersion
+ err = provisionerjobs.PostJob(api.Pubsub, provisionerJob)
+ if err != nil {
+ // Client probably doesn't care about this error, so just log it.
+ api.Logger.Error(ctx, "failed to post provisioner job to pubsub", slog.Error(err))
+ }
httpapi.Write(ctx, rw, http.StatusCreated, convertTemplateVersion(templateVersion, convertProvisionerJob(database.GetProvisionerJobsByIDsWithQueuePositionRow{
ProvisionerJob: provisionerJob,
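The same post-then-log pattern recurs in workspacebuilds.go and workspaces.go below: creating the job must not fail just because the wake-up could not be published, since waiting daemons still find the job at their next long-poll retry. Under that reading, PostJob plausibly marshals a small posting and publishes it on a well-known channel — a sketch, with the channel name and payload shape assumed (the authoritative version lives in coderd/database/provisionerjobs):

```go
package provisionerjobs // sketch; assumptions noted above

import (
	"encoding/json"

	"golang.org/x/xerrors"

	"github.com/coder/coder/v2/coderd/database"
	"github.com/coder/coder/v2/coderd/database/pubsub"
)

// Assumed channel name and payload shape.
const EventJobPosted = "provisioner_job_posted"

type JobPosting struct {
	ProvisionerType database.ProvisionerType `json:"type"`
	Tags            map[string]string        `json:"tags"`
}

func PostJob(ps pubsub.Pubsub, job database.ProvisionerJob) error {
	msg, err := json.Marshal(JobPosting{
		ProvisionerType: job.Provisioner,
		Tags:            job.Tags,
	})
	if err != nil {
		return xerrors.Errorf("marshal job posting: %w", err)
	}
	return ps.Publish(EventJobPosted, msg)
}
```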
diff --git a/coderd/unhanger/detector_test.go b/coderd/unhanger/detector_test.go
index 45e52cafdcb55..99705fb159211 100644
--- a/coderd/unhanger/detector_test.go
+++ b/coderd/unhanger/detector_test.go
@@ -67,7 +67,7 @@ func TestDetectorNoHungJobs(t *testing.T) {
user := dbgen.User(t, db, database.User{})
file := dbgen.File(t, db, database.File{})
for i := 0; i < 5; i++ {
- dbgen.ProvisionerJob(t, db, database.ProvisionerJob{
+ dbgen.ProvisionerJob(t, db, pubsub, database.ProvisionerJob{
CreatedAt: now.Add(-time.Minute * 5),
UpdatedAt: now.Add(-time.Minute * time.Duration(i)),
StartedAt: sql.NullTime{
@@ -135,7 +135,7 @@ func TestDetectorHungWorkspaceBuild(t *testing.T) {
// Previous build.
expectedWorkspaceBuildState = []byte(`{"dean":"cool","colin":"also cool"}`)
- previousWorkspaceBuildJob = dbgen.ProvisionerJob(t, db, database.ProvisionerJob{
+ previousWorkspaceBuildJob = dbgen.ProvisionerJob(t, db, pubsub, database.ProvisionerJob{
CreatedAt: twentyMinAgo,
UpdatedAt: twentyMinAgo,
StartedAt: sql.NullTime{
@@ -163,7 +163,7 @@ func TestDetectorHungWorkspaceBuild(t *testing.T) {
})
// Current build.
- currentWorkspaceBuildJob = dbgen.ProvisionerJob(t, db, database.ProvisionerJob{
+ currentWorkspaceBuildJob = dbgen.ProvisionerJob(t, db, pubsub, database.ProvisionerJob{
CreatedAt: tenMinAgo,
UpdatedAt: sixMinAgo,
StartedAt: sql.NullTime{
@@ -256,7 +256,7 @@ func TestDetectorHungWorkspaceBuildNoOverrideState(t *testing.T) {
})
// Previous build.
- previousWorkspaceBuildJob = dbgen.ProvisionerJob(t, db, database.ProvisionerJob{
+ previousWorkspaceBuildJob = dbgen.ProvisionerJob(t, db, pubsub, database.ProvisionerJob{
CreatedAt: twentyMinAgo,
UpdatedAt: twentyMinAgo,
StartedAt: sql.NullTime{
@@ -285,7 +285,7 @@ func TestDetectorHungWorkspaceBuildNoOverrideState(t *testing.T) {
// Current build.
expectedWorkspaceBuildState = []byte(`{"dean":"cool","colin":"also cool"}`)
- currentWorkspaceBuildJob = dbgen.ProvisionerJob(t, db, database.ProvisionerJob{
+ currentWorkspaceBuildJob = dbgen.ProvisionerJob(t, db, pubsub, database.ProvisionerJob{
CreatedAt: tenMinAgo,
UpdatedAt: sixMinAgo,
StartedAt: sql.NullTime{
@@ -379,7 +379,7 @@ func TestDetectorHungWorkspaceBuildNoOverrideStateIfNoExistingBuild(t *testing.T
// First build.
expectedWorkspaceBuildState = []byte(`{"dean":"cool","colin":"also cool"}`)
- currentWorkspaceBuildJob = dbgen.ProvisionerJob(t, db, database.ProvisionerJob{
+ currentWorkspaceBuildJob = dbgen.ProvisionerJob(t, db, pubsub, database.ProvisionerJob{
CreatedAt: tenMinAgo,
UpdatedAt: sixMinAgo,
StartedAt: sql.NullTime{
@@ -454,7 +454,7 @@ func TestDetectorHungOtherJobTypes(t *testing.T) {
file = dbgen.File(t, db, database.File{})
// Template import job.
- templateImportJob = dbgen.ProvisionerJob(t, db, database.ProvisionerJob{
+ templateImportJob = dbgen.ProvisionerJob(t, db, pubsub, database.ProvisionerJob{
CreatedAt: tenMinAgo,
UpdatedAt: sixMinAgo,
StartedAt: sql.NullTime{
@@ -471,7 +471,7 @@ func TestDetectorHungOtherJobTypes(t *testing.T) {
})
// Template dry-run job.
- templateDryRunJob = dbgen.ProvisionerJob(t, db, database.ProvisionerJob{
+ templateDryRunJob = dbgen.ProvisionerJob(t, db, pubsub, database.ProvisionerJob{
CreatedAt: tenMinAgo,
UpdatedAt: sixMinAgo,
StartedAt: sql.NullTime{
@@ -545,7 +545,7 @@ func TestDetectorHungCanceledJob(t *testing.T) {
file = dbgen.File(t, db, database.File{})
// Template import job.
- templateImportJob = dbgen.ProvisionerJob(t, db, database.ProvisionerJob{
+ templateImportJob = dbgen.ProvisionerJob(t, db, pubsub, database.ProvisionerJob{
CreatedAt: tenMinAgo,
CanceledAt: sql.NullTime{
Time: tenMinAgo,
@@ -642,7 +642,7 @@ func TestDetectorPushesLogs(t *testing.T) {
file = dbgen.File(t, db, database.File{})
// Template import job.
- templateImportJob = dbgen.ProvisionerJob(t, db, database.ProvisionerJob{
+ templateImportJob = dbgen.ProvisionerJob(t, db, pubsub, database.ProvisionerJob{
CreatedAt: tenMinAgo,
UpdatedAt: sixMinAgo,
StartedAt: sql.NullTime{
@@ -752,7 +752,7 @@ func TestDetectorMaxJobsPerRun(t *testing.T) {
// Create unhanger.MaxJobsPerRun + 1 hung jobs.
now := time.Now()
for i := 0; i < unhanger.MaxJobsPerRun+1; i++ {
- dbgen.ProvisionerJob(t, db, database.ProvisionerJob{
+ dbgen.ProvisionerJob(t, db, pubsub, database.ProvisionerJob{
CreatedAt: now.Add(-time.Hour),
UpdatedAt: now.Add(-time.Hour),
StartedAt: sql.NullTime{
diff --git a/coderd/workspacebuilds.go b/coderd/workspacebuilds.go
index 6081d07580c61..947f2ec8774a6 100644
--- a/coderd/workspacebuilds.go
+++ b/coderd/workspacebuilds.go
@@ -20,6 +20,7 @@ import (
"github.com/coder/coder/v2/coderd/database/db2sdk"
"github.com/coder/coder/v2/coderd/database/dbauthz"
"github.com/coder/coder/v2/coderd/database/dbtime"
+ "github.com/coder/coder/v2/coderd/database/provisionerjobs"
"github.com/coder/coder/v2/coderd/httpapi"
"github.com/coder/coder/v2/coderd/httpmw"
"github.com/coder/coder/v2/coderd/rbac"
@@ -373,6 +374,11 @@ func (api *API) postWorkspaceBuilds(rw http.ResponseWriter, r *http.Request) {
})
return
}
+ err = provisionerjobs.PostJob(api.Pubsub, *provisionerJob)
+ if err != nil {
+ // Client probably doesn't care about this error, so just log it.
+ api.Logger.Error(ctx, "failed to post provisioner job to pubsub", slog.Error(err))
+ }
users, err := api.Database.GetUsersByIDs(ctx, []uuid.UUID{
workspace.OwnerID,
diff --git a/coderd/workspaces.go b/coderd/workspaces.go
index 7b6ad9f0b8c5d..f80df62398551 100644
--- a/coderd/workspaces.go
+++ b/coderd/workspaces.go
@@ -19,6 +19,7 @@ import (
"github.com/coder/coder/v2/coderd/database"
"github.com/coder/coder/v2/coderd/database/dbauthz"
"github.com/coder/coder/v2/coderd/database/dbtime"
+ "github.com/coder/coder/v2/coderd/database/provisionerjobs"
"github.com/coder/coder/v2/coderd/httpapi"
"github.com/coder/coder/v2/coderd/httpmw"
"github.com/coder/coder/v2/coderd/rbac"
@@ -485,7 +486,9 @@ func (api *API) postWorkspacesByOrganization(rw http.ResponseWriter, r *http.Req
}
workspaceBuild, provisionerJob, err = builder.Build(
- ctx, db, func(action rbac.Action, object rbac.Objecter) bool {
+ ctx,
+ db,
+ func(action rbac.Action, object rbac.Objecter) bool {
return api.Authorize(r, action, object)
})
return err
@@ -505,6 +508,11 @@ func (api *API) postWorkspacesByOrganization(rw http.ResponseWriter, r *http.Req
})
return
}
+ err = provisionerjobs.PostJob(api.Pubsub, *provisionerJob)
+ if err != nil {
+ // Client probably doesn't care about this error, so just log it.
+ api.Logger.Error(ctx, "failed to post provisioner job to pubsub", slog.Error(err))
+ }
aReq.New = workspace
initiator, err := api.Database.GetUserByID(ctx, workspaceBuild.InitiatorID)
diff --git a/coderd/workspaces_test.go b/coderd/workspaces_test.go
index 43ca99deb2d9e..83884f862489c 100644
--- a/coderd/workspaces_test.go
+++ b/coderd/workspaces_test.go
@@ -789,7 +789,7 @@ func TestWorkspaceFilterAllStatus(t *testing.T) {
file := dbgen.File(t, db, database.File{
CreatedBy: owner.UserID,
})
- versionJob := dbgen.ProvisionerJob(t, db, database.ProvisionerJob{
+ versionJob := dbgen.ProvisionerJob(t, db, pubsub, database.ProvisionerJob{
OrganizationID: owner.OrganizationID,
InitiatorID: owner.UserID,
WorkerID: uuid.NullUUID{},
@@ -825,7 +825,7 @@ func TestWorkspaceFilterAllStatus(t *testing.T) {
job.Tags = database.StringMap{
jobID.String(): "true",
}
- job = dbgen.ProvisionerJob(t, db, job)
+ job = dbgen.ProvisionerJob(t, db, pubsub, job)
build := dbgen.WorkspaceBuild(t, db, database.WorkspaceBuild{
WorkspaceID: workspace.ID,
diff --git a/codersdk/deployment.go b/codersdk/deployment.go
index 833af156ec0ce..62aad64345088 100644
--- a/codersdk/deployment.go
+++ b/codersdk/deployment.go
@@ -1272,7 +1272,7 @@ when required by your organization's security policy.`,
},
{
Name: "Poll Interval",
- Description: "Time to wait before polling for a new job.",
+ Description: "Deprecated and ignored.",
Flag: "provisioner-daemon-poll-interval",
Env: "CODER_PROVISIONER_DAEMON_POLL_INTERVAL",
Default: time.Second.String(),
@@ -1282,7 +1282,7 @@ when required by your organization's security policy.`,
},
{
Name: "Poll Jitter",
- Description: "Random jitter added to the poll interval.",
+ Description: "Deprecated and ignored.",
Flag: "provisioner-daemon-poll-jitter",
Env: "CODER_PROVISIONER_DAEMON_POLL_JITTER",
Default: (100 * time.Millisecond).String(),
diff --git a/docs/cli/provisionerd_start.md b/docs/cli/provisionerd_start.md
index b129605933db3..8f7e72b01207a 100644
--- a/docs/cli/provisionerd_start.md
+++ b/docs/cli/provisionerd_start.md
@@ -30,7 +30,7 @@ Directory to store cached data.
| Environment | $CODER_PROVISIONERD_POLL_INTERVAL |
| Default | 1s |
-How often to poll for provisioner jobs.
+Deprecated and ignored.
### --poll-jitter
@@ -40,7 +40,7 @@ How often to poll for provisioner jobs.
| Environment | $CODER_PROVISIONERD_POLL_JITTER |
| Default | 100ms |
-How much to jitter the poll interval by.
+Deprecated and ignored.
### --psk
diff --git a/docs/cli/server.md b/docs/cli/server.md
index 76e8b84ebef70..0db6c9e871489 100644
--- a/docs/cli/server.md
+++ b/docs/cli/server.md
@@ -644,7 +644,7 @@ URL pointing to the icon to use on the OpenID Connect login button.
| YAML | provisioning.daemonPollInterval |
| Default | 1s |
-Time to wait before polling for a new job.
+Deprecated and ignored.
### --provisioner-daemon-poll-jitter
@@ -655,7 +655,7 @@ Time to wait before polling for a new job.
| YAML | provisioning.daemonPollJitter |
| Default | 100ms |
-Random jitter added to the poll interval.
+Deprecated and ignored.
### --postgres-url
diff --git a/enterprise/cli/provisionerdaemons.go b/enterprise/cli/provisionerdaemons.go
index 2cb5f98d49343..837cb2e671766 100644
--- a/enterprise/cli/provisionerdaemons.go
+++ b/enterprise/cli/provisionerdaemons.go
@@ -136,11 +136,9 @@ func (r *RootCmd) provisionerDaemonStart() *clibase.Cmd {
PreSharedKey: preSharedKey,
})
}, &provisionerd.Options{
- Logger: logger,
- JobPollInterval: pollInterval,
- JobPollJitter: pollJitter,
- UpdateInterval: 500 * time.Millisecond,
- Connector: connector,
+ Logger: logger,
+ UpdateInterval: 500 * time.Millisecond,
+ Connector: connector,
})
var exitErr error
@@ -189,13 +187,13 @@ func (r *RootCmd) provisionerDaemonStart() *clibase.Cmd {
Flag: "poll-interval",
Env: "CODER_PROVISIONERD_POLL_INTERVAL",
Default: time.Second.String(),
- Description: "How often to poll for provisioner jobs.",
+ Description: "Deprecated and ignored.",
Value: clibase.DurationOf(&pollInterval),
},
{
Flag: "poll-jitter",
Env: "CODER_PROVISIONERD_POLL_JITTER",
- Description: "How much to jitter the poll interval by.",
+ Description: "Deprecated and ignored.",
Default: (100 * time.Millisecond).String(),
Value: clibase.DurationOf(&pollJitter),
},
diff --git a/enterprise/cli/testdata/coder_provisionerd_start_--help.golden b/enterprise/cli/testdata/coder_provisionerd_start_--help.golden
index 356795fc44f37..80d28883a8854 100644
--- a/enterprise/cli/testdata/coder_provisionerd_start_--help.golden
+++ b/enterprise/cli/testdata/coder_provisionerd_start_--help.golden
@@ -10,10 +10,10 @@ OPTIONS:
Directory to store cached data.
--poll-interval duration, $CODER_PROVISIONERD_POLL_INTERVAL (default: 1s)
- How often to poll for provisioner jobs.
+ Deprecated and ignored.
--poll-jitter duration, $CODER_PROVISIONERD_POLL_JITTER (default: 100ms)
- How much to jitter the poll interval by.
+ Deprecated and ignored.
--psk string, $CODER_PROVISIONER_DAEMON_PSK
Pre-shared key to authenticate with Coder server.
diff --git a/enterprise/cli/testdata/coder_server_--help.golden b/enterprise/cli/testdata/coder_server_--help.golden
index ab023e0be01f4..f9faa4e5bf66c 100644
--- a/enterprise/cli/testdata/coder_server_--help.golden
+++ b/enterprise/cli/testdata/coder_server_--help.golden
@@ -394,10 +394,10 @@ updating, and deleting workspace resources.
Time to force cancel provisioning tasks that are stuck.
--provisioner-daemon-poll-interval duration, $CODER_PROVISIONER_DAEMON_POLL_INTERVAL (default: 1s)
- Time to wait before polling for a new job.
+ Deprecated and ignored.
--provisioner-daemon-poll-jitter duration, $CODER_PROVISIONER_DAEMON_POLL_JITTER (default: 100ms)
- Random jitter added to the poll interval.
+ Deprecated and ignored.
--provisioner-daemon-psk string, $CODER_PROVISIONER_DAEMON_PSK
Pre-shared key to authenticate external provisioner daemons to Coder
diff --git a/enterprise/coderd/provisionerdaemons.go b/enterprise/coderd/provisionerdaemons.go
index e3f2bda732934..7ab337d169f95 100644
--- a/enterprise/coderd/provisionerdaemons.go
+++ b/enterprise/coderd/provisionerdaemons.go
@@ -4,14 +4,12 @@ import (
"context"
"crypto/subtle"
"database/sql"
- "encoding/json"
"errors"
"fmt"
"io"
"net"
"net/http"
"strings"
- "time"
"github.com/google/uuid"
"github.com/hashicorp/yamux"
@@ -180,6 +178,15 @@ func (api *API) provisionerDaemonServe(rw http.ResponseWriter, r *http.Request)
return
}
api.Logger.Debug(ctx, "provisioner authorized", slog.F("tags", tags))
+ if err := provisionerdserver.Tags(tags).Valid(); err != nil {
+ httpapi.Write(ctx, rw, http.StatusBadRequest, codersdk.Response{
+ Message: "Given tags are not acceptable to the service",
+ Validations: []codersdk.ValidationError{
+ {Field: "tags", Detail: err.Error()},
+ },
+ })
+ return
+ }
provisioners := make([]database.ProvisionerType, 0)
for p := range provisionersMap {
@@ -197,17 +204,6 @@ func (api *API) provisionerDaemonServe(rw http.ResponseWriter, r *http.Request)
slog.F("provisioners", provisioners),
slog.F("tags", tags),
)
- rawTags, err := json.Marshal(tags)
- if err != nil {
- if !xerrors.Is(err, context.Canceled) {
- log.Error(ctx, "marshal provisioner tags", slog.Error(err))
- }
- httpapi.Write(ctx, rw, http.StatusInternalServerError, codersdk.Response{
- Message: "Internal error marshaling daemon tags.",
- Detail: err.Error(),
- })
- return
- }
api.AGPL.WebsocketWaitMutex.Lock()
api.AGPL.WebsocketWaitGroup.Add(1)
@@ -251,9 +247,10 @@ func (api *API) provisionerDaemonServe(rw http.ResponseWriter, r *http.Request)
uuid.New(),
logger,
provisioners,
- rawTags,
+ tags,
api.Database,
api.Pubsub,
+ api.AGPL.Acquirer,
api.Telemetry,
trace.NewNoopTracerProvider().Tracer("noop"),
&api.AGPL.QuotaCommitter,
@@ -261,8 +258,6 @@ func (api *API) provisionerDaemonServe(rw http.ResponseWriter, r *http.Request)
api.AGPL.TemplateScheduleStore,
api.AGPL.UserQuietHoursScheduleStore,
api.DeploymentValues,
- // TODO(spikecurtis) - fix debounce to not cause flaky tests.
- time.Duration(0),
provisionerdserver.Options{
GitAuthConfigs: api.GitAuthConfigs,
OIDCConfig: api.OIDCConfig,
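Replacing rawTags (a []byte of marshaled JSON) with the typed provisionerdserver.Tags is what makes the Valid() check above possible: bad tags are rejected at the websocket handshake rather than surfacing later inside job queries. A hypothetical validator in that spirit (the real rules live in coderd/provisionerdserver and may differ):

```go
// Hypothetical: illustrates the kind of check Valid() enables; the
// real rules live in coderd/provisionerdserver and may differ.
type Tags map[string]string

func (t Tags) Valid() error {
	for k, v := range t {
		if k == "" {
			return xerrors.New("tag key must not be empty")
		}
		if v == "" {
			return xerrors.Errorf("tag %q must have a non-empty value", k)
		}
	}
	return nil
}
```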
diff --git a/enterprise/coderd/schedule/template_test.go b/enterprise/coderd/schedule/template_test.go
index 23bda7b21a798..bb6b4c30dc720 100644
--- a/enterprise/coderd/schedule/template_test.go
+++ b/enterprise/coderd/schedule/template_test.go
@@ -31,7 +31,7 @@ func TestTemplateUpdateBuildDeadlines(t *testing.T) {
file = dbgen.File(t, db, database.File{
CreatedBy: user.ID,
})
- templateJob = dbgen.ProvisionerJob(t, db, database.ProvisionerJob{
+ templateJob = dbgen.ProvisionerJob(t, db, nil, database.ProvisionerJob{
OrganizationID: org.ID,
FileID: file.ID,
InitiatorID: user.ID,
@@ -149,7 +149,7 @@ func TestTemplateUpdateBuildDeadlines(t *testing.T) {
OwnerID: user.ID,
TemplateID: template.ID,
})
- job = dbgen.ProvisionerJob(t, db, database.ProvisionerJob{
+ job = dbgen.ProvisionerJob(t, db, nil, database.ProvisionerJob{
OrganizationID: org.ID,
FileID: file.ID,
InitiatorID: user.ID,
@@ -255,7 +255,7 @@ func TestTemplateUpdateBuildDeadlinesSkip(t *testing.T) {
file = dbgen.File(t, db, database.File{
CreatedBy: user.ID,
})
- templateJob = dbgen.ProvisionerJob(t, db, database.ProvisionerJob{
+ templateJob = dbgen.ProvisionerJob(t, db, nil, database.ProvisionerJob{
OrganizationID: org.ID,
FileID: file.ID,
InitiatorID: user.ID,
@@ -405,7 +405,7 @@ func TestTemplateUpdateBuildDeadlinesSkip(t *testing.T) {
})
wsID = ws.ID
}
- job := dbgen.ProvisionerJob(t, db, database.ProvisionerJob{
+ job := dbgen.ProvisionerJob(t, db, nil, database.ProvisionerJob{
OrganizationID: org.ID,
FileID: file.ID,
InitiatorID: user.ID,
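The generated code below registers the new CancelAcquire message at index 9, which is why every later msgTypes index is shifted by one, and wires AcquireJobWithCancel as a bidirectional streaming RPC while marking plain AcquireJob deprecated. Inferred from the fakeStream method set above — the generated DRPC file is authoritative — the server-side stream looks approximately like:

```go
// Approximate server-side stream for AcquireJobWithCancel, inferred
// from fakeStream; see the generated DRPC code for the real interface.
type acquireJobWithCancelStream interface {
	drpc.Stream                    // Context, MsgSend, MsgRecv, CloseSend, Close
	Send(*AcquiredJob) error       // server pushes the acquired job
	Recv() (*CancelAcquire, error) // daemon withdraws its request
}
```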
diff --git a/provisionerd/proto/provisionerd.pb.go b/provisionerd/proto/provisionerd.pb.go
index 018e0f25ac8e1..186f4f79bad4d 100644
--- a/provisionerd/proto/provisionerd.pb.go
+++ b/provisionerd/proto/provisionerd.pb.go
@@ -809,6 +809,44 @@ func (x *CommitQuotaResponse) GetBudget() int32 {
return 0
}
+type CancelAcquire struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+}
+
+func (x *CancelAcquire) Reset() {
+ *x = CancelAcquire{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_provisionerd_proto_provisionerd_proto_msgTypes[9]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *CancelAcquire) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*CancelAcquire) ProtoMessage() {}
+
+func (x *CancelAcquire) ProtoReflect() protoreflect.Message {
+ mi := &file_provisionerd_proto_provisionerd_proto_msgTypes[9]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use CancelAcquire.ProtoReflect.Descriptor instead.
+func (*CancelAcquire) Descriptor() ([]byte, []int) {
+ return file_provisionerd_proto_provisionerd_proto_rawDescGZIP(), []int{9}
+}
+
type AcquiredJob_WorkspaceBuild struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
@@ -827,7 +865,7 @@ type AcquiredJob_WorkspaceBuild struct {
func (x *AcquiredJob_WorkspaceBuild) Reset() {
*x = AcquiredJob_WorkspaceBuild{}
if protoimpl.UnsafeEnabled {
- mi := &file_provisionerd_proto_provisionerd_proto_msgTypes[9]
+ mi := &file_provisionerd_proto_provisionerd_proto_msgTypes[10]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -840,7 +878,7 @@ func (x *AcquiredJob_WorkspaceBuild) String() string {
func (*AcquiredJob_WorkspaceBuild) ProtoMessage() {}
func (x *AcquiredJob_WorkspaceBuild) ProtoReflect() protoreflect.Message {
- mi := &file_provisionerd_proto_provisionerd_proto_msgTypes[9]
+ mi := &file_provisionerd_proto_provisionerd_proto_msgTypes[10]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -924,7 +962,7 @@ type AcquiredJob_TemplateImport struct {
func (x *AcquiredJob_TemplateImport) Reset() {
*x = AcquiredJob_TemplateImport{}
if protoimpl.UnsafeEnabled {
- mi := &file_provisionerd_proto_provisionerd_proto_msgTypes[10]
+ mi := &file_provisionerd_proto_provisionerd_proto_msgTypes[11]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -937,7 +975,7 @@ func (x *AcquiredJob_TemplateImport) String() string {
func (*AcquiredJob_TemplateImport) ProtoMessage() {}
func (x *AcquiredJob_TemplateImport) ProtoReflect() protoreflect.Message {
- mi := &file_provisionerd_proto_provisionerd_proto_msgTypes[10]
+ mi := &file_provisionerd_proto_provisionerd_proto_msgTypes[11]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -980,7 +1018,7 @@ type AcquiredJob_TemplateDryRun struct {
func (x *AcquiredJob_TemplateDryRun) Reset() {
*x = AcquiredJob_TemplateDryRun{}
if protoimpl.UnsafeEnabled {
- mi := &file_provisionerd_proto_provisionerd_proto_msgTypes[11]
+ mi := &file_provisionerd_proto_provisionerd_proto_msgTypes[12]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -993,7 +1031,7 @@ func (x *AcquiredJob_TemplateDryRun) String() string {
func (*AcquiredJob_TemplateDryRun) ProtoMessage() {}
func (x *AcquiredJob_TemplateDryRun) ProtoReflect() protoreflect.Message {
- mi := &file_provisionerd_proto_provisionerd_proto_msgTypes[11]
+ mi := &file_provisionerd_proto_provisionerd_proto_msgTypes[12]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -1041,7 +1079,7 @@ type FailedJob_WorkspaceBuild struct {
func (x *FailedJob_WorkspaceBuild) Reset() {
*x = FailedJob_WorkspaceBuild{}
if protoimpl.UnsafeEnabled {
- mi := &file_provisionerd_proto_provisionerd_proto_msgTypes[13]
+ mi := &file_provisionerd_proto_provisionerd_proto_msgTypes[14]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -1054,7 +1092,7 @@ func (x *FailedJob_WorkspaceBuild) String() string {
func (*FailedJob_WorkspaceBuild) ProtoMessage() {}
func (x *FailedJob_WorkspaceBuild) ProtoReflect() protoreflect.Message {
- mi := &file_provisionerd_proto_provisionerd_proto_msgTypes[13]
+ mi := &file_provisionerd_proto_provisionerd_proto_msgTypes[14]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -1086,7 +1124,7 @@ type FailedJob_TemplateImport struct {
func (x *FailedJob_TemplateImport) Reset() {
*x = FailedJob_TemplateImport{}
if protoimpl.UnsafeEnabled {
- mi := &file_provisionerd_proto_provisionerd_proto_msgTypes[14]
+ mi := &file_provisionerd_proto_provisionerd_proto_msgTypes[15]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -1099,7 +1137,7 @@ func (x *FailedJob_TemplateImport) String() string {
func (*FailedJob_TemplateImport) ProtoMessage() {}
func (x *FailedJob_TemplateImport) ProtoReflect() protoreflect.Message {
- mi := &file_provisionerd_proto_provisionerd_proto_msgTypes[14]
+ mi := &file_provisionerd_proto_provisionerd_proto_msgTypes[15]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -1124,7 +1162,7 @@ type FailedJob_TemplateDryRun struct {
func (x *FailedJob_TemplateDryRun) Reset() {
*x = FailedJob_TemplateDryRun{}
if protoimpl.UnsafeEnabled {
- mi := &file_provisionerd_proto_provisionerd_proto_msgTypes[15]
+ mi := &file_provisionerd_proto_provisionerd_proto_msgTypes[16]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -1137,7 +1175,7 @@ func (x *FailedJob_TemplateDryRun) String() string {
func (*FailedJob_TemplateDryRun) ProtoMessage() {}
func (x *FailedJob_TemplateDryRun) ProtoReflect() protoreflect.Message {
- mi := &file_provisionerd_proto_provisionerd_proto_msgTypes[15]
+ mi := &file_provisionerd_proto_provisionerd_proto_msgTypes[16]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -1165,7 +1203,7 @@ type CompletedJob_WorkspaceBuild struct {
func (x *CompletedJob_WorkspaceBuild) Reset() {
*x = CompletedJob_WorkspaceBuild{}
if protoimpl.UnsafeEnabled {
- mi := &file_provisionerd_proto_provisionerd_proto_msgTypes[16]
+ mi := &file_provisionerd_proto_provisionerd_proto_msgTypes[17]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -1178,7 +1216,7 @@ func (x *CompletedJob_WorkspaceBuild) String() string {
func (*CompletedJob_WorkspaceBuild) ProtoMessage() {}
func (x *CompletedJob_WorkspaceBuild) ProtoReflect() protoreflect.Message {
- mi := &file_provisionerd_proto_provisionerd_proto_msgTypes[16]
+ mi := &file_provisionerd_proto_provisionerd_proto_msgTypes[17]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -1222,7 +1260,7 @@ type CompletedJob_TemplateImport struct {
func (x *CompletedJob_TemplateImport) Reset() {
*x = CompletedJob_TemplateImport{}
if protoimpl.UnsafeEnabled {
- mi := &file_provisionerd_proto_provisionerd_proto_msgTypes[17]
+ mi := &file_provisionerd_proto_provisionerd_proto_msgTypes[18]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -1235,7 +1273,7 @@ func (x *CompletedJob_TemplateImport) String() string {
func (*CompletedJob_TemplateImport) ProtoMessage() {}
func (x *CompletedJob_TemplateImport) ProtoReflect() protoreflect.Message {
- mi := &file_provisionerd_proto_provisionerd_proto_msgTypes[17]
+ mi := &file_provisionerd_proto_provisionerd_proto_msgTypes[18]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -1290,7 +1328,7 @@ type CompletedJob_TemplateDryRun struct {
func (x *CompletedJob_TemplateDryRun) Reset() {
*x = CompletedJob_TemplateDryRun{}
if protoimpl.UnsafeEnabled {
- mi := &file_provisionerd_proto_provisionerd_proto_msgTypes[18]
+ mi := &file_provisionerd_proto_provisionerd_proto_msgTypes[19]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -1303,7 +1341,7 @@ func (x *CompletedJob_TemplateDryRun) String() string {
func (*CompletedJob_TemplateDryRun) ProtoMessage() {}
func (x *CompletedJob_TemplateDryRun) ProtoReflect() protoreflect.Message {
- mi := &file_provisionerd_proto_provisionerd_proto_msgTypes[18]
+ mi := &file_provisionerd_proto_provisionerd_proto_msgTypes[19]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -1543,37 +1581,44 @@ var file_provisionerd_proto_provisionerd_proto_rawDesc = []byte{
0x69, 0x74, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01,
0x28, 0x05, 0x52, 0x0f, 0x63, 0x72, 0x65, 0x64, 0x69, 0x74, 0x73, 0x43, 0x6f, 0x6e, 0x73, 0x75,
0x6d, 0x65, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x62, 0x75, 0x64, 0x67, 0x65, 0x74, 0x18, 0x03, 0x20,
- 0x01, 0x28, 0x05, 0x52, 0x06, 0x62, 0x75, 0x64, 0x67, 0x65, 0x74, 0x2a, 0x34, 0x0a, 0x09, 0x4c,
- 0x6f, 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x16, 0x0a, 0x12, 0x50, 0x52, 0x4f, 0x56,
- 0x49, 0x53, 0x49, 0x4f, 0x4e, 0x45, 0x52, 0x5f, 0x44, 0x41, 0x45, 0x4d, 0x4f, 0x4e, 0x10, 0x00,
- 0x12, 0x0f, 0x0a, 0x0b, 0x50, 0x52, 0x4f, 0x56, 0x49, 0x53, 0x49, 0x4f, 0x4e, 0x45, 0x52, 0x10,
- 0x01, 0x32, 0xec, 0x02, 0x0a, 0x11, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65,
- 0x72, 0x44, 0x61, 0x65, 0x6d, 0x6f, 0x6e, 0x12, 0x3c, 0x0a, 0x0a, 0x41, 0x63, 0x71, 0x75, 0x69,
- 0x72, 0x65, 0x4a, 0x6f, 0x62, 0x12, 0x13, 0x2e, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f,
- 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x19, 0x2e, 0x70, 0x72, 0x6f,
- 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x41, 0x63, 0x71, 0x75, 0x69, 0x72,
- 0x65, 0x64, 0x4a, 0x6f, 0x62, 0x12, 0x52, 0x0a, 0x0b, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x51,
- 0x75, 0x6f, 0x74, 0x61, 0x12, 0x20, 0x2e, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e,
- 0x65, 0x72, 0x64, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x51, 0x75, 0x6f, 0x74, 0x61, 0x52,
- 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69,
- 0x6f, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x51, 0x75, 0x6f, 0x74,
- 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4c, 0x0a, 0x09, 0x55, 0x70, 0x64,
- 0x61, 0x74, 0x65, 0x4a, 0x6f, 0x62, 0x12, 0x1e, 0x2e, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69,
- 0x6f, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4a, 0x6f, 0x62, 0x52,
- 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69,
- 0x6f, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4a, 0x6f, 0x62, 0x52,
- 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x46, 0x61, 0x69, 0x6c, 0x4a,
- 0x6f, 0x62, 0x12, 0x17, 0x2e, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72,
- 0x64, 0x2e, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x4a, 0x6f, 0x62, 0x1a, 0x13, 0x2e, 0x70, 0x72,
- 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79,
- 0x12, 0x3e, 0x0a, 0x0b, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x4a, 0x6f, 0x62, 0x12,
- 0x1a, 0x2e, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x43,
- 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x4a, 0x6f, 0x62, 0x1a, 0x13, 0x2e, 0x70, 0x72,
- 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79,
- 0x42, 0x2e, 0x5a, 0x2c, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63,
- 0x6f, 0x64, 0x65, 0x72, 0x2f, 0x63, 0x6f, 0x64, 0x65, 0x72, 0x2f, 0x76, 0x32, 0x2f, 0x70, 0x72,
- 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f,
- 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+ 0x01, 0x28, 0x05, 0x52, 0x06, 0x62, 0x75, 0x64, 0x67, 0x65, 0x74, 0x22, 0x0f, 0x0a, 0x0d, 0x43,
+ 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x41, 0x63, 0x71, 0x75, 0x69, 0x72, 0x65, 0x2a, 0x34, 0x0a, 0x09,
+ 0x4c, 0x6f, 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x16, 0x0a, 0x12, 0x50, 0x52, 0x4f,
+ 0x56, 0x49, 0x53, 0x49, 0x4f, 0x4e, 0x45, 0x52, 0x5f, 0x44, 0x41, 0x45, 0x4d, 0x4f, 0x4e, 0x10,
+ 0x00, 0x12, 0x0f, 0x0a, 0x0b, 0x50, 0x52, 0x4f, 0x56, 0x49, 0x53, 0x49, 0x4f, 0x4e, 0x45, 0x52,
+ 0x10, 0x01, 0x32, 0xc5, 0x03, 0x0a, 0x11, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e,
+ 0x65, 0x72, 0x44, 0x61, 0x65, 0x6d, 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x0a, 0x41, 0x63, 0x71, 0x75,
+ 0x69, 0x72, 0x65, 0x4a, 0x6f, 0x62, 0x12, 0x13, 0x2e, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69,
+ 0x6f, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x19, 0x2e, 0x70, 0x72,
+ 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x41, 0x63, 0x71, 0x75, 0x69,
+ 0x72, 0x65, 0x64, 0x4a, 0x6f, 0x62, 0x22, 0x03, 0x88, 0x02, 0x01, 0x12, 0x52, 0x0a, 0x14, 0x41,
+ 0x63, 0x71, 0x75, 0x69, 0x72, 0x65, 0x4a, 0x6f, 0x62, 0x57, 0x69, 0x74, 0x68, 0x43, 0x61, 0x6e,
+ 0x63, 0x65, 0x6c, 0x12, 0x1b, 0x2e, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65,
+ 0x72, 0x64, 0x2e, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x41, 0x63, 0x71, 0x75, 0x69, 0x72, 0x65,
+ 0x1a, 0x19, 0x2e, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x64, 0x2e,
+ 0x41, 0x63, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x4a, 0x6f, 0x62, 0x28, 0x01, 0x30, 0x01, 0x12,
+ 0x52, 0x0a, 0x0b, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x51, 0x75, 0x6f, 0x74, 0x61, 0x12, 0x20,
+ 0x2e, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x43, 0x6f,
+ 0x6d, 0x6d, 0x69, 0x74, 0x51, 0x75, 0x6f, 0x74, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
+ 0x1a, 0x21, 0x2e, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x64, 0x2e,
+ 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x51, 0x75, 0x6f, 0x74, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f,
+ 0x6e, 0x73, 0x65, 0x12, 0x4c, 0x0a, 0x09, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4a, 0x6f, 0x62,
+ 0x12, 0x1e, 0x2e, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x64, 0x2e,
+ 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4a, 0x6f, 0x62, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
+ 0x1a, 0x1f, 0x2e, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x64, 0x2e,
+ 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4a, 0x6f, 0x62, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
+ 0x65, 0x12, 0x37, 0x0a, 0x07, 0x46, 0x61, 0x69, 0x6c, 0x4a, 0x6f, 0x62, 0x12, 0x17, 0x2e, 0x70,
+ 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x46, 0x61, 0x69, 0x6c,
+ 0x65, 0x64, 0x4a, 0x6f, 0x62, 0x1a, 0x13, 0x2e, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f,
+ 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x3e, 0x0a, 0x0b, 0x43, 0x6f,
+ 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x4a, 0x6f, 0x62, 0x12, 0x1a, 0x2e, 0x70, 0x72, 0x6f, 0x76,
+ 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74,
+ 0x65, 0x64, 0x4a, 0x6f, 0x62, 0x1a, 0x13, 0x2e, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f,
+ 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x42, 0x2e, 0x5a, 0x2c, 0x67, 0x69,
+ 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x64, 0x65, 0x72, 0x2f, 0x63,
+ 0x6f, 0x64, 0x65, 0x72, 0x2f, 0x76, 0x32, 0x2f, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f,
+ 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x33,
}
var (
@@ -1589,7 +1634,7 @@ func file_provisionerd_proto_provisionerd_proto_rawDescGZIP() []byte {
}
var file_provisionerd_proto_provisionerd_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
-var file_provisionerd_proto_provisionerd_proto_msgTypes = make([]protoimpl.MessageInfo, 19)
+var file_provisionerd_proto_provisionerd_proto_msgTypes = make([]protoimpl.MessageInfo, 20)
var file_provisionerd_proto_provisionerd_proto_goTypes = []interface{}{
(LogSource)(0), // 0: provisionerd.LogSource
(*Empty)(nil), // 1: provisionerd.Empty
@@ -1601,68 +1646,71 @@ var file_provisionerd_proto_provisionerd_proto_goTypes = []interface{}{
(*UpdateJobResponse)(nil), // 7: provisionerd.UpdateJobResponse
(*CommitQuotaRequest)(nil), // 8: provisionerd.CommitQuotaRequest
(*CommitQuotaResponse)(nil), // 9: provisionerd.CommitQuotaResponse
- (*AcquiredJob_WorkspaceBuild)(nil), // 10: provisionerd.AcquiredJob.WorkspaceBuild
- (*AcquiredJob_TemplateImport)(nil), // 11: provisionerd.AcquiredJob.TemplateImport
- (*AcquiredJob_TemplateDryRun)(nil), // 12: provisionerd.AcquiredJob.TemplateDryRun
- nil, // 13: provisionerd.AcquiredJob.TraceMetadataEntry
- (*FailedJob_WorkspaceBuild)(nil), // 14: provisionerd.FailedJob.WorkspaceBuild
- (*FailedJob_TemplateImport)(nil), // 15: provisionerd.FailedJob.TemplateImport
- (*FailedJob_TemplateDryRun)(nil), // 16: provisionerd.FailedJob.TemplateDryRun
- (*CompletedJob_WorkspaceBuild)(nil), // 17: provisionerd.CompletedJob.WorkspaceBuild
- (*CompletedJob_TemplateImport)(nil), // 18: provisionerd.CompletedJob.TemplateImport
- (*CompletedJob_TemplateDryRun)(nil), // 19: provisionerd.CompletedJob.TemplateDryRun
- (proto.LogLevel)(0), // 20: provisioner.LogLevel
- (*proto.TemplateVariable)(nil), // 21: provisioner.TemplateVariable
- (*proto.VariableValue)(nil), // 22: provisioner.VariableValue
- (*proto.RichParameterValue)(nil), // 23: provisioner.RichParameterValue
- (*proto.GitAuthProvider)(nil), // 24: provisioner.GitAuthProvider
- (*proto.Metadata)(nil), // 25: provisioner.Metadata
- (*proto.Resource)(nil), // 26: provisioner.Resource
- (*proto.RichParameter)(nil), // 27: provisioner.RichParameter
+ (*CancelAcquire)(nil), // 10: provisionerd.CancelAcquire
+ (*AcquiredJob_WorkspaceBuild)(nil), // 11: provisionerd.AcquiredJob.WorkspaceBuild
+ (*AcquiredJob_TemplateImport)(nil), // 12: provisionerd.AcquiredJob.TemplateImport
+ (*AcquiredJob_TemplateDryRun)(nil), // 13: provisionerd.AcquiredJob.TemplateDryRun
+ nil, // 14: provisionerd.AcquiredJob.TraceMetadataEntry
+ (*FailedJob_WorkspaceBuild)(nil), // 15: provisionerd.FailedJob.WorkspaceBuild
+ (*FailedJob_TemplateImport)(nil), // 16: provisionerd.FailedJob.TemplateImport
+ (*FailedJob_TemplateDryRun)(nil), // 17: provisionerd.FailedJob.TemplateDryRun
+ (*CompletedJob_WorkspaceBuild)(nil), // 18: provisionerd.CompletedJob.WorkspaceBuild
+ (*CompletedJob_TemplateImport)(nil), // 19: provisionerd.CompletedJob.TemplateImport
+ (*CompletedJob_TemplateDryRun)(nil), // 20: provisionerd.CompletedJob.TemplateDryRun
+ (proto.LogLevel)(0), // 21: provisioner.LogLevel
+ (*proto.TemplateVariable)(nil), // 22: provisioner.TemplateVariable
+ (*proto.VariableValue)(nil), // 23: provisioner.VariableValue
+ (*proto.RichParameterValue)(nil), // 24: provisioner.RichParameterValue
+ (*proto.GitAuthProvider)(nil), // 25: provisioner.GitAuthProvider
+ (*proto.Metadata)(nil), // 26: provisioner.Metadata
+ (*proto.Resource)(nil), // 27: provisioner.Resource
+ (*proto.RichParameter)(nil), // 28: provisioner.RichParameter
}
var file_provisionerd_proto_provisionerd_proto_depIdxs = []int32{
- 10, // 0: provisionerd.AcquiredJob.workspace_build:type_name -> provisionerd.AcquiredJob.WorkspaceBuild
- 11, // 1: provisionerd.AcquiredJob.template_import:type_name -> provisionerd.AcquiredJob.TemplateImport
- 12, // 2: provisionerd.AcquiredJob.template_dry_run:type_name -> provisionerd.AcquiredJob.TemplateDryRun
- 13, // 3: provisionerd.AcquiredJob.trace_metadata:type_name -> provisionerd.AcquiredJob.TraceMetadataEntry
- 14, // 4: provisionerd.FailedJob.workspace_build:type_name -> provisionerd.FailedJob.WorkspaceBuild
- 15, // 5: provisionerd.FailedJob.template_import:type_name -> provisionerd.FailedJob.TemplateImport
- 16, // 6: provisionerd.FailedJob.template_dry_run:type_name -> provisionerd.FailedJob.TemplateDryRun
- 17, // 7: provisionerd.CompletedJob.workspace_build:type_name -> provisionerd.CompletedJob.WorkspaceBuild
- 18, // 8: provisionerd.CompletedJob.template_import:type_name -> provisionerd.CompletedJob.TemplateImport
- 19, // 9: provisionerd.CompletedJob.template_dry_run:type_name -> provisionerd.CompletedJob.TemplateDryRun
+ 11, // 0: provisionerd.AcquiredJob.workspace_build:type_name -> provisionerd.AcquiredJob.WorkspaceBuild
+ 12, // 1: provisionerd.AcquiredJob.template_import:type_name -> provisionerd.AcquiredJob.TemplateImport
+ 13, // 2: provisionerd.AcquiredJob.template_dry_run:type_name -> provisionerd.AcquiredJob.TemplateDryRun
+ 14, // 3: provisionerd.AcquiredJob.trace_metadata:type_name -> provisionerd.AcquiredJob.TraceMetadataEntry
+ 15, // 4: provisionerd.FailedJob.workspace_build:type_name -> provisionerd.FailedJob.WorkspaceBuild
+ 16, // 5: provisionerd.FailedJob.template_import:type_name -> provisionerd.FailedJob.TemplateImport
+ 17, // 6: provisionerd.FailedJob.template_dry_run:type_name -> provisionerd.FailedJob.TemplateDryRun
+ 18, // 7: provisionerd.CompletedJob.workspace_build:type_name -> provisionerd.CompletedJob.WorkspaceBuild
+ 19, // 8: provisionerd.CompletedJob.template_import:type_name -> provisionerd.CompletedJob.TemplateImport
+ 20, // 9: provisionerd.CompletedJob.template_dry_run:type_name -> provisionerd.CompletedJob.TemplateDryRun
0, // 10: provisionerd.Log.source:type_name -> provisionerd.LogSource
- 20, // 11: provisionerd.Log.level:type_name -> provisioner.LogLevel
+ 21, // 11: provisionerd.Log.level:type_name -> provisioner.LogLevel
5, // 12: provisionerd.UpdateJobRequest.logs:type_name -> provisionerd.Log
- 21, // 13: provisionerd.UpdateJobRequest.template_variables:type_name -> provisioner.TemplateVariable
- 22, // 14: provisionerd.UpdateJobRequest.user_variable_values:type_name -> provisioner.VariableValue
- 22, // 15: provisionerd.UpdateJobResponse.variable_values:type_name -> provisioner.VariableValue
- 23, // 16: provisionerd.AcquiredJob.WorkspaceBuild.rich_parameter_values:type_name -> provisioner.RichParameterValue
- 22, // 17: provisionerd.AcquiredJob.WorkspaceBuild.variable_values:type_name -> provisioner.VariableValue
- 24, // 18: provisionerd.AcquiredJob.WorkspaceBuild.git_auth_providers:type_name -> provisioner.GitAuthProvider
- 25, // 19: provisionerd.AcquiredJob.WorkspaceBuild.metadata:type_name -> provisioner.Metadata
- 25, // 20: provisionerd.AcquiredJob.TemplateImport.metadata:type_name -> provisioner.Metadata
- 22, // 21: provisionerd.AcquiredJob.TemplateImport.user_variable_values:type_name -> provisioner.VariableValue
- 23, // 22: provisionerd.AcquiredJob.TemplateDryRun.rich_parameter_values:type_name -> provisioner.RichParameterValue
- 22, // 23: provisionerd.AcquiredJob.TemplateDryRun.variable_values:type_name -> provisioner.VariableValue
- 25, // 24: provisionerd.AcquiredJob.TemplateDryRun.metadata:type_name -> provisioner.Metadata
- 26, // 25: provisionerd.CompletedJob.WorkspaceBuild.resources:type_name -> provisioner.Resource
- 26, // 26: provisionerd.CompletedJob.TemplateImport.start_resources:type_name -> provisioner.Resource
- 26, // 27: provisionerd.CompletedJob.TemplateImport.stop_resources:type_name -> provisioner.Resource
- 27, // 28: provisionerd.CompletedJob.TemplateImport.rich_parameters:type_name -> provisioner.RichParameter
- 26, // 29: provisionerd.CompletedJob.TemplateDryRun.resources:type_name -> provisioner.Resource
+ 22, // 13: provisionerd.UpdateJobRequest.template_variables:type_name -> provisioner.TemplateVariable
+ 23, // 14: provisionerd.UpdateJobRequest.user_variable_values:type_name -> provisioner.VariableValue
+ 23, // 15: provisionerd.UpdateJobResponse.variable_values:type_name -> provisioner.VariableValue
+ 24, // 16: provisionerd.AcquiredJob.WorkspaceBuild.rich_parameter_values:type_name -> provisioner.RichParameterValue
+ 23, // 17: provisionerd.AcquiredJob.WorkspaceBuild.variable_values:type_name -> provisioner.VariableValue
+ 25, // 18: provisionerd.AcquiredJob.WorkspaceBuild.git_auth_providers:type_name -> provisioner.GitAuthProvider
+ 26, // 19: provisionerd.AcquiredJob.WorkspaceBuild.metadata:type_name -> provisioner.Metadata
+ 26, // 20: provisionerd.AcquiredJob.TemplateImport.metadata:type_name -> provisioner.Metadata
+ 23, // 21: provisionerd.AcquiredJob.TemplateImport.user_variable_values:type_name -> provisioner.VariableValue
+ 24, // 22: provisionerd.AcquiredJob.TemplateDryRun.rich_parameter_values:type_name -> provisioner.RichParameterValue
+ 23, // 23: provisionerd.AcquiredJob.TemplateDryRun.variable_values:type_name -> provisioner.VariableValue
+ 26, // 24: provisionerd.AcquiredJob.TemplateDryRun.metadata:type_name -> provisioner.Metadata
+ 27, // 25: provisionerd.CompletedJob.WorkspaceBuild.resources:type_name -> provisioner.Resource
+ 27, // 26: provisionerd.CompletedJob.TemplateImport.start_resources:type_name -> provisioner.Resource
+ 27, // 27: provisionerd.CompletedJob.TemplateImport.stop_resources:type_name -> provisioner.Resource
+ 28, // 28: provisionerd.CompletedJob.TemplateImport.rich_parameters:type_name -> provisioner.RichParameter
+ 27, // 29: provisionerd.CompletedJob.TemplateDryRun.resources:type_name -> provisioner.Resource
1, // 30: provisionerd.ProvisionerDaemon.AcquireJob:input_type -> provisionerd.Empty
- 8, // 31: provisionerd.ProvisionerDaemon.CommitQuota:input_type -> provisionerd.CommitQuotaRequest
- 6, // 32: provisionerd.ProvisionerDaemon.UpdateJob:input_type -> provisionerd.UpdateJobRequest
- 3, // 33: provisionerd.ProvisionerDaemon.FailJob:input_type -> provisionerd.FailedJob
- 4, // 34: provisionerd.ProvisionerDaemon.CompleteJob:input_type -> provisionerd.CompletedJob
- 2, // 35: provisionerd.ProvisionerDaemon.AcquireJob:output_type -> provisionerd.AcquiredJob
- 9, // 36: provisionerd.ProvisionerDaemon.CommitQuota:output_type -> provisionerd.CommitQuotaResponse
- 7, // 37: provisionerd.ProvisionerDaemon.UpdateJob:output_type -> provisionerd.UpdateJobResponse
- 1, // 38: provisionerd.ProvisionerDaemon.FailJob:output_type -> provisionerd.Empty
- 1, // 39: provisionerd.ProvisionerDaemon.CompleteJob:output_type -> provisionerd.Empty
- 35, // [35:40] is the sub-list for method output_type
- 30, // [30:35] is the sub-list for method input_type
+ 10, // 31: provisionerd.ProvisionerDaemon.AcquireJobWithCancel:input_type -> provisionerd.CancelAcquire
+ 8, // 32: provisionerd.ProvisionerDaemon.CommitQuota:input_type -> provisionerd.CommitQuotaRequest
+ 6, // 33: provisionerd.ProvisionerDaemon.UpdateJob:input_type -> provisionerd.UpdateJobRequest
+ 3, // 34: provisionerd.ProvisionerDaemon.FailJob:input_type -> provisionerd.FailedJob
+ 4, // 35: provisionerd.ProvisionerDaemon.CompleteJob:input_type -> provisionerd.CompletedJob
+ 2, // 36: provisionerd.ProvisionerDaemon.AcquireJob:output_type -> provisionerd.AcquiredJob
+ 2, // 37: provisionerd.ProvisionerDaemon.AcquireJobWithCancel:output_type -> provisionerd.AcquiredJob
+ 9, // 38: provisionerd.ProvisionerDaemon.CommitQuota:output_type -> provisionerd.CommitQuotaResponse
+ 7, // 39: provisionerd.ProvisionerDaemon.UpdateJob:output_type -> provisionerd.UpdateJobResponse
+ 1, // 40: provisionerd.ProvisionerDaemon.FailJob:output_type -> provisionerd.Empty
+ 1, // 41: provisionerd.ProvisionerDaemon.CompleteJob:output_type -> provisionerd.Empty
+ 36, // [36:42] is the sub-list for method output_type
+ 30, // [30:36] is the sub-list for method input_type
30, // [30:30] is the sub-list for extension type_name
30, // [30:30] is the sub-list for extension extendee
0, // [0:30] is the sub-list for field type_name
@@ -1783,7 +1831,7 @@ func file_provisionerd_proto_provisionerd_proto_init() {
}
}
file_provisionerd_proto_provisionerd_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*AcquiredJob_WorkspaceBuild); i {
+ switch v := v.(*CancelAcquire); i {
case 0:
return &v.state
case 1:
@@ -1795,7 +1843,7 @@ func file_provisionerd_proto_provisionerd_proto_init() {
}
}
file_provisionerd_proto_provisionerd_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*AcquiredJob_TemplateImport); i {
+ switch v := v.(*AcquiredJob_WorkspaceBuild); i {
case 0:
return &v.state
case 1:
@@ -1807,6 +1855,18 @@ func file_provisionerd_proto_provisionerd_proto_init() {
}
}
file_provisionerd_proto_provisionerd_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*AcquiredJob_TemplateImport); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_provisionerd_proto_provisionerd_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*AcquiredJob_TemplateDryRun); i {
case 0:
return &v.state
@@ -1818,7 +1878,7 @@ func file_provisionerd_proto_provisionerd_proto_init() {
return nil
}
}
- file_provisionerd_proto_provisionerd_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} {
+ file_provisionerd_proto_provisionerd_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*FailedJob_WorkspaceBuild); i {
case 0:
return &v.state
@@ -1830,7 +1890,7 @@ func file_provisionerd_proto_provisionerd_proto_init() {
return nil
}
}
- file_provisionerd_proto_provisionerd_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} {
+ file_provisionerd_proto_provisionerd_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*FailedJob_TemplateImport); i {
case 0:
return &v.state
@@ -1842,7 +1902,7 @@ func file_provisionerd_proto_provisionerd_proto_init() {
return nil
}
}
- file_provisionerd_proto_provisionerd_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} {
+ file_provisionerd_proto_provisionerd_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*FailedJob_TemplateDryRun); i {
case 0:
return &v.state
@@ -1854,7 +1914,7 @@ func file_provisionerd_proto_provisionerd_proto_init() {
return nil
}
}
- file_provisionerd_proto_provisionerd_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} {
+ file_provisionerd_proto_provisionerd_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*CompletedJob_WorkspaceBuild); i {
case 0:
return &v.state
@@ -1866,7 +1926,7 @@ func file_provisionerd_proto_provisionerd_proto_init() {
return nil
}
}
- file_provisionerd_proto_provisionerd_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} {
+ file_provisionerd_proto_provisionerd_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*CompletedJob_TemplateImport); i {
case 0:
return &v.state
@@ -1878,7 +1938,7 @@ func file_provisionerd_proto_provisionerd_proto_init() {
return nil
}
}
- file_provisionerd_proto_provisionerd_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} {
+ file_provisionerd_proto_provisionerd_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*CompletedJob_TemplateDryRun); i {
case 0:
return &v.state
@@ -1912,7 +1972,7 @@ func file_provisionerd_proto_provisionerd_proto_init() {
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_provisionerd_proto_provisionerd_proto_rawDesc,
NumEnums: 1,
- NumMessages: 19,
+ NumMessages: 20,
NumExtensions: 0,
NumServices: 1,
},
diff --git a/provisionerd/proto/provisionerd.proto b/provisionerd/proto/provisionerd.proto
index 8d4fadffc6373..bab2ad5c0cb69 100644
--- a/provisionerd/proto/provisionerd.proto
+++ b/provisionerd/proto/provisionerd.proto
@@ -135,11 +135,22 @@ message CommitQuotaResponse {
int32 budget = 3;
}
+message CancelAcquire {}
+
service ProvisionerDaemon {
// AcquireJob requests a job. Implementations should
// hold a lock on the job until CompleteJob() is
// called with the matching ID.
- rpc AcquireJob(Empty) returns (AcquiredJob);
+ rpc AcquireJob(Empty) returns (AcquiredJob) {
+ option deprecated = true;
+ };
+ // AcquireJobWithCancel requests a job, blocking until
+ // a job is available or the client sends CancelAcquire.
+ // The server sends exactly one AcquiredJob, which is
+ // empty if the cancel was successful. This RPC is a
+ // bidirectional stream since both messages are
+ // asynchronous with no implied ordering.
+ rpc AcquireJobWithCancel(stream CancelAcquire) returns (stream AcquiredJob);
rpc CommitQuota(CommitQuotaRequest) returns (CommitQuotaResponse);
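For orientation (not part of the diff): a minimal sketch of how a client might drive the new bidirectional stream, mirroring the `acquireGraceful` helper added to provisionerd.go below. The function name and the `shutdown` parameter are illustrative stand-ins, not part of this change.

```go
package example

import (
	"context"

	"github.com/coder/coder/v2/provisionerd/proto"
)

// acquireOne drives one AcquireJobWithCancel round trip. shutdown is a
// stand-in for provisionerd's shuttingDownCh.
func acquireOne(ctx context.Context, client proto.DRPCProvisionerDaemonClient, shutdown <-chan struct{}) (*proto.AcquiredJob, error) {
	stream, err := client.AcquireJobWithCancel(ctx)
	if err != nil {
		return nil, err
	}
	done := make(chan struct{})
	go func() {
		select {
		case <-shutdown:
			// Ask the server to stop blocking; if the cancel wins
			// the race, the server replies with an empty AcquiredJob.
			_ = stream.Send(&proto.CancelAcquire{})
		case <-done:
		}
	}()
	job, err := stream.Recv()
	close(done)
	if err != nil {
		return nil, err
	}
	if job.GetJobId() == "" {
		// Cancel succeeded; there is no job to run.
		return nil, nil
	}
	return job, nil
}
```

Because the server always answers with exactly one AcquiredJob, the caller never has to guess whether a cancel raced with a job assignment.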
diff --git a/provisionerd/proto/provisionerd_drpc.pb.go b/provisionerd/proto/provisionerd_drpc.pb.go
index ed3155fb21eaa..60d78a86acb17 100644
--- a/provisionerd/proto/provisionerd_drpc.pb.go
+++ b/provisionerd/proto/provisionerd_drpc.pb.go
@@ -39,6 +39,7 @@ type DRPCProvisionerDaemonClient interface {
DRPCConn() drpc.Conn
AcquireJob(ctx context.Context, in *Empty) (*AcquiredJob, error)
+ AcquireJobWithCancel(ctx context.Context) (DRPCProvisionerDaemon_AcquireJobWithCancelClient, error)
CommitQuota(ctx context.Context, in *CommitQuotaRequest) (*CommitQuotaResponse, error)
UpdateJob(ctx context.Context, in *UpdateJobRequest) (*UpdateJobResponse, error)
FailJob(ctx context.Context, in *FailedJob) (*Empty, error)
@@ -64,6 +65,45 @@ func (c *drpcProvisionerDaemonClient) AcquireJob(ctx context.Context, in *Empty)
return out, nil
}
+func (c *drpcProvisionerDaemonClient) AcquireJobWithCancel(ctx context.Context) (DRPCProvisionerDaemon_AcquireJobWithCancelClient, error) {
+ stream, err := c.cc.NewStream(ctx, "/provisionerd.ProvisionerDaemon/AcquireJobWithCancel", drpcEncoding_File_provisionerd_proto_provisionerd_proto{})
+ if err != nil {
+ return nil, err
+ }
+ x := &drpcProvisionerDaemon_AcquireJobWithCancelClient{stream}
+ return x, nil
+}
+
+type DRPCProvisionerDaemon_AcquireJobWithCancelClient interface {
+ drpc.Stream
+ Send(*CancelAcquire) error
+ Recv() (*AcquiredJob, error)
+}
+
+type drpcProvisionerDaemon_AcquireJobWithCancelClient struct {
+ drpc.Stream
+}
+
+func (x *drpcProvisionerDaemon_AcquireJobWithCancelClient) GetStream() drpc.Stream {
+ return x.Stream
+}
+
+func (x *drpcProvisionerDaemon_AcquireJobWithCancelClient) Send(m *CancelAcquire) error {
+ return x.MsgSend(m, drpcEncoding_File_provisionerd_proto_provisionerd_proto{})
+}
+
+func (x *drpcProvisionerDaemon_AcquireJobWithCancelClient) Recv() (*AcquiredJob, error) {
+ m := new(AcquiredJob)
+ if err := x.MsgRecv(m, drpcEncoding_File_provisionerd_proto_provisionerd_proto{}); err != nil {
+ return nil, err
+ }
+ return m, nil
+}
+
+func (x *drpcProvisionerDaemon_AcquireJobWithCancelClient) RecvMsg(m *AcquiredJob) error {
+ return x.MsgRecv(m, drpcEncoding_File_provisionerd_proto_provisionerd_proto{})
+}
+
func (c *drpcProvisionerDaemonClient) CommitQuota(ctx context.Context, in *CommitQuotaRequest) (*CommitQuotaResponse, error) {
out := new(CommitQuotaResponse)
err := c.cc.Invoke(ctx, "/provisionerd.ProvisionerDaemon/CommitQuota", drpcEncoding_File_provisionerd_proto_provisionerd_proto{}, in, out)
@@ -102,6 +142,7 @@ func (c *drpcProvisionerDaemonClient) CompleteJob(ctx context.Context, in *Compl
type DRPCProvisionerDaemonServer interface {
AcquireJob(context.Context, *Empty) (*AcquiredJob, error)
+ AcquireJobWithCancel(DRPCProvisionerDaemon_AcquireJobWithCancelStream) error
CommitQuota(context.Context, *CommitQuotaRequest) (*CommitQuotaResponse, error)
UpdateJob(context.Context, *UpdateJobRequest) (*UpdateJobResponse, error)
FailJob(context.Context, *FailedJob) (*Empty, error)
@@ -114,6 +155,10 @@ func (s *DRPCProvisionerDaemonUnimplementedServer) AcquireJob(context.Context, *
return nil, drpcerr.WithCode(errors.New("Unimplemented"), drpcerr.Unimplemented)
}
+func (s *DRPCProvisionerDaemonUnimplementedServer) AcquireJobWithCancel(DRPCProvisionerDaemon_AcquireJobWithCancelStream) error {
+ return drpcerr.WithCode(errors.New("Unimplemented"), drpcerr.Unimplemented)
+}
+
func (s *DRPCProvisionerDaemonUnimplementedServer) CommitQuota(context.Context, *CommitQuotaRequest) (*CommitQuotaResponse, error) {
return nil, drpcerr.WithCode(errors.New("Unimplemented"), drpcerr.Unimplemented)
}
@@ -132,7 +177,7 @@ func (s *DRPCProvisionerDaemonUnimplementedServer) CompleteJob(context.Context,
type DRPCProvisionerDaemonDescription struct{}
-func (DRPCProvisionerDaemonDescription) NumMethods() int { return 5 }
+func (DRPCProvisionerDaemonDescription) NumMethods() int { return 6 }
func (DRPCProvisionerDaemonDescription) Method(n int) (string, drpc.Encoding, drpc.Receiver, interface{}, bool) {
switch n {
@@ -146,6 +191,14 @@ func (DRPCProvisionerDaemonDescription) Method(n int) (string, drpc.Encoding, dr
)
}, DRPCProvisionerDaemonServer.AcquireJob, true
case 1:
+ return "/provisionerd.ProvisionerDaemon/AcquireJobWithCancel", drpcEncoding_File_provisionerd_proto_provisionerd_proto{},
+ func(srv interface{}, ctx context.Context, in1, in2 interface{}) (drpc.Message, error) {
+ return nil, srv.(DRPCProvisionerDaemonServer).
+ AcquireJobWithCancel(
+ &drpcProvisionerDaemon_AcquireJobWithCancelStream{in1.(drpc.Stream)},
+ )
+ }, DRPCProvisionerDaemonServer.AcquireJobWithCancel, true
+ case 2:
return "/provisionerd.ProvisionerDaemon/CommitQuota", drpcEncoding_File_provisionerd_proto_provisionerd_proto{},
func(srv interface{}, ctx context.Context, in1, in2 interface{}) (drpc.Message, error) {
return srv.(DRPCProvisionerDaemonServer).
@@ -154,7 +207,7 @@ func (DRPCProvisionerDaemonDescription) Method(n int) (string, drpc.Encoding, dr
in1.(*CommitQuotaRequest),
)
}, DRPCProvisionerDaemonServer.CommitQuota, true
- case 2:
+ case 3:
return "/provisionerd.ProvisionerDaemon/UpdateJob", drpcEncoding_File_provisionerd_proto_provisionerd_proto{},
func(srv interface{}, ctx context.Context, in1, in2 interface{}) (drpc.Message, error) {
return srv.(DRPCProvisionerDaemonServer).
@@ -163,7 +216,7 @@ func (DRPCProvisionerDaemonDescription) Method(n int) (string, drpc.Encoding, dr
in1.(*UpdateJobRequest),
)
}, DRPCProvisionerDaemonServer.UpdateJob, true
- case 3:
+ case 4:
return "/provisionerd.ProvisionerDaemon/FailJob", drpcEncoding_File_provisionerd_proto_provisionerd_proto{},
func(srv interface{}, ctx context.Context, in1, in2 interface{}) (drpc.Message, error) {
return srv.(DRPCProvisionerDaemonServer).
@@ -172,7 +225,7 @@ func (DRPCProvisionerDaemonDescription) Method(n int) (string, drpc.Encoding, dr
in1.(*FailedJob),
)
}, DRPCProvisionerDaemonServer.FailJob, true
- case 4:
+ case 5:
return "/provisionerd.ProvisionerDaemon/CompleteJob", drpcEncoding_File_provisionerd_proto_provisionerd_proto{},
func(srv interface{}, ctx context.Context, in1, in2 interface{}) (drpc.Message, error) {
return srv.(DRPCProvisionerDaemonServer).
@@ -206,6 +259,32 @@ func (x *drpcProvisionerDaemon_AcquireJobStream) SendAndClose(m *AcquiredJob) er
return x.CloseSend()
}
+type DRPCProvisionerDaemon_AcquireJobWithCancelStream interface {
+ drpc.Stream
+ Send(*AcquiredJob) error
+ Recv() (*CancelAcquire, error)
+}
+
+type drpcProvisionerDaemon_AcquireJobWithCancelStream struct {
+ drpc.Stream
+}
+
+func (x *drpcProvisionerDaemon_AcquireJobWithCancelStream) Send(m *AcquiredJob) error {
+ return x.MsgSend(m, drpcEncoding_File_provisionerd_proto_provisionerd_proto{})
+}
+
+func (x *drpcProvisionerDaemon_AcquireJobWithCancelStream) Recv() (*CancelAcquire, error) {
+ m := new(CancelAcquire)
+ if err := x.MsgRecv(m, drpcEncoding_File_provisionerd_proto_provisionerd_proto{}); err != nil {
+ return nil, err
+ }
+ return m, nil
+}
+
+func (x *drpcProvisionerDaemon_AcquireJobWithCancelStream) RecvMsg(m *CancelAcquire) error {
+ return x.MsgRecv(m, drpcEncoding_File_provisionerd_proto_provisionerd_proto{})
+}
+
type DRPCProvisionerDaemon_CommitQuotaStream interface {
drpc.Stream
SendAndClose(*CommitQuotaResponse) error
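To make the generated stream types concrete, here is a hedged sketch of a server-side handler satisfying `DRPCProvisionerDaemonServer`, in the spirit of the test servers further down in this diff. The `jobs` channel is a hypothetical stand-in for however a real server sources pending jobs; the coderd-side implementation is outside this excerpt.

```go
package example

import (
	"github.com/coder/coder/v2/provisionerd/proto"
)

// fakeDaemonServer sketches the server side of the new stream.
type fakeDaemonServer struct {
	proto.DRPCProvisionerDaemonUnimplementedServer
	jobs <-chan *proto.AcquiredJob // hypothetical job source
}

func (s *fakeDaemonServer) AcquireJobWithCancel(stream proto.DRPCProvisionerDaemon_AcquireJobWithCancelStream) error {
	canceled := make(chan struct{})
	go func() {
		// Recv returns when the client sends CancelAcquire or the
		// stream breaks; either way, stop waiting for a job.
		_, _ = stream.Recv()
		close(canceled)
	}()
	select {
	case job := <-s.jobs:
		// Exactly one AcquiredJob per call, per the proto contract.
		return stream.Send(job)
	case <-canceled:
		// Acknowledge the cancel with an empty job.
		return stream.Send(&proto.AcquiredJob{})
	}
}
```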
diff --git a/provisionerd/provisionerd.go b/provisionerd/provisionerd.go
index e873d1901ddea..9072085ff5e09 100644
--- a/provisionerd/provisionerd.go
+++ b/provisionerd/provisionerd.go
@@ -16,13 +16,10 @@ import (
"go.opentelemetry.io/otel/attribute"
semconv "go.opentelemetry.io/otel/semconv/v1.14.0"
"go.opentelemetry.io/otel/trace"
- "go.uber.org/atomic"
"golang.org/x/xerrors"
"cdr.dev/slog"
"github.com/coder/coder/v2/coderd/tracing"
- "github.com/coder/coder/v2/coderd/util/ptr"
- "github.com/coder/coder/v2/cryptorand"
"github.com/coder/coder/v2/provisionerd/proto"
"github.com/coder/coder/v2/provisionerd/runner"
sdkproto "github.com/coder/coder/v2/provisionersdk/proto"
@@ -60,9 +57,6 @@ type Options struct {
ForceCancelInterval time.Duration
UpdateInterval time.Duration
LogBufferInterval time.Duration
- JobPollInterval time.Duration
- JobPollJitter time.Duration
- JobPollDebounce time.Duration
Connector Connector
}
@@ -71,12 +65,6 @@ func New(clientDialer Dialer, opts *Options) *Server {
if opts == nil {
opts = &Options{}
}
- if opts.JobPollInterval == 0 {
- opts.JobPollInterval = 5 * time.Second
- }
- if opts.JobPollJitter == 0 {
- opts.JobPollJitter = time.Second
- }
if opts.UpdateInterval == 0 {
opts.UpdateInterval = 5 * time.Second
}
@@ -101,14 +89,18 @@ func New(clientDialer Dialer, opts *Options) *Server {
tracer: opts.TracerProvider.Tracer(tracing.TracerName),
clientDialer: clientDialer,
+ clientCh: make(chan proto.DRPCProvisionerDaemonClient),
- closeContext: ctx,
- closeCancel: ctxCancel,
-
- shutdown: make(chan struct{}),
+ closeContext: ctx,
+ closeCancel: ctxCancel,
+ closedCh: make(chan struct{}),
+ shuttingDownCh: make(chan struct{}),
+ acquireDoneCh: make(chan struct{}),
}
- go daemon.connect(ctx)
+ daemon.wg.Add(2)
+ go daemon.connect()
+ go daemon.acquireLoop()
return daemon
}
@@ -117,15 +109,28 @@ type Server struct {
tracer trace.Tracer
clientDialer Dialer
- clientValue atomic.Pointer[proto.DRPCProvisionerDaemonClient]
+ clientCh chan proto.DRPCProvisionerDaemonClient
+
+ wg sync.WaitGroup
- // Locked when closing the daemon, shutting down, or starting a new job.
- mutex sync.Mutex
+ // mutex protects all subsequent fields
+ mutex sync.Mutex
+ // closeContext is canceled when we start closing.
closeContext context.Context
closeCancel context.CancelFunc
- closeError error
- shutdown chan struct{}
- activeJob *runner.Runner
+ // closeError stores the error from closing so it can be returned to subsequent callers
+ closeError error
+ // closingB is set to true when we start closing
+ closingB bool
+ // closedCh is closed when we complete closing
+ closedCh chan struct{}
+ // shuttingDownB is set to true when we start graceful shutdown
+ shuttingDownB bool
+ // shuttingDownCh is closed when we start graceful shutdown
+ shuttingDownCh chan struct{}
+ // acquireDoneCh is closed when the acquireLoop exits
+ acquireDoneCh chan struct{}
+ activeJob *runner.Runner
}
type Metrics struct {
@@ -176,16 +181,20 @@ func NewMetrics(reg prometheus.Registerer) Metrics {
}
// Connect establishes a connection to coderd.
-func (p *Server) connect(ctx context.Context) {
+func (p *Server) connect() {
+ defer p.opts.Logger.Debug(p.closeContext, "connect loop exited")
+ defer p.wg.Done()
// An exponential back-off occurs when the connection is failing to dial.
// This is to prevent server spam in case of a coderd outage.
- for retrier := retry.New(50*time.Millisecond, 10*time.Second); retrier.Wait(ctx); {
+connectLoop:
+ for retrier := retry.New(50*time.Millisecond, 10*time.Second); retrier.Wait(p.closeContext); {
// It's possible for the provisioner daemon to be shut down
// before the wait is complete!
if p.isClosed() {
return
}
- client, err := p.clientDialer(ctx)
+ p.opts.Logger.Debug(p.closeContext, "dialing coderd")
+ client, err := p.clientDialer(p.closeContext)
if err != nil {
if errors.Is(err, context.Canceled) {
return
@@ -193,144 +202,75 @@ func (p *Server) connect(ctx context.Context) {
if p.isClosed() {
return
}
- p.opts.Logger.Warn(context.Background(), "coderd client failed to dial", slog.Error(err))
+ p.opts.Logger.Warn(p.closeContext, "coderd client failed to dial", slog.Error(err))
continue
}
- // Ensure connection is not left hanging during a race between
- // close and dial succeeding.
- p.mutex.Lock()
- if p.isClosed() {
- client.DRPCConn().Close()
- p.mutex.Unlock()
- break
- }
- p.clientValue.Store(ptr.Ref(client))
- p.mutex.Unlock()
-
- p.opts.Logger.Debug(ctx, "successfully connected to coderd")
- break
- }
- select {
- case <-ctx.Done():
- return
- default:
- }
-
- go func() {
- if p.isClosed() {
- return
- }
- client, ok := p.client()
- if !ok {
- return
- }
- select {
- case <-p.closeContext.Done():
- return
- case <-client.DRPCConn().Closed():
- // We use the update stream to detect when the connection
- // has been interrupted. This works well, because logs need
- // to buffer if a job is running in the background.
- p.opts.Logger.Debug(context.Background(), "client stream ended")
- p.connect(ctx)
- }
- }()
+ p.opts.Logger.Info(p.closeContext, "successfully connected to coderd")
+ retrier.Reset()
- go func() {
- if p.isClosed() {
- return
- }
- timer := time.NewTimer(p.opts.JobPollInterval)
- defer timer.Stop()
+ // serve the client until we are closed or it disconnects
for {
- client, ok := p.client()
- if !ok {
- return
- }
select {
case <-p.closeContext.Done():
+ client.DRPCConn().Close()
return
case <-client.DRPCConn().Closed():
- return
- case <-timer.C:
- p.acquireJob(ctx)
- timer.Reset(p.nextInterval())
+ p.opts.Logger.Info(p.closeContext, "connection to coderd closed")
+ continue connectLoop
+ case p.clientCh <- client:
+ continue
}
}
- }()
-}
-
-func (p *Server) nextInterval() time.Duration {
- r, err := cryptorand.Float64()
- if err != nil {
- panic("get random float:" + err.Error())
}
-
- return p.opts.JobPollInterval + time.Duration(float64(p.opts.JobPollJitter)*r)
}
func (p *Server) client() (proto.DRPCProvisionerDaemonClient, bool) {
- client := p.clientValue.Load()
- if client == nil {
+ select {
+ case <-p.closeContext.Done():
return nil, false
+ case client := <-p.clientCh:
+ return client, true
}
- return *client, true
}
-// isRunningJob returns true if a job is running. Caller must hold the mutex.
-func (p *Server) isRunningJob() bool {
- if p.activeJob == nil {
- return false
- }
- select {
- case <-p.activeJob.Done():
- return false
- default:
- return true
+func (p *Server) acquireLoop() {
+ defer p.opts.Logger.Debug(p.closeContext, "acquire loop exited")
+ defer p.wg.Done()
+ defer close(p.acquireDoneCh)
+ ctx := p.closeContext
+ for {
+ if p.acquireExit() {
+ return
+ }
+ client, ok := p.client()
+ if !ok {
+ p.opts.Logger.Debug(ctx, "shut down before client (re) connected")
+ return
+ }
+ p.acquireAndRunOne(client)
}
}
-var (
- lastAcquire time.Time
- lastAcquireMutex sync.RWMutex
-)
-
-// Locks a job in the database, and runs it!
-func (p *Server) acquireJob(ctx context.Context) {
+// acquireExit returns true if the acquire loop should exit
+func (p *Server) acquireExit() bool {
p.mutex.Lock()
defer p.mutex.Unlock()
- if p.isClosed() {
- return
- }
- if p.isRunningJob() {
- return
- }
- if p.isShutdown() {
- p.opts.Logger.Debug(context.Background(), "skipping acquire; provisionerd is shutting down")
- return
- }
-
- // This prevents loads of provisioner daemons from consistently sending
- // requests when no jobs are available.
- //
- // The debounce only occurs when no job is returned, so if loads of jobs are
- // added at once, they will start after at most this duration.
- lastAcquireMutex.RLock()
- if !lastAcquire.IsZero() && time.Since(lastAcquire) < p.opts.JobPollDebounce {
- lastAcquireMutex.RUnlock()
- p.opts.Logger.Debug(ctx, "debounce acquire job")
- return
+ if p.closingB {
+ p.opts.Logger.Debug(p.closeContext, "exiting acquire; provisionerd is closing")
+ return true
}
- lastAcquireMutex.RUnlock()
-
- var err error
- client, ok := p.client()
- if !ok {
- return
+ if p.shuttingDownB {
+ p.opts.Logger.Debug(p.closeContext, "exiting acquire; provisionerd is shutting down")
+ return true
}
+ return false
+}
- job, err := client.AcquireJob(ctx, &proto.Empty{})
- p.opts.Logger.Debug(ctx, "called AcquireJob on client", slog.F("job_id", job.GetJobId()), slog.Error(err))
+func (p *Server) acquireAndRunOne(client proto.DRPCProvisionerDaemonClient) {
+ ctx := p.closeContext
+ p.opts.Logger.Debug(ctx, "start of acquireAndRunOne")
+ job, err := p.acquireGraceful(client)
+ p.opts.Logger.Debug(ctx, "graceful acquire done", slog.F("job_id", job.GetJobId()), slog.Error(err))
if err != nil {
if errors.Is(err, context.Canceled) ||
errors.Is(err, yamux.ErrSessionShutdown) ||
@@ -342,9 +282,7 @@ func (p *Server) acquireJob(ctx context.Context) {
return
}
if job.JobId == "" {
- lastAcquireMutex.Lock()
- lastAcquire = time.Now()
- lastAcquireMutex.Unlock()
+ p.opts.Logger.Debug(ctx, "acquire job successfully canceled")
return
}
@@ -405,6 +343,7 @@ func (p *Server) acquireJob(ctx context.Context) {
return
}
+ p.mutex.Lock()
p.activeJob = runner.New(
ctx,
job,
@@ -420,8 +359,39 @@ func (p *Server) acquireJob(ctx context.Context) {
Metrics: p.opts.Metrics.Runner,
},
)
+ p.mutex.Unlock()
+ p.activeJob.Run()
+ p.mutex.Lock()
+ p.activeJob = nil
+ p.mutex.Unlock()
+}
- go p.activeJob.Run()
+// acquireGraceful attempts to acquire a job from the server, canceling the acquisition if we
+// begin a graceful shutdown.
+func (p *Server) acquireGraceful(client proto.DRPCProvisionerDaemonClient) (*proto.AcquiredJob, error) {
+ stream, err := client.AcquireJobWithCancel(p.closeContext)
+ if err != nil {
+ return nil, err
+ }
+ acquireDone := make(chan struct{})
+ go func() {
+ select {
+ case <-p.closeContext.Done():
+ return
+ case <-p.shuttingDownCh:
+ p.opts.Logger.Debug(p.closeContext, "sending acquire job cancel")
+ err := stream.Send(&proto.CancelAcquire{})
+ if err != nil {
+ p.opts.Logger.Warn(p.closeContext, "failed to gracefully cancel acquire job")
+ }
+ return
+ case <-acquireDone:
+ return
+ }
+ }()
+ job, err := stream.Recv()
+ close(acquireDone)
+ return job, err
}
func retryable(err error) bool {
@@ -496,36 +466,23 @@ func (p *Server) isClosed() bool {
}
}
-// isShutdown returns whether the API is shutdown or not.
-func (p *Server) isShutdown() bool {
- select {
- case <-p.shutdown:
- return true
- default:
- return false
- }
-}
-
// Shutdown triggers a graceful exit of each registered provisioner.
-// It exits when an active job stops.
func (p *Server) Shutdown(ctx context.Context) error {
p.mutex.Lock()
- defer p.mutex.Unlock()
- if !p.isRunningJob() {
- return nil
- }
p.opts.Logger.Info(ctx, "attempting graceful shutdown")
- close(p.shutdown)
- if p.activeJob == nil {
- return nil
+ if !p.shuttingDownB {
+ close(p.shuttingDownCh)
+ p.shuttingDownB = true
}
- // wait for active job
- p.activeJob.Cancel()
+ if p.activeJob != nil {
+ p.activeJob.Cancel()
+ }
+ p.mutex.Unlock()
select {
case <-ctx.Done():
p.opts.Logger.Warn(ctx, "graceful shutdown failed", slog.Error(ctx.Err()))
return ctx.Err()
- case <-p.activeJob.Done():
+ case <-p.acquireDoneCh:
p.opts.Logger.Info(ctx, "gracefully shutdown")
return nil
}
@@ -533,41 +490,51 @@ func (p *Server) Shutdown(ctx context.Context) error {
// Close ends the provisioner. It will mark any running jobs as failed.
func (p *Server) Close() error {
+ p.opts.Logger.Info(p.closeContext, "closing provisionerd")
return p.closeWithError(nil)
}
// closeWithError closes the provisioner; subsequent reads/writes will return the error err.
func (p *Server) closeWithError(err error) error {
p.mutex.Lock()
- defer p.mutex.Unlock()
- if p.isClosed() {
- return p.closeError
- }
- p.closeError = err
-
- errMsg := "provisioner daemon was shutdown gracefully"
- if err != nil {
- errMsg = err.Error()
- }
- if p.activeJob != nil {
+ var activeJob *runner.Runner
+ first := false
+ if !p.closingB {
+ first = true
+ p.closingB = true
+ // only the first caller to close should attempt to fail the active job
+ activeJob = p.activeJob
+ }
+ // don't hold the mutex while doing I/O.
+ p.mutex.Unlock()
+ if activeJob != nil {
+ errMsg := "provisioner daemon was shutdown gracefully"
+ if err != nil {
+ errMsg = err.Error()
+ }
+ p.opts.Logger.Debug(p.closeContext, "failing active job because of close")
ctx, cancel := context.WithTimeout(context.Background(), time.Second)
defer cancel()
- failErr := p.activeJob.Fail(ctx, &proto.FailedJob{Error: errMsg})
+ failErr := activeJob.Fail(ctx, &proto.FailedJob{Error: errMsg})
if failErr != nil {
- p.activeJob.ForceStop()
+ activeJob.ForceStop()
}
if err == nil {
err = failErr
}
}
- p.closeCancel()
-
- p.opts.Logger.Debug(context.Background(), "closing server with error", slog.Error(err))
-
- if c, ok := p.client(); ok {
- _ = c.DRPCConn().Close()
+ if first {
+ p.closeCancel()
+ p.opts.Logger.Debug(context.Background(), "waiting for goroutines to exit")
+ p.wg.Wait()
+ p.opts.Logger.Debug(context.Background(), "closing server with error", slog.Error(err))
+ p.closeError = err
+ close(p.closedCh)
+ return err
}
-
- return err
+ p.opts.Logger.Debug(p.closeContext, "waiting for first closer to complete")
+ <-p.closedCh
+ p.opts.Logger.Debug(p.closeContext, "first closer completed")
+ return p.closeError
}
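The closeWithError rewrite above is essentially a first-closer pattern: only the first caller performs teardown, and concurrent callers block on closedCh and return the stored error. A distilled sketch, with fields simplified from the Server struct:

```go
package example

import (
	"context"
	"sync"
)

// closer distills the first-closer pattern used by closeWithError above.
type closer struct {
	mu       sync.Mutex
	closingB bool          // set once we start closing
	closedCh chan struct{} // closed when teardown completes
	closeErr error         // recorded by the first closer
	cancel   context.CancelFunc
	wg       sync.WaitGroup
}

func (c *closer) close(err error) error {
	c.mu.Lock()
	first := !c.closingB
	c.closingB = true
	c.mu.Unlock()
	if !first {
		// A concurrent close is already running; wait for it and
		// return whatever error it recorded.
		<-c.closedCh
		return c.closeErr
	}
	c.cancel()  // stop the connect and acquire loops
	c.wg.Wait() // wait for their goroutines to exit
	c.closeErr = err
	close(c.closedCh)
	return err
}
```

Shutdown composes with this: it closes shuttingDownCh so acquireGraceful cancels any pending acquire, then waits on acquireDoneCh instead of a per-job channel.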
diff --git a/provisionerd/provisionerd_test.go b/provisionerd/provisionerd_test.go
index 79d8b1c108e6f..3d8617a4a5551 100644
--- a/provisionerd/provisionerd_test.go
+++ b/provisionerd/provisionerd_test.go
@@ -59,7 +59,7 @@ func TestProvisionerd(t *testing.T) {
close(done)
})
closer := createProvisionerd(t, func(ctx context.Context) (proto.DRPCProvisionerDaemonClient, error) {
- return createProvisionerDaemonClient(ctx, t, done, provisionerDaemonTestServer{}), nil
+ return createProvisionerDaemonClient(t, done, provisionerDaemonTestServer{}), nil
}, provisionerd.LocalProvisioners{})
require.NoError(t, closer.Close())
})
@@ -79,33 +79,6 @@ func TestProvisionerd(t *testing.T) {
require.NoError(t, closer.Close())
})
- t.Run("AcquireEmptyJob", func(t *testing.T) {
- // The provisioner daemon is supposed to skip the job acquire if
- // the job provided is empty. This is to show it successfully
- // tried to get a job, but none were available.
- t.Parallel()
- done := make(chan struct{})
- t.Cleanup(func() {
- close(done)
- })
- completeChan := make(chan struct{})
- closer := createProvisionerd(t, func(ctx context.Context) (proto.DRPCProvisionerDaemonClient, error) {
- acquireJobAttempt := 0
- return createProvisionerDaemonClient(ctx, t, done, provisionerDaemonTestServer{
- acquireJob: func(ctx context.Context, _ *proto.Empty) (*proto.AcquiredJob, error) {
- if acquireJobAttempt == 1 {
- close(completeChan)
- }
- acquireJobAttempt++
- return &proto.AcquiredJob{}, nil
- },
- updateJob: noopUpdateJob,
- }), nil
- }, provisionerd.LocalProvisioners{})
- require.Condition(t, closedWithin(completeChan, testutil.WaitShort))
- require.NoError(t, closer.Close())
- })
-
t.Run("CloseCancelsJob", func(t *testing.T) {
t.Parallel()
done := make(chan struct{})
@@ -118,9 +91,9 @@ func TestProvisionerd(t *testing.T) {
var closerMutex sync.Mutex
closerMutex.Lock()
closer = createProvisionerd(t, func(ctx context.Context) (proto.DRPCProvisionerDaemonClient, error) {
- return createProvisionerDaemonClient(ctx, t, done, provisionerDaemonTestServer{
- acquireJob: func(ctx context.Context, _ *proto.Empty) (*proto.AcquiredJob, error) {
- return &proto.AcquiredJob{
+ return createProvisionerDaemonClient(t, done, provisionerDaemonTestServer{
+ acquireJobWithCancel: func(stream proto.DRPCProvisionerDaemon_AcquireJobWithCancelStream) error {
+ err := stream.Send(&proto.AcquiredJob{
JobId: "test",
Provisioner: "someprovisioner",
TemplateSourceArchive: createTar(t, map[string]string{
@@ -131,7 +104,9 @@ func TestProvisionerd(t *testing.T) {
Metadata: &sdkproto.Metadata{},
},
},
- }, nil
+ })
+ assert.NoError(t, err)
+ return nil
},
updateJob: noopUpdateJob,
failJob: func(ctx context.Context, job *proto.FailedJob) (*proto.Empty, error) {
@@ -174,9 +149,9 @@ func TestProvisionerd(t *testing.T) {
)
closer := createProvisionerd(t, func(ctx context.Context) (proto.DRPCProvisionerDaemonClient, error) {
- return createProvisionerDaemonClient(ctx, t, done, provisionerDaemonTestServer{
- acquireJob: func(ctx context.Context, _ *proto.Empty) (*proto.AcquiredJob, error) {
- return &proto.AcquiredJob{
+ return createProvisionerDaemonClient(t, done, provisionerDaemonTestServer{
+ acquireJobWithCancel: func(stream proto.DRPCProvisionerDaemon_AcquireJobWithCancelStream) error {
+ err := stream.Send(&proto.AcquiredJob{
JobId: "test",
Provisioner: "someprovisioner",
TemplateSourceArchive: createTar(t, map[string]string{
@@ -187,7 +162,9 @@ func TestProvisionerd(t *testing.T) {
Metadata: &sdkproto.Metadata{},
},
},
- }, nil
+ })
+ assert.NoError(t, err)
+ return nil
},
updateJob: noopUpdateJob,
failJob: func(ctx context.Context, job *proto.FailedJob) (*proto.Empty, error) {
@@ -214,9 +191,9 @@ func TestProvisionerd(t *testing.T) {
)
closer := createProvisionerd(t, func(ctx context.Context) (proto.DRPCProvisionerDaemonClient, error) {
- return createProvisionerDaemonClient(ctx, t, done, provisionerDaemonTestServer{
- acquireJob: func(ctx context.Context, _ *proto.Empty) (*proto.AcquiredJob, error) {
- return &proto.AcquiredJob{
+ return createProvisionerDaemonClient(t, done, provisionerDaemonTestServer{
+ acquireJobWithCancel: func(stream proto.DRPCProvisionerDaemon_AcquireJobWithCancelStream) error {
+ err := stream.Send(&proto.AcquiredJob{
JobId: "test",
Provisioner: "someprovisioner",
TemplateSourceArchive: createTar(t, map[string]string{
@@ -227,7 +204,9 @@ func TestProvisionerd(t *testing.T) {
Metadata: &sdkproto.Metadata{},
},
},
- }, nil
+ })
+ assert.NoError(t, err)
+ return nil
},
updateJob: func(ctx context.Context, update *proto.UpdateJobRequest) (*proto.UpdateJobResponse, error) {
completeOnce.Do(func() { close(completeChan) })
@@ -260,36 +239,27 @@ func TestProvisionerd(t *testing.T) {
close(done)
})
var (
- didComplete atomic.Bool
- didLog atomic.Bool
- didAcquireJob atomic.Bool
- didReadme atomic.Bool
- completeChan = make(chan struct{})
- completeOnce sync.Once
+ didComplete atomic.Bool
+ didLog atomic.Bool
+ didReadme atomic.Bool
+ acq = newAcquireOne(t, &proto.AcquiredJob{
+ JobId: "test",
+ Provisioner: "someprovisioner",
+ TemplateSourceArchive: createTar(t, map[string]string{
+ "test.txt": "content",
+ provisionersdk.ReadmeFile: "# A cool template 😎\n",
+ }),
+ Type: &proto.AcquiredJob_TemplateImport_{
+ TemplateImport: &proto.AcquiredJob_TemplateImport{
+ Metadata: &sdkproto.Metadata{},
+ },
+ },
+ })
)
closer := createProvisionerd(t, func(ctx context.Context) (proto.DRPCProvisionerDaemonClient, error) {
- return createProvisionerDaemonClient(ctx, t, done, provisionerDaemonTestServer{
- acquireJob: func(ctx context.Context, _ *proto.Empty) (*proto.AcquiredJob, error) {
- if !didAcquireJob.CAS(false, true) {
- completeOnce.Do(func() { close(completeChan) })
- return &proto.AcquiredJob{}, nil
- }
-
- return &proto.AcquiredJob{
- JobId: "test",
- Provisioner: "someprovisioner",
- TemplateSourceArchive: createTar(t, map[string]string{
- "test.txt": "content",
- provisionersdk.ReadmeFile: "# A cool template 😎\n",
- }),
- Type: &proto.AcquiredJob_TemplateImport_{
- TemplateImport: &proto.AcquiredJob_TemplateImport{
- Metadata: &sdkproto.Metadata{},
- },
- },
- }, nil
- },
+ return createProvisionerDaemonClient(t, done, provisionerDaemonTestServer{
+ acquireJobWithCancel: acq.acquireWithCancel,
updateJob: func(ctx context.Context, update *proto.UpdateJobRequest) (*proto.UpdateJobResponse, error) {
if len(update.Logs) > 0 {
didLog.Store(true)
@@ -338,7 +308,7 @@ func TestProvisionerd(t *testing.T) {
}),
})
- require.Condition(t, closedWithin(completeChan, testutil.WaitShort))
+ require.Condition(t, closedWithin(acq.complete, testutil.WaitShort))
require.NoError(t, closer.Close())
assert.True(t, didLog.Load(), "should log some updates")
assert.True(t, didComplete.Load(), "should complete the job")
@@ -351,36 +321,26 @@ func TestProvisionerd(t *testing.T) {
close(done)
})
var (
- didComplete atomic.Bool
- didLog atomic.Bool
- didAcquireJob atomic.Bool
- completeChan = make(chan struct{})
- completeOnce sync.Once
-
- metadata = &sdkproto.Metadata{}
+ didComplete atomic.Bool
+ didLog atomic.Bool
+ metadata = &sdkproto.Metadata{}
+ acq = newAcquireOne(t, &proto.AcquiredJob{
+ JobId: "test",
+ Provisioner: "someprovisioner",
+ TemplateSourceArchive: createTar(t, map[string]string{
+ "test.txt": "content",
+ }),
+ Type: &proto.AcquiredJob_TemplateDryRun_{
+ TemplateDryRun: &proto.AcquiredJob_TemplateDryRun{
+ Metadata: metadata,
+ },
+ },
+ })
)
closer := createProvisionerd(t, func(ctx context.Context) (proto.DRPCProvisionerDaemonClient, error) {
- return createProvisionerDaemonClient(ctx, t, done, provisionerDaemonTestServer{
- acquireJob: func(ctx context.Context, _ *proto.Empty) (*proto.AcquiredJob, error) {
- if !didAcquireJob.CAS(false, true) {
- completeOnce.Do(func() { close(completeChan) })
- return &proto.AcquiredJob{}, nil
- }
-
- return &proto.AcquiredJob{
- JobId: "test",
- Provisioner: "someprovisioner",
- TemplateSourceArchive: createTar(t, map[string]string{
- "test.txt": "content",
- }),
- Type: &proto.AcquiredJob_TemplateDryRun_{
- TemplateDryRun: &proto.AcquiredJob_TemplateDryRun{
- Metadata: metadata,
- },
- },
- }, nil
- },
+ return createProvisionerDaemonClient(t, done, provisionerDaemonTestServer{
+ acquireJobWithCancel: acq.acquireWithCancel,
updateJob: func(ctx context.Context, update *proto.UpdateJobRequest) (*proto.UpdateJobResponse, error) {
if len(update.Logs) == 0 {
t.Log("provisionerDaemonTestServer: no log messages")
@@ -420,7 +380,7 @@ func TestProvisionerd(t *testing.T) {
}),
})
- require.Condition(t, closedWithin(completeChan, testutil.WaitShort))
+ require.Condition(t, closedWithin(acq.complete, testutil.WaitShort))
require.NoError(t, closer.Close())
assert.True(t, didLog.Load(), "should log some updates")
assert.True(t, didComplete.Load(), "should complete the job")
@@ -433,34 +393,25 @@ func TestProvisionerd(t *testing.T) {
close(done)
})
var (
- didComplete atomic.Bool
- didLog atomic.Bool
- didAcquireJob atomic.Bool
- completeChan = make(chan struct{})
- completeOnce sync.Once
+ didComplete atomic.Bool
+ didLog atomic.Bool
+ acq = newAcquireOne(t, &proto.AcquiredJob{
+ JobId: "test",
+ Provisioner: "someprovisioner",
+ TemplateSourceArchive: createTar(t, map[string]string{
+ "test.txt": "content",
+ }),
+ Type: &proto.AcquiredJob_WorkspaceBuild_{
+ WorkspaceBuild: &proto.AcquiredJob_WorkspaceBuild{
+ Metadata: &sdkproto.Metadata{},
+ },
+ },
+ })
)
closer := createProvisionerd(t, func(ctx context.Context) (proto.DRPCProvisionerDaemonClient, error) {
- return createProvisionerDaemonClient(ctx, t, done, provisionerDaemonTestServer{
- acquireJob: func(ctx context.Context, _ *proto.Empty) (*proto.AcquiredJob, error) {
- if !didAcquireJob.CAS(false, true) {
- completeOnce.Do(func() { close(completeChan) })
- return &proto.AcquiredJob{}, nil
- }
-
- return &proto.AcquiredJob{
- JobId: "test",
- Provisioner: "someprovisioner",
- TemplateSourceArchive: createTar(t, map[string]string{
- "test.txt": "content",
- }),
- Type: &proto.AcquiredJob_WorkspaceBuild_{
- WorkspaceBuild: &proto.AcquiredJob_WorkspaceBuild{
- Metadata: &sdkproto.Metadata{},
- },
- },
- }, nil
- },
+ return createProvisionerDaemonClient(t, done, provisionerDaemonTestServer{
+ acquireJobWithCancel: acq.acquireWithCancel,
updateJob: func(ctx context.Context, update *proto.UpdateJobRequest) (*proto.UpdateJobResponse, error) {
if len(update.Logs) != 0 {
didLog.Store(true)
@@ -491,7 +442,7 @@ func TestProvisionerd(t *testing.T) {
},
}),
})
- require.Condition(t, closedWithin(completeChan, testutil.WaitShort))
+ require.Condition(t, closedWithin(acq.complete, testutil.WaitShort))
require.NoError(t, closer.Close())
assert.True(t, didLog.Load(), "should log some updates")
assert.True(t, didComplete.Load(), "should complete the job")
@@ -504,35 +455,26 @@ func TestProvisionerd(t *testing.T) {
close(done)
})
var (
- didComplete atomic.Bool
- didLog atomic.Bool
- didAcquireJob atomic.Bool
- didFail atomic.Bool
- completeChan = make(chan struct{})
- completeOnce sync.Once
+ didComplete atomic.Bool
+ didLog atomic.Bool
+ didFail atomic.Bool
+ acq = newAcquireOne(t, &proto.AcquiredJob{
+ JobId: "test",
+ Provisioner: "someprovisioner",
+ TemplateSourceArchive: createTar(t, map[string]string{
+ "test.txt": "content",
+ }),
+ Type: &proto.AcquiredJob_WorkspaceBuild_{
+ WorkspaceBuild: &proto.AcquiredJob_WorkspaceBuild{
+ Metadata: &sdkproto.Metadata{},
+ },
+ },
+ })
)
closer := createProvisionerd(t, func(ctx context.Context) (proto.DRPCProvisionerDaemonClient, error) {
- return createProvisionerDaemonClient(ctx, t, done, provisionerDaemonTestServer{
- acquireJob: func(ctx context.Context, _ *proto.Empty) (*proto.AcquiredJob, error) {
- if !didAcquireJob.CAS(false, true) {
- completeOnce.Do(func() { close(completeChan) })
- return &proto.AcquiredJob{}, nil
- }
-
- return &proto.AcquiredJob{
- JobId: "test",
- Provisioner: "someprovisioner",
- TemplateSourceArchive: createTar(t, map[string]string{
- "test.txt": "content",
- }),
- Type: &proto.AcquiredJob_WorkspaceBuild_{
- WorkspaceBuild: &proto.AcquiredJob_WorkspaceBuild{
- Metadata: &sdkproto.Metadata{},
- },
- },
- }, nil
- },
+ return createProvisionerDaemonClient(t, done, provisionerDaemonTestServer{
+ acquireJobWithCancel: acq.acquireWithCancel,
updateJob: func(ctx context.Context, update *proto.UpdateJobRequest) (*proto.UpdateJobResponse, error) {
if len(update.Logs) != 0 {
didLog.Store(true)
@@ -591,7 +533,7 @@ func TestProvisionerd(t *testing.T) {
},
}),
})
- require.Condition(t, closedWithin(completeChan, testutil.WaitShort))
+ require.Condition(t, closedWithin(acq.complete, testutil.WaitShort))
require.NoError(t, closer.Close())
assert.True(t, didLog.Load(), "should log some updates")
assert.False(t, didComplete.Load(), "should not complete the job")
@@ -605,34 +547,25 @@ func TestProvisionerd(t *testing.T) {
close(done)
})
var (
- didFail atomic.Bool
- didAcquireJob atomic.Bool
- completeChan = make(chan struct{})
- completeOnce sync.Once
+ didFail atomic.Bool
+ acq = newAcquireOne(t, &proto.AcquiredJob{
+ JobId: "test",
+ Provisioner: "someprovisioner",
+ TemplateSourceArchive: createTar(t, map[string]string{
+ "test.txt": "content",
+ }),
+ Type: &proto.AcquiredJob_WorkspaceBuild_{
+ WorkspaceBuild: &proto.AcquiredJob_WorkspaceBuild{
+ Metadata: &sdkproto.Metadata{},
+ },
+ },
+ })
)
closer := createProvisionerd(t, func(ctx context.Context) (proto.DRPCProvisionerDaemonClient, error) {
- return createProvisionerDaemonClient(ctx, t, done, provisionerDaemonTestServer{
- acquireJob: func(ctx context.Context, _ *proto.Empty) (*proto.AcquiredJob, error) {
- if !didAcquireJob.CAS(false, true) {
- completeOnce.Do(func() { close(completeChan) })
- return &proto.AcquiredJob{}, nil
- }
-
- return &proto.AcquiredJob{
- JobId: "test",
- Provisioner: "someprovisioner",
- TemplateSourceArchive: createTar(t, map[string]string{
- "test.txt": "content",
- }),
- Type: &proto.AcquiredJob_WorkspaceBuild_{
- WorkspaceBuild: &proto.AcquiredJob_WorkspaceBuild{
- Metadata: &sdkproto.Metadata{},
- },
- },
- }, nil
- },
- updateJob: noopUpdateJob,
+ return createProvisionerDaemonClient(t, done, provisionerDaemonTestServer{
+ acquireJobWithCancel: acq.acquireWithCancel,
+ updateJob: noopUpdateJob,
failJob: func(ctx context.Context, job *proto.FailedJob) (*proto.Empty, error) {
didFail.Store(true)
return &proto.Empty{}, nil
@@ -661,7 +594,7 @@ func TestProvisionerd(t *testing.T) {
},
}),
})
- require.Condition(t, closedWithin(completeChan, testutil.WaitShort))
+ require.Condition(t, closedWithin(acq.complete, testutil.WaitShort))
require.NoError(t, closer.Close())
assert.True(t, didFail.Load(), "should fail the job")
})
@@ -677,9 +610,9 @@ func TestProvisionerd(t *testing.T) {
updateChan := make(chan struct{})
completeChan := make(chan struct{})
server := createProvisionerd(t, func(ctx context.Context) (proto.DRPCProvisionerDaemonClient, error) {
- return createProvisionerDaemonClient(ctx, t, done, provisionerDaemonTestServer{
- acquireJob: func(ctx context.Context, _ *proto.Empty) (*proto.AcquiredJob, error) {
- return &proto.AcquiredJob{
+ return createProvisionerDaemonClient(t, done, provisionerDaemonTestServer{
+ acquireJobWithCancel: func(stream proto.DRPCProvisionerDaemon_AcquireJobWithCancelStream) error {
+ err := stream.Send(&proto.AcquiredJob{
JobId: "test",
Provisioner: "someprovisioner",
TemplateSourceArchive: createTar(t, map[string]string{
@@ -690,7 +623,9 @@ func TestProvisionerd(t *testing.T) {
Metadata: &sdkproto.Metadata{},
},
},
- }, nil
+ })
+ assert.NoError(t, err)
+ return nil
},
updateJob: func(ctx context.Context, update *proto.UpdateJobRequest) (*proto.UpdateJobResponse, error) {
if len(update.Logs) > 0 {
@@ -755,9 +690,9 @@ func TestProvisionerd(t *testing.T) {
updateChan := make(chan struct{})
completeChan := make(chan struct{})
server := createProvisionerd(t, func(ctx context.Context) (proto.DRPCProvisionerDaemonClient, error) {
- return createProvisionerDaemonClient(ctx, t, done, provisionerDaemonTestServer{
- acquireJob: func(ctx context.Context, _ *proto.Empty) (*proto.AcquiredJob, error) {
- return &proto.AcquiredJob{
+ return createProvisionerDaemonClient(t, done, provisionerDaemonTestServer{
+ acquireJobWithCancel: func(stream proto.DRPCProvisionerDaemon_AcquireJobWithCancelStream) error {
+ err := stream.Send(&proto.AcquiredJob{
JobId: "test",
Provisioner: "someprovisioner",
TemplateSourceArchive: createTar(t, map[string]string{
@@ -768,7 +703,9 @@ func TestProvisionerd(t *testing.T) {
Metadata: &sdkproto.Metadata{},
},
},
- }, nil
+ })
+ assert.NoError(t, err)
+ return nil
},
updateJob: func(ctx context.Context, update *proto.UpdateJobRequest) (*proto.UpdateJobResponse, error) {
resp := &proto.UpdateJobResponse{}
@@ -825,6 +762,9 @@ func TestProvisionerd(t *testing.T) {
})
require.Condition(t, closedWithin(updateChan, testutil.WaitShort))
require.Condition(t, closedWithin(completeChan, testutil.WaitShort))
+ ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitShort)
+ defer cancel()
+ require.NoError(t, server.Shutdown(ctx))
require.NoError(t, server.Close())
})
@@ -844,12 +784,9 @@ func TestProvisionerd(t *testing.T) {
completeOnce sync.Once
)
server := createProvisionerd(t, func(ctx context.Context) (proto.DRPCProvisionerDaemonClient, error) {
- client := createProvisionerDaemonClient(ctx, t, done, provisionerDaemonTestServer{
- acquireJob: func(ctx context.Context, _ *proto.Empty) (*proto.AcquiredJob, error) {
- if second.Load() {
- return &proto.AcquiredJob{}, nil
- }
- return &proto.AcquiredJob{
+ client := createProvisionerDaemonClient(t, done, provisionerDaemonTestServer{
+ acquireJobWithCancel: func(stream proto.DRPCProvisionerDaemon_AcquireJobWithCancelStream) error {
+ job := &proto.AcquiredJob{
JobId: "test",
Provisioner: "someprovisioner",
TemplateSourceArchive: createTar(t, map[string]string{
@@ -860,7 +797,15 @@ func TestProvisionerd(t *testing.T) {
Metadata: &sdkproto.Metadata{},
},
},
- }, nil
+ }
+ if second.Load() {
+ job = &proto.AcquiredJob{}
+ _, err := stream.Recv()
+ assert.NoError(t, err)
+ }
+ err := stream.Send(job)
+ assert.NoError(t, err)
+ return nil
},
updateJob: func(ctx context.Context, update *proto.UpdateJobRequest) (*proto.UpdateJobResponse, error) {
return &proto.UpdateJobResponse{}, nil
@@ -908,6 +853,9 @@ func TestProvisionerd(t *testing.T) {
}),
})
require.Condition(t, closedWithin(completeChan, testutil.WaitShort))
+ ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitShort)
+ defer cancel()
+ require.NoError(t, server.Shutdown(ctx))
require.NoError(t, server.Close())
})
@@ -927,13 +875,15 @@ func TestProvisionerd(t *testing.T) {
completeOnce sync.Once
)
server := createProvisionerd(t, func(ctx context.Context) (proto.DRPCProvisionerDaemonClient, error) {
- client := createProvisionerDaemonClient(ctx, t, done, provisionerDaemonTestServer{
- acquireJob: func(ctx context.Context, _ *proto.Empty) (*proto.AcquiredJob, error) {
+ client := createProvisionerDaemonClient(t, done, provisionerDaemonTestServer{
+ acquireJobWithCancel: func(stream proto.DRPCProvisionerDaemon_AcquireJobWithCancelStream) error {
if second.Load() {
completeOnce.Do(func() { close(completeChan) })
- return &proto.AcquiredJob{}, nil
+ _, err := stream.Recv()
+ assert.NoError(t, err)
+ return nil
}
- return &proto.AcquiredJob{
+ job := &proto.AcquiredJob{
JobId: "test",
Provisioner: "someprovisioner",
TemplateSourceArchive: createTar(t, map[string]string{
@@ -944,7 +894,10 @@ func TestProvisionerd(t *testing.T) {
Metadata: &sdkproto.Metadata{},
},
},
- }, nil
+ }
+ err := stream.Send(job)
+ assert.NoError(t, err)
+ return nil
},
failJob: func(ctx context.Context, job *proto.FailedJob) (*proto.Empty, error) {
return nil, yamux.ErrSessionShutdown
@@ -990,6 +943,10 @@ func TestProvisionerd(t *testing.T) {
}),
})
require.Condition(t, closedWithin(completeChan, testutil.WaitShort))
+ t.Log("completeChan closed")
+ ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitShort)
+ defer cancel()
+ require.NoError(t, server.Shutdown(ctx))
require.NoError(t, server.Close())
})
@@ -1006,17 +963,21 @@ func TestProvisionerd(t *testing.T) {
completeOnce := sync.Once{}
server := createProvisionerd(t, func(ctx context.Context) (proto.DRPCProvisionerDaemonClient, error) {
- return createProvisionerDaemonClient(ctx, t, done, provisionerDaemonTestServer{
- acquireJob: func(ctx context.Context, _ *proto.Empty) (*proto.AcquiredJob, error) {
+ return createProvisionerDaemonClient(t, done, provisionerDaemonTestServer{
+ acquireJobWithCancel: func(stream proto.DRPCProvisionerDaemon_AcquireJobWithCancelStream) error {
m.Lock()
defer m.Unlock()
logger.Info(ctx, "provisioner stage: AcquiredJob")
if len(ops) > 0 {
- return &proto.AcquiredJob{}, nil
+ _, err := stream.Recv()
+ assert.NoError(t, err)
+ err = stream.Send(&proto.AcquiredJob{})
+ assert.NoError(t, err)
+ return nil
}
ops = append(ops, "AcquireJob")
- return &proto.AcquiredJob{
+ err := stream.Send(&proto.AcquiredJob{
JobId: "test",
Provisioner: "someprovisioner",
TemplateSourceArchive: createTar(t, map[string]string{
@@ -1027,7 +988,9 @@ func TestProvisionerd(t *testing.T) {
Metadata: &sdkproto.Metadata{},
},
},
- }, nil
+ })
+ assert.NoError(t, err)
+ return nil
},
updateJob: func(ctx context.Context, update *proto.UpdateJobRequest) (*proto.UpdateJobResponse, error) {
m.Lock()
@@ -1076,6 +1039,9 @@ func TestProvisionerd(t *testing.T) {
}),
})
require.Condition(t, closedWithin(completeChan, testutil.WaitShort))
+ ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitShort)
+ defer cancel()
+ require.NoError(t, server.Shutdown(ctx))
require.NoError(t, server.Close())
assert.Equal(t, ops[len(ops)-1], "CompleteJob")
assert.Contains(t, ops[0:len(ops)-1], "Log: Cleaning Up | ")
@@ -1105,12 +1071,14 @@ func createTar(t *testing.T, files map[string]string) []byte {
// Creates a provisionerd implementation with the provided dialer and provisioners.
func createProvisionerd(t *testing.T, dialer provisionerd.Dialer, connector provisionerd.LocalProvisioners) *provisionerd.Server {
server := provisionerd.New(dialer, &provisionerd.Options{
- Logger: slogtest.Make(t, &slogtest.Options{IgnoreErrors: true}).Named("provisionerd").Leveled(slog.LevelDebug),
- JobPollInterval: 50 * time.Millisecond,
- UpdateInterval: 50 * time.Millisecond,
- Connector: connector,
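+ // The poll interval is gone: jobs now arrive over the
+ // AcquireJobWithCancel stream instead of being polled for.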
+ Logger: slogtest.Make(t, &slogtest.Options{IgnoreErrors: true}).Named("provisionerd").Leveled(slog.LevelDebug),
+ UpdateInterval: 50 * time.Millisecond,
+ Connector: connector,
})
t.Cleanup(func() {
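+ // Shutdown drains any in-flight job before Close tears down connections.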
+ ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitShort)
+ defer cancel()
+ _ = server.Shutdown(ctx)
_ = server.Close()
})
return server
@@ -1118,7 +1086,7 @@ func createProvisionerd(t *testing.T, dialer provisionerd.Dialer, connector prov
// Creates a provisionerd protobuf client that's connected
// to the server implementation provided.
-func createProvisionerDaemonClient(ctx context.Context, t *testing.T, done <-chan struct{}, server provisionerDaemonTestServer) proto.DRPCProvisionerDaemonClient {
+func createProvisionerDaemonClient(t *testing.T, done <-chan struct{}, server provisionerDaemonTestServer) proto.DRPCProvisionerDaemonClient {
t.Helper()
if server.failJob == nil {
// Default to asserting the error from the failure, otherwise
@@ -1137,7 +1105,7 @@ func createProvisionerDaemonClient(ctx context.Context, t *testing.T, done <-cha
err := proto.DRPCRegisterProvisionerDaemon(mux, &server)
require.NoError(t, err)
srv := drpcserver.New(mux)
- ctx, cancelFunc := context.WithCancel(ctx)
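+ // The client no longer threads the dialer's context through; the
+ // test server manages its own lifetime.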
+ ctx, cancelFunc := context.WithCancel(context.Background())
closed := make(chan struct{})
go func() {
defer close(closed)
@@ -1148,31 +1116,13 @@ func createProvisionerDaemonClient(ctx context.Context, t *testing.T, done <-cha
<-closed
select {
case <-done:
- // It's possible to get unlucky since the dialer is run in a retry in a goroutine.
- // The following can occur:
- // 1. The provisionerd.connect goroutine checks if we're closed prior to attempting to establish a connection
- // with coderd, sees that we're not.
- // 2. A test closes the server.
- // 3. The provisionerd.connect goroutine calls the dialer to establish a connection. This
- // function detects that someone has tried to create a client after the test finishes.
- if ctx.Err() == nil {
- t.Error("createProvisionerDaemonClient cleanup after test was done!")
- }
+ t.Error("createProvisionerDaemonClient cleanup after test was done!")
default:
}
})
select {
case <-done:
- // It's possible to get unlucky since the dialer is run in a retry in a goroutine.
- // The following can occur:
- // 1. The provisionerd.connect goroutine checks if we're closed prior to attempting to establish a connection
- // with coderd, sees that we're not.
- // 2. A test closes the server.
- // 3. The provisionerd.connect goroutine calls the dialer to establish a connection. This
- // function detects that someone has tried to create a client after the test finishes.
- if ctx.Err() == nil {
- t.Error("createProvisionerDaemonClient cleanup after test was done!")
- }
+ t.Error("called createProvisionerDaemonClient after test was done!")
default:
}
return proto.NewDRPCProvisionerDaemonClient(clientPipe)
@@ -1235,15 +1185,25 @@ func (p *provisionerTestServer) Apply(s *provisionersdk.Session, r *sdkproto.App
// Fulfills the protobuf interface for a ProvisionerDaemon with
// passable functions for dynamic functionality.
type provisionerDaemonTestServer struct {
- acquireJob func(ctx context.Context, _ *proto.Empty) (*proto.AcquiredJob, error)
- commitQuota func(ctx context.Context, com *proto.CommitQuotaRequest) (*proto.CommitQuotaResponse, error)
- updateJob func(ctx context.Context, update *proto.UpdateJobRequest) (*proto.UpdateJobResponse, error)
- failJob func(ctx context.Context, job *proto.FailedJob) (*proto.Empty, error)
- completeJob func(ctx context.Context, job *proto.CompletedJob) (*proto.Empty, error)
+ acquireJobWithCancel func(stream proto.DRPCProvisionerDaemon_AcquireJobWithCancelStream) error
+ commitQuota func(ctx context.Context, com *proto.CommitQuotaRequest) (*proto.CommitQuotaResponse, error)
+ updateJob func(ctx context.Context, update *proto.UpdateJobRequest) (*proto.UpdateJobResponse, error)
+ failJob func(ctx context.Context, job *proto.FailedJob) (*proto.Empty, error)
+ completeJob func(ctx context.Context, job *proto.CompletedJob) (*proto.Empty, error)
}
-func (p *provisionerDaemonTestServer) AcquireJob(ctx context.Context, empty *proto.Empty) (*proto.AcquiredJob, error) {
- return p.acquireJob(ctx, empty)
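+// The unary AcquireJob is kept only to satisfy the interface; the
+// streaming variant below replaces it.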
+func (*provisionerDaemonTestServer) AcquireJob(context.Context, *proto.Empty) (*proto.AcquiredJob, error) {
+ return nil, xerrors.New("deprecated!")
+}
+
+func (p *provisionerDaemonTestServer) AcquireJobWithCancel(stream proto.DRPCProvisionerDaemon_AcquireJobWithCancelStream) error {
+ if p.acquireJobWithCancel != nil {
+ return p.acquireJobWithCancel(stream)
+ }
+ // Default behavior: block on Recv until the client cancels, then send an empty job.
+ _, _ = stream.Recv()
+ _ = stream.Send(&proto.AcquiredJob{})
+ return nil
}
func (p *provisionerDaemonTestServer) CommitQuota(ctx context.Context, com *proto.CommitQuotaRequest) (*proto.CommitQuotaResponse, error) {
@@ -1266,3 +1226,38 @@ func (p *provisionerDaemonTestServer) FailJob(ctx context.Context, job *proto.Fa
func (p *provisionerDaemonTestServer) CompleteJob(ctx context.Context, job *proto.CompletedJob) (*proto.Empty, error) {
return p.completeJob(ctx, job)
}
+
+// acquireOne provides an acquire function that returns a single provisioner job; subsequent calls block until canceled.
+// The complete channel is closed on the second call.
+type acquireOne struct {
+ t *testing.T
+ mu sync.Mutex
+ job *proto.AcquiredJob
+ called int
+ complete chan struct{}
+}
+
+func newAcquireOne(t *testing.T, job *proto.AcquiredJob) *acquireOne {
+ return &acquireOne{
+ t: t,
+ job: job,
+ complete: make(chan struct{}),
+ }
+}
+
+func (a *acquireOne) acquireWithCancel(stream proto.DRPCProvisionerDaemon_AcquireJobWithCancelStream) error {
+ a.mu.Lock()
+ defer a.mu.Unlock()
+ a.called++
+ if a.called == 2 {
+ close(a.complete)
+ }
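+ // After the first call, emulate the live server: block until the
+ // client cancels, then reply with an empty job.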
+ if a.called > 1 {
+ _, _ = stream.Recv()
+ _ = stream.Send(&proto.AcquiredJob{})
+ return nil
+ }
+ err := stream.Send(a.job)
+ assert.NoError(a.t, err)
+ return nil
+}
diff --git a/site/e2e/playwright.config.ts b/site/e2e/playwright.config.ts
index 26eb0a7d42f9d..b89d3a60d9338 100644
--- a/site/e2e/playwright.config.ts
+++ b/site/e2e/playwright.config.ts
@@ -49,7 +49,6 @@ export default defineConfig({
`--dangerous-disable-rate-limits ` +
`--provisioner-daemons 10 ` +
`--provisioner-daemons-echo ` +
- `--provisioner-daemon-poll-interval 50ms ` +
`--pprof-enable`,
env: {
...process.env,