From 00dc0befb21accb5850b480f9d2c56f645f5983b Mon Sep 17 00:00:00 2001 From: Viacheslav Vasilyev Date: Wed, 14 Jan 2026 23:29:11 +0000 Subject: [PATCH 1/2] Merge concurrent matrix output --- pkg/model/workflow.go | 13 ++ pkg/runner/matrix_outputs_test.go | 197 ++++++++++++++++++++++++++++++ pkg/runner/run_context.go | 29 ++++- 3 files changed, 237 insertions(+), 2 deletions(-) create mode 100644 pkg/runner/matrix_outputs_test.go diff --git a/pkg/model/workflow.go b/pkg/model/workflow.go index 8dd5f6634db..3a7a426e3ad 100644 --- a/pkg/model/workflow.go +++ b/pkg/model/workflow.go @@ -8,6 +8,7 @@ import ( "regexp" "strconv" "strings" + "sync" "github.com/nektos/act/pkg/common" "github.com/nektos/act/pkg/schema" @@ -206,10 +207,22 @@ type Job struct { RawContainer yaml.Node `yaml:"container"` Defaults Defaults `yaml:"defaults"` Outputs map[string]string `yaml:"outputs"` + RawOutputs map[string]string // Stores original output templates for matrix jobs Uses string `yaml:"uses"` With map[string]interface{} `yaml:"with"` RawSecrets yaml.Node `yaml:"secrets"` Result string + outputsMu sync.Mutex // Protects concurrent access to Outputs from parallel matrix jobs +} + +// Lock locks the job's outputs mutex to allow safe concurrent access from parallel matrix jobs +func (j *Job) Lock() { + j.outputsMu.Lock() +} + +// Unlock unlocks the job's outputs mutex +func (j *Job) Unlock() { + j.outputsMu.Unlock() } // Strategy for the job diff --git a/pkg/runner/matrix_outputs_test.go b/pkg/runner/matrix_outputs_test.go new file mode 100644 index 00000000000..4a3a01b899a --- /dev/null +++ b/pkg/runner/matrix_outputs_test.go @@ -0,0 +1,197 @@ +package runner + +import ( + "context" + "math/rand" + "sync" + "testing" + "time" + + "github.com/nektos/act/pkg/model" + "github.com/stretchr/testify/assert" +) + +// TestMatrixJobOutputsMerge tests that outputs from parallel matrix jobs are properly merged +func TestMatrixJobOutputsMerge(t *testing.T) { + // Create a job with outputs that will be set by different matrix runs + job := &model.Job{ + Outputs: map[string]string{ + "v1var": "${{ steps.step1.outputs.v1 }}", + "v2var": "${{ steps.step1.outputs.v2 }}", + }, + } + + workflow := &model.Workflow{ + Jobs: map[string]*model.Job{ + "test": job, + }, + } + + plan := &model.Plan{ + Stages: []*model.Stage{ + { + Runs: []*model.Run{ + { + Workflow: workflow, + JobID: "test", + }, + }, + }, + }, + } + + // Create first run context (matrix.foo = v1) + rc1 := &RunContext{ + Config: &Config{}, + Run: plan.Stages[0].Runs[0], + EventJSON: "{}", + StepResults: map[string]*model.StepResult{ + "step1": { + Outputs: map[string]string{ + "v1": "v1", // Only v1 output is set + }, + }, + }, + } + rc1.ExprEval = rc1.NewExpressionEvaluator(context.Background()) + + // Create second run context (matrix.foo = v2) + rc2 := &RunContext{ + Config: &Config{}, + Run: plan.Stages[0].Runs[0], + EventJSON: "{}", + StepResults: map[string]*model.StepResult{ + "step1": { + Outputs: map[string]string{ + "v2": "v2", // Only v2 output is set + }, + }, + }, + } + rc2.ExprEval = rc2.NewExpressionEvaluator(context.Background()) + + // Simulate parallel execution by calling interpolateOutputs from both contexts + ctx := context.Background() + + // First matrix run sets v1var + err := rc1.interpolateOutputs()(ctx) + assert.NoError(t, err) + + // Second matrix run sets v2var + err = rc2.interpolateOutputs()(ctx) + assert.NoError(t, err) + + // Verify that both outputs are set (merged from both matrix runs) + assert.Equal(t, "v1", job.Outputs["v1var"], 
"v1var should be set from first matrix run") + assert.Equal(t, "v2", job.Outputs["v2var"], "v2var should be set from second matrix run") +} + +// TestMatrixJobOutputsParallelWithDelay tests that outputs from parallel matrix jobs +// with random delays are properly merged without race conditions +func TestMatrixJobOutputsParallelWithDelay(t *testing.T) { + // Create a job with outputs that will be set by different matrix runs + job := &model.Job{ + Outputs: map[string]string{ + "v1var": "${{ steps.step1.outputs.v1 }}", + "v2var": "${{ steps.step1.outputs.v2 }}", + }, + } + + workflow := &model.Workflow{ + Jobs: map[string]*model.Job{ + "test": job, + }, + } + + plan := &model.Plan{ + Stages: []*model.Stage{ + { + Runs: []*model.Run{ + { + Workflow: workflow, + JobID: "test", + }, + }, + }, + }, + } + + // Create contexts for two parallel matrix runs + contexts := []*RunContext{ + // Matrix run 1: sets v1 output + { + Config: &Config{}, + Run: plan.Stages[0].Runs[0], + EventJSON: "{}", + StepResults: map[string]*model.StepResult{ + "step1": { + Outputs: map[string]string{ + "v1": "v1", + }, + }, + }, + }, + // Matrix run 2: sets v2 output + { + Config: &Config{}, + Run: plan.Stages[0].Runs[0], + EventJSON: "{}", + StepResults: map[string]*model.StepResult{ + "step1": { + Outputs: map[string]string{ + "v2": "v2", + }, + }, + }, + }, + } + + // Initialize expression evaluators + for _, rc := range contexts { + rc.ExprEval = rc.NewExpressionEvaluator(context.Background()) + } + + // Use WaitGroup to ensure both goroutines complete + var wg sync.WaitGroup + var errors []error + var errorsMu sync.Mutex + + ctx := context.Background() + + // Launch parallel matrix runs with random delays + for i, rc := range contexts { + wg.Add(1) + go func(index int, runContext *RunContext) { + defer wg.Done() + + // Random delay between 5 and 10 seconds + delaySeconds := 5 + rand.Intn(6) + t.Logf("Matrix run %d: sleeping for %d seconds", index+1, delaySeconds) + time.Sleep(time.Duration(delaySeconds) * time.Second) + + // Interpolate outputs + t.Logf("Matrix run %d: interpolating outputs", index+1) + if err := runContext.interpolateOutputs()(ctx); err != nil { + errorsMu.Lock() + errors = append(errors, err) + errorsMu.Unlock() + } + t.Logf("Matrix run %d: completed", index+1) + }(i, rc) + } + + // Wait for all goroutines to complete + wg.Wait() + + // Check for errors + assert.Empty(t, errors, "No errors should occur during parallel execution") + + // Verify that both outputs are set (merged from both matrix runs) + assert.Equal(t, "v1", job.Outputs["v1var"], "v1var should be set from first matrix run") + assert.Equal(t, "v2", job.Outputs["v2var"], "v2var should be set from second matrix run") + + // Verify RawOutputs are preserved + assert.NotNil(t, job.RawOutputs, "RawOutputs should be initialized") + assert.Equal(t, "${{ steps.step1.outputs.v1 }}", job.RawOutputs["v1var"], "RawOutputs should preserve original templates") + assert.Equal(t, "${{ steps.step1.outputs.v2 }}", job.RawOutputs["v2var"], "RawOutputs should preserve original templates") +} diff --git a/pkg/runner/run_context.go b/pkg/runner/run_context.go index 5d4277123ed..3936f8a2ba8 100644 --- a/pkg/runner/run_context.go +++ b/pkg/runner/run_context.go @@ -650,11 +650,36 @@ func (rc *RunContext) ActionCacheDir() string { // Interpolate outputs after a job is done func (rc *RunContext) interpolateOutputs() common.Executor { return func(ctx context.Context) error { + job := rc.Run.Job() ee := rc.NewExpressionEvaluator(ctx) - for k, v := range 
rc.Run.Job().Outputs { + + // For matrix jobs, we need to safely merge outputs from parallel runs + job.Lock() + defer job.Unlock() + + // Initialize RawOutputs with original templates on first run + if job.RawOutputs == nil && len(job.Outputs) > 0 { + job.RawOutputs = make(map[string]string) + for k, v := range job.Outputs { + job.RawOutputs[k] = v + } + } + + // Use RawOutputs as the source for interpolation if available + outputTemplates := job.Outputs + if job.RawOutputs != nil { + outputTemplates = job.RawOutputs + } + + for k, v := range outputTemplates { interpolated := ee.Interpolate(ctx, v) if v != interpolated { - rc.Run.Job().Outputs[k] = interpolated + currentValue := job.Outputs[k] + // For matrix jobs: only set non-empty values, and only if current value is empty or is the original template + // This allows multiple matrix runs to contribute their non-empty outputs + if interpolated != "" && (currentValue == "" || currentValue == v) { + job.Outputs[k] = interpolated + } } } return nil From af90cc99bdd8190762dea57319d8a99efa53f432 Mon Sep 17 00:00:00 2001 From: Viacheslav Vasilyev Date: Mon, 19 Jan 2026 19:41:24 +0000 Subject: [PATCH 2/2] Correctly handle fail-fast --- pkg/common/executor.go | 110 ++++++++++++++ pkg/common/executor_test.go | 238 ++++++++++++++++++++++++++++++ pkg/common/job_error.go | 20 +++ pkg/model/workflow.go | 2 +- pkg/runner/job_executor.go | 9 ++ pkg/runner/matrix_outputs_test.go | 24 +-- pkg/runner/runner.go | 30 +++- 7 files changed, 417 insertions(+), 16 deletions(-) diff --git a/pkg/common/executor.go b/pkg/common/executor.go index 9707bec1b60..e8503430a40 100644 --- a/pkg/common/executor.go +++ b/pkg/common/executor.go @@ -26,6 +26,21 @@ func Warningf(format string, args ...interface{}) Warning { return w } +// FailFastError wraps a context cancellation error with a more informative message +type FailFastError struct { + Err error +} + +// Error returns the error message +func (e FailFastError) Error() string { + return "Job cancelled (fail-fast)" +} + +// Unwrap allows errors.Is and errors.As to work +func (e FailFastError) Unwrap() error { + return e.Err +} + // Executor define contract for the steps of a workflow type Executor func(ctx context.Context) error @@ -131,6 +146,101 @@ func NewParallelExecutor(parallel int, executors ...Executor) Executor { } } +// NewFailFastParallelExecutor creates a parallel executor that respects fail-fast semantics +// When fail-fast is enabled via context, it will cancel remaining work on first error +func NewFailFastParallelExecutor(parallel int, executors ...Executor) Executor { + return func(ctx context.Context) error { + failFast := IsFailFast(ctx) + + // If fail-fast is disabled, use the standard parallel executor + if !failFast { + return NewParallelExecutor(parallel, executors...)(ctx) + } + + // Fail-fast mode: create a cancellable context for workers + workCtx, cancelWork := context.WithCancel(ctx) + defer cancelWork() + + work := make(chan Executor, len(executors)) + errs := make(chan error, len(executors)) + + if 1 > parallel { + log.Debugf("Parallel tasks (%d) below minimum, setting to 1", parallel) + parallel = 1 + } + + // Start worker goroutines + for i := 0; i < parallel; i++ { + go func(work <-chan Executor, errs chan<- error) { + for executor := range work { + // Check if work context was cancelled (fail-fast triggered) + if workCtx.Err() != nil { + errs <- FailFastError{Err: workCtx.Err()} + continue + } + errs <- executor(workCtx) + } + }(work, errs) + } + + // Queue work and monitor for 
+	// failures
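+	// Invariant: each executor accounts for exactly one send on errs (its real result, or a FailFastError if it never ran), so the collection loop below reads len(executors) values without blocking.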
NewFailFastParallelExecutor(2, executors...)(ctx) + + assert.Error(err) + mu.Lock() + assert.Equal(3, executedCount, "all executors should run when fail-fast is false") + mu.Unlock() +} + +func TestNewFailFastParallelExecutorNoFailFastInContext(t *testing.T) { + assert := assert.New(t) + + ctx := context.Background() + + executedCount := 0 + var mu sync.Mutex + + executors := []Executor{ + func(ctx context.Context) error { + mu.Lock() + executedCount++ + mu.Unlock() + return nil + }, + func(ctx context.Context) error { + mu.Lock() + executedCount++ + mu.Unlock() + return fmt.Errorf("intentional failure") + }, + func(ctx context.Context) error { + mu.Lock() + executedCount++ + mu.Unlock() + return nil + }, + } + + err := NewFailFastParallelExecutor(2, executors...)(ctx) + + assert.Error(err) + mu.Lock() + assert.Equal(3, executedCount, "all executors should run when fail-fast not in context") + mu.Unlock() +} + +func TestNewFailFastParallelExecutorWithWarnings(t *testing.T) { + assert := assert.New(t) + + ctx := WithFailFast(context.Background(), true) + + executedCount := 0 + var mu sync.Mutex + + // Warnings should not trigger fail-fast + executors := []Executor{ + func(ctx context.Context) error { + mu.Lock() + executedCount++ + mu.Unlock() + return Warningf("this is a warning") + }, + func(ctx context.Context) error { + mu.Lock() + executedCount++ + mu.Unlock() + return nil + }, + func(ctx context.Context) error { + mu.Lock() + executedCount++ + mu.Unlock() + return nil + }, + } + + err := NewFailFastParallelExecutor(2, executors...)(ctx) + + // Warnings don't cause executor to fail + assert.NoError(err) + mu.Lock() + assert.Equal(3, executedCount, "all executors should run when only warnings occur") + mu.Unlock() +} + +func TestNewFailFastParallelExecutorParentContextCanceled(t *testing.T) { + assert := assert.New(t) + + ctx, cancel := context.WithCancel(context.Background()) + ctx = WithFailFast(ctx, true) + + executedCount := 0 + var mu sync.Mutex + + executors := []Executor{ + func(ctx context.Context) error { + mu.Lock() + executedCount++ + mu.Unlock() + time.Sleep(100 * time.Millisecond) + return nil + }, + func(ctx context.Context) error { + mu.Lock() + executedCount++ + mu.Unlock() + // Cancel parent context + cancel() + time.Sleep(100 * time.Millisecond) + return nil + }, + func(ctx context.Context) error { + mu.Lock() + executedCount++ + mu.Unlock() + return nil + }, + } + + err := NewFailFastParallelExecutor(2, executors...)(ctx) + + // Should return context.Canceled from parent + assert.ErrorIs(err, context.Canceled) +} + +func TestNewFailFastParallelExecutorAllSuccess(t *testing.T) { + assert := assert.New(t) + + ctx := WithFailFast(context.Background(), true) + + executedCount := 0 + var mu sync.Mutex + + // All executors succeed - fail-fast shouldn't interfere + executors := []Executor{ + func(ctx context.Context) error { + mu.Lock() + executedCount++ + mu.Unlock() + return nil + }, + func(ctx context.Context) error { + mu.Lock() + executedCount++ + mu.Unlock() + return nil + }, + func(ctx context.Context) error { + mu.Lock() + executedCount++ + mu.Unlock() + return nil + }, + } + + err := NewFailFastParallelExecutor(2, executors...)(ctx) + + assert.NoError(err) + mu.Lock() + assert.Equal(3, executedCount, "all executors should run when all succeed") + mu.Unlock() +} diff --git a/pkg/common/job_error.go b/pkg/common/job_error.go index 3eb2128db8d..72b7eaa9384 100644 --- a/pkg/common/job_error.go +++ b/pkg/common/job_error.go @@ -12,6 +12,26 @@ type jobCancelCtx string 
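+	// With fail-fast disabled, NewFailFastParallelExecutor delegates to NewParallelExecutor, so the intentional failure above does not cancel the remaining executors.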
const JobCancelCtxVal = jobCancelCtx("job.cancel") +type failFastContextKey string + +const FailFastContextKeyVal = failFastContextKey("job.failfast") + +// WithFailFast adds fail-fast configuration to the context +func WithFailFast(ctx context.Context, failFast bool) context.Context { + return context.WithValue(ctx, FailFastContextKeyVal, failFast) +} + +// IsFailFast returns whether fail-fast is enabled for this context +func IsFailFast(ctx context.Context) bool { + val := ctx.Value(FailFastContextKeyVal) + if val != nil { + if ff, ok := val.(bool); ok { + return ff + } + } + return false +} + // JobError returns the job error for current context if any func JobError(ctx context.Context) error { val := ctx.Value(jobErrorContextKeyVal) diff --git a/pkg/model/workflow.go b/pkg/model/workflow.go index 3a7a426e3ad..1ec3d17bbc0 100644 --- a/pkg/model/workflow.go +++ b/pkg/model/workflow.go @@ -212,7 +212,7 @@ type Job struct { With map[string]interface{} `yaml:"with"` RawSecrets yaml.Node `yaml:"secrets"` Result string - outputsMu sync.Mutex // Protects concurrent access to Outputs from parallel matrix jobs + outputsMu sync.Mutex // Protects concurrent access to Outputs from parallel matrix jobs } // Lock locks the job's outputs mutex to allow safe concurrent access from parallel matrix jobs diff --git a/pkg/runner/job_executor.go b/pkg/runner/job_executor.go index 4ab7f2395bf..3584d0afe1f 100644 --- a/pkg/runner/job_executor.go +++ b/pkg/runner/job_executor.go @@ -2,6 +2,7 @@ package runner import ( "context" + "errors" "fmt" "time" @@ -58,6 +59,14 @@ func newJobExecutor(info jobInfo, sf stepFactory, rc *RunContext) common.Executo return nil } logger := common.Logger(ctx) + + // Check if this is a fail-fast cancellation + if errors.Is(err, context.Canceled) && common.IsFailFast(ctx) { + logger.Errorf("Job cancelled (fail-fast)") + common.SetJobError(ctx, err) + return err + } + logger.Errorf("%v", err) common.SetJobError(ctx, err) return err diff --git a/pkg/runner/matrix_outputs_test.go b/pkg/runner/matrix_outputs_test.go index 4a3a01b899a..5d0115d63ff 100644 --- a/pkg/runner/matrix_outputs_test.go +++ b/pkg/runner/matrix_outputs_test.go @@ -42,9 +42,9 @@ func TestMatrixJobOutputsMerge(t *testing.T) { // Create first run context (matrix.foo = v1) rc1 := &RunContext{ - Config: &Config{}, - Run: plan.Stages[0].Runs[0], - EventJSON: "{}", + Config: &Config{}, + Run: plan.Stages[0].Runs[0], + EventJSON: "{}", StepResults: map[string]*model.StepResult{ "step1": { Outputs: map[string]string{ @@ -57,9 +57,9 @@ func TestMatrixJobOutputsMerge(t *testing.T) { // Create second run context (matrix.foo = v2) rc2 := &RunContext{ - Config: &Config{}, - Run: plan.Stages[0].Runs[0], - EventJSON: "{}", + Config: &Config{}, + Run: plan.Stages[0].Runs[0], + EventJSON: "{}", StepResults: map[string]*model.StepResult{ "step1": { Outputs: map[string]string{ @@ -120,9 +120,9 @@ func TestMatrixJobOutputsParallelWithDelay(t *testing.T) { contexts := []*RunContext{ // Matrix run 1: sets v1 output { - Config: &Config{}, - Run: plan.Stages[0].Runs[0], - EventJSON: "{}", + Config: &Config{}, + Run: plan.Stages[0].Runs[0], + EventJSON: "{}", StepResults: map[string]*model.StepResult{ "step1": { Outputs: map[string]string{ @@ -133,9 +133,9 @@ func TestMatrixJobOutputsParallelWithDelay(t *testing.T) { }, // Matrix run 2: sets v2 output { - Config: &Config{}, - Run: plan.Stages[0].Runs[0], - EventJSON: "{}", + Config: &Config{}, + Run: plan.Stages[0].Runs[0], + EventJSON: "{}", StepResults: map[string]*model.StepResult{ 
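+// The fail-fast flag below follows the same unexported-key pattern as jobCancelCtx, preventing collisions with context keys defined in other packages.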
"step1": { Outputs: map[string]string{ diff --git a/pkg/runner/runner.go b/pkg/runner/runner.go index 078657eaf6f..f66c05b1346 100644 --- a/pkg/runner/runner.go +++ b/pkg/runner/runner.go @@ -180,7 +180,7 @@ func (runner *runnerImpl) NewPlanExecutor(plan *model.Plan) common.Executor { maxParallel := 4 if job.Strategy != nil { - maxParallel = job.Strategy.MaxParallel + maxParallel = job.Strategy.GetMaxParallel() } if len(matrixes) < maxParallel { @@ -204,10 +204,29 @@ func (runner *runnerImpl) NewPlanExecutor(plan *model.Plan) common.Executor { return err } - return executor(common.WithJobErrorContainer(WithJobLogger(ctx, rc.Run.JobID, jobName, rc.Config, &rc.Masks, matrix))) + jobCtx := common.WithJobErrorContainer(WithJobLogger(ctx, rc.Run.JobID, jobName, rc.Config, &rc.Masks, matrix)) + err = executor(jobCtx) + if err != nil { + return err + } + // Return job error to enable fail-fast behavior + return common.JobError(jobCtx) }) } - pipeline = append(pipeline, common.NewParallelExecutor(maxParallel, stageExecutor...)) + + // Use fail-fast executor for matrix jobs + matrixExecutor := common.NewFailFastParallelExecutor(maxParallel, stageExecutor...) + + // Wrap with fail-fast context based on strategy + pipeline = append(pipeline, func(ctx context.Context) error { + // Inject fail-fast setting into context + failFast := false + if job.Strategy != nil { + failFast = job.Strategy.GetFailFast() + } + ctx = common.WithFailFast(ctx, failFast) + return matrixExecutor(ctx) + }) } log.Debugf("PlanExecutor concurrency: %d", runner.config.GetConcurrentJobs()) @@ -223,6 +242,11 @@ func handleFailure(plan *model.Plan) common.Executor { for _, stage := range plan.Stages { for _, run := range stage.Runs { if run.Job().Result == "failure" { + job := run.Job() + // Check if this was a matrix job with fail-fast + if job.Strategy != nil && job.Strategy.GetFailFast() { + return fmt.Errorf("Job '%s' failed (fail-fast enabled, remaining matrix jobs may have been cancelled)", run.String()) + } return fmt.Errorf("Job '%s' failed", run.String()) } }