diff --git a/README.md b/README.md
index d19ccb9b..7e49421d 100644
--- a/README.md
+++ b/README.md
@@ -131,3 +131,9 @@ Looking to contribute? Try to follow these guidelines:
 ![design-diagram](https://user-images.githubusercontent.com/19351306/110375142-2ba88680-8017-11eb-80c3-554cc746b165.png)
 
 [Jetbrains](https://www.jetbrains.com/?from=gocron) supports this project with GoLand licenses. We appreciate their support for free and open source software!
+
+## Star History
+
+[![Star History Chart](https://api.star-history.com/svg?repos=go-co-op/gocron&type=Date)](https://star-history.com/#go-co-op/gocron&Date)
+
+
diff --git a/executor.go b/executor.go
index 8a554e6a..144db6ed 100644
--- a/executor.go
+++ b/executor.go
@@ -25,19 +25,24 @@ const (
 )
 
 type executor struct {
-	jobFunctions   chan jobFunction
-	stopCh         chan struct{}
-	stoppedCh      chan struct{}
-	limitMode      limitMode
+	jobFunctions   chan jobFunction   // the chan upon which the jobFunctions are passed in from the scheduler
+	ctx            context.Context    // used to tell the executor to stop
+	cancel         context.CancelFunc // used to tell the executor to stop
+	wg             *sync.WaitGroup    // used by the scheduler to wait for the executor to stop
+	jobsWg         *sync.WaitGroup    // used by the executor to wait for all jobs to finish
+	singletonWgs   *sync.Map          // used by the executor to wait for the singleton runners to complete
+	limitMode      limitMode          // when SetMaxConcurrentJobs() is set upon the scheduler
 	maxRunningJobs *semaphore.Weighted
 }
 
 func newExecutor() executor {
-	return executor{
+	e := executor{
 		jobFunctions: make(chan jobFunction, 1),
-		stopCh:       make(chan struct{}),
-		stoppedCh:    make(chan struct{}),
+		singletonWgs: &sync.Map{},
+		wg:           &sync.WaitGroup{},
 	}
+	e.wg.Add(1)
+	return e
 }
 
 func runJob(f jobFunction) {
@@ -51,9 +56,13 @@ func runJob(f jobFunction) {
 }
 
 func (jf *jobFunction) singletonRunner() {
+	jf.singletonRunnerOn.Store(true)
+	jf.singletonWg.Add(1)
 	for {
 		select {
 		case <-jf.ctx.Done():
+			jf.singletonWg.Done()
+			jf.singletonRunnerOn.Store(false)
 			return
 		default:
 			if jf.singletonQueue.Load() != 0 {
@@ -65,15 +74,12 @@ func (jf *jobFunction) singletonRunner() {
 }
 
 func (e *executor) start() {
-	stopCtx, cancel := context.WithCancel(context.Background())
-	runningJobsWg := sync.WaitGroup{}
-
 	for {
 		select {
 		case f := <-e.jobFunctions:
-			runningJobsWg.Add(1)
+			e.jobsWg.Add(1)
 			go func() {
-				defer runningJobsWg.Done()
+				defer e.jobsWg.Done()
 				panicHandlerMutex.RLock()
 				defer panicHandlerMutex.RUnlock()
 
@@ -94,7 +100,7 @@ func (e *executor) start() {
 						return
 					case WaitMode:
 						select {
-						case <-stopCtx.Done():
+						case <-e.ctx.Done():
 							return
 						case <-f.ctx.Done():
 							return
@@ -114,19 +120,32 @@ func (e *executor) start() {
 				case defaultMode:
 					runJob(f)
 				case singletonMode:
+					e.singletonWgs.Store(f.singletonWg, struct{}{})
+
+					if !f.singletonRunnerOn.Load() {
+						go f.singletonRunner()
+					}
+
 					f.singletonQueue.Add(1)
 				}
 			}()
-		case <-e.stopCh:
-			cancel()
-			runningJobsWg.Wait()
-			close(e.stoppedCh)
+		case <-e.ctx.Done():
+			e.jobsWg.Wait()
+			e.wg.Done()
 			return
 		}
 	}
 }
 
 func (e *executor) stop() {
-	close(e.stopCh)
-	<-e.stoppedCh
+	e.cancel()
+	e.wg.Wait()
+	if e.singletonWgs != nil {
+		e.singletonWgs.Range(func(key, value any) bool {
+			if wg, ok := key.(*sync.WaitGroup); ok {
+				wg.Wait()
+			}
+			return true
+		})
+	}
 }
diff --git a/executor_test.go b/executor_test.go
index 2fc05bd7..668f4958 100644
--- a/executor_test.go
+++ b/executor_test.go
@@ -1,6 +1,7 @@
 package gocron
 
 import (
+	"context"
 	"sync"
 	"sync/atomic"
 	"testing"
@@ -10,6 +11,10 @@ import (
 
 func Test_ExecutorExecute(t *testing.T) {
 	e := newExecutor()
+	stopCtx, cancel := context.WithCancel(context.Background())
+	e.ctx = stopCtx
+	e.cancel = cancel
+	e.jobsWg = &sync.WaitGroup{}
 
 	wg := &sync.WaitGroup{}
 	wg.Add(1)
@@ -41,6 +46,10 @@ func Test_ExecutorPanicHandling(t *testing.T) {
 	SetPanicHandler(handler)
 
 	e := newExecutor()
+	stopCtx, cancel := context.WithCancel(context.Background())
+	e.ctx = stopCtx
+	e.cancel = cancel
+	e.jobsWg = &sync.WaitGroup{}
 
 	wg := &sync.WaitGroup{}
 	wg.Add(1)
diff --git a/job.go b/job.go
index 05b15fe4..05edcd55 100644
--- a/job.go
+++ b/job.go
@@ -41,18 +41,20 @@ type random struct {
 }
 
 type jobFunction struct {
-	eventListeners                    // additional functions to allow run 'em during job performing
-	function       any                // task's function
-	parameters     []any              // task's function parameters
-	parametersLen  int                // length of the passed parameters
-	name           string             // nolint the function name to run
-	runConfig      runConfig          // configuration for how many times to run the job
-	singletonQueue *atomic.Int64      // limits inflight runs of a job to one
-	ctx            context.Context    // for cancellation
-	cancel         context.CancelFunc // for cancellation
-	isRunning      *atomic.Bool       // whether the job func is currently being run
-	runStartCount  *atomic.Int64      // number of times the job was started
-	runFinishCount *atomic.Int64      // number of times the job was finished
+	eventListeners                       // additional functions to allow run 'em during job performing
+	function          any                // task's function
+	parameters        []any              // task's function parameters
+	parametersLen     int                // length of the passed parameters
+	name              string             // nolint the function name to run
+	runConfig         runConfig          // configuration for how many times to run the job
+	singletonQueue    *atomic.Int64      // limits inflight runs of a job to one
+	singletonRunnerOn *atomic.Bool       // whether the runner function for singleton is running
+	ctx               context.Context    // for cancellation
+	cancel            context.CancelFunc // for cancellation
+	isRunning         *atomic.Bool       // whether the job func is currently being run
+	runStartCount     *atomic.Int64      // number of times the job was started
+	runFinishCount    *atomic.Int64      // number of times the job was finished
+	singletonWg       *sync.WaitGroup    // used by singleton runner
 }
 
 type eventListeners struct {
@@ -66,18 +68,20 @@ type jobMutex struct {
 }
 
 func (jf *jobFunction) copy() jobFunction {
 	cp := jobFunction{
-		eventListeners: jf.eventListeners,
-		function:       jf.function,
-		parameters:     nil,
-		parametersLen:  jf.parametersLen,
-		name:           jf.name,
-		runConfig:      jf.runConfig,
-		singletonQueue: jf.singletonQueue,
-		ctx:            jf.ctx,
-		cancel:         jf.cancel,
-		isRunning:      jf.isRunning,
-		runStartCount:  jf.runStartCount,
-		runFinishCount: jf.runFinishCount,
+		eventListeners:    jf.eventListeners,
+		function:          jf.function,
+		parameters:        nil,
+		parametersLen:     jf.parametersLen,
+		name:              jf.name,
+		runConfig:         jf.runConfig,
+		singletonQueue:    jf.singletonQueue,
+		ctx:               jf.ctx,
+		cancel:            jf.cancel,
+		isRunning:         jf.isRunning,
+		runStartCount:     jf.runStartCount,
+		runFinishCount:    jf.runFinishCount,
+		singletonWg:       jf.singletonWg,
+		singletonRunnerOn: jf.singletonRunnerOn,
 	}
 	cp.parameters = append(cp.parameters, jf.parameters...)
 	return cp
@@ -110,11 +114,12 @@ func newJob(interval int, startImmediately bool, singletonMode bool) *Job {
 		lastRun: time.Time{},
 		nextRun: time.Time{},
 		jobFunction: jobFunction{
-			ctx:            ctx,
-			cancel:         cancel,
-			isRunning:      &atomic.Bool{},
-			runStartCount:  &atomic.Int64{},
-			runFinishCount: &atomic.Int64{},
+			ctx:               ctx,
+			cancel:            cancel,
+			isRunning:         &atomic.Bool{},
+			runStartCount:     &atomic.Int64{},
+			runFinishCount:    &atomic.Int64{},
+			singletonRunnerOn: &atomic.Bool{},
 		},
 		tags:              []string{},
 		startsImmediately: startImmediately,
@@ -389,7 +394,7 @@ func (j *Job) SingletonMode() {
 	defer j.mu.Unlock()
 	j.runConfig.mode = singletonMode
 	j.jobFunction.singletonQueue = &atomic.Int64{}
-	go j.jobFunction.singletonRunner()
+	j.jobFunction.singletonWg = &sync.WaitGroup{}
 }
 
 // shouldRun evaluates if this job should run again
diff --git a/scheduler.go b/scheduler.go
index d744af52..41c47756 100644
--- a/scheduler.go
+++ b/scheduler.go
@@ -93,6 +93,11 @@ func (s *Scheduler) StartAsync() {
 
 // start starts the scheduler, scheduling and running jobs
 func (s *Scheduler) start() {
+	stopCtx, cancel := context.WithCancel(context.Background())
+	s.executor.ctx = stopCtx
+	s.executor.cancel = cancel
+	s.executor.jobsWg = &sync.WaitGroup{}
+
 	go s.executor.start()
 	s.setRunning(true)
 	s.runJobs(s.Jobs())
@@ -100,6 +105,11 @@
 
 func (s *Scheduler) runJobs(jobs []*Job) {
 	for _, job := range jobs {
+		ctx, cancel := context.WithCancel(context.Background())
+		job.mu.Lock()
+		job.ctx = ctx
+		job.cancel = cancel
+		job.mu.Unlock()
 		s.runContinuous(job)
 	}
 }
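
For context, a minimal sketch of how the reworked singleton handling behaves from the caller's side. It assumes the public gocron v1 scheduler API (`NewScheduler`, `Every`, `SingletonMode`, `Do`, `StartAsync`, `Stop`) and is illustrative only; it is not part of the patch.

```go
package main

import (
	"fmt"
	"time"

	"github.com/go-co-op/gocron"
)

func main() {
	s := gocron.NewScheduler(time.UTC)

	// SingletonMode prevents a new run from starting while the previous
	// run of the same job is still in progress. With this patch the
	// singleton runner goroutine is started lazily by the executor
	// rather than at scheduling time.
	_, err := s.Every(1).Second().SingletonMode().Do(func() {
		fmt.Println("working...")
		time.Sleep(3 * time.Second)
	})
	if err != nil {
		fmt.Println("scheduling failed:", err)
		return
	}

	s.StartAsync()
	time.Sleep(10 * time.Second)

	// Stop cancels the executor's context and, per the changes above,
	// waits on its WaitGroups so in-flight jobs and singleton runners
	// finish before Stop returns.
	s.Stop()
}
```

The design change this illustrates: replacing the stop/stopped channel pair with a context plus WaitGroups lets stop() block until both regular jobs and singleton runner goroutines have drained, instead of returning as soon as the stop signal is acknowledged.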