// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Author: wsfuyibing <682805@qq.com>
// Date: 2024-07-16

package src

import (
	"context"
	"fmt"
	"gitee.com/go-libs/log"
	"gitee.com/go-libs/redis"
	"github.com/gorhill/cronexpr"
	"reflect"
	"sync/atomic"
	"time"
)

const (
	// DefaultGloballyUnique
	// is the default for globally-unique scheduling: disabled, so a job
	// may run concurrently on several cluster nodes.
	DefaultGloballyUnique = false

	// DefaultNodeUnique
	// is the default for node-unique scheduling: enabled, so a job is
	// skipped while its previous run on this node has not completed.
	DefaultNodeUnique = true

	// DefaultRunOnStartup
	// is the default for run-on-startup: disabled, so a job is not
	// scheduled immediately when the crontab starts.
	DefaultRunOnStartup = false

	// defaultSchedulerTimeFormat is the reference layout used when
	// rendering schedule times into span attributes.
	defaultSchedulerTimeFormat = "2006-01-02 15:04:05 Z07:00"
)

// Worker
// is a job manager scheduled by a crontab. It calls the job handlers in
// a goroutine.
type Worker struct {
	class      string              // fully-qualified job type, "pkgpath.TypeName"
	crontab    *Crontab            // owning crontab (set elsewhere; not visible in this file)
	expression *cronexpr.Expression // parsed cron expression; run() dereferences it, so it must be set before scheduling
	job        Job                 // user job; nil when the job type is unsupported
	strategy   Strategy            // scheduling strategy, exposed via GetStrategy

	runProcessing                            int32     // count of in-flight schedules on this node (atomic)
	runTime                                  time.Time // tick time of the most recent schedule
	runTotal                                 uint64    // total schedules started (atomic)
	runOnStartup, nodeUnique, globallyUnique bool      // behavior flags, see the Set* methods
}

// NewWorker
// creates a worker that schedules the given job with the given strategy.
//
// The worker class is derived from the job's reflected type as
// "pkgpath.TypeName". When job is nil, or neither a struct nor a pointer
// to struct, the job is discarded (left nil) and the class stays empty.
func NewWorker(strategy Strategy, job Job) *Worker {
	o := &Worker{strategy: strategy, job: job}

	// Guard: reflect.TypeOf(nil) returns a nil reflect.Type, and calling
	// Kind() on it panics. Handle a nil job before reflecting on it.
	if job != nil {
		switch r := reflect.TypeOf(job); r.Kind() {
		case reflect.Ptr:
			// Class from pointer instance.
			e := r.Elem()
			o.class = fmt.Sprintf(`%s.%s`, e.PkgPath(), e.Name())
		case reflect.Struct:
			// Class from struct instance.
			o.class = fmt.Sprintf(`%s.%s`, r.PkgPath(), r.Name())
		default:
			// Unsupported job type: drop the job so it is never scheduled.
			o.job = nil
		}
	}

	// With default fields and return.
	o.SetGloballyUnique(DefaultGloballyUnique)
	o.SetNodeUnique(DefaultNodeUnique)
	o.SetRunOnStartup(DefaultRunOnStartup)
	return o
}

// GetClass
// returns the fully-qualified class name derived from the job type, or
// an empty string when the job type was unsupported.
func (o *Worker) GetClass() string {
	return o.class
}

// GetStrategy
// returns the strategy this worker was created with.
func (o *Worker) GetStrategy() Strategy { return o.strategy }

// SetGloballyUnique
// toggles globally-unique scheduling: when enabled, a run is skipped if a
// previous scheduling on any cluster node has not completed yet. Returns
// the worker for chaining.
func (o *Worker) SetGloballyUnique(enabled bool) *Worker {
	o.globallyUnique = enabled
	return o
}

// SetNodeUnique
// toggles node-unique scheduling: when enabled, a run is skipped if a
// previous scheduling on this node has not completed yet. Returns the
// worker for chaining.
func (o *Worker) SetNodeUnique(enabled bool) *Worker {
	o.nodeUnique = enabled
	return o
}

// SetRunOnStartup
// toggles run-on-startup: when enabled, the job is scheduled once
// immediately when the crontab starts. Returns the worker for chaining.
func (o *Worker) SetRunOnStartup(enabled bool) *Worker {
	o.runOnStartup = enabled
	return o
}

// run
// decides whether the worker should be scheduled at the tick time tm and,
// if so, schedules it via the deferred call below. do reports whether the
// job was scheduled; err carries the scheduling error (always nil when the
// job was skipped).
func (o *Worker) run(ctx context.Context, tm time.Time) (do bool, err error) {
	// Called
	// when done. Scheduling happens here so every return path that sets
	// do=true triggers exactly one schedule.
	defer func() {
		if do {
			err = o.schedule(ctx, tm)
		}
	}()

	// Accessed
	// if job configured as run-on-startup and this is the first schedule
	// (runTotal still zero): skip the cron-expression match entirely.
	if o.runOnStartup {
		if atomic.LoadUint64(&o.runTotal) == 0 {
			do = true
			return
		}
	}

	// Compare
	// timed execution time with strategy: stepping back one second and
	// asking the expression for the next match must land exactly on tm.
	// Return (do=false) if not matched.
	if tm.Unix() != o.expression.Next(tm.Add(-1*time.Second)).Unix() {
		return
	}

	// Unique on node checker. Return (do=false) if job configured as
	// node-unique and a previous scheduling on this node not completed.
	if o.nodeUnique {
		if atomic.LoadInt32(&o.runProcessing) > 0 {
			return
		}
	}

	// End access check.
	do = true
	return
}

// schedule
// executes one scheduling pass for the job at tick time tm: it updates the
// run statistics, opens a tracing span, optionally takes a distributed
// lock (globally-unique mode), then calls the job's lifecycle handlers in
// order: OnBefore, OnRun, OnFailed/OnSucceed, OnFinish.
func (o *Worker) schedule(ctx context.Context, tm time.Time) (err error) {
	var (
		locker redis.Locker
		span   log.Span
	)

	// Set
	// last schedule state when scheduler begin. runProcessing is the
	// in-flight counter that node-unique mode checks; it is decremented
	// in the deferred cleanup below.
	o.runTime = tm
	count := atomic.AddUint64(&o.runTotal, 1)
	atomic.AddInt32(&o.runProcessing, 1)

	// Create span.
	span = log.NewSpanWithContext(ctx, o.class)
	span.Info(`schedule begin: class="%s"`, o.class)
	span.GetAttr().Set(`crontab.scheduler.count`, count)
	span.GetAttr().Set(`crontab.scheduler.class`, o.class)
	span.GetAttr().Set(`crontab.scheduler.strategy`, o.strategy)
	span.GetAttr().Set(`crontab.scheduler.time`, tm.Format(defaultSchedulerTimeFormat))
	span.GetAttr().Set(`crontab.scheduler.time.next`, o.expression.Next(tm).Format(defaultSchedulerTimeFormat))

	// Cleanup
	// when scheduler finish. Registered before the lock is acquired so
	// the lock is always released on any return path.
	defer func() {
		// Log scheduler result.
		if err != nil {
			span.Info("schedule failed: %v", err)
		} else {
			span.Info("schedule succeed")
		}

		// Close span.
		span.End()

		// Release distributed lock resource.
		if locker != nil {
			locker.Release()
		}

		// Reset statistic.
		atomic.AddInt32(&o.runProcessing, -1)
	}()

	// Unique on cluster checker. Return early if job configured as
	// globally-unique and previous scheduling in any node not completed.
	// It's dependent on redis lock.
	if o.globallyUnique {
		span.Info(`apply distributed lock resource`)

		// Apply
		// a distributed lock resource.
		if locker, err = redis.Lock(span.GetContext(), o.class); err != nil {
			return
		}

		// Locked
		// by other goroutine: a nil locker with nil error means the lock
		// is already held elsewhere, so skip this run silently.
		if locker == nil {
			span.Info(`distributed lock resource applied by other goroutine`)
			return
		}

		// Enable
		// distributed lock resource renew.
		locker.Renew()
	}

	// Before
	// handler called. An OnBefore error aborts the run (OnRun is never
	// reached, and OnFinish is not registered yet so it is skipped too).
	if v, ok := o.job.(JobBefore); ok {
		if err = o.scheduleBefore(span.GetContext(), v.OnBefore); err != nil {
			return
		}
	}

	// Finish
	// handler called. Deferred so it runs after OnFailed/OnSucceed on
	// every path past this point.
	defer func() {
		if v, ok := o.job.(JobFinish); ok {
			o.scheduleFinish(span.GetContext(), v.OnFinish)
		}
	}()

	// Process
	// handler called.
	if err = o.scheduleRun(span.GetContext(), o.job.OnRun); err != nil {
		// Failed
		// handler called.
		if v, ok := o.job.(JobFailed); ok {
			o.scheduleFailed(span.GetContext(), err, v.OnFailed)
		}
	} else {
		// Succeed
		// handler called.
		if v, ok := o.job.(JobSucceed); ok {
			o.scheduleSucceed(span.GetContext(), v.OnSucceed)
		}
	}
	return
}

// scheduleBefore calls the job's OnBefore handler inside an `on:before`
// span, recovering and logging any panic (a panic leaves err nil).
func (o *Worker) scheduleBefore(ctx context.Context, handler func(context.Context) error) (err error) {
	span := log.NewSpanWithContext(ctx, `on:before`)

	// Defers run LIFO: the panic recovery below executes first, the span
	// is closed last — same order as a single combined deferred func.
	defer span.End()
	defer func() {
		if r := recover(); r != nil {
			span.Fatal(`fatal on before: %v`, r)
		}
	}()

	err = handler(span.GetContext())
	return
}

// scheduleRun calls the job's OnRun handler inside an `on:run` span,
// recovering and logging any panic (a panic leaves err nil).
func (o *Worker) scheduleRun(ctx context.Context, handler func(context.Context) error) (err error) {
	span := log.NewSpanWithContext(ctx, `on:run`)

	// Cleanup
	// when done.
	defer func() {
		// Catch runtime panic. Message fixed from "fatal on do" to match
		// the span name, consistent with the sibling scheduleXxx helpers.
		if r := recover(); r != nil {
			span.Fatal(`fatal on run: %v`, r)
		}

		// Close span.
		span.End()
	}()

	// Call handler.
	err = handler(span.GetContext())
	return
}

// scheduleFailed calls the job's OnFailed handler with the run error
// inside an `on:failed` span, recovering and logging any panic.
func (o *Worker) scheduleFailed(ctx context.Context, err error, handler func(context.Context, error)) {
	span := log.NewSpanWithContext(ctx, `on:failed`)

	// Defers run LIFO: panic recovery first, then the span is closed —
	// same order as a single combined deferred func.
	defer span.End()
	defer func() {
		if r := recover(); r != nil {
			span.Fatal(`fatal on failed: %v`, r)
		}
	}()

	handler(span.GetContext(), err)
}

// scheduleFinish calls the job's OnFinish handler inside an `on:finish`
// span, recovering and logging any panic.
func (o *Worker) scheduleFinish(ctx context.Context, handler func(context.Context)) {
	span := log.NewSpanWithContext(ctx, `on:finish`)

	// Defers run LIFO: panic recovery first, then the span is closed —
	// same order as a single combined deferred func.
	defer span.End()
	defer func() {
		if r := recover(); r != nil {
			span.Fatal(`fatal on finish: %v`, r)
		}
	}()

	handler(span.GetContext())
}

// scheduleSucceed calls the job's OnSucceed handler inside an `on:succeed`
// span, recovering and logging any panic.
func (o *Worker) scheduleSucceed(ctx context.Context, handler func(context.Context)) {
	span := log.NewSpanWithContext(ctx, `on:succeed`)

	// Defers run LIFO: panic recovery first, then the span is closed —
	// same order as a single combined deferred func.
	defer span.End()
	defer func() {
		if r := recover(); r != nil {
			span.Fatal(`fatal on succeed: %v`, r)
		}
	}()

	handler(span.GetContext())
}
