package conductor

import (
    "cing.io/cing-api/planner"
    "cing.io/cing/internal/clientcache"
    "cing.io/cing/internal/storage"
    "context"
    v1 "k8s.io/api/core/v1"
    "k8s.io/client-go/tools/cache"
    "sync"
    "time"

    "github.com/google/uuid"
    log "github.com/sirupsen/logrus"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/client-go/tools/leaderelection"
    "k8s.io/client-go/tools/leaderelection/resourcelock"

    "cing.io/cing/internal/config"
    "cing.io/cing/internal/k8s"
)

const (
    // idPrefix namespaces every election identity under the system name.
    idPrefix = config.SystemName + "-"
    // lockName is the name of the Lease object used as the leader lock.
    lockName = idPrefix + "master-lock"
)

var (
    hm sync.Mutex               // hm guards handlePlan; PutPlan/ticker TryLock it to decide whether to signal
    hs = make(chan struct{}, 1) // hs wakes the leader loop; capacity 1 coalesces bursts of signals
)

// PutPlan persists the plan and nudges the leader loop to re-scan storage.
//
// The wake-up is a non-blocking send on the 1-buffered hs channel: if a
// signal is already pending it is simply dropped (one pending wake-up is
// enough for any number of writes). Unlike the previous TryLock-based
// scheme, a plan stored while handlePlan is mid-scan still queues a
// signal, so it is picked up right after the current scan finishes
// instead of waiting for the next periodic tick.
func PutPlan(plan *planner.SingingPlan) error {
    if err := storage.PutPlan(plan); err != nil {
        return err
    }
    select {
    case hs <- struct{}{}:
    default:
        // A wake-up is already queued; handlePlan will see this plan.
    }
    return nil
}

// LeaderBehavior conducts task singing until ctx is cancelled.
//
// It handles any due plans immediately, then loops, waking either on an
// explicit signal (hs, sent by PutPlan) or on a periodic tick so that
// plans stored without a signal are still picked up eventually.
func LeaderBehavior(ctx context.Context) {
    log.Info("The leader is conducting...")
    handlePlan()

    // Use a stoppable ticker folded into the main select instead of a
    // time.Tick goroutine: time.Tick can never be stopped, so the old
    // background goroutine (and its ticker) leaked on every lost
    // leadership term.
    ticker := time.NewTicker(config.ConductDuration)
    defer ticker.Stop()

loop:
    for {
        select {
        case <-hs:
            log.Debug("handle plan...")
            handlePlan()
        case <-ticker.C:
            log.Debug("periodic handle plan...")
            handlePlan()
        case <-ctx.Done():
            break loop
        }
    }

    log.Info("Leadership lost! Conduct stopped!")
}

// handlePlan scans stored plans and dispatches the NEW ones.
//
// Overdue plans (start time already in the past) are conducted
// immediately; plans starting within the next ConductDuration are
// scheduled on a timer that fires ConductDelay before their start time.
// Dispatched plans are marked TODO so a later scan skips them.
func handlePlan() {
    hm.Lock()
    defer hm.Unlock()

    // Snapshot the clock once so the two range queries share an exact
    // boundary; previously time.Now() was read twice, leaving a gap a
    // plan's start time could fall into and never be dispatched.
    now := time.Now()

    // Plans already due: conduct right away.
    for _, plan := range storage.ListPlanWithTimeRange(0, now.UnixMilli()) {
        if plan.Status != planner.SingingPlan_NEW {
            continue
        }
        go Conduct(plan)
        if err := storage.UpdatePlanStatus(plan.TaskFullName, plan.StartTime.AsTime().UnixMilli(), planner.SingingPlan_TODO); err != nil {
            log.Error("conductor update plan status error: ", err)
        }
    }

    // Plans due within the next conduct window: schedule them to be
    // conducted ConductDelay ahead of their start time.
    for _, plan := range storage.ListPlanWithTimeRange(now.UnixMilli(), now.Add(config.ConductDuration).UnixMilli()) {
        if plan.Status != planner.SingingPlan_NEW {
            continue
        }
        plan := plan // shadow: avoid loop-variable capture in the closure (pre-Go 1.22 semantics)
        time.AfterFunc(time.Until(plan.StartTime.AsTime())-config.ConductDelay, func() {
            Conduct(plan)
        })
        if err := storage.UpdatePlanStatus(plan.TaskFullName, plan.StartTime.AsTime().UnixMilli(), planner.SingingPlan_TODO); err != nil {
            log.Error("conductor update plan status error: ", err)
        }
    }
}

// LeaderElection runs a Kubernetes lease-based leader election and,
// while this instance holds the lock, performs the leader behavior.
// It blocks until ctx is cancelled or the election loop exits.
func LeaderElection(ctx context.Context) {
    clientset := k8s.Clientset()

    // Unique identity per process so lease holders can be told apart.
    id := idPrefix + uuid.New().String()

    lock := &resourcelock.LeaseLock{
        LeaseMeta: metav1.ObjectMeta{
            Name:      lockName,
            Namespace: config.SystemNamespace,
        },
        Client: clientset.CoordinationV1(),
        LockConfig: resourcelock.ResourceLockConfig{
            Identity: id,
        },
    }

    // start the leader election code loop
    leaderelection.RunOrDie(ctx, leaderelection.LeaderElectionConfig{
        Lock: lock,
        // IMPORTANT: you MUST ensure that any code you have that
        // is protected by the lease must terminate **before**
        // you call cancel. Otherwise, you could have a background
        // loop still running and another process could
        // get elected before your background loop finished, violating
        // the stated goal of the lease.
        ReleaseOnCancel: true,
        LeaseDuration:   15 * time.Second,
        RenewDeadline:   10 * time.Second,
        RetryPeriod:     2 * time.Second,
        Callbacks: leaderelection.LeaderCallbacks{
            OnStartedLeading: func(ctx context.Context) {
                log.Infof("This is Leader: %s", id)
                // watch endpoints event to update singer set
                k8s.EndpointsInformer().Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
                    AddFunc: func(obj interface{}) {
                        clientcache.Singer.Update(obj.(*v1.Endpoints))
                    },
                    UpdateFunc: func(oldObj, newObj interface{}) {
                        clientcache.Singer.Update(newObj.(*v1.Endpoints))
                    },
                    DeleteFunc: func(obj interface{}) {
                        clientcache.Singer.Update(obj.(*v1.Endpoints))
                    },
                })
                // The ctx client-go hands to this callback is cancelled
                // automatically when leadership ends, so use it directly.
                // The previous hand-rolled leaderContext/leaderCancelFunc
                // pair was assigned here (in a goroutine) and invoked from
                // OnStoppedLeading — an unsynchronized data race and a
                // possible nil-func call if leadership ended before the
                // assignment ran.
                LeaderBehavior(ctx)
            },
            OnStoppedLeading: func() {
                log.Infof("Lost leadership: %s", id)

                // Clear the cached singer set; LeaderBehavior stops on
                // its own via context cancellation.
                clientcache.Singer.Clear()
            },
            OnNewLeader: func(identity string) {
                // we're notified when new leader elected
                if identity == id {
                    // I just got the lock
                    return
                }
                log.Infof("New leader elected: %s", identity)
            },
        },
    })
}
