package batch

import (
	"fmt"
	"time"

	"github.com/astaxie/beego/orm"
	"github.com/golang/glog"
	apps "k8s.io/api/apps/v1"
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/clientcmd"

	apibatch "eccgateway/pkg/api/batch"
	"eccgateway/pkg/config"
	dbbatch "eccgateway/pkg/db/batch"
	"eccgateway/pkg/util"
	authsvc "eccgateway/pkg/services/auth"
	authdb "eccgateway/pkg/db/auth"
)

const (
	// RetryTime is the number of attempts made for a kubernetes request
	// (or a forced DB update) before the failure is recorded.
	RetryTime = 3

	// HandlerSleepTime is the idle back-off, in seconds, used by the
	// batch handler loop when there is no work or no idle worker.
	HandlerSleepTime = 1
	// WorkerSleepTime is the pause, in seconds, a worker takes between
	// consecutive work items (and when it has nothing to do).
	WorkerSleepTime  = 1
)

// Start builds the kubernetes client for this service and launches the
// batch handler goroutine plus s.workerNum worker goroutines. Closing
// stopCh tells the handler to shut the workers down. Any failure to
// construct the client configuration is fatal.
func (s *BatchService) Start(stopCh chan struct{}) {
	glog.Infof("start batch service")

	masterURL := config.GetConfig().String(config.MasterURLKey)
	kubeconfig := config.GetConfig().String(config.KubeConfigKey)

	var (
		cfg *rest.Config
		err error
	)
	if masterURL == "" && kubeconfig == "" {
		// No explicit configuration given: assume we run inside the cluster.
		if cfg, err = rest.InClusterConfig(); err != nil {
			glog.Fatalf("Error building kubeconfig in cluster: %s", err.Error())
		}
	} else {
		if cfg, err = clientcmd.BuildConfigFromFlags(masterURL, kubeconfig); err != nil {
			glog.Fatalf("Error building kubeconfig by kube config file: %s", err.Error())
		}
	}

	if s.kubeClient, err = kubernetes.NewForConfig(cfg); err != nil {
		glog.Fatalf("Error building kubernetes client: %s", err.Error())
	}

	go s.handleBatchWork(stopCh)
	for i := 0; i < s.workerNum; i++ {
		s.assignChan[i] = make(chan *apibatch.Batch, 1)
		s.stopChan[i] = make(chan string)
		go s.Worker(i)
	}
}

// handleBatchWork drives the scheduling loop: it keeps calling process()
// until stopCh is closed, then closes every worker's stop channel so the
// workers can drain their current work and exit.
func (s *BatchService) handleBatchWork(stopCh chan struct{}) {
	// Idiomatic infinite loop (`for`), replacing the original `for true`.
	for {
		select {
		case <-stopCh:
			glog.Infof("lost controller authority, stop batch service")
			for _, v := range s.stopChan {
				close(v)
			}
			glog.Infof("close all stop chan already, exit handle batch work process")
			return
		default:
			// process() sleeps internally when idle, so this loop does
			// not busy-spin.
			s.process()
		}
	}
}

// process runs one scheduling pass: it loads every batch from the DB,
// forwards stop requests to the workers that currently own the affected
// batches, and hands at most one runnable batch to an idle worker.
// It sleeps HandlerSleepTime seconds whenever there is nothing to do so
// the calling loop does not busy-spin.
func (s *BatchService) process() {
	batchList, err := dbbatch.ListAllBatch(orm.NewOrm())
	if err != nil {
		glog.Errorf("list all batch work in db failed, err: %v", err)
		return
	}
	if len(batchList) == 0 {
		glog.V(4).Infof("there is no batch work in db.")
		time.Sleep(HandlerSleepTime * time.Second)
		return
	}

	// Invert the worker->batchID assignment into batchID->worker so the
	// owner of a batch can be looked up in O(1).
	m := make(map[string]int, len(s.assigned))
	for k, v := range s.assigned {
		m[v] = k
	}

	for _, batch := range batchList {
		if batch.Status == Stopping {
			_, ok := m[batch.ID]
			if ok {
				// send stop message
				// NOTE(review): this send blocks until the owning worker
				// reads it — confirm workers always drain their stop
				// channel promptly, otherwise the handler loop stalls.
				s.stopChan[m[batch.ID]] <- batch.ID
			}
		}
	}

	idleWorkerNo := s.getIdleWorker()
	if idleWorkerNo >= s.workerNum {
		// getIdleWorker returns s.workerNum as the "all busy" sentinel.
		glog.V(4).Infof("there is no idle worker, wait a moment")
		time.Sleep(HandlerSleepTime * time.Second)
		return
	}

	batch := s.getNeedDealBatchWork(batchList, m)
	if batch == nil {
		glog.V(4).Infof("there is no batch work need deal, wait a moment")
		time.Sleep(HandlerSleepTime * time.Second)
		return
	}

	glog.Infof("idleWorkerNo: %v, batch: %v", idleWorkerNo, batch.Name)

	// change status
	// A freshly picked pending batch becomes Running from this moment.
	if batch.Status != Running {
		batch.Status = Running
		batch.StartTimestamp = time.Now()
	}
	// update in db
	err = updateBatch(*batch, false)
	if err != nil {
		glog.Errorf("update batch[%v] in db failed, err: %v", batch.Name, err)
		// ignore err, because it will update later when there ars new progress in this batch work.
	}

	// Record ownership before handing the batch over; the assign channel
	// has capacity 1, so this send does not block for an idle worker.
	s.assigned[idleWorkerNo] = batch.ID
	s.assignChan[idleWorkerNo] <- batch
}

// getIdleWorker returns the index of a worker with no batch assigned,
// or s.workerNum as a sentinel when every worker is busy.
func (s *BatchService) getIdleWorker() int {
	for idx, batchID := range s.assigned {
		if batchID == "" {
			return idx
		}
	}
	// No free slot: the sentinel equals the worker count.
	return s.workerNum
}

// getNeedDealBatchWork picks the next batch that should be handed to a
// worker. A Running batch with no assigned worker (e.g. after a restart)
// is resumed immediately; otherwise the oldest Pending batch is chosen.
// Returns nil when nothing needs handling. m maps batch ID -> worker index.
func (s *BatchService) getNeedDealBatchWork(batchList []dbbatch.Batch, m map[string]int) *apibatch.Batch {
	var pending []dbbatch.Batch
	for i := range batchList {
		switch batchList[i].Status {
		case Running:
			if _, owned := m[batchList[i].ID]; !owned {
				// A running batch without an owner must be resumed first.
				resumed := transformDB2API(batchList[i])
				return &resumed
			}
		case Pending:
			pending = append(pending, batchList[i])
		}
	}

	if len(pending) == 0 {
		return nil
	}

	// The oldest pending batch (earliest creation time) wins.
	oldest := 0
	for i := 1; i < len(pending); i++ {
		if pending[i].CreateTimestamp.Before(pending[oldest].CreateTimestamp) {
			oldest = i
		}
	}
	chosen := transformDB2API(pending[oldest])
	return &chosen
}

// Worker is the loop of worker goroutine number num. It alternates between
// two concerns: stop handling (a batch ID on s.stopChan[num] asks it to
// stop that batch; a closed channel means the whole service is exiting)
// and work handling (a batch arrives on s.assignChan[num] and is then
// processed one work item at a time through the local handle channel).
func (s *BatchService) Worker(num int) {
	var err error
	// handle holds the batch currently owned by this worker (capacity 1:
	// the batch is taken out, advanced one step, then put back).
	handle := make(chan *apibatch.Batch, 1)
	for {
		select {
		case stopBatchID, stopChanIsOpen := <-s.stopChan[num]:
			glog.Infof("the %v worker start exit", num)
			select {
			case batch, ok := <-handle:
				if !ok {
					glog.Errorf("the %v worker's handle closed, there is something wrong", num)
					return
				}
				if stopChanIsOpen {
					if batch.ID == stopBatchID {
						batch.Status = Stop
					} else {
						// Stop request targets a batch we are not holding;
						// keep working on the current one.
						glog.Warningf("the %v worker receive a stop message for batch[%v], but it is handling this batch[%v]", num, stopBatchID, batch.ID)
						handle <- batch
						continue
					}
				}
				// Persist the final state. Fixed: retry on FAILURE and
				// stop on success (the original broke out on the first
				// error and re-ran the update after a success).
				for i := 0; i < RetryTime; i++ {
					err := updateBatch(*batch, true)
					if err == nil {
						break
					}
					glog.Errorf("the %v worker update batch[%v] in db failed, retry time: %v, err: %v", num, batch.Name, i, err)
				}
				s.assigned[num] = ""
			default:
				if stopChanIsOpen {
					// Fixed format string: it had three %v verbs but only
					// two arguments.
					glog.Warningf("the %v worker receive a stop message for batch[%v], but it doesn't handle any batch", num, stopBatchID)
					continue
				} else {
					glog.Infof("the %v worker don't have work, exit directly", num)
					return
				}
			}
		default:
			select {
			case batch, ok := <-s.assignChan[num]:
				if !ok {
					glog.Errorf("s.assignChan[%v] closed, there is something wrong", num)
					return
				}
				glog.Infof("the %v worker assigned the batch work[%v/%v]", num, batch.Name, batch.ID)
				// send to handle
				handle <- batch
			case batch, ok := <-handle:
				// handle next in list
				if !ok {
					glog.Errorf("the %v worker's handle closed, there is something wrong", num)
					return
				}
				glog.Infof("the %v worker handle the batch work[%v/%v]", num, batch.Name, batch.ID)
				err = s.doSingleWorkInBatch(batch)
				if err != nil {
					// Fixed format string: "batch[v]" was missing the %
					// and the batch name argument.
					glog.Errorf("handle batch[%v] failed, err: %v", batch.Name, err)
				}
				changeStatus(batch)
				// update in db
				err = updateBatch(*batch, false)
				if err != nil {
					glog.Errorf("the %v worker update batch[%v] in db failed, err: %v", num, batch.Name, err)
					// ignore err, because it will update later when there are new progress in this batch work.
				}
				if batch.PendingCount == 0 {
					glog.Infof("batch[%v] is finished, the status is %v", batch.Name, batch.Status)
					s.assigned[num] = ""
					continue
				}
				handle <- batch
				time.Sleep(WorkerSleepTime * time.Second)
			default:
				glog.V(6).Infof("the %v worker no work to do, sleep %v second", num, WorkerSleepTime)
				time.Sleep(WorkerSleepTime * time.Second)
			}
		}
	}
}

// changeStatus finalises the status of a batch once no work is pending:
// the end timestamp is recorded and the status becomes Success (no
// failures), Failure (no successes) or PartialSuccess (a mix). A batch
// with remaining pending work is left untouched.
func changeStatus(batch *apibatch.Batch) {
	if batch.PendingCount != 0 {
		return // still running
	}
	batch.EndTimestamp = time.Now()
	switch {
	case batch.FailureCount == 0:
		batch.Status = Success
	case batch.SuccessCount == 0:
		batch.Status = Failure
	default:
		batch.Status = PartialSuccess
	}
}

// doSingleWorkInBatch executes one pending work item of the given batch:
// it picks the next work entry, renders a Deployment from the batch
// template and creates/updates it in kubernetes, retrying up to RetryTime
// times. Success or failure is recorded on the batch via changeBatchWork.
// Note: a nil error is also returned when the kubernetes call failed
// after all retries — that failure lives in the batch bookkeeping, not in
// the return value.
func (s *BatchService) doSingleWorkInBatch(batch *apibatch.Batch) error {
	var err error
	var message string
	work := chooseWork(batch)
	if work == nil {
		return fmt.Errorf("can't choose a work")
	}

	user, err := authsvc.GetUserByUUID(batch.UserID)
	if err != nil {
		glog.Errorf("get user detail failed, batch work: %v, type: %v, deployment: %v, cluster: %v, namespace: %v, node: %v, user: %v/%v, err: %v",
			batch.Name, batch.Type, work.Deployment, work.Cluster, work.Namespace, work.Node, batch.Username, batch.UserID, err)
		// Fixed: record the failure reason on the work item — it was
		// previously recorded with an empty message.
		message = fmt.Sprintf("get user detail failed, %v", err)
		changeBatchWork(batch, work, message, false)
		return fmt.Errorf("get user detail failed, %v", err)
	}

	deploy, err := s.generateFileByTemplate(batch.Template, batch.Type, work, user)
	if err != nil {
		glog.Errorf("generate deploy failed, batch work: %v, type: %v, deployment: %v, cluster: %v, namespace: %v, node: %v, err: %v",
			batch.Name, batch.Type, work.Deployment, work.Cluster, work.Namespace, work.Node, err)
		// NOTE(review): the message is left empty here on purpose(?) —
		// the commented-out line below suggests template errors were
		// deliberately hidden from the recorded detail; confirm intent.
		//message = fmt.Sprintf("generate deploy by template failed, %v", err)
		changeBatchWork(batch, work, message, false)
		return fmt.Errorf("generate deploy by template failed, %v", err)
	}

	glog.Infof("start do this work[%v/%v], type: %v", batch.Name, work.Deployment, batch.Type)
	for i := 0; i < RetryTime; i++ {
		if batch.Type == apibatch.CreateType {
			_, err = s.kubeClient.AppsV1().Deployments(deploy.Namespace).Create(deploy)
		} else if batch.Type == apibatch.UpgradeType {
			_, err = s.kubeClient.AppsV1().Deployments(deploy.Namespace).Update(deploy)
		}
		if err == nil {
			glog.Infof("this work finished, batch name: %v, type: %v, deploy name: %v, deploy namespace: %v", batch.Name, batch.Type, deploy.Name, deploy.Namespace)
			changeBatchWork(batch, work, "", true)
			return nil
		}
		message = err.Error()
		glog.Errorf("do this work[%v/%v] failed, retry time: %v, failed message: %v", batch.Name, work.Deployment, i, message)
	}

	glog.Errorf("do this work[%v/%v] failed after retry, write into batch work, failed message: %v", batch.Name, work.Deployment, message)
	changeBatchWork(batch, work, message, false)

	return nil
}

// chooseWork returns a copy of the first pending work item of the batch,
// or nil when nothing is pending. A copy is returned so the caller can
// keep using it after changeBatchWork shrinks the pending list.
func chooseWork(batch *apibatch.Batch) *apibatch.Detail {
	if batch.PendingCount == 0 || len(batch.Pending) == 0 {
		return nil
	}
	head := batch.Pending[0]
	work := apibatch.Detail{
		Cluster:    head.Cluster,
		Deployment: head.Deployment,
		Node:       head.Node,
		Namespace:  head.Namespace,
		Timestamp:  head.Timestamp,
	}
	return &work
}

// generateFileByTemplate renders a Deployment object from the stored
// batch template and specialises it for one work item. For CreateType
// the deployment is pinned to the requested node with a single replica
// and ownership labels; for UpgradeType scheduling fields are merged
// from the live deployment. Any other type is rejected.
func (s *BatchService) generateFileByTemplate(template interface{}, Type string, work *apibatch.Detail, user *authdb.User) (*apps.Deployment, error) {
	deploy, err := TransformTemplate2Deployment(template)
	if err != nil {
		return nil, err
	}

	if deploy.Labels == nil {
		deploy.Labels = make(map[string]string)
	}

	switch Type {
	case apibatch.CreateType:
		deploy.Name = work.Deployment
		replicas := int32(1)
		deploy.Spec.Replicas = &replicas
		// Pin the pod to the requested node.
		deploy.Spec.Template.Spec.NodeName = work.Node

		updateDeploySelectorAndPodLabels(deploy, work, user)
		addOrUpdatePodSpecToleration(&deploy.Spec.Template.Spec)

		if err := s.addNamespace(deploy, work); err != nil {
			return nil, err
		}
	case apibatch.UpgradeType:
		deploy.Name = work.Deployment
		if err := s.mergeDeployWhenUpgrade(deploy, work.Namespace); err != nil {
			return deploy, err
		}
	default:
		return nil, fmt.Errorf("type[%v] is invalid", Type)
	}

	// Tag the deployment so it can be traced back to this gateway host
	// and logical cluster.
	deploy.Labels[apibatch.BatchWorkLabel] = util.GetHostname()
	deploy.Labels[apibatch.ClusterLabel] = work.Cluster

	return deploy, nil
}

// updateDeploySelectorAndPodLabels stamps a common ownership label set
// (app name, cluster, user/group identity) onto the deployment's own
// labels, its selector's match labels and its pod template labels, so
// all three stay consistent. Previously the six assignments were
// duplicated three times; the set is now built once and merged.
func updateDeploySelectorAndPodLabels(deploy *apps.Deployment, work *apibatch.Detail, user *authdb.User) {
	labels := map[string]string{
		apibatch.AppLabel:     deploy.Name,
		apibatch.K8sAppLabel:  deploy.Name,
		apibatch.ClusterLabel: work.Cluster,
		apibatch.UserIDKey:    fmt.Sprintf("%v", user.UUID),
		apibatch.UserNameKey:  fmt.Sprintf("%v", user.Name),
		apibatch.GroupIDKey:   fmt.Sprintf("%v", user.Group.Id),
	}

	if deploy.Labels == nil {
		deploy.Labels = make(map[string]string)
	}
	mergeLabels(deploy.Labels, labels)

	if deploy.Spec.Selector == nil {
		deploy.Spec.Selector = &metav1.LabelSelector{}
	}
	if deploy.Spec.Selector.MatchLabels == nil {
		deploy.Spec.Selector.MatchLabels = make(map[string]string)
	}
	mergeLabels(deploy.Spec.Selector.MatchLabels, labels)

	if deploy.Spec.Template.Labels == nil {
		deploy.Spec.Template.Labels = make(map[string]string)
	}
	mergeLabels(deploy.Spec.Template.Labels, labels)
}

// mergeLabels copies every entry of src into dst, overwriting existing keys.
func mergeLabels(dst, src map[string]string) {
	for k, v := range src {
		dst[k] = v
	}
}

// addOrUpdatePodSpecToleration makes the pod tolerate the standard node
// taints: not-ready/unreachable (NoExecute, so the pod survives node
// flaps) and the resource-pressure/unschedulable taints (NoSchedule).
// Pods using the host network additionally tolerate the
// network-unavailable taint. Previously this was six copy-pasted calls;
// the toleration set is now data-driven.
func addOrUpdatePodSpecToleration(spec *v1.PodSpec) {
	tolerations := []v1.Toleration{
		{Key: v1.TaintNodeNotReady, Operator: v1.TolerationOpExists, Effect: v1.TaintEffectNoExecute},
		{Key: v1.TaintNodeUnreachable, Operator: v1.TolerationOpExists, Effect: v1.TaintEffectNoExecute},
		{Key: v1.TaintNodeDiskPressure, Operator: v1.TolerationOpExists, Effect: v1.TaintEffectNoSchedule},
		{Key: v1.TaintNodeMemoryPressure, Operator: v1.TolerationOpExists, Effect: v1.TaintEffectNoSchedule},
		{Key: v1.TaintNodePIDPressure, Operator: v1.TolerationOpExists, Effect: v1.TaintEffectNoSchedule},
		{Key: v1.TaintNodeUnschedulable, Operator: v1.TolerationOpExists, Effect: v1.TaintEffectNoSchedule},
	}
	if spec.HostNetwork {
		tolerations = append(tolerations, v1.Toleration{
			Key:      v1.TaintNodeNetworkUnavailable,
			Operator: v1.TolerationOpExists,
			Effect:   v1.TaintEffectNoSchedule,
		})
	}
	for i := range tolerations {
		addOrUpdateTolerationInPodSpec(spec, &tolerations[i])
	}
}

// addNamespace fills in deploy.Namespace. An explicit namespace on the
// work item wins; otherwise the single namespace labelled as the
// cluster's default is looked up in kubernetes. Exactly one default
// namespace must exist, anything else is an error.
func (s *BatchService) addNamespace(deploy *apps.Deployment, work *apibatch.Detail) error {
	if work.Namespace != "" {
		deploy.Namespace = work.Namespace
		return nil
	}

	selector := fmt.Sprintf("%s=%s,%s=%s", apibatch.ClusterLabel, work.Cluster, apibatch.DefaultLabel, "true")
	namespaces, err := s.kubeClient.CoreV1().Namespaces().List(metav1.ListOptions{LabelSelector: selector})
	if err != nil {
		return err
	}
	if len(namespaces.Items) != 1 {
		return fmt.Errorf("couldn't find default namespace for cluster %v", work.Cluster)
	}
	deploy.Namespace = namespaces.Items[0].Name
	return nil
}

// addOrUpdateTolerationInPodSpec tries to add a toleration to the
// toleration list in PodSpec. If an equivalent toleration (same
// key/operator match) already exists it is replaced, unless it is
// identical in every field, in which case nothing changes.
// Returns true if something was updated, false otherwise.
func addOrUpdateTolerationInPodSpec(spec *v1.PodSpec, toleration *v1.Toleration) bool {
	existing := spec.Tolerations

	var rebuilt []v1.Toleration
	replaced := false
	for i := range existing {
		if !toleration.MatchToleration(&existing[i]) {
			rebuilt = append(rebuilt, existing[i])
			continue
		}
		// Matching entry: bail out early when it is already identical.
		// (TolerationSeconds is a pointer; this compares the pointers,
		// matching the original behavior.)
		if toleration.Key == existing[i].Key &&
			toleration.Value == existing[i].Value &&
			toleration.Effect == existing[i].Effect &&
			toleration.Operator == existing[i].Operator &&
			toleration.TolerationSeconds == existing[i].TolerationSeconds {
			return false
		}
		rebuilt = append(rebuilt, *toleration)
		replaced = true
	}

	// Nothing matched: append the new toleration instead.
	if !replaced {
		rebuilt = append(rebuilt, *toleration)
	}

	spec.Tolerations = rebuilt
	return true
}

// mergeDeployWhenUpgrade copies the scheduling-related fields (selector,
// pod labels, node selection, affinity) from the live deployment into the
// freshly rendered one so that an upgrade does not reschedule the pods.
func (s *BatchService) mergeDeployWhenUpgrade(deploy *apps.Deployment, namespace string) error {
	current, err := s.kubeClient.AppsV1().Deployments(namespace).Get(deploy.Name, metav1.GetOptions{})
	if err != nil {
		return err
	}

	deploy.Spec.Selector = current.Spec.Selector
	deploy.Spec.Template.Labels = current.Spec.Template.Labels

	newSpec := &deploy.Spec.Template.Spec
	oldSpec := &current.Spec.Template.Spec
	newSpec.NodeSelector = oldSpec.NodeSelector
	newSpec.NodeName = oldSpec.NodeName
	newSpec.Affinity = oldSpec.Affinity
	return nil
}

// changeBatchWork moves one work item out of the batch's pending list and
// records it as a success or failure, updating the counters accordingly.
// message carries the failure reason (empty on success).
func changeBatchWork(batch *apibatch.Batch, work *apibatch.Detail, message string, isSuccess bool) {
	batch.PendingCount -= 1

	// Remove the first pending entry for this deployment. Fixed: the
	// original pre-allocated a zero-filled slice of length PendingCount
	// and assigned it to batch.Pending even when no entry matched,
	// silently replacing real pending items with empty ones. Now the
	// list is only rewritten when the entry is actually found.
	for k := range batch.Pending {
		if batch.Pending[k].Deployment == work.Deployment {
			batch.Pending = append(batch.Pending[:k], batch.Pending[k+1:]...)
			break
		}
	}

	done := apibatch.Detail{
		Cluster:    work.Cluster,
		Deployment: work.Deployment,
		Node:       work.Node,
		Namespace:  work.Namespace,
		Message:    message,
		Timestamp:  time.Now(),
	}
	if isSuccess {
		batch.Success = append(batch.Success, done)
		batch.SuccessCount += 1
		return
	}

	batch.FailureCount += 1
	batch.Failure = append(batch.Failure, done)
}

// updateBatch persists the batch to the DB. When isForce is false the
// stored record is consulted first: a batch whose stored status is
// Stopping is not overwritten unless the new state is Stop, so an
// in-flight stop request is never lost. isForce skips that guard.
func updateBatch(batch apibatch.Batch, isForce bool) error {
	if !isForce {
		stored, err := dbbatch.GetBatchByNameAndUserID(orm.NewOrm(), batch.Name, batch.UserID)
		if err != nil {
			return err
		}
		if stored.Status == Stopping && batch.Status != Stop {
			// waiting stop: let the owning worker write the final state.
			return nil
		}
	}
	return dbbatch.UpdateBatch(orm.NewOrm(), transformAPI2DB(batch))
}
