/*
Copyright 2024.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package controller

import (
	"context"
	errors2 "errors"
	"fmt"
	batchv1 "k8s.io/api/batch/v1"
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	cicv1 "schedulerNetAwareController/api/cic.io/v1"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"
	"strconv"
	"time"

	"k8s.io/apimachinery/pkg/runtime"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/log"
)

const (
	// PSJobGroupWorkerRoleLabel is the pod-template label key that records a
	// worker's role within the job group.
	PSJobGroupWorkerRoleLabel    = "cic.io/JobGroup.WorkerRole"
	// PSJobGroupWorkerRolePioneer marks the first worker scheduled onto a
	// node; the node it lands on is where the followers get pinned.
	PSJobGroupWorkerRolePioneer  = "pioneer"
	// PSJobGroupWorkerRoleFollower marks workers pinned to a pioneer's node.
	PSJobGroupWorkerRoleFollower = "follower"
)

// SpecOpts mutates a PodSpec in place (e.g. to pin its NodeName).
type SpecOpts func(spec *corev1.PodSpec)

// PSJobGroupReconciler reconciles a PSJobGroup object.
type PSJobGroupReconciler struct {
	client.Client
	// Scheme is the runtime scheme used to resolve object kinds.
	Scheme *runtime.Scheme
}

// +kubebuilder:rbac:groups=cic.io,resources=psjobgroups,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=cic.io,resources=psjobgroups/status,verbs=get;update;patch
// +kubebuilder:rbac:groups=cic.io,resources=psjobgroups/finalizers,verbs=update
// +kubebuilder:rbac:groups="",resources=nodes;pods;services,verbs=get;list;watch;delete;create
// +kubebuilder:rbac:groups="apps",resources=deployments,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups="batch",resources=jobs,verbs=get;list;watch;create;update;patch;delete

// Reconcile creates the resources backing a new PSJobGroup: one Service
// fronting the server, one server Pod, and one worker Job per worker index.
// Workers are spread across nodeNum nodes: the first worker on each node (the
// "pioneer") is scheduled freely, and the remaining "followers" are pinned to
// whatever node that pioneer landed on.
func (r *PSJobGroupReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
	logger := log.FromContext(ctx)

	jobGroup := cicv1.PSJobGroup{}
	if err := r.Get(ctx, req.NamespacedName, &jobGroup); err != nil {
		if errors.IsNotFound(err) {
			// Resource was deleted; nothing to reconcile.
			logger.Info("删除PSJobGroup资源")
			return ctrl.Result{}, nil
		}
		// BUGFIX: this branch is a real Get failure, not "not found" — the
		// old message ("未找到PSJobGroup") claimed the opposite.
		logger.Error(err, "Failed to get PSJobGroup")
		// TerminalError suppresses the requeue. IgnoreNotFound is no longer
		// needed here: the not-found case already returned above.
		return ctrl.Result{}, reconcile.TerminalError(err)
	}

	// Create the Service fronting the server pod.
	serverService := newServiceForServer(&jobGroup)
	if err := r.Create(ctx, serverService); err != nil {
		logger.Error(err, fmt.Sprintf("Failed to create server service, err=%v", err))
		return ctrl.Result{}, reconcile.TerminalError(err)
	}

	// Create the server pod itself.
	serverPod := newPodForServer(&jobGroup)
	if err := r.Create(ctx, serverPod); err != nil {
		logger.Error(err, fmt.Sprintf("Failed to create server pod, err=%v", err))
		return ctrl.Result{}, reconcile.TerminalError(err)
	}

	// Wait for the server pod to be scheduled so its NodeName is filled in.
	// NOTE(review): a fixed sleep is fragile; watching the pod or requeueing
	// with backoff would be more robust.
	time.Sleep(3 * time.Second)
	serverPodKey := client.ObjectKey{Namespace: serverPod.Namespace, Name: serverPod.Name}
	if err := r.Get(ctx, serverPodKey, serverPod); err != nil {
		// No retry after this failure, but the server pod may already have
		// been created; it might need manual cleanup.
		logger.Error(err, fmt.Sprintf("Fail to get server pod, err=%v", err))
		return ctrl.Result{}, reconcile.TerminalError(err)
	}

	serverPodSchedulerNode := serverPod.Spec.NodeName
	logger.Info(fmt.Sprintf("server被部署至node：%s，准备调度worker pod", serverPodSchedulerNode))

	workerNum := jobGroup.Spec.EnvCfg.WorkerNum
	svcPort := strconv.Itoa(int(serverService.Spec.Ports[0].Port))

	if workerNum == 1 {
		// Single worker: co-locate it with the server.
		workerJob := newJobForWorker(&jobGroup,
			0,
			serverService.Name,
			svcPort,
			PSJobGroupWorkerRolePioneer,
			func(spec *corev1.PodSpec) {
				spec.NodeName = serverPodSchedulerNode
			})
		if err := r.Create(ctx, workerJob); err != nil {
			logger.Error(err, fmt.Sprintf("Failed to create worker pod, err=%v", err))
			return ctrl.Result{}, reconcile.TerminalError(err)
		}
		return ctrl.Result{}, nil
	}

	// todo: nodeNum is fixed at 2 (only two nodes receive workers); this
	// could be made configurable.
	workerIdx := 0
	nodeNum := 2
	followerNum := (workerNum - nodeNum) / nodeNum
	lastRemain := (workerNum - nodeNum) % nodeNum
	jobGroup.Status.WorkerNodeNum = int64(nodeNum)
	if err := r.Update(ctx, &jobGroup); err != nil {
		logger.Error(err, "Failed to update jobGroup's 'WorkerNodeNum' Status", "jobGroupName", jobGroup.Name)
		return ctrl.Result{}, err
	}

	for i := 0; i < nodeNum; i++ {
		// One pioneer per node, placed by the scheduler.
		pioneerJob := newJobForWorker(&jobGroup,
			workerIdx,
			serverService.Name,
			svcPort,
			PSJobGroupWorkerRolePioneer)
		workerIdx++
		if err := r.Create(ctx, pioneerJob); err != nil {
			logger.Error(err, fmt.Sprintf("Failed to create worker pod, err=%v", err))
			return ctrl.Result{}, reconcile.TerminalError(err)
		}

		// Give the pioneer's pod a moment to appear before listing.
		time.Sleep(1 * time.Second)
		var podList corev1.PodList
		if err := r.Client.List(ctx,
			&podList, client.InNamespace(pioneerJob.Namespace)); err != nil {
			logger.Error(err, "Failed to list pods", "workerJob.Namespace", pioneerJob.Namespace)
			return ctrl.Result{}, reconcile.TerminalError(err)
		}
		// BUGFIX: log the list length AFTER listing; it was previously logged
		// before the List call and always printed 0.
		logger.Info(fmt.Sprintf("pioneer寻找：podList 长度为%d", len(podList.Items)))

		// Find the pioneer pod through its owning Job reference.
		var pioneerPod corev1.Pod
		found := false
		for _, each := range podList.Items {
			for _, ownerRef := range each.OwnerReferences {
				logger.Info(fmt.Sprintf("pod对应的ownerref的name为%s, workerJob.Name为%s", ownerRef.Name, pioneerJob.Name))
				if ownerRef.Name == pioneerJob.Name {
					pioneerPod = each
					found = true
					break
				}
			}
			// BUGFIX: stop scanning once found; the old code only broke the
			// inner loop, letting later matching pods overwrite the result.
			if found {
				break
			}
		}
		if !found {
			logger.Info("Failed to find pod", "workerJob.Name", pioneerJob.Name)
			return ctrl.Result{}, reconcile.TerminalError(errors2.New("failed to find pod"))
		}
		nodeName := pioneerPod.Spec.NodeName

		// Pin the followers to the pioneer's node. The last node also takes
		// the division remainder. (Previously two nearly identical loops.)
		followerCount := followerNum
		if i == nodeNum-1 {
			followerCount += lastRemain
		}
		for j := 0; j < followerCount; j++ {
			followerJob := newJobForWorker(&jobGroup,
				workerIdx,
				serverService.Name,
				svcPort,
				PSJobGroupWorkerRoleFollower,
				func(spec *corev1.PodSpec) {
					spec.NodeName = nodeName // force this node
				})
			workerIdx++
			if err := r.Create(ctx, followerJob); err != nil {
				logger.Error(err, fmt.Sprintf("Failed to create worker pod, err=%v", err))
				return ctrl.Result{}, reconcile.TerminalError(err)
			}
		}
	}

	return ctrl.Result{}, nil
}

// SetupWithManager registers this reconciler with the Manager, watching
// PSJobGroup resources.
func (r *PSJobGroupReconciler) SetupWithManager(mgr ctrl.Manager) error {
	builder := ctrl.NewControllerManagedBy(mgr)
	builder = builder.For(&cicv1.PSJobGroup{})
	return builder.Complete(r)
}

// newServiceForServer builds the Service that exposes the job group's server
// pod. The Service spec is taken from the group's SvcTemplate, and the group
// is set as the controlling owner so the Service is garbage-collected with it.
func newServiceForServer(jobGroup *cicv1.PSJobGroup) *corev1.Service {
	controllerRef := metav1.NewControllerRef(jobGroup, cicv1.GroupVersion.WithKind("PSJobGroup"))
	svc := &corev1.Service{
		ObjectMeta: metav1.ObjectMeta{
			Name:            jobGroup.Name + "-server-svc",
			Namespace:       jobGroup.Namespace,
			Labels:          map[string]string{"app": jobGroup.Name + "-server"},
			OwnerReferences: []metav1.OwnerReference{*controllerRef},
		},
		Spec: jobGroup.Spec.Template.SvcTemplate.Spec,
	}
	return svc
}

// newPodForServer builds the server Pod for the job group. The pod uses the
// "cic-scheduler" scheduler and is owned by the group so it is cleaned up
// with it.
func newPodForServer(jobGroup *cicv1.PSJobGroup) *corev1.Pod {
	controllerRef := metav1.NewControllerRef(jobGroup, cicv1.GroupVersion.WithKind("PSJobGroup"))

	// Environment for the server container.
	serverEnv := []corev1.EnvVar{
		{Name: "MODE", Value: "server"},
		{Name: "WORKER_NUM", Value: strconv.Itoa(jobGroup.Spec.EnvCfg.WorkerNum)},
		// The server's data size is the per-worker size times the worker count.
		{Name: "DATA_SIZE", Value: strconv.Itoa(jobGroup.Spec.EnvCfg.WorkerDataSize * jobGroup.Spec.EnvCfg.WorkerNum)},
	}

	return &corev1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name:            jobGroup.Name + "-server",
			Namespace:       jobGroup.Namespace,
			Labels:          map[string]string{"app": jobGroup.Name + "-server"},
			OwnerReferences: []metav1.OwnerReference{*controllerRef},
		},
		Spec: corev1.PodSpec{
			SchedulerName: "cic-scheduler",
			Containers: []corev1.Container{
				{
					Name:  "server",
					Image: jobGroup.Spec.Image,
					Env:   serverEnv,
				},
			},
		},
	}
}

// newJobForWorker builds the i-th worker Job for the job group. svcName and
// svcPort identify the server Service; role is stored in the pod template
// under the PSJobGroupWorkerRoleLabel label. Each specOpts callback may
// further mutate the pod spec (e.g. to pin NodeName to a specific node).
func newJobForWorker(group *cicv1.PSJobGroup, i int, svcName string, svcPort string, role string, specOpts ...SpecOpts) *batchv1.Job {
	// FIX: variadic parameter renamed from misspelled "sepcOpts".
	labels := map[string]string{"app": group.Name + "-worker"}
	ownerRef := metav1.NewControllerRef(group, cicv1.GroupVersion.WithKind("PSJobGroup"))
	ownerRefs := []metav1.OwnerReference{*ownerRef}

	// Environment for the worker container.
	env := []corev1.EnvVar{
		{Name: "MODE", Value: "worker"},
		// CONSISTENCY: use the svcName parameter instead of re-deriving
		// group.Name + "-server-svc"; all callers pass that same value, and
		// the init-container address below already uses svcName.
		{Name: "SERVER_ADDR", Value: svcName + ":" + svcPort},
		{Name: "BATCH_SIZE", Value: strconv.Itoa(group.Spec.EnvCfg.BatchSize)},
		{Name: "WORKER_ID", Value: strconv.Itoa(i)},
		{Name: "DATA_SIZE", Value: strconv.Itoa(group.Spec.EnvCfg.WorkerDataSize)},
		{Name: "WORKER_NUM", Value: strconv.Itoa(group.Spec.EnvCfg.WorkerNum)},
		{Name: "GRADIENT_SIZE", Value: "1"},
	}

	// The init container blocks until the server Service answers, so the
	// worker only starts once the server is reachable.
	addr := fmt.Sprintf("%s %s", svcName, svcPort)
	template := corev1.PodTemplateSpec{
		ObjectMeta: metav1.ObjectMeta{
			Labels: map[string]string{PSJobGroupWorkerRoleLabel: role},
		},
		Spec: corev1.PodSpec{
			InitContainers: []corev1.Container{
				{
					Name:    "init",
					Image:   "registry.cn-shanghai.aliyuncs.com/carl-zyc/busybox:amd64v1",
					Command: []string{"sh", "-c", "until nc -z -v -w30 " + addr + "; do echo \"waiting for server\"; sleep 5; done"},
				},
			},
			RestartPolicy: corev1.RestartPolicyNever,
			SchedulerName: "cic-scheduler",
			Containers: []corev1.Container{
				{
					Name:  "worker",
					Image: group.Spec.Image,
					Env:   env,
				},
			},
		},
	}

	// Apply caller-supplied pod-spec mutations last so they win.
	for _, opt := range specOpts {
		opt(&template.Spec)
	}

	return &batchv1.Job{
		ObjectMeta: metav1.ObjectMeta{
			Name:            group.Name + "-worker-" + strconv.Itoa(i),
			Namespace:       group.Namespace,
			Labels:          labels,
			OwnerReferences: ownerRefs,
		},
		Spec: batchv1.JobSpec{
			Template: template,
		},
	}
}
