package predicates

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/klog"
	"k8s.io/kubernetes/pkg/scheduler/apis/config"
	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/interpodaffinity"
	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodeaffinity"
	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodeports"
	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodeunschedulable"
	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/tainttoleration"
	k8sframework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"

	"volcano.sh/volcano/pkg/scheduler/api"
	"volcano.sh/volcano/pkg/scheduler/framework"
	"volcano.sh/volcano/pkg/scheduler/plugins/util"
	"volcano.sh/volcano/pkg/scheduler/plugins/util/k8s"
)

const (
	// PluginName indicates name of volcano scheduler plugin.
	PluginName = "predicates"

	// GPUTopoPredicate is the key for enabling GPU Topo Predicate in YAML
	GPUTopoPredicate                  = "kmpredicate.GPUTopoEnable"
	// AvailableGPUTypes is the argument key listing GPU types the scheduler
	// should consider when counting a pod's GPU request.
	AvailableGPUTypes                 = "kmpredicate.AvailableGPUTypes"
	// SamePriorityScheduleOneCardEnable is the argument key enabling the
	// policy that same-priority workloads prefer sharing one card
	// (passed through to allocateGPUs; exact policy implemented elsewhere).
	SamePriorityScheduleOneCardEnable = "kmpredicate.SamePriorityScheduleOneCardEnable"
)

// predicatesPlugin implements the volcano framework.Plugin interface. It
// registers node/pod predicate checks (upstream k8s filter plugins plus GPU
// checks) on each scheduling session.
type predicatesPlugin struct {
	// Arguments given for the plugin
	pluginArguments framework.Arguments
}

// New return predicate plugin
func New(arguments framework.Arguments) framework.Plugin {
	return &predicatesPlugin{pluginArguments: arguments}
}

// Name returns the unique name of this scheduler plugin.
func (pp *predicatesPlugin) Name() string {
	const name = PluginName
	return name
}

// predicateEnable holds the feature switches parsed from the plugin's
// configuration arguments (see enablePredicate).
type predicateEnable struct {
	// gpuTopoEnable enables topology-aware GPU placement in allocateGPUs.
	gpuTopoEnable                     bool
	// samePriorityScheduleOneCardEnable enables the same-priority
	// one-card scheduling policy in allocateGPUs and
	// checkNodeGPUPredicate.
	samePriorityScheduleOneCardEnable bool
	// availableGPUTypes is the set of GPU type names considered when
	// computing a task's GPU resource request.
	availableGPUTypes                 map[string]bool
}

// enablePredicate parses the plugin arguments into a predicateEnable
// configuration. Unset arguments keep their defaults: both switches off and
// an empty (non-nil) GPU-type set.
//
// The plugin is enabled through the scheduler configuration, e.g.:
//
//	actions: "reclaim, allocate, backfill, preempt"
//	tiers:
//	- plugins:
//	  - name: priority
//	  - name: gang
//	  - name: conformance
//	- plugins:
//	  - name: drf
//	  - name: predicates
//	  - name: proportion
//	  - name: nodeorder
func enablePredicate(args framework.Arguments) predicateEnable {
	// Zero value gives both booleans their "disabled" default; the map must
	// be allocated so GetMap can fill it in place.
	var cfg predicateEnable
	cfg.availableGPUTypes = map[string]bool{}

	// Overwrite defaults with any user-supplied argument values.
	args.GetBool(&cfg.gpuTopoEnable, GPUTopoPredicate)
	args.GetMap(cfg.availableGPUTypes, AvailableGPUTypes)
	args.GetBool(&cfg.samePriorityScheduleOneCardEnable, SamePriorityScheduleOneCardEnable)

	return cfg
}

// OnSessionOpen registers, for the given session:
//   - event handlers that keep the local PodLister/nodeMap caches in sync
//     with allocate/deallocate decisions and patch GPU assignments onto pods;
//   - a predicate function that runs the upstream k8s filter plugins
//     (NodeUnschedulable, NodeAffinity, NodePorts, TaintToleration,
//     InterPodAffinity) plus the node pod-count and GPU checks.
func (pp *predicatesPlugin) OnSessionOpen(ssn *framework.Session) {
	pl := util.NewPodLister(ssn)
	pods, _ := pl.List(labels.NewSelector())
	nodeMap, nodeSlice := util.GenerateNodeMapAndSlice(ssn.Nodes)

	predicate := enablePredicate(pp.pluginArguments)

	kubeClient := ssn.KubeClient()
	// Register event handlers to update task info in PodLister & nodeMap
	ssn.AddEventHandler(&framework.EventHandler{
		AllocateFunc: func(event *framework.Event) {
			pod := pl.UpdateTask(event.Task, event.Task.NodeName)

			nodeName := event.Task.NodeName
			node, found := nodeMap[nodeName]
			if !found {
				klog.Errorf("predicates, update pod %s/%s allocate to NOT EXIST node [%s]", pod.Namespace, pod.Name, nodeName)
				return
			}
			// GPU patching for plain allocation is handled by AddPatchFunc;
			// the previous inline implementation is kept below for reference.
			// gpuCount := api.GetGPUCountOfPod(pod, predicate.availableGPUTypes)
			// if gpuCount > 0 {
			// 	nodeInfo, _ := ssn.Nodes[nodeName]
			// 	ids := allocateGPUs(pod, nodeInfo, gpuCount, predicate.gpuTopoEnable)
			// 	if len(ids) < 0 {
			// 		klog.Errorf("The node %s can't place the pod %s in ns %s", pod.Spec.NodeName, pod.Name, pod.Namespace)
			// 		return
			// 	}
			// 	patch := api.AddGPUInfoPatch(nodeInfo, pod, ids)
			// 	pod, err := kubeClient.CoreV1().Pods(pod.Namespace).Patch(context.TODO(), pod.Name, types.JSONPatchType, []byte(patch), metav1.PatchOptions{})
			// 	if err != nil {
			// 		klog.Errorf("Patch pod %s failed with patch %s: %v", pod.Name, patch, err)
			// 		return
			// 	}
			// 	// update device info
			// 	klog.V(4).Infof("predicates pod %s gpu ids: %+v", pod.Name, ids)
			// 	event.Task.Pod = pod
			// 	// nodeInfo.AllocateGPUResource(event.Task)
			// 	klog.V(4).Infof("predicates with gpu sharing, update pod %s/%s allocate to node [%s]", pod.Namespace, pod.Name, nodeName)
			// }

			node.AddPod(pod)
			klog.V(4).Infof("predicates, update pod %s/%s allocate to node [%s]", pod.Namespace, pod.Name, nodeName)
		},
		AddPatchFunc: func(event *framework.Event) {
			pod := pl.UpdateTask(event.Task, event.Task.NodeName)
			nodeName := event.Task.NodeName
			// Preserve the locally-known priority across the API patch
			// round-trip: the pod object returned by Patch may not carry it.
			priority := pod.Spec.Priority
			gpuCount := uint(len(event.Task.InitResreq.GPUDevices))
			if gpuCount > 0 {
				// Fix: guard against a missing node instead of passing a nil
				// NodeInfo into allocateGPUs.
				nodeInfo, found := ssn.Nodes[nodeName]
				if !found {
					klog.Errorf("predicates, update pod %s/%s allocate to NOT EXIST node [%s]", pod.Namespace, pod.Name, nodeName)
					return
				}
				ids := allocateGPUs(pod, nodeInfo, gpuCount, predicate.gpuTopoEnable, predicate.samePriorityScheduleOneCardEnable)
				// Fix: len() is never negative; an empty slice means the GPUs
				// could not be allocated on this node.
				if len(ids) == 0 {
					klog.Errorf("The node %s can't place the pod %s in ns %s", pod.Spec.NodeName, pod.Name, pod.Namespace)
					return
				}
				patch := api.AddGPUInfoPatch(nodeInfo, pod, ids)
				pod, err := kubeClient.CoreV1().Pods(pod.Namespace).Patch(context.TODO(), pod.Name, types.JSONPatchType, []byte(patch), metav1.PatchOptions{})
				if err != nil {
					klog.Errorf("Patch pod %s failed with patch %s: %v", pod.Name, patch, err)
					return
				}
				// update device info
				klog.V(3).Infof("predicates patch pod %s gpu ids: %+v", pod.Name, ids)
				pod.Spec.Priority = priority
				event.Task.Pod = pod
				klog.V(4).Infof("predicates with gpu sharing, update pod %s/%s allocate to node [%s]", pod.Namespace, pod.Name, nodeName)
			}
		},
		DeallocateFunc: func(event *framework.Event) {
			pod := pl.UpdateTask(event.Task, "")
			nodeName := event.Task.NodeName
			node, found := nodeMap[nodeName]
			if !found {
				klog.Errorf("predicates, update pod %s/%s allocate from NOT EXIST node [%s]", pod.Namespace, pod.Name, nodeName)
				return
			}

			// GPU un-patching is handled by RemovePatchFunc; the previous
			// inline implementation is kept below for reference.
			// if api.GetGPUCountOfPod(pod, predicate.availableGPUTypes) > 0 {
			// 	patch := api.RemoveGPUInfoPatch()
			// 	_, err := kubeClient.CoreV1().Pods(pod.Namespace).Patch(context.TODO(), pod.Name, types.JSONPatchType, []byte(patch), metav1.PatchOptions{})
			// 	if err != nil {
			// 		klog.Errorf("Patch pod %s failed with patch %s: %v", pod.Name, patch, err)
			// 		return
			// 	}
			// 	klog.V(4).Infof("predicates with gpu sharing, update pod %s/%s deallocate from node [%s]", pod.Namespace, pod.Name, nodeName)
			// }

			node.RemovePod(pod)
			klog.V(4).Infof("predicates, update pod %s/%s deallocate from node [%s]", pod.Namespace, pod.Name, nodeName)

		},
		RemovePatchFunc: func(event *framework.Event) {
			pod := pl.UpdateTask(event.Task, "")
			nodeName := event.Task.NodeName
			if len(event.Task.InitResreq.GPUDevices) > 0 {
				if len(pod.Annotations) > 0 {
					ids := pod.Annotations[api.GPUIndex]
					klog.V(3).Infof("predicates unpatch pod %s gpu ids: %+v", pod.Name, ids)
				}
				patch := api.RemoveGPUInfoPatch()
				_, err := kubeClient.CoreV1().Pods(pod.Namespace).Patch(context.TODO(), pod.Name, types.JSONPatchType, []byte(patch), metav1.PatchOptions{})
				if err != nil {
					klog.Errorf("Patch pod %s failed with patch %s: %v", pod.Name, patch, err)
					return
				}
				klog.V(4).Infof("predicates with gpu sharing, update pod %s/%s deallocate from node [%s]", pod.Namespace, pod.Name, nodeName)
			}
		},
	})

	// Initialize k8s plugins
	// TODO: Add more predicates, k8s.io/kubernetes/pkg/scheduler/framework/plugins/legacy_registry.go
	handle := k8s.NewFrameworkHandle(pods, nodeSlice)
	// 1. NodeUnschedulable
	plugin, _ := nodeunschedulable.New(nil, handle)
	nodeUnscheduleFilter := plugin.(*nodeunschedulable.NodeUnschedulable)
	// 2. NodeAffinity
	plugin, _ = nodeaffinity.New(nil, handle)
	nodeAffinityFilter := plugin.(*nodeaffinity.NodeAffinity)
	// 3. NodePorts
	plugin, _ = nodeports.New(nil, handle)
	nodePortFilter := plugin.(*nodeports.NodePorts)
	// 4. TaintToleration
	plugin, _ = tainttoleration.New(nil, handle)
	tolerationFilter := plugin.(*tainttoleration.TaintToleration)
	// 5. InterPodAffinity
	plArgs := &config.InterPodAffinityArgs{}
	plugin, _ = interpodaffinity.New(plArgs, handle)
	podAffinityFilter := plugin.(*interpodaffinity.InterPodAffinity)

	ssn.AddPredicateFn(pp.Name(), func(task *api.TaskInfo, node *api.NodeInfo) error {
		nodeInfo, found := nodeMap[node.Name]
		if !found {
			return fmt.Errorf("failed to predicates, node info for %s not found", node.Name)
		}

		// Reject nodes that already run the maximum number of pods.
		if node.Allocatable.MaxTaskNum <= len(nodeInfo.Pods) {
			klog.V(4).Infof("NodePodNumber predicates Task <%s/%s> on Node <%s> failed",
				task.Namespace, task.Name, node.Name)
			return api.NewFitError(task, node, api.NodePodNumberExceeded)
		}

		state := k8sframework.NewCycleState()
		// CheckNodeUnschedulable
		status := nodeUnscheduleFilter.Filter(context.TODO(), state, task.Pod, nodeInfo)
		if !status.IsSuccess() {
			return fmt.Errorf("plugin %s predicates failed %s", nodeunschedulable.Name, status.Message())
		}

		// Check NodeAffinity
		status = nodeAffinityFilter.Filter(context.TODO(), state, task.Pod, nodeInfo)
		if !status.IsSuccess() {
			return fmt.Errorf("plugin %s predicates failed %s", nodeaffinity.Name, status.Message())
		}

		// Check NodePorts. PreFilter stores the pod's ports in the cycle
		// state; Filter then reads them from there (the pod argument appears
		// unused by this plugin version — NOTE(review): confirm against the
		// vendored k8s NodePorts implementation).
		nodePortFilter.PreFilter(context.TODO(), state, task.Pod)
		status = nodePortFilter.Filter(context.TODO(), state, nil, nodeInfo)
		if !status.IsSuccess() {
			// Fix: report the NodePorts plugin name, not NodeAffinity's.
			return fmt.Errorf("plugin %s predicates failed %s", nodeports.Name, status.Message())
		}

		// PodToleratesNodeTaints: TaintToleration
		status = tolerationFilter.Filter(context.TODO(), state, task.Pod, nodeInfo)
		if !status.IsSuccess() {
			return fmt.Errorf("plugin %s predicates failed %s", tainttoleration.Name, status.Message())
		}

		// InterPodAffinity Predicate
		status = podAffinityFilter.PreFilter(context.TODO(), state, task.Pod)
		if !status.IsSuccess() {
			return fmt.Errorf("plugin %s pre-predicates failed %s", interpodaffinity.Name, status.Message())
		}

		status = podAffinityFilter.Filter(context.TODO(), state, task.Pod, nodeInfo)
		if !status.IsSuccess() {
			return fmt.Errorf("plugin %s predicates failed %s", interpodaffinity.Name, status.Message())
		}

		// GPU predicate: compute the task's GPU request, then check that the
		// node can host it (checkNodeGPUPredicate assumes the GPU resources
		// on the node as a side effect).
		task.InitTaskGPUResource(predicate.availableGPUTypes)
		klog.V(4).Infof("[InitTaskGPUResource] Predicate task: %s, InitTaskGPUResource: %v", task.Name, api.GetGPUDeviceResources(task.InitResreq))
		if len(task.InitResreq.GPUDevices) > 0 {
			// checkNodeGPUPredicate assume pod gpu resource on node
			fit, err := checkNodeGPUPredicate(task, node, predicate.samePriorityScheduleOneCardEnable)
			if err != nil {
				return err
			}

			klog.V(4).Infof("checkNodeGPUPredicate predicates Task <%s/%s> on Node <%s>: fit %v",
				task.Namespace, task.Name, node.Name, fit)
		}
		return nil
	})
}

// OnSessionClose is a no-op: this plugin keeps no state across sessions, so
// there is nothing to clean up.
func (pp *predicatesPlugin) OnSessionClose(ssn *framework.Session) {}