package predicates

import (
	"encoding/json"
	"errors"
	"fmt"
	"math/rand"
	"sort"
	"strings"

	v1 "k8s.io/api/core/v1"
	"k8s.io/klog"

	"volcano.sh/volcano/pkg/scheduler/api"
)

// checkNodeGPUPredicate checks if a gpu pod can be scheduled on a node.
// It first verifies the node exposes every GPU scalar resource type the task
// requests, then verifies the node's GPU devices can hold the request under
// the share-mode / priority co-location rules (see assume).
func checkNodeGPUPredicate(task *api.TaskInfo, nodeInfo *api.NodeInfo, samePriorityScheduleOneCardEnable bool) (bool, error) {
	klog.V(4).Infof("checkNodeGPUPredicate: %+v", nodeInfo)
	// Node-level check: requested GPU resource types must exist on the node.
	if !isNodeAvailable(task, nodeInfo) {
		return false, fmt.Errorf("scalar resources requested by pod %s are not available on node %s", task.Pod.Name, nodeInfo.Name)
	}
	// Device-level check: enough per-card capacity for the request.
	if !assume(task, nodeInfo, samePriorityScheduleOneCardEnable) {
		return false, fmt.Errorf("no enough gpu resource on devices of node %s", nodeInfo.Name)
	}
	return true, nil
}

// isNodeAvailable reports whether every GPU scalar resource type requested by
// the task is present, with a non-zero quantity, among the node's allocatable
// resources. A missing key reads as the zero value, so one lookup covers both
// the "absent" and the "zero quantity" cases.
func isNodeAvailable(task *api.TaskInfo, nodeInfo *api.NodeInfo) bool {
	requested := getGPUResource(task.Resreq.ScalarResources)
	allocatable := getGPUResource(nodeInfo.Allocatable.ScalarResources)
	for name := range requested {
		if allocatable[name] == 0 {
			return false
		}
	}
	return true
}

// getGPUResource filters scalarResources down to the entries whose resource
// name contains the CGPU keyword, i.e. the GPU-related scalar resources.
func getGPUResource(scalarResources map[v1.ResourceName]float64) map[v1.ResourceName]float64 {
	filtered := map[v1.ResourceName]float64{}
	for name, quantity := range scalarResources {
		if strings.Contains(string(name), api.CGPU) {
			filtered[name] = quantity
		}
	}
	return filtered
}

// assume reports whether the node's GPU devices can satisfy the task's GPU
// request: exclusive requests need enough (future-)empty cards, shared
// requests need enough cards with sufficient leftover memory/core that also
// satisfy the co-location rules below.
func assume(task *api.TaskInfo, node *api.NodeInfo, samePriorityScheduleOneCardEnable bool) bool {
	reqGPUCount := len(task.InitResreq.GPUDevices)
	// Nothing requested: trivially satisfiable. This also guards the
	// GPUDevices[0] accesses below against an index-out-of-range panic.
	if reqGPUCount == 0 {
		return true
	}
	// Every requested device carries the same per-card request, so the first
	// entry is representative.
	reqGPUMem := task.InitResreq.GPUDevices[0].Memory
	reqGPUCore := task.InitResreq.GPUDevices[0].Core
	klog.V(4).Infof("predicates with gpu, pod %s/%s request GPUCount :%v, GPUMem: %v, GPUCore: %v", task.Pod.Namespace, task.Pod.Name, reqGPUCount, reqGPUMem, reqGPUCore)
	if reqGPUMem == 0 && reqGPUCore == 0 {
		// Exclusive mode: the pod needs whole, (future-)empty cards.
		allocatableGPUIds := node.GetFutureEmptyGPUs()
		if len(allocatableGPUIds) < reqGPUCount {
			return false
		}
	} else {
		// Shared mode: check each card's leftover resources and the kinds of
		// pods already on it. Co-location rules:
		//  1. memory-isolation-only pods share a card only with memory-isolation-only pods;
		//  2. core-isolation-only pods share a card only with core-isolation-only pods;
		//  3. memory+core isolation pods share a card only with memory+core isolation pods;
		//  4. optionally, only pods of the same priority share a card.
		candidateDevs := 0
		for _, gpuDev := range node.FutureIdle().GPUDevices {
			if !gpuDev.IsResourceAvailable(reqGPUMem, reqGPUCore) || !gpuDev.IsSameShareModePod(task.Pod) {
				continue
			}
			if samePriorityScheduleOneCardEnable && !gpuDev.IsSamePriorityPod(task.Pod) {
				continue
			}
			candidateDevs++
		}
		klog.V(4).Infof("assume candidateDevs: %v", candidateDevs)
		if candidateDevs < reqGPUCount {
			return false
		}
	}
	return true
}

// allocateGPUs allocate the GPU IDs to the pod.
// It computes the per-card memory/core request, builds the bin-pack-optimal
// candidate device sets, then picks one at random — or, for multi-card
// requests with GPU topology awareness enabled, the set with the best
// topology score. It returns an empty slice when no placement exists.
func allocateGPUs(pod *v1.Pod, node *api.NodeInfo, reqGPUCount uint, gpuTopoEnable bool, samePriorityScheduleOneCardEnable bool) []int {
	candidateDevIDs := make([]int, 0)
	// Guard the divisions below: a zero-card request would otherwise panic
	// with an integer division by zero, and has nothing to allocate anyway.
	if reqGPUCount == 0 {
		klog.Errorf("The pod %s in ns %s requests zero GPUs on node %s.", pod.Name, pod.Namespace, node.Name)
		return candidateDevIDs
	}
	reqGPUMem := api.GetCGPUResourceOfPod(pod, api.CGPUMemory) / reqGPUCount
	reqGPUCore := api.GetCGPUResourceOfPod(pod, api.CGPUCore) / reqGPUCount
	binPackCandidateDevIDs := make([][]int, 0)

	klog.V(4).Infof("allocate device ids, pod %s/%s request GPUCount :%v, GPUMem: %v, GPUCore: %v", pod.Namespace, pod.Name, reqGPUCount, reqGPUMem, reqGPUCore)
	if reqGPUMem == 0 && reqGPUCore == 0 {
		// Exclusive mode: the pod takes whole cards, enumerate every
		// combination of the (future-)empty cards.
		allocatableGPUIds := node.GetFutureEmptyGPUs()
		klog.V(4).Infof("Alone job, availableGPUs: %+v on node: %s.", allocatableGPUIds, node.Name)
		binPackCandidateDevIDs = combine(allocatableGPUIds, reqGPUCount)
		klog.V(4).Infof("Alone job, after dfs combine: %+v, on node: %s.", binPackCandidateDevIDs, node.Name)
	} else {
		// Shared mode: a card qualifies only if it has enough leftover
		// memory/core AND obeys the co-location rules:
		//  1. memory-isolation-only pods share a card only with memory-isolation-only pods;
		//  2. core-isolation-only pods share a card only with core-isolation-only pods;
		//  3. memory+core isolation pods share a card only with memory+core isolation pods;
		//  4. optionally, only pods of the same priority share a card.
		allocatableGPUDevices := make(map[int]uint)
		for idx, gpuDev := range node.FutureIdle().GPUDevices {
			if !gpuDev.IsResourceAvailable(reqGPUMem, reqGPUCore) || !gpuDev.IsSameShareModePod(pod) {
				continue
			}
			if samePriorityScheduleOneCardEnable && !gpuDev.IsSamePriorityPod(pod) {
				continue
			}
			allocatableGPUDevices[idx] = gpuDev.Memory
		}
		klog.V(4).Infof("allocateGPUs allocatableGPUDeivces: %v", allocatableGPUDevices)
		if len(allocatableGPUDevices) > 0 {
			binPackCandidateDevIDs = binPackAllocate(allocatableGPUDevices, reqGPUCount)
		}
	}
	if len(binPackCandidateDevIDs) == 0 {
		klog.Errorf("The node %s can't place the pod %s [reqGPUCount: %v, reqGPUMem: %v] in ns %s.",
			node.Name,
			pod.Name,
			reqGPUCount,
			reqGPUMem,
			pod.Namespace)
		return candidateDevIDs
	}
	// Randomize among equally good candidates to spread load across cards.
	candidateDevIDs = binPackCandidateDevIDs[rand.Intn(len(binPackCandidateDevIDs))]
	if reqGPUCount > 1 && gpuTopoEnable {
		// Topology-aware refinement; on error keep the random pick.
		if gpuTopoAllocateDevIDs, err := gpuTopoAllocate(binPackCandidateDevIDs, node); err == nil {
			candidateDevIDs = gpuTopoAllocateDevIDs
		}
	}
	klog.V(4).Infof("Allocate GPUs: %+v on node %+v", candidateDevIDs, node.Name)
	return candidateDevIDs
}

// combine enumerates every way of choosing reqGPUCount device IDs out of
// availableGPUs, preserving input order both inside each combination and
// across combinations ("take current" branches explored before "skip").
func combine(availableGPUs []int, reqGPUCount uint) (ans [][]int) {
	want := int(reqGPUCount)
	picked := make([]int, 0, want)
	var walk func(pos int)
	walk = func(pos int) {
		// Full selection reached: snapshot it.
		if len(picked) == want {
			ans = append(ans, append([]int(nil), picked...))
			return
		}
		// Prune: even taking every remaining element cannot reach want.
		if len(picked)+len(availableGPUs)-pos < want {
			return
		}
		// Branch 1: take availableGPUs[pos].
		picked = append(picked, availableGPUs[pos])
		walk(pos + 1)
		picked = picked[:len(picked)-1]
		// Branch 2: skip availableGPUs[pos].
		walk(pos + 1)
	}
	walk(0)
	return
}

// binPackAllocate returns every set of reqGPUCount device IDs whose total cGPU
// memory equals the bin-pack optimum, i.e. the memory sum of the reqGPUCount
// cards with the least free memory. availableGPUsMem maps device ID to the
// card's free cGPU memory. An empty result means no set of that size exists.
func binPackAllocate(availableGPUsMem map[int]uint, reqGPUCount uint) [][]int {
	candidateGpuSet := make([][]int, 0)
	devSlice := make([]int, 0, len(availableGPUsMem))
	for idx := range availableGPUsMem {
		devSlice = append(devSlice, idx)
	}
	// Bin-pack prefers the fullest cards, so sort ascending by free memory.
	sort.Slice(devSlice, func(x, y int) bool {
		return availableGPUsMem[devSlice[x]] < availableGPUsMem[devSlice[y]]
	})
	// The optimal total is the free-memory sum of the first reqGPUCount cards.
	// (Every devSlice entry is a key of the map, so no existence check needed.)
	found := false
	bestGpuSetMemSum := uint(0)
	if reqGPUCount > 0 && uint(len(devSlice)) >= reqGPUCount {
		found = true
		for _, idx := range devSlice[:reqGPUCount] {
			bestGpuSetMemSum += availableGPUsMem[idx]
		}
	}
	klog.V(4).Infof("devSlice: %v", devSlice)
	klog.V(4).Infof("bestGpuSetMemSum: %+v", bestGpuSetMemSum)
	if !found {
		return candidateGpuSet
	}
	// Enumerate every optimal combination under the bin-pack policy.
	if reqGPUCount == 1 {
		// Single card: every card matching the optimum is a candidate set.
		for _, idx := range devSlice {
			if availableGPUsMem[idx] == bestGpuSetMemSum {
				candidateGpuSet = append(candidateGpuSet, []int{idx})
			}
		}
		return candidateGpuSet
	}
	// Multiple cards: enumerate every subset of size reqGPUCount whose memory
	// sum hits the optimum exactly.
	used := make(map[int]bool, len(availableGPUsMem))
	for idx := range availableGPUsMem {
		used[idx] = false
	}
	return reqGPUSum(availableGPUsMem, devSlice, used, bestGpuSetMemSum, len(availableGPUsMem)-1, int(reqGPUCount))
}

// reqGPUSum recursively enumerates subsets of devSlice[0..loop] whose free
// memory sums to exactly remainder, returning every subset with exactly
// reqGPUCount members. used marks the devices taken on the current path;
// entries are restored on backtrack. Element order inside each returned set
// follows map iteration order of used and is therefore unspecified.
func reqGPUSum(availableGPUsMem map[int]uint, devSlice []int, used map[int]bool, remainder uint, loop int, reqGPUCount int) [][]int {
	gpuSet := make([][]int, 0)
	// Exact hit: keep the current selection if it has the right cardinality.
	if remainder == 0 {
		temp := make([]int, 0, reqGPUCount)
		for idx, taken := range used {
			if taken {
				temp = append(temp, idx)
			}
		}
		if len(temp) == reqGPUCount {
			gpuSet = append(gpuSet, temp)
		}
		return gpuSet
	}
	for i := loop; i >= 0; i-- {
		devMem := availableGPUsMem[devSlice[i]]
		// remainder and devMem are unsigned, so the former check
		// `remainder-devMem >= 0` was always true (unsigned subtraction
		// wraps around); compare before subtracting so branches that
		// overshoot the target are actually pruned.
		if used[devSlice[i]] || remainder < devMem {
			continue
		}
		used[devSlice[i]] = true
		if sub := reqGPUSum(availableGPUsMem, devSlice, used, remainder-devMem, i-1, reqGPUCount); len(sub) != 0 {
			gpuSet = append(gpuSet, sub...)
		}
		used[devSlice[i]] = false
	}
	return gpuSet
}

// gpuTopoAllocate picks, among the bin-pack candidate device sets, the one
// with the best GPU interconnect topology score read from the node's GPUTopo
// annotation. It returns an error when the topology information is missing,
// unparseable, or no candidate scores above zero, so the caller can keep its
// non-topology fallback choice.
func gpuTopoAllocate(binPackCandidateDevIDs [][]int, node *api.NodeInfo) ([]int, error) {
	if node.Node.ObjectMeta.Annotations == nil {
		errMsg := "Failed to get GpuTopo, node.ObjectMeta.Annotations is nil."
		klog.Error(errMsg)
		return nil, errors.New(errMsg)
	}
	nodeGPUDevices := []*Device{}
	gpuTopo := node.Node.ObjectMeta.Annotations[api.GPUTopo]
	if err := json.Unmarshal([]byte(gpuTopo), &nodeGPUDevices); err != nil {
		klog.Errorf("Failed to unmarshal GpuTopo, error: %v", err)
		return nil, err
	}
	bestSet := make([]int, 0)
	bestScore := 0
	for _, binPackCandidate := range binPackCandidateDevIDs {
		gpuSet := getBinPackCandidateDevs(nodeGPUDevices, binPackCandidate)
		score := calculateGPUSetScore(gpuSet)
		klog.V(4).Infof("calculateGPUSetScore: gpuSet: %+v, score: %+v.", gpuSet, score)
		if score > bestScore {
			bestSet = binPackCandidate
			bestScore = score
		}
	}
	// No candidate beat the zero score: report it instead of silently
	// returning an empty allocation, which the caller would otherwise use.
	if len(bestSet) == 0 {
		return nil, fmt.Errorf("no candidate device set with a positive topology score on node %s", node.Name)
	}
	klog.V(4).Infof("gpuTopoAllocate bestSet: %+v on node %+v", bestSet, node.Name)
	return bestSet, nil
}

// getBinPackCandidateDevs resolves a candidate list of device indices into
// the matching *Device entries from the node topology description, keeping
// the candidate order; indices without a matching device are skipped.
func getBinPackCandidateDevs(nodeGPUDevices []*Device, binPackCandidate []int) []*Device {
	matched := make([]*Device, 0, len(binPackCandidate))
	for _, wantIdx := range binPackCandidate {
		for _, dev := range nodeGPUDevices {
			if dev.Index != wantIdx {
				continue
			}
			matched = append(matched, dev)
		}
	}
	return matched
}
