package cluster

import (
	"strings"

	v1 "k8s.io/api/core/v1"

	"aicloud.cn/aicloud-operate/pkg/util"
	_const "aicloud.cn/aicloud-operate/pkg/util/const"
)

// getNodeAvailable subtracts the resources already requested by pods on a
// node (podResources) from the node's allocatable list and returns the
// remainder. The allocatable list is modified in place, so callers should
// pass a copy (see buildNodesAvailResourceSummary, which passes a DeepCopy).
func getNodeAvailable(allocatable v1.ResourceList, podResources *K8sResource) v1.ResourceList {
	if podResources == nil {
		return allocatable
	}
	allocated := podResources.buildK8sAllocatedResourceList()
	if allocated == nil {
		return allocatable
	}
	for name, used := range allocated {
		remaining, found := allocatable[name]
		if !found {
			// Resource requested by pods but not reported as allocatable;
			// nothing to subtract from.
			continue
		}
		remaining.Sub(used)
		allocatable[name] = remaining
	}
	return allocatable
}

// buildNodesAvailResourceSummary computes, for every node known to the cache,
// the resources still available after subtracting the requests of all pods
// scheduled on that node. It returns one summary per node, or an error if
// listing pods or nodes fails.
func (c *K8sClusterCache) buildNodesAvailResourceSummary() ([]NodeAvailResourceSummary, error) {
	pods, err := c.listPods()
	if err != nil {
		return nil, err
	}

	// Aggregate resource requests per node. Pods in the Succeeded or Failed
	// phase are skipped because kube-scheduler no longer counts their
	// resource occupation; unscheduled pods (empty NodeName) are skipped too.
	requestsByNode := make(map[string]*K8sResource)
	for _, pod := range pods {
		if len(pod.Spec.NodeName) == 0 ||
			pod.Status.Phase == v1.PodSucceeded ||
			pod.Status.Phase == v1.PodFailed {
			continue
		}
		res, ok := requestsByNode[pod.Spec.NodeName]
		if !ok {
			res = &K8sResource{}
			requestsByNode[pod.Spec.NodeName] = res
		}
		res.AddPodRequest(&pod.Spec)
	}

	nodes, err := c.listNodes()
	if err != nil {
		return nil, err
	}

	var summaries []NodeAvailResourceSummary
	for _, node := range nodes {
		// DeepCopy so getNodeAvailable's in-place subtraction does not
		// mutate the cached node object.
		avail := getNodeAvailable(node.Status.Allocatable.DeepCopy(), requestsByNode[node.Name])
		summaries = append(summaries, NodeAvailResourceSummary{
			Name:      node.Name,
			Available: *NewResource(avail),
		})
	}
	return summaries, nil
}

// GetGPUResourceOfContainer returns the limit declared on the container for
// the given GPU resource name, or 0 when no such limit is set.
func GetGPUResourceOfContainer(container *v1.Container, resourceName v1.ResourceName) int {
	quantity, ok := container.Resources.Limits[resourceName]
	if !ok {
		return 0
	}
	return int(quantity.Value())
}

// IsGPURequiredContainer reports whether the container declares a positive
// limit for any of the known GPU resource names (see GetGPUResAll).
func IsGPURequiredContainer(c *v1.Container) bool {
	for _, name := range GetGPUResAll() {
		if GetGPUResourceOfContainer(c, name) > 0 {
			return true
		}
	}
	return false
}

// GetGPUResAll returns every GPU resource name this package recognizes.
// Currently only the standard NVIDIA GPU resource is listed.
func GetGPUResAll() []v1.ResourceName {
	names := []v1.ResourceName{_const.NVIDIA_GPU_NORMAL}
	return names
}

// NewK8sResource creates a new resource object from a resource list.
// CPU is accumulated in millicores and memory in bytes; hugepages, ephemeral
// storage and the pod-count pseudo-resource are deliberately ignored, and
// every remaining resource (extended resources such as GPUs) is recorded as
// a scalar via AddScalar.
func NewK8sResource(rl v1.ResourceList) *K8sResource {
	r := &K8sResource{
		ScalarResources: make(map[v1.ResourceName]float64),
	}
	for rName, rQuant := range rl {
		// A bare switch replaces the previous if/else-if chain (idiomatic Go).
		switch {
		case rName == v1.ResourceCPU:
			r.MilliCPU += float64(rQuant.MilliValue())
		case rName == v1.ResourceMemory:
			r.Memory += float64(rQuant.Value())
		case strings.HasPrefix(string(rName), v1.ResourceHugePagesPrefix),
			rName == v1.ResourceEphemeralStorage,
			rName == v1.ResourcePods:
			// Intentionally not tracked in this summary.
		default:
			r.AddScalar(rName, float64(rQuant.Value()))
		}
	}
	return r
}

// NewResource builds a Resource summary from a resource list. CPU is
// reported in whole cores (millicores truncated), memory in bytes. Hugepages,
// ephemeral storage and the pod-count pseudo-resource are skipped. A resource
// whose name contains one of the configured GPU prefixes (comma-separated
// env value, default "nvidia") and has a positive quantity is recorded as
// the GPU type and count.
//
// NOTE(review): if a node exposes more than one matching GPU resource name,
// map iteration order makes the reported GpuType/GpuNum nondeterministic —
// confirm nodes only ever expose a single GPU resource type.
func NewResource(rl v1.ResourceList) *Resource {
	r := &Resource{}
	gpuPrefixes := strings.Split(util.GetEnvOrDefault(_const.GPU_RESOURCE_PREFIX_ENV, "nvidia"), ",")

	for rName, rQuant := range rl {
		// A bare switch replaces the previous if/else-if chain (idiomatic Go).
		switch {
		case rName == v1.ResourceCPU:
			r.CPU = int(rQuant.MilliValue() / 1000)
		case rName == v1.ResourceMemory:
			r.Memory = rQuant.Value()
		case strings.HasPrefix(string(rName), v1.ResourceHugePagesPrefix),
			rName == v1.ResourceEphemeralStorage,
			rName == v1.ResourcePods:
			// Intentionally not tracked in this summary.
		case containsGpuIndex(rName.String(), gpuPrefixes):
			if rQuant.Value() > 0 {
				r.GpuType = rName.String()
				r.GpuNum = int(rQuant.Value())
			}
		}
	}
	return r
}

// containsGpuIndex reports whether gpuRes contains any of the given
// substrings (GPU resource-name prefixes). An empty or nil list never matches.
func containsGpuIndex(gpuRes string, indexs []string) bool {
	matched := false
	for _, needle := range indexs {
		if strings.Contains(gpuRes, needle) {
			matched = true
			break
		}
	}
	return matched
}
