package k8sutil

import (
	"context"
	"fmt"
	"gitee.com/hexug/go-tools/k8s"
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"sort"
)

// NodeResourceSummary aggregates the resource picture of a single node:
// the summed container requests/limits of the pods scheduled on it, plus
// the node's own allocatable and capacity figures, for CPU and memory.
type NodeResourceSummary struct {
	NodeName       string            // name of the node these figures describe
	CPURequests    resource.Quantity // sum of container CPU requests across pods on the node
	CPULimits      resource.Quantity // sum of container CPU limits across pods on the node
	MemoryRequests resource.Quantity // sum of container memory requests across pods on the node
	MemoryLimits   resource.Quantity // sum of container memory limits across pods on the node

	CPUAllocatable    resource.Quantity // node.Status.Allocatable[cpu]
	MemoryAllocatable resource.Quantity // node.Status.Allocatable[memory]
	CPUCapacity       resource.Quantity // node.Status.Capacity[cpu]
	MemoryCapacity    resource.Quantity // node.Status.Capacity[memory]
}

// ResourceCollector queries the Kubernetes API to build per-node
// resource summaries.
type ResourceCollector struct {
	clientset *kubernetes.Clientset
	// NOTE(review): storing a context in a struct is discouraged in Go —
	// contexts should flow as the first parameter of each method. Kept as-is
	// because the List calls read r.ctx; consider refactoring the method
	// signatures to accept a ctx instead.
	ctx       context.Context
}

// NewResourceCollector builds a ResourceCollector using the default client
// configuration resolved by the k8s helper package (in-cluster or local).
// The returned collector uses a background context for all API calls.
func NewResourceCollector() (*ResourceCollector, error) {
	clientset, _, err := k8s.GetClientSet()
	if err != nil {
		// Wrap so callers can tell which layer failed while errors.Is/As still work.
		return nil, fmt.Errorf("building kubernetes clientset: %w", err)
	}
	return &ResourceCollector{
		clientset: clientset,
		ctx:       context.Background(),
	}, nil
}
// NewResourceCollectorFromKubeConfig builds a ResourceCollector from an
// explicit kubeconfig file path. The returned collector uses a background
// context for all API calls.
func NewResourceCollectorFromKubeConfig(kubeConfigPath string) (*ResourceCollector, error) {
	clientset, _, err := k8s.GetClientSetFromKubeConfig(kubeConfigPath)
	if err != nil {
		// Include the path so a bad/missing kubeconfig is diagnosable from the error.
		return nil, fmt.Errorf("building clientset from kubeconfig %q: %w", kubeConfigPath, err)
	}
	return &ResourceCollector{
		clientset: clientset,
		ctx:       context.Background(),
	}, nil
}

// GetAllNodesResources returns resource summaries for every node in the
// cluster by listing all nodes and delegating to GetNodesResources.
func (r *ResourceCollector) GetAllNodesResources() ([]NodeResourceSummary, error) {
	nodes, err := r.clientset.CoreV1().Nodes().List(r.ctx, metav1.ListOptions{})
	if err != nil {
		return nil, fmt.Errorf("listing nodes: %w", err)
	}

	// Pre-size: the final length is known.
	nodeNames := make([]string, 0, len(nodes.Items))
	for _, n := range nodes.Items {
		nodeNames = append(nodeNames, n.Name)
	}

	return r.GetNodesResources(nodeNames)
}

// GetNodeResources returns the resource summary for a single named node.
// It reports an error when the node is unknown to the cluster.
func (r *ResourceCollector) GetNodeResources(nodeName string) (*NodeResourceSummary, error) {
	summaries, err := r.GetNodesResources([]string{nodeName})
	if err != nil {
		return nil, err
	}
	if len(summaries) > 0 {
		return &summaries[0], nil
	}
	return nil, fmt.Errorf("node %s not found or has no pods", nodeName)
}

// GetNodesResources returns request/limit/allocatable/capacity summaries for
// the named nodes, aggregating pods from all namespaces.
//
// Fixes over a naive per-container sum:
//   - terminated pods (phase Succeeded/Failed) no longer hold node resources
//     and are skipped, matching `kubectl describe node`;
//   - init containers follow the scheduler's accounting rule: a pod's
//     effective value is max(sum of app containers, largest init container);
//   - a pod whose NodeName matches a requested name that has no Node object
//     can no longer cause a nil-pointer dereference;
//   - results are sorted by node name so output is deterministic
//     (map iteration order is random in Go).
//
// NOTE(review): pod.Spec.Overhead (RuntimeClass overhead) is not included in
// the sums — confirm whether that matters for this cluster.
func (r *ResourceCollector) GetNodesResources(nodeNames []string) ([]NodeResourceSummary, error) {
	pods, err := r.clientset.CoreV1().Pods("").List(r.ctx, metav1.ListOptions{})
	if err != nil {
		return nil, fmt.Errorf("listing pods: %w", err)
	}

	nodes, err := r.clientset.CoreV1().Nodes().List(r.ctx, metav1.ListOptions{})
	if err != nil {
		return nil, fmt.Errorf("listing nodes: %w", err)
	}

	nodeSet := make(map[string]struct{}, len(nodeNames))
	for _, name := range nodeNames {
		nodeSet[name] = struct{}{}
	}

	summaries := make(map[string]*NodeResourceSummary, len(nodeNames))
	for _, node := range nodes.Items {
		if _, ok := nodeSet[node.Name]; !ok {
			continue
		}
		summaries[node.Name] = &NodeResourceSummary{
			NodeName:          node.Name,
			CPUAllocatable:    node.Status.Allocatable[v1.ResourceCPU],
			MemoryAllocatable: node.Status.Allocatable[v1.ResourceMemory],
			CPUCapacity:       node.Status.Capacity[v1.ResourceCPU],
			MemoryCapacity:    node.Status.Capacity[v1.ResourceMemory],
		}
	}

	for _, pod := range pods.Items {
		sum, ok := summaries[pod.Spec.NodeName]
		if !ok {
			// Pod is not on a requested node (or the requested name has no
			// Node object) — skipping here also prevents a nil dereference.
			continue
		}
		// Terminated pods have released their resources.
		if pod.Status.Phase == v1.PodSucceeded || pod.Status.Phase == v1.PodFailed {
			continue
		}

		var cpuReq, cpuLim, memReq, memLim resource.Quantity
		for _, c := range pod.Spec.Containers {
			cpuReq.Add(c.Resources.Requests[v1.ResourceCPU])
			cpuLim.Add(c.Resources.Limits[v1.ResourceCPU])
			memReq.Add(c.Resources.Requests[v1.ResourceMemory])
			memLim.Add(c.Resources.Limits[v1.ResourceMemory])
		}
		// Scheduler rule: effective pod value is
		// max(sum of app containers, largest single init container).
		for _, c := range pod.Spec.InitContainers {
			maxQuantity(&cpuReq, c.Resources.Requests[v1.ResourceCPU])
			maxQuantity(&cpuLim, c.Resources.Limits[v1.ResourceCPU])
			maxQuantity(&memReq, c.Resources.Requests[v1.ResourceMemory])
			maxQuantity(&memLim, c.Resources.Limits[v1.ResourceMemory])
		}

		sum.CPURequests.Add(cpuReq)
		sum.CPULimits.Add(cpuLim)
		sum.MemoryRequests.Add(memReq)
		sum.MemoryLimits.Add(memLim)
	}

	results := make([]NodeResourceSummary, 0, len(summaries))
	for _, s := range summaries {
		results = append(results, *s)
	}
	// Deterministic ordering for callers and printed output.
	sort.Slice(results, func(i, j int) bool {
		return results[i].NodeName < results[j].NodeName
	})

	return results, nil
}

// maxQuantity raises *dst to q when q is the larger quantity (helper for the
// init-container accounting rule).
func maxQuantity(dst *resource.Quantity, q resource.Quantity) {
	if q.Cmp(*dst) > 0 {
		*dst = q
	}
}

// PrintSummary writes a human-readable report for the node to stdout:
// capacity, allocatable, requests and limits for CPU and memory, plus
// request/limit utilization relative to the allocatable amount.
func (s *NodeResourceSummary) PrintSummary() {
	fmt.Printf("=== Node: %s ===\n", s.NodeName)
	// Labels padded to a common width so the values line up in one column
	// (the Requests/Limits rows were previously misaligned).
	fmt.Printf("CPU Capacity:    %s\n", formatCPU(s.CPUCapacity))
	fmt.Printf("CPU Allocatable: %s\n", formatCPU(s.CPUAllocatable))
	fmt.Printf("CPU Requests:    %s\n", formatCPU(s.CPURequests))
	fmt.Printf("CPU Limits:      %s\n", formatCPU(s.CPULimits))
	fmt.Printf("Mem Capacity:    %s\n", formatMemory(s.MemoryCapacity))
	fmt.Printf("Mem Allocatable: %s\n", formatMemory(s.MemoryAllocatable))
	fmt.Printf("Mem Requests:    %s\n", formatMemory(s.MemoryRequests))
	fmt.Printf("Mem Limits:      %s\n", formatMemory(s.MemoryLimits))

	// Utilization relative to allocatable. Guard with MilliValue(): Quantity.Value()
	// rounds fractional CPUs up, so it matches the divisor used below exactly
	// and avoids division by zero on nodes reporting no allocatable CPU.
	if s.CPUAllocatable.MilliValue() > 0 {
		alloc := float64(s.CPUAllocatable.MilliValue())
		reqRatio := float64(s.CPURequests.MilliValue()) / alloc * 100
		limRatio := float64(s.CPULimits.MilliValue()) / alloc * 100
		fmt.Printf("CPU Utilization (Req): %.2f%%, (Limit): %.2f%%\n", reqRatio, limRatio)
	}
	if s.MemoryAllocatable.Value() > 0 {
		alloc := float64(s.MemoryAllocatable.Value())
		reqRatio := float64(s.MemoryRequests.Value()) / alloc * 100
		limRatio := float64(s.MemoryLimits.Value()) / alloc * 100
		fmt.Printf("Mem Utilization (Req): %.2f%%, (Limit): %.2f%%\n", reqRatio, limRatio)
	}
	fmt.Println()
}

// formatCPU renders a CPU quantity for display, switching to whole-core
// units ("核") once the value reaches 1000m (one full core); smaller
// values fall back to the quantity's native string form.
func formatCPU(cpu resource.Quantity) string {
	if m := cpu.MilliValue(); m >= 1000 {
		return fmt.Sprintf("%.2f核", float64(m)/1000)
	}
	return cpu.String()
}

// formatMemory renders a memory quantity with a human-friendly binary
// unit (KB/MB/GB/TB, 1024-based), falling back to raw bytes below 1 KB.
func formatMemory(mem resource.Quantity) string {
	bytes := mem.Value()

	// Units checked largest-first; the first threshold the value reaches wins.
	units := []struct {
		threshold int64
		suffix    string
	}{
		{1 << 40, "TB"},
		{1 << 30, "GB"},
		{1 << 20, "MB"},
		{1 << 10, "KB"},
	}
	for _, u := range units {
		if bytes >= u.threshold {
			return fmt.Sprintf("%.2f%s", float64(bytes)/float64(u.threshold), u.suffix)
		}
	}
	return fmt.Sprintf("%dB", bytes)
}

// ------------------------- 排序部分 -------------------------

// SortNodesByCPU sorts the slice in place by CPU requests, ascending by
// default and descending when desc is true.
func SortNodesByCPU(nodes []NodeResourceSummary, desc bool) {
	less := func(a, b *NodeResourceSummary) bool {
		return a.CPURequests.MilliValue() < b.CPURequests.MilliValue()
	}
	sort.Slice(nodes, func(i, j int) bool {
		if desc {
			// Descending order is ascending with the operands swapped.
			return less(&nodes[j], &nodes[i])
		}
		return less(&nodes[i], &nodes[j])
	})
}

// SortNodesByMemory sorts the slice in place by memory requests, ascending
// by default and descending when desc is true.
func SortNodesByMemory(nodes []NodeResourceSummary, desc bool) {
	less := func(a, b *NodeResourceSummary) bool {
		return a.MemoryRequests.Value() < b.MemoryRequests.Value()
	}
	sort.Slice(nodes, func(i, j int) bool {
		if desc {
			// Descending order is ascending with the operands swapped.
			return less(&nodes[j], &nodes[i])
		}
		return less(&nodes[i], &nodes[j])
	})
}
