package cpu

import (
	"context"
	"fmt"
	"strings"

	autoscalingv1 "k8s.io/api/autoscaling/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
	metrics "k8s.io/metrics/pkg/client/clientset/versioned"
	"k8stools/pkg/config"
	"k8stools/pkg/output"
)

// GetDeploymentCpu 获取Deployment的CPU使用情况
// GetDeploymentCpu gathers CPU usage, requests, limits and replica bounds for
// every Deployment in the configured namespaces and writes the result to a
// timestamped CSV file. Client construction failures abort via panic, matching
// the other entry points in this package.
func GetDeploymentCpu(c *config.Config) {
	restConfig, err := clientcmd.BuildConfigFromFlags("", c.KubeConfig)
	if err != nil {
		panic(err)
	}
	kubeClient, err := kubernetes.NewForConfig(restConfig)
	if err != nil {
		panic(err)
	}
	metricsClient, err := metrics.NewForConfig(restConfig)
	if err != nil {
		panic(err)
	}

	// Accumulate one row per Deployment across all configured namespaces.
	var rows [][]string
	for _, namespace := range c.NameSpace {
		rows = collectDeploymentCpuStats(namespace, kubeClient, metricsClient, rows)
	}

	headers := []string{
		"Namespace", "Deployment",
		"Main CPU Usage (m)", "Sidecar CPU Usage (m)",
		"Main CPU Requests (m)", "Sidecar CPU Requests (m)",
		"Main CPU Limits (m)", "Sidecar CPU Limits (m)",
		"Pod Min Replicas", "Pod Max Replicas",
	}

	// Persist the collected rows as a timestamped CSV via the output package.
	if err := output.OutputDataWithTimestamp("deployment_cpu_info", headers, rows, "csv"); err != nil {
		fmt.Printf("❌ 保存CSV文件失败: %v\n", err)
	}
}

// GetDaemonSetCpu 获取DaemonSet的CPU使用情况
// GetDaemonSetCpu gathers CPU usage, requests, limits and pod scheduling
// status for every DaemonSet in the configured namespaces and writes the
// result to a timestamped CSV file. Client construction failures abort via
// panic, matching the other entry points in this package.
func GetDaemonSetCpu(c *config.Config) {
	restConfig, err := clientcmd.BuildConfigFromFlags("", c.KubeConfig)
	if err != nil {
		panic(err)
	}
	kubeClient, err := kubernetes.NewForConfig(restConfig)
	if err != nil {
		panic(err)
	}
	metricsClient, err := metrics.NewForConfig(restConfig)
	if err != nil {
		panic(err)
	}

	// Accumulate one row per DaemonSet across all configured namespaces.
	var rows [][]string
	for _, namespace := range c.NameSpace {
		rows = collectDaemonSetCpuStats(namespace, kubeClient, metricsClient, rows)
	}

	headers := []string{
		"Namespace", "DaemonSet",
		"Main CPU Usage (m)", "Sidecar CPU Usage (m)",
		"Main CPU Requests (m)", "Sidecar CPU Requests (m)",
		"Main CPU Limits (m)", "Sidecar CPU Limits (m)",
		"Desired Pods", "Current Pods", "Ready Pods",
	}

	// Persist the collected rows as a timestamped CSV via the output package.
	if err := output.OutputDataWithTimestamp("daemonset_cpu_info", headers, rows, "csv"); err != nil {
		fmt.Printf("❌ 保存CSV文件失败: %v\n", err)
	}
}

// GetStatefulSetCpu 获取StatefulSet的CPU使用情况
// GetStatefulSetCpu gathers CPU usage, requests, limits and replica status for
// every StatefulSet in the configured namespaces and writes the result to a
// timestamped CSV file. Client construction failures abort via panic, matching
// the other entry points in this package.
func GetStatefulSetCpu(c *config.Config) {
	restConfig, err := clientcmd.BuildConfigFromFlags("", c.KubeConfig)
	if err != nil {
		panic(err)
	}
	kubeClient, err := kubernetes.NewForConfig(restConfig)
	if err != nil {
		panic(err)
	}
	metricsClient, err := metrics.NewForConfig(restConfig)
	if err != nil {
		panic(err)
	}

	// Accumulate one row per StatefulSet across all configured namespaces.
	var rows [][]string
	for _, namespace := range c.NameSpace {
		rows = collectStatefulSetCpuStats(namespace, kubeClient, metricsClient, rows)
	}

	headers := []string{
		"Namespace", "StatefulSet",
		"Main CPU Usage (m)", "Sidecar CPU Usage (m)",
		"Main CPU Requests (m)", "Sidecar CPU Requests (m)",
		"Main CPU Limits (m)", "Sidecar CPU Limits (m)",
		"Replicas", "Ready Replicas", "Current Replicas",
	}

	// Persist the collected rows as a timestamped CSV via the output package.
	if err := output.OutputDataWithTimestamp("statefulset_cpu_info", headers, rows, "csv"); err != nil {
		fmt.Printf("❌ 保存CSV文件失败: %v\n", err)
	}
}

// GetJobCpu 获取Job的CPU使用情况
// GetJobCpu gathers CPU usage, requests, limits and pod completion status for
// every Job in the configured namespaces and writes the result to a
// timestamped CSV file. Client construction failures abort via panic, matching
// the other entry points in this package.
func GetJobCpu(c *config.Config) {
	restConfig, err := clientcmd.BuildConfigFromFlags("", c.KubeConfig)
	if err != nil {
		panic(err)
	}
	kubeClient, err := kubernetes.NewForConfig(restConfig)
	if err != nil {
		panic(err)
	}
	metricsClient, err := metrics.NewForConfig(restConfig)
	if err != nil {
		panic(err)
	}

	// Accumulate one row per Job across all configured namespaces.
	var rows [][]string
	for _, namespace := range c.NameSpace {
		rows = collectJobCpuStats(namespace, kubeClient, metricsClient, rows)
	}

	headers := []string{
		"Namespace", "Job",
		"Main CPU Usage (m)", "Sidecar CPU Usage (m)",
		"Main CPU Requests (m)", "Sidecar CPU Requests (m)",
		"Main CPU Limits (m)", "Sidecar CPU Limits (m)",
		"Active Pods", "Succeeded Pods", "Failed Pods",
	}

	// Persist the collected rows as a timestamped CSV via the output package.
	if err := output.OutputDataWithTimestamp("job_cpu_info", headers, rows, "csv"); err != nil {
		fmt.Printf("❌ 保存CSV文件失败: %v\n", err)
	}
}

// collectDeploymentCpuStats 收集Deployment CPU统计信息
func collectDeploymentCpuStats(ns string, clientset *kubernetes.Clientset, metricsClient *metrics.Clientset, rows [][]string) [][]string {
	ctx := context.Background()

	deployments, err := clientset.AppsV1().Deployments(ns).List(ctx, metav1.ListOptions{})
	if err != nil {
		fmt.Printf("❌ 获取 %s 命名空间中的 Deployment 失败: %v\n", ns, err)
		return rows
	}

	fmt.Printf("🔍 正在收集 %s 命名空间中 %d 个 Deployment 的CPU信息...\n", ns, len(deployments.Items))

	podMetricsList, err := metricsClient.MetricsV1beta1().PodMetricses(ns).List(ctx, metav1.ListOptions{})
	if err != nil {
		fmt.Printf("⚠️ 获取 %s 命名空间中的 Pod Metrics 失败: %v\n", ns, err)
		// 继续执行，但CPU使用量将为0
	}

	podMetricsMap := make(map[string]map[string]int64)
	for _, podMetrics := range podMetricsList.Items {
		metrics := make(map[string]int64)
		for _, c := range podMetrics.Containers {
			metrics[c.Name] = c.Usage.Cpu().MilliValue()
		}
		podMetricsMap[podMetrics.Name] = metrics
	}

	hpaList, err := clientset.AutoscalingV1().HorizontalPodAutoscalers(ns).List(ctx, metav1.ListOptions{})
	if err != nil {
		fmt.Printf("⚠️ 获取 %s 命名空间中的 HPA 失败: %v\n", ns, err)
		// 继续执行，但HPA信息将不可用
	}

	hpaMap := make(map[string]*autoscalingv1.HorizontalPodAutoscaler)
	for _, hpa := range hpaList.Items {
		hpaMap[hpa.Spec.ScaleTargetRef.Name] = &hpa
	}

	for _, deploy := range deployments.Items {
		selector := deploy.Spec.Selector.MatchLabels
		selectorStr := []string{}
		for k, v := range selector {
			selectorStr = append(selectorStr, fmt.Sprintf("%s=%s", k, v))
		}

		pods, err := clientset.CoreV1().Pods(ns).List(ctx, metav1.ListOptions{
			LabelSelector: strings.Join(selectorStr, ","),
		})
		if err != nil {
			fmt.Printf("❌ 获取 Deployment %s 的 Pods 失败: %v\n", deploy.Name, err)
			continue
		}

		var mainUsage, sidecarUsage, mainRequest, sidecarRequest, mainLimit, sidecarLimit int64
		for _, pod := range pods.Items {
			metrics := podMetricsMap[pod.Name]
			for i, c := range pod.Spec.Containers {
				cpuReq := c.Resources.Requests.Cpu().MilliValue()
				cpuLim := c.Resources.Limits.Cpu().MilliValue()
				cpuUse := metrics[c.Name]
				if i == 0 {
					mainUsage += cpuUse
					mainRequest += cpuReq
					mainLimit += cpuLim
				} else {
					sidecarUsage += cpuUse
					sidecarRequest += cpuReq
					sidecarLimit += cpuLim
				}
			}
		}

		minReplicas := int32(0)
		maxReplicas := *deploy.Spec.Replicas
		if hpa, ok := hpaMap[deploy.Name]; ok {
			minReplicas = *hpa.Spec.MinReplicas
			maxReplicas = hpa.Spec.MaxReplicas
		} else {
			minReplicas = *deploy.Spec.Replicas
		}

		rows = append(rows, []string{
			ns,
			deploy.Name,
			fmt.Sprintf("%d", mainUsage),
			fmt.Sprintf("%d", sidecarUsage),
			fmt.Sprintf("%d", mainRequest),
			fmt.Sprintf("%d", sidecarRequest),
			fmt.Sprintf("%d", mainLimit),
			fmt.Sprintf("%d", sidecarLimit),
			fmt.Sprintf("%d", minReplicas),
			fmt.Sprintf("%d", maxReplicas),
		})
	}
	
	return rows
}

// collectDaemonSetCpuStats 收集DaemonSet CPU统计信息
func collectDaemonSetCpuStats(ns string, clientset *kubernetes.Clientset, metricsClient *metrics.Clientset, rows [][]string) [][]string {
	ctx := context.Background()

	daemonsets, err := clientset.AppsV1().DaemonSets(ns).List(ctx, metav1.ListOptions{})
	if err != nil {
		fmt.Printf("❌ 获取 %s 命名空间中的 DaemonSet 失败: %v\n", ns, err)
		return rows
	}

	fmt.Printf("🔍 正在收集 %s 命名空间中 %d 个 DaemonSet 的CPU信息...\n", ns, len(daemonsets.Items))

	for _, ds := range daemonsets.Items {
		selector := ds.Spec.Selector.MatchLabels
		selectorStr := []string{}
		for k, v := range selector {
			selectorStr = append(selectorStr, fmt.Sprintf("%s=%s", k, v))
		}

		pods, err := clientset.CoreV1().Pods(ns).List(ctx, metav1.ListOptions{
			LabelSelector: strings.Join(selectorStr, ","),
		})
		if err != nil {
			fmt.Printf("❌ 获取 DaemonSet %s 的 Pods 失败: %v\n", ds.Name, err)
			continue
		}

		// 获取Pod的CPU使用情况
		podMetricsList, err := metricsClient.MetricsV1beta1().PodMetricses(ns).List(ctx, metav1.ListOptions{
			LabelSelector: strings.Join(selectorStr, ","),
		})
		if err != nil {
			fmt.Printf("⚠️ 获取 DaemonSet %s 的 Pod Metrics 失败: %v\n", ds.Name, err)
			// 继续执行，但CPU使用量将为0
		}

		podMetricsMap := make(map[string]map[string]int64)
		for _, podMetrics := range podMetricsList.Items {
			metrics := make(map[string]int64)
			for _, c := range podMetrics.Containers {
				metrics[c.Name] = c.Usage.Cpu().MilliValue()
			}
			podMetricsMap[podMetrics.Name] = metrics
		}

		var mainUsage, sidecarUsage, mainRequest, sidecarRequest, mainLimit, sidecarLimit int64
		for _, pod := range pods.Items {
			metrics := podMetricsMap[pod.Name]
			for i, c := range pod.Spec.Containers {
				cpuReq := c.Resources.Requests.Cpu().MilliValue()
				cpuLim := c.Resources.Limits.Cpu().MilliValue()
				cpuUse := metrics[c.Name]
				if i == 0 {
					mainUsage += cpuUse
					mainRequest += cpuReq
					mainLimit += cpuLim
				} else {
					sidecarUsage += cpuUse
					sidecarRequest += cpuReq
					sidecarLimit += cpuLim
				}
			}
		}

		rows = append(rows, []string{
			ns,
			ds.Name,
			fmt.Sprintf("%d", mainUsage),
			fmt.Sprintf("%d", sidecarUsage),
			fmt.Sprintf("%d", mainRequest),
			fmt.Sprintf("%d", sidecarRequest),
			fmt.Sprintf("%d", mainLimit),
			fmt.Sprintf("%d", sidecarLimit),
			fmt.Sprintf("%d", ds.Status.DesiredNumberScheduled),
			fmt.Sprintf("%d", ds.Status.CurrentNumberScheduled),
			fmt.Sprintf("%d", ds.Status.NumberReady),
		})
	}
	
	return rows
}

// collectStatefulSetCpuStats 收集StatefulSet CPU统计信息
func collectStatefulSetCpuStats(ns string, clientset *kubernetes.Clientset, metricsClient *metrics.Clientset, rows [][]string) [][]string {
	ctx := context.Background()

	statefulsets, err := clientset.AppsV1().StatefulSets(ns).List(ctx, metav1.ListOptions{})
	if err != nil {
		fmt.Printf("❌ 获取 %s 命名空间中的 StatefulSet 失败: %v\n", ns, err)
		return rows
	}

	fmt.Printf("🔍 正在收集 %s 命名空间中 %d 个 StatefulSet 的CPU信息...\n", ns, len(statefulsets.Items))

	for _, sts := range statefulsets.Items {
		selector := sts.Spec.Selector.MatchLabels
		selectorStr := []string{}
		for k, v := range selector {
			selectorStr = append(selectorStr, fmt.Sprintf("%s=%s", k, v))
		}

		pods, err := clientset.CoreV1().Pods(ns).List(ctx, metav1.ListOptions{
			LabelSelector: strings.Join(selectorStr, ","),
		})
		if err != nil {
			fmt.Printf("❌ 获取 StatefulSet %s 的 Pods 失败: %v\n", sts.Name, err)
			continue
		}

		// 获取Pod的CPU使用情况
		podMetricsList, err := metricsClient.MetricsV1beta1().PodMetricses(ns).List(ctx, metav1.ListOptions{
			LabelSelector: strings.Join(selectorStr, ","),
		})
		if err != nil {
			fmt.Printf("⚠️ 获取 StatefulSet %s 的 Pod Metrics 失败: %v\n", sts.Name, err)
			// 继续执行，但CPU使用量将为0
		}

		podMetricsMap := make(map[string]map[string]int64)
		for _, podMetrics := range podMetricsList.Items {
			metrics := make(map[string]int64)
			for _, c := range podMetrics.Containers {
				metrics[c.Name] = c.Usage.Cpu().MilliValue()
			}
			podMetricsMap[podMetrics.Name] = metrics
		}

		var mainUsage, sidecarUsage, mainRequest, sidecarRequest, mainLimit, sidecarLimit int64
		for _, pod := range pods.Items {
			metrics := podMetricsMap[pod.Name]
			for i, c := range pod.Spec.Containers {
				cpuReq := c.Resources.Requests.Cpu().MilliValue()
				cpuLim := c.Resources.Limits.Cpu().MilliValue()
				cpuUse := metrics[c.Name]
				if i == 0 {
					mainUsage += cpuUse
					mainRequest += cpuReq
					mainLimit += cpuLim
				} else {
					sidecarUsage += cpuUse
					sidecarRequest += cpuReq
					sidecarLimit += cpuLim
				}
			}
		}

		rows = append(rows, []string{
			ns,
			sts.Name,
			fmt.Sprintf("%d", mainUsage),
			fmt.Sprintf("%d", sidecarUsage),
			fmt.Sprintf("%d", mainRequest),
			fmt.Sprintf("%d", sidecarRequest),
			fmt.Sprintf("%d", mainLimit),
			fmt.Sprintf("%d", sidecarLimit),
			fmt.Sprintf("%d", sts.Status.Replicas),
			fmt.Sprintf("%d", sts.Status.ReadyReplicas),
			fmt.Sprintf("%d", sts.Status.CurrentReplicas),
		})
	}
	
	return rows
}

// collectJobCpuStats 收集Job CPU统计信息
func collectJobCpuStats(ns string, clientset *kubernetes.Clientset, metricsClient *metrics.Clientset, rows [][]string) [][]string {
	ctx := context.Background()

	jobs, err := clientset.BatchV1().Jobs(ns).List(ctx, metav1.ListOptions{})
	if err != nil {
		fmt.Printf("❌ 获取 %s 命名空间中的 Job 失败: %v\n", ns, err)
		return rows
	}

	fmt.Printf("🔍 正在收集 %s 命名空间中 %d 个 Job 的CPU信息...\n", ns, len(jobs.Items))

	for _, job := range jobs.Items {
		selector := job.Spec.Selector.MatchLabels
		selectorStr := []string{}
		for k, v := range selector {
			selectorStr = append(selectorStr, fmt.Sprintf("%s=%s", k, v))
		}

		pods, err := clientset.CoreV1().Pods(ns).List(ctx, metav1.ListOptions{
			LabelSelector: strings.Join(selectorStr, ","),
		})
		if err != nil {
			fmt.Printf("❌ 获取 Job %s 的 Pods 失败: %v\n", job.Name, err)
			continue
		}

		// 获取Pod的CPU使用情况
		podMetricsList, err := metricsClient.MetricsV1beta1().PodMetricses(ns).List(ctx, metav1.ListOptions{
			LabelSelector: strings.Join(selectorStr, ","),
		})
		if err != nil {
			fmt.Printf("⚠️ 获取 Job %s 的 Pod Metrics 失败: %v\n", job.Name, err)
			// 继续执行，但CPU使用量将为0
		}

		podMetricsMap := make(map[string]map[string]int64)
		for _, podMetrics := range podMetricsList.Items {
			metrics := make(map[string]int64)
			for _, c := range podMetrics.Containers {
				metrics[c.Name] = c.Usage.Cpu().MilliValue()
			}
			podMetricsMap[podMetrics.Name] = metrics
		}

		var mainUsage, sidecarUsage, mainRequest, sidecarRequest, mainLimit, sidecarLimit int64
		for _, pod := range pods.Items {
			metrics := podMetricsMap[pod.Name]
			for i, c := range pod.Spec.Containers {
				cpuReq := c.Resources.Requests.Cpu().MilliValue()
				cpuLim := c.Resources.Limits.Cpu().MilliValue()
				cpuUse := metrics[c.Name]
				if i == 0 {
					mainUsage += cpuUse
					mainRequest += cpuReq
					mainLimit += cpuLim
				} else {
					sidecarUsage += cpuUse
					sidecarRequest += cpuReq
					sidecarLimit += cpuLim
				}
			}
		}

		rows = append(rows, []string{
			ns,
			job.Name,
			fmt.Sprintf("%d", mainUsage),
			fmt.Sprintf("%d", sidecarUsage),
			fmt.Sprintf("%d", mainRequest),
			fmt.Sprintf("%d", sidecarRequest),
			fmt.Sprintf("%d", mainLimit),
			fmt.Sprintf("%d", sidecarLimit),
			fmt.Sprintf("%d", job.Status.Active),
			fmt.Sprintf("%d", job.Status.Succeeded),
			fmt.Sprintf("%d", job.Status.Failed),
		})
	}
	
	return rows
}
