package agents

import (
	"context"
	"fmt"
	"k8sops/pkg/client"
	"k8sops/pkg/common"
	"k8sops/pkg/services"
	"log"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// SchedulingStatus cordons nodes whose memory usage exceeds 85% of their
// allocatable memory and uncordons previously cordoned nodes once usage
// drops below 80% (the 5% gap provides hysteresis against flapping).
//
// lockList is a shared re-entrancy guard: the function is a no-op while
// another run holds the "agent_SchedulingStatus" key; the deferred
// common.Sleep releases the key after a 300-second cool-down.
func SchedulingStatus(lockList map[string]int) {
	key := "agent_SchedulingStatus"
	if lockList[key] == 1 {
		return
	}
	lockList[key] = 1
	defer common.Sleep(key, 300, lockList)

	nodesMem := make(map[string]int64)
	nodesNode := make(map[string]v1.Node)

	mc, err := services.GetNodeMetrics()
	if err != nil {
		// log.Fatal would call os.Exit and skip the deferred unlock;
		// log and return so the agent process keeps running.
		log.Println(err)
		return
	}
	n, err := services.GetNodes()
	if err != nil {
		log.Println(err)
		return
	}

	for _, node := range n.Items {
		nodesMem[node.Name] = node.Status.Allocatable.Memory().Value()
		nodesNode[node.Name] = node
	}

	for _, metric := range mc.Items {
		allocatable, ok := nodesMem[metric.Name]
		if !ok || allocatable == 0 {
			// A metric may reference a node missing from the node list
			// (or one reporting zero allocatable memory); dividing by it
			// would yield +Inf/NaN and a bogus cordon decision — skip.
			continue
		}
		ratio := float64(metric.Usage.Memory().Value()) / float64(allocatable)
		fmt.Println(metric.Name, "MemoryTopUsed:", ratio)
		switch {
		case ratio > 0.85:
			if err := SetUnschedulable(true, nodesNode[metric.Name]); err != nil {
				log.Println(err)
				return
			}
		case ratio < 0.8 && nodesNode[metric.Name].Spec.Unschedulable:
			if err := SetUnschedulable(false, nodesNode[metric.Name]); err != nil {
				log.Println(err)
				return
			}
		}
	}
}

// SetUnschedulable sets the Unschedulable flag (cordon when b is true,
// uncordon when false) on the given node and pushes the change to the
// API server.
//
// The node is received by value, so the caller's copy is not mutated;
// only the cluster object is updated. A non-nil error is returned (wrapped
// with context) instead of terminating the process, so callers decide how
// to react.
func SetUnschedulable(b bool, node v1.Node) error {
	k := client.K8sClientSet{}
	c, err := k.GetClientset()
	if err != nil {
		// Wrap and return rather than log.Fatal: exiting the whole agent
		// over one failed cordon is too drastic, and logging here while
		// also returning would double-handle the error.
		return fmt.Errorf("getting clientset: %w", err)
	}

	node.Spec.Unschedulable = b

	if _, err := c.CoreV1().Nodes().Update(context.TODO(), &node, metav1.UpdateOptions{}); err != nil {
		return fmt.Errorf("updating node %s: %w", node.Name, err)
	}
	return nil
}
