package ping

import (
	"context"
	"fmt"
	"latencyPublisher/api/pb"
	"latencyPublisher/pkg/grpc/client"
	"latencyPublisher/pkg/prome"
	"sync"
	"time"

	probing "github.com/prometheus-community/pro-bing"
	"github.com/sirupsen/logrus"
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"k8s.io/client-go/kubernetes"
)

const (
	// ScheduleRate is the interval between successive ping/publish rounds.
	ScheduleRate = time.Second * 1

	// ServerGrpcServiceFQDN is the fully-qualified domain name of the
	// controller's Service. The publisher most likely does not run in the
	// same namespace as the controller, so the FQDN is used instead of a
	// short service name.
	ServerGrpcServiceFQDN = "kubebuildernodecontroller-controller-manager-metrics-service.kubebuildernodecontroller-system.svc.cluster.local"
)

// LatencyPublisher  定时搜集延迟指标并且发送给集群中的nodeController更新延迟指标
type LatencyPublisher struct {
	kClient *kubernetes.Clientset

	// nodePingerMap 为了多线程向每个集群内的node发送icmp探测包，将Pinger分段存储在map中
	nodePingerMap map[string]*probing.Pinger

	// nodeName LatencyPublisher通过DaemonSet在每个node上进行部署，因此需要表示node名称
	nodeName   string
	grpcClient pb.NodeTopoNetMetricServiceClient

	// mode 表示Publisher的工作模式
	mode string // "crd" or "prometheus"

	// exporter prometheus exporter实例
	exporter     *prome.Exporter
	exporterPort int // 用于prometheus模式下的端口
}

// NewLatencyPublisher constructs a LatencyPublisher for the given node and
// publishing mode. The pinger map, gRPC client, and exporter are not set up
// here; call Initialize before Run.
func NewLatencyPublisher(kClient *kubernetes.Clientset, nodeName string, mode string, exporterPort int) *LatencyPublisher {
	publisher := &LatencyPublisher{
		exporterPort: exporterPort,
		mode:         mode,
		nodeName:     nodeName,
		kClient:      kClient,
	}
	return publisher
}

// Initialize prepares the publisher for operation:
//  1. builds one Pinger per peer node in the cluster (excluding this node);
//  2. sets up the mode-specific publishing backend — a gRPC client to the
//     controller in "crd" mode, or a Prometheus exporter in "prometheus" mode.
//
// It must be called before Run. An error is returned when listing nodes
// fails (other than NotFound), the gRPC client cannot be created, or the
// exporter fails to start.
func (p *LatencyPublisher) Initialize() error {
	// 1. Initialize nodePingerMap with one pinger per peer node.
	p.nodePingerMap = make(map[string]*probing.Pinger)
	nodeList, err := p.kClient.CoreV1().Nodes().List(context.Background(), metav1.ListOptions{})
	if err != nil {
		if errors.IsNotFound(err) {
			// Tolerated: an empty cluster just means nothing to ping yet.
			logrus.Infof("[Initialize]-未找到node")
		} else {
			logrus.Errorf("[Initialize]-查找node时出错, err=%s", err)
			return err
		}
	}

	// Guard the loop: on the tolerated NotFound path above we fall through,
	// and dereferencing a nil list would panic.
	if nodeList != nil {
		for _, node := range nodeList.Items {
			// Skip ourselves; we only measure latency to other nodes.
			if node.Name == p.nodeName {
				continue
			}
			nodeIPAddr := findIpInAddress(node.Status.Addresses)
			if nodeIPAddr == "" {
				// No InternalIP — nothing routable to probe.
				continue
			}
			// NOTE(review): 200µs between probes looks very aggressive;
			// confirm Microsecond (not Millisecond) was intended.
			config := &PingerConfig{Count: 5, Interval: time.Microsecond * 200, Address: nodeIPAddr}
			pinger, err := NewPinger(config)
			if err != nil {
				logrus.Errorf("[Initialize]-初始化发向%s的node的Pinger失败, err=%s", nodeIPAddr, err)
				continue
			}
			p.nodePingerMap[node.Name] = pinger
			logrus.Infof("[Initialize]-初始化发向%s的node的Pinger成功, ip = %s", node.Name, nodeIPAddr)
		}
	}

	// 2. Initialize the mode-specific publishing backend.
	switch p.mode {
	case "crd":
		logrus.Infof("[Initialize]-初始化grpc客户端")
		// 7963 is the controller's gRPC listen port; keep in sync with the server.
		gRPCServerAddr := fmt.Sprintf("%s:%d", ServerGrpcServiceFQDN, 7963)
		c, err := client.NewNodeTopoNetMetricServiceClient(gRPCServerAddr)
		if err != nil {
			logrus.Errorf("[Initialize]-初始化grpc客户端失败, err=%s", err)
			return err
		}
		logrus.Infof("[Initialize]-初始化grpc客户端成功, server地址为%s", gRPCServerAddr)
		p.grpcClient = c
	case "prometheus":
		logrus.Infof("[Initialize]-初始化prometheus exporter")
		p.exporter = prome.NewExporter(p.exporterPort)
		if err := p.exporter.Start(); err != nil {
			logrus.Errorf("[Initialize]-启动prometheus exporter失败, err=%s", err)
			return err
		}
		logrus.Infof("[Initialize]-prometheus exporter启动成功")
	default:
		// Previously an unknown mode silently disabled all publishing; make
		// the misconfiguration visible in the logs.
		logrus.Warnf("[Initialize]-未知的mode: %s", p.mode)
	}

	return nil
}

// Run starts the periodic latency collection loop, which gathers latency
// measurements and publishes them via the configured backend (gRPC to the
// nodeMetric controller in "crd" mode, or the Prometheus exporter).
// This function is non-blocking: the loop runs in its own goroutine.
func (p *LatencyPublisher) Run() {
	logrus.Infof("[Run]-开始执行Run，模式为: %s", p.mode)
	go p.PingAtScheduledRate(ScheduleRate)
}

// PingAtScheduledRate 以rate ms的速率调度ping请求
func (p *LatencyPublisher) PingAtScheduledRate(rate time.Duration) {
	for {
		p.pingOnce()
		toNodeList := make([]string, 0)
		latencies := make([]string, 0)
		for toNode, v := range p.nodePingerMap {
			avgRtt := v.Statistics().AvgRtt
			toNodeList = append(toNodeList, toNode)
			latencies = append(latencies, fmt.Sprintf("%d", avgRtt))

			// 根据模式处理延迟数据
			if p.mode == "prometheus" {
				// 更新prometheus metrics
				p.exporter.UpdateLatency(p.nodeName, toNode, float64(avgRtt.Nanoseconds())/1e6) // 转换为毫秒
			}

			// 重置pinger
			newPinger, err := NewPingerFromPinger(v)
			if err != nil {
				logrus.Errorf("[PingAtScheduledRate]-创建新的Pinger失败, err=%s", err)
				continue
			}
			p.nodePingerMap[toNode] = newPinger
		}

		// 只在CRD模式下发送grpc请求
		if p.mode == "crd" {
			param := &pb.PingLatencyMetric{
				NodeName:  p.nodeName,
				ToNodes:   toNodeList,
				Latencies: latencies,
			}
			_, err := p.grpcClient.UploadPingLatency(context.Background(), param)
			if err != nil {
				logrus.Errorf("[PingAtScheduledRate]-上传延迟信息失败, err=%s", err)
			}
		}

		time.Sleep(rate)
	}
}

// pingOnce fires every pinger in nodePingerMap concurrently (one goroutine
// per peer node) and blocks until all of them have completed their probes.
func (p *LatencyPublisher) pingOnce() {
	var wg sync.WaitGroup
	for nodeName, pinger := range p.nodePingerMap {
		wg.Add(1)
		go func(nodeName string, pinger *probing.Pinger) {
			defer wg.Done()
			if err := pinger.Run(); err != nil {
				logrus.Errorf("[PingOnce]-向节点%s 发送ping失败, err=%s", nodeName, err)
			}
		}(nodeName, pinger)
	}
	wg.Wait()
}

// findIpInAddress 在给定的AddressList中寻找类型为InternalIP的ip地址，因为只有这个ip地址是可解析的ip地址
func findIpInAddress(addresses []v1.NodeAddress) string {
	for _, address := range addresses {
		if address.Type == "InternalIP" {
			return address.Address
		}
	}
	return ""
}

// getPodFromDeployment returns the first pod matched by the label selector
// of the named Deployment in the given namespace. An error is returned when
// the Deployment or pods cannot be fetched, or when no pod matches (the
// original code indexed Items[0] unconditionally, which panics on an empty
// list).
func getPodFromDeployment(clientset *kubernetes.Clientset, namespace, deployName string) (*v1.Pod, error) {
	dd, err := clientset.AppsV1().Deployments(namespace).Get(context.TODO(), deployName, metav1.GetOptions{})
	if err != nil {
		return nil, err
	}
	labelSelector := metav1.FormatLabelSelector(&metav1.LabelSelector{
		MatchLabels: dd.Spec.Selector.MatchLabels,
	})
	pods, err := clientset.CoreV1().Pods(namespace).List(context.TODO(), metav1.ListOptions{
		LabelSelector: labelSelector,
	})
	if err != nil {
		return nil, err
	}
	// Guard the index: a deployment scaled to zero (or with pods not yet
	// created) would otherwise cause an index-out-of-range panic.
	if len(pods.Items) == 0 {
		return nil, fmt.Errorf("no pods found for deployment %s/%s", namespace, deployName)
	}
	return &pods.Items[0], nil
}
