package schedule

import (
	"context"
	"encoding/json"
	"errors"
	"flare-admin/config/nacos"
	"flare-admin/db/dao"
	"flare-admin/db/sqlc/cluster"
	"flare-admin/ecode"
	"flare-admin/http/model"
	"flare-admin/http/request"
	"flare-admin/prome"
	"flare-admin/service/dto/k8s"
	"flare-admin/service/dto/pgschedule"
	"flare-admin/service/external/k8s/clientset"
	"fmt"
	"strconv"
	"time"

	"github.com/sirupsen/logrus"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// Service implements scheduling-related operations: querying inter-node
// latency from Prometheus and managing pod-group scheduling records in the DB.
type Service struct {
	dao *dao.DBTXQuery // data-access layer for cluster/pod-group tables
}

// New constructs a schedule Service backed by the given DAO.
func New(dao *dao.DBTXQuery) *Service {
	return &Service{dao: dao}
}

// GetLatencyInfo queries inter-node latency metrics for the given cluster over
// the [start, end] time range (both RFC3339). An empty end defaults to now; an
// empty start defaults to one hour before end.
//
// Returns ecode.ErrRequestParam for malformed inputs, ecode.ErrClusterNotExists
// when the cluster cannot be resolved, ecode.ErrClusterResourceFetching when
// control-plane nodes cannot be listed, and Prometheus-specific errors for
// connection/query failures.
func (s *Service) GetLatencyInfo(ctx context.Context, clusterID string, start string, end string) (k8s.LatencyInfo, error) {
	cid, err := strconv.Atoi(clusterID)
	if err != nil {
		logrus.Errorf("[GetLatencyInfo] - failed to parse clusterID: %v", err)
		return k8s.LatencyInfo{}, ecode.ErrRequestParam
	}
	clusterInfo, err := s.dao.Cluster.SelectSingleClusterConfigByID(ctx, int32(cid))
	if err != nil {
		logrus.Errorf("[GetLatencyInfo] - failed to get cluster info by ID %d: %v", cid, err)
		return k8s.LatencyInfo{}, ecode.ErrClusterNotExists
	}

	cs, err := s.getClientset(clusterInfo.ClusterType, clusterInfo.Name)
	if err != nil {
		return k8s.LatencyInfo{}, ecode.ErrClusterNotExists
	}

	// Sanity check: the cluster is reachable and has at least one
	// control-plane node. (Node InternalIP discovery was disabled in favor
	// of the mode-based Prometheus endpoints below.)
	masterNodes, err := cs.CoreV1().Nodes().List(ctx, metav1.ListOptions{LabelSelector: "node-role.kubernetes.io/control-plane"})
	if err != nil {
		logrus.Errorf("[GetLatencyInfo] - failed to list master nodes: %v", err)
		return k8s.LatencyInfo{}, ecode.ErrClusterResourceFetching
	}
	if len(masterNodes.Items) == 0 {
		logrus.Errorf("[GetLatencyInfo] - no master nodes found")
		return k8s.LatencyInfo{}, ecode.ErrClusterResourceFetching
	}

	// Resolve the Prometheus endpoint by deployment mode.
	var nip string
	switch nacos.ApplicationCfg.Mode {
	case "dev":
		nip = "prometheus:9090"
	case "local":
		nip = "10.176.40.186:30090"
	}
	if nip == "" {
		// Previously an empty endpoint produced a bogus "http://" URL;
		// fail fast with an explicit error instead.
		logrus.Errorf("[GetLatencyInfo] - no Prometheus endpoint configured for mode %q", nacos.ApplicationCfg.Mode)
		return k8s.LatencyInfo{}, ecode.ErrPrometheusConnect
	}

	// Build the Prometheus client and run the range query.
	pclient, err := prome.NewPromClient(fmt.Sprintf("http://%s", nip))
	if err != nil {
		logrus.Errorf("[GetLatencyInfo] - failed to create Prometheus client: %v", err)
		return k8s.LatencyInfo{}, ecode.ErrPrometheusConnect
	}
	if end == "" {
		end = time.Now().Format(time.RFC3339)
	}
	if start == "" {
		endTime, err := time.Parse(time.RFC3339, end)
		if err != nil {
			logrus.Errorf("[GetLatencyInfo] - failed to parse end time: %v", err)
			return k8s.LatencyInfo{}, ecode.ErrRequestParam
		}
		start = endTime.Add(-1 * time.Hour).Format(time.RFC3339)
	}
	result, err := pclient.GetLatencyByTimeRange(start, end)
	if err != nil {
		logrus.Errorf("[GetLatencyInfo] - failed to query latency from Prometheus: %v", err)
		return k8s.LatencyInfo{}, ecode.ErrPrometheusQuery
	}

	// Convert the raw Prometheus result into the API-facing LatencyInfo.
	return k8s.TransferPrometheusLatencyInfoToLatencyInfo(result), nil
}

// GetLatencyInfoPair queries the latency between one specific pair of nodes
// (node1, node2) in the given cluster over the [start, end] time range (both
// RFC3339). An empty end defaults to now; an empty start defaults to one hour
// before end. Error semantics mirror GetLatencyInfo.
func (s *Service) GetLatencyInfoPair(ctx context.Context, clusterID string, node1, node2 string, start, end string) (k8s.LatencyInfo, error) {
	cid, err := strconv.Atoi(clusterID)
	if err != nil {
		logrus.Errorf("[GetLatencyInfoPair] - failed to parse clusterID: %v", err)
		return k8s.LatencyInfo{}, ecode.ErrRequestParam
	}
	clusterInfo, err := s.dao.Cluster.SelectSingleClusterConfigByID(ctx, int32(cid))
	if err != nil {
		logrus.Errorf("[GetLatencyInfoPair] - failed to get cluster info by ID %d: %v", cid, err)
		return k8s.LatencyInfo{}, ecode.ErrClusterNotExists
	}

	cs, err := s.getClientset(clusterInfo.ClusterType, clusterInfo.Name)
	if err != nil {
		return k8s.LatencyInfo{}, ecode.ErrClusterNotExists
	}

	// Sanity check: the cluster is reachable and has at least one
	// control-plane node.
	masterNodes, err := cs.CoreV1().Nodes().List(ctx, metav1.ListOptions{LabelSelector: "node-role.kubernetes.io/control-plane"})
	if err != nil {
		logrus.Errorf("[GetLatencyInfoPair] - failed to list master nodes: %v", err)
		return k8s.LatencyInfo{}, ecode.ErrClusterResourceFetching
	}
	if len(masterNodes.Items) == 0 {
		logrus.Errorf("[GetLatencyInfoPair] - no master nodes found")
		return k8s.LatencyInfo{}, ecode.ErrClusterResourceFetching
	}

	// Resolve the Prometheus endpoint by deployment mode.
	var nip string
	switch nacos.ApplicationCfg.Mode {
	case "dev":
		nip = "prometheus:9090"
	case "local":
		nip = "10.176.40.186:30090"
	}
	if nip == "" {
		// Previously an empty endpoint produced a bogus "http://" URL;
		// fail fast with an explicit error instead.
		logrus.Errorf("[GetLatencyInfoPair] - no Prometheus endpoint configured for mode %q", nacos.ApplicationCfg.Mode)
		return k8s.LatencyInfo{}, ecode.ErrPrometheusConnect
	}

	pclient, err := prome.NewPromClient(fmt.Sprintf("http://%s", nip))
	if err != nil {
		logrus.Errorf("[GetLatencyInfoPair] - failed to create Prometheus client: %v", err)
		return k8s.LatencyInfo{}, ecode.ErrPrometheusConnect
	}
	if end == "" {
		end = time.Now().Format(time.RFC3339)
	}
	if start == "" {
		endTime, err := time.Parse(time.RFC3339, end)
		if err != nil {
			logrus.Errorf("[GetLatencyInfoPair] - failed to parse end time: %v", err)
			return k8s.LatencyInfo{}, ecode.ErrRequestParam
		}
		start = endTime.Add(-1 * time.Hour).Format(time.RFC3339)
	}
	result, err := pclient.GetSingleLatencyByTimeRange(node1, node2, start, end)
	if err != nil {
		logrus.Errorf("[GetLatencyInfoPair] - failed to query latency from Prometheus: %v", err)
		return k8s.LatencyInfo{}, ecode.ErrPrometheusQuery
	}

	// Convert the raw Prometheus result into the API-facing LatencyInfo.
	return k8s.TransferPrometheusLatencyInfoToLatencyInfo(result), nil
}

// AddPodGroupSchedulingRecord persists a new pod-group scheduling record for
// the given cluster. CommitTime is mandatory; its absence yields
// ecode.ErrRequestParam. Returns "ok" on success.
func (s *Service) AddPodGroupSchedulingRecord(ctx context.Context, record *request.SchedulingRecordRequest, clusterID string) (string, error) {
	record.ClusterID = clusterID
	if record.CommitTime == "" {
		return "", ecode.ErrRequestParam
	}
	param, err := pgschedule.ScheduleRecord2AddRecordParam(record)
	if err != nil {
		logrus.Errorf("[AddPodGroupSchedulingRecord] - failed to convert scheduling record to DB param: %v", err)
		return "", ecode.ErrRequestParam
	}
	// Log and return the zero string on failure (previously the insert error
	// was unlogged and "ok" was returned alongside it).
	if _, err = s.dao.Cluster.AddPodGroupRecord(ctx, param); err != nil {
		logrus.Errorf("[AddPodGroupSchedulingRecord] - failed to insert pod group record: %v", err)
		return "", err
	}
	return "ok", nil
}

// UpdatePodGroupSchedulingRecord updates an existing pod-group scheduling
// record. Depending on which payloads are present it updates both latency
// info and scheduling result, only the latency info, or only the scheduling
// result. If neither payload is present it returns ecode.ErrRequestParam
// (previously this case silently overwrote schedule_res with an empty value).
func (s *Service) UpdatePodGroupSchedulingRecord(ctx context.Context, record *request.SchedulingRecordRequest, clusterID string) (string, error) {
	record.ClusterID = clusterID
	record.UpdateAt = time.Now().Format(time.RFC3339)
	var rerr error
	switch {
	case len(record.LatencyInfo) != 0 && len(record.ScheduledRes) != 0:
		// Both payloads present: full record update.
		param, err := pgschedule.ScheduleRecord2UpdateRecordParam(record)
		if err != nil {
			logrus.Errorf("[UpdatePodGroupSchedulingRecord] - failed to convert scheduling record to DB param: %v", err)
			return "", ecode.ErrRequestParam
		}
		_, rerr = s.dao.Cluster.UpdatePodGroupRecord(ctx, param)
	case len(record.LatencyInfo) != 0:
		// Latency info only.
		_, rerr = s.dao.Cluster.UpdatePodGroupLatencyInfo(ctx, cluster.UpdatePodGroupLatencyInfoParams{
			LatencyInfo: []byte(record.LatencyInfo),
			Name:        record.Name,
			Uid:         record.UID,
			Namespace:   record.Namespace,
			Clusterid:   record.ClusterID,
		})
	case len(record.ScheduledRes) != 0:
		// Scheduling result only.
		_, rerr = s.dao.Cluster.UpdatePodGroupRecordScheduleRes(ctx, cluster.UpdatePodGroupRecordScheduleResParams{
			ScheduleRes: []byte(record.ScheduledRes),
			Name:        record.Name,
			Uid:         record.UID,
			Namespace:   record.Namespace,
			Clusterid:   record.ClusterID,
		})
	default:
		// Nothing to update.
		return "", ecode.ErrRequestParam
	}
	if rerr != nil {
		logrus.Errorf("[UpdatePodGroupSchedulingRecord] - failed to update pod group record: %v", rerr)
	}
	return "ok", rerr
}

// UpdatePodGroupSchedulingRecordStatus sets the status of one pod-group
// scheduling record, identified by (name, namespace, uid, clusterID), and
// returns a human-readable confirmation containing the new status.
func (s *Service) UpdatePodGroupSchedulingRecordStatus(ctx context.Context, name, namespace, status, clusterID, uid string) (string, error) {
	params := cluster.UpdatePodGroupRecordStatusParams{
		Status:    status,
		Name:      name,
		Namespace: namespace,
		Uid:       uid,
		Clusterid: clusterID,
	}
	updated, err := s.dao.Cluster.UpdatePodGroupRecordStatus(ctx, params)
	if err != nil {
		logrus.Errorf("[UpdatePodGroupSchedulingRecordStatus] - failed to update scheduling record status: %v", err)
		return "", ecode.ErrRequestParam
	}
	return "status更改为 " + updated.Status, nil
}

// GetAllPodGroupSchedulingRecords lists every pod-group scheduling record
// stored for the given cluster, converted to the API model.
func (s *Service) GetAllPodGroupSchedulingRecords(ctx context.Context, clusterID string) ([]model.ScheduleRecord, error) {
	rows, err := s.dao.Cluster.SelectPodGroupRecordByClusterID(ctx, clusterID)
	if err != nil {
		logrus.Errorf("[GetAllPodGroupSchedulingRecords] - failed to get scheduling records by cluster ID %s: %v", clusterID, err)
		return nil, err
	}

	// Map each DB row to its API-model counterpart.
	out := make([]model.ScheduleRecord, 0, len(rows))
	for _, row := range rows {
		out = append(out, pgschedule.DBRecord2ScheduleRecordModel(row))
	}
	return out, nil
}

// GetSingleRecord fetches one pod-group scheduling record identified by
// (clusterID, namespace, name, uid) and decodes its JSON columns into the
// detail model: pod placements, per-node latencies, and pod dependencies.
// JSON decoding failures map to ecode.ErrUnknownError.
func (s *Service) GetSingleRecord(ctx context.Context, clusterID, namespace, name, uid string) (resp model.ScheduleRecordDetail, err error) {
	detail, err := s.dao.Cluster.SelectPodGroupRecordDetailByClusterID(ctx, cluster.SelectPodGroupRecordDetailByClusterIDParams{
		Uid:       uid,
		Clusterid: clusterID,
		Name:      name,
		Namespace: namespace,
	})
	if err != nil {
		logrus.Errorf("[GetSingleRecord] - failed to get scheduling record detail by cluster ID %s, name %s, namespace %s, uid %s: %v", clusterID, name, namespace, uid, err)
		return resp, err
	}

	// 1. Decode the pod -> node placements produced by the scheduler.
	placements := make([]model.PodNodeBind, 0)
	if err = json.Unmarshal(detail.ScheduleRes, &placements); err != nil {
		logrus.Errorf("[GetSingleRecord] - failed to unmarshal scheduling record detail schedule_res: %v", err)
		return resp, ecode.ErrUnknownError
	}
	resp.Pods = placements

	// 2. Decode the inter-node latency measurements.
	latencies := make(map[string]model.LatencyDetail)
	if err = json.Unmarshal(detail.LatencyInfo, &latencies); err != nil {
		logrus.Errorf("[GetSingleRecord] - failed to unmarshal scheduling record detail latency_info: %v", err)
		return resp, ecode.ErrUnknownError
	}
	resp.NodeLatencies = latencies

	// 3. Decode the pod dependency graph.
	deps := make([]model.PodDependency, 0)
	if err = json.Unmarshal(detail.Dependencies, &deps); err != nil {
		logrus.Errorf("[GetSingleRecord] - failed to unmarshal scheduling record detail dependencies: %v", err)
		return resp, ecode.ErrUnknownError
	}
	resp.Dependencies = deps

	return resp, nil
}

// getClientset returns a cached kubernetes clientset for the given cluster,
// lazily loading its kubeconfig from the DB on a cache miss.
//
// Previously the first GetClientset result was discarded and the lookup was
// always repeated, and non-cache-miss errors still fell through to a second
// pointless lookup; this version returns early on each outcome.
func (s *Service) getClientset(clusterType, clusterName string) (*kubernetes.Clientset, error) {
	key := clientset.BuildMapKey(clusterType, clusterName)
	cs, err := clientset.GetClientset(key)
	if err == nil {
		return cs, nil
	}
	if !errors.Is(err, ecode.ErrClusterNotExists) {
		return nil, err
	}
	// Cache miss: load the kubeconfig from the DB and retry once.
	if err := s.loadCluster(clusterName, clusterType); err != nil {
		return nil, err
	}
	return clientset.GetClientset(key)
}

// loadCluster fetches the kubeconfig for (clusterName, clusterType) from the
// DB and registers a clientset for it in the global clientset cache.
func (s *Service) loadCluster(clusterName, clusterType string) error {
	params := cluster.SelectSingleClusterInfoParams{
		Name:        clusterName,
		Clustertype: clusterType,
	}
	info, err := s.dao.Cluster.SelectSingleClusterInfo(context.Background(), params)
	if err != nil {
		return err
	}
	_, err = clientset.AddClientsetFromBytes(clusterType, clusterName, []byte(info.Kubeconfig))
	return err
}
