package aipserver

import (
	"apedge/dao"
	"apedge/dto"
	"apedge/pkg/common"
	"apedge/pkg/configs"
	"apedge/pkg/database"
	"apedge/pkg/protocol"
	"apedge/remote"
	"encoding/json"
	"errors"
	"fmt"
	"strings"
	"time"

	uuid "github.com/satori/go.uuid"
)

// CreateApedgeInference validates the request, persists a new inference
// service plus one service-node binding per requested node and, when the
// models are already converted (canStart), launches the apflow job for
// each node. It returns the generated service id on success.
//
// All DB writes happen in one transaction; every error path rolls it back.
// NOTE(review): StartApflow is a remote side effect performed inside the
// transaction — a later DB failure cannot undo an already-started job.
func CreateApedgeInference(req dto.CreateApedgeInferenceReq, userInfo *protocol.UserInfoAAA) (string, error) {
	var err error

	tx := database.Db.Begin()
	defer func() {
		if r := recover(); r != nil {
			tx.Rollback()
		}
	}()
	// Reject duplicate service names.
	exist, err := CheckApedgeInferenceNameExist(tx, req.Name)
	if err != nil {
		tx.Rollback()
		return "", err
	}
	if exist {
		tx.Rollback()
		return "", common.ErrInferNameExist
	}

	// The model list must not be empty.
	if CheckIsModelsEmpty(req.Models) {
		tx.Rollback()
		return "", common.ErrModelEmpty
	}
	// The model count must not exceed the configured limit.
	if CheckIsModelNumOver(req.Models) {
		tx.Rollback()
		return "", common.ErrModelTooMany
	}
	serviceId := GenServiceId()

	// Derive the inference type from the requested container image and pin
	// the image/tag to the configured values for that type.
	inferType := ""
	if strings.Contains(configs.Config.Relevant.Image.NpuInferImage, req.ContainerImage) {
		inferType = "huawei_npu"
		req.ContainerImage = configs.Config.Relevant.Image.NpuInferImage
		req.ContainerImageTag = configs.Config.Relevant.Image.NpuInferImageTag
	} else if strings.Contains(configs.Config.Relevant.Image.TritonImage, req.ContainerImage) {
		inferType = "nvidia_gpu"
		req.ContainerImage = configs.Config.Relevant.Image.TritonImage
		req.ContainerImageTag = configs.Config.Relevant.Image.TritonImageTag
	} else {
		// BUG FIX: this path previously returned without releasing the
		// open transaction.
		tx.Rollback()
		return "", errors.New("镜像不能用于推理，请重新选择, 目前支持nvcr.io/nvidia/tritonserver 和 algorithm/apulistech/ascend-inference-serving")
	}

	// Model conversion. TODO
	apModels, canStart, err := ModelInfo(req.Models, inferType, serviceId)
	if err != nil {
		tx.Rollback()
		return "", err
	}

	// Serialize the slice/struct fields into their JSON-string columns.
	modelsStr, err := json.Marshal(apModels)
	if err != nil {
		tx.Rollback()
		return "", err
	}
	envsStr, err := json.Marshal(req.Envs)
	if err != nil {
		tx.Rollback()
		return "", err
	}
	nodeIdsStr, err := json.Marshal(req.NodeIds)
	if err != nil {
		tx.Rollback()
		return "", err
	}
	ContainerPortsStr, err := json.Marshal(req.ContainerPorts)
	if err != nil {
		tx.Rollback()
		return "", err
	}

	now := time.Now()
	t := &dao.ApedgeInference{
		OrgId:                  int64(userInfo.OrgId),
		GroupId:                int64(userInfo.GroupId),
		UserId:                 int64(userInfo.UserId),
		UserName:               userInfo.UserName,
		ServiceId:              serviceId,
		Name:                   req.Name,
		Describe:               req.Describe,
		Status:                 common.TASK_STATUS_MODEL_TRANSFORM,
		Models:                 string(modelsStr),
		Envs:                   string(envsStr),
		NodeIds:                string(nodeIdsStr),
		ContainerImage:         req.ContainerImage,
		ContainerImageTag:      req.ContainerImageTag,
		ContainerRunCmd:        req.ContainerRunCmd,
		ContainerRunParams:     req.ContainerRunParams,
		ContainerPrivilegeMode: req.ContainerPrivilegeMode,
		ContainerResart:        req.ContainerRestart,
		ContainerNetwork:       req.ContainerNetwork,
		ContainerPorts:         string(ContainerPortsStr),
		CreatedAt:              &now,
		UpdatedAt:              &now,
	}

	if err = t.Save(tx); err != nil {
		tx.Rollback()
		return "", err
	}

	// Create the service <-> node relation rows.
	for _, nodeId := range req.NodeIds {
		// BUG FIX: the uuid generation error was silently discarded before.
		uuids, uerr := uuid.NewV4()
		if uerr != nil {
			tx.Rollback()
			return "", uerr
		}
		svcNodeId := fmt.Sprintf("%s-%s", "afnode", uuids.String())
		sn := &dao.ApedgeServiceNode{
			OrgId:         int64(userInfo.OrgId),
			GroupId:       int64(userInfo.GroupId),
			UserId:        int64(userInfo.UserId),
			UserName:      userInfo.UserName,
			ServiceNodeId: svcNodeId,
			NodeId:        nodeId,
			ServiceId:     serviceId,
			Status:        common.TASK_STATUS_MODEL_TRANSFORM,
			CreatedAt:     &now,
			UpdatedAt:     &now,
		}
		if err = sn.Save(tx); err != nil {
			tx.Rollback()
			return "", err
		}

		// Launch the service if the models need no further conversion.
		if canStart {
			logger.Infoln("canStart")
			err = StartApflow(*t, *sn)
			if err != nil {
				tx.Rollback()
				return "", err
			}
			// Mark the node as scheduling.
			snt := &dao.ApedgeServiceNode{
				ServiceNodeId: svcNodeId,
			}
			updateMap := make(map[string]interface{})
			updateMap["status"] = common.TASK_STATUS_SCHEDULING
			err = snt.UpdateByServiceNodeId(tx, updateMap)
			if err != nil {
				tx.Rollback()
				return "", err
			}
		}

	}

	return t.ServiceId, tx.Commit().Error
}

// GetApedgeInferenceList returns one page of inference services visible to
// the caller (converted to output items) together with the total count.
func GetApedgeInferenceList(req dto.GetApedgeInferenceListReq, userInfo *protocol.UserInfoAAA) ([]dto.OutApedgeInferenceItem, int64, error) {
	var record dao.ApedgeInference

	rows, total, err := record.GetList(database.Db, req.PageNum, req.PageSize, req.Name, req.Sort, userInfo)
	if err != nil {
		return nil, 0, err
	}

	// Convert each DB row to its output representation.
	result := make([]dto.OutApedgeInferenceItem, 0, len(rows))
	for _, row := range rows {
		converted, convErr := GenOutApedgeInferenceItem(row)
		if convErr != nil {
			return nil, 0, convErr
		}
		result = append(result, converted)
	}
	return result, total, nil
}

// GetApedgeInferenceDetail looks up a single inference service by its
// service id and converts it to the output representation.
func GetApedgeInferenceDetail(serviceId string) (*dto.OutApedgeInferenceItem, error) {
	db := database.Db
	record := &dao.ApedgeInference{ServiceId: serviceId}

	found, err := record.GetByServiceId(db)
	if err != nil {
		// NOTE(review): ErrNodeNotExist reads like a node-level error for an
		// inference-level lookup — confirm this is the sentinel callers expect.
		if err.Error() == common.DB_ERR_RECORD_NOT_FOUND {
			return nil, common.ErrNodeNotExist
		}
		return nil, err
	}

	out, err := GenOutApedgeInferenceItem(*found)
	if err != nil {
		return nil, err
	}
	return &out, nil
}

// DelApedgeInference deletes an inference service by id inside a
// transaction. The service must exist and must no longer have any
// associated service-node rows.
func DelApedgeInference(serviceid string) error {
	tx := database.Db.Begin()
	defer func() {
		if r := recover(); r != nil {
			tx.Rollback()
		}
	}()

	// The service must exist.
	record := dao.ApedgeInference{ServiceId: serviceid}
	if _, err := record.GetByServiceId(tx); err != nil {
		tx.Rollback()
		if err.Error() == common.DB_ERR_RECORD_NOT_FOUND {
			return common.ErrNodeNotExist
		}
		return err
	}

	// Refuse deletion while service-node bindings remain.
	binding := dao.ApedgeServiceNode{ServiceId: serviceid}
	nodes, err := binding.GetListByServiceId(tx)
	if err != nil {
		tx.Rollback()
		return err
	}
	if len(nodes) > 0 {
		tx.Rollback()
		return common.ErrSvcDeleteStatus
	}

	if err := record.DeleteByServiceId(tx); err != nil {
		tx.Rollback()
		return err
	}
	return tx.Commit().Error
}

// OpInferenceTask starts or stops one service-node deployment.
//
// CMD_START: verifies the node is in a startable state, launches the
// apflow job, then marks the node as scheduling. CMD_STOP: asks the job
// scheduler to delete the job. All DB updates run in one transaction.
func OpInferenceTask(req dto.OpInferenceTaskReq, userInfo *protocol.UserInfoAAA) error {
	tx := database.Db.Begin()
	defer func() {
		if r := recover(); r != nil {
			tx.Rollback()
		}
	}()
	svcNode := &dao.ApedgeServiceNode{
		ServiceNodeId: req.ServiceNodeId,
	}
	svcNode, err := svcNode.GetByServiceNodeId(tx)
	if err != nil {
		tx.Rollback()
		return err
	}

	svc := &dao.ApedgeInference{
		ServiceId: svcNode.ServiceId,
	}
	svc, err = svc.GetByServiceId(tx)
	if err != nil {
		tx.Rollback()
		return err
	}
	// Start
	if req.Cmd == common.CMD_START {
		// Reject start requests from an invalid state.
		isOk := CheckCanStartService(svcNode.Status)
		if !isOk {
			// BUG FIX: previously returned without releasing the transaction.
			tx.Rollback()
			return common.ErrInvalidStatusToStart
		}
		err = StartApflow(*svc, *svcNode)
		if err != nil {
			tx.Rollback()
			return err
		}
		// Mark the node as scheduling.
		snt := &dao.ApedgeServiceNode{
			ServiceNodeId: svcNode.ServiceNodeId,
		}
		updateMap := make(map[string]interface{})
		updateMap["status"] = common.TASK_STATUS_SCHEDULING
		err = snt.UpdateByServiceNodeId(tx, updateMap)
		if err != nil {
			tx.Rollback()
			return err
		}
	} else if req.Cmd == common.CMD_STOP {
		// Stop-state validation intentionally disabled:
		// isOk := CheckCanStopService(svcNode.Status)
		// if !isOk {
		// 	return common.ErrInvalidStatusToStop
		// }
		_, err = remote.DeleteJobToJobScheduler(req.ServiceNodeId)
		if err != nil {
			tx.Rollback()
			return err
		}
	}

	return tx.Commit().Error
}

// service node
func GetServiceNodeList(req dto.GetServiceNodeReq) ([]dto.OutServiceNodeItem, error) {
	var err error
	var items []dao.ApedgeServiceNode
	var t dao.ApedgeServiceNode

	tx := database.Db
	t = dao.ApedgeServiceNode{
		ServiceId: req.ServiceId,
	}
	items, err = t.GetListByServiceId(tx)
	if err != nil {
		return nil, err
	}
	outPutList := []dto.OutServiceNodeItem{}
	for _, item := range items {
		node := &dao.ApedgeNode{
			NodeId: item.NodeId,
		}
		node, err = node.GetNodeByNodeId(tx)
		if err != nil {
			return nil, err
		}
		outItem, err := GenOutServiceNodeItem(item, node.NodeName)
		if err != nil {
			return nil, err
		}
		outPutList = append(outPutList, outItem)
	}
	return outPutList, err
}

// DeleteServiceNode removes a single service-node binding by its id inside
// a transaction. Returns ErrNodeNotExist when the binding does not exist.
func DeleteServiceNode(serviceNodeId string) error {
	tx := database.Db.Begin()
	defer func() {
		if r := recover(); r != nil {
			tx.Rollback()
		}
	}()

	binding := dao.ApedgeServiceNode{ServiceNodeId: serviceNodeId}
	if _, err := binding.GetByServiceNodeId(tx); err != nil {
		tx.Rollback()
		if err.Error() == common.DB_ERR_RECORD_NOT_FOUND {
			return common.ErrNodeNotExist
		}
		return err
	}

	if err := binding.DeleteByServiceNodeId(tx); err != nil {
		tx.Rollback()
		return err
	}
	return tx.Commit().Error
}

// PredictInfo returns the prediction endpoint info for a service: the
// inference center reported by ApWorkShop for the service's first model,
// plus the service's stored URL.
func PredictInfo(serviceId string) (*dto.PredictInfoRsp, error) {
	// Read-only path: database.Db is the shared handle, not a transaction.
	// BUG FIX: the old deferred recover()+Rollback() called Rollback on a
	// handle that was never Begin-ed and silently swallowed panics
	// (returning nil, nil); it has been removed.
	db := database.Db
	t := dao.ApedgeInference{
		ServiceId: serviceId,
	}
	item, err := t.GetByServiceId(db)
	if err != nil {
		return nil, err
	}

	apModels := []dao.ApflowModel{}
	err = json.Unmarshal([]byte(item.Models), &apModels)
	if err != nil {
		return nil, err
	}
	// BUG FIX: guard against an empty stored model list — indexing [0]
	// unchecked would panic.
	if len(apModels) == 0 {
		return nil, common.ErrModelEmpty
	}
	apModel := apModels[0]
	rspContent, err := remote.GetModelInferInfoformApWorkShop(apModel.ID, apModel.VersionId)
	if err != nil {
		return nil, err
	}

	rsp := &dto.PredictInfoRsp{
		Center: rspContent.Center,
		Url:    item.Url,
	}
	return rsp, nil
}
