package remote

import (
	"apedge/dao"
	"apedge/dto"
	"apedge/pkg/configs"
	"encoding/json"
	"fmt"
	"strconv"

	"github.com/apulis/go-business/pkg/jobscheduler"
)

// GpuApedgeInference builds GPU inference deployment jobs for the apedge job
// scheduler: a model-download init container, a "transform" sidecar, and a
// triton "predict" container (see SetAllContainer). It is stateless.
type GpuApedgeInference struct{}

// Creat assembles a scheduler job for the inference service and submits it,
// returning the scheduler's create-job response.
// NOTE(review): "Creat" looks like a typo for "Create", but renaming would
// break callers, so the exported name is kept as-is.
func (infer *GpuApedgeInference) Creat(info dao.ApedgeInference, snInfo dao.ApedgeServiceNode) (*jobscheduler.CreateJobRsp, error) {
	job, err := infer.InitJob(info, snInfo)
	if err != nil {
		return nil, err
	}
	return CreateJobToJobScheduler(*job)
}

// InitJob builds a complete scheduler job in three steps: base metadata
// (InitJobBase), all containers (SetAllContainer), and environment
// variables (SetEnvs). Any step failing aborts the build.
func (infer *GpuApedgeInference) InitJob(info dao.ApedgeInference, snInfo dao.ApedgeServiceNode) (*jobscheduler.Job, error) {
	job, err := infer.InitJobBase(info, snInfo)
	if err != nil {
		return nil, err
	}
	if err := infer.SetAllContainer(job, info, snInfo); err != nil {
		return nil, err
	}
	if err := infer.SetEnvs(job, info); err != nil {
		return nil, err
	}
	return job, nil
}

// InitJobBase creates a job and fills in the container-independent scheduler
// metadata: module/job ids, resource type, namespace, replica count, labels,
// node selector, tolerations, and annotations.
func (infer *GpuApedgeInference) InitJobBase(info dao.ApedgeInference, snInfo dao.ApedgeServiceNode) (*jobscheduler.Job, error) {
	modID, err := strconv.Atoi(configs.Config.Relevant.ModId)
	if err != nil {
		return nil, err
	}

	job := jobscheduler.NewJob()
	job.SetModId(modID)
	job.SetJobId(snInfo.ServiceNodeId)
	job.SetResType(jobscheduler.RESOURCE_TYPE_DEPLOYMENT)
	job.SetNamespace("default")
	job.SetReplicas(1)
	job.SetLabels(map[string]string{"app": info.Name})

	// Pin the deployment to the target edge node.
	job.SetNodeSelector(map[string]string{"apedgenode": snInfo.NodeId})

	// Tolerate the edge node's taint so the pod can be scheduled there.
	toleration := jobscheduler.Toleration{}
	toleration.SetKey("apedgenode").SetValue("active").SetOperatorEqual().SetTaintEffectNoExecute()
	job.SetTolerations([]jobscheduler.Toleration{toleration})

	// Disable istio sidecar injection for this workload.
	job.SetAnnotation(map[string]string{"sidecar.istio.io/inject": "false"})
	return job, nil
}

// SetAllContainer populates the job's containers from the inference record:
//   - an init container that downloads and unpacks each model archive onto a
//     host path,
//   - a "transform" container that runs the python transformer and proxies
//     predictions to localhost:8000,
//   - a "predict" container that runs triton server with one GPU requested
//     and limited.
// info.Models and info.ContainerPorts are JSON-encoded strings.
func (infer *GpuApedgeInference) SetAllContainer(jobRequest *jobscheduler.Job, info dao.ApedgeInference, snInfo dao.ApedgeServiceNode) error {
	var err error
	var modelList []dao.ApflowModel
	var initCmd string
	var ports dto.ContainerPorts
	var initCountainerMountsPoint []jobscheduler.MountPoint
	var tritonMountPoints []jobscheduler.MountPoint
	var transformMountPoints []jobscheduler.MountPoint

	err = json.Unmarshal([]byte(info.Models), &modelList)
	if err != nil {
		return err
	}
	err = json.Unmarshal([]byte(info.ContainerPorts), &ports)
	if err != nil {
		return err
	}
	// 1. Mount the model file directories.
	for _, m := range modelList {
		// Init container downloads the model archive onto the host under
		// a per-model directory <base>/<modelID>_<versionID>.
		initCountainerModelPathApp := fmt.Sprintf("%s/%d_%d", INFER_DATA_MODELS_PATH_APP, m.ID, m.VersionId)
		fileXXX := fmt.Sprintf("%s/%d", initCountainerModelPathApp, m.VersionId)
		initCountainerMountsPoint = append(initCountainerMountsPoint, jobscheduler.MountPoint{
			Path:          "file://" + initCountainerModelPathApp,
			ContainerPath: initCountainerModelPathApp,
			ReadOnly:      false,
		})
		modeDownloadLink := m.DownloadLink
		// testModelsUri := "http://192.168.3.137/file-server/api/v1/files/cdab401f8e91622e753572e3dd87acfe"
		// modeDownloadLink := testModelsUri
		// Shell pipeline per model: download archive, wipe the model dir,
		// extract, then move the versioned sub-dir to <model dir>/pkg.
		// NOTE(review): `rm -r %s/*` also deletes the `temp` dir created just
		// before it, and the chain aborts if the dir is empty — verify intent.
		initCmd += fmt.Sprintf("wget %s -P /tmp -O /tmp/models.tar.gz && mkdir -p  %s/temp && rm -r %s/* && tar -zxvf /tmp/models.tar.gz -C %s && mv %s %s/pkg && ", modeDownloadLink, initCountainerModelPathApp, initCountainerModelPathApp, initCountainerModelPathApp, fileXXX, initCountainerModelPathApp)

		// Mount the extracted models into the inference (triton) image.
		tritonMountPoints = AddAppMountPoint(tritonMountPoints,
			"file://"+initCountainerModelPathApp+"/pkg/infer/models",
			TRITON_MODEL_PATH,
			false)

		// Mount the transformer SDK into the transform container.
		transformMountPoints = AddAppMountPoint(transformMountPoints,
			"file://"+initCountainerModelPathApp+"/pkg/infer/transformer",
			TRANSFORM_SDK_PATH,
			false,
		)
	}

	// Init container: runs the accumulated download/unpack command chain.
	initContainer := &jobscheduler.Container{
		ImageName:   fmt.Sprintf("%s:%s", configs.Config.Relevant.Image.InitContainer, configs.Config.Relevant.Image.InitContainerTag), // from config
		MountPoints: initCountainerMountsPoint,
	}
	initContainer.Cmd = append(initContainer.Cmd, "sh")
	initContainer.Cmd = append(initContainer.Cmd, "-c")
	// Trailing echo terminates the "&& " chain and marks success in the logs.
	initCmd += "echo InitCmdOk!"
	initContainer.Cmd = append(initContainer.Cmd, initCmd)
	jobRequest.SetInitContainer(initContainer)

	// transform container ports: keys are container ports, values host ports
	// (both stored as strings in the DTO).
	// NOTE(review): every port gets the same PortName "inference-port";
	// duplicate port names within a pod may be rejected by the scheduler /
	// kubernetes — confirm when more than one port is configured.
	transformPorts := []jobscheduler.ContainerPort{}
	for k, v := range ports.Infer {
		kInt, err := strconv.Atoi(k)
		if err != nil {
			return err
		}
		vInt, err := strconv.Atoi(v)
		if err != nil {
			return err
		}
		p := jobscheduler.ContainerPort{
			Port:     kInt,
			HostPort: vInt,
			PortName: "inference-port",
		}
		transformPorts = append(transformPorts, p)
	}

	// Transform container: python transformer forwarding to the predictor,
	// which listens on localhost:8000 in the same pod.
	tansformCmd := []string{}
	tansformCmd = append(tansformCmd, "bash")
	tansformCmd = append(tansformCmd, "-c")
	tansformCmd = append(tansformCmd, fmt.Sprintf("python -m transformer --predictor_host %s", "localhost:8000"))
	tansformContainer := jobscheduler.Container{
		ContainerName: fmt.Sprintf("%s-%s", "transform", snInfo.ServiceNodeId),
		ImageName:     fmt.Sprintf("%s:%s", configs.Config.Relevant.Image.GpuInferTransformerImage, configs.Config.Relevant.Image.GpuInferTransformerImageTag),
		Cmd:           tansformCmd,
		MountPoints:   transformMountPoints,
		// Ports: []jobscheduler.ContainerPort{
		// 	{
		// 		Port:       8080,
		// 		TargetPort: 8080,
		// 	},
		// },
		Ports: transformPorts,
	}
	jobRequest.AddContainer(tansformContainer)

	// predict container ports — note these use TargetPort, whereas the
	// transform ports above use HostPort; presumably intentional, verify
	// against the jobscheduler API.
	predictPorts := []jobscheduler.ContainerPort{}
	for k, v := range ports.Others {
		kInt, err := strconv.Atoi(k)
		if err != nil {
			return err
		}
		vInt, err := strconv.Atoi(v)
		if err != nil {
			return err
		}
		p := jobscheduler.ContainerPort{
			Port:       kInt,
			TargetPort: vInt,
			PortName:   "inference-port",
		}
		predictPorts = append(predictPorts, p)
	}

	// Predict container: triton server over the mounted model repository,
	// requesting and limiting exactly one GPU.
	predictCmd := []string{}
	predictCmd = append(predictCmd, "/opt/tritonserver/nvidia_entrypoint.sh")
	predictCmd = append(predictCmd, "./bin/tritonserver")
	predictCmd = append(predictCmd, "--model-repository=/models")
	predictCmd = append(predictCmd, "--strict-model-config=false")
	predictCmdContainer := jobscheduler.Container{
		ContainerName: fmt.Sprintf("%s-%s", "predict", snInfo.ServiceNodeId),
		ImageName:     fmt.Sprintf("%s:%s", info.ContainerImage, info.ContainerImageTag),
		Cmd:           predictCmd,
		MountPoints:   tritonMountPoints,
		// Ports: []jobscheduler.ContainerPort{
		// 	{
		// 		Port:       8000,
		// 		TargetPort: 8000,
		// 	},
		// 	{
		// 		Port:       8001,
		// 		TargetPort: 8001,
		// 	},
		// 	{
		// 		Port:       8002,
		// 		TargetPort: 8002,
		// 	},
		// },
		Ports: predictPorts,
		ResourceQuota: jobscheduler.ResourceQuota{
			Request: jobscheduler.ResourceData{
				Device: jobscheduler.Device{
					DeviceType: "nvidia.com/gpu",
					DeviceNum:  "1",
				},
			},
			Limit: jobscheduler.ResourceData{
				Device: jobscheduler.Device{
					DeviceType: "nvidia.com/gpu",
					DeviceNum:  "1",
				},
			},
		},
	}
	jobRequest.AddContainer(predictCmdContainer)
	return nil
}

// SetEnvs decodes the JSON-encoded environment map from the inference record,
// injects SERVICE_ID, and attaches the result to the job.
func (infer *GpuApedgeInference) SetEnvs(job *jobscheduler.Job, info dao.ApedgeInference) error {
	// Pre-allocate the map so a JSON `null` payload still yields a writable map.
	envs := make(map[string]string)
	if err := json.Unmarshal([]byte(info.Envs), &envs); err != nil {
		return err
	}
	// SERVICE_ID always reflects the record, overriding any stored value.
	envs["SERVICE_ID"] = info.ServiceId
	job.SetEnvs(envs)
	return nil
}
