package manager

import (
	"context"
	"encoding/json"
	"fmt"
	"log"
	"os"
	"path/filepath"
	"strings"
	"sync"
	"time"

	"github.com/cihub/seelog"
	mqtt1 "github.com/eclipse/paho.mqtt.golang"

	fai "kyland.com/eagle/lib/ai"
	"kyland.com/eagle/lib/cache"
	"kyland.com/eagle/lib/etcd"
	"kyland.com/eagle/lib/exception"
	"kyland.com/eagle/lib/http"
	"kyland.com/eagle/lib/mqtt"
	"kyland.com/eagle/lib/node"
)

// Task is a minimal task descriptor used by the (stub) DB listing API.
type Task struct {
	IP   string `json:"ip"`   // address of the node the task runs on
	Name string `json:"name"` // task (video file) name
}

// Result is a generic code/message response payload.
type Result struct {
	Code string `json:"code"`
	Msg  string `json:"msg"`
}

// AITask describes one video-analysis job together with all of its timing
// telemetry. A master task covers the whole video and holds the per-node
// slices in SubTask; each slice is itself an AITask covering [Start, End].
//
// Timestamps are formatted as "2006-01-02 15:04:05"; *Cost fields are
// elapsed seconds.
//
// NOTE(review): the JSON tags mix snake_case and camelCase (e.g.
// "get_duration" vs "maxSplit"/"mergeBegin"). They are part of the
// persisted/wire format, so they must not be normalized here.
type AITask struct {
	Name        string   `json:"name"`         // full path of the source video
	Master      string   `json:"master"`       // address of the scheduling (master) node
	Duration    int64    `json:"duration"`     // video duration from fai.GetVideoDuration
	GetDuration string   `json:"get_duration"` // when the duration was obtained
	MaxSplit    int64    `json:"maxSplit"`     // number of slices the video is split into
	Nodes       []string `json:"nodes"`        // nodes participating in this task
	Start       int64    `json:"start"`        // slice start position
	End         int64    `json:"end"`          // slice end position
	Begin       string   `json:"begin"`        // processing start timestamp
	Finish      string   `json:"finish"`       // processing finish timestamp
	// Split/merge phase timing (master only).
	SplitBegin string `json:"split_begin"`
	SplitEnd   string `json:"split_end"`
	SplitCost  int64  `json:"split_cost"`
	MergeBegin string `json:"mergeBegin"`
	MergeEnd   string `json:"mergeEnd"`
	MergeCost  int64  `json:"mergeCost"`
	// "Pre" phase timing: shipping the input from master to worker.
	PreUpBegin   string `json:"pre_up_begin"`
	PreUpEnd     string `json:"pre_up_end"`
	PreUpCost    int64  `json:"pre_up_cost"`
	PreDownBegin string `json:"pre_down_begin"`
	PreDownEnd   string `json:"pre_down_end"`
	PreDownCost  int64  `json:"pre_down_cost"`
	// "After" phase timing: shipping the result from worker back to master.
	AfterUpBegin   string `json:"after_up_begin"`
	AfterUpEnd     string `json:"after_up_end"`
	AfterUpCost    int64  `json:"after_up_cost"`
	AfterDownBegin string `json:"after_down_begin"`
	AfterDownEnd   string `json:"after_down_end"`
	AfterDownCost  int64  `json:"after_down_cost"`
	Node           string `json:"node"`   // node assigned to execute this (sub-)task
	Status         int    `json:"status"` // one of TaskCreated/TaskRunning/TaskFinished/TaskFailed
	Src            string `json:"src"`    // path of this slice's input file
	Dst            string `json:"dst"`    // path of this slice's output file
	Cost           int64  `json:"cost"`   // total elapsed seconds
	// SubTask lists the per-slice children of a master task.
	SubTask []*AITask `json:"subTask"`
}

// AIResult is the response body returned by a worker's /predict endpoint.
type AIResult struct {
	Msg      string  `json:"msg"`
	States   int     `json:"states"` // non-zero indicates the inference failed
	TimeCost float64 `json:"timeCost"`
	Version  string  `json:"version"`
}

type (
	// Res is the common status envelope embedded in every HTTP response.
	Res struct {
		Status struct {
			Code string `json:"code"`
			Msg  string `json:"msg"`
		} `json:"status"`
	}

	// ShareResult is the response of /v1/file/share and /v1/file/download;
	// Data.Item carries the shared-content identifier.
	ShareResult struct {
		Data struct {
			Item string `json:"item"`
		} `json:"data"`
		Res
	}

	// VideoSplit describes one [Start, End] slice of a video.
	VideoSplit struct {
		Start int    `json:"start"`
		End   int    `json:"end"`
		Name  string `json:"name"`
	}

	// TaskStatus is the response of /v1/task/status; Data.Item is the
	// task-state code. NOTE(review): unlike ShareResult, the Data field has
	// no json tag here, so the wire key is "Data" — confirm intended.
	TaskStatus struct {
		Data struct {
			Item int `json:"item"`
		}
		Res
	}

	// TaskRequest is the response of /v1/task/request.
	// NOTE(review): Data is untagged here too — confirm against the server.
	TaskRequest struct {
		Data struct {
			Item []AITask `json:"item"`
		}
		Res
	}

	// TaskNotify wraps a SrcStatus notification payload.
	TaskNotify struct {
		Data struct {
			Item SrcStatus `json:"item"`
		}
		Res
	}

	// SrcStatus pairs a sub-task source path with its state code.
	SrcStatus struct {
		Src    string `json:"src"`
		Status int    `json:"status"`
	}

	// TaskResult is the response of /v1/task/time/<name>.
	TaskResult struct {
		Data struct {
			Item *AITask `json:"item"`
		} `json:"data"`
		Res
	}
)

// Task lifecycle states, in progression order.
const (
	TaskCreated = iota // task accepted but not yet started
	TaskRunning        // inference in progress
	TaskFinished       // completed (or merged) successfully
	TaskFailed         // the inference request failed
)

var (
	// TaskPrefix is the etcd key prefix for primary task records.
	TaskPrefix = "tasks"
	// UnFinishedPrefix is the etcd key prefix for in-flight checkpoints.
	UnFinishedPrefix = "unfinished"
	// Mutex guards shared task state.
	// NOTE(review): not referenced in this file — confirm it is used
	// elsewhere before removing.
	Mutex = sync.RWMutex{}
)

// TaskToString serializes the task into its JSON representation, logging
// and returning any marshalling error.
func (t *AITask) TaskToString() (string, error) {
	raw, marshalErr := json.Marshal(t)
	if marshalErr != nil {
		seelog.Errorf("TaskToString error%s:%v", t.Name, marshalErr)
		return "", marshalErr
	}
	return string(raw), nil
}

// StringToTask populates the task from its JSON representation.
func (t *AITask) StringToTask(str string) error {
	raw := []byte(str)
	return json.Unmarshal(raw, t)
}

// CreateTasks accepts a new AI task: it computes the video duration,
// schedules the work across the cluster, persists progress snapshots,
// converts the merged result to H.264 when needed, and finally distributes
// the result to every participating node, notifying each over MQTT.
//
// It returns nil when a task with the same name already exists, otherwise
// the (mutated) task that was executed.
func CreateTasks(aiTask *AITask) *AITask {
	seelog.Infof("Begin to accept %v", aiTask)
	tsk, err := GetTask(TaskPrefix, aiTask)
	if err == nil && tsk != nil && tsk.Name == aiTask.Name {
		// Task names are unique; silently refuse duplicates.
		//exception.Throw("task is alreay exist, please choose another name")
		return nil
	}

	_, filename := filepath.Split(aiTask.Name)
	taskChange := fmt.Sprintf("%s-status", filename)
	cache.Cache.SetCache(taskChange, aiTask)

	begin := time.Now()
	aiTask.Begin = begin.Format("2006-01-02 15:04:05")
	duration, err := fai.GetVideoDuration(aiTask.Name)
	if err != nil {
		seelog.Errorf("Get video duration fai.GetVideoDuration err:%v", err)
		exception.Throw(err)
	}
	aiTask.Master = node.Localhost
	aiTask.Duration = duration
	aiTask.GetDuration = time.Now().Format("2006-01-02 15:04:05")
	cache.Cache.SetCache(taskChange, aiTask)
	seelog.Infof("Begin to getduration %v", aiTask)
	Schedule(aiTask)

	finish := time.Now()
	aiTask.Cost = finish.Unix() - begin.Unix()
	aiTask.Finish = finish.Format("2006-01-02 15:04:05")
	cache.Cache.SetCache(taskChange, aiTask)
	TaskSave(TaskPrefix, aiTask)

	// Begin to transfer the video to other nodes.
	output := strings.Replace(aiTask.Name, "video", "result", 1)
	outputPath := output
	// Re-encode the result to H.264 unless the pipeline already emits H.264.
	h264 := os.Getenv("H264")
	if h264 != "true" {
		dir, name := filepath.Split(output)
		outputPath = fmt.Sprintf("%sh264-%s", dir, name)
		_ = fai.ConvertToH264(output, outputPath)
	}

	cid, err := ShareFile(node.Localhost, outputPath)
	if err != nil {
		seelog.Errorf("Share file fai.ShareFile err:%v", err)
	}

	finishMsg := &FinishMessage{
		Name:   outputPath,
		Status: "ai_finish",
	}

	mqClient, err := mqtt.NewEmqx(node.Localhost)
	if err != nil {
		seelog.Errorf("New emqx fai.Mq err:%v", err)
	}
	// BUG FIX: guard against a nil client when NewEmqx failed; previously
	// Publish/Disconnect were called unconditionally.
	if mqClient != nil {
		Publish(mqClient, finishMsg)
		mqClient.Disconnect(1000)
	}

	for _, each := range aiTask.Nodes {
		mem := each
		if mem == node.Localhost {
			continue
		}

		memClient, err := mqtt.NewEmqx(mem)
		if err != nil {
			seelog.Errorf("New emqx fai.Mq err:%v", err)
		}

		// BUG FIX: the goroutine previously assigned to the enclosing
		// function's shared `err`, racing with other iterations; use a
		// goroutine-local error instead.
		go func(mem string, mqClient mqtt1.Client) {
			if dlErr := downLoadFile(mem, outputPath, cid); dlErr != nil {
				seelog.Errorf("downLoadFile err:%v", dlErr)
				return
			}
			if mqClient != nil {
				Publish(mqClient, finishMsg)
				mqClient.Disconnect(1000)
			}
		}(mem, memClient)
	}
	return aiTask
}

// ExecuteTask runs the AI job locally: it flips the task to running,
// performs the inference, and records the finished state plus timing logs.
func ExecuteTask(job *AITask) {
	started := time.Now().Unix()
	seelog.Infof("execute start %s: elapsed:%d", job.Name, started)

	// Mark running in both the cache and etcd before starting the work.
	cache.Cache.SetCache(job.Src, TaskRunning)
	job.Status = TaskRunning
	TaskSave(TaskPrefix, job)

	aiTaskExecute(job)

	// Flip to finished; final persistence is handled by the caller.
	cache.Cache.SetCache(job.Src, TaskFinished)
	job.Status = TaskFinished
	//TaskSave(TaskPrefix, job)
	//TaskDelete(TaskPrefix, job)
	ended := time.Now().Unix()
	seelog.Infof("execute finished :%d, elapsed: %d", ended, ended-started)
}

// CheckExecute dispatches a sub-task: run it locally when this node is the
// master, otherwise forward it to the owning node over HTTP.
func CheckExecute(job *AITask) {
	if job.Node != job.Master {
		RequestTask(job)
		return
	}
	ExecuteTask(job)
}

// GetResultOftheTask asks the node that owns a sub-task for its current
// status code via /v1/task/status.
func GetResultOftheTask(task *AITask) (int, error) {
	var reply TaskStatus
	endpoint := fmt.Sprintf("http://%s:7070/v1/task/status", task.Node)
	headers := map[string]string{
		"Content-Type": "application/json",
	}
	payload := map[string]interface{}{
		"name": task.Name,
		"src":  task.Src,
	}

	if err := http.JsonPostStructWithHeader(endpoint, headers, &reply, payload); err != nil {
		seelog.Errorf("GetResultOftheTask %s %s failed %v", task.Node, task.Src, err)
		return 0, err
	}

	seelog.Infof("GetResultOftheTask success %s %s %d\n", task.Node, task.Src, reply.Data.Item)
	return reply.Data.Item, nil
}

// GetTimeOftheTask fetches the timing record of a sub-task from the node
// that executed it, keyed by the base name of the sub-task's source slice.
func GetTimeOftheTask(task *AITask) (*AITask, error) {
	header := map[string]string{
		"Content-Type": "application/json",
	}

	ret := TaskResult{}
	_, filename := filepath.Split(task.Src)
	uri := fmt.Sprintf("http://%s:7070/v1/task/time/%s", task.Node, filename)
	err := http.GetStructWithHeader(uri, header, &ret)
	if err != nil {
		seelog.Errorf("GetTimeOftheTask %s %s failed %v", task.Node, task.Src, err)
		return nil, err
	}

	// BUG FIX: %d is not a valid verb for *AITask (vet: bad verb); use %v.
	seelog.Infof("GetTimeOftheTask success %s %s %v\n", task.Node, task.Src, ret.Data.Item)
	return ret.Data.Item, nil
}

// GetTaskStatus returns the current status of a sub-task, consulting the
// in-memory cache first and falling back to the etcd record, which is then
// cached for subsequent lookups. A missing etcd record throws.
func GetTaskStatus(job AITask) int {
	status, found := cache.Cache.GetCache(job.Src)
	seelog.Infof("GetTaskStatus cache:%s, %v", job.Src, found)
	res, ok := status.(int)
	if found && ok {
		seelog.Infof("job status cache:%d", res)
		return res
	}

	// Cache miss: load the persisted task record and refresh the cache.
	seelog.Infof("get etcd")
	tsk, err := GetSlaveTask(TaskPrefix, job)
	if err != nil {
		exception.Throw(err)
	}
	cache.Cache.SetCache(job.Src, tsk.Status)
	// BUG FIX: %s on an int printed "%!s(int=N)" noise; use %d.
	seelog.Infof("GetTaskStatus status cache: %s, %d", job.Src, tsk.Status)
	return tsk.Status
}

// PreJobTime records the timing of the "pre" phase of a remote sub-task:
// the master uploading the input and the worker downloading it.
type PreJobTime struct {
	PreUpBegin   string `json:"pre_up_begin"`
	PreUpEnd     string `json:"pre_up_end"`
	PreUpCost    int64  `json:"pre_up_cost"` // elapsed seconds
	PreDownBegin string `json:"pre_down_begin"`
	PreDownEnd   string `json:"pre_down_end"`
	PreDownCost  int64  `json:"pre_down_cost"` // elapsed seconds
}

// AfterJobTIme records the timing of the "after" phase of a remote
// sub-task: the worker uploading the result and the master downloading it.
// NOTE(review): "TIme" is a typo in the exported name; kept as-is because
// renaming would break external callers.
type AfterJobTIme struct {
	AfterUpBegin   string `json:"after_up_begin"`
	AfterUpEnd     string `json:"after_up_end"`
	AfterUpCost    int64  `json:"after_up_cost"` // elapsed seconds
	AfterDownBegin string `json:"after_down_begin"`
	AfterDownEnd   string `json:"after_down_end"`
	AfterDownCost  int64  `json:"after_down_cost"` // elapsed seconds
}

// ProcessTime records the begin/end timestamps and elapsed seconds of one
// processing step.
type ProcessTime struct {
	BeginTime string `json:"begin_time"`
	EndTime   string `json:"end_time"`
	Cost      int64  `json:"cost"` // elapsed seconds
}

// aiTaskExecute runs one AI inference job on job.Node by POSTing to the
// worker's /predict endpoint, checkpointing timing data on the job (saved
// under UnFinishedPrefix) at every phase.
//
// When the job runs on a remote node (job.Node != job.Master), the input
// video is first shared by the master and downloaded by the worker ("pre"
// phase); after a successful inference the output is shared by the worker
// and downloaded back by the master ("after" phase).
//
// NOTE(review): the early returns on ShareFile/downLoadFile failures leave
// the cached status for job.Src at its previous value (typically
// TaskRunning) — confirm stuck tasks are recovered elsewhere.
func aiTaskExecute(job *AITask) {
	_, finame := filepath.Split(job.Src)

	// Cache key for per-job progress snapshots consumed by status queries.
	taskProcess := fmt.Sprintf("%s-process", finame)
	seelog.Infof("aiTaskExecute ip:%s path:%s, src:%s", job.Node, job.Name, job.Src)
	if job.Node != job.Master {
		// Pre-upload: the master shares the source video.
		preUpBegin := time.Now()
		job.PreUpBegin = preUpBegin.Format("2006-01-02 15:04:05")
		cid, err := ShareFile(job.Master, job.Name)
		if err != nil {
			return
		}
		preUpEnd := time.Now()
		preUpCost := preUpEnd.Unix() - preUpBegin.Unix()
		job.PreUpEnd = preUpEnd.Format("2006-01-02 15:04:05")
		job.PreUpCost = preUpCost

		TaskSave(UnFinishedPrefix, job)
		seelog.Infof("aiTaskExecute ip:%s path:%s, cid: %s", job.Node, job.Name, cid)
		// Pre-download: the worker fetches the shared video by its CID.
		preDownBegin := time.Now()
		job.PreDownBegin = preDownBegin.Format("2006-01-02 15:04:05")
		err = downLoadFile(job.Node, job.Name, cid)
		if err != nil {
			return
		}
		preDownEnd := time.Now()
		preDownCost := preDownEnd.Unix() - preDownBegin.Unix()
		job.PreDownEnd = preDownEnd.Format("2006-01-02 15:04:05")
		job.PreDownCost = preDownCost

		TaskSave(UnFinishedPrefix, job)
		seelog.Infof("aiTaskExecute1 ip:%s path:%s, cid: %s, preTime:%v", job.Node, job.Name, cid, job)
	}

	// Cut the assigned [Start, End] segment out of the source video when the
	// task was split; the error is deliberately ignored (best effort).
	if job.MaxSplit != 1 {
		_ = fai.GetSplit(job.Name, job.Src, job.Start, job.End)
	}
	header := map[string]string{
		"Content-Type": "application/json",
		// NOTE(review): hard-coded auth token — move to configuration.
		"retoken": "w5qhsy842cl7j91",
	}
	// Results are written under the mirrored ".../result/..." directory.
	paths, _ := filepath.Split(job.Src)
	destName := strings.Replace(paths, "video", "result", 1)
	data := map[string]interface{}{
		"savesign":   true,
		"dealsign":   1,
		"savepath":   destName,
		"file_lists": []string{job.Src},
	}
	uri := fmt.Sprintf("http://%s:9080/predict", job.Node)

	ret := new(AIResult)
	processBegin := time.Now()
	job.Begin = processBegin.Format("2006-01-02 15:04:05")
	TaskSave(UnFinishedPrefix, job)
	cache.Cache.SetCache(taskProcess, job)
	// Blocking call: the AI service replies after inference completes.
	err := http.JsonPostStructWithHeader(uri, header, ret, data)
	if err != nil {
		seelog.Errorf("execute task failed %v", err)
		cache.Cache.SetCache(job.Src, TaskFailed)
	} else {
		processEnd := time.Now()
		processCost := processEnd.Unix() - processBegin.Unix()
		job.Finish = processEnd.Format("2006-01-02 15:04:05")
		job.Cost = processCost
		TaskSave(UnFinishedPrefix, job)
		cache.Cache.SetCache(taskProcess, job)
		if job.Node != job.Master {
			// After-upload: the worker shares the inference output.
			afterUpBegin := time.Now()
			job.AfterUpBegin = afterUpBegin.Format("2006-01-02 15:04:05")
			cid, err := ShareFile(job.Node, job.Dst)
			if err != nil {
				return
			}
			afterUpEnd := time.Now()
			afterUPCost := afterUpEnd.Unix() - afterUpBegin.Unix()
			job.AfterUpEnd = afterUpEnd.Format("2006-01-02 15:04:05")
			job.AfterUpCost = afterUPCost

			// After-download: the master fetches the output back.
			afterDownBegin := time.Now()
			job.AfterDownBegin = afterDownBegin.Format("2006-01-02 15:04:05")
			err = downLoadFile(job.Master, job.Dst, cid)
			if err != nil {
				return
			}
			afterDownEnd := time.Now()
			afterDownCost := afterDownEnd.Unix() - afterDownBegin.Unix()
			job.AfterDownEnd = afterDownEnd.Format("2006-01-02 15:04:05")
			job.AfterDownCost = afterDownCost
			TaskSave(UnFinishedPrefix, job)
			seelog.Infof("aiTaskExecute2 ip:%s path:%s, cid: %s", job.Node, job.Name, cid)
		}
	}

	// Mark finished, then downgrade to failed if the AI service reported a
	// non-zero state. NOTE(review): when the HTTP call failed, ret.States
	// stays 0, so the TaskFailed set above is overwritten with TaskFinished
	// here — confirm this is intended.
	cache.Cache.SetCache(job.Src, TaskFinished)
	if ret != nil && ret.States != 0 {
		cache.Cache.SetCache(job.Src, TaskFailed)
	}

	TaskSave(UnFinishedPrefix, job)
}

// ShareFile asks node `ip` to publish `path` to the shared store and
// returns the resulting content identifier from the response.
func ShareFile(ip, path string) (string, error) {
	header := map[string]string{
		"Content-Type": "application/json",
	}

	data := map[string]interface{}{
		"name": path,
	}

	ret := ShareResult{}
	uri := fmt.Sprintf("http://%s:7070/v1/file/share", ip)
	err := http.JsonPostStructWithHeader(uri, header, &ret, data)
	if err != nil {
		seelog.Errorf("share %s %s failed %v", ip, path, err)
		// BUG FIX: the error was previously swallowed (returned nil), so
		// callers proceeded with an empty CID as if the share succeeded.
		return "", err
	}

	seelog.Infof("share success %s %s %s\n", ip, path, ret.Data.Item)
	return ret.Data.Item, nil
}

// downLoadFile asks node `ip` to fetch the shared content identified by
// `cid` and store it at `path`, via the node's download endpoint.
func downLoadFile(ip, path, cid string) error {
	var reply ShareResult
	endpoint := fmt.Sprintf("http://%s:7070/v1/file/download", ip)
	headers := map[string]string{
		"Content-Type": "application/json",
	}
	body := map[string]interface{}{
		"path": path,
		"cid":  cid,
	}

	if err := http.JsonPostStructWithHeader(endpoint, headers, &reply, body); err != nil {
		seelog.Errorf("download %s %s failed %v", ip, path, err)
		return err
	}

	seelog.Infof("download success %s %s %s, %s\n", ip, cid, path, reply.Data.Item)
	return nil
}

// ExecuteMerge concatenates the outputs of all sub-tasks into the final
// result video, records merge timing on the job, and persists it.
func ExecuteMerge(job *AITask) {
	mergeStart := time.Now().Unix()
	job.MergeBegin = time.Now().Format("2006-01-02 15:04:05")

	outputPath := strings.Replace(job.Name, "video", "result", 1)
	filePaths := make([]string, 0, len(job.SubTask))
	for _, sub := range job.SubTask {
		filePaths = append(filePaths, sub.Dst)
	}

	// Merge the videos.
	err := fai.MergeVideos(outputPath, filePaths)
	if err != nil {
		// BUG FIX: log.Fatalf exited the whole process (and made the
		// `return` unreachable); a failed merge must not kill the node.
		log.Printf("Error merging videos: %v", err)
		return
	}

	mergeEnd := time.Now().Unix()
	job.MergeEnd = time.Now().Format("2006-01-02 15:04:05")
	job.MergeCost = mergeEnd - mergeStart

	_, filename := filepath.Split(job.Name)
	taskChange := fmt.Sprintf("%s-status", filename)
	cache.Cache.SetCache(taskChange, job)
	TaskSave(TaskPrefix, job)
	seelog.Infof("merge finished:%d elapsed:%d", time.Now().Unix(), mergeEnd-mergeStart)
}

// CheckMerge merges the sub-task outputs once every sub-task has finished;
// it is a no-op while any sub-task is still pending.
func CheckMerge(job *AITask) {
	ready := true
	for _, sub := range job.SubTask {
		if sub.Status != TaskFinished {
			ready = false
			break
		}
	}
	if ready {
		ExecuteMerge(job)
	}
}

// RecoverFromDB reloads every unfinished task from etcd after a restart,
// deleting records that already finished and re-scheduling the rest.
func RecoverFromDB() {
	cli, err := etcd.GetKeysApi()
	if err != nil {
		exception.Throw(err)
	}
	defer cli.Close()

	pending, err := cli.GetBatch(UnFinishedPrefix)
	if err != nil {
		exception.Throw(err)
	}

	for _, raw := range pending {
		seelog.Infof("recover tasks:%s", raw)
		task := new(AITask)
		if err := json.Unmarshal(raw, task); err != nil {
			exception.Throw(err)
		}

		// Drop checkpoints of tasks that finished before the restart.
		if task.Status == TaskFinished {
			TaskDelete(UnFinishedPrefix, task)
			continue
		}

		seelog.Infof("recover tasks: %v", task)
		go Schedule(task)
	}
}

// ChooseNode picks any alive node other than `dest` from the membership
// map; map iteration order makes the choice effectively arbitrary. It
// returns "" when no alive candidate exists.
func ChooseNode(dest string, nodes map[string]int) string {
	for candidate, state := range nodes {
		if candidate != dest && state == node.NodeAlive {
			return candidate
		}
	}
	return ""
}

// Request executes a forwarded sub-task on this node, persisting the
// running/finished transitions and logging the elapsed time.
func Request(job *AITask) {
	cache.Cache.SetCache(job.Src, TaskRunning)
	job.Status = TaskRunning
	TaskSave(TaskPrefix, job)

	started := time.Now().Unix()
	seelog.Infof("Request calcute begin %s: %d, %d", job.Src, started, TaskRunning)
	//aiTaskExecute(node.Localhost, job.Name, job.Master)
	ExecuteTask(job)

	cache.Cache.SetCache(job.Src, TaskFinished)
	job.Status = TaskFinished
	TaskSave(TaskPrefix, job)
	ended := time.Now().Unix()
	seelog.Infof("Request finished:%d elapsed:%d", time.Now().Unix(), ended-started)
}

// TaskSave serializes the job and writes it to etcd under
// "<prefix>/<basename>". The key is derived from the source-slice name on
// worker nodes and from the original video name on the master.
func TaskSave(prefix string, job *AITask) error {
	cli, err := etcd.GetKeysApi()
	if err != nil {
		return err
	}
	defer cli.Close()

	payload, err := job.TaskToString()
	if err != nil {
		return err
	}

	source := job.Name
	if job.Master != node.Localhost {
		source = job.Src
	}
	_, filename := filepath.Split(source)

	key := fmt.Sprintf("%s/%s", prefix, filename)
	if err := cli.Set(key, payload); err != nil {
		seelog.Errorf("TaskSave job save error%s:%v", key, err)
		return err
	}

	seelog.Infof("task save to db: %s", payload)
	return nil
}

// TaskDelete removes the record "<prefix>/<basename(job.Name)>" from etcd.
// NOTE(review): unlike TaskSave, the key is always derived from job.Name,
// never job.Src — confirm that is intended for worker-side records.
func TaskDelete(prefix string, job *AITask) error {
	cli, err := etcd.GetKeysApi()
	if err != nil {
		return err
	}
	defer cli.Close()

	_, filename := filepath.Split(job.Name)
	key := fmt.Sprintf("%s/%s", prefix, filename)
	if err := cli.Del(key); err != nil {
		seelog.Errorf("TaskDelete error%s:%v", key, err)
		return err
	}
	return nil
}

// GetTask loads a task record, preferring the in-memory status cache for
// primary records and falling back to etcd. When the primary key is
// absent, it retries under the unfinished prefix (keyed by the video
// name) before giving up.
func GetTask(prefix string, job *AITask) (*AITask, error) {
	_, nameKey := filepath.Split(job.Name)

	// Fast path: the master caches the latest snapshot per video name.
	if prefix == TaskPrefix {
		cached, exist := cache.Cache.GetCache(fmt.Sprintf("%s-status", nameKey))
		if exist {
			if snapshot, ok := cached.(*AITask); ok {
				return snapshot, nil
			}
		}
	}

	client, err := etcd.GetKeysApi()
	if err != nil {
		return nil, err
	}
	defer client.Close()

	// Unfinished records are keyed by the source slice, others by video name.
	key := nameKey
	if prefix == UnFinishedPrefix {
		_, key = filepath.Split(job.Src)
	}
	name := fmt.Sprintf("%s/%s", prefix, key)

	result, err := client.Get(name)
	if err != nil {
		seelog.Errorf("GetTask %s:%v", name, err)

		// Retry under the unfinished prefix with the video-name key.
		name = fmt.Sprintf("%s/%s", UnFinishedPrefix, nameKey)
		result, err = client.Get(name)
		if err != nil {
			seelog.Errorf("GetTask %s:%v", name, err)
			return nil, err
		}
	}

	task := new(AITask)
	if err := task.StringToTask(result); err != nil {
		return nil, fmt.Errorf("failed to convert string to task object, error:%v", err)
	}
	return task, nil
}

// GetSlaveTask loads the record of a sub-task from etcd, keyed by the base
// name of its source slice.
func GetSlaveTask(prefix string, job AITask) (*AITask, error) {
	client, err := etcd.GetKeysApi()
	if err != nil {
		return nil, err
	}
	defer client.Close()

	_, slice := filepath.Split(job.Src)
	key := fmt.Sprintf("%s/%s", prefix, slice)
	raw, err := client.Get(key)
	if err != nil {
		seelog.Errorf("GetSlaveTask %s:%v", key, err)
		return nil, err
	}

	task := new(AITask)
	if err := task.StringToTask(raw); err != nil {
		return nil, fmt.Errorf("failed to convert string to task object, error:%v", err)
	}
	return task, nil
}

// GetAllTasksFromDB is a stub: it always returns (nil, nil).
// NOTE(review): callers currently receive no tasks — implement or remove.
func GetAllTasksFromDB() ([]Task, error) {
	return nil, nil
}

// TaskToString serializes the task to its JSON representation.
func (t *Task) TaskToString() (string, error) {
	raw, marshalErr := json.Marshal(t)
	if marshalErr != nil {
		return "", marshalErr
	}
	return string(raw), nil
}

// StringToTask populates the task from its JSON representation.
func (t *Task) StringToTask(str string) error {
	raw := []byte(str)
	return json.Unmarshal(raw, t)
}

// RequestTask forwards a sub-task to its owning node's /v1/task/request
// endpoint and returns the tasks the remote node accepted.
func RequestTask(job *AITask) ([]AITask, error) {
	header := map[string]string{
		"Content-Type": "application/json",
	}

	data := map[string]interface{}{
		"name":   job.Name,
		"master": job.Master,
		"src":    job.Src,
		"dst":    job.Dst,
		"start":  job.Start,
		"end":    job.End,
		"node":   job.Node,
	}

	ret := TaskRequest{}
	uri := fmt.Sprintf("http://%s:7070/v1/task/request", job.Node)
	err := http.JsonPostStructWithHeader(uri, header, &ret, data)
	if err != nil {
		// BUG FIX: log the real operation ("request", not the copy-pasted
		// "share") and propagate the error instead of returning nil, nil.
		seelog.Errorf("request %s %s failed %v", job.Node, job.Name, err)
		return nil, err
	}

	seelog.Infof("request success %s %s %v\n", job.Node, job.Name, ret.Data.Item)
	return ret.Data.Item, nil
}

// Notify reports the current status of a sub-task source, preferring the
// in-memory cache and falling back to the persisted etcd record, which is
// then cached for subsequent lookups.
func Notify(job AITask) SrcStatus {
	status, found := cache.Cache.GetCache(job.Src)
	seelog.Infof("GetTaskStatus cache:%s, %v", job.Src, found)
	res, ok := status.(int)
	if found && ok {
		seelog.Infof("job status cache:%d", res)
		return SrcStatus{
			Src:    job.Src,
			Status: res,
		}
	}

	// Cache miss: load the task record from etcd and refresh the cache.
	seelog.Infof("get etcd")
	tsk, err := GetSlaveTask(TaskPrefix, job)
	if err != nil {
		exception.Throw(err)
	}
	cache.Cache.SetCache(job.Src, tsk.Status)
	// BUG FIX: use %d for the int status, and return the status loaded from
	// etcd (previously the zero-valued `res` from the failed type assertion
	// was returned, always reporting TaskCreated).
	seelog.Infof("GetTaskStatus status cache: %s, %d", job.Src, tsk.Status)
	return SrcStatus{
		Src:    job.Src,
		Status: tsk.Status,
	}
}

// GetTasks loads every task stored under the given etcd prefix.
func GetTasks(prefix string) ([]*AITask, error) {
	client, err := etcd.GetKeysApi()
	if err != nil {
		return nil, err
	}
	defer client.Close()

	// BUG FIX: honor the prefix parameter — the hard-coded TaskPrefix made
	// e.g. GetTasks(UnFinishedPrefix) silently return the finished set.
	tasks, err := client.GetBatch(prefix)
	if err != nil {
		exception.Throw(err)
	}

	res := make([]*AITask, 0, len(tasks))
	for _, item := range tasks {
		t := &AITask{}
		if err := json.Unmarshal(item, t); err != nil {
			exception.Throw(err)
		}
		res = append(res, t)
	}

	return res, nil
}

// DeleteTask removes the task record from etcd via the raw Delete API with
// a two-second timeout, returning the job on success.
func DeleteTask(prefix string, job *AITask) (*AITask, error) {
	client, err := etcd.GetKeysApi()
	if err != nil {
		return nil, err
	}
	defer client.Close()

	_, filename := filepath.Split(job.Name)
	key := fmt.Sprintf("%s/%s", prefix, filename)

	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()
	if _, err = client.Delete(ctx, key); err != nil {
		seelog.Errorf("DeleteTask %s:%v", key, err)
		return nil, err
	}
	return job, nil
}

// SendMessage broadcasts an "ai_finish" notification for the result of
// `name` to every known node over MQTT, converting the result to H.264
// first when the pipeline does not already emit it.
func SendMessage(name string) {
	nodes := node.NodesMap
	output := strings.Replace(name, "video", "result", 1)
	outputPath := output

	h264 := os.Getenv("H264")
	if h264 != "true" {
		dir, filename := filepath.Split(output)
		outputPath = fmt.Sprintf("%sh264-%s", dir, filename)
		_ = fai.ConvertToH264(output, outputPath)
	}

	finishMsg := &FinishMessage{
		Name:   outputPath,
		Status: "ai_finish",
	}

	for each := range nodes {
		mqClient, err := mqtt.NewEmqx(each)
		if err != nil {
			seelog.Errorf("New emqx fai.Mq err:%v", err)
			// BUG FIX: previously fell through and called Publish/Disconnect
			// on a possibly-nil client; skip unreachable brokers instead.
			continue
		}

		Publish(mqClient, finishMsg)
		mqClient.Disconnect(1000)
	}
}
