package asynctask

import (
	"fmt"
	"time"

	"github.com/beego/beego/v2/client/orm"
	ecsmodel "github.com/huaweicloud/huaweicloud-sdk-go-v3/services/ecs/v2/model"
	"github.com/pkg/errors"
	"scase.io/application-auto-scaling-service/pkg/cloudresource"
	"scase.io/application-auto-scaling-service/pkg/common"
	"scase.io/application-auto-scaling-service/pkg/db"
	event "scase.io/application-auto-scaling-service/pkg/service/event"
	"scase.io/application-auto-scaling-service/pkg/utils/logger"
)

// DelEcsScalingGroupTask is the asynchronous task that deletes an ECS
// scaling group: its policies, monitor tasks and instances.
type DelEcsScalingGroupTask struct {
	groupId string // id of the scaling group to delete; also the task key
	BaseTask
	event *event.EventService // audit-event reporter; initialized lazily in Run
}

// NewDelEcsScalingGroupTask builds a deletion task for the scaling group
// identified by groupId. All other fields keep their zero values until Run.
func NewDelEcsScalingGroupTask(groupId string) *DelEcsScalingGroupTask {
	task := new(DelEcsScalingGroupTask)
	task.groupId = groupId
	return task
}

// GetKey returns the id of the scaling group this task operates on,
// which serves as the task's unique resource key.
func (t *DelEcsScalingGroupTask) GetKey() string {
	return t.groupId
}

// GetType returns the task type constant identifying delete-scaling-group tasks.
func (t *DelEcsScalingGroupTask) GetType() string {
	return TaskTypeDelScalingGroup
}

// Run executes one attempt of the delete-scaling-group task: it deletes the
// group's scaling policies and monitor tasks, moves the group into the
// "deleting" state, deletes its instances, reports a success event, and
// finally records the task as complete. A non-nil return causes a retry.
func (t *DelEcsScalingGroupTask) Run(log *logger.FMLogger) error {
	// After too many unsuccessful retries, do NOT mark the group deleted;
	// just end the task (return nil so it is not rescheduled forever).
	if t.GetRetryTimes() >= common.AsyncMaxRetryTimes {
		log.Error("[delete_group] delete scaling group [%s] fail after %d retry", t.groupId, common.AsyncMaxRetryTimes)
		return nil
	}
	// 1. Load the group from the database; if absent, the resource is
	// already fully deleted and we only need to record completion.
	group, err := db.GetNotDeletedGroupById("", t.groupId)
	if err != nil {
		if errors.Is(err, orm.ErrNoRows) {
			log.Info("[delete_group] Scaling group[%s] has been deleted, do nothing", t.groupId)
			return db.TxRecordDeleteEcsGroupTaskComplete(t.groupId)
		}
		return err
	}
	t.event, err = event.NewEventService(group.ProjectId, nil, log)
	if err != nil {
		log.Error("new event service error:%+v", err)
		return err
	}
	// 2. Delete the scaling policies and their associated monitor tasks.
	if db.GroupPolicyRelationTable().CheckRelationExist(group.Id, "") {
		if err = t.delPolicyAndMonitorTask(log, group.Id); err != nil {
			log.Error("[delete_group] delete policy and monitor task error:%+v", err)
			return err
		}
	}
	// 3. Switch the group state to "deleting". A successful switch acts as a
	// lock; the longest wait is roughly the retry limit times the task
	// interval. If the wait limit is exceeded the lock is forced — only a
	// scale-out task may block this long, and if that task eventually
	// finishes there can be orphaned instances; consider covering this in
	// the event audit.
	if err := t.WaitGroupDeleting(log, group); err != nil {
		log.Error("[delete_group] wait group [%s] delete with error:%+v", group.Id, err)
		return err
	}
	if err := t.delInstance(log, group); err != nil {
		log.Error("[delete_group] delete instance of group [%s] error:%+v", group.Id, err)
		return err
	}
	eveReq := t.event.BuildInstanceEventReq("[delete-group] success", group.Id, fmt.Sprintf("delete-group [%s] of fleet [%s] success", t.groupId, group.FleetId),
		fmt.Sprintf("delete-group [%s] of fleet [%s] success", t.groupId, group.FleetId), common.EventTraceStateNormal)
	t.event.RegisterEvent(eveReq, group.ProjectId, log)

	return db.TxRecordDeleteEcsGroupTaskComplete(t.groupId)
}

// WaitGroupDeleting moves the scaling group into the "deleting" state,
// retrying while the group's current state does not allow deletion. After
// defaultMaxWaitTimes failed attempts the state is set forcibly via
// UpdateScalingGroupState; if even the forced update fails, an incident
// event is registered and the error is returned.
//
// Bug fix: previously the forced-update branch was inside the retry loop and
// only reachable when ChangeScalingGroupState2Deleting had just SUCCEEDED on
// the final iteration (where forcing is redundant), while exhausting every
// retry with ErrScalingGroupCannotBeDeleted exited the loop and returned nil
// without ever forcing the state. The force now runs after the retries.
func (t *DelEcsScalingGroupTask) WaitGroupDeleting(log *logger.FMLogger, group *db.ScalingGroup) error {
	for retry := 0; retry < defaultMaxWaitTimes; retry++ {
		err := db.ChangeScalingGroupState2Deleting(log, t.groupId)
		if err == nil {
			// State switched to "deleting"; the caller may proceed.
			return nil
		}
		if !errors.Is(err, common.ErrScalingGroupCannotBeDeleted) {
			return err
		}
		// Current state does not allow deletion yet; wait and retry.
		time.Sleep(waitGroupCanBeDeletedInternal)
	}
	// Retries exhausted: force the state to "deleting" (acts as a lock).
	log.Warn("[delete_group] set group [%s] state to [deleting] fail after retry %d times", t.groupId, defaultMaxWaitTimes)
	if err := db.UpdateScalingGroupState(t.groupId, db.ScalingGroupStateDeleting); err != nil {
		log.Error("set group [%s] state to [deleting] fail with error:%+v", t.groupId, err)
		eveReq := t.event.BuildInstanceEventReq("[delete-group] failed", group.Id, fmt.Sprintf("delete group [%s] of fleet [%s] failed with error:%+v",
			t.groupId, group.FleetId, err.Error()),
			"delete scaling group failed", common.EventTraceStateIncident)
		t.event.RegisterEvent(eveReq, group.ProjectId, log)
		return err
	}
	return nil
}

// delPolicyAndMonitorTask deletes every scaling policy bound to the group:
// for each group-policy relation it removes the policy's metric monitor task
// (if one exists) and then the relation row itself.
func (t *DelEcsScalingGroupTask) delPolicyAndMonitorTask(log *logger.FMLogger, groupId string) error {
	relations, err := db.GroupPolicyRelationTable().GetRelation("", groupId, "", 0, 0)
	if err != nil {
		log.Error("[delete_group] get group-policy relation error:%+v", err)
		return err
	}
	// Remove the monitor task first, then drop the relation record.
	for _, rel := range relations {
		task, err := db.GetMetricMonitorTaskByScalingPolicy(rel.ScalingPolicyId)
		// A missing row (orm.ErrNoRows) simply means no monitor task exists.
		if err != nil && !errors.Is(err, orm.ErrNoRows) {
			log.Error("[delete_group] get policy error:%+v", err)
			return err
		}
		if task != nil {
			if err := db.DeleteMetricMonitorTask(task.Id); err != nil {
				log.Error("[delete_group] delete monitor task [%s] error:%+v", task.Id, err)
				return err
			}
		}
		if err := db.GroupPolicyRelationTable().DeleteRelation(groupId, rel.ScalingPolicyId); err != nil {
			log.Error("[delete_group] delete group-policy relation error:%+v", err)
			return err
		}
	}
	log.Info("[delete_group] The scaling policies[%+v] of group[%s] has been deleted", relations, t.groupId)
	return nil
}

// delInstance batch-deletes all remaining VM instances of the group via the
// ECS controller, waits for the cloud-side job to finish, then marks the
// instance records as deleted in the database.
func (t *DelEcsScalingGroupTask) delInstance(log *logger.FMLogger, group *db.ScalingGroup) error {
	instanceIds, err := db.EcsInfoTable().GetAllInstanceIdByGroupId(group.Id, []string{db.EcsStateActive, db.EcsStateActivating, db.EcsStateError, db.EcsStateDeleting})
	if err != nil {
		log.Error("[delete_group] get all instance by group Id [%s] with error:%+v", group.Id, err)
		return err
	}
	if len(instanceIds) == 0 {
		log.Info("[delete_group] there are no instance need to be deleted, doing nothing")
		return nil
	}
	resCtrl, err := cloudresource.GetEcsController(group.ProjectId)
	if err != nil {
		log.Error("[delete_group] get ecs controller error:%+v", err)
		return err
	}
	jobId, err := resCtrl.BatchDeleteVm(log, instanceIds)
	if err != nil {
		log.Error("[delete_group] batch delete ecs error:%+v", err)
		return err
	}
	// Only wait when a job was actually issued (an empty job id means the
	// VMs were already gone); failure handling happens in WaitDeleteResource.
	if len(jobId) != 0 {
		if err := t.WaitDeleteResource(jobId, resCtrl, group, log); err != nil {
			log.Error("[delete_group] delete instances of group [%s] error:%+v", group.Id, err)
			return err
		}
	}

	t.handleSuccessJob(instanceIds, log)
	log.Info("[delete_group] job [%s] execute success", jobId)

	return nil
}

// WaitDeleteResource polls the ECS job identified by jobId until it reaches
// SUCCESS, reaches FAIL, or defaultMaxWaitTimes polls have elapsed. On
// failure or timeout it lets handleFailedJob reconcile per-instance results
// and returns an error so the task is retried.
//
// Bug fix: the failure log used the %s verb for an integer retry count, and
// both the log and the returned error reported MaxWaitTimes while the loop
// actually polls defaultMaxWaitTimes times.
func (t *DelEcsScalingGroupTask) WaitDeleteResource(jobId string, resCtrl *cloudresource.EcsResourceController, group *db.ScalingGroup, log *logger.FMLogger) error {
	finishFlag := false
	var showJobResp *ecsmodel.ShowJobResponse
	var err error
	for i := 0; i < defaultMaxWaitTimes; i++ {
		showJobResp, err = resCtrl.ShowJob(jobId)
		if err != nil {
			log.Error("[delete_group] show job error:%+v", err)
			return err
		}
		if *showJobResp.Status == ecsmodel.GetShowJobResponseStatusEnum().SUCCESS {
			finishFlag = true
			log.Info("[delete_group] job id [%s] finish all sub jobs", jobId)
			break
		}
		if *showJobResp.Status == ecsmodel.GetShowJobResponseStatusEnum().FAIL {
			log.Error("[delete_group] job id [%s] finish but fail", jobId)
			break
		}
		time.Sleep(common.SleepSecond * time.Second)
	}
	if !finishFlag {
		// Mark already-deleted instances and report the failed ones.
		t.handleFailedJob(showJobResp, log, group)
		log.Error("[delete_group] job [%s] is fail or not finish after retry %d times", jobId, defaultMaxWaitTimes)
		return fmt.Errorf("job [%s] is not finish after retry %d times", jobId, defaultMaxWaitTimes)
	}
	return nil
}

// handleSuccessJob marks the given instances as deleted in the database
// after the batch-delete job has completed successfully. Errors are logged
// but not propagated (best effort).
func (t *DelEcsScalingGroupTask) handleSuccessJob(instanceIds []string, log *logger.FMLogger) {
	// Job succeeded: set the instance records' state to deleted.
	if err := db.EcsInfoTable().DeleteDeletingVm(instanceIds); err != nil {
		// Bug fix: the format string previously had no arguments at all.
		log.Error("[delete_group] delete all instance of group [%s] error:%+v", t.groupId, err)
	}
}

// handleFailedJob reconciles a failed batch-delete job: instances whose
// sub-jobs succeeded are marked deleted in the database, and the failed
// ones are logged and reported through an audit event.
//
// Bug fixes: the first Error call had format verbs but no arguments, and
// the event-detail Sprintf passed len(...) to %s and the fleet id to %d.
func (t *DelEcsScalingGroupTask) handleFailedJob(showJobResp *ecsmodel.ShowJobResponse, log *logger.FMLogger, group *db.ScalingGroup) {
	successDeletedInstances, failDeletedInstancesAndReason := getFailSubJobInfo(showJobResp)
	// Even though the batch failed, persist the sub-jobs that did succeed.
	if err := db.EcsInfoTable().DeleteDeletingVm(successDeletedInstances); err != nil {
		log.Error("[delete_group] delete deleting instance of group[%s] error:%+v", group.Id, err)
		return
	}
	log.Warn("[delete_group] there are %d instance delete failed, failed message:%+v", len(failDeletedInstancesAndReason), failDeletedInstancesAndReason)
	eveReq := t.event.BuildInstanceEventReq("[delete-group] fail", group.Id, fmt.Sprintf("delete-group [%s] of fleet [%s] fail", t.groupId, group.FleetId),
		fmt.Sprintf("fleet [%s] has %d instance delete failed, reason:%+v", group.FleetId, len(failDeletedInstancesAndReason), failDeletedInstancesAndReason), common.EventTraceStateNormal)
	t.event.RegisterEvent(eveReq, group.ProjectId, log)
}

// getFailSubJobInfo splits the sub-jobs of a batch-delete job into the
// server ids that were deleted successfully and a map of failed server id
// to failure reason.
//
// Bug fix: the nil check previously inspected showJobResp.FailReason but
// then dereferenced subjob.FailReason, which could panic on a sub-job with
// a nil FailReason; the check now targets the field actually dereferenced.
// NOTE(review): assumes subjob.Entities and subjob.Entities.ServerId are
// always populated by the SDK for batch-delete sub-jobs — confirm.
func getFailSubJobInfo(showJobResp *ecsmodel.ShowJobResponse) ([]string, map[string]string) {
	successDeletedInstances := []string{}
	failDeletedInstancesAndReason := make(map[string]string)
	for _, subjob := range *showJobResp.Entities.SubJobs {
		if *subjob.Status == ecsmodel.GetSubJobStatusEnum().SUCCESS {
			successDeletedInstances = append(successDeletedInstances, *subjob.Entities.ServerId)
		} else if *subjob.Status == ecsmodel.GetSubJobStatusEnum().FAIL {
			if subjob.FailReason == nil {
				failDeletedInstancesAndReason[*subjob.Entities.ServerId] = "fail reason is empty"
			} else {
				failDeletedInstancesAndReason[*subjob.Entities.ServerId] = *subjob.FailReason
			}
		}
	}
	return successDeletedInstances, failDeletedInstancesAndReason
}
