// Copyright (c) Huawei Technologies Co., Ltd. 2022-2022. All rights reserved.

// Package taskservice starts asynchronous scaling tasks.
package taskservice

import (
	"fmt"
	"github.com/google/uuid"
	"github.com/pkg/errors"
	"scase.io/application-auto-scaling-service/pkg/cloudresource"
	"scase.io/application-auto-scaling-service/pkg/common"
	"scase.io/application-auto-scaling-service/pkg/db"
	"scase.io/application-auto-scaling-service/pkg/metricmonitor/interfaces"
	"scase.io/application-auto-scaling-service/pkg/taskmgmt"
	"scase.io/application-auto-scaling-service/pkg/taskmgmt/asynctask"
	"scase.io/application-auto-scaling-service/pkg/utils/logger"
	"time"
)

// StartScaleOutGroupTask starts an asynchronous scale-out task for the group.
// If the scaling group is not stable at this moment, the db layer returns
// ErrScalingGroupNotStable.
func StartScaleOutGroupTask(groupId string, targetNum int32) error {
	if targetNum <= 0 {
		return errors.Errorf("invalid param scaleOutNum[%d]", targetNum)
	}

	// Record the scale-out start in the db first; this is the step that fails
	// with ErrScalingGroupNotStable when the group is mid-activity.
	if err := db.TxRecordGroupScaleOutStart(groupId, targetNum); err != nil {
		return err
	}

	// Hand the actual work to the async task manager.
	taskmgmt.GetTaskMgmt().AddTask(asynctask.NewScaleOutTask(groupId, targetNum))
	return nil
}

// StartEcsScalingOutTask scales the group out toward targetNum instances by
// recording the task in the db and queueing an async VM/Pod scale-out task.
// Returns common.ErrNoNeedToDo (wrapped) when current plus in-flight instances
// already reach the target, and an error for an unknown instance type.
func StartEcsScalingOutTask(group *db.ScalingGroup, targetNum int32) error {
	logger.R.Info("prepare manual scale out of group [%s]", group.Id)
	// Count ECS instances already being created by in-flight tasks so we do
	// not over-provision.
	creatingNum := db.GetAllTaskAffectNumOfGroup(group.Id)
	if group.CurrentInstanceNumber+creatingNum >= targetNum {
		logger.R.Info("current num:%d + creating num:%d >= target num:%d, no need to scale out",
			group.CurrentInstanceNumber, creatingNum, targetNum)
		// BUG fix: the original passed fmt.Sprintf(...) as the Wrapf format
		// string (double formatting, flagged by go vet); use Wrapf's verbs.
		return errors.Wrapf(common.ErrNoNeedToDo, "current num:%d + creating num:%d >= target num:%d",
			group.CurrentInstanceNumber, creatingNum, targetNum)
	}
	// Clamp the target to the group's configured maximum.
	if targetNum > group.MaxInstanceNumber {
		logger.R.Warn("target num: %d > group max instance num:%d, using group max instance num", targetNum, group.MaxInstanceNumber)
		targetNum = group.MaxInstanceNumber
	}
	createNum := targetNum - group.CurrentInstanceNumber - creatingNum
	logger.R.Info("group [%s] current num:%d + creating num:%d < target num:%d, will create %d instance",
		group.Id, group.CurrentInstanceNumber, creatingNum, targetNum, createNum)

	// After clamping, the number to create may have become non-positive.
	if createNum <= 0 {
		return errors.Errorf("invalid param scaleOutNum[%d]", createNum)
	}

	taskId := uuid.NewString()

	// Record the scaling-out start in the db, then queue the async task for
	// the matching instance type.
	if group.InstanceType == common.InstanceTypeVM {
		if err := db.TxRecordEcsGroupScalingOutStart(taskId, group.Id, createNum); err != nil {
			return err
		}
		taskmgmt.GetTaskMgmt().AddTask(asynctask.NewEcsScaleOutTask(taskId, group.Id))
	} else if group.InstanceType == common.InstanceTypePod {
		if err := db.TxRecordPodGroupScalingOutStart(taskId, group.Id, createNum); err != nil {
			return err
		}
		taskmgmt.GetTaskMgmt().AddTask(asynctask.NewPodScaleOutTask(taskId, group.Id))
	} else {
		return fmt.Errorf("error instance type:%s", group.InstanceType)
	}

	// Refresh the scaling timestamp on every task start so the cooldown
	// window is measured from the latest scaling activity.
	group.AutoScalingTimestamp = time.Now().UnixNano()
	if err := db.UpdateScalingGroup(group, "auto_scaling_timestamp"); err != nil {
		// BUG fix: the original log call omitted group.Id, leaving %s unfilled.
		logger.R.Error("Update AutoScalingTimestamp of ScalingGroup[%s] is failed, err: %+v", group.Id, err)
		return err
	}
	return nil
}

// StartScaleInGroupTaskForRandomVms starts a scale-in task, picking the VMs to
// remove at random (the first scaleInNum instances reported by the cloud).
// If the scaling group is not stable, ErrScalingGroupNotStable is returned.
// Deprecated: temporary method; the VMs to remove should be chosen by load.
func StartScaleInGroupTaskForRandomVms(groupId, projectId string, scaleInNum int32) error {
	// Validate the requested scale-in amount.
	switch {
	case scaleInNum < 0:
		return errors.Errorf("invalid param scaleInNum[%d]", scaleInNum)
	case scaleInNum == 0:
		return nil
	}

	// Resolve the group and its underlying AS group id.
	group, err := db.GetScalingGroupById(projectId, groupId)
	if err != nil {
		return err
	}
	if group.State != db.ScalingGroupStateStable && group.State != db.ScalingGroupStateError {
		return errors.Wrapf(common.ErrScalingGroupNotStable,
			"scaling group[%s] is in state[%s], cannot be scaled in", groupId, group.State)
	}
	vmGroup, err := db.GetVmScalingGroupById(group.ResourceId)
	if err != nil {
		return err
	}

	// List every VM instance currently in the AS group.
	resCtrl, err := cloudresource.GetResourceController(projectId)
	if err != nil {
		return err
	}
	instanceIds, err := resCtrl.GetAsScalingInstanceIds(logger.R, vmGroup.AsGroupId)
	if err != nil {
		return err
	}
	// The decision may have been made while a previous scale-in was running;
	// if fewer instances remain than requested, the decision has expired.
	if len(instanceIds) < int(scaleInNum) {
		return errors.Wrapf(common.ErrScalingDecisionExpired, "len of instanceIds[%d] < scaleInNum[%d], "+
			"scaling decision may have expired", len(instanceIds), scaleInNum)
	}
	// Take the first scaleInNum instances as the victims.
	return StartScaleInGroupTask(groupId, instanceIds[:scaleInNum])
}

// StartEcsScaleInGroupTaskForRandomVms scales an ECS scaling group in by
// removing scaleInNum randomly-chosen (first-listed) active instances.
// Scale-in is rejected when: the group is unstable, the cooldown window has
// not elapsed, or the request exceeds the current instance count (an expired
// decision). The amount is reduced so the group never drops below
// MinInstanceNumber.
func StartEcsScaleInGroupTaskForRandomVms(group *db.ScalingGroup, scaleInNum int32) error {
	logger.R.Info("prepare manual scale in for random vm task of group [%s]", group.Id)
	if scaleInNum < 0 {
		return fmt.Errorf("invalid param scaleInNum[%d]", scaleInNum)
	}
	if scaleInNum == 0 {
		return nil
	}

	// Reject when the group is mid-activity.
	if group.State != db.ScalingGroupStateStable && group.State != db.ScalingGroupStateError {
		return fmt.Errorf("scaling group[%s] is in state[%s], cannot be scaled in", group.Id, group.State)
	}

	// Reject while still inside the cooldown window of the last scaling.
	if time.Now().UnixNano()-group.AutoScalingTimestamp < group.CoolDownTime*int64(time.Minute) {
		endTime := time.Unix(0, group.AutoScalingTimestamp+group.CoolDownTime*int64(time.Minute)).Format(common.TimeLayout)
		logger.R.Error("ScalingGroup[%s] is in the cooling duration until %s", group.Id, endTime)
		return fmt.Errorf("ScalingGroup is in the cooling duration until %s", endTime)
	}

	instanceIds, err := db.EcsInfoTable().GetAllInstanceIdByGroupId(group.Id, []string{db.EcsStateActive})
	if err != nil {
		return fmt.Errorf("get all instance error:%+v", err)
	}
	if int(scaleInNum) > len(instanceIds) {
		logger.R.Error("scale in num %d is larger than current instance num %d", scaleInNum, len(instanceIds))
		return fmt.Errorf("scale in num %d is larger than current instance num %d", scaleInNum, len(instanceIds))
	}
	// Correct the amount so the group never shrinks below its minimum
	// (relevant for automatic scale-in decisions).
	curNum := int32(len(instanceIds))
	if curNum-scaleInNum < group.MinInstanceNumber {
		logger.R.Warn("current instance num:%d - scale in num:%d < group minimum num:%d, change scale in Number to %d",
			curNum, scaleInNum, group.MinInstanceNumber, curNum-group.MinInstanceNumber)
		scaleInNum = curNum - group.MinInstanceNumber
	}
	// BUG fix: after the correction scaleInNum may be 0 (nothing to do) or
	// negative (curNum < MinInstanceNumber), and instanceIds[:negative]
	// would panic. Bail out cleanly instead.
	if scaleInNum <= 0 {
		logger.R.Info("group [%s] is at or below its minimum instance number, nothing to scale in", group.Id)
		return nil
	}
	deleteIds := instanceIds[:scaleInNum]

	return StartEcsScaleInGroupTask(group, deleteIds)
}

// StartManualScaleInTask manually scales the group in by scaleInNum active
// ECS instances; the amount is capped at the current instance count.
func StartManualScaleInTask(group *db.ScalingGroup, scaleInNum int32) error {
	// BUG fix: the original log line said "scale out" in a scale-in function.
	logger.R.Info("prepare manual scale in task of group [%s]", group.Id)
	if scaleInNum <= 0 {
		return fmt.Errorf("invalid param scaleInNum[%d]", scaleInNum)
	}
	instanceIds, err := db.EcsInfoTable().GetAllInstanceIdByGroupId(group.Id, []string{db.EcsStateActive})
	if err != nil {
		return fmt.Errorf("get all instance error:%+v", err)
	}
	// Cap the request at the number of instances that actually exist.
	if int(scaleInNum) > len(instanceIds) {
		logger.R.Error("scale in num %d is larger than current instance num %d", scaleInNum, len(instanceIds))
		scaleInNum = int32(len(instanceIds))
	}
	deleteIds := instanceIds[:scaleInNum]
	return StartEcsScaleInGroupTask(group, deleteIds)
}

// StartEcsScaleInGroupTask records a scale-in task for the given instances and
// queues the matching asynchronous VM/Pod scale-in task. instanceIds must be
// non-empty; an unknown instance type is an error.
func StartEcsScaleInGroupTask(group *db.ScalingGroup, instanceIds []string) error {
	logger.R.Info("prepare scale in task of group [%s]", group.Id)
	if len(instanceIds) == 0 {
		// BUG fix: the original message named the wrong function.
		return errors.Errorf("instanceIds must be provided to StartEcsScaleInGroupTask")
	}
	taskId := uuid.NewString()

	if group.InstanceType == common.InstanceTypeVM {
		// Record the VM scale-in start in the db.
		err := db.TxRecordEcsGroupScalingInStart(taskId, group.Id, instanceIds)
		if err != nil {
			return err
		}
		// Queue the async VM scale-in task.
		taskmgmt.GetTaskMgmt().AddTask(asynctask.NewEcsScaleInTask(taskId, group.Id, instanceIds))
	} else if group.InstanceType == common.InstanceTypePod {
		// Record the pod scale-in start in the db.
		err := db.TxRecordPodGroupScalingInStart(taskId, group.Id, instanceIds)
		if err != nil {
			return err
		}
		// Queue the async pod scale-in task.
		taskmgmt.GetTaskMgmt().AddTask(asynctask.NewPodScaleInTask(taskId, group.Id, instanceIds))
	} else {
		return fmt.Errorf("error instance type:%s", group.InstanceType)
	}

	// Refresh the scaling timestamp on every task start so the cooldown
	// window is measured from the latest scaling activity.
	group.AutoScalingTimestamp = time.Now().UnixNano()
	if err := db.UpdateScalingGroup(group, "auto_scaling_timestamp"); err != nil {
		// BUG fix: the original log call omitted group.Id, leaving %s unfilled.
		logger.R.Error("Update AutoScalingTimestamp of ScalingGroup[%s] is failed, err: %+v", group.Id, err)
		return err
	}
	return nil
}

// StartManualPodScaleInTask manually scales a pod instance group in, reusing
// StartEcsScaleInGroupTask for the actual task start. The amount is capped at
// the number of pods currently active/running.
func StartManualPodScaleInTask(group *db.ScalingGroup, scaleInNum int32) error {
	// BUG fix: the original log line said "scale out" in a scale-in function.
	logger.R.Info("prepare manual scale in task of group [%s]", group.Id)
	if scaleInNum <= 0 {
		return fmt.Errorf("invalid param scaleInNum[%d]", scaleInNum)
	}
	// TODO: prefer removing not-yet-active pods first when scaling in.
	podNames, err := db.PodInfoTable().GetAllPodNames(group.Id, []string{db.PodStateActive, db.PodStateRunning})
	if err != nil {
		return fmt.Errorf("get all instance error:%+v", err)
	}
	if int(scaleInNum) > len(podNames) {
		logger.R.Error("scale in num %d is larger than current instance num %d", scaleInNum, len(podNames))
		scaleInNum = int32(len(podNames))
	}
	// After capping, there may be nothing left to do; check before slicing.
	if scaleInNum <= 0 {
		logger.R.Info("scale in 0 instance, doing nothing")
		return nil
	}
	deleteIds := podNames[:scaleInNum]
	logger.R.Info("group [%s] will scale in %d pods:%v", group.Id, scaleInNum, deleteIds)
	return StartEcsScaleInGroupTask(group, deleteIds)
}

// StartPodScaleInGroupTask records a pod scale-in task for the given pods and
// queues the asynchronous pod scale-in task. podNames must be non-empty.
func StartPodScaleInGroupTask(group *db.ScalingGroup, podNames []string) error {
	logger.R.Info("prepare scale in task of group [%s]", group.Id)
	if len(podNames) == 0 {
		// BUG fix: the original message named the wrong function.
		return errors.Errorf("podNames must be provided to StartPodScaleInGroupTask")
	}
	taskId := uuid.NewString()
	err := db.TxRecordPodGroupScalingInStart(taskId, group.Id, podNames)
	if err != nil {
		return err
	}

	// Queue the async pod scale-in task.
	taskmgmt.GetTaskMgmt().AddTask(asynctask.NewPodScaleInTask(taskId, group.Id, podNames))

	// Refresh the scaling timestamp on every task start so the cooldown
	// window is measured from the latest scaling activity.
	group.AutoScalingTimestamp = time.Now().UnixNano()
	if err := db.UpdateScalingGroup(group, "auto_scaling_timestamp"); err != nil {
		// BUG fix: the original log call omitted group.Id, leaving %s unfilled.
		logger.R.Error("Update AutoScalingTimestamp of ScalingGroup[%s] is failed, err: %+v", group.Id, err)
		return err
	}
	return nil
}

// StartScaleInGroupTask starts an asynchronous scale-in task for the given
// instances. If the scaling group is not stable at this moment, the db layer
// returns ErrScalingGroupNotStable.
func StartScaleInGroupTask(groupId string, instanceIds []string) error {
	if len(instanceIds) == 0 {
		return errors.Errorf("instanceIds must be provided to StartScaleInGroupTask")
	}

	// Record the scale-in start in the db first; this is the step that fails
	// with ErrScalingGroupNotStable when the group is mid-activity.
	if err := db.TxRecordGroupScaleInStart(groupId, instanceIds); err != nil {
		return err
	}

	// Hand the actual work to the async task manager.
	taskmgmt.GetTaskMgmt().AddTask(asynctask.NewScaleInTask(groupId, instanceIds))
	return nil
}

// StartDeleteScalingGroupTask starts an asynchronous delete task for the
// scaling group. If the task already exists (the delete-group API is
// re-entrant, which could otherwise cause duplicate inserts), the db layer
// returns ErrDelGroupTaskAlreadyExists.
func StartDeleteScalingGroupTask(groupId string, monitor interfaces.MonitorInf) error {
	// Only enqueue the task here instead of immediately marking the group
	// "deleting": if the group is mid-scaling, deletion must wait for the
	// current activity to finish. That wait can be slow, so it lives inside
	// the DelScalingGroupTask async task, which polls the db until it can
	// flip the group state to "deleting".
	if err := db.InsertDeleteScalingGroupTask(groupId); err != nil {
		return err
	}

	// Queue the async delete task.
	taskmgmt.GetTaskMgmt().AddTask(asynctask.NewDelScalingGroupTask(groupId, monitor))
	return nil
}

// StartDeletePodScaleGroupTask records and queues an asynchronous task that
// deletes the pod namespace of the given group.
func StartDeletePodScaleGroupTask(groupId string) error {
	taskId := uuid.NewString()
	if err := db.TxInsertDeletePodNamespaceTask(taskId, groupId); err != nil {
		return err
	}
	// NOTE(review): taskId is recorded in the db but not handed to the async
	// task — presumably the task looks it up by groupId; verify.
	taskmgmt.GetTaskMgmt().AddTask(asynctask.NewDelPodNamespaceTask(groupId))
	return nil
}

// StartDeleteEcsScaleGroupTask records and queues an asynchronous task that
// deletes the ECS scaling group. The async task waits for the group to become
// stable first, so no instance is missed during deletion.
func StartDeleteEcsScaleGroupTask(groupId string) error {
	taskId := uuid.NewString()
	if err := db.TxInsertDeleteECSScalingGroupTask(taskId, groupId); err != nil {
		return err
	}
	// NOTE(review): taskId is recorded in the db but not handed to the async
	// task — presumably the task looks it up by groupId; verify.
	taskmgmt.GetTaskMgmt().AddTask(asynctask.NewDelEcsScalingGroupTask(groupId))
	return nil
}
