// Copyright (c) Huawei Technologies Co., Ltd. 2022-2022. All rights reserved.

// Package task implements periodic backup tasks guarded by a distributed lock.
package task

import (
	"sync"
	"time"

	"codehub-g.huawei.com/videocloud/mediaprocesscenter/application-gateway/config"
	"codehub-g.huawei.com/videocloud/mediaprocesscenter/application-gateway/pkg/common"
	"codehub-g.huawei.com/videocloud/mediaprocesscenter/application-gateway/pkg/distributedlock"
	"codehub-g.huawei.com/videocloud/mediaprocesscenter/application-gateway/pkg/models"
	app_process "codehub-g.huawei.com/videocloud/mediaprocesscenter/application-gateway/pkg/models/appprocess"
	client_session "codehub-g.huawei.com/videocloud/mediaprocesscenter/application-gateway/pkg/models/clientsession"
	server_session "codehub-g.huawei.com/videocloud/mediaprocesscenter/application-gateway/pkg/models/serversession"
	redis "codehub-g.huawei.com/videocloud/mediaprocesscenter/application-gateway/pkg/redisserver"
	"codehub-g.huawei.com/videocloud/mediaprocesscenter/application-gateway/pkg/utils/log"
)

const (
	BackupIntervalSeconds   = 60 // period, in seconds, between backup runs
	BackupDataRetainSeconds = 60 // how long reported-terminated data is retained before being backed up
)

// InitAppProcessBackupTask wires an app-process backup worker to the
// distributed lock controller and starts competing for the lock.
func InitAppProcessBackupTask(osSignal <-chan struct{}) {
	worker := &appProcessBackupWorker{}
	controller := distributedlock.NewDistributedLockController(
		common.LockBackupAppProcess, common.LockBizCategory, worker, osSignal)
	controller.SetLockLeaseTime(30 * time.Second)          // each lease renewal lasts 30s
	controller.SetTryLockOrLeaseInterval(20 * time.Second) // renew or retry every 20s
	controller.Work()
}

// InitServerSessionBackupTask wires a server-session backup worker to the
// distributed lock controller and starts competing for the lock.
func InitServerSessionBackupTask(osSignal <-chan struct{}) {
	worker := &serverSessionBackupWorker{}
	controller := distributedlock.NewDistributedLockController(
		common.LockBackupServerSession, common.LockBizCategory, worker, osSignal)
	controller.SetLockLeaseTime(30 * time.Second)          // each lease renewal lasts 30s
	controller.SetTryLockOrLeaseInterval(20 * time.Second) // renew or retry every 20s
	controller.Work()
}

// InitClientSessionBackupTask wires a client-session backup worker to the
// distributed lock controller and starts competing for the lock.
func InitClientSessionBackupTask(osSignal <-chan struct{}) {
	worker := &clientSessionBackupWorker{}
	controller := distributedlock.NewDistributedLockController(
		common.LockBackupClientSession, common.LockBizCategory, worker, osSignal)
	controller.SetLockLeaseTime(30 * time.Second)          // each lease renewal lasts 30s
	controller.SetTryLockOrLeaseInterval(20 * time.Second) // renew or retry every 20s
	controller.Work()
}

// appProcessBackupWorker backs up app process data while this instance
// holds the distributed backup lock.
type appProcessBackupWorker struct {
	stopCh chan struct{} // closed to stop the background work goroutine
}

// HolderHook is invoked when the lock is acquired: it starts the backup loop.
func (w *appProcessBackupWorker) HolderHook() {
	log.RunLogger.Infof("[backup app process worker] start backup app process worker")
	w.stopCh = make(chan struct{}) // unbuffered signal channel; `, 0` capacity is redundant
	go w.work()
}

// CompetitorHook is invoked when the lock is lost: it stops the backup loop.
func (w *appProcessBackupWorker) CompetitorHook() {
	log.RunLogger.Infof("[backup app process worker] stop backup app process worker")
	close(w.stopCh)
}

// work periodically cleans aged rows from the app process backup table and
// backs up runtime app process data, until stopCh is closed.
func (w *appProcessBackupWorker) work() {
	log.RunLogger.Infof("[backup app process worker] start to backup app process Worker")
	time.Sleep(defaultSleepTime)

	appProcessBackupDao := app_process.NewAppProcessBackupDao(models.MySqlOrm)

	ticker := time.NewTicker(BackupIntervalSeconds * time.Second)
	defer ticker.Stop() // release ticker resources when the worker exits

	for {
		select {
		case <-w.stopCh:
			log.RunLogger.Infof("[backup app process worker] exit backup app process worker")
			return
		case <-ticker.C:
			log.RunLogger.Infof("[express data cleaner] start to check and backup process")
			// Clean first, then back up; each step logs and continues on error.
			errC := appProcessBackupDao.CleanAppProcessBackup(config.GlobalConfig.CleanupDays)
			if errC != nil {
				log.RunLogger.Errorf("[express data cleaner] clear express app process from backup table error for %v", errC)
			}
			errB := appProcessBackupDao.BackupAppProcess(config.GlobalConfig.BackupDays)
			if errB != nil {
				log.RunLogger.Errorf("[express data cleaner] backup express app process from runtime error for %v", errB)
			}
			go w.backupTerminatedAppProcessFromRedis()
		}
	}
}

// backupTerminatedAppProcessFromRedis drains the Redis list of terminated app
// process IDs, waits quietly for BackupDataRetainSeconds, then backs them up
// concurrently in throttled batches.
func (w *appProcessBackupWorker) backupTerminatedAppProcessFromRedis() {
	var apIDs []string
	appProcessBackupDao := app_process.NewAppProcessBackupDao(models.MySqlOrm)
	redisClient := redis.GetRedisClient()
	defer redis.CloseRedisClient(redisClient)
	listLen := redisClient.LLen(common.RedisTableTerminatedAppProcess).Val()
	if listLen == 0 {
		log.RunLogger.Infof("[backup app process worker] there is no app process to backup")
		return
	}
	for i := 0; i < int(listLen); i++ {
		apID := redisClient.RPop(common.RedisTableTerminatedAppProcess)
		if apID.Val() != "" {
			apIDs = append(apIDs, apID.Val())
			continue
		}
		// Empty pop: the list was drained concurrently; keep what we have.
		log.RunLogger.Infof("[backup app process worker] redis %s is nil", common.RedisTableTerminatedAppProcess)
	}
	if len(apIDs) == 0 {
		// Every pop came back empty; skip the retain sleep and backup pass.
		return
	}
	log.RunLogger.Infof("[backup app process worker] will backup %d processes after %d seconds", len(apIDs), BackupDataRetainSeconds)
	time.Sleep(BackupDataRetainSeconds * time.Second)
	wg := sync.WaitGroup{}
	wg.Add(len(apIDs))
	for idx, apID := range apIDs {
		// Throttle: pause after each batch of AppProcessBackupCountToSleep IDs.
		if idx%app_process.AppProcessBackupCountToSleep == 0 {
			time.Sleep(app_process.AppProcessBackupCountToSleepSeconds * time.Second)
		}
		go appProcessBackupDao.BackupTerminatedAppProcess(apID, &wg)
	}
	wg.Wait()
	log.RunLogger.Infof("[backup app process worker] success to backup %d processes", len(apIDs))
}

// serverSessionBackupWorker backs up server session data while this instance
// holds the distributed backup lock.
type serverSessionBackupWorker struct {
	stopCh chan struct{} // closed to stop the background work goroutine
}

// HolderHook is invoked when the lock is acquired: it starts the backup loop.
func (w *serverSessionBackupWorker) HolderHook() {
	log.RunLogger.Infof("[backup server session worker] start backup server session worker")
	w.stopCh = make(chan struct{}) // unbuffered signal channel; `, 0` capacity is redundant
	go w.work()
}

// CompetitorHook is invoked when the lock is lost: it stops the backup loop.
func (w *serverSessionBackupWorker) CompetitorHook() {
	log.RunLogger.Infof("[backup server session worker] stop backup server session worker")
	close(w.stopCh)
}

// work periodically cleans aged rows from the server session backup table and
// backs up runtime server session data, until stopCh is closed.
// (The original comment wrongly said "app process".)
func (w *serverSessionBackupWorker) work() {
	log.RunLogger.Infof("[backup server session worker] start to backup server session Worker")
	time.Sleep(defaultSleepTime)

	serverSessionBackupDao := server_session.NewServerSessionBackupDao(models.MySqlOrm)

	ticker := time.NewTicker(BackupIntervalSeconds * time.Second)
	defer ticker.Stop() // release ticker resources when the worker exits

	for {
		select {
		case <-w.stopCh:
			log.RunLogger.Infof("[backup server session worker] exit backup server session worker")
			return
		case <-ticker.C:
			log.RunLogger.Infof("[express data cleaner] start to check and backup server session")
			// Clean first, then back up; each step logs and continues on error.
			errC := serverSessionBackupDao.CleanServerSessionBackup(config.GlobalConfig.CleanupDays)
			if errC != nil {
				log.RunLogger.Errorf("[express data cleaner] clear express server session from backup table error for %v", errC)
			}
			errB := serverSessionBackupDao.BackupServerSession(config.GlobalConfig.BackupDays)
			if errB != nil {
				log.RunLogger.Errorf("[express data cleaner] backup express server session from runtime error for %v", errB)
			}
			go w.backupTerminatedServerSessionFromRedis()
		}
	}
}

// backupTerminatedServerSessionFromRedis drains the Redis list of terminated
// server session IDs, waits quietly for BackupDataRetainSeconds, then backs
// them up concurrently in throttled batches.
func (w *serverSessionBackupWorker) backupTerminatedServerSessionFromRedis() {
	var ssIDs []string
	serverSessionBackupDao := server_session.NewServerSessionBackupDao(models.MySqlOrm)
	redisClient := redis.GetRedisClient()
	defer redis.CloseRedisClient(redisClient)
	listLen := redisClient.LLen(common.RedisTableTerminatedServerSession).Val()
	if listLen == 0 {
		log.RunLogger.Infof("[backup server session worker] there is no server session to backup")
		return
	}
	for i := 0; i < int(listLen); i++ {
		ssID := redisClient.RPop(common.RedisTableTerminatedServerSession)
		if ssID.Val() != "" {
			ssIDs = append(ssIDs, ssID.Val())
			continue
		}
		// BUGFIX: previously this path returned immediately, dropping any IDs
		// already popped from Redis without ever backing them up. An empty pop
		// just means the list was drained concurrently; keep what we have.
		log.RunLogger.Infof("[backup server session worker] redis %s is nil", common.RedisTableTerminatedServerSession)
	}
	if len(ssIDs) == 0 {
		// Every pop came back empty; skip the retain sleep and backup pass.
		return
	}
	log.RunLogger.Infof("[backup server session worker] will backup %d server session after %d seconds", len(ssIDs), BackupDataRetainSeconds)
	time.Sleep(BackupDataRetainSeconds * time.Second)
	wg := sync.WaitGroup{}
	wg.Add(len(ssIDs))
	for idx, ssID := range ssIDs {
		// Throttle: pause after each batch of ServerSessionBackupCountToSleep IDs.
		if idx%server_session.ServerSessionBackupCountToSleep == 0 {
			time.Sleep(server_session.ServerSessionBackupCountToSleepSeconds * time.Second)
		}
		go serverSessionBackupDao.BackupTerminatedServerSession(ssID, &wg)
	}
	wg.Wait()
	log.RunLogger.Infof("[backup server session worker] success to backup %d server session", len(ssIDs))
}

// clientSessionBackupWorker backs up client session data while this instance
// holds the distributed backup lock.
type clientSessionBackupWorker struct {
	stopCh chan struct{} // closed to stop the background work goroutine
}

// HolderHook is invoked when the lock is acquired: it starts the backup loop.
func (w *clientSessionBackupWorker) HolderHook() {
	log.RunLogger.Infof("[backup client session worker] start backup client session worker")
	w.stopCh = make(chan struct{}) // unbuffered signal channel; `, 0` capacity is redundant
	go w.work()
}

// CompetitorHook is invoked when the lock is lost: it stops the backup loop.
func (w *clientSessionBackupWorker) CompetitorHook() {
	log.RunLogger.Infof("[backup client session worker] stop backup client session worker")
	close(w.stopCh)
}

// work periodically cleans aged rows from the client session backup table and
// backs up runtime client session data, until stopCh is closed.
// (The original comment wrongly said "app process".)
func (w *clientSessionBackupWorker) work() {
	log.RunLogger.Infof("[backup client session worker] start to backup client session Worker")
	time.Sleep(defaultSleepTime)

	clientSessionBackupDao := client_session.NewClientSessionBackupDao(models.MySqlOrm)

	ticker := time.NewTicker(BackupIntervalSeconds * time.Second)
	defer ticker.Stop() // release ticker resources when the worker exits

	for {
		select {
		case <-w.stopCh:
			log.RunLogger.Infof("[backup client session worker] exit backup client session worker")
			return
		case <-ticker.C:
			log.RunLogger.Infof("[express data cleaner] start to check and backup client session")
			// Clean first, then back up; each step logs and continues on error.
			errC := clientSessionBackupDao.CleanClientSessionBackup(config.GlobalConfig.CleanupDays)
			if errC != nil {
				log.RunLogger.Errorf("[express data cleaner] clear express client session from backup table error for %v", errC)
			}
			errB := clientSessionBackupDao.BackupClientSession(config.GlobalConfig.BackupDays)
			if errB != nil {
				log.RunLogger.Errorf("[express data cleaner] backup express client session from runtime error for %v", errB)
			}
			go w.backupTerminatedClientSessionFromRedis()
		}
	}
}

// backupTerminatedClientSessionFromRedis drains the Redis list of terminated
// client session IDs, waits quietly for BackupDataRetainSeconds, then backs
// them up concurrently in throttled batches.
func (w *clientSessionBackupWorker) backupTerminatedClientSessionFromRedis() {
	var csIDs []string
	clientSessionBackupDao := client_session.NewClientSessionBackupDao(models.MySqlOrm)
	redisClient := redis.GetRedisClient()
	defer redis.CloseRedisClient(redisClient)
	listLen := redisClient.LLen(common.RedisTableTerminatedClientSession).Val()
	if listLen == 0 {
		log.RunLogger.Infof("[backup client session worker] there is no client session to backup")
		return
	}
	for i := 0; i < int(listLen); i++ {
		csID := redisClient.RPop(common.RedisTableTerminatedClientSession)
		if csID.Val() != "" {
			csIDs = append(csIDs, csID.Val())
			continue
		}
		// BUGFIX: previously this path returned immediately, dropping any IDs
		// already popped from Redis without ever backing them up; it also
		// logged the server-session table name instead of the client one.
		log.RunLogger.Infof("[backup client session worker] redis %s is nil", common.RedisTableTerminatedClientSession)
	}
	if len(csIDs) == 0 {
		// Every pop came back empty; skip the retain sleep and backup pass.
		return
	}
	log.RunLogger.Infof("[backup client session worker] will backup %d client session after %d seconds", len(csIDs), BackupDataRetainSeconds)
	time.Sleep(BackupDataRetainSeconds * time.Second)
	wg := sync.WaitGroup{}
	wg.Add(len(csIDs))
	for idx, csID := range csIDs {
		// Throttle: pause after each batch of ClientSessionBackupCountToSleep IDs.
		if idx%client_session.ClientSessionBackupCountToSleep == 0 {
			time.Sleep(client_session.ClientSessionBackupCountToSleepSeconds * time.Second)
		}
		go clientSessionBackupDao.BackupTerminatedClientSession(csID, &wg)
	}
	wg.Wait()
	log.RunLogger.Infof("[backup client session worker] success to backup %d client session", len(csIDs))
}
