package service

import (
	"strings"
	"sync"
	"time"

	"gitee.com/openeuler/PilotGo/sdk/logger"
	"github.com/go-redis/redis/v8"
	"openeuler.org/PilotGo/PilotGo-plugin-automation/internal/global"
	jobstatus "openeuler.org/PilotGo/PilotGo-plugin-automation/internal/module/common/enum/job"
	"openeuler.org/PilotGo/PilotGo-plugin-automation/internal/module/common/exec"
	"openeuler.org/PilotGo/PilotGo-plugin-automation/internal/module/job_history/dao"
	"openeuler.org/PilotGo/PilotGo-plugin-automation/internal/module/job_history/model"
)

// ------------------------- Module initialization -------------------------

// InitJobHistoryModule launches the background workers of the job-history
// module: the sub-job result writer, the job result updater, the running-job
// monitor, and the redis pub/sub subscriber. Always returns nil.
func InitJobHistoryModule() error {
	workers := []func(){
		StartSubJobResultWriter,
		StartJobResultUpdater,
		StartJobMonitor,
		StartSubscriber,
	}
	for _, worker := range workers {
		go worker()
	}

	logger.Debug("Job history module initialized successfully")
	return nil
}

// ------------------------- Concurrency limits -------------------------
// Semaphore channels bound the number of concurrent persistence goroutines.
// They are initialized inline instead of via an init() func so that each
// limit and its channel live in a single declaration block (Go resolves
// package-level initialization order automatically).
var (
	subJobResultWorkerLimit     = 10 // at most 10 concurrent SubJobResult batch saves
	subJobStepResultWorkerLimit = 10 // at most 10 concurrent SubJobStepResult batch saves
	jobUpdateWorkerLimit        = 20 // at most 20 concurrent aggregate job updates

	subJobSemaphore     = make(chan struct{}, subJobResultWorkerLimit)
	subJobStepSemaphore = make(chan struct{}, subJobStepResultWorkerLimit)
	jobUpdateSemaphore  = make(chan struct{}, jobUpdateWorkerLimit)
)

// ------------------------- Sub-job result writer -------------------------
var (
	insertSubJobStepResultChan = make(chan *model.SubJobStepResult, 5000) // pending per-step rows awaiting batch insert
	insertSubJobResultChan     = make(chan *model.SubJobResult, 1000)     // pending per-host rows awaiting batch insert
	triggerWriteChan           = make(chan struct{}, 1)                   // signals the writer to drain and flush immediately
	batchSize                  = 50                                      // rows buffered before an automatic flush
)

// StartSubJobResultWriter consumes SubJobResult and SubJobStepResult records
// from the insert channels, buffers them, and hands full batches (batchSize)
// to background save goroutines. A signal on triggerWriteChan first drains
// everything currently queued, then flushes both buffers regardless of size.
// Runs forever; intended to be launched as a goroutine.
func StartSubJobResultWriter() {
	var stepBuf []*model.SubJobStepResult
	var resultBuf []*model.SubJobResult

	// flushResults hands a detached copy of the result buffer to a saver
	// goroutine and resets the buffer in place (keeping its capacity).
	flushResults := func() {
		if len(resultBuf) == 0 {
			return
		}
		go saveSubJobResults(append([]*model.SubJobResult(nil), resultBuf...))
		resultBuf = resultBuf[:0]
	}
	// flushSteps does the same for the step buffer.
	flushSteps := func() {
		if len(stepBuf) == 0 {
			return
		}
		go saveSubJobStepResults(append([]*model.SubJobStepResult(nil), stepBuf...))
		stepBuf = stepBuf[:0]
	}

	for {
		select {
		case r := <-insertSubJobResultChan:
			resultBuf = append(resultBuf, r)
			if len(resultBuf) >= batchSize {
				flushResults()
			}
		case s := <-insertSubJobStepResultChan:
			stepBuf = append(stepBuf, s)
			if len(stepBuf) >= batchSize {
				flushSteps()
			}
		case <-triggerWriteChan:
			// Drain whatever is already queued so the flush covers it too.
		drain:
			for {
				select {
				case r := <-insertSubJobResultChan:
					resultBuf = append(resultBuf, r)
				case s := <-insertSubJobStepResultChan:
					stepBuf = append(stepBuf, s)
				default:
					break drain
				}
			}
			flushResults()
			flushSteps()
		}
	}
}

// saveSubJobResults persists one batch of SubJobResult rows. The package
// semaphore bounds how many batch saves run concurrently; errors are logged
// and swallowed.
func saveSubJobResults(buffer []*model.SubJobResult) {
	subJobSemaphore <- struct{}{}
	defer func() { <-subJobSemaphore }()

	err := dao.SaveSubJobResult(buffer)
	if err != nil {
		logger.Error("保存 SubJobResult 失败: %s", err.Error())
		return
	}
	logger.Debug("成功创建 %d 条子作业记录", len(buffer))
}

// saveSubJobStepResults persists one batch of SubJobStepResult rows. The
// package semaphore bounds how many batch saves run concurrently; errors are
// logged and swallowed.
func saveSubJobStepResults(buffer []*model.SubJobStepResult) {
	subJobStepSemaphore <- struct{}{}
	defer func() { <-subJobStepSemaphore }()

	err := dao.SaveSubJobStepResult(buffer)
	if err != nil {
		logger.Error("保存 SubJobStepResult 失败: %s", err.Error())
		return
	}
	logger.Debug("成功创建 %d 条子作业步骤记录", len(buffer))
}

// ------------------------- Job result updater -------------------------
var (
	jobResultUpdateChan = make(chan string, 1000) // job IDs whose aggregate result should be recomputed
)

// StartJobResultUpdater consumes job IDs from jobResultUpdateChan and
// recomputes each job's aggregate result in its own goroutine, bounded by
// jobUpdateSemaphore. Runs until the channel is closed; intended to be
// launched as a goroutine.
func StartJobResultUpdater() {
	for jobId := range jobResultUpdateChan {
		// No pre-loop copy is needed: the goroutine's argument is evaluated
		// when the `go` statement executes, so each goroutine receives its
		// own copy of jobId regardless of Go version.
		go func(id string) {
			jobUpdateSemaphore <- struct{}{}
			defer func() { <-jobUpdateSemaphore }()

			updateJobResult(id)
		}(jobId)
	}
}

// ------------------------- Job monitor -------------------------
var (
	jobMonitorInterval = 3 * time.Second // polling interval for running jobs
	stateChan          = make(chan bool, 1) // true resumes monitoring, false pauses it
)

// TODO: fetch work items from a queue instead of polling redis.

// StartJobMonitor periodically polls every running job recorded in redis and
// asks the owning agent for its latest result. Monitoring is toggled through
// stateChan and pauses itself when no job is running; StartSubscriber resumes
// it when new jobs appear. Runs forever; intended to be launched as a
// goroutine.
func StartJobMonitor() {
	ticker := time.NewTicker(jobMonitorInterval)
	defer ticker.Stop()

	active := true
	for {
		select {
		case state := <-stateChan:
			active = state
			if active {
				logger.Debug("Job监控已启动")
			} else {
				logger.Debug("Job监控已暂停")
			}

		case <-ticker.C:
			if !active {
				continue
			}

			runningJobs, err := getAllRunningJobsFromRedis()
			if err != nil {
				logger.Error("从Redis获取运行中作业失败: %v", err)
				continue
			}
			if len(runningJobs) == 0 {
				// Self-pause until the subscriber signals new activity.
				active = false
				logger.Debug("无运行中作业，暂停监控")
				continue
			}

			// One goroutine per (job, host); the argument values are captured
			// at the `go` statement, so no explicit copies are needed.
			for jobId, ip := range runningJobs {
				go func(jid, jip string) {
					if err := getSubJobResultFromAgent(jid, jip); err != nil {
						logger.Error("监控协程获取作业结果失败: jobId=%s, ip=%s, err=%v", jid, jip, err)
					}
				}(jobId, ip)
			}
		}
	}
}

// ------------------------- Subscription -------------------------
const (
	pubSubChannel = "job:running" // redis pub/sub channel carrying job lifecycle events
	eventSet      = "SET"         // payload: a running-job key was created
	eventDelete   = "DELETE"      // payload: a running-job key was removed
)

// StartSubscriber listens on the redis pub/sub channel for job lifecycle
// events and toggles the job monitor: a SET event resumes it; a DELETE event
// pauses it, but only once no "job_id:*" key remains in redis. All other
// payloads are ignored.
func StartSubscriber() {
	global.App.Redis.Subscribe(pubSubChannel, func(msg *redis.Message) {
		if msg.Payload == eventSet {
			stateChan <- true
			return
		}
		if msg.Payload == eventDelete && !global.App.Redis.IsExistKeys("job_id:*") {
			stateChan <- false
		}
	})
}

// saveSubJobResult enqueues the initial SubJobResult row and one
// SubJobStepResult row per step for every execution host of a job, then
// nudges the writer to flush so the rows become visible promptly.
func saveSubJobResult(jobId string, data *model.JobDTO) {
	hosts := strings.Split(data.Job.ExecHosts, ",")
	for _, host := range hosts {
		initCode := jobstatus.JobSubStatusCodeInit(data.Job.JobType, 1)
		insertSubJobResultChan <- &model.SubJobResult{
			JobId:        jobId,
			WorkflowId:   data.Job.WorkflowId,
			IP:           host,
			CurrentStep:  0,
			ExecCode:     initCode,
			SubJobStatus: jobstatus.GetSubJobStatusByCode(initCode),
		}
		for _, step := range data.SubJobs {
			stepCode := jobstatus.JobSubStatusCodeInit(data.Job.JobType, step.StepNum)
			insertSubJobStepResultChan <- &model.SubJobStepResult{
				JobId:        jobId,
				WorkflowId:   data.Job.WorkflowId,
				IP:           host,
				StepNum:      step.StepNum,
				SubJobStatus: jobstatus.GetSubJobStatusByCode(stepCode),
			}
		}
	}
	// Non-blocking nudge: if a flush is already pending, do nothing.
	select {
	case triggerWriteChan <- struct{}{}:
	default:
	}
}

// updateJobResult recomputes a job's aggregate status and timing from its
// sub-jobs and writes the result back. Errors are logged and swallowed.
func updateJobResult(jobId string) {
	statusCode, start, end, total, err := dao.CheckSubJobsStatus(jobId)
	if err != nil {
		logger.Error("分析作业状态失败: jobId=%s, err=%v", jobId, err)
		return
	}

	if err = dao.UpdateJobResult(jobId, statusCode, start, end, total); err != nil {
		logger.Error("更新作业状态失败: jobId=%s, err=%v", jobId, err)
	}
}

// getSubJobResultFromAgent pulls the latest result for one (job, host) pair
// from the agent and syncs it into the database and the redis caches.
//
// Flow:
//  1. a missing cache entry is treated as "nothing to poll" (not an error);
//  2. skip when neither status nor current step changed since the last poll;
//  3. persist every step at or after the last recorded step concurrently;
//  4. update the per-host SubJobResult row and the caches according to
//     whether the job reached a terminal state.
//
// Returns a non-nil error only when the agent call itself fails.
func getSubJobResultFromAgent(jobId string, ip string) error {
	cacheStatus, err := GetSubJobCache(jobId, ip)
	if err != nil {
		// Missing cache is deliberate best-effort: the job is assumed done.
		logger.Debug("Redis中未找到任务缓存: jobId=%s, ip=%s, 将直接更新作业结果", jobId, ip)
		return nil
	}
	result, err := exec.GetJobResult(ip, jobId)
	if err != nil {
		// Pass the error itself to %v (err.Error() on a nil err would panic).
		logger.Error("从Agent获取作业结果失败: jobId=%s, ip=%s, err=%v", jobId, ip, err)
		return err
	}
	// Nothing changed since the last poll — avoid redundant DB writes.
	if cacheStatus.JobStatus == result.JobStatus && cacheStatus.LastUpdateStep == result.CurrentStep {
		return nil
	}

	// Persist every step from the last recorded one onward, one goroutine per
	// step, and wait for all of them before touching the aggregate row.
	// NOTE(review): steps are written under result.IP while the aggregate
	// update below uses the ip parameter — confirm these are always equal.
	var wg sync.WaitGroup
	for stepNum, subJob := range result.SubJobs {
		wg.Add(1)
		go func(jobId, ip string, stepNum int, subJob exec.SubJob) {
			defer wg.Done()
			if stepNum < cacheStatus.LastUpdateStep {
				return
			}
			// An empty agent status means the step has not run yet.
			status := subJob.SubJobStatus
			if status == "" {
				status = jobstatus.SubJobNotRun
			}

			subJobStepResultUpdates := map[string]interface{}{
				"sub_job_status": status,
				"result":         subJob.Result,
				"start_time":     subJob.StartTime,
				"end_time":       subJob.EndTime,
			}

			if updateErr := dao.UpdateSubJobStepResult(jobId, ip, stepNum, subJobStepResultUpdates); updateErr != nil {
				logger.Error("异步更新SubJobResult失败: job=%s ip=%s step_num=%d err=%s", jobId, ip, stepNum, updateErr.Error())
			}
		}(jobId, result.IP, stepNum, subJob)
	}
	wg.Wait()

	// NOTE(review): indexing SubJobs[1] (and SubJobs[result.CurrentStep]
	// below) assumes those entries are always present in the agent response —
	// confirm; otherwise the recorded times may be zero values or panic.
	subJobResultUpdates := map[string]interface{}{
		"current_step":   result.CurrentStep,
		"exec_code":      result.ExecCode,
		"sub_job_status": result.JobStatus,
		"start_time":     result.SubJobs[1].StartTime,
	}
	switch result.JobStatus {
	case jobstatus.SubJobFailed, jobstatus.SubJobSuccess, jobstatus.SubJobTimeout, jobstatus.SubJobCanceled:
		// Terminal state: record the end time, refresh the job-level cache,
		// and drop the per-host cache so the monitor stops polling this pair.
		subJobResultUpdates["end_time"] = result.SubJobs[result.CurrentStep].EndTime
		if err := dao.UpdateSubJobResult(jobId, ip, subJobResultUpdates); err != nil {
			logger.Error("更新子作业结果状态失败: jobId=%s, ip=%s, err=%v", jobId, ip, err)
		}
		if err := UpdateJobCache(jobId); err != nil {
			logger.Error("尝试更新作业缓存失败: jobId=%s, ip=%s, err=%v", jobId, ip, err)
		}
		if err := DeleteSubJobCache(jobId, ip); err != nil {
			logger.Error("删除子作业缓存失败: jobId=%s, ip=%s, err=%v", jobId, ip, err)
		}
	case jobstatus.SubJobRunning, jobstatus.SubJobProcessing:
		// Still running: persist progress and refresh the per-host cache.
		if err := dao.UpdateSubJobResult(jobId, ip, subJobResultUpdates); err != nil {
			logger.Error("更新子作业结果状态失败: jobId=%s, ip=%s, err=%v", jobId, ip, err)
		}
		if err := UpdateSubJobCache(jobId, ip, result.JobStatus, result.CurrentStep, cacheStatus.TotalStep); err != nil {
			logger.Error("更新子作业缓存失败: jobId=%s, ip=%s, err=%v", jobId, ip, err)
		}
	}

	return nil
}
