package job_center

import (
	"bytes"
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"git.mycaigou.com/gfyx/common/cache"
	"git.mycaigou.com/gfyx/common/tenant_db"
	"git.mycaigou.com/gfyx/common/usermetadata"
	"git.mycaigou.com/gfyx/micro-gfyx-oppty-service/infrastructure/common/constants"
	"git.mycaigou.com/gfyx/micro-gfyx-oppty-service/infrastructure/common/ossfile"
	"git.mycaigou.com/gfyx/micro-gfyx-oppty-service/infrastructure/common/utils"
	"git.mycaigou.com/gfyx/micro-gfyx-oppty-service/infrastructure/dto/base"
	"git.mycaigou.com/gfyx/micro-gfyx-oppty-service/infrastructure/repository/common/file"
	jobCenterRepo "git.mycaigou.com/gfyx/micro-gfyx-oppty-service/infrastructure/repository/job_center"
	pb "git.mycaigou.com/gfyx/micro-gfyx-oppty-service/proto/micro_gfyx_oppty_service_proto/micro-gfyx-oppty-service/job_center"
	"git.mycaigou.com/gfyx/micro-gfyx-oppty-service/service/business_opportunity/bid"
	"git.mycaigou.com/gfyx/micro-gfyx-oppty-service/service/business_opportunity/contact"
	"git.mycaigou.com/gfyx/micro-gfyx-oppty-service/service/business_opportunity/customer"
	"git.mycaigou.com/gfyx/micro-gfyx-oppty-service/service/business_opportunity/project"
	"git.mycaigou.com/gfyx/micro-gfyx-oppty-service/service/clue/clue"
	clueCustomer "git.mycaigou.com/gfyx/micro-gfyx-oppty-service/service/clue/customer"
	"git.myscrm.cn/golang/stark/v4"
	"github.com/xuri/excelize/v2"
	"io"
	"math"
	"time"
)

const (
	// jobCacheKey is the redis key pattern ("job:<jobId>") used as a
	// per-job exclusive lock to prevent concurrent runs of the same job.
	jobCacheKey      = "job:%d"
	// batchDefaultSize is the number of rows pulled per export batch.
	batchDefaultSize = 500
)

// jobCenterService is the default implementation of JobCenterServiceIface.
// It orchestrates asynchronous export/import jobs: persisting job records,
// pulling business data in batches, packing xlsx result files, and pushing
// completion notices to a per-user redis queue.
type jobCenterService struct {
	jobCenterRepo         jobCenterRepo.JobRepositoryIface         // job / batch-runtime / job-file persistence
	fileRepo              file.FileRepoIface                       // file metadata persistence
	customerService       customer.CustomerServiceIface            // customer export data source
	contactService        contact.ContactServiceIface              // cooperation export data source
	projectService        project.ProjectServiceIface              // project / land / sale export data source
	bidService            bid.BidServiceIface                      // bidding export data source
	clueService           clue.ClueServiceIface                    // clue export data source
	buildingService       project.BuildingServiceIface             // building export data source
	customerManageService clueCustomer.CustomerManageServiceIface  // clue-customer export/import data source
}

// NewJobCenterService wires up a job-center service with all of its
// repository and business-service dependencies.
func NewJobCenterService(
	jobRepo jobCenterRepo.JobRepositoryIface,
	fileRepo file.FileRepoIface,
	customerService customer.CustomerServiceIface,
	contactService contact.ContactServiceIface,
	projectService project.ProjectServiceIface,
	bidService bid.BidServiceIface,
	clueService clue.ClueServiceIface,
	buildingService project.BuildingServiceIface,
	customerManageService clueCustomer.CustomerManageServiceIface,
) JobCenterServiceIface {
	svc := &jobCenterService{
		jobCenterRepo:         jobRepo,
		fileRepo:              fileRepo,
		customerService:       customerService,
		contactService:        contactService,
		projectService:        projectService,
		bidService:            bidService,
		clueService:           clueService,
		buildingService:       buildingService,
		customerManageService: customerManageService,
	}
	return svc
}

// CreateJobAndRun persists a new job record attached to the given order and
// immediately launches it in the background, returning the new job id.
func (s *jobCenterService) CreateJobAndRun(ctx context.Context, orderId int64, jobType, orderNum, method int32, params string) (int64, error) {
	curUser := usermetadata.GetMetaUserInfo(ctx)
	now := time.Now()

	newJob := &jobCenterRepo.GfyxJobPo{
		OrderId:      orderId,
		JobType:      jobType,
		Method:       method,
		Params:       params,
		Number:       orderNum,
		Status:       constants.JobStatusUnStarted,
		FinishStatus: constants.JobFinishStatusUnKnown,
		CreatedBy:    curUser.UserId,
		CreatedOn:    now,
		ModifiedBy:   curUser.UserId,
		ModifiedOn:   now,
		IsDeleted:    constants.IsDeletedFalse,
	}
	jobPo, err := s.jobCenterRepo.SaveJob(ctx, newJob)
	if err != nil {
		return 0, err
	}

	if err = s.StartJob(ctx, jobPo.Id); err != nil {
		return 0, err
	}
	return jobPo.Id, nil
}

// GetJobInfoByOrderId looks up the job attached to an order and returns its
// id, finish status and — once the job has finished — the signed url of its
// result file. Zero values are returned when no job exists for the order.
func (s *jobCenterService) GetJobInfoByOrderId(ctx context.Context, orderId int64) (jobId, jobStatus int32, fileUrl string, err error) {
	condition := map[string]interface{}{"order_id": orderId, "is_deleted": constants.IsDeletedFalse}
	_, jobList, err := s.jobCenterRepo.QueryJobList(ctx, condition, nil, "")
	if err != nil || len(jobList) == 0 {
		return
	}

	orderJob := jobList[0]
	jobId = int32(orderJob.Id)
	jobStatus = orderJob.FinishStatus
	// The result file only exists after the job has run to completion.
	if orderJob.Status == constants.JobStatusFinished {
		fileUrl = s.getResultFileUrlByJobIds(ctx, []int64{orderJob.Id})[orderJob.Id]
	}
	return
}

// GetJobRecordTotal returns how many records an export of the given order
// type would produce, based on the serialized filter params. Unknown order
// types yield a zero total with a nil error.
func (s *jobCenterService) GetJobRecordTotal(ctx context.Context, orderType int32, params string) (int64, error) {
	switch orderType {
	case constants.OrderTypeCustomerExport, constants.JobTypeMarketCustomerExport:
		return s.customerService.GetExportCustomerTotal(ctx, params)
	case constants.OrderTypeProjectExport:
		return s.projectService.GetExportProjectTotal(ctx, params)
	case constants.OrderTypeBidExport, constants.OrderTypeCustomerDetailPurchaseExport, constants.JobTypeMarketBidExport:
		return s.bidService.GetExportBiddingTotal(ctx, params)
	case constants.OrderTypeCustomerDetailLandExport, constants.JobTypeLandExport:
		return s.projectService.GetExportTakenLandTotal(ctx, params)
	case constants.OrderTypeCustomerDetailBuildExport, constants.JobTypeBuildingExport:
		return s.buildingService.GetExportBuildingTotal(ctx, params)
	case constants.OrderTypeCustomerDetailSaleExport, constants.JobTypeSaleExport:
		return s.projectService.GetExportSaleTotal(ctx, params)
	case constants.OrderTypeCustomerDetailCooperationExport:
		return s.contactService.GetExportCooperationTotal(ctx, params)
	case constants.OrderTypeCustomerDetailCaseExport:
		return s.customerService.GetExportSupplierProjectTotal(ctx, params)
	}
	return 0, nil
}

// StartJob acquires the per-job lock, validates the job, marks it running and
// launches the matching worker goroutine (export or import). The goroutine is
// responsible for releasing the lock when it finishes; on any validation
// failure StartJob releases the lock itself and returns a non-nil error.
func (s *jobCenterService) StartJob(ctx context.Context, jobId int64) error {
	// 1. Acquire the exclusive job lock to avoid concurrent duplicate runs.
	if err := s.lockJob(ctx, jobId); err != nil {
		return err
	}

	// 2. Build a detached tenant context so the goroutine outlives the request.
	newCtx, err := tenant_db.GetTenantBackgroundContext(ctx, "")
	if err != nil {
		stark.Logger.Errorf(ctx, "runJobInBackground-get tenant new context err:%s", err.Error())
		// BUG FIX: release the lock on every error path after acquisition;
		// previously it stayed held until the 10-minute TTL expired.
		_ = s.unlockJob(ctx, jobId)
		return err
	}

	// 3. Validate the job before starting it.
	jobPo, err := s.jobCenterRepo.QueryJobById(ctx, jobId)
	if err != nil {
		stark.Logger.Errorf(ctx, "runJobInBackground-job not found, jobId:%d", jobId)
		_ = s.unlockJob(ctx, jobId)
		return err
	}
	if jobPo == nil {
		// BUG FIX: this branch used to `return err` with err == nil,
		// reporting success although no job was started.
		stark.Logger.Errorf(ctx, "runJobInBackground-job not found, jobId:%d", jobId)
		_ = s.unlockJob(ctx, jobId)
		return fmt.Errorf("job %d not found", jobId)
	}
	if jobPo.Status == constants.JobStatusFinished {
		// BUG FIX: same nil-error problem as above.
		stark.Logger.Errorf(ctx, "runJobInBackground-job is completed, jobId:%d", jobId)
		_ = s.unlockJob(ctx, jobId)
		return fmt.Errorf("job %d is already completed", jobId)
	}

	// Mark the job as running, then hand off to the background worker.
	_ = s.setJobRunningStatus(ctx, jobId)

	switch jobPo.Method {
	case constants.JobMethodExport:
		go s.runJobInBackground(newCtx, jobPo)
	case constants.JobMethodImport:
		go s.runImportJobInBackground(newCtx, jobPo)
	}
	return nil
}

// runJobInBackground executes an export job: it pulls business data in
// batches, packs all batches into a single xlsx result file, records the
// final status, releases the job lock and pushes a completion notice.
func (s *jobCenterService) runJobInBackground(ctx context.Context, jobPo *jobCenterRepo.GfyxJobPo) {
	defer func() {
		if r := recover(); r != nil {
			// BUG FIX: the jobPo argument was missing from this log call,
			// leaving the first %+v unfilled.
			stark.Logger.Errorf(ctx, "runJobInBackground-panic, jobPo:%+v, err:%+v", jobPo, r)
		}
	}()
	// 1. Pull the business data batch by batch.
	finishStatus := constants.JobFinishStatusSuccess
	title, err := s.pullData(ctx, jobPo)
	if err != nil {
		stark.Logger.Errorf(ctx, "runJobInBackground-pullData jobId:%d, err:%s", jobPo.Id, err.Error())
		finishStatus = constants.JobFinishStatusFailed
	}
	// 2. Merge all processed batches into the final excel file and bind it to the job.
	resultFileId, rowCount, err := s.packingResultFile(ctx, jobPo.Id, title)
	if err != nil {
		stark.Logger.Errorf(ctx, "runJobInBackground packingResultFile jobId:%d, err:%s", jobPo.Id, err.Error())
		finishStatus = constants.JobFinishStatusFailed
	} else {
		_, err = s.jobCenterRepo.SaveJobFile(ctx, &jobCenterRepo.GfyxJobFilePo{
			JobId:      jobPo.Id,
			FileId:     resultFileId,
			FileType:   1,
			CreatedBy:  0,
			CreatedOn:  time.Now(),
			ModifiedBy: 0,
			ModifiedOn: time.Now(),
			IsDeleted:  constants.IsDeletedFalse,
		})
		if err != nil {
			// BUG FIX: the original unconditionally overwrote finishStatus
			// with "success" after this branch, erasing failures recorded
			// here and in the pullData step above.
			stark.Logger.Errorf(ctx, "runJobInBackground SaveJobFile jobId:%d, err:%s", jobPo.Id, err.Error())
			finishStatus = constants.JobFinishStatusFailed
		}
	}

	// 3. Persist the final job status with a human-readable result message.
	var result string
	if finishStatus == constants.JobFinishStatusFailed {
		result = "服务内部错误"
	} else {
		result = fmt.Sprintf("导出%d条数据", rowCount)
	}
	err = s.setJobFinishStatus(ctx, jobPo.Id, finishStatus, result)
	if err != nil {
		stark.Logger.Errorf(ctx, "runJobInBackground UpdateJob jobId:%d, err:%s", jobPo.Id, err.Error())
	} else {
		stark.Logger.Infof(ctx, "runJobInBackground UpdateJob jobId:%d, succeed", jobPo.Id)
	}

	// Release the job lock.
	_ = s.unlockJob(ctx, jobPo.Id)
	// Push the completion notice.
	_ = s.pushResultToQueue(ctx, jobPo.Id)
}

// runImportJobInBackground executes an import job: it parses the job params,
// loads the spreadsheet rows (from the redis cache when a cache key is given,
// otherwise downloaded from oss), dispatches to the matching import service,
// and records the final status. The job lock is always released and a
// completion notice pushed, even when an early step fails.
func (s *jobCenterService) runImportJobInBackground(ctx context.Context, jobPo *jobCenterRepo.GfyxJobPo) {
	defer func() {
		if r := recover(); r != nil {
			// BUG FIX: the jobPo argument was missing from this log call,
			// leaving the first %+v unfilled.
			stark.Logger.Errorf(ctx, "runImportJobInBackground-panic, jobPo:%+v, err:%+v", jobPo, r)
		}
	}()
	// BUG FIX: the original early returns skipped unlock/push, leaving the
	// job stuck in "running" until the lock's 10-minute TTL expired.
	defer func() {
		_ = s.unlockJob(ctx, jobPo.Id)
		_ = s.pushResultToQueue(ctx, jobPo.Id)
	}()
	// markFailed records a failed finish status before an early return.
	markFailed := func() {
		_ = s.setJobFinishStatus(ctx, jobPo.Id, constants.JobFinishStatusFailed, "导入失败")
	}

	// 1. Parse the import request parameters.
	var request base.ImportRequest
	if err := json.Unmarshal([]byte(jobPo.Params), &request); err != nil {
		stark.Logger.Errorf(ctx, "runImportJobInBackground-param-unmarshal jobId:%d, err:%s", jobPo.Id, err.Error())
		markFailed()
		return
	}
	// 2. Load the row data: prefer the cached copy, otherwise download from oss.
	var rowData [][]string
	if request.CacheKey != "" {
		redis, _ := cache.TenantRedis()
		cacheStr, _ := redis.Get(ctx, request.CacheKey)
		if cacheStr != "" {
			if err := json.Unmarshal([]byte(cacheStr), &rowData); err != nil {
				stark.Logger.Errorf(ctx, "runImportJobInBackground-cache-unmarshal jobId:%d, err:%s", jobPo.Id, err.Error())
				markFailed()
				return
			}
		}
		// NOTE(review): on a cache miss rowData stays empty and there is no
		// oss fallback — confirm this is intended.
	} else {
		// Download from oss.
		fileInfo, err := s.fileRepo.QueryById(ctx, request.FileId)
		if err != nil {
			stark.Logger.Errorf(ctx, "method:GetFile,err:%v", err)
			markFailed()
			return
		}

		// Build the signed oss download url.
		fileUrl, err := ossfile.GetSignURL(fileInfo.FilePath)
		if err != nil {
			stark.Logger.Errorf(ctx, "method:GetSignURL,err:%v", err)
			markFailed()
			return
		}

		// Read and validate the file content.
		rowData, err = utils.ReadRemoteExcelData(fileUrl)
		if err != nil {
			// BUG FIX: the original format string had one %s but two args.
			stark.Logger.Infof(ctx, "ReadRemoteExcelData fileUrl:%s 读取文件错误，错误信息：%s", fileUrl, err.Error())
			markFailed()
			return
		}
	}
	// 3. Dispatch to the import implementation for this job type.
	var importErr error
	switch jobPo.JobType {
	case constants.JobTypeCustomerImport:
		// Customer import.
		importErr = s.customerManageService.ImportFileByJob(ctx, request.FileId, jobPo.CreatedBy, rowData)
	}
	// 4. Record the final status.
	finishStatus := constants.JobFinishStatusSuccess
	result := "导入成功"
	if importErr != nil {
		stark.Logger.Errorf(ctx, "runImportJobInBackground-ImportFileByJob jobId:%d, err:%s", jobPo.Id, importErr.Error())
		finishStatus = constants.JobFinishStatusFailed
		result = "导入失败"
	}
	if err := s.setJobFinishStatus(ctx, jobPo.Id, finishStatus, result); err != nil {
		stark.Logger.Errorf(ctx, "runJobInBackground UpdateJob jobId:%d, err:%s", jobPo.Id, err.Error())
	} else {
		stark.Logger.Infof(ctx, "runJobInBackground UpdateJob jobId:%d, succeed", jobPo.Id)
	}
}

// lockJob takes the per-job exclusive redis lock so that concurrent StartJob
// calls cannot run the same job twice. The lock auto-expires after ten
// minutes; long jobs renew it via addLockTime.
func (s *jobCenterService) lockJob(ctx context.Context, jobId int64) error {
	tenantCache, _ := cache.TenantRedis()
	cacheKey := fmt.Sprintf(jobCacheKey, jobId)

	// INCR is atomic: the first caller observes 1, any concurrent caller > 1.
	flag, err := tenantCache.Incr(ctx, cacheKey)
	if err != nil {
		return err
	}
	if flag > 1 {
		stark.Logger.Infof(ctx, "StartJob jobId:%d is running", jobId)
		// Idiom fix: fmt.Errorf instead of errors.New(fmt.Sprintf(...)).
		return fmt.Errorf("StartJob jobId:%d is running", jobId)
	}
	// Auto-release after ten minutes in case the holder crashes.
	_, err = tenantCache.Expire(ctx, cacheKey, 10*60)
	if err != nil {
		stark.Logger.Infof(ctx, "StartJob jobId:%d set cache key expire err:%s", jobId, err.Error())
		return err
	}
	return nil
}

// addLockTime extends the job lock's TTL by another ten minutes so that a
// long-running job does not lose its lock mid-flight.
func (s *jobCenterService) addLockTime(ctx context.Context, jobId int64) error {
	redisClient, _ := cache.TenantRedis()
	_, err := redisClient.Expire(ctx, fmt.Sprintf(jobCacheKey, jobId), 10*60)
	return err
}

// unlockJob releases the exclusive redis lock taken by lockJob.
func (s *jobCenterService) unlockJob(ctx context.Context, jobId int64) error {
	redisClient, _ := cache.TenantRedis()
	if _, err := redisClient.Del(ctx, fmt.Sprintf(jobCacheKey, jobId)); err != nil {
		stark.Logger.Errorf(ctx, "runJobInBackground-del cache key err:%s", err.Error())
		return err
	}
	return nil
}

// setJobRunningStatus flips a job into the "running" state.
func (s *jobCenterService) setJobRunningStatus(ctx context.Context, jobId int64) error {
	where := map[string]interface{}{"id": jobId, "is_deleted": constants.IsDeletedFalse}
	updates := map[string]interface{}{"status": constants.JobStatusRunning}
	return s.jobCenterRepo.UpdateJob(ctx, where, updates)
}

// setJobFinishStatus marks a job as finished, recording its finish status,
// finish time and a human-readable result message.
func (s *jobCenterService) setJobFinishStatus(ctx context.Context, jobId int64, finishStatus int, result string) error {
	where := map[string]interface{}{"id": jobId, "is_deleted": constants.IsDeletedFalse}
	return s.jobCenterRepo.UpdateJob(ctx, where, map[string]interface{}{
		"status":        constants.JobStatusFinished,
		"finish_status": finishStatus,
		"finish_time":   time.Now(),
		"result":        result,
	})
}

// pushResultToQueue pushes the finished job's protobuf representation onto
// the creator's per-user redis notification list ("finished-notice:<userId>").
func (s *jobCenterService) pushResultToQueue(ctx context.Context, jobId int64) error {
	jobPo, err := s.jobCenterRepo.QueryJobById(ctx, jobId)
	if err != nil {
		stark.Logger.Errorf(ctx, "runJobInBackground QueryJobById jobId:%d, err:%s", jobId, err.Error())
		return err
	}
	// BUG FIX: the marshal error was previously discarded with `_`, which
	// could push an empty payload onto the queue.
	jobPbBytes, err := json.Marshal(jobPo2Pb(jobPo))
	if err != nil {
		stark.Logger.Errorf(ctx, "runJobInBackground Marshal jobId:%d, err:%s", jobId, err.Error())
		return err
	}
	listKey := fmt.Sprintf("finished-notice:%d", jobPo.CreatedBy)
	tenantCache, _ := cache.TenantRedis()
	_, err = tenantCache.RPush(ctx, listKey, string(jobPbBytes))
	if err != nil {
		stark.Logger.Errorf(ctx, "runJobInBackground RPush k:%s,v:%s err:%s", listKey, string(jobPbBytes), err.Error())
		return err
	}
	stark.Logger.Infof(ctx, "runJobInBackground RPush %s success", listKey)
	return nil
}

// pullData pulls the export data for a job batch by batch. Each batch is
// fetched from the business service matching the job type, serialized to
// JSON, uploaded to oss as an intermediate ".txt" file, and recorded as a
// batch-runtime checkpoint so an interrupted job can resume where it left
// off. Returns the export header row gathered from the first batch that
// provides one.
func (s *jobCenterService) pullData(ctx context.Context, jobPo *jobCenterRepo.GfyxJobPo) (title []string, err error) {
	_, batchRuntimeList, err := s.jobCenterRepo.QueryJobBatchRuntimeList(ctx, map[string]interface{}{"job_id": jobPo.Id, "is_deleted": constants.IsDeletedFalse}, nil)
	if err != nil {
		stark.Logger.Errorf(ctx, "pullData QueryJobBatchRuntimeList job batch runtime jobPo:%+v, err:%s", jobPo, err.Error())
		return
	}

	// Resume support: find the highest batch number already processed and
	// how many rows those batches covered.
	maxBatchNum := int32(0)
	parsedNumber := int32(0)
	for _, batchRuntime := range batchRuntimeList {
		if batchRuntime.BatchNum > maxBatchNum {
			maxBatchNum = batchRuntime.BatchNum
		}
		parsedNumber += batchRuntime.BatchSize
	}
	// Remaining rows still to process; derive how many batches are left.
	restSize := jobPo.Number - parsedNumber
	restBatch := int(math.Ceil(float64(restSize) / float64(batchDefaultSize)))

	tenantCode, _ := tenant_db.GetTenantCode(ctx)

	var rowTitle []string
	for i := 1; i <= restBatch; i++ {
		// Renew the job lock while processing, so it does not auto-expire
		// mid-run and let another goroutine start a duplicate of this job.
		err = s.addLockTime(ctx, jobPo.Id)
		if err != nil {
			stark.Logger.Infof(ctx, "pullData jobPo:%+v set cache key expire err:%s", jobPo, err.Error())
			return
		}

		batchNum := maxBatchNum + int32(i)
		var batchSize int32 = batchDefaultSize
		if i == restBatch {
			// Last batch: process whatever rows remain (a remainder of 0
			// means the last batch is a full one).
			batchSize = restSize % batchDefaultSize
			if batchSize == 0 {
				batchSize = batchDefaultSize
			}
		}
		// Fetch the business data for this batch from the service matching
		// the job type.
		var rows [][]string
		switch jobPo.JobType {
		case constants.OrderTypeCustomerExport, constants.JobTypeMarketCustomerExport:
			rowTitle, rows, err = s.customerService.QueryExportCustomerBatchData(ctx, jobPo.Params, batchNum, batchSize)
		case constants.OrderTypeProjectExport:
			rowTitle, rows, err = s.projectService.QueryExportProjectBatchData(ctx, jobPo.Params, batchNum, batchSize)
		case constants.OrderTypeBidExport, constants.OrderTypeCustomerDetailPurchaseExport, constants.JobTypeMarketBidExport:
			rowTitle, rows, err = s.bidService.QueryExportBiddingBatchData(ctx, jobPo.Params, batchNum, batchSize)
		case constants.OrderTypeCustomerDetailLandExport, constants.JobTypeLandExport:
			rowTitle, rows, err = s.projectService.QueryExportTakenLandBatchData(ctx, jobPo.Params, batchNum, batchSize)
		case constants.OrderTypeCustomerDetailBuildExport, constants.JobTypeBuildingExport:
			rowTitle, rows, err = s.buildingService.QueryExportBuildingBatchData(ctx, jobPo.Params, batchNum, batchSize)
		case constants.OrderTypeCustomerDetailSaleExport, constants.JobTypeSaleExport:
			rowTitle, rows, err = s.projectService.QueryExportSaleBatchData(ctx, jobPo.Params, batchNum, batchSize)
		case constants.OrderTypeCustomerDetailCooperationExport:
			rowTitle, rows, err = s.contactService.QueryExportCooperationBatchData(ctx, jobPo.Params, batchNum, batchSize)
		case constants.OrderTypeCustomerDetailCaseExport:
			rowTitle, rows, err = s.customerService.QueryExportSupplierProjectBatchData(ctx, jobPo.Params, batchNum, batchSize)
		case constants.JobTypeClueExport:
			rowTitle, rows, err = s.clueService.QueryExportClueBatchData(ctx, jobPo.Params, batchNum, batchSize)
		//case constants.JobTypeSaleExport:
		//rowTitle, rows, err = s.projectService.QueryExportSaleBatchData(ctx, jobPo.Params, batchNum, batchSize)
		//case constants.JobTypeBuildingExport:
		//	rowTitle, rows, err = s.buildingService.QueryExportBuildingBatchData(ctx, jobPo.Params, batchNum, batchSize)
		case constants.JobTypeCustomerExport:
			rowTitle, rows, err = s.customerManageService.QueryExportCustomerBatchData(ctx, jobPo.Params, batchNum, batchSize)
		}
		if err != nil {
			stark.Logger.Errorf(ctx, "pullData QueryExportCustomerBatchData jobPo:%#v, err:%s", jobPo, err.Error())
			return
		}
		// Capture the header row from the first batch that provides one.
		if len(title) == 0 && len(rowTitle) != 0 {
			title = rowTitle
		}

		if len(rows) == 0 {
			stark.Logger.Errorf(ctx, "pullData QueryExportCustomerBatchData without data jobPo:%#v", jobPo)
			continue
		}

		// Serialize the batch rows and upload them to oss as an intermediate
		// file; the file is re-read later by packingResultFile.
		rowBytes, _ := json.Marshal(rows)
		var filePath string
		filePath, _, err = ossfile.PutOss(bytes.NewReader(rowBytes), constants.PathExportBatchRuntime, tenantCode, ".txt", 1)
		if err != nil {
			stark.Logger.Errorf(ctx, "runJobInBackground PutOss err:%s", err.Error())
			return
		}
		newFilePo := &file.GfyxFilesPo{
			Business:   constants.PathExportBatchRuntime,
			FileType:   ".txt",
			FileName:   fmt.Sprintf("任务[%d]的第[%d]批次中间过程文件.txt", jobPo.Id, batchNum),
			FilePath:   filePath,
			FileSize:   int64(len(rowBytes)),
			IsDeleted:  constants.IsDeletedFalse,
			CreatedOn:  time.Now(),
			CreatedBy:  "",
			ModifiedOn: time.Now(),
			ModifiedBy: "",
		}
		newFilePo, err = s.fileRepo.SaveFile(ctx, newFilePo)
		if err != nil {
			stark.Logger.Errorf(ctx, "runJobInBackground SaveFile err:%s", err.Error())
			return
		}

		// Record the batch checkpoint so a rerun can resume from here.
		_, err = s.jobCenterRepo.SaveJobBatchRuntime(ctx, &jobCenterRepo.GfyxJobBatchRuntimePo{
			Id:         0,
			JobId:      jobPo.Id,
			BatchNum:   batchNum,
			BatchSize:  batchSize,
			FileId:     newFilePo.Id,
			CreatedBy:  0,
			CreatedOn:  time.Now(),
			ModifiedBy: 0,
			ModifiedOn: time.Now(),
			IsDeleted:  constants.IsDeletedFalse,
		})
		if err != nil {
			return
		}
	}
	return
}

// packingResultFile merges every batch intermediate file of a job into a
// single xlsx workbook, uploads it to oss and records it in the file table.
// Returns the result file id and the total number of data rows written.
func (s *jobCenterService) packingResultFile(ctx context.Context, jobId int64, titleList []string) (int64, int32, error) {
	_, batchRuntimeList, err := s.jobCenterRepo.QueryJobBatchRuntimeList(ctx, map[string]interface{}{"job_id": jobId, "is_deleted": constants.IsDeletedFalse}, nil)
	if err != nil {
		stark.Logger.Errorf(ctx, "packingResultFile QueryJobBatchRuntimeList job batch runtime jonId:%+v, err:%s", jobId, err.Error())
		return 0, 0, err
	}
	var fileIds []int64
	for _, batchRuntimePo := range batchRuntimeList {
		fileIds = append(fileIds, batchRuntimePo.FileId)
	}

	files, err := s.fileRepo.QueryByIds(ctx, fileIds)
	if err != nil {
		stark.Logger.Errorf(ctx, "packingResultFile QueryByIds fileIds:%+v, err:%s", fileIds, err.Error())
		return 0, 0, err
	}

	excelFile := excelize.NewFile()
	// Header row. BUG FIX: build cell names with CoordinatesToCellName so
	// columns beyond "Z" (AA, AB, ...) work — the previous 'A'+i character
	// arithmetic produced invalid names past 26 columns.
	for i, title := range titleList {
		cellName, cellErr := excelize.CoordinatesToCellName(i+1, 1)
		if cellErr != nil {
			stark.Logger.Errorf(ctx, "packingResultFile CoordinatesToCellName err:%s", cellErr.Error())
			return 0, 0, cellErr
		}
		err = excelFile.SetCellValue("Sheet1", cellName, title)
		if err != nil {
			stark.Logger.Errorf(ctx, "packingResultFile SetCellValue title err:%s", err.Error())
			return 0, 0, err
		}
	}
	totalCount := 0
	for _, fileItem := range files {
		fileReader, err := ossfile.GetFile(fileItem.FilePath)
		if err != nil {
			stark.Logger.Errorf(ctx, "packingResultFile GetFile fileItem:%+v, err:%s", fileItem, err)
			return 0, 0, err
		}
		fileData, readErr := io.ReadAll(fileReader)
		// BUG FIX: close before the error check so the reader is not leaked
		// when ReadAll fails.
		_ = fileReader.Close()
		if readErr != nil {
			stark.Logger.Errorf(ctx, "packingResultFile ReadAll fileItem:%+v, err:%s", fileItem, readErr)
			return 0, 0, readErr
		}
		var rowArr [][]string
		err = json.Unmarshal(fileData, &rowArr)
		if err != nil {
			stark.Logger.Errorf(ctx, "packingResultFile Unmarshal fileItem:%+v, err:%s", fileItem, err)
			return 0, 0, err
		}
		// Append this batch below everything written so far; "+2" skips the
		// 1-based header row.
		for i, row := range rowArr {
			for j, cell := range row {
				cellName, cellErr := excelize.CoordinatesToCellName(j+1, i+2+totalCount)
				if cellErr != nil {
					stark.Logger.Errorf(ctx, "packingResultFile CoordinatesToCellName err:%s", cellErr.Error())
					return 0, 0, cellErr
				}
				err = excelFile.SetCellValue("Sheet1", cellName, cell)
				if err != nil {
					stark.Logger.Errorf(ctx, "packingResultFile SetCellValue rows err:%s", err.Error())
					return 0, 0, err
				}
			}
		}
		totalCount += len(rowArr)
	}

	fileBuffer, err := excelFile.WriteToBuffer()
	if err != nil {
		stark.Logger.Errorf(ctx, "packingResultFile WriteToBuffer err:%s", err.Error())
		return 0, 0, err
	}
	fileSize := fileBuffer.Len()
	tenantCode, _ := tenant_db.GetTenantCode(ctx)
	filePath, _, err := ossfile.PutOss(fileBuffer, constants.PathExportResult, tenantCode, ".xlsx", 1)
	if err != nil {
		stark.Logger.Errorf(ctx, "packingResultFile PutOss result file err:%s", err.Error())
		return 0, 0, err
	}
	resultFilePo, err := s.fileRepo.SaveFile(ctx, &file.GfyxFilesPo{
		Business:   constants.PathExportResult,
		FileType:   ".xlsx",
		FileName:   fmt.Sprintf("任务[%d]的最终结果文件.xlsx", jobId),
		FilePath:   filePath,
		FileSize:   int64(fileSize),
		IsDeleted:  constants.IsDeletedFalse,
		CreatedOn:  time.Now(),
		CreatedBy:  "",
		ModifiedOn: time.Now(),
		ModifiedBy: "",
	})
	if err != nil {
		stark.Logger.Errorf(ctx, "packingResultFile SaveFile result file err:%s", err.Error())
		return 0, 0, err
	}
	return resultFilePo.Id, int32(totalCount), nil
}

// MyJobList returns the current user's jobs, ordered by finish time
// descending, optionally filtered by status and begin time, with signed
// result-file urls attached to finished jobs.
func (s *jobCenterService) MyJobList(ctx context.Context, request *pb.GetMyJobListRequest) (*pb.GetMyJobListResponse, error) {
	curUser := usermetadata.GetMetaUserInfo(ctx)
	condition := map[string]interface{}{"is_deleted": constants.IsDeletedFalse, "created_by": curUser.UserId}
	if request.Status != 0 {
		condition["status"] = request.Status
	}
	if request.BeginTime != "" {
		condition["begin_time"] = request.BeginTime
	}

	pageInfo := &utils.PageInfo{
		PageSize: int(request.PageSize),
		Page:     int(request.Page),
	}
	total, list, err := s.jobCenterRepo.QueryJobList(ctx, condition, pageInfo, "finish_time desc")
	if err != nil {
		return nil, err
	}

	resp := &pb.GetMyJobListResponse{Total: int32(total)}
	if len(list) == 0 {
		return resp, nil
	}

	// Resolve result-file urls for all finished jobs in a single pass.
	var completedJobIds []int64
	for _, jobPo := range list {
		if jobPo.Status == constants.JobStatusFinished {
			completedJobIds = append(completedJobIds, jobPo.Id)
		}
	}
	jobId2FileUrlMap := s.getResultFileUrlByJobIds(ctx, completedJobIds)

	for _, jobPo := range list {
		item := jobPo2Pb(jobPo)
		if jobPo.Status == constants.JobStatusFinished {
			item.ResultFileUrl = jobId2FileUrlMap[jobPo.Id]
		}
		resp.List = append(resp.List, item)
	}
	return resp, nil
}

// jobPo2Pb converts a job persistence object into its protobuf item. The
// display title is derived from the job type name plus the creation time,
// and the finished time is only filled in when the nullable column is valid.
func jobPo2Pb(jobPo *jobCenterRepo.GfyxJobPo) *pb.JobItem {
	jobPb := &pb.JobItem{
		Id:           jobPo.Id,
		Type:         jobPo.JobType,
		Status:       jobPo.Status,
		FinishStatus: jobPo.FinishStatus,
		Result:       jobPo.Result,
		CreatedTime:  jobPo.CreatedOn.Format("2006-01-02 15:04:05"),
	}
	jobPb.Title = fmt.Sprintf("%s_%s.xlsx", constants.GetOrderTypeName(jobPo.JobType), jobPo.CreatedOn.Format("2006-01-02_1504"))
	// Idiom fix: compare booleans directly instead of "== true".
	if jobPo.FinishTime.Valid {
		jobPb.FinishedTime = jobPo.FinishTime.Time.Format("2006-01-02 15:04:05")
	}
	return jobPb
}

// getResultFileUrlByJobIds resolves, best-effort, the signed oss url of each
// job's result file. Lookups that fail are logged and the affected entries
// are left as the empty string — callers treat a missing url as "no file".
func (s *jobCenterService) getResultFileUrlByJobIds(ctx context.Context, jobIds []int64) (jobId2ResultFileUrl map[int64]string) {
	jobId2ResultFileUrl = make(map[int64]string)
	// Avoid needless queries when there is nothing to resolve.
	if len(jobIds) == 0 {
		return
	}
	_, jobFileList, err := s.jobCenterRepo.QueryJobFileList(ctx, map[string]interface{}{"is_deleted": constants.IsDeletedFalse, "job_ids": jobIds}, nil)
	if err != nil {
		// BUG FIX: these errors were silently swallowed; log so failures are visible.
		stark.Logger.Errorf(ctx, "getResultFileUrlByJobIds QueryJobFileList jobIds:%+v, err:%s", jobIds, err.Error())
		return
	}

	var fileIds []int64
	for _, jobFilePo := range jobFileList {
		fileIds = append(fileIds, jobFilePo.FileId)
	}
	if len(fileIds) == 0 {
		return
	}

	fileList, err := s.fileRepo.QueryByIds(ctx, fileIds)
	if err != nil {
		stark.Logger.Errorf(ctx, "getResultFileUrlByJobIds QueryByIds fileIds:%+v, err:%s", fileIds, err.Error())
		return
	}

	fileId2FileUrl := make(map[int64]string)
	for _, filePo := range fileList {
		signUrl, signErr := ossfile.GetSignURL(filePo.FilePath)
		if signErr != nil {
			stark.Logger.Errorf(ctx, "getResultFileUrlByJobIds GetSignURL filePath:%s, err:%s", filePo.FilePath, signErr.Error())
			continue
		}
		fileId2FileUrl[filePo.Id] = signUrl
	}

	for _, jobFilePo := range jobFileList {
		jobId2ResultFileUrl[jobFilePo.JobId] = fileId2FileUrl[jobFilePo.FileId]
	}
	return
}

// RerunJob restarts a finished-but-failed job from scratch: it validates the
// job's state, clears the previous batch checkpoints and result file, resets
// the job to "un-started" and launches it again.
func (s *jobCenterService) RerunJob(ctx context.Context, request *pb.RerunJobRequest) (*pb.RerunJobResponse, error) {
	if request.Id == 0 {
		return nil, errors.New("任务id参数不能为空")
	}

	// Only jobs that finished with a failed status may be rerun.
	jobPo, err := s.jobCenterRepo.QueryJobById(ctx, request.Id)
	if err != nil {
		return nil, err
	}
	switch {
	case jobPo == nil:
		return nil, errors.New("任务不存在")
	case jobPo.Status != constants.JobStatusFinished:
		return nil, errors.New("任务运行中，请稍后再试")
	case jobPo.FinishStatus != constants.JobFinishStatusFailed:
		return nil, errors.New("任务已经成功，请勿重复执行")
	}

	// Drop the old checkpoints and result file, then reset the job state.
	jobIds := []int64{request.Id}
	if err = s.jobCenterRepo.DeleteJobBatchRuntimeByJobIds(ctx, jobIds); err != nil {
		return nil, err
	}
	if err = s.jobCenterRepo.DeleteJobFileByJobIds(ctx, jobIds); err != nil {
		return nil, err
	}
	if err = s.jobCenterRepo.UpdateJob(ctx, map[string]interface{}{"id": request.Id},
		map[string]interface{}{"status": constants.JobStatusUnStarted, "finish_status": constants.JobFinishStatusUnKnown}); err != nil {
		return nil, err
	}

	if err = s.StartJob(ctx, request.Id); err != nil {
		return nil, err
	}

	return &pb.RerunJobResponse{IsSuccess: true}, nil
}

// GetUnfinishedJobIds returns the ids of all jobs that are still un-started
// or running.
func (s *jobCenterService) GetUnfinishedJobIds(ctx context.Context) ([]int64, error) {
	condition := map[string]interface{}{
		"status_list": []int32{constants.JobStatusUnStarted, constants.JobStatusRunning},
		"is_deleted":  constants.IsDeletedFalse,
	}
	_, list, err := s.jobCenterRepo.QueryJobList(ctx, condition, nil, "")
	if err != nil {
		return nil, err
	}
	var ids []int64
	for _, jobPo := range list {
		ids = append(ids, jobPo.Id)
	}
	return ids, nil
}

// NewJob creates a job record in the "un-started" state — without launching
// it — and returns the new job id. totalCount is the number of records the
// job is expected to process.
func (s *jobCenterService) NewJob(ctx context.Context, method, jobType, totalCount int32, params string) (int64, error) {
	curUser := usermetadata.GetMetaUserInfo(ctx)
	now := time.Now()

	newJob := &jobCenterRepo.GfyxJobPo{
		JobType:      jobType,
		Method:       method,
		Params:       params,
		Number:       totalCount,
		Status:       constants.JobStatusUnStarted,
		FinishStatus: constants.JobFinishStatusUnKnown,
		CreatedBy:    curUser.UserId,
		CreatedOn:    now,
		ModifiedBy:   curUser.UserId,
		ModifiedOn:   now,
		IsDeleted:    constants.IsDeletedFalse,
	}
	jobPo, err := s.jobCenterRepo.SaveJob(ctx, newJob)
	if err != nil {
		return 0, err
	}
	return jobPo.Id, nil
}
