package adapter

import (
	"bytes"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"net/http"
	"os"
	"path/filepath"
	"sort"
	"strings"
	"sync"
	"text/template"
	"time"

	"github.com/sirupsen/logrus"
	"google.golang.org/grpc"
	"google.golang.org/protobuf/encoding/protojson"
	"google.golang.org/protobuf/proto"

	"migration-tool/adapter/aone"
	"migration-tool/adapter/gitee"
	"migration-tool/common"
)

const (
	// Per-repository data file names
	BinFileNameMergeRequests                    = "merge-request.data"
	BinFileNameMergeRequestsFailedImport        = "merge-request-failed.data"
	BinFileNameMergeRequestUsers                = "merge-request-user.data"
	BinFileNameMergeRequestsIncrement           = "merge-request-increment.data"
	BinFileNameMergeRequestUsersIncrement       = "merge-request-user-increment.data"
	BinFileNameMergeRequestsMerge               = "merged-data-merge-request.data"
	BinFileNameIssues                           = "issue.data"
	BinFileNameIssuesFailedImport               = "issue-failed.data"
	BinFileNameIssueUsers                       = "issue-user.data"
	BinFileNameIssuesIncrement                  = "issue-increment.data"
	BinFileNameIssueUsersIncrement              = "issue-user-increment.data"
	BinFileNameIssuesMerge                      = "merged-data-issue.data"
	BinFileNameRepoMembers                      = "repo-member.data"
	BinFileNameRepoMembersFailedImport          = "repo-member-failed.data"
	BinFileNameRepoMetaInfo                     = "repo-meta-info.data"
	BinFileNameProtectBranchSetting             = "protect-branch-setting.data"
	BinFileNameProtectBranchSettingFailedImport = "protect-branch-setting-failed.data"
	BinFileNameCodeReviewSetting                = "code-review-setting.data"
	BinFileNamePushRuleSetting                  = "push-rule-setting.data"
	BinFileNameWebHookSetting                   = "webhook-setting.data"
	BinFileNameDeployKeysSetting                = "deploy-keys-setting.data"
	BinFileNameMileStones                       = "milestone.data"
	BinFileNameMileStonesFailedImport           = "milestone-failed.data"
	BinFileNameReleases                         = "release.data"
	BinFileNameReleasesFailedImport             = "release-failed.data"

	// Enterprise-level data; not split per repository
	BinFileNameEnterprise                    = "enterprise.json"
	BinFileNameEnterpriseMembersProto        = "enterprise-member.data"
	BinFileNameEnterpriseMembersJson         = "enterprise-member.json"
	BinFileNameGroups                        = "group.json"
	BinFileNameGroupMembers                  = "group-member.data"
	BinFileNameGroupMembersFailedImportProto = "group-member-failed.data"
	BinFileNameGroupMembersFailedImportJson  = "group-member-failed.json"
	BinFileNameLabels                        = "label.data"
	BinFileNameLabelsFailedImportProto       = "label-failed.data"
	BinFileNameLabelsFailedImportJson        = "label-failed.json"

	// No file name needed, but a placeholder is required
	CloneCodeFileName = ""
	CloneWikiFileName = ""

	// Progress file names
	JsonFileNameExportTaskProgress = "export-progress.json"
	JsonFileNameImportTaskProgress = "import-progress.json"

	// During export, every repository that took part is recorded here.
	// During import this file is read and matched against the repository
	// wildcards from the config file.
	JsonFileNameAllRepoInfo              = "repo-info.json"
	BinFileNameAllUsersProto             = "all-users.data"
	BinFileNameAllUsersFailedImportProto = "all-users-failed.data"
	BinFileNameAllUsersFailedImportJson  = "all-users-failed.json"
	BinFileNameAllUsersJson              = "all-users.json"
	FileNameTotalRepoSize                = "total-size.data"

	// String templates (rendered via FillParamIntoStringTmpl with TmplArgs)
	ExportDataFileTmpl       = "{{.ExportDataDir}}/{{.ExportPlatformName}}-repo-data/{{.RepoGroupName}}-{{.RepoName}}/{{.FileName}}"
	CloneRepoFileTmpl        = "{{.ExportDataDir}}/{{.ExportPlatformName}}-repo-data/{{.RepoGroupName}}-{{.RepoName}}/{{.RepoName}}.git"
	CloneWikiFileTmpl        = "{{.ExportDataDir}}/{{.ExportPlatformName}}-repo-data/{{.RepoGroupName}}-{{.RepoName}}/{{.RepoName}}.wiki.git"
	ExportEnterpriseFileTmpl = "{{.ExportDataDir}}/{{.ExportPlatformName}}-meta-data/{{.EnterpriseName}}/{{.FileName}}"

	WorkerPoolLogFileTmpl = "{{.ExportDataDir}}/{{.ExportPlatformName}}-meta-data/{{.FileName}}"
	LogFileTmpl           = "{{.ExportDataDir}}/{{.ExportPlatformName}}-log/{{.RepoGroupName}}-{{.RepoName}}.log"

	TaskStateReady   TaskState = 1
	TaskStateRunning TaskState = 2
	TaskStateFinish  TaskState = 3
	TaskStateError   TaskState = 4
	TaskStateAbort   TaskState = 5

	AdapterReady   int32 = 0
	AdapterRunning int32 = 1
	AdapterFinish  int32 = 2
	AdapterStop    int32 = 3
	AdapterError   int32 = 4

	IntervalScanTasks int = 3

	// Per-repository tasks, scheduled by the Adapter
	TaskNameCode                 = "repository"
	TaskNameWiki                 = "wiki"
	TaskNameRepoMember           = "repo-member"
	TaskNameRepoMetaInfo         = "repo-meta-info"
	TaskNameProtectBranchSetting = "protect-branch-setting"
	TaskNameCodeReviewSetting    = "code-review-setting"
	TaskNamePushRuleSetting      = "push-rule-setting"
	TaskNameWebHookSetting       = "web-hook-setting"
	TaskNameDeployKeysSetting    = "deploy-keys-setting"
	TaskNameRelease              = "release"
	TaskNameMileStone            = "milestone"
	TaskNameMergeRequest         = "merge-request"
	TaskNameIssue                = "issue"
	TaskNameCommitComment        = "commit-comment"

	// Global (non-per-repository) tasks; scheduled only once, run by default
	GlobalTaskNameGroup            = "group"
	GlobalTaskNameGroupMember      = "group-member"
	GlobalTaskNameEnterprise       = "enterprise"
	GlobalTaskNameEnterpriseMember = "enterprise-member"
	GlobalTaskNameLabel            = "label"

	// Only used during user import; when not configured, users are not imported
	GlobalTaskNameUser = "user"

	// Tasks not present in the task map, used for concurrency decisions
	SubTaskNameMergeRequestComment = "merge-request-comment"
	SubTaskNameIssueComment        = "issue-comment"

	// Log message constants
	LogMsgNoData      = "无数据，任务结束"
	LogMsgTaskStart   = "Start"
	LogMsgTaskFailed  = "Failed"
	LogMsgTaskSuccess = "Success"

	LogMsgAdapterSuccessStop = "仓库成功已停止，并成功保存进度"
	LogMsgAdapterSuccess     = "仓库已执行成功"

	LogMsgCheckSuccess = "校验数据成功，数据完全一致！"

	// Adapter errors
	ErrorMsgAdapterIsRunning  = "仓库已经正在运行，请勿重复启动"
	ErrorMsgAdapterIsFinished = "仓库已经完成所有任务"
	ErrorMsgPlatFormNameError = "配置文件中的exportPlatformName不存在，支持\"aone\",\"gitee\""

	// Errors that may resolve themselves if the Adapter retries the request
	ErrorMsgNetworkError = "Get请求访问失败"

	// Errors the Adapter cannot resolve; the program must stop
	ErrorMsgConnRpcError     = "导入时Rpc连接出现错误"
	ErrorMsgRpcResponseError = "rpc响应错误，"
	ErrorMsgAccessError      = "access的角色权限对应错误"

	ErrorMsgCheckError = "校验数据失败，存在导入失败数据！"

	DefaultRpcHost = "localhost:50051"

	WorkerPoolLogFileName = "worker-pool.log"
	// DefaultMinWorkerPool
	// Only issues and MRs use the WorkerPool (common.DefaultMaxParallelRepo).
	// During export each repository uses at most 4 workers, to avoid being
	// rate-limited by gitee too often.
	DefaultMinWorkerPool int = 20
	DefaultMaxWorkerPool int = 30
	MaxSendMsgSize       int = 64 * 1024 * 1024 * 8

	// DefaultWorkerPoolChanCap buffers Submit so it rarely blocks, reducing
	// how often the pool stalls producers
	DefaultWorkerPoolChanCap int = 500

	RepoInfoCacheIntervalSeconds = 3 * 60.0 * 60.0 // 3-hour cache

	DefaultProgressTimeoutSeconds = 60.0 * 30.0 // 30-minute timeout

)

// GlobalWorkerPool is the process-wide pool; only issue and merge-request
// tasks use it.
var GlobalWorkerPool *WorkerPool

// NOTE(review): Go convention would name these ErrAdapterIsRunning /
// ErrAdapterIsFinished / ErrNetwork, but renaming would break callers.
var AdapterIsRunningError = errors.New(ErrorMsgAdapterIsRunning)
var AdapterIsFinishedError = errors.New(ErrorMsgAdapterIsFinished)

// NetworkError marks a failed GET request (see DoGetRequest); such errors
// may be resolved by retrying.
var NetworkError = errors.New(ErrorMsgNetworkError)

// RepoInfo identifies one repository being migrated and carries the
// coordinates used to build export/import file paths and clone URLs.
type RepoInfo struct {
	RepoId           int64  `json:"repo_id"`
	RepoName         string `json:"repo_name"` // actually the repo path (URL slug), not the display name
	RepoGroupId      int64  `json:"repo_group_id"`
	RepoGroupName    string `json:"repo_group_name"` // actually the group path (URL slug), not the display name
	EnterpriseId     int64  `json:"enterprise_id"`
	CodeCloneHttpUrl string `json:"code_clone_http_url"`
	WikiEnable       bool   `json:"wiki_enable"`
}

// RepoInfoArray is a slice of RepoInfo; it implements sort.Interface.
type RepoInfoArray []RepoInfo
// RepoInfoPointArray is the pointer-element counterpart of RepoInfoArray.
type RepoInfoPointArray []*RepoInfo

// taskMsg reports the outcome (possibly-nil error) of a single named task.
type taskMsg struct {
	Err      error
	TaskName string
}

// MergeArgs bundles the inputs of a per-repository merge step
// (presumably combining full and incremental export data — see the
// "merged-data-*" file name constants; confirm against callers).
type MergeArgs struct {
	Repo       *RepoInfo
	UserMap    *sync.Map
	BaseConfig *common.BaseConfig
	Log        *logrus.Logger
}

// Adapter is one schedulable migration unit: it can be started, stopped,
// queried for a status message, and polled for completion.
type Adapter interface {
	Start() error
	Stop() error
	GetMessage() (string, error)
	Done() bool
}

// WorkerFuncArgs is the argument bag shared by parent (page-fetch) and
// child (per-item) worker functions; see WorkerTask and Worker.Start.
type WorkerFuncArgs struct {
	ParentTaskUrl string
	Page          int
	UserMap       *sync.Map
	Log           *logrus.Logger
	SubFuncWg     *sync.WaitGroup
	TaskName      string
	StopChan      chan error // used by the outside world to end the task
	PoolStop      *bool      // flags that this repository's WorkerPool scheduling must stop

	UrlDataIds []string

	// Needed by child tasks
	RepoInfo *RepoInfo
	Token    string
	PageSize int

	// Incremental data.
	// AfterTimeStamp logic: whenever any new data appears, the next page is
	// tried, until a page yields no new data.
	// It starts at false (assume no new data) and is set to true once new
	// data shows up.
	IncrementFlag  bool
	AfterTimeStamp *bool
	TimeStamp      time.Time

	// Final aggregated data
	Data proto.Message
	// Index range into the final data, half-open: [StartIndex, EndIndex)
	StartIndex *int
	EndIndex   *int
	// Index of this item within the final data
	FinalDataIndex int
	// DataId substituted into the request URL
	DataId string
}

// WorkerTask is used for issues and merge requests.
// ParentFunc fetches issues/MRs page by page, 100 per page.
// ChildFunc fetches the comments of one item obtained by ParentFunc.
type WorkerTask struct {
	Parent     bool // true: run ParentFunc (page fetch); false: run ChildFunc (per-item)
	Args       WorkerFuncArgs
	ParentFunc func(args *WorkerFuncArgs) (bool, error)
	ChildFunc  func(args *WorkerFuncArgs) error
}

// WorkerPool fans WorkerTasks out to a fixed set of workers via one shared
// buffered channel.
type WorkerPool struct {
	workers   []*Worker
	taskQueue chan *WorkerTask
	log       *logrus.Logger
}

// Worker is one consumer goroutine of a WorkerPool's task queue.
type Worker struct {
	id        int
	pool      *WorkerPool
	taskQueue chan *WorkerTask
	poolLog   *logrus.Logger
}

// NewWorker constructs a worker bound to the pool's shared task queue.
// The queue's buffer size is irrelevant to the worker itself: a worker only
// dequeues its next task after finishing the current one.
func NewWorker(id int, pool *WorkerPool, poolLog *logrus.Logger) *Worker {
	w := &Worker{
		id:      id,
		pool:    pool,
		poolLog: poolLog,
	}
	w.taskQueue = pool.taskQueue
	return w
}

// Start launches the worker's goroutine. The goroutine consumes tasks from
// the pool's shared queue until the queue is closed (WorkerPool.Stop).
//
// A parent task fetches one page of data; on success it re-submits itself
// for the next page and submits one child task per fetched item. A child
// task processes a single item and, on failure, flags the repository's pool
// run to stop.
func (w *Worker) Start() {
	go func() {
		for task := range w.taskQueue {
			funcArgs := task.Args
			// Once this repository's run is flagged to stop, drain its
			// remaining tasks without executing them.
			if *funcArgs.PoolStop {
				continue
			}
			if task.Parent {
				hasNext, err := task.ParentFunc(&funcArgs)
				if !hasNext || err != nil {
					// No further pages, or a fetch failure: report and move on.
					funcArgs.StopChan <- err
					continue
				}

				funcArgs.Page += 1
				nextPageTask := &WorkerTask{
					Parent:     true,
					Args:       funcArgs,
					ParentFunc: task.ParentFunc,
					ChildFunc:  task.ChildFunc,
				}
				// 1. First, put the request for the next page into the queue.
				w.pool.Submit(nextPageTask)

				// 2. Then submit one child task per fetched item, covering
				//    [StartIndex, EndIndex) of the final data.
				startIndex := *funcArgs.StartIndex
				endIndex := *funcArgs.EndIndex
				UrlDataIdIndex := 0
				for startIndex < endIndex {
					funcArgs.FinalDataIndex = startIndex
					funcArgs.DataId = funcArgs.UrlDataIds[UrlDataIdIndex]
					subTask := &WorkerTask{
						Parent:     false,
						Args:       funcArgs,
						ParentFunc: task.ParentFunc,
						ChildFunc:  task.ChildFunc,
					}
					funcArgs.SubFuncWg.Add(1)
					w.pool.Submit(subTask)
					startIndex += 1
					UrlDataIdIndex += 1
				}
			} else {
				err := task.ChildFunc(&funcArgs)
				funcArgs.SubFuncWg.Done()
				if err != nil {
					// One failed child stops the whole repository's run.
					*funcArgs.PoolStop = true
					funcArgs.StopChan <- err
				}
			}
		}
	}()
}

// NewWorkerPool builds a pool of workers sized from the configured maximum
// number of repositories processed in parallel, first stopping any existing
// GlobalWorkerPool so its workers can drain and exit.
// It panics if the pool's log file cannot be created.
//
// NOTE(review): this constructor does not assign GlobalWorkerPool itself —
// presumably the caller does; confirm at the call sites.
func NewWorkerPool(cfg *common.BaseConfig) *WorkerPool {
	if GlobalWorkerPool != nil {
		GlobalWorkerPool.Stop()
	}

	// Each repository uses at most 4 workers; the +1 spare keeps the pool
	// from deadlocking when every repository is busy.
	workerCount := cfg.MaxParallelRepoNum*4 + 1

	logPath, err := FillParamIntoStringTmpl(TmplArgs{
		StringTmpl:         WorkerPoolLogFileTmpl,
		ExportDataDir:      cfg.ExportDataDir,
		ExportPlatformName: cfg.ExportPlatformName,
		FileName:           WorkerPoolLogFileName,
	})
	if err != nil {
		panic(err)
	}

	poolLog, err := common.NewLog(logPath)
	if err != nil {
		panic(err)
	}

	pool := &WorkerPool{
		workers:   make([]*Worker, workerCount),
		taskQueue: make(chan *WorkerTask, DefaultWorkerPoolChanCap),
		log:       poolLog,
	}
	for id := range pool.workers {
		pool.workers[id] = NewWorker(id, pool, poolLog)
		pool.workers[id].Start()
	}
	return pool
}

// Submit enqueues a task for the pool's workers. It blocks once the queue's
// buffer (DefaultWorkerPoolChanCap) is full.
func (p *WorkerPool) Submit(task *WorkerTask) {
	p.taskQueue <- task
}

// Stop closes the task queue; each worker goroutine exits after draining it.
// Calling Submit after Stop would panic on the closed channel.
func (p *WorkerPool) Stop() {
	close(p.taskQueue)
}

// TmplArgs holds every value that can be substituted into the string
// templates (URLs and file paths) used across the adapters; only the fields
// referenced by the chosen StringTmpl need to be set.
type TmplArgs struct {
	// Common fields
	StringTmpl     string
	Token          string
	RepoGroupName  string
	RepoName       string
	PageSize       int
	RepoId         int64
	EnterpriseName string

	// Only needed when writing files
	ExportDataDir      string
	ExportPlatformName string
	FileName           string

	// Issue and MergeRequest use this as the data id inside the URL;
	// aone also uses it when fetching a namespace's members
	DataId string

	// Only used by aone's OpenApi
	ExternUid string

	// Only used by gitee's OpenApi
	EnterpriseToken string
	UserName        string
	UserId          int64
	EnterpriseId    int64
	RepoGroupId     int64
}

// FillParamIntoStringTmpl renders args.StringTmpl (a text/template) with the
// remaining fields of args as template data and returns the rendered string.
func FillParamIntoStringTmpl(args TmplArgs) (ret string, err error) {
	parsed, parseErr := template.New("tmpl").Parse(args.StringTmpl)
	if parseErr != nil {
		return "", parseErr
	}
	var rendered bytes.Buffer
	if execErr := parsed.Execute(&rendered, &args); execErr != nil {
		return "", execErr
	}
	return rendered.String(), nil
}

// RequestArgs holds the parameters of a DoGetRequest call.
type RequestArgs struct {
	ExportPlatformName string
	RealUrl            string
	Token              string
	RetVal             interface{} // pointer the JSON response body is unmarshaled into
}

// DoGetRequest performs a GET request against the export platform's OpenApi
// and unmarshals the JSON response body into args.RetVal.
//
// Platform-specific error payloads (aone / gitee) are probed first and
// turned into descriptive errors; the query string is stripped from the URL
// recorded in error messages so tokens do not leak into logs.
//
// Fixes over the previous version: the json.Unmarshal results of the
// best-effort error-payload probes were assigned to err and then silently
// overwritten (staticcheck SA4006) — they are now explicitly discarded; the
// duplicated strings.LastIndex call is computed once.
//
// NOTE(review): the client has no timeout, so a hung server blocks the
// calling worker indefinitely; consider http.Client{Timeout: ...} — confirm
// no caller relies on arbitrarily long downloads first.
func DoGetRequest(args RequestArgs) (err error) {
	client := http.Client{}
	defer client.CloseIdleConnections()

	req, err := http.NewRequest(http.MethodGet, args.RealUrl, nil)
	if err != nil {
		return err
	}
	if args.ExportPlatformName == AonePlatformName {
		// aone authenticates via this header; gitee carries the token elsewhere.
		req.Header.Add("PRIVATE-TOKEN", args.Token)
	}
	resp, err := client.Do(req)
	if err != nil {
		// Retryable by the adapter.
		return NetworkError
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return err
	}

	// Strip the query string before recording the URL in error messages.
	recordUrl := args.RealUrl
	if idx := strings.LastIndex(recordUrl, "?"); idx != -1 {
		recordUrl = recordUrl[:idx]
	}

	if args.ExportPlatformName == AonePlatformName {
		// Best effort: if the body is not an aone error payload, errMsg
		// simply stays zero-valued and we fall through to the real decode.
		var errMsg aone.ErrorMsg
		_ = json.Unmarshal(body, &errMsg)
		if errMsg.ErrorCode == AoneErrorCodePermissionDenied ||
			errMsg.ErrorCode == AoneErrorCodeUserNotFound ||
			errMsg.ErrorCode == AoneErrorCodeUnAuthorizedError {
			return errors.New("访问OpenApi时出现错误：" + errMsg.Message + "，访问的Api为:" + recordUrl)
		}
	} else if args.ExportPlatformName == GiteePlatformName {
		// Same best-effort probe for gitee's error payload shape.
		var errMsg gitee.ErrorMsg
		_ = json.Unmarshal(body, &errMsg)
		if errMsg.Message == GiteeErrorMsgGroupError {
			return errors.New("配置文件中的开源项目配置错误，访问的Api为：" + recordUrl)
		} else if errMsg.Message == GiteeErrorMsgUnauthorized401 {
			return errors.New("Token访问权限不足，请重新配置Token，访问的Api为:" + recordUrl)
		} else if errMsg.Message == GiteeErrorMsgUserNotFound {
			return errors.New("用户不存在，访问的Api为:" + recordUrl)
		} else if errMsg.Message != "" {
			return errors.New("访问OpenApi时出现错误，错误消息为【" + errMsg.Message + "】，访问的Api为:" + recordUrl)
		}
	}

	// May still fail for repositories whose Api returns shapes the target
	// struct does not cover; in that case the target type must be adjusted.
	if err = json.Unmarshal(body, args.RetVal); err != nil {
		// gitee rate-limiting special case, so callers can back off and retry.
		if args.ExportPlatformName == GiteePlatformName &&
			strings.Contains(string(body), GiteeErrorMsgBlock) {
			return GiteeErrorBlock
		}
		return errors.New("unmarshal失败，获取的数据为:" + string(body))
	}

	return nil
}

// CreateGrpcConnect dials the import server at cfg.RpcUrl with an enlarged
// maximum send size (MaxSendMsgSize) and no transport security.
//
// NOTE(review): grpc.Dial and grpc.WithInsecure are deprecated upstream;
// migrating to grpc.NewClient with insecure credentials would require a new
// import — confirm before changing.
func CreateGrpcConnect(cfg *common.BaseConfig) (*grpc.ClientConn, error) {
	opts := []grpc.DialOption{
		grpc.WithDefaultCallOptions(grpc.MaxCallSendMsgSize(MaxSendMsgSize)),
		grpc.WithInsecure(),
	}
	return grpc.Dial(cfg.RpcUrl, opts...)
}

// RecordArgs describes one progress line written by recordTask.
type RecordArgs struct {
	Log      *logrus.Logger
	TaskName string
	Msg      string
	Err      error // nil logs at Info level; non-nil appends the error and logs at Error level
	Export   bool  // true: export phase (takes precedence over Check)
	Check    bool  // consulted only when Export is false: true marks the verification phase
}

// recordTask writes one task-progress line to the given logger. The prefix
// marks the phase (export / check / import); a nil Err logs at Info level,
// otherwise the error text is appended and the line logs at Error level.
func recordTask(args RecordArgs) {
	var msg string
	if args.Err != nil {
		switch {
		case args.Export:
			msg = fmt.Sprintf("导出-[%22s]-%s-%s", args.TaskName, args.Msg, args.Err.Error())
		case args.Check:
			msg = fmt.Sprintf("校验-[%22s]-%s-%s", args.TaskName, args.Msg, args.Err.Error())
		default:
			msg = fmt.Sprintf("导入-[%22s]-%s-%s", args.TaskName, args.Msg, args.Err.Error())
		}
		args.Log.Error(msg)
		return
	}
	switch {
	case args.Export:
		msg = fmt.Sprintf("导出-[%22s]-%s", args.TaskName, args.Msg)
	case args.Check:
		msg = fmt.Sprintf("校验-[%22s]-%s", args.TaskName, args.Msg)
	default:
		msg = fmt.Sprintf("导入-[%22s]-%s", args.TaskName, args.Msg)
	}
	args.Log.Info(msg)
}

// makeAdapterLog creates the per-repository logger, writing to the path
// rendered from LogFileTmpl ({group}-{repo}.log under the export log dir).
func makeAdapterLog(cfg *common.BaseConfig, repoInfo *RepoInfo, exportPlatformName string) (log *logrus.Logger, err error) {
	filePath, err := FillParamIntoStringTmpl(TmplArgs{
		RepoName:           repoInfo.RepoName,
		RepoGroupName:      repoInfo.RepoGroupName,
		ExportDataDir:      cfg.ExportDataDir,
		ExportPlatformName: exportPlatformName,
		StringTmpl:         LogFileTmpl,
	})
	if err != nil {
		return nil, err
	}
	return common.NewLog(filePath)
}

// MkdirFromStringTmpl renders args.FilePathTmpl into a concrete file path,
// creates the path's parent directory (and any missing ancestors), and
// returns the rendered file path.
//
// Fix: the directory is now derived with filepath.Dir instead of slicing at
// the last "/" — the old code panicked (slice bounds out of range, index -1)
// whenever the rendered path contained no separator at all.
func MkdirFromStringTmpl(args DataArgs) (string, error) {
	tmplArgs := TmplArgs{
		ExportDataDir:      args.ExportDataDir,
		ExportPlatformName: args.ExportPlatformName,
		EnterpriseName:     args.EnterpriseName,
		FileName:           args.FileName,
		StringTmpl:         args.FilePathTmpl,
	}

	// Repository-scoped templates additionally need the repo identifiers.
	if args.RepoInfo != nil {
		tmplArgs.RepoId = args.RepoInfo.RepoId
		tmplArgs.RepoName = args.RepoInfo.RepoName
		tmplArgs.RepoGroupName = args.RepoInfo.RepoGroupName
		tmplArgs.EnterpriseId = args.RepoInfo.EnterpriseId
	}
	filePath, err := FillParamIntoStringTmpl(tmplArgs)
	if err != nil {
		return "", err
	}

	if err = os.MkdirAll(filepath.Dir(filePath), os.ModePerm); err != nil {
		return "", err
	}

	return filePath, nil
}

// DataArgs describes one proto data file: where it lives (path template plus
// its parameters) and, optionally, the message to (de)serialize.
type DataArgs struct {
	ExportDataDir      string
	ExportPlatformName string
	EnterpriseName     string
	FileName           string
	FilePathTmpl       string
	RepoInfo           *RepoInfo
	Data               proto.Message // nil: only the path/directory is handled, no (de)serialization
	WriteJson          bool          // true: protojson; false: binary proto wire format
}

// WriteProtoData makes sure the target directory exists and, when args.Data
// is non-nil, serializes it (protojson if args.WriteJson, binary proto
// otherwise) and writes the bytes to the rendered file path.
// With a nil Data only the directory is created.
// The rendered file path is returned in every case.
func WriteProtoData(args DataArgs) (string, error) {
	filePath, err := MkdirFromStringTmpl(args)
	if err != nil {
		return "", err
	}
	if args.Data == nil {
		// Directory-only mode.
		return filePath, nil
	}

	marshal := proto.Marshal
	if args.WriteJson {
		marshal = protojson.Marshal
	}
	payload, err := marshal(args.Data)
	if err != nil {
		return filePath, err
	}
	if err = os.WriteFile(filePath, payload, os.ModePerm); err != nil {
		return filePath, err
	}
	return filePath, nil
}

// ReadProtoData renders the data-file path from args and, when args.Data is
// non-nil, reads the file and deserializes its contents into args.Data
// (protojson if args.WriteJson, binary proto otherwise). With a nil Data the
// path is only rendered; nothing is read.
// It returns the rendered file path together with any error.
//
// Fix: EnterpriseId is now copied from RepoInfo, matching
// MkdirFromStringTmpl — the read path previously dropped it, so templates
// referencing {{.EnterpriseId}} would render differently on read and write.
func ReadProtoData(args DataArgs) (string, error) {
	tmplArgs := TmplArgs{
		ExportDataDir:      args.ExportDataDir,
		ExportPlatformName: args.ExportPlatformName,
		EnterpriseName:     args.EnterpriseName,
		FileName:           args.FileName,
		StringTmpl:         args.FilePathTmpl,
	}

	if args.RepoInfo != nil {
		tmplArgs.RepoId = args.RepoInfo.RepoId
		tmplArgs.RepoName = args.RepoInfo.RepoName
		tmplArgs.RepoGroupName = args.RepoInfo.RepoGroupName
		tmplArgs.EnterpriseId = args.RepoInfo.EnterpriseId
	}
	filePath, err := FillParamIntoStringTmpl(tmplArgs)
	if err != nil {
		return "", err
	}

	if args.Data != nil {
		// Existence check first, so a missing file surfaces as a stat error
		// (callers can use os.IsNotExist on it).
		if _, err = os.Stat(filePath); err != nil {
			return filePath, err
		}
		data, err := os.ReadFile(filePath)
		if err != nil {
			return filePath, err
		}
		if args.WriteJson {
			err = protojson.Unmarshal(data, args.Data)
		} else {
			err = proto.Unmarshal(data, args.Data)
		}
		if err != nil {
			return filePath, err
		}
	}

	return filePath, nil
}

// ReadRepoProgress reads the repository's progress file (export or import,
// depending on cfg.Export) and returns a human-readable summary listing the
// finished and unfinished tasks.
//
// Fixes: a failure from MkdirFromStringTmpl previously returned ("", nil),
// silently swallowing the error — it is now propagated; the quadratic
// string-concatenation loop is replaced by slice collection + strings.Join
// (identical output, same "、" separator).
func ReadRepoProgress(cfg *common.BaseConfig, repoInfo *RepoInfo) (string, error) {
	var progressFileName string
	if cfg.Export {
		progressFileName = JsonFileNameExportTaskProgress
	} else {
		progressFileName = JsonFileNameImportTaskProgress
	}

	// Render the progress-file path (this also ensures its directory exists).
	dataArgs := DataArgs{
		ExportDataDir:      cfg.ExportDataDir,
		ExportPlatformName: cfg.ExportPlatformName,
		FileName:           progressFileName,
		FilePathTmpl:       ExportDataFileTmpl,
		RepoInfo:           repoInfo,
	}
	filePath, err := MkdirFromStringTmpl(dataArgs)
	if err != nil {
		return "", err
	}

	if _, err = os.Stat(filePath); err != nil {
		return "", err
	}
	data, err := os.ReadFile(filePath)
	if err != nil {
		return filePath, err
	}

	var progress TaskProgressArray
	if err = json.Unmarshal(data, &progress); err != nil {
		return filePath, err
	}
	sort.Sort(progress)

	// Partition task names by completion, preserving sorted order.
	finished := make([]string, 0, len(progress))
	unfinished := make([]string, 0, len(progress))
	for _, val := range progress {
		if val.Finish {
			finished = append(finished, val.TaskName)
		} else {
			unfinished = append(unfinished, val.TaskName)
		}
	}
	finishTask := strings.Join(finished, "、")
	unFinishTask := strings.Join(unfinished, "、")

	if unFinishTask == "" {
		return fmt.Sprintf("仓库-[%s/%s]-任务已全部完成，已完成的任务:[%s]", repoInfo.RepoGroupName, repoInfo.RepoName, finishTask), nil
	}

	return fmt.Sprintf("仓库-[%s/%s]-已完成的任务:[%s]、未完成的任务:[%s]", repoInfo.RepoGroupName, repoInfo.RepoName, finishTask, unFinishTask), nil
}

// Len implements sort.Interface.
func (array RepoInfoArray) Len() int {
	return len(array)
}

// Less implements sort.Interface, ordering repositories by group name and
// then by repository name.
//
// Fixes: the old version compared the two names concatenated, which makes
// distinct pairs collide (e.g. ("a","bc") vs ("ab","c")), and used "<=",
// which reports Less(i,j) && Less(j,i) for equal keys — violating the strict
// transitive ordering sort.Interface requires of Less.
func (array RepoInfoArray) Less(i, j int) bool {
	if array[i].RepoGroupName != array[j].RepoGroupName {
		return array[i].RepoGroupName < array[j].RepoGroupName
	}
	return array[i].RepoName < array[j].RepoName
}

// Swap implements sort.Interface.
func (array RepoInfoArray) Swap(i, j int) {
	array[i], array[j] = array[j], array[i]
}

// Len implements sort.Interface.
func (array TaskProgressArray) Len() int {
	return len(array)
}

// Less implements sort.Interface, ordering progress entries by task name.
//
// Fix: uses strict "<" — the previous "<=" reported Less(i,j) && Less(j,i)
// for equal names, violating the strict transitive ordering sort.Interface
// requires of Less.
func (array TaskProgressArray) Less(i, j int) bool {
	return array[i].TaskName < array[j].TaskName
}

// Swap implements sort.Interface.
func (array TaskProgressArray) Swap(i, j int) {
	array[i], array[j] = array[j], array[i]
}
