package avicit.bdp.oss.utils.upload.service.impl;

import avicit.bdp.common.utils.BdpLogUtil;
import avicit.bdp.common.utils.ConfigUtils;
import avicit.bdp.core.constant.Constants;
import avicit.bdp.oss.dto.BucketAuthDto;
import avicit.bdp.oss.dto.BucketDto;
import avicit.bdp.oss.dto.ObjectDto;
import avicit.bdp.oss.service.object.ObjectService;
import avicit.bdp.oss.utils.auth.PermissionCheckUtils;
import avicit.bdp.oss.utils.common.AcitonEnum;
import avicit.bdp.oss.utils.upload.entity.OssUploadDto;
import avicit.bdp.oss.utils.upload.entity.TaskInfoDto;
import avicit.bdp.oss.utils.upload.entity.UploadDto;
import avicit.bdp.oss.utils.upload.service.AbstractUploadService;
import avicit.bdp.oss.utils.upload.utils.common.CommUtil;
import avicit.bdp.oss.utils.upload.utils.common.Constant;
import avicit.bdp.oss.utils.upload.utils.common.TaskStatusEnum;
import avicit.bdp.oss.utils.upload.utils.common.TaskTypeEnum;
import avicit.bdp.oss.utils.upload.utils.minio.OssUtil;
import avicit.bdp.oss.utils.upload.utils.redis.RedisLock;
import avicit.bdp.oss.utils.upload.utils.redis.RedisUtils;
import avicit.platform6.commons.utils.ComUtil;
import avicit.platform6.commons.utils.JsonHelper;
import avicit.platform6.core.context.ThreadContextHelper;
import avicit.platform6.core.exception.BusinessException;
import avicit.platform6.core.rest.msg.QueryRespBean;
import com.fasterxml.jackson.core.type.TypeReference;
import com.github.pagehelper.Page;
import io.minio.MinioClient;
import io.minio.ObjectStat;
import org.apache.commons.collections4.CollectionUtils;
import org.apache.commons.io.IOUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.stereotype.Service;

import javax.annotation.Resource;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.text.SimpleDateFormat;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;

/**
 * @金航数码科技有限责任公司
 * @作者：developer
 * @邮箱：developer@avic-digital.com
 * @创建时间： 2020-12-09
 * @类说明：OssUpload
 * @修改记录：
 * @注意事项：
 * @主要功能：OSS断点续传/分片上传实现类
 */
@Service
public class OssUploadService extends AbstractUploadService {
    private static final Logger logger = LoggerFactory.getLogger(OssUploadService.class);

    // Root directory for local temp chunk files (default from Constant).
    private String projectPath = Constant.DEFAULT_TMP_FILE_PATH;

    @Resource
    private ObjectService objectService;

    @Resource
    RedisUtils redisUtils;

    /**
     * Minio connection info (endpoint / credentials).
     * NOTE(review): no assignment is visible in this chunk — presumably
     * injected or populated elsewhere (e.g. via ConfigUtils); confirm.
     */
    private String endPoint;
    private String accessKey;
    private String secretKey;

    /**
     * Suffix of the Redis queue holding chunks pending local merge.
     */
    private String queueSuffix = "DEV";

    /**
     * Minio client; see getMinioClient() usage below — presumably lazily
     * initialized elsewhere in this class.
     */
    MinioClient minioClient;

    /**
     * Validates the upload request and checks the caller's permission to put
     * the target object.
     *
     * @param uploadDto must be a non-null OssUploadDto with a positive chunk
     *                  count and non-null bucketName/objectName
     */
    @Override
    public void validParameter(UploadDto uploadDto) {
        // Basic sanity checks on the request itself.
        CommUtil.validateObjectNotNull(uploadDto, "参数uploadDto为null");

        if (uploadDto.getChunks() <= 0) {
            CommUtil.printLogAndThrowException("参数uploadDto中chunks非法");
        }
        if (!(uploadDto instanceof OssUploadDto)) {
            CommUtil.printLogAndThrowException("uploadDto不是OssUploadDto类型.");
        }

        OssUploadDto ossDto = (OssUploadDto) uploadDto;
        CommUtil.validateStringNotNull(ossDto.getBucketName(), "参数uploadDto中bucketName为null");
        CommUtil.validateStringNotNull(ossDto.getObjectName(), "参数uploadDto中objectName为null");

        // Permission / classification check against the bucket and, when the
        // object already exists, its bucket-auth record.
        String prefix = ossDto.getPrefix() == null ? Constants.STRING_BLANK : ossDto.getPrefix();
        ObjectDto objectDto = this.objectService.queryObjectDto(ossDto.getBucketName(), prefix, ossDto.getObjectName(), false);
        BucketDto bucketDto = this.objectService.queryBucketDto(ossDto.getBucketName());
        BucketAuthDto bucketAuthDto = (objectDto == null)
                ? null
                : this.objectService.queryBucketAuthByObjectId(objectDto.getId());

        PermissionCheckUtils.checkPerm(bucketDto, objectDto, bucketAuthDto, AcitonEnum.PUT_OBJECT);
    }

    /**
     * Creates a new upload task holder with a freshly generated id; nothing
     * is persisted at this point.
     *
     * @param uploadDto upload request (unused here)
     * @return a TaskInfoDto carrying only the new task id
     */
    @Override
    public TaskInfoDto initiateUploadTask(UploadDto uploadDto) {
        String newTaskId = ComUtil.getId();
        return TaskInfoDto.builder().id(newTaskId).build();
    }

    /**
     * Checks whether this chunk's metadata already exists in Redis, i.e. the
     * chunk was uploaded before (used to support resumed uploads).
     *
     * @param uploadDto chunk request (must be an OssUploadDto)
     * @return true when the chunk metadata is present in Redis
     */
    @Override
    public boolean checkChunkExist(UploadDto uploadDto) {
        // Idiomatic form of the former if/return-true/return-false chain.
        return getChunkFromRedis(uploadDto) != null;
    }

    /**
     * Persists a single uploaded chunk: registers the task (idempotent), then
     * writes the chunk bytes to local disk for later merging.
     *
     * @param uploadDto chunk request (must be an OssUploadDto)
     * @return always null; failures are reported via exception
     */
    @Override
    public String uploadSingleChunk(UploadDto uploadDto) {
        OssUploadDto ossUploadDto = (OssUploadDto) uploadDto;
        String tmpFileName = CommUtil.buildTmpFileName(ossUploadDto.getObjectName(),
                ossUploadDto.getChunkNo(), ossUploadDto.getTaskId());

        try {
            // step1: register a task for this upload (compatible with the legacy task API)
            saveTaskInfo(ossUploadDto);

            // step2: persist the chunk to local disk
            saveChunkFile2Disk(ossUploadDto);
        } catch (Exception e) {
            processUploadFail(ossUploadDto);

            String message = String.format(
                    "上传分片[%s-%s-%s]失败, errMsg=%s",
                    ossUploadDto.getBucketName(), tmpFileName, ossUploadDto.getChunkNo(), e.getMessage());
            // Log with the cause so the stack trace is not lost
            // (was logger.error(message) only).
            logger.error(message, e);
            CommUtil.printLogAndThrowException(message);
        }

        return null;
    }

    /**
     * Stores one uploaded chunk's metadata in Redis and refreshes the task's
     * upload progress.
     *
     * @param uploadDto chunk request (must be an OssUploadDto)
     */
    @Override
    public void saveChunkMeta(UploadDto uploadDto) {
        OssUploadDto chunkDto = (OssUploadDto) uploadDto;
        String chunkJson = JsonHelper.getInstance().writeValueAsString(chunkDto);

        // step1: persist this chunk's metadata under the upload's chunk-info hash
        byte[] hashKey = buildChunkInfoKey(chunkDto);
        byte[] field = CommUtil.encode(chunkDto.getChunkNo().toString());
        redisUtils.setHash(hashKey, field, CommUtil.encode(chunkJson));

        // step2: the first chunk seeds the merge queue; a background thread
        // consumes the queue and appends chunks into the final temp file
        if (chunkDto.getChunkNo() == 1) {
            redisUtils.lpush(buildQueueKey(), chunkJson);
        }

        // step3: update the task's progress
        setUploadProgress(chunkDto, TaskStatusEnum.UPLOAD_CHUNK);
    }

    /**
     * Reports whether every chunk of the upload has been recorded in Redis.
     *
     * @param uploadDto upload request (must be an OssUploadDto)
     * @return true when the chunk-info hash holds one entry per expected chunk
     */
    @Override
    public boolean checkChunkCompleted(UploadDto uploadDto) {
        // step1: load all chunk entries for this upload from Redis
        OssUploadDto ossUploadDto = (OssUploadDto) uploadDto;
        Map<byte[], byte[]> result = redisUtils.getHashAll(buildChunkInfoKey(ossUploadDto));

        // step2: complete exactly when the stored entry count equals the
        // expected chunk count (idiomatic form of the former if/return chain).
        return result != null && ossUploadDto.getChunks().compareTo(result.size()) == 0;
    }

    /**
     * Blocks until all chunks are uploaded and merged into the local final
     * temp file, then uploads that file to OSS. Polls once per second.
     *
     * @param uploadDto upload request (must be an OssUploadDto)
     */
    @Override
    public void mergeAllChunks(UploadDto uploadDto) {
        OssUploadDto ossUploadDto = (OssUploadDto) uploadDto;

        while (true) {
            try {
                // Wait until every chunk has arrived.
                if (!checkChunkCompleted(uploadDto)) {
                    Thread.sleep(1000);
                    continue;
                }

                // Abort when the task was cancelled (record gone) or failed.
                // Bug fix: these messages used String.format with SLF4J-style
                // "{}" placeholders, which String.format does not substitute —
                // changed to %s so the ids actually appear in the message.
                TaskInfoDto taskInfoDto = getTaskInfo(ossUploadDto);
                if (taskInfoDto == null) {
                    String message = String.format("上传任务已取消，bucket=%s, prefix=%s, object=%s",
                            ossUploadDto.getBucketName(), ossUploadDto.getPrefix(), ossUploadDto.getObjectName());
                    logger.warn(message);
                    CommUtil.printLogAndThrowException(message);
                }
                if (taskInfoDto.getStatus().getStatus() == TaskStatusEnum.FAIL) {
                    String message = String.format("上传任务失败，bucket=%s, prefix=%s, object=%s",
                            ossUploadDto.getBucketName(), ossUploadDto.getPrefix(), ossUploadDto.getObjectName());
                    logger.warn(message);
                    CommUtil.printLogAndThrowException(message);
                }

                // Upload once the merged file exists with the expected size.
                String tmpFileDir = buildTmpFileDir(ossUploadDto);
                File finalTmpFile = new File(tmpFileDir, ossUploadDto.getObjectName());
                if (finalTmpFile.exists() && finalTmpFile.length() == ossUploadDto.getObjectSize()) {
                    logger.info("开始上传最终临时文件, bucket={}, prefix={}, object={}...",
                            ossUploadDto.getBucketName(), ossUploadDto.getPrefix(), ossUploadDto.getObjectName());

                    try (InputStream in = new FileInputStream(finalTmpFile)) {
                        OssUtil.putObject(
                                getMinioClient(),
                                ossUploadDto.getBucketName(),
                                OssUtil.buildFileName(ossUploadDto.getPrefix(), ossUploadDto.getObjectName()),
                                in);
                    }

                    break;
                }

                Thread.sleep(1000);
            } catch (Exception e) {
                // Bug fix: was e.printStackTrace(); log with the cause instead.
                logger.error("合并分片失败,errMsg={}", e.getMessage(), e);
                break;
            }
        }

        logger.info("上传最终临时文件完成, bucket={}, prefix={}, object={}...",
                ossUploadDto.getBucketName(), ossUploadDto.getPrefix(), ossUploadDto.getObjectName());
    }

    /**
     * 清理碎片
     *
     * Post-success cleanup: marks the task SUCCESS, removes local temp files
     * and chunk metadata, and schedules the task record for expiry.
     *
     * @param uploadDto completed upload request (must be an OssUploadDto)
     */
    @Override
    public void cleanFragment(UploadDto uploadDto) {
        OssUploadDto ossUploadDto = (OssUploadDto) uploadDto;

        // step1: mark the task as successfully uploaded
        setUploadProgress(ossUploadDto, TaskStatusEnum.SUCCESS);

        // step2: delete local temp chunk files
        delTmpFiles(ossUploadDto);

        // step3: drop chunk metadata from Redis
        delChunkMeta(ossUploadDto);

        // step4: schedule task metadata deletion.
        // NOTE(review): the full dto JSON is passed where setTaskExpireTime
        // expects a taskId, while processUploadFail passes the bare taskId —
        // confirm which format the expiration handler expects; one of the
        // two call sites looks inconsistent.
        setTaskExpireTime(JsonHelper.getInstance().writeValueAsString(ossUploadDto), Constant.TASK_SUCCESS_EXPIRE_TIME);
    }

    /**
     * Failure handler: marks the task FAIL and lets its Redis metadata expire
     * instead of deleting it immediately.
     *
     * @param uploadDto failing upload request (must be an OssUploadDto)
     */
    @Override
    public void processUploadFail(UploadDto uploadDto) {
        OssUploadDto failedDto = (OssUploadDto) uploadDto;
        String taskId = failedDto.getTaskId();

        // step1: flip the task status to FAIL
        setUploadProgress(failedDto, TaskStatusEnum.FAIL);

        // step2: put a TTL on the chunk-info hash
        redisUtils.setExpireTime(CommUtil.buildChunkInfoKey(taskId), Constant.REDIS_KEY_EXPIRE_TIME_SECONDS);

        // step3: schedule the task record itself for expiry.
        // NOTE(review): cleanFragment() passes the full dto JSON here while
        // this path passes the bare taskId — confirm which format the expiry
        // listener expects.
        setTaskExpireTime(taskId, Constant.REDIS_KEY_EXPIRE_TIME_SECONDS);
    }

    /**
     * Returns the chunk numbers already uploaded for a task; queried by the
     * front-end (VUE) to resume an interrupted upload.
     *
     * @param taskId     upload task id
     * @param bucketName target bucket
     * @param prefix     object prefix (may be null)
     * @param objectName object name
     * @param fileMd5    MD5 of the whole file
     * @return map with key {@code Constant.UPLOADED_STR} mapped to the sorted
     *         list of uploaded chunk numbers (empty list when none)
     */
    public Map<String, List<Integer>> getUploadedChunks(String taskId, String bucketName, String prefix,
                                                        String objectName, String fileMd5) {
        // step1: argument checks
        CommUtil.validateStringNotNull(taskId, "taskId为null.");
        CommUtil.validateStringNotNull(bucketName, "bucketName为null.");
        CommUtil.validateStringNotNull(objectName, "objectName为null.");
        CommUtil.validateStringNotNull(fileMd5, "fileMd5为null.");

        // step2: load every chunk entry for this upload from Redis
        Map<String, List<Integer>> map = new HashMap<>();
        List<Integer> chunkList = new ArrayList<>();

        OssUploadDto ossUploadDto =
                OssUploadDto.builder()
                        .taskId(taskId)
                        .bucketName(bucketName)
                        .prefix(prefix)
                        .objectName(objectName)
                        .fileMd5(fileMd5)
                        .build();
        Map<byte[], byte[]> result = redisUtils.getHashAll(buildChunkInfoKey(ossUploadDto));

        // step3: no chunk info stored yet -> empty result
        if (result == null || result.isEmpty()) {
            logger.info("Redis中无对应分片信息, taskId:{},bucket:{},prefix:{},object:{}.", taskId, bucketName, prefix, objectName);
            map.put(Constant.UPLOADED_STR, chunkList);
            return map;
        }

        // step4: parse each stored entry. The SimpleDateFormat is hoisted out
        // of the loop: constructing one per entry was wasted work, and the
        // instance stays confined to this thread so reuse is safe.
        SimpleDateFormat dateFormat = new SimpleDateFormat(Constant.DATA_FORMAT);
        for (Map.Entry<byte[], byte[]> entry : result.entrySet()) {
            String jsonData = CommUtil.encode(entry.getValue());
            OssUploadDto queryDto = JsonHelper.getInstance().readValue(
                    jsonData,
                    dateFormat,
                    new TypeReference<OssUploadDto>() {
                    });

            chunkList.add(queryDto.getChunkNo());
        }

        Collections.sort(chunkList);
        map.put(Constant.UPLOADED_STR, chunkList);

        return map;
    }

    /**
     * Public API: removes an upload task — validates the request, then deletes
     * the local temp files, the chunk metadata and the Redis task record.
     *
     * @param uploadDto task to delete (must be an OssUploadDto with taskId,
     *                  bucketName, objectName and fileMd5 set)
     */
    public void deleteTaskInfo(UploadDto uploadDto) {
        // step1: argument checks
        CommUtil.validateObjectNotNull(uploadDto, "uploadDto为null.");

        OssUploadDto ossDto = (OssUploadDto) uploadDto;
        CommUtil.validateStringNotNull(ossDto.getTaskId(), "taskId为null.");
        CommUtil.validateStringNotNull(ossDto.getBucketName(), "bucketName为null.");
        CommUtil.validateStringNotNull(ossDto.getObjectName(), "objectName为null.");
        CommUtil.validateStringNotNull(ossDto.getFileMd5(), "fileMd5为null.");

        // step2: delete temp files, chunk metadata and the task record
        delTmpFiles(ossDto);
        delChunkMeta(ossDto);
        delTaskInfo(ossDto);
    }

    /**
     * Pages through all upload tasks for the given taskId.
     *
     * @param pageNo   1-based page number; both paging args default
     *                 (1 / 10) when either is null
     * @param pageSize page size
     * @param taskId   task id to filter on
     * @return paged response of TaskInfoDto
     */
    public QueryRespBean<TaskInfoDto> listAllTask(Integer pageNo, Integer pageSize, String taskId) {
        if (pageNo == null || pageSize == null) {
            pageNo = 1;
            pageSize = 10;
        }

        // Parameterized Page (was the raw type new Page(...)).
        Page<TaskInfoDto> page = new Page<>(pageNo, pageSize);
        QueryRespBean<TaskInfoDto> queryRespBean = new QueryRespBean<>();

        // step1: query all tasks owned by the current user
        List<TaskInfoDto> taskInfoDtos = listAllTaskInner(pageNo, pageSize, taskId);
        page.addAll(taskInfoDtos);

        // step2: build the response (single logical page; total = page size)
        page.setPages(1);
        page.setTotal(taskInfoDtos.size());
        queryRespBean.setResult(page);

        BdpLogUtil.log4Query(queryRespBean);
        return queryRespBean;
    }

    /**
     * 保存文件元数据
     *
     * Persists the uploaded object's metadata via ObjectService once the
     * upload has completed.
     *
     * @param uploadDto completed upload request (must be an OssUploadDto)
     */
    @Override
    public void saveObjectInfo(UploadDto uploadDto) {
        objectService.saveUploadedInfo((OssUploadDto) uploadDto);
    }

    /**
     * Expiry hook: removes local temp files and the Redis task record once an
     * upload's marker key has expired. Null-safe.
     *
     * @param ossUploadDto expired upload descriptor; ignored when null
     */
    public void cleanUpExpiredData(OssUploadDto ossUploadDto) {
        if (ossUploadDto != null) {
            delTmpFiles(ossUploadDto);
            delTaskInfo(ossUploadDto);
        }
    }

    /**
     * Reports whether the complete object already exists in OSS with the
     * expected size (fast path that skips re-uploading).
     *
     * @param uploadDto upload request (must be an OssUploadDto)
     * @return true when the object is already fully present in OSS
     */
    @Override
    public boolean checkFileUploadCompleted(UploadDto uploadDto) {
        OssUploadDto ossUploadDto = (OssUploadDto) uploadDto;
        return isChunkInOss(ossUploadDto);
    }

    /**
     * Acquires the distributed upload lock, retrying once per second for up
     * to ~100 attempts.
     *
     * @param uploadDto upload request used to derive the lock name
     */
    @Override
    public void dlmLock(UploadDto uploadDto) {
        String lockFile = buildLockFileName(uploadDto);

        int attempts = 0;
        boolean locked = RedisLock.getInstance().lock(lockFile);
        while (!locked && attempts++ < 100) {
            try {
                Thread.sleep(1000);
            } catch (InterruptedException e) {
                // Bug fix: restore the interrupt flag and stop retrying
                // instead of swallowing the interruption via printStackTrace.
                Thread.currentThread().interrupt();
                break;
            }
            locked = RedisLock.getInstance().lock(lockFile);
        }

        if (!locked) {
            // The original proceeded silently without the lock after the
            // retries were exhausted; keep that behavior for callers but make
            // the condition visible in the logs.
            logger.warn("获取分布式锁失败, lockFile={}", lockFile);
        }
    }

    /**
     * Releases the distributed upload lock derived from the request.
     *
     * @param uploadDto upload request used to derive the lock name
     */
    @Override
    public void dlmUnlock(UploadDto uploadDto) {
        RedisLock.getInstance().unlock(buildLockFileName(uploadDto));
    }

    /**
     * Background worker loop: pops chunk descriptors from the Redis merge
     * queue and appends local chunk files into the final temp file. Runs
     * until the thread is interrupted; sleeps 1s between iterations.
     */
    public void mergeTmpFile() {
        logger.info("OSS开始执行异步方法，合并临时分片文件...");

        while (true) {
            try {
                // step1: nothing queued -> wait (the sleep happens in finally)
                if (redisUtils.llen(buildQueueKey()) <= 0) {
                    continue;
                }

                // step2: fetch the next chunk descriptor
                OssUploadDto ossUploadDto = getChunkInfoFromQueue();
                if (ossUploadDto == null) {
                    continue;
                }

                logger.warn("合并分片[{}]-[{}]", ossUploadDto.getObjectName(), ossUploadDto.getChunkNo());

                // step3-1: task record gone -> upload was deleted, skip merge.
                // Bug fix: the original dereferenced the null taskInfoDto in
                // this log statement, guaranteeing an NPE — log the dto's
                // fields instead.
                TaskInfoDto taskInfoDto = getTaskInfo(ossUploadDto);
                if (taskInfoDto == null) {
                    logger.warn("上传任务已完成或取消,bucket={},prefix={},object={}",
                            ossUploadDto.getBucketName(), ossUploadDto.getPrefix(), ossUploadDto.getObjectName());
                    continue;
                }
                // step3-2: finished or failed tasks need no merging
                switch (taskInfoDto.getStatus().getStatus()) {
                    case SUCCESS:
                    case MERGE_CHUNK:
                    case FAIL:
                        logger.warn("上传任务已完成或取消,taskId={},bucket={},prefix={},object={}",
                                taskInfoDto.getId(), taskInfoDto.getBucketName(),
                                taskInfoDto.getPrefix(), taskInfoDto.getObjectName());
                        break;
                    case UPLOAD_CHUNK:
                        createTmpFile(ossUploadDto);
                        break;
                    default:
                        logger.error("没有这个状态：{}", taskInfoDto.getStatus().getStatus());
                }
            } catch (Exception e) {
                // Bug fix: include the cause so the stack trace is logged.
                logger.error("后台合并临时文件失败,errMsg={}", e.getMessage(), e);
            } finally {
                try {
                    Thread.sleep(1000);
                } catch (InterruptedException e) {
                    // An interrupted worker should stop instead of spinning
                    // (was e.printStackTrace() and continue).
                    Thread.currentThread().interrupt();
                    return;
                }
            }
        }
    }

    /**
     * Loads one page of task infos for the given taskId from Redis.
     *
     * @param pageNo   1-based page number
     * @param pageSize page size
     * @param taskId   task id to filter on
     * @return the resolved task infos for that page
     */
    private List<TaskInfoDto> listAllTaskInner(int pageNo, int pageSize, String taskId) {
        return listTaskInfo(taskId, listTaskKeyById(pageNo, pageSize, taskId));
    }

    /**
     * Fetches one page of task member keys from the task zset.
     *
     * @param pageNo   1-based page number
     * @param pageSize page size
     * @param taskId   task id whose zset is queried
     * @return the keys for the requested page; an empty (mutable) set when none
     */
    private Set<byte[]> listTaskKeyById(int pageNo, int pageSize, String taskId) {
        int start = (pageNo - 1) * pageSize;
        int stop = pageNo * pageSize - 1;

        Set<byte[]> keys = redisUtils.zrangeByScore(buildTaskInfoKey(taskId, Constant.ZSET), start, stop);
        return CollectionUtils.isEmpty(keys) ? new HashSet<>() : keys;
    }

    /**
     * Resolves each task key to its TaskInfoDto, dropping keys whose record
     * is missing.
     *
     * @param taskId   task id the keys belong to
     * @param taskKeys Redis member keys to resolve
     * @return resolved task infos, in reversed iteration order (kept from the
     *         original implementation)
     */
    private List<TaskInfoDto> listTaskInfo(String taskId, Set<byte[]> taskKeys) {
        List<TaskInfoDto> infos = new ArrayList<>();

        for (byte[] taskKey : taskKeys) {
            TaskInfoDto info = getTaskInfo(taskId, taskKey);
            if (info != null) {
                infos.add(info);
            }
        }

        Collections.reverse(infos);
        return infos;
    }

    /**
     * Removes this upload's entry from both the task hash (lookup) and the
     * task zset (paging).
     *
     * @param ossUploadDto upload whose task record should be removed
     */
    private void delTaskInfo(OssUploadDto ossUploadDto) {
        String taskId = ossUploadDto.getTaskId();
        byte[] member = CommUtil.encode(buildObjectKey(ossUploadDto));

        redisUtils.delHash(buildTaskInfoKey(taskId, Constant.HASH), member);
        redisUtils.zRem(buildTaskInfoKey(taskId, Constant.ZSET), member);
    }

    /**
     * 删除分片元数据
     *
     * Deletes the whole chunk-info hash for this upload from Redis.
     *
     * @param ossUploadDto upload whose chunk metadata should be removed
     */
    private void delChunkMeta(OssUploadDto ossUploadDto) {
        redisUtils.delKey(buildChunkInfoKey(ossUploadDto));
    }

    /**
     * Deletes the upload's whole local temp directory on a background thread
     * so the caller is not blocked by disk I/O. Null-safe.
     *
     * @param ossUploadDto upload whose temp directory should be removed
     */
    private void delTmpFiles(OssUploadDto ossUploadDto) {
        if (ossUploadDto == null) {
            return;
        }

        // step1: resolve the temp directory for this upload
        final String tmpFileDir = buildTmpFileDir(ossUploadDto);
        logger.warn("开始删除所有临时文件,taskId={},bucket={},prefix={},object={},dir={}...",
                ossUploadDto.getTaskId(), ossUploadDto.getBucketName(),
                ossUploadDto.getPrefix(), ossUploadDto.getObjectName(), tmpFileDir);

        // step2: delete asynchronously on a one-off worker thread
        Thread cleaner = new Thread(new Runnable() {
            @Override
            public void run() {
                delTmpDir(new File(tmpFileDir));
                logger.warn("删除临时文件{}成功.", tmpFileDir);
            }
        });
        cleaner.start();
    }

    /**
     * Recursively deletes a file or directory tree rooted at {@code file}.
     *
     * @param file file or directory to delete
     */
    private void delTmpDir(File file) {
        if (file.isDirectory()) {
            File[] children = file.listFiles();
            // Bug fix: listFiles() returns null on I/O error (or when the
            // directory vanishes concurrently); the original NPE'd here.
            if (children != null) {
                for (File child : children) {
                    delTmpDir(child);
                }
            }
        }
        // Both branches of the original if/else ended in delete(); collapsed.
        file.delete();
    }

    /**
     * Checks whether the object already exists in OSS with the expected size.
     *
     * @param ossUploadDto upload identifying bucket/prefix/object and size
     * @return true when the stat result exists and its length matches
     */
    private boolean isChunkInOss(OssUploadDto ossUploadDto) {
        ObjectStat stat = OssUtil.statObject(
                getMinioClient(),
                ossUploadDto.getBucketName(),
                OssUtil.buildFileName(ossUploadDto.getPrefix(), ossUploadDto.getObjectName()));

        return stat != null && stat.length() == ossUploadDto.getObjectSize();
    }

    /**
     * Looks up one chunk's metadata in Redis.
     *
     * @param uploadDto chunk request (must be an OssUploadDto)
     * @return the deserialized chunk descriptor, or null when absent
     */
    private OssUploadDto getChunkFromRedis(UploadDto uploadDto) {
        OssUploadDto ossUploadDto = (OssUploadDto) uploadDto;

        // step1: read the raw hash entry for this chunk number
        byte[] raw = redisUtils.getHash(
                buildChunkInfoKey(ossUploadDto),
                CommUtil.encode(ossUploadDto.getChunkNo().toString()));
        if (raw == null || raw.length == 0) {
            return null;
        }

        // step2: decode the bytes back to JSON and deserialize
        // (CommUtil.encode(byte[]) converts bytes to a String here)
        String jsonData = CommUtil.encode(raw);
        if (jsonData == null) {
            return null;
        }

        return JsonHelper.getInstance().readValue(
                jsonData,
                new SimpleDateFormat(Constant.DATA_FORMAT),
                new TypeReference<OssUploadDto>() {
                });
    }

    /**
     * Recomputes and persists the task's status/progress in Redis.
     * Synchronized to serialize concurrent read-modify-write of the task hash
     * within this JVM.
     *
     * @param ossUploadDto   upload whose task is updated
     * @param taskStatusEnum status to record; only UPLOAD_CHUNK carries a
     *                       progress ratio
     * @throws BusinessException on an unrecognized status value
     */
    private synchronized void setUploadProgress(OssUploadDto ossUploadDto, TaskStatusEnum taskStatusEnum) {
        // step1: the task record must exist
        TaskInfoDto taskInfoDto = getTaskInfo(ossUploadDto);
        CommUtil.validateObjectNotNull(taskInfoDto, String.format("查询task[%s]失败", ossUploadDto.getTaskId()));

        // step2: compute the new status
        TaskInfoDto.TaskStatus status = new TaskInfoDto.TaskStatus(taskStatusEnum, null);
        switch (taskStatusEnum) {
            case UPLOAD_CHUNK:
                List<Integer> uploaded = getUploadedChunks(
                        ossUploadDto.getTaskId(),
                        ossUploadDto.getBucketName(),
                        ossUploadDto.getPrefix(),
                        ossUploadDto.getObjectName(),
                        ossUploadDto.getFileMd5())
                        .get(Constant.UPLOADED_STR);
                status.setProgress(CommUtil.calRatio(uploaded.size(), ossUploadDto.getChunks()));
                break;
            case MERGE_CHUNK:
            case SUCCESS:
            case FAIL:
                // terminal/transition states carry no ratio
                break;
            default:
                throw new BusinessException("没有查找到该文件的进度信息");
        }
        taskInfoDto.setStatus(status);

        // step3: write the updated task back to Redis
        redisUtils.setHash(
                buildTaskInfoKey(ossUploadDto.getTaskId(), Constant.HASH),
                CommUtil.encode(buildObjectKey(ossUploadDto)),
                CommUtil.encode(JsonHelper.getInstance().writeValueAsString(taskInfoDto)));
    }

    /**
     * Reads this upload's TaskInfoDto from the Redis task hash.
     *
     * @param ossUploadDto upload identifying the task entry
     * @return the task info, or null when not stored
     */
    private TaskInfoDto getTaskInfo(OssUploadDto ossUploadDto) {
        byte[] raw = redisUtils.getHash(
                buildTaskInfoKey(ossUploadDto.getTaskId(), Constant.HASH),
                CommUtil.encode(buildObjectKey(ossUploadDto)));

        String jsonData = CommUtil.encode(raw);
        return jsonData == null
                ? null
                : JsonHelper.getInstance().readValue(
                        jsonData,
                        new SimpleDateFormat(Constant.DATA_FORMAT),
                        new TypeReference<TaskInfoDto>() {
                        });
    }

    /**
     * Reads a TaskInfoDto from the Redis task hash by its raw member key.
     *
     * @param taskId  task id whose hash is queried
     * @param taskKey raw member key within the hash
     * @return the task info, or null when not stored
     */
    private TaskInfoDto getTaskInfo(String taskId, byte[] taskKey) {
        byte[] raw = redisUtils.getHash(buildTaskInfoKey(taskId, Constant.HASH), taskKey);

        String jsonData = CommUtil.encode(raw);
        return jsonData == null
                ? null
                : JsonHelper.getInstance().readValue(
                        jsonData,
                        new SimpleDateFormat(Constant.DATA_FORMAT),
                        new TypeReference<TaskInfoDto>() {
                        });
    }

    /**
     * Stores a marker key with a TTL; an expiry listener is expected to clean
     * the task up when the key expires (~30s after success, ~2 days after
     * failure, per the constants used by callers).
     *
     * @param taskId     task identifier stored as the key's value
     * @param expireTime TTL in seconds
     */
    private void setTaskExpireTime(String taskId, long expireTime) {
        String expireKey = Constant.OSS_EXPIRE_DEL_KEY + Constant.AND + taskId;

        redisUtils.setValueWithExpireTime(
                CommUtil.encode(expireKey),
                CommUtil.encode(taskId),
                expireTime);
    }

    /**
     * Validates the request and registers its task in Redis; a no-op when the
     * task already exists. Synchronized to prevent duplicate registration.
     *
     * @param uploadDto upload request (must be an OssUploadDto)
     * @return the newly created task info, or null when it already existed
     */
    private synchronized TaskInfoDto saveTaskInfo(UploadDto uploadDto) {
        validParameter(uploadDto);

        OssUploadDto ossUploadDto = (OssUploadDto) uploadDto;

        // step1: already registered -> nothing to do
        if (getTaskInfo(ossUploadDto) != null) {
            return null;
        }

        // step2: build and persist a fresh task record
        TaskInfoDto taskInfoDto = TaskInfoDto.builder()
                .id(ossUploadDto.getTaskId())
                .userId(ThreadContextHelper.getUserId())
                .bucketName(ossUploadDto.getBucketName())
                .prefix(ossUploadDto.getPrefix())
                .objectName(ossUploadDto.getObjectName())
                .objectSize(ossUploadDto.getObjectSize())
                .chunks(ossUploadDto.getChunks())
                .type(TaskTypeEnum.UPLOAD)
                .status(new TaskInfoDto.TaskStatus(TaskStatusEnum.UPLOAD_CHUNK, "0.0%"))
                .build();
        saveTaskInfo2Redis(taskInfoDto, ossUploadDto);

        return taskInfoDto;
    }

    /**
     * Persists a task record into both the task zset (used for paging) and
     * the task hash (used for lookup).
     *
     * @param taskInfoDto  record to store
     * @param ossUploadDto upload whose object key identifies the entry
     */
    private void saveTaskInfo2Redis(TaskInfoDto taskInfoDto, OssUploadDto ossUploadDto) {
        byte[] member = CommUtil.encode(buildObjectKey(ossUploadDto));
        byte[] payload = CommUtil.encode(JsonHelper.getInstance().writeValueAsString(taskInfoDto));

        redisUtils.zAdd(buildTaskInfoKey(taskInfoDto.getId(), Constant.ZSET), 1, member);
        redisUtils.setHash(buildTaskInfoKey(taskInfoDto.getId(), Constant.HASH), member, payload);
    }

    /**
     * Builds the distributed-lock name: bucket-prefix-object-md5.
     *
     * @param uploadDto upload request (must be an OssUploadDto)
     * @return hyphen-joined lock name
     */
    private String buildLockFileName(UploadDto uploadDto) {
        OssUploadDto ossUploadDto = (OssUploadDto) uploadDto;

        // StringBuilder instead of StringBuffer: a method-local builder needs
        // no synchronization.
        return new StringBuilder()
                .append(ossUploadDto.getBucketName())
                .append(Constant.HYPHEN)
                .append(ossUploadDto.getPrefix())
                .append(Constant.HYPHEN)
                .append(ossUploadDto.getObjectName())
                .append(Constant.HYPHEN)
                .append(ossUploadDto.getFileMd5())
                .toString();
    }

    /**
     * Builds the Redis key of the chunk-info hash for one upload.
     *
     * @param ossUploadDto upload identifying the hash
     * @return encoded key bytes
     */
    private byte[] buildChunkInfoKey(OssUploadDto ossUploadDto) {
        String key = Constant.OSS_UPLOAD_CHUNK_INFO_KEY + Constant.HYPHEN + buildObjectKey(ossUploadDto);
        return CommUtil.encode(key);
    }

    /**
     * Builds the Redis key of the task-info structure (hash or zset,
     * selected by the suffix).
     *
     * @param taskId task id the key belongs to
     * @param suffix structure suffix ({@code Constant.HASH} or {@code Constant.ZSET})
     * @return encoded key bytes
     */
    private byte[] buildTaskInfoKey(String taskId, String suffix) {
        String key = Constant.OSS_UPLOAD_TASK_INFO_KEY
                + Constant.HYPHEN + taskId
                + Constant.HYPHEN + suffix;
        return CommUtil.encode(key);
    }

    /**
     * Builds the per-object member key: taskId-bucket-prefix-object.
     *
     * @param ossUploadDto upload whose identity fields are joined
     * @return hyphen-joined key string
     */
    private String buildObjectKey(OssUploadDto ossUploadDto) {
        return ossUploadDto.getTaskId()
                + Constant.HYPHEN + ossUploadDto.getBucketName()
                + Constant.HYPHEN + ossUploadDto.getPrefix()
                + Constant.HYPHEN + ossUploadDto.getObjectName();
    }

    /**
     * Pops the next chunk descriptor from the tail of the merge queue.
     *
     * @return the descriptor, or null when the queue is empty
     */
    private OssUploadDto getChunkInfoFromQueue() {
        String jsonData = redisUtils.rpop(buildQueueKey());

        return jsonData == null
                ? null
                : JsonHelper.getInstance().readValue(
                        jsonData,
                        new SimpleDateFormat(Constant.DATA_FORMAT),
                        new TypeReference<OssUploadDto>() {
                        });
    }

    /**
     * Writes one uploaded chunk from the request's multipart file to a local
     * temp file.
     *
     * @param uploadDto chunk request (must be an OssUploadDto)
     * @return the temp file name the chunk was written to
     */
    private String saveChunkFile2Disk(UploadDto uploadDto) {
        OssUploadDto ossUploadDto = (OssUploadDto) uploadDto;

        // step1: resolve names and make sure the temp directory exists
        String tmpFileDir = buildTmpFileDir(uploadDto);
        String tmpFileName = buildTmpFileName(uploadDto);
        File uploadDir = new File(tmpFileDir);
        if (!uploadDir.exists() && !uploadDir.mkdirs()) {
            // mkdirs() may legitimately lose a race with another thread; the
            // copy below will fail loudly if the directory is truly missing.
            logger.warn("创建临时目录失败, dir={}", tmpFileDir);
        }

        logger.info("开始保存本地临时文件[{}],chunkNo={}...", tmpFileName, ossUploadDto.getChunkNo());

        // step2: copy the chunk stream to disk. try-with-resources closes
        // both streams even when IOUtils.copy throws — the original leaked
        // them on failure.
        // NOTE(review): tmpFileName is used as a full path; presumably
        // buildTmpFileName() includes the directory — confirm.
        try (InputStream in = ossUploadDto.getFile().getInputStream();
             OutputStream out = new FileOutputStream(new File(tmpFileName))) {
            IOUtils.copy(in, out);
        } catch (Exception e) {
            String message = String.format("保存本地文件[%s]失败, errMsg=%s", tmpFileName, e.getMessage());
            CommUtil.printLogAndThrowException(message);
        }

        logger.info("保存本地临时文件[{}]成功, chunkNo={}.", tmpFileName, ossUploadDto.getChunkNo());

        return tmpFileName;
    }

    /**
     * 将分片信息加入队列头部
     *
     * @param ossUploadDto
     */
    /**
     * Pushes a chunk descriptor onto the head of the Redis upload queue.
     * A {@code null} argument is silently ignored.
     *
     * @param ossUploadDto chunk descriptor to enqueue; may be {@code null}
     */
    private void pushChunkInfo2Queue(OssUploadDto ossUploadDto) {
        if (ossUploadDto != null) {
            String payload = JsonHelper.getInstance().writeValueAsString(ossUploadDto);
            redisUtils.lpush(buildQueueKey(), payload);
        }
    }

    /**
     * 生成临时文件
     *
     * @param ossUploadDto
     */
    /**
     * Merges one chunk into the final local temporary file, re-queueing work as needed.
     *
     * @param ossUploadDto descriptor of the chunk to merge
     * @throws IOException declared for compatibility; merge failures are caught and
     *                     reported via the task progress instead of propagating
     */
    private void createTmpFile(OssUploadDto ossUploadDto) throws IOException {
        try {
            // step1: the local chunk file is not on disk yet — push the descriptor back
            // onto the queue and retry later
            if (!checkTmpFileExist(ossUploadDto)) {
                pushChunkInfo2Queue(ossUploadDto);
                return;
            }

            // step2: chunk file exists — merge it into the final temporary file
            createTmpFileInner(ossUploadDto);

            // step3: if this was not the last chunk, enqueue the next chunk number
            logger.warn("记录下一次要合并的临时分片信息,chunkNo={},chunks={}",
                    ossUploadDto.getChunkNo(), ossUploadDto.getChunks());
            if (ossUploadDto.getChunkNo().compareTo(ossUploadDto.getChunks()) != 0) {
                logger.warn("记录下一次要合并的临时分片信息,chunkNo={}", ossUploadDto.getChunkNo());
                ossUploadDto.setChunkNo(ossUploadDto.getChunkNo() + 1);
                pushChunkInfo2Queue(ossUploadDto);
            }
        } catch (Exception e) {
            // Fix: pass the exception as the last argument so the stack trace is logged
            // (the original call dropped the cause entirely).
            logger.error("生成最终临时文件失败,bucketName={},prefix={},objectName={}",
                    ossUploadDto.getBucketName(), ossUploadDto.getPrefix(), ossUploadDto.getObjectName(), e);
            setUploadProgress(ossUploadDto, TaskStatusEnum.FAIL);
        }
    }

    /**
     * 合并本地文件
     *
     * @param ossUploadDto
     */
    /**
     * Merges the current chunk into the final local file: the first chunk creates
     * the file, subsequent chunks are appended to it.
     *
     * @param ossUploadDto descriptor of the chunk being merged
     * @throws IOException if the underlying file copy fails
     */
    private void createTmpFileInner(OssUploadDto ossUploadDto) throws IOException {
        File target = new File(buildTmpFileDir(ossUploadDto), ossUploadDto.getObjectName());
        if (target.exists()) {
            // Final file already started — append this chunk.
            appendLocalFile(ossUploadDto);
        } else {
            // First chunk — create the final file.
            writeLocalFile(ossUploadDto);
        }
    }

    /**
     * 判断本地临时分片文件是否存在
     *
     * @param ossUploadDto
     * @return
     */
    /**
     * Reports whether the local temporary chunk file for this descriptor exists on disk.
     *
     * @param ossUploadDto chunk descriptor whose temp file path is derived
     * @return {@code true} if the chunk file exists
     */
    private boolean checkTmpFileExist(OssUploadDto ossUploadDto) {
        String chunkPath = buildTmpFileName(ossUploadDto);
        return new File(chunkPath).exists();
    }

    /**
     * 写本地文件
     *
     * @param ossUploadDto
     */
    /**
     * Creates the final local temporary file from the first chunk (overwrites if present).
     *
     * @param ossUploadDto descriptor of the chunk being written
     * @throws IOException if copying the chunk into the final file fails
     */
    private void writeLocalFile(OssUploadDto ossUploadDto) throws IOException {
        // step1: resolve the final temp file path
        File finalTmpFile = new File(buildTmpFileDir(ossUploadDto), ossUploadDto.getObjectName());

        logger.info("后台线程开始保存本地临时文件[{}], chunkNo={}...", finalTmpFile.getAbsoluteFile(), ossUploadDto.getChunkNo());

        // step2: copy the chunk file into the final file
        String tmpFileStr = buildTmpFileName(ossUploadDto);
        File tmpChunkFile = new File(tmpFileStr);
        if (!tmpChunkFile.exists()) {
            // Fix: String.format uses %s — the original "{}" placeholder was never
            // interpolated, so the message omitted the file name.
            CommUtil.printLogAndThrowException(String.format("%s 临时文件不存在.", tmpChunkFile.getName()));
        }
        // try-with-resources closes both streams even if the copy fails
        // (the original leaked them on exception).
        try (InputStream in = new FileInputStream(tmpChunkFile);
             OutputStream out = new FileOutputStream(finalTmpFile)) {
            IOUtils.copy(in, out);
        }

        logger.info("后台线程保存本地临时文件[{}]成功,chunkNo={}.", finalTmpFile.getAbsoluteFile(), ossUploadDto.getChunkNo());
    }

    /**
     * 追加写本地文件
     *
     * @param ossUploadDto
     */
    /**
     * Appends a non-first chunk to the final local temporary file. If the final file's
     * current size does not match the expected offset for this chunk, the partial file
     * is discarded and the upload is restarted from chunk 1.
     *
     * @param ossUploadDto descriptor of the chunk being appended
     * @throws IOException if copying the chunk into the final file fails
     */
    private void appendLocalFile(OssUploadDto ossUploadDto) throws IOException {
        // step1: resolve the final temp file and the chunk file paths
        File finalTmpFile = new File(buildTmpFileDir(ossUploadDto), ossUploadDto.getObjectName());
        File tmpChunkFile = new File(buildTmpFileName(ossUploadDto));
        if (!tmpChunkFile.exists()) {
            // Fix: String.format uses %s — the original "{}" placeholder was never
            // interpolated, so the message omitted the file name.
            CommUtil.printLogAndThrowException(String.format("%s 临时文件不存在.", tmpChunkFile.getName()));
        } else if (finalTmpFile.length() != ossUploadDto.getChunkSize() * (ossUploadDto.getChunkNo() - 1)) {
            // Size mismatch means earlier chunks were lost or partially written:
            // drop the final file and re-queue from the first chunk.
            logger.warn("{}临时文件大小{}不匹配,currChunkNo={}",
                    finalTmpFile.getName(), finalTmpFile.length(), ossUploadDto.getChunkNo());
            finalTmpFile.delete();
            ossUploadDto.setChunkNo(1);
            pushChunkInfo2Queue(ossUploadDto);

            return;
        }

        logger.info("后台线程开始追加写本地临时文件[{}],chunkNo={}...", finalTmpFile.getAbsoluteFile(), ossUploadDto.getChunkNo());

        // step2: append the chunk; try-with-resources closes both streams even on
        // failure (the original leaked them on exception).
        try (InputStream in = new FileInputStream(tmpChunkFile);
             OutputStream out = new FileOutputStream(finalTmpFile, true)) {
            IOUtils.copy(in, out);
        }

        logger.info("后台线程追加写本地临时文件[{}]完成,chunkNo={}...", finalTmpFile.getAbsoluteFile(), ossUploadDto.getChunkNo());
    }

    /**
     * 构建临时文件存放路径
     *
     * @param uploadDto
     * @return
     */
    /**
     * Builds the temp-file directory path:
     * {@code <projectPath>/<bucket>/[<prefix>/]<taskId><fileMd5>}.
     *
     * @param uploadDto upload request; must actually be an {@link OssUploadDto}
     * @return directory path for this upload's temporary files
     */
    private String buildTmpFileDir(UploadDto uploadDto) {
        OssUploadDto dto = (OssUploadDto) uploadDto;

        StringBuilder path = new StringBuilder(projectPath)
                .append(Constant.SLASH)
                .append(dto.getBucketName())
                .append(Constant.SLASH);

        // The prefix segment is optional.
        if (dto.getPrefix() != null) {
            path.append(dto.getPrefix()).append(Constant.SLASH);
        }

        return path.append(dto.getTaskId()).append(dto.getFileMd5()).toString();
    }

    /**
     * 构建临时文件名称
     *
     * @param uploadDto
     * @return
     */
    /**
     * Builds the full path of the temporary chunk file:
     * {@code <tmpFileDir>/<chunk-file-name>}.
     *
     * @param uploadDto upload request; must actually be an {@link OssUploadDto}
     * @return absolute path of the temporary chunk file
     */
    private String buildTmpFileName(UploadDto uploadDto) {
        OssUploadDto ossUploadDto = (OssUploadDto) uploadDto;

        // StringBuilder instead of the original StringBuffer: no shared mutation here,
        // so the synchronized buffer was needless overhead.
        return new StringBuilder()
                .append(buildTmpFileDir(uploadDto))
                .append(Constant.SLASH)
                .append(CommUtil.buildTmpFileName(ossUploadDto.getObjectName(), ossUploadDto.getChunkNo()))
                .toString();
    }

    /**
     * 获取minio连接信息
     *
     * @return
     */
    /**
     * Lazily builds and caches the MinIO client from configuration.
     * Synchronized so concurrent callers cannot build two clients; the cached
     * instance is reused on subsequent calls.
     *
     * @return the shared {@link MinioClient} instance
     */
    private synchronized MinioClient getMinioClient() {
        if (minioClient == null) {
            endPoint = ConfigUtils.getInstance().getString("minio.endPoint.url");
            accessKey = ConfigUtils.getInstance().getString("minio.username");
            // NOTE(review): config key is spelled "passward" — presumably a typo for
            // "password", but it must match the deployed configuration; confirm before renaming.
            secretKey = ConfigUtils.getInstance().getString("minio.passward");
            minioClient = MinioClient.builder().endpoint(endPoint).credentials(accessKey, secretKey).build();
        }

        return minioClient;
    }

    /**
     * 构建Redis中Queue的KEY
     *
     * @return
     */
    /**
     * Builds the Redis key of the temp-file queue:
     * {@code <OSS_UPLOAD_TMP_FILE_QUEUE>-<queueSuffix>}.
     *
     * @return the Redis queue key for this node
     */
    private String buildQueueKey() {
        return Constant.OSS_UPLOAD_TMP_FILE_QUEUE + Constant.HYPHEN + queueSuffix;
    }
}
