package com.easydisk.file.service.impl;

import cn.hutool.core.date.DateUtil;
import cn.hutool.core.lang.UUID;
import cn.hutool.core.util.StrUtil;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.model.*;
import com.baomidou.mybatisplus.core.conditions.query.QueryWrapper;
import com.baomidou.mybatisplus.extension.service.impl.ServiceImpl;
import com.easydisk.common.redis.RedisKeyPrefix;
import com.easydisk.common.util.FileUtil;
import com.easydisk.common.util.HttpUtil;
import com.easydisk.common.util.IdGenerator;
import com.easydisk.common.util.StringListUtil;
import com.easydisk.common.util.file.type.context.FileTypeContext;
import com.easydisk.constant.CommonConstant;
import com.easydisk.constant.FileConstant;
import com.easydisk.file.config.MinioProperties;
import com.easydisk.file.mapper.FileChunkMapper;
import com.easydisk.file.mapper.UploadFileMapper;
import com.easydisk.file.mapper.UploadTaskMapper;
import com.easydisk.file.model.entity.FileChunk;
import com.easydisk.file.model.entity.UploadFile;
import com.easydisk.file.model.entity.UploadTask;
import com.easydisk.file.model.entity.UserFile;
import com.easydisk.file.model.param.ChunkUploadEntity;
import com.easydisk.file.model.param.MyPartETag;
import com.easydisk.file.model.req.FileChunkCheckReq;
import com.easydisk.file.model.vo.CheckFileChunkUploadVO;
import com.easydisk.file.model.vo.FileChunkUploadVO;
import com.easydisk.file.service.UploadFileService;
import com.easydisk.file.service.UserFileService;
import com.google.common.collect.Lists;
import io.minio.GetObjectArgs;
import io.minio.MinioClient;
import io.minio.PutObjectArgs;
import io.minio.RemoveObjectArgs;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.collections.CollectionUtils;
import org.apache.commons.lang3.StringUtils;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.data.redis.core.RedisTemplate;
import org.springframework.stereotype.Service;
import org.springframework.transaction.annotation.Transactional;
import org.springframework.web.multipart.MultipartFile;

import javax.servlet.http.HttpServletResponse;
import java.io.*;
import java.util.*;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.stream.Collectors;

/**
 * 物理文件信息表 服务实现类
 *
 * @author word
 * @since 2023-09-08
 */
@Slf4j
@Service
public class UploadFileServiceImpl extends ServiceImpl<UploadFileMapper, UploadFile>
        implements UploadFileService {


    // MinIO configuration (endpoint, bucket list) bound from application properties.
    @Autowired
    private MinioProperties minioProperties;

    // Maintains the user's virtual file tree (user-to-file mapping records).
    @Autowired
    private UserFileService userFileService;

    // Mapper for physical file metadata rows.
    @Autowired
    private UploadFileMapper uploadFileMapper;

    // Mapper for chunk bookkeeping rows (database-tracked chunk workflow).
    @Autowired
    private FileChunkMapper fileChunkMapper;

    // NOTE(review): injected raw (no generics); values stored include
    // ChunkUploadEntity and MyPartETag — serializer configuration must match.
    @Autowired
    private RedisTemplate redisTemplate;

    // S3-compatible client used for multipart (chunked) uploads.
    @Autowired
    private AmazonS3 amazonS3;

    // Native MinIO client used for whole-file put/get/remove operations.
    @Autowired
    private MinioClient minioClient;

    // Mapper for multipart upload task records.
    @Autowired
    private UploadTaskMapper uploadTaskMapper;

    /**
     * Builds the public access URL of a stored object, e.g.
     * http://localhost:9000/easy-disk/2023-09-09/9701712bbad047deb37c99efc7bab9bc.html
     *
     * @param bucket    bucket name
     * @param objectKey object key inside the bucket (may contain a date directory)
     * @return endpoint/bucket/objectKey
     */
    @Override
    public String getUrlPath(String bucket, String objectKey) {
        return String.join("/", minioProperties.getEndpoint(), bucket, objectKey);
    }

    /**
     * Generates a date-prefixed random object key, e.g.
     * 2023-09-09/9701712bbad047deb37c99efc7bab9bc.html
     *
     * @param suffix file suffix, including the leading dot
     * @return object key relative to the bucket root
     */
    public String getObjectKey(String suffix) {
        String dateDir = DateUtil.format(new Date(), "yyyy-MM-dd");
        String randomName = UUID.fastUUID().toString(true);
        return dateDir + "/" + randomName + suffix;
    }


    /**
     * Uploads a complete (non-chunked) file and registers it in the user's file tree.
     */
    @Override
    public void uploadFile(MultipartFile file, Long parentId, Long userId,
                           String identifier, Long totalSize, String filename) {
        String suffix = FileUtil.getFileSuffix(filename);
        UploadFile stored = uploadRealFile(file, userId, identifier, totalSize, suffix);
        // Link the stored physical file into the user's virtual folder tree.
        userFileService.addUserFile(parentId, filename, FileConstant.FolderFlagEnum.NO,
                FileTypeContext.getFileTypeCode(filename), stored.getId(), userId,
                stored.getFileSizeDesc());
    }

    /**
     * Uploads the physical file to object storage and persists its metadata.
     * On failure the already-uploaded object (if any) is removed before rethrowing.
     */
    private UploadFile uploadRealFile(MultipartFile file, Long userId, String identifier,
                                      Long totalSize, String suffix) {
        UploadFile uploadFile = null;
        // Always store into the first configured bucket.
        List<String> bucketList = minioProperties.getBucketList();
        String bucketName = bucketList.get(0);
        try {
            String realFilePath = uploadFile(file, bucketName, suffix);
            uploadFile = covertToUploadFile(realFilePath, userId,
                    identifier, totalSize, suffix, file.getContentType());
            uploadFileMapper.insert(uploadFile);
            return uploadFile;
        } catch (Exception e) {
            // Best-effort cleanup: delete by the full object key — realPath includes
            // the date directory, so the bare filename would not match the stored object.
            if (uploadFile != null) {
                deleteUploadFile(bucketName, uploadFile.getRealPath());
            }
            // Preserve the original failure as the cause for diagnostics.
            throw new RuntimeException("上传失败", e);
        }
    }

    /**
     * Streams a multipart file into object storage.
     *
     * @return object key such as 2023-08-22/e195a665d869454ebe36f83ec8a5f214.jpg;
     * endpoint + bucketName + key yields the access URL
     */
    private String uploadFile(MultipartFile file, String bucketName, String suffix) {
        // suffix already contains the leading dot
        String objectName = getObjectKey(suffix);
        // try-with-resources: the multipart input stream must be closed after the upload
        try (InputStream inputStream = file.getInputStream()) {
            minioClient.putObject(
                    PutObjectArgs.builder()
                            .bucket(bucketName)
                            .object(objectName)
                            .contentType(file.getContentType())
                            // use the declared upload size; InputStream.available() is
                            // not guaranteed to report the full remaining length
                            .stream(inputStream, file.getSize(), -1)
                            .build());
            return objectName;
        } catch (Exception e) {
            // Keep the cause instead of only its message.
            throw new RuntimeException(e.getMessage(), e);
        }
    }

    /**
     * Assembles the physical-file entity persisted for a direct (non-chunked) upload.
     */
    private UploadFile covertToUploadFile(String filePath, Long userId,
                                          String identifier, Long totalSize,
                                          String suffix, String contentType) {
        UploadFile entity = new UploadFile();
        entity.setId(IdGenerator.nextId());
        // The stored filename is the last path segment of the object key.
        entity.setFilename(FileUtil.getFilename(filePath));
        entity.setRealPath(filePath);
        entity.setFileSize(String.valueOf(totalSize));
        entity.setFileSizeDesc(FileUtil.getFileSizeDesc(totalSize));
        entity.setFileSuffix(suffix);
        entity.setFilePreviewContentType(contentType);
        entity.setIdentifier(identifier);
        entity.setCreateUser(userId);
        entity.setCreateTime(new Date());
        return entity;
    }

    /**
     * Removes an object from storage. Failures are logged (with the stack trace) but
     * not rethrown, matching the best-effort cleanup semantics of the callers.
     *
     * @param bucketName bucket name
     * @param objectName object key to remove
     */
    public void deleteUploadFile(String bucketName, String objectName) {
        try {
            minioClient.removeObject(
                    RemoveObjectArgs.builder()
                            .bucket(bucketName)
                            .object(objectName)
                            .build());
        } catch (Exception e) {
            // Error level + cause so manual cleanup can be performed with full context.
            log.error("删除文件失败，请手动删除！文件路径为：{}", getUrlPath(bucketName, objectName), e);
        }
    }

    /**
     * Looks up a physical file record by its primary key.
     *
     * @param fileId physical file id
     * @return the matching {@link UploadFile}, or null when no row exists
     */
    @Override
    public UploadFile getById(Long fileId) {
        UploadFile record = uploadFileMapper.selectById(fileId);
        return record;
    }

    /**
     * Downloads a file after verifying the requesting user owns it.
     * Folders cannot be downloaded.
     */
    @Override
    public void downloadFile(Long virtualFileId, HttpServletResponse response, Long userId) {
        try {
            // Throws when the file does not belong to this user.
            userFileService.getUserFileByIdAndUserId(virtualFileId, userId);
        } catch (RuntimeException e) {
            // Keep the original failure as the cause for troubleshooting.
            throw new RuntimeException("您没有下载权限", e);
        }
        if (userFileService.isFolder(virtualFileId)) {
            throw new RuntimeException("不能选择文件夹下载");
        }
        UserFile userFile = userFileService.getById(virtualFileId);
        UploadFile uploadFile = this.getById(userFile.getRealFileId());
        doDownload(uploadFile.getRealPath(), response, userFile.getFilename());
    }

    /**
     * Downloads a file without an ownership check (used where access was already
     * authorized elsewhere). Folders cannot be downloaded.
     */
    @Override
    public void downloadFile(Long virtualFileId, HttpServletResponse response) {
        if (userFileService.isFolder(virtualFileId)) {
            throw new RuntimeException("不能选择文件夹下载");
        }
        UserFile virtualFile = userFileService.getById(virtualFileId);
        UploadFile realFile = this.getById(virtualFile.getRealFileId());
        doDownload(realFile.getRealPath(), response, virtualFile.getFilename());
    }

    /**
     * Writes the stored object to the servlet response as a binary attachment.
     *
     * @param objectKey object key of the stored file
     * @param response  target servlet response
     * @param filename  download filename shown to the client
     */
    private void doDownload(String objectKey, HttpServletResponse response, String filename) {
        // Common headers for a binary attachment download.
        String contentTypeValue = FileConstant.APPLICATION_OCTET_STREAM_STR;
        response.reset();
        HttpUtil.addCorsResponseHeader(response);
        response.setHeader(FileConstant.CONTENT_TYPE_STR, contentTypeValue);
        response.setContentType(contentTypeValue);
        // Objects live in the first configured bucket.
        String bucketName = minioProperties.getBucketList().get(0);
        try {
            // Re-encode the filename (GB2312 -> ISO-8859-1) so non-ASCII names survive
            // the Content-Disposition header on legacy clients.
            response.setHeader(FileConstant.CONTENT_DISPOSITION_STR,
                    FileConstant.CONTENT_DISPOSITION_VALUE_PREFIX_STR + new String(filename.getBytes(FileConstant.GB2312_STR), FileConstant.IOS_8859_1_STR));
            read2OutputStream(bucketName, objectKey, response.getOutputStream());
        } catch (Exception e) {
            log.error("下载文件失败", e);
            // Preserve the cause instead of only its message.
            throw new RuntimeException(e.getMessage(), e);
        }
    }

    /**
     * Streams a stored object into the given output stream.
     * Failures are logged and swallowed — the existing best-effort contract is kept.
     *
     * @param bucketName   bucket name
     * @param objectKey    object key of the stored file
     * @param outputStream target stream (not closed here)
     */
    @Override
    public void read2OutputStream(String bucketName, String objectKey, OutputStream outputStream) {
        // try-with-resources: always close the object stream returned by MinIO.
        try (InputStream inputStream = minioClient.getObject(GetObjectArgs.builder()
                .bucket(bucketName).object(objectKey).build())) {
            FileUtil.writeStreamToStreamNormal(inputStream, outputStream);
        } catch (Exception e) {
            // Log with the stack trace instead of only the message.
            log.error(e.getMessage(), e);
        }
    }

    /**
     * Streams a stored object (from the default bucket) into the given output stream.
     * Failures are logged and swallowed — the existing best-effort contract is kept.
     *
     * @param filePath     object key of the stored file
     * @param outputStream target stream (not closed here)
     */
    @Override
    public void read2OutputStream(String filePath, OutputStream outputStream) {
        String bucketName = minioProperties.getBucketList().get(0);
        // try-with-resources: always close the object stream returned by MinIO.
        try (InputStream inputStream = minioClient.getObject(GetObjectArgs.builder()
                .bucket(bucketName).object(filePath).build())) {
            FileUtil.writeStreamToStreamNormal(inputStream, outputStream);
        } catch (Exception e) {
            // Log with the stack trace instead of only the message.
            log.error(e.getMessage(), e);
        }
    }


    /**
     * Instant ("second") upload: if a physical file with the same identifier already
     * exists, only a new user-file mapping is created and the bytes are not re-uploaded.
     *
     * @return true when an existing physical file was reused
     */
    @Override
    public boolean secondUploadFile(Long parentId, String filename, String identifier, Long userId) {
        QueryWrapper<UploadFile> wrapper = new QueryWrapper<UploadFile>().eq("identifier", identifier);
        List<UploadFile> matches = uploadFileMapper.selectList(wrapper);
        if (CollectionUtils.isEmpty(matches)) {
            // Nothing to reuse: the caller must upload the real bytes.
            return false;
        }
        UploadFile existing = matches.get(CommonConstant.ZERO_INT);
        // Only a mapping record is needed; the physical file is shared.
        userFileService.addUserFile(parentId,
                filename,
                FileConstant.FolderFlagEnum.NO,
                FileTypeContext.getFileTypeCode(filename),
                existing.getId(),
                userId,
                existing.getFileSizeDesc());
        return true;
    }


    /**
     * Merges the database-tracked chunks of a file into one physical file and links
     * it into the user's file tree.
     */
    @Override
    public void mergeFileChunks(String filename, String identifier, Long parentId, Long totalSize, Long userId) {
        UploadFile merged = this.mergeChunks(identifier, totalSize, userId, filename);
        // Record the user -> physical file mapping.
        userFileService.addUserFile(parentId, filename, FileConstant.FolderFlagEnum.NO,
                FileTypeContext.getFileTypeCode(filename), merged.getId(), userId,
                merged.getFileSizeDesc());
    }

    /**
     * Merges the Redis-tracked chunks of a file into one physical file and links it
     * into the user's file tree.
     */
    @Override
    public void mergeFileChunksByRedis(String filename, String identifier, Long parentId, Long totalSize, Long userId) {
        UploadFile merged = this.mergeChunksByRedis(identifier, totalSize, userId, filename);
        // Record the user -> physical file mapping.
        userFileService.addUserFile(parentId, filename, FileConstant.FolderFlagEnum.NO,
                FileTypeContext.getFileTypeCode(filename), merged.getId(), userId,
                merged.getFileSizeDesc());
    }

    /**
     * Merges the Redis-tracked chunks and persists the resulting file record.
     * On insert failure the merged object is deleted (best effort) and the failure
     * is rethrown with its cause.
     *
     * @param filename file name without any directory part
     */
    private UploadFile mergeChunksByRedis(String identifier, Long totalSize, Long userId, String filename) {
        String filePath = doMergeChunksByRedis(identifier, userId);
        // Store the file suffix, not the whole filename — consistent with uploadRealFile.
        UploadFile uploadFile = convertUploadFile(filePath, userId, identifier, totalSize,
                FileUtil.getFileSuffix(filename));
        try {
            uploadFileMapper.insert(uploadFile);
        } catch (Exception e) {
            try {
                this.deleteFileByPath(null, uploadFile.getRealPath());
            } catch (Exception ex) {
                log.error("文件物理删除失败，请手动删除！文件路径为：{}", uploadFile.getRealPath(), ex);
            }
            throw new RuntimeException("合并文件失败", e);
        }
        return uploadFile;
    }

    /**
     * Performs the actual merge for the Redis-tracked workflow: collects the part
     * ETags cached in Redis and asks the store to complete the multipart upload.
     *
     * @param identifier file content identifier
     * @param userId     uploading user id
     * @return the object key of the merged file
     */
    private String doMergeChunksByRedis(String identifier, Long userId) {
        UploadTask task = getUploadTaskByIdentifierAndUserId(identifier, userId);
        if (task == null) {
            throw new RuntimeException("分片任务不存在");
        }
        String uploadTaskKey = RedisKeyPrefix.FILE_CHUNK_TASK.getKey() + FileUtil.generateUploadTaskKey(userId, identifier);
        // NOTE(review): raw RedisTemplate — elements are assumed to deserialize to
        // MyPartETag; confirm the configured value serializer.
        Set<MyPartETag> myPartETagSet = redisTemplate.opsForSet().members(uploadTaskKey);
        if (CollectionUtils.isEmpty(myPartETagSet) || !task.getChunkNum().equals(myPartETagSet.size())) {
            // Uploaded part count does not match the recorded chunk total — refuse to merge.
            throw new RuntimeException("分片缺失，请重新上传");
        }
        CompleteMultipartUploadRequest completeMultipartUploadRequest =
                new CompleteMultipartUploadRequest()
                        .withUploadId(task.getUploadId())
                        .withKey(task.getObjectKey())
                        .withBucketName(task.getBucketName())
                        .withPartETags(myPartETagSet.stream().map(myPartETag ->
                                new PartETag(myPartETag.getPartNumber(), myPartETag.getETag())).collect(Collectors.toList()));
        amazonS3.completeMultipartUpload(completeMultipartUploadRequest);
        return task.getObjectKey();
    }

    /**
     * Finds the multipart upload task for a given file identifier and owner.
     */
    private UploadTask getUploadTaskByIdentifierAndUserId(String identifier, Long userId) {
        QueryWrapper<UploadTask> wrapper = new QueryWrapper<>();
        wrapper.eq("file_identifier", identifier);
        wrapper.eq("create_user", userId);
        return uploadTaskMapper.selectOne(wrapper);
    }


    /**
     * Merges the database-tracked chunks and persists the resulting file record.
     * On insert failure the merged object is deleted (best effort) and the failure
     * is rethrown with its cause.
     *
     * @param filename file name without any directory part
     */
    private UploadFile mergeChunks(String identifier, Long totalSize, Long userId, String filename) {
        String filePath = doMergeChunks(identifier, userId);
        // Store the file suffix, not the whole filename — consistent with uploadRealFile.
        UploadFile uploadFile = convertUploadFile(filePath, userId, identifier, totalSize,
                FileUtil.getFileSuffix(filename));
        try {
            uploadFileMapper.insert(uploadFile);
        } catch (Exception e) {
            try {
                this.deleteFileByPath(null, uploadFile.getRealPath());
            } catch (Exception ex) {
                log.error("文件物理删除失败，请手动删除！文件路径为：{}", uploadFile.getRealPath(), ex);
            }
            throw new RuntimeException("合并文件失败", e);
        }
        return uploadFile;
    }

    /**
     * Deletes a file, either by aborting an in-flight multipart upload (when a cache
     * key is supplied) or by removing the stored object directly.
     *
     * @param cacheKey cache key suffix of the chunk-upload session; may be blank
     * @param filePath object key of the stored file
     */
    private void deleteFileByPath(String cacheKey, String filePath) {
        String bucketName = minioProperties.getBucketList().get(0);
        if (StringUtils.isBlank(cacheKey)) {
            // No upload session: the object is already assembled, delete it directly.
            amazonS3.deleteObject(bucketName, filePath);
            return;
        }
        String redisKey = RedisKeyPrefix.FILE_CHUNK_TASK.getKey() + cacheKey;
        ChunkUploadEntity session = (ChunkUploadEntity) redisTemplate.opsForValue().get(redisKey);
        if (session == null) {
            return;
        }
        // Abort the multipart upload so the store discards the already-uploaded parts.
        AbortMultipartUploadRequest abortRequest =
                new AbortMultipartUploadRequest(bucketName, session.getFilePath(), session.getUploadId());
        amazonS3.abortMultipartUpload(abortRequest);
        redisTemplate.delete(redisKey);
    }

    /**
     * Assembles the physical-file entity persisted after a chunk merge.
     */
    private UploadFile convertUploadFile(String filePath, Long userId, String identifier,
                                         Long totalSize, String suffix) {
        UploadFile entity = new UploadFile();
        entity.setId(IdGenerator.nextId());
        // The stored filename is the last path segment of the object key.
        entity.setFilename(FileUtil.getFilename(filePath));
        entity.setRealPath(filePath);
        entity.setFileSize(String.valueOf(totalSize));
        entity.setFileSizeDesc(FileUtil.getFileSizeDesc(totalSize));
        entity.setFileSuffix(suffix);
        // Content type is derived from the object key's extension.
        entity.setFilePreviewContentType(FileUtil.getContentType(filePath));
        entity.setIdentifier(identifier);
        entity.setCreateUser(userId);
        entity.setCreateTime(new Date());
        return entity;
    }


    /**
     * Completes a multipart upload whose part ETags are cached in Redis under
     * {@code cacheKey}, then drops the cache entry.
     *
     * @param cacheKey   cache key suffix of the chunk-upload session
     * @param attachment extra information (currently unused; kept for interface compatibility)
     * @return the object key of the merged file
     */
    public String mergeChunks(String cacheKey, Object attachment) throws IOException {
        // Chunk bookkeeping lives in Redis.
        String key = RedisKeyPrefix.FILE_CHUNK_TASK.getKey() + cacheKey;
        ChunkUploadEntity chunkUploadEntity = (ChunkUploadEntity) redisTemplate.opsForValue().get(key);
        if (chunkUploadEntity == null) {
            // Fail fast with a clear message instead of a NullPointerException.
            throw new RuntimeException("分片上传任务不存在，无法合并");
        }
        String filePath = chunkUploadEntity.getFilePath();
        String uploadId = chunkUploadEntity.getUploadId();
        CopyOnWriteArrayList<MyPartETag> partETags = chunkUploadEntity.getPartETagList();

        // Completing a multipart upload requires every valid partETag; the store
        // verifies each part and then assembles them into the final object.
        String bucketName = minioProperties.getBucketList().get(0);
        List<PartETag> list = new ArrayList<>();
        for (MyPartETag myPartETag : partETags) {
            list.add(new PartETag(myPartETag.getPartNumber(), myPartETag.getETag()));
        }
        CompleteMultipartUploadRequest completeMultipartUploadRequest =
                new CompleteMultipartUploadRequest(bucketName, filePath, uploadId, list);
        // Complete the upload, then drop the session cache.
        amazonS3.completeMultipartUpload(completeMultipartUploadRequest);
        redisTemplate.delete(key);
        return filePath;
    }


    /**
     * Loads every chunk row recorded for a file identifier and its uploader.
     *
     * @param identifier file content identifier
     * @param userId     uploading user id
     */
    private List<FileChunk> getFileChunkList(String identifier, Long userId) {
        QueryWrapper<FileChunk> query = new QueryWrapper<FileChunk>()
                .eq("identifier", identifier)
                .eq("create_user", userId);
        return fileChunkMapper.selectList(query);
    }

    /**
     * Deletes all chunk rows for a file identifier and its uploader.
     *
     * @param identifier file content identifier
     * @param userId     uploading user id
     */
    @Override
    @Transactional(rollbackFor = Exception.class)
    public void deleteFileChunkByIdentifierAndUserId(String identifier, Long userId) {
        QueryWrapper<FileChunk> query = new QueryWrapper<FileChunk>()
                .eq("identifier", identifier)
                .eq("create_user", userId);
        fileChunkMapper.delete(query);
    }


    /**
     * Chunked-upload pre-check (database-tracked): reports which chunk numbers have
     * already been uploaded for this file identifier and user.
     */
    @Override
    public CheckFileChunkUploadVO checkUploadWithChunk(Long userId, String identifier) {
        QueryWrapper<FileChunk> chunkWrapper = new QueryWrapper<>();
        chunkWrapper.eq("identifier", identifier);
        chunkWrapper.eq("create_user", userId);
        List<FileChunk> chunks = fileChunkMapper.selectList(chunkWrapper);
        List<Integer> uploadedChunkNumbers = new ArrayList<>();
        if (CollectionUtils.isNotEmpty(chunks)) {
            for (FileChunk chunk : chunks) {
                uploadedChunkNumbers.add(chunk.getChunkNumber());
            }
        }
        CheckFileChunkUploadVO vo = new CheckFileChunkUploadVO();
        vo.setUploadedChunks(uploadedChunkNumbers);
        return vo;
    }

    /**
     * Chunked-upload pre-check (Redis-tracked): ensures an upload task exists for the
     * file and reports which chunk numbers have already been uploaded.
     *
     * @param userId            uploading user id
     * @param fileChunkCheckReq check request carrying identifier, filename and sizes
     * @return VO listing already-uploaded chunk numbers (empty for a fresh task)
     */
    @Override
    public CheckFileChunkUploadVO checkUploadWithChunkByRedis(Long userId, FileChunkCheckReq fileChunkCheckReq) {
        List<Integer> uploadedChunkNumbers = Lists.newArrayListWithCapacity(64);
        CheckFileChunkUploadVO checkFileChunkUploadVO = new CheckFileChunkUploadVO();
        // Does an upload task already exist for this identifier + user?
        UploadTask task = getUploadTaskByIdentifierAndUserId(fileChunkCheckReq.getIdentifier(), userId);
        // No task yet: initiate a multipart upload session and record the task.
        if (task == null){
            String uploadTaskKey = RedisKeyPrefix.FILE_CHUNK_TASK.getKey() + FileUtil.generateUploadTaskKey(userId, fileChunkCheckReq.getIdentifier());
            InitiateMultipartUploadResult initResult = initiateMultipartUpload(FileUtil.getFileSuffix(fileChunkCheckReq.getFilename()), uploadTaskKey);
            // Persist a new upload task row mirroring the provider's session info.
            task = new UploadTask();
            task.setId(IdGenerator.nextId())
                    .setUploadId(initResult.getUploadId())
                    .setFileIdentifier(fileChunkCheckReq.getIdentifier())
                    .setFileName(fileChunkCheckReq.getFilename())
                    .setBucketName(initResult.getBucketName())
                    .setObjectKey(initResult.getKey())
                    .setTotalSize(fileChunkCheckReq.getTotalSize())
                    .setChunkSize(fileChunkCheckReq.getCurrentChunkSize())
                    .setChunkNum(fileChunkCheckReq.getTotalChunks())
                    .setCreateUser(userId)
                    .setCreateTime(new Date())
                    .setUpdateTime(new Date())
                    // Stale tasks become eligible for cleanup after this date.
                    .setExpirationTime(com.easydisk.common.util.DateUtil.afterDays(FileConstant.CHUNK_FILE_EXPIRATION_DAYS));
            uploadTaskMapper.insert(task);
        }else{
            // Task exists: report the parts (ETags) already cached in Redis.
            String partEtgKey = RedisKeyPrefix.FILE_CHUNK_TASK.getKey() + FileUtil.generateUploadTaskKey(userId, fileChunkCheckReq.getIdentifier());
            Set<MyPartETag> chunkUploadSet = redisTemplate.opsForSet().members(partEtgKey);
            if (CollectionUtils.isNotEmpty(chunkUploadSet)) {
                chunkUploadSet.forEach(myPartETag -> uploadedChunkNumbers.add(myPartETag.getPartNumber()));
            }
        }
        checkFileChunkUploadVO.setUploadedChunks(uploadedChunkNumbers);
        return checkFileChunkUploadVO;
    }

    /**
     * Chunk upload endpoint (database-tracked).
     *
     * @return VO whose merge flag says whether every chunk has arrived
     */
    @Override
    public FileChunkUploadVO uploadWithChunk(MultipartFile file, Long userId, String identifier, Integer totalChunks,
                                             Integer chunkNumber, Long totalSize, String filename) {
        boolean allUploaded = this.saveWithChunk(file, userId, identifier, totalChunks,
                chunkNumber, totalSize, filename);
        FileChunkUploadVO vo = new FileChunkUploadVO();
        vo.setMergeFlag(allUploaded
                ? FileConstant.MergeFlag.READY.getCode()
                : FileConstant.MergeFlag.NOT_READY.getCode());
        return vo;
    }

    /**
     * Chunk upload endpoint (Redis-tracked).
     *
     * @return VO whose merge flag says whether every chunk has arrived
     */
    @Override
    public FileChunkUploadVO uploadWithChunkByRedis(MultipartFile file, Long userId, String identifier, Integer totalChunks,
                                                    Integer chunkNumber, Long totalSize, String filename) {
        boolean allUploaded = this.saveWithChunkByRedis(file, userId, identifier, totalChunks,
                chunkNumber, totalSize, filename);
        FileChunkUploadVO vo = new FileChunkUploadVO();
        vo.setMergeFlag(allUploaded
                ? FileConstant.MergeFlag.READY.getCode()
                : FileConstant.MergeFlag.NOT_READY.getCode());
        return vo;
    }


    /**
     * Stores one chunk (object storage + database bookkeeping) and reports whether
     * every chunk of the file has now been uploaded.
     * Synchronized so the upload + count check is not raced by concurrent chunks.
     *
     * @return true when all chunks are present
     */
    public synchronized boolean saveWithChunk(MultipartFile file, Long userId, String identifier, Integer totalChunks, Integer chunkNumber, Long totalSize, String filename) {
        // Push the chunk bytes to object storage.
        String chunkPath = uploadFileWithChunk(file, identifier, totalChunks, chunkNumber, totalSize, FileUtil.getFileSuffix(filename), userId);
        // Track the chunk in the database.
        addFileChunk(userId, identifier, chunkNumber, chunkPath);
        int uploadedCount = getUploadedChunkCount(identifier, userId);
        return uploadedCount == totalChunks;
    }


    /**
     * Counts the chunk rows already persisted for this identifier and user.
     */
    private Integer getUploadedChunkCount(String identifier, Long userId) {
        QueryWrapper<FileChunk> query = new QueryWrapper<FileChunk>()
                .eq("identifier", identifier)
                .eq("create_user", userId);
        return fileChunkMapper.selectCount(query).intValue();
    }

    /**
     * Stores one chunk via the Redis-tracked multipart workflow and reports whether
     * every chunk has now been uploaded.
     *
     * @return true when all chunks are present
     */
    private boolean saveWithChunkByRedis(MultipartFile file, Long userId, String identifier, Integer totalChunks, Integer chunkNumber, Long totalSize, String filename) {
        // Upload this chunk as one multipart part.
        uploadFileWithChunk(file, identifier, totalChunks, chunkNumber, totalSize, filename, userId);
        // All parts are present once Redis holds one ETag per chunk.
        int uploadedCount = getUploadedChunkCntByRedis(identifier, userId);
        return uploadedCount == totalChunks;
    }

    /**
     * Reads how many part ETags are cached in Redis for this upload task.
     */
    private int getUploadedChunkCntByRedis(String identifier, Long userId) {
        String uploadTaskKey = RedisKeyPrefix.FILE_CHUNK_TASK.getKey()
                + FileUtil.generateUploadTaskKey(userId, identifier);
        return Math.toIntExact(redisTemplate.opsForSet().size(uploadTaskKey));
    }


    /**
     * Persists the bookkeeping row for one uploaded chunk.
     *
     * @throws RuntimeException when arguments are missing or the chunk number is not 1-based
     */
    private void addFileChunk(Long userId, String identifier, Integer chunkNumber, String realPath) {
        boolean missingArgs = Objects.isNull(userId) || Objects.isNull(chunkNumber)
                || StringUtils.isBlank(identifier);
        if (missingArgs) {
            throw new RuntimeException("保存文件分片信息失败,参数非法");
        }
        if (chunkNumber <= CommonConstant.ZERO_INT) {
            throw new RuntimeException("保存文件分片信息失败,分片下标必须从1开始");
        }
        FileChunk chunk = new FileChunk();
        chunk.setId(IdGenerator.nextId());
        chunk.setIdentifier(identifier);
        chunk.setChunkNumber(chunkNumber);
        chunk.setRealPath(realPath);
        // Expired chunk rows become eligible for cleanup after this date.
        chunk.setExpirationTime(com.easydisk.common.util.DateUtil.afterDays(FileConstant.CHUNK_FILE_EXPIRATION_DAYS));
        chunk.setCreateUser(userId);
        chunk.setCreateTime(new Date());
        fileChunkMapper.insert(chunk);
    }


    /**
     * Uploads one chunk of a file.
     *
     * @return object key of the file the chunks will be merged into
     */
    private String uploadFileWithChunk(MultipartFile file, String identifier,
                                       Integer totalChunks, Integer chunkNumber,
                                       Long totalSize, String filename, Long userId) {
        try {
            return storeWithChunk(file.getInputStream(), identifier, totalChunks,
                    chunkNumber, totalSize, file.getSize(), filename, userId);
        } catch (IOException e) {
            // Preserve the cause so the failed stream read can be diagnosed.
            throw new RuntimeException("上传失败!", e);
        }
    }

    /**
     * Stores a single file chunk. Chunks may arrive concurrently and in any order.
     *
     * @param inputStream chunk content
     * @param identifier  file content identifier
     * @param totalChunks total chunk count (bounded by MAX_NUMBER_SHARDS)
     * @param chunkNumber current chunk index, 1-based
     * @param totalSize   total file size (currently unused here)
     * @param chunkSize   size of this chunk
     * @param filename    file name (currently unused here)
     * @param userId      uploading user id
     * @return object key of the file being assembled
     * @throws IOException declared for interface compatibility
     */
    public String storeWithChunk(InputStream inputStream, String identifier, Integer totalChunks,
                                 Integer chunkNumber, Long totalSize, Long chunkSize, String filename, Long userId) throws IOException {
        if (totalChunks > FileConstant.MAX_NUMBER_SHARDS) {
            throw new RuntimeException("分片数超过了限制，分片数不得大于" + FileConstant.MAX_NUMBER_SHARDS);
        }
        // The upload task must have been created by the pre-check endpoint first.
        UploadTask task = getUploadTaskByIdentifierAndUserId(identifier, userId);
        if (task == null) {
            throw new RuntimeException("文件分片任务不存在,上传分片失败");
        }
        doUploadMultipart(inputStream, chunkNumber, chunkSize, task, userId);
        return task.getObjectKey();
    }

    /**
     * Starts a multipart upload session for a freshly generated object key.
     *
     * @param suffix        file suffix including the leading dot
     * @param uploadTaskKey cache key of the upload task (currently unused here)
     * @return the provider's init result carrying bucket, key and uploadId
     */
    private InitiateMultipartUploadResult initiateMultipartUpload(String suffix, String uploadTaskKey) {
        String objectKey = getObjectKey(suffix);
        String bucketName = minioProperties.getBucketList().get(0);
        return amazonS3.initiateMultipartUpload(
                new InitiateMultipartUploadRequest(bucketName, objectKey));
    }

//    /**
//     * 上传分片文件
//     * 以前的
//     *
//     * @param inputStream
//     * @param chunkUploadKey
//     * @param chunkNumber
//     * @param chunkSize
//     */
//    private void doUploadMultipart(InputStream inputStream, String chunkUploadKey, Integer chunkNumber, Long chunkSize) {
//        ChunkUploadEntity chunkUploadEntity = (ChunkUploadEntity) redisTemplate.opsForValue().get(chunkUploadKey);
//        UploadPartRequest uploadPartRequest = new UploadPartRequest();
//        String bucketName = minioProperties.getBucketList().get(0);
//        uploadPartRequest.setBucketName(bucketName);
//        uploadPartRequest.setKey(chunkUploadEntity.getFilePath());
//        uploadPartRequest.setUploadId(chunkUploadEntity.getUploadId());
//        uploadPartRequest.setInputStream(inputStream);
//        // 设置分片大小，除了最后一个分片没有大小限制，其他的分片最小为100 KB
//        uploadPartRequest.setPartSize(chunkSize);
//        // 设置分片号，每一个上传的分片都有一个分片号，取值范围是1~10000，如果超出此范围，OSS将返回InvalidArgument错误码
//        uploadPartRequest.setPartNumber(chunkNumber);
//        // 每个分片不需要按顺序上传，甚至可以在不同客户端上传，OSS会按照分片号排序组成完整的文件
//        UploadPartResult uploadPartResult = amazonS3.uploadPart(uploadPartRequest);
//        // 每次上传分片之后，OSS的返回结果包含PartETag。PartETag将被保存在partETags中
//        PartETag partETag = uploadPartResult.getPartETag();
//        MyPartETag myPartETag = new MyPartETag(partETag.getPartNumber(), partETag.getETag());
//        chunkUploadEntity.getPartETagList().add(myPartETag);
//        redisTemplate.opsForValue().set(chunkUploadKey, chunkUploadEntity);
//    }

    /**
     * Uploads one part of a multipart upload and caches its ETag in Redis so the
     * merge step can later complete the upload.
     *
     * @param inputStream chunk content
     * @param chunkNumber part number (1~10000)
     * @param chunkSize   part size in bytes
     * @param uploadTask  task holding the session's bucket/key/uploadId
     * @param userId      uploading user id
     */
    private void doUploadMultipart(InputStream inputStream,Integer chunkNumber, Long chunkSize, UploadTask uploadTask, Long userId) {
        UploadPartRequest uploadPartRequest = new UploadPartRequest();
        String bucketName = minioProperties.getBucketList().get(0);
        uploadPartRequest.setBucketName(bucketName);
        uploadPartRequest.setKey(uploadTask.getObjectKey());
        uploadPartRequest.setUploadId(uploadTask.getUploadId());
        uploadPartRequest.setInputStream(inputStream);
        // Part size: every part except the last must meet the store's minimum (100 KB).
        uploadPartRequest.setPartSize(chunkSize);
        // Part number must be within 1~10000, otherwise the store rejects it with InvalidArgument.
        uploadPartRequest.setPartNumber(chunkNumber);
        // Parts may be uploaded out of order / from different clients; the store orders them by part number.
        UploadPartResult uploadPartResult = amazonS3.uploadPart(uploadPartRequest);
        // The returned PartETag is required later to complete the multipart upload.
        PartETag partETag = uploadPartResult.getPartETag();
        MyPartETag myPartETag = new MyPartETag(partETag.getPartNumber(), partETag.getETag());
        String uploadTaskKey = RedisKeyPrefix.FILE_CHUNK_TASK.getKey() + FileUtil.generateUploadTaskKey(userId, uploadTask.getFileIdentifier());
        redisTemplate.opsForSet().add(uploadTaskKey, myPartETag);
    }


    /**
     * Merges the database-tracked chunks of a file into one stored object and removes
     * the chunk bookkeeping rows on success.
     *
     * @return the object key of the merged file
     */
    private String doMergeChunks(String identifier, Long userId) {
        // Load every chunk recorded for this identifier + user.
        List<FileChunk> fileChunkList = this.getFileChunkList(identifier, userId);
        if (CollectionUtils.isNotEmpty(fileChunkList)) {
            List<String> filePathList = fileChunkList.stream().map(FileChunk::getRealPath).collect(Collectors.toList());
            try {
                String uploadId = FileUtil.generateChunkKey(identifier, userId);
                String filePath = this.mergeChunks(uploadId, filePathList);
                this.deleteFileChunkByIdentifierAndUserId(identifier, userId);
                return filePath;
            } catch (IOException e) {
                // Keep the I/O failure as the cause.
                throw new RuntimeException("合并失败", e);
            }
        }
        throw new RuntimeException("没有查询到分片信息，合并失败");
    }


    /**
     * Physically deletes user files: removes the user-file mappings, then deletes any
     * physical files that are no longer referenced by anyone.
     */
    @Override
    @Transactional
    public void physicalDeleteUserFiles(String fileIds, Long userId) {
        if (StringUtils.isBlank(fileIds) || Objects.isNull(userId)) {
            throw new RuntimeException("物理删除用户文件失败");
        }
        List<Long> requestedIds = StringListUtil.string2LongList(fileIds);
        List<UserFile> userFiles = userFileService.getBatchByFileId(requestedIds);
        List<Long> mappingIds = userFiles.stream().map(UserFile::getId).collect(Collectors.toList());
        // Every mapping must be removed, otherwise the transaction fails.
        if (userFileService.deleteBatchUseFileByFileIdAndUserId(mappingIds, userId) != mappingIds.size()) {
            throw new RuntimeException("物理删除用户文件失败");
        }
        Set<Long> unusedRealFileIds = assembleAllUnusedRealFileIdSet(userFiles);
        if (CollectionUtils.isEmpty(unusedRealFileIds)) {
            return;
        }
        // Remove the stored objects first, then their metadata rows.
        this.deleteBatchPhyFiles(unusedRealFileIds);
        this.deleteBatchFileByFileId(unusedRealFileIds);
    }

    /**
     * Deletes the stored objects for the given physical file ids (best effort; not
     * transactional with the database rows).
     */
    private void deleteBatchPhyFiles(Set<Long> realFileIdSet) {
        List<UploadFile> uploadFiles = uploadFileMapper.selectBatchIds(realFileIdSet);
        String bucketName = minioProperties.getBucketList().get(0);
        for (UploadFile uploadFile : uploadFiles) {
            amazonS3.deleteObject(bucketName, uploadFile.getRealPath());
        }
    }

    /**
     * Deletes the physical-file metadata rows; fails when not every row was removed.
     */
    private void deleteBatchFileByFileId(Collection<Long> fileIdList) {
        int deleted = uploadFileMapper.deleteBatchIds(fileIdList);
        if (deleted != fileIdList.size()) {
            throw new RuntimeException("删除物理文件信息失败");
        }
    }

    /**
     * Collects the real-file ids that are no longer referenced by any user file
     * (folders skipped) and can therefore be physically deleted.
     */
    private Set<Long> assembleAllUnusedRealFileIdSet(List<UserFile> rPanUserFileList) {
        return rPanUserFileList.stream()
                .filter(userFile -> !userFileService.isFolder(userFile))
                .map(UserFile::getRealFileId)
                .filter(realFileId -> !userFileService.checkRealFileUsed(realFileId))
                .collect(Collectors.toSet());
    }


}
