package com.atghost.minio.service.impl;

import cn.hutool.core.io.IoUtil;
import com.alibaba.fastjson.JSON;
import com.amazonaws.SdkClientException;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.iterable.S3Objects;
import com.amazonaws.services.s3.model.*;
import com.atghost.minio.config.MinioProperties;
import com.atghost.minio.constant.FileStorageUtils;
import com.atghost.minio.dto.ChunkUploadContext;
import com.atghost.minio.dto.req.CompleteUploadChunkReq;
import com.atghost.minio.dto.req.StartChunkUploadReq;
import com.atghost.minio.dto.req.UploadChunkReq;
import com.atghost.minio.dto.res.FileInodeUploadRes;
import com.atghost.minio.dto.res.StartChunkUploadRes;
import com.atghost.minio.dto.res.StatusCode;
import com.atghost.minio.dto.res.UploadChunkRes;
import com.atghost.minio.entity.FileInodeEntity;
import com.atghost.minio.mapper.FileInodeMapper;
import com.atghost.minio.mapper.FileRefMapper;
import com.atghost.minio.service.MinioFileService;
import com.atghost.minio.util.FileUrlUtils;
import com.atghost.minio.util.response.ComErrorCode;
import com.atghost.minio.util.response.RespResult;
import com.atghost.miniofile.config.MinioObject;
import com.baomidou.mybatisplus.core.conditions.query.LambdaQueryWrapper;
import com.fasterxml.jackson.databind.ObjectMapper;
import lombok.extern.slf4j.Slf4j;
import org.apache.catalina.connector.ClientAbortException;
import org.apache.commons.io.IOUtils;
import org.apache.commons.io.input.CountingInputStream;
import org.springframework.beans.BeanUtils;
import org.springframework.util.ObjectUtils;
import org.springframework.util.StringUtils;
import org.bouncycastle.crypto.Digest;
import org.bouncycastle.crypto.digests.GeneralDigest;
import org.bouncycastle.crypto.digests.SHA256Digest;
import org.bouncycastle.crypto.io.DigestInputStream;
import org.bouncycastle.util.encoders.Hex;
import org.slf4j.MDC;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.data.redis.core.RedisTemplate;
import org.springframework.stereotype.Service;

import javax.annotation.Resource;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import java.io.*;
import java.net.URLDecoder;
import java.net.URLEncoder;
import java.nio.charset.StandardCharsets;
import java.util.*;
import java.util.concurrent.ConcurrentHashMap;


/**
 * @author Admin
 * @version 1.0
 * @date 2024-5-12 21:37
 **/
@Slf4j
@Service
public class MinioFileServiceImpl implements MinioFileService {

    @Resource
    private FileInodeMapper fileInodeMapper;
    @Resource
    private FileRefMapper fileRefMapper;

    @Resource
    private AmazonS3 amazonS3;
    @Resource
    private MinioProperties minioProperties;

    @Resource(name = "jsonRedisTemplate")
    private RedisTemplate<String, Serializable> redisTemplate;

    /**
     * Chunk size in bytes, default 16M (16777216).
     **/
    @Value("${file.upload.chunk-size:16777216}")
    private Integer chunkSize;

    @Autowired
    private ObjectMapper objectMapper;

    /**
     * In-memory expiry of an upload session, default 1 hour.
     * (Field renamed from the misspelled {@code uploadSessioMemorynExpire}; it is private,
     * so the rename is invisible to callers. The config key is unchanged.)
     **/
    @Value("${file.upload.upload-session-memory-expire:3600000}")
    private Long uploadSessionMemoryExpire;

    /**
     * The only digest algorithm accepted from clients.
     */
    public static final String DIGEST_ALGORITHM = "SHA-256";

    /**
     * Upload sessions keyed by "algorithm: digest".
     * FIX: was a plain HashMap although compute()/computeIfPresent() run on concurrent HTTP
     * threads; ConcurrentHashMap makes those remapping callbacks atomic and thread-safe.
     * NOTE(review): expired contexts are never evicted in this class — presumably a scheduled
     * sweep exists elsewhere; confirm.
     */
    private final Map<String, ChunkUploadContext> contextMap = new ConcurrentHashMap<>();

    /**
     * Redis hash caching per-object metadata for media playback.
     */
    private static final String OBJECT_INFO_LIST = "com:minio:media:objectList";

    /**
     * Starts (or resumes) a chunked upload session.
     * <p>If an object with the same digest already exists in the object store, answers with
     * the existing file's info (instant upload); otherwise calculates chunk count/size and
     * initiates a multipart upload.
     *
     * @param startChunkUploadReq digest, digest algorithm, file name and total size
     * @return a {@link StartChunkUploadRes}, either the "already exists" form or the session form
     * @throws RuntimeException if the digest algorithm is not {@link #DIGEST_ALGORITHM}
     */
    @Override
    public Object startChunkUpload(StartChunkUploadReq startChunkUploadReq) {
        if (!DIGEST_ALGORITHM.equals(startChunkUploadReq.getDigestAlgorithm())) {
            throw new RuntimeException("不支持的摘要算法: " + startChunkUploadReq.getDigestAlgorithm());
        }
        // Derive the object key (its location inside minio) from the file digest.
        String key = FileStorageUtils.sha256ToFileKey(startChunkUploadReq.getDigest());
        String originFileName = startChunkUploadReq.getFileName();
        // FIX: indexOf(".") threw StringIndexOutOfBoundsException for names without a dot;
        // use lastIndexOf (conventional extension) and tolerate extension-less names.
        int dotIdx = originFileName.lastIndexOf('.');
        String suffix = dotIdx >= 0 ? originFileName.substring(dotIdx) : "";
        key = key + suffix;
        // Step 1: the object already exists on the file server -> instant upload.
        if (isFileExist(key)) {
            log.info("文件服务器已存在-------------秒传");
            // sha256 is unique in file_inode, so look the record up by digest.
            FileInodeEntity fileInode = fileInodeMapper.selectOne(new LambdaQueryWrapper<FileInodeEntity>()
                    .eq(FileInodeEntity::getSha256, startChunkUploadReq.getDigest()));
            long fileSize = getFileSize(key);
            if (null == fileInode) {
                // No inode row yet for this digest: create it so the file is tracked.
                fileInode = createFileInode(startChunkUploadReq.getDigest(), fileSize);
            }
            // Step 2: return the existing file's info to the frontend.
            return getFileStartChunkUpload(fileInode, fileSize, startChunkUploadReq.getFileName());
        }
        // Not stored yet: compute chunk size/count and open an upload session.
        return calculateFileInfo(startChunkUploadReq, key);
    }

    /**
     * Uploads one chunk of an active session.
     * <p>Chunks must arrive strictly in order ({@code nextChunkId}); an out-of-order chunk is
     * ignored and the current {@code nextChunkId} is returned so the client can resync.
     *
     * @throws RuntimeException on size mismatch, network failure or an expired session
     */
    @Override
    public UploadChunkRes chunkUpload(UploadChunkReq req) {
        if (req.getSize() > chunkSize) {
            throw new RuntimeException("分片大小不能超过: " + chunkSize);
        }
        String id = req.getId();
        // Exceptions raised inside the atomic map callback are carried out through this slot.
        Throwable[] exceptions = new Throwable[1];
        ChunkUploadContext v = contextMap.computeIfPresent(id, (k, ctx) -> {
            ctx.setExpireTime(System.currentTimeMillis() + uploadSessionMemoryExpire);
            if (!Objects.equals(req.getChunkId(), ctx.getNextChunkId())) {
                log.warn("并发上传同一个文件, nextChunkId={}, chunkId={}", ctx.getNextChunkId(), req.getChunkId());
                return ctx;
            }
            CountingInputStream countingInputStream = null;
            DigestInputStream digestInputStream = null;
            try (InputStream input = req.getFile()) {
                // Count the bytes while streaming so the declared size can be verified after.
                countingInputStream = new CountingInputStream(input);
                // Feed a COPY of the running digest so a failed chunk does not corrupt it.
                digestInputStream = new DigestInputStream(countingInputStream,
                        (Digest) ctx.getMessageDigest().copy());

                String etag;
                try {
                    // Every part except the last must be >= 5MB (S3 multipart rule).
                    boolean lastPart = ctx.getChunkCount().equals(req.getChunkId());
                    etag = uploadChunk(ctx.getFileKey(), ctx.getUploadId(), req.getChunkId(),
                            req.getSize(), digestInputStream, lastPart);
                } catch (SdkClientException e) {
                    exceptions[0] = new RuntimeException("网络传输异常, 请刷新页面重试: " + e.getMessage(), e);
                    return ctx;
                }
                long byteCount = countingInputStream.getByteCount();
                if (byteCount != req.getSize()) {
                    // Size mismatch: abort the whole multipart upload and drop the session.
                    try {
                        cancelUploadChunk(ctx.getFileKey(), ctx.getUploadId());
                        exceptions[0] = new RuntimeException(
                                String.format("分片尺寸不正确, 预期 %s byte, 实际 %s byte", req.getSize(), byteCount));
                    } catch (Throwable e) {
                        exceptions[0] = new RuntimeException(
                                String.format("分片尺寸不正确, 预期 %s byte, 实际 %s byte", req.getSize(), byteCount), e);
                    }
                    return null;
                }
                // Commit the chunk: advance the session digest, uploaded size and etag map.
                ctx.update(byteCount, req.getChunkId(), digestInputStream.getDigest(), etag);
                return ctx;
            } catch (IOException e) {
                exceptions[0] = new RuntimeException("上传失败, 网络异常, 请重试!", e);
                return ctx;
            } finally {
                IoUtil.close(digestInputStream);
                IoUtil.close(countingInputStream);
                IoUtil.close(req.getFile());
            }
        });
        if (exceptions[0] != null) {
            Throwable e = exceptions[0];
            if (e instanceof RuntimeException) {
                throw (RuntimeException) e;
            }
            throw new RuntimeException(e);
        }
        if (v == null) {
            throw new RuntimeException(String.format("上传会话失效, 请刷新页面重试! traceId=%s", MDC.get("x-traceId")));
        }
        UploadChunkRes res = new UploadChunkRes();
        res.setNextChunkId(v.getNextChunkId());
        return res;
    }

    /**
     * Completes a chunked upload: verifies the accumulated digest and size, merges the parts
     * on the object store, persists the inode row and builds the response.
     * Idempotent for an already-completed session.
     *
     * @throws RuntimeException on digest/size mismatch, merge failure or an expired session
     */
    @Override
    public FileInodeUploadRes completeChunkUpload(CompleteUploadChunkReq req) {
        RuntimeException[] exceptions = new RuntimeException[1];
        ChunkUploadContext v = contextMap.computeIfPresent(req.getId(), (id, ctx) -> {
            // A repeated complete call on a finished session is a no-op.
            if (Boolean.TRUE.equals(ctx.getComplete())) {
                return ctx;
            }

            String uploadId = ctx.getUploadId();
            String fileKey = ctx.getFileKey();
            // Finalize a COPY of the running digest and compare with the client-declared one.
            GeneralDigest digest = (GeneralDigest) ctx.getMessageDigest().copy();
            byte[] finalDigest = new byte[digest.getDigestSize()];
            digest.doFinal(finalDigest, 0);
            String hex = Hex.toHexString(finalDigest).toLowerCase();
            if (!Objects.equals(hex, ctx.getDigest())) {
                try {
                    cancelUploadChunk(fileKey, uploadId);
                    exceptions[0] = new RuntimeException("上传失败, 文件摘要不匹配, 请重试!");
                } catch (Throwable e) {
                    exceptions[0] = new RuntimeException("上传失败, 文件摘要不匹配, 请重试!", e);
                }
                return null;
            }
            // FIX: '!=' on the getters is an identity compare when they return boxed Longs;
            // Objects.equals compares values (both sides are long/Long byte counts).
            if (!Objects.equals(ctx.getUploadSize(), ctx.getExpectSize())) {
                try {
                    cancelUploadChunk(fileKey, uploadId);
                    exceptions[0] = new RuntimeException("上传失败, 文件大小不匹配, 请重试!");
                } catch (Throwable e) {
                    exceptions[0] = new RuntimeException("上传失败, 文件大小不匹配, 请重试!", e);
                }
                return null;
            }
            // etags is a TreeMap, so iteration yields parts in ascending part-number order,
            // which is what CompleteMultipartUpload requires.
            List<PartETag> partETags = new ArrayList<>();
            for (Map.Entry<Integer, String> entry : ctx.getEtags().entrySet()) {
                partETags.add(new PartETag(entry.getKey(), entry.getValue()));
            }

            try {
                completeUploadChunk(fileKey, uploadId, partETags);
                ctx.setComplete(true);
                // Done: shorten the session TTL to 10 minutes so memory is released soon.
                ctx.setExpireTime(System.currentTimeMillis() + 600 * 1000);
                log.info("…………fileKey…………:{}", fileKey);
            } catch (Throwable e) {
                exceptions[0] = new RuntimeException("合并分片失败, 请重试!", e);
                return null;
            }
            return ctx;
        });
        if (exceptions[0] != null) {
            throw exceptions[0];
        }
        if (v == null) {
            throw new RuntimeException(String.format("上传会话失效, 请刷新页面重试! traceId=%s", MDC.get("x-traceId")));
        }
        FileInodeEntity inode = createFileInode(v.getDigest(), v.getUploadSize());
        FileInodeUploadRes res = new FileInodeUploadRes();
        res.setId(inode.getId());
        res.setSha256(inode.getSha256());
        res.setSize(inode.getSize());
        res.setCreateTime(inode.getCreateTime());

        // Slashes would be unsafe inside a URL path segment, so the stored key is exposed
        // with ':' separators (videoPlay converts them back).
        String urlEncodeFileKey = v.getFileKey().replaceAll("/", ":");
        log.info("文件存储路径路径:{}", urlEncodeFileKey);
        res.setUrl(urlEncodeFileKey);
        res.setBucketName(minioProperties.getBucketName());
        res.setShowName(v.getFileName());
        log.info("上传成功:{}", JSON.toJSONString(res));
        return res;
    }

    /**
     * Streams a video object with HTTP Range (seek / resume) support.
     *
     * @param objectName object key with ':' separators (as returned by completeChunkUpload)
     */
    @Override
    public void videoPlay(HttpServletRequest req, HttpServletResponse res, String bucketName, String objectName) {
        // Keys are exposed with ':' separators; convert back to the real object path.
        objectName = objectName.replaceAll(":", "/");
        String key = bucketName + "/" + objectName;
        // Metadata is cached in redis to avoid one store round-trip per range request.
        Object obj = redisTemplate.boundHashOps(OBJECT_INFO_LIST).get(key);
        MinioObject minioObject;
        // FIX: the original cast obj without a null check and NPE'd on every cache miss.
        if (obj == null || ((MinioObject) obj).getObject() == null) {
            S3Object objectResponse;
            try {
                objectResponse = amazonS3.getObject(bucketName, objectName);
            } catch (Exception e) {
                log.error("{}中{}不存在: {}", bucketName, objectName, e.getMessage());
                res.setCharacterEncoding(StandardCharsets.UTF_8.name());
                res.setContentType("application/json;charset=utf-8");
                res.setStatus(HttpServletResponse.SC_NOT_FOUND);
                try {
                    res.getWriter().write(objectMapper.writeValueAsString(RespResult.error(ComErrorCode.NOT_FOUND)));
                } catch (IOException ex) {
                    throw new RuntimeException(ex);
                }
                return;
            }
            try {
                // Only mp4 playback is supported.
                String filenameExtension = StringUtils.getFilenameExtension(objectName);
                if (ObjectUtils.isEmpty(filenameExtension) || !"mp4".equalsIgnoreCase(filenameExtension)) {
                    throw new IllegalArgumentException("不支持的媒体类型, 文件名: " + objectName);
                }
                minioObject = new MinioObject();
                BeanUtils.copyProperties(objectResponse, minioObject);
                minioObject.setBucket(objectResponse.getBucketName());
                minioObject.setSize(objectResponse.getObjectMetadata().getContentLength());
            } finally {
                // FIX: S3Object wraps an open HTTP connection; the original never closed it
                // (and leaked it when the mp4 check threw).
                IoUtil.close(objectResponse);
            }
            redisTemplate.boundHashOps(OBJECT_INFO_LIST).put(key, minioObject);
        } else {
            minioObject = (MinioObject) obj;
        }

        long fileSize = minioObject.getSize();
        res.setHeader("Accept-Ranges", "bytes");
        // startPos = first byte to send, endPos = last byte to send (inclusive).
        long startPos = 0;
        long endPos = fileSize - 1;
        String rangeHeader = req.getHeader("Range");
        if (!ObjectUtils.isEmpty(rangeHeader) && rangeHeader.startsWith("bytes=")) {
            try {
                // Forms: "bytes=100-", "bytes=100-200" or "bytes=-200" (suffix length).
                String numRang = rangeHeader.replaceAll("bytes=", "");
                if (numRang.startsWith("-")) {
                    // Suffix range: the last N bytes of the file.
                    endPos = fileSize - 1;
                    startPos = endPos - Long.parseLong(numRang.substring(1)) + 1;
                } else if (numRang.endsWith("-")) {
                    endPos = fileSize - 1;
                    startPos = Long.parseLong(numRang.substring(0, numRang.length() - 1));
                } else {
                    String[] strRange = numRang.split("-");
                    if (strRange.length == 2) {
                        startPos = Long.parseLong(strRange[0].trim());
                        endPos = Long.parseLong(strRange[1].trim());
                    } else {
                        startPos = Long.parseLong(numRang.replaceAll("-", "").trim());
                    }
                }

                if (startPos < 0 || endPos < 0 || endPos >= fileSize || startPos > endPos) {
                    // 416: requested range not satisfiable.
                    res.setStatus(HttpServletResponse.SC_REQUESTED_RANGE_NOT_SATISFIABLE);
                    return;
                }

                // 206 partial content for seeking / resumed playback.
                res.setStatus(HttpServletResponse.SC_PARTIAL_CONTENT);
            } catch (NumberFormatException e) {
                log.error(rangeHeader + " is not Number!");
                startPos = 0;
            }
        }

        // Total number of bytes to send.
        long rangLength = endPos - startPos + 1;
        res.setHeader("Content-Range", String.format("bytes %d-%d/%d", startPos, endPos, fileSize));
        res.addHeader("Content-Length", String.valueOf(rangLength));
        res.addHeader("Content-Type", "video/mp4");

        // FIX: the original always streamed the WHOLE object, even for a 206 response, so the
        // payload never matched Content-Length and seeking was broken. Request only the byte
        // range from the object store.
        GetObjectRequest rangeRequest = new GetObjectRequest(bucketName, objectName)
                .withRange(startPos, endPos);
        try (S3Object s3Object = amazonS3.getObject(rangeRequest);
             BufferedInputStream bis = new BufferedInputStream(s3Object.getObjectContent());
             BufferedOutputStream bos = new BufferedOutputStream(res.getOutputStream())) {
            IOUtils.copy(bis, bos);
        } catch (IOException e) {
            // A ClientAbortException just means the viewer seeked or closed the player.
            if (!(e instanceof ClientAbortException)) {
                log.error(e.getMessage(), e);
            }
        }
    }

    /**
     * Completes (merges) a multipart upload on the object store.
     *
     * @param partETags part ETags in ascending part-number order
     */
    public void completeUploadChunk(String fileKey, String uploadId, List<PartETag> partETags) {
        log.info("completeUploadChunk {} {}", fileKey, uploadId);
        CompleteMultipartUploadRequest uploadRequest = new CompleteMultipartUploadRequest();
        uploadRequest.setBucketName(minioProperties.getBucketName());
        uploadRequest.setUploadId(uploadId);
        uploadRequest.setKey(fileKey);
        uploadRequest.setPartETags(partETags);
        amazonS3.completeMultipartUpload(uploadRequest);
    }

    /**
     * Aborts a multipart upload, discarding all already-uploaded parts.
     */
    private void cancelUploadChunk(String fileKey, String uploadId) {
        amazonS3.abortMultipartUpload(
                new AbortMultipartUploadRequest(minioProperties.getBucketName(), fileKey, uploadId));
    }

    /**
     * Uploads a single part of a multipart upload.
     *
     * @return the ETag of the stored part, required for the final merge
     */
    private String uploadChunk(String fileKey, String uploadId, Integer chunkId, Long size,
                               DigestInputStream digestInputStream, boolean lastPart) {
        UploadPartRequest uploadPartRequest = new UploadPartRequest();
        uploadPartRequest.setBucketName(minioProperties.getBucketName());
        uploadPartRequest.setUploadId(uploadId);
        uploadPartRequest.setKey(fileKey);
        uploadPartRequest.setPartNumber(chunkId);
        uploadPartRequest.setInputStream(digestInputStream);
        uploadPartRequest.setPartSize(size);
        uploadPartRequest.setObjectMetadata(new ObjectMetadata());
        uploadPartRequest.getObjectMetadata().setContentLength(size);
        uploadPartRequest.setLastPart(lastPart);
        log.info("uploadChunk start {} {} {} {} {}", fileKey, uploadId, chunkId, size, lastPart);

        UploadPartResult uploadPartResult = amazonS3.uploadPart(uploadPartRequest);
        log.info("uploadChunk finish:{}", uploadPartResult);

        return uploadPartResult.getETag();
    }

    /**
     * Builds (or refreshes) the upload session for a file not yet on the server:
     * chunk size, chunk count, multipart uploadId and the running digest.
     */
    private StartChunkUploadRes calculateFileInfo(StartChunkUploadReq startChunkUploadReq, String key) {
        // Chunk size (default 16M, configurable).
        int chkSize = chunkSize;
        Long fileSize = startChunkUploadReq.getFileSize();
        // Session id = "algorithm: digest" (the space is part of the historical format).
        String id = startChunkUploadReq.getDigestAlgorithm() + ": " + startChunkUploadReq.getDigest();
        long now = System.currentTimeMillis();
        ChunkUploadContext ctx = contextMap.compute(id, (k, v) -> {
            if (v != null) {
                // Session already exists (client retried): just extend its lifetime.
                v.setExpireTime(now + uploadSessionMemoryExpire);
                return v;
            }
            v = new ChunkUploadContext();
            // Number of chunks, rounded up (FIX: was mixing chkSize and the chunkSize field).
            long chunkCount = (fileSize + chkSize - 1) / chkSize;
            v.setChunkCount(Math.toIntExact(chunkCount));
            v.setId(k);
            v.setChunkSize(chkSize);
            v.setFileName(startChunkUploadReq.getFileName());
            v.setFileKey(key);
            v.setDigest(startChunkUploadReq.getDigest());
            v.setDigestAlgorithm(startChunkUploadReq.getDigestAlgorithm());
            v.setExpectSize(fileSize);
            v.setMessageDigest(new SHA256Digest());
            // Etags must stay sorted by part number for the final merge.
            v.setEtags(new TreeMap<>());
            v.setExpireTime(now + uploadSessionMemoryExpire);

            v.setUploadId(startChunkUpload(key));
            v.setChunkDir(UUID.randomUUID().toString());
            // The first expected chunk is #1.
            v.setNextChunkId(1);
            return v;
        });
        StartChunkUploadRes res = new StartChunkUploadRes();
        res.setChunkSize(ctx.getChunkSize());
        res.setChunkCount(ctx.getChunkCount());
        res.setId(ctx.getId());
        res.setInstanceId(ctx.getInstanceId());
        res.setNextChunkId(ctx.getNextChunkId());
        res.setFileName(startChunkUploadReq.getFileName());
        return res;
    }

    /**
     * Initiates a multipart upload and returns the store-assigned uploadId.
     */
    public String startChunkUpload(String fileKey) {
        InitiateMultipartUploadRequest req = new InitiateMultipartUploadRequest(minioProperties.getBucketName(), fileKey);
        req.setObjectMetadata(new ObjectMetadata());
        InitiateMultipartUploadResult res = amazonS3.initiateMultipartUpload(req);
        return res.getUploadId();
    }

    /**
     * Builds the "file already exists" response returned for an instant upload.
     */
    public StartChunkUploadRes getFileStartChunkUpload(FileInodeEntity fileInode, long fileSize, String fileName) {
        StartChunkUploadRes res = new StartChunkUploadRes();

        FileInodeUploadRes fileInodeUploadRes = new FileInodeUploadRes();
        fileInodeUploadRes.setSha256(fileInode.getSha256());
        fileInodeUploadRes.setSize(fileSize);
        // Download url shape: /condor/fileDload/downloadByInodeId?fileInodeId=%s&fileName=%s
        fileInodeUploadRes.setUrl(FileUrlUtils.getUrlByFileInodeId(fileInode.getId(), fileName));
        fileInodeUploadRes.setShowName(fileName);
        fileInodeUploadRes.setCreateTime(fileInode.getCreateTime());
        res.setExist(fileInodeUploadRes);
        res.setFileName(fileName);
        return res;
    }

    /**
     * Persists a new file_inode row for the given digest and size.
     * The generated id is written back into the returned entity by the mapper.
     */
    public FileInodeEntity createFileInode(String digest, long fileSize) {
        FileInodeEntity fileIn = new FileInodeEntity();
        fileIn.setSha256(digest);
        fileIn.setSize(fileSize);
        fileIn.setCreateTime(new Date());
        fileIn.setUpdateTime(new Date());
        fileIn.setCreatorId(0L);
        fileIn.setUpdateId(0L);

        fileInodeMapper.insert(fileIn);

        return fileIn;
    }

    /**
     * Returns the stored object's size in bytes, or 0 for a blank key.
     *
     * @throws RuntimeException if the object cannot be queried (e.g. it does not exist)
     */
    public long getFileSize(String fileKey) {
        if (StringUtils.isEmpty(fileKey)) {
            return 0;
        }
        try {
            // FIX: the original called getObject(), which starts downloading the whole object
            // body just to read a header; getObjectMetadata() is a HEAD request.
            ObjectMetadata objectMetadata =
                    amazonS3.getObjectMetadata(minioProperties.getBucketName(), fileKey);
            return objectMetadata == null ? 0 : objectMetadata.getContentLength();
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
    }

    /**
     * Returns true if an object with this key exists in the configured bucket.
     */
    private boolean isFileExist(String key) {
        if (key == null || key.length() == 0) {
            return false;
        }
        return amazonS3.doesObjectExist(minioProperties.getBucketName(), key);
    }
}
