package com.imooc.pan.storage.engine.oss;

import cn.hutool.core.date.DateUtil;
import com.alibaba.fastjson.JSONObject;
import com.aliyun.oss.OSSClient;
import com.aliyun.oss.model.*;
import com.google.common.base.Joiner;
import com.google.common.base.Splitter;
import com.imooc.pan.core.constants.RPanConstants;
import com.imooc.pan.core.exception.RPanFrameworkException;
import com.imooc.pan.core.utils.FileUtil;
import com.imooc.pan.core.utils.UUIDUtil;
import com.imooc.pan.storage.engine.core.AbstractStorageEngine;
import com.imooc.pan.storage.engine.core.context.*;
import com.imooc.pan.storage.engine.oss.config.OssStorageEngineConfig;
import com.sun.xml.internal.bind.v2.schemagen.xmlschema.TopLevelAttribute;
import lombok.*;
import org.apache.commons.collections.CollectionUtils;
import org.apache.commons.lang3.StringUtils;
import org.assertj.core.util.Lists;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Component;

import java.io.IOException;
import java.io.Serializable;
import java.util.List;
import java.util.Objects;
import java.util.stream.Collectors;

/**
 * 基于OSS实现的文件存储引擎
 * @author hhb
 * @date 2024/6/10 16:51
 */
@Component
public class OssStorageEngine extends AbstractStorageEngine {

    /** OSS hard limit on the number of parts in one multipart upload. */
    private static final Integer TEN_THOUSAND_INT = 10000;

    /** Cache key template: oss_cache_upload_id_&lt;identifier&gt;_&lt;userId&gt;. */
    private static final String CACHE_KEY_TEMPLATE = "oss_cache_upload_id_%s_%s";

    private static final String IDENTIFIER_KEY = "identifier";

    private static final String UPLOAD_ID_KEY = "uploadId";

    private static final String USER_ID_KEY = "userId";

    private static final String PART_NUMBER_KEY = "partNumber";

    private static final String E_TAG_KEY = "eTag";

    private static final String PART_SIZE_KEY = "partSize";

    private static final String PART_CRC_KEY = "partCRC";


    @Autowired
    private OssStorageEngineConfig config;

    @Autowired
    private OSSClient ossClient;


    /**
     * Store a complete file by uploading it to OSS in a single request.
     *
     * @param context carries the filename and input stream; the generated object key is
     *                written back into the context as the real path
     * @throws IOException declared by the template method; OSS errors surface as runtime exceptions
     */
    @Override
    protected void doStoreFile(StoreFileContext context) throws IOException {
        String realPath = getFilePath(FileUtil.getFileSuffix(context.getFilename()));
        ossClient.putObject(config.getBucketName(), realPath, context.getInputStream());
        context.setRealPath(realPath);
    }


    /**
     * Delete files.
     *
     * For every stored path:
     * - if it is a chunk path (a path carrying URL-style parameters), recover the object
     *   name and upload id from the parameters, evict the cached upload entity, and abort
     *   the in-progress multipart upload instead of deleting an object;
     * - otherwise physically delete the object directly.
     *
     * @param context carries the list of real storage paths to delete
     * @throws IOException declared by the template method
     */
    @Override
    protected void doDeleteFile(DeleteFileContext context) throws IOException {
        List<String> realPathList = context.getRealPathList();
        realPathList.stream().forEach(realPath -> {
            if (checkHaveParams(realPath)) {
                // Chunk path: parse the appended parameters to recover the upload context.
                JSONObject params = analysisUrlParams(realPath);
                // NOTE(review): analysisUrlParams never returns null; the guard is kept for
                // safety. A malformed chunk path is silently skipped (neither aborted nor
                // deleted) — confirm this is the intended best-effort behavior.
                if (Objects.nonNull(params) && !params.isEmpty()) {
                    String uploadId = params.getString(UPLOAD_ID_KEY);
                    String identifier = params.getString(IDENTIFIER_KEY);
                    Long userId = params.getLong(USER_ID_KEY);
                    // Evict the cached upload entity so a retried upload re-initializes.
                    getCache().evict(getCacheKey(identifier, userId));
                    try {
                        AbortMultipartUploadRequest request =
                                new AbortMultipartUploadRequest(config.getBucketName(), getBaseUrl(realPath), uploadId);
                        ossClient.abortMultipartUpload(request);
                    } catch (Exception e) {
                        // TODO(review): prefer a logger over printStackTrace, and a
                        // cause-preserving exception constructor if the project provides one.
                        e.printStackTrace();
                        throw new RPanFrameworkException("删除文件分片失败");
                    }
                }
            } else {
                // Plain object path: delete directly.
                ossClient.deleteObject(config.getBucketName(), realPath);
            }
        });
    }

    /**
     * Upload one file chunk via the OSS multipart upload API.
     *
     * OSS multipart upload works in three steps:
     *  1. initialize the upload, obtaining a globally unique uploadId;
     *  2. upload each chunk (possibly concurrently), each carrying that uploadId;
     *  3. after all chunks are uploaded, issue the complete (merge) request.
     *
     * Concurrency notes: chunks arrive on multiple threads, so exactly one thread may
     * perform the initialization. This method is {@code synchronized} (a JVM-level lock;
     * replace with a distributed lock if the service is scaled out), and the resulting
     * uploadId / object-key pair is shared through the cache abstraction (which can be
     * backed by Redis in a distributed deployment).
     *
     * Each chunk's storage path is encoded as {@code objectKey?key=value&...} so the merge
     * step can later recover the uploadId and part metadata from the stored path alone.
     *
     * Steps:
     *  1. reject uploads with more than 10000 chunks (OSS hard limit);
     *  2. compute the cache key from identifier + userId;
     *  3. fetch the cached upload entity (uploadId + object key);
     *  4. initialize the multipart upload if no entity is cached yet;
     *  5. upload this chunk;
     *  6. assemble the parameterized chunk URL and store it in the context for persistence.
     *
     * @param context carries identifier, userId, chunk number/size and the chunk stream
     * @throws IOException declared by the template method
     */
    @Override
    protected synchronized void doSaveChunkFile(StoreChunkFileContext context) throws IOException {
        if (context.getTotalChunks() > TEN_THOUSAND_INT) {
            throw new RPanFrameworkException("分片个数超过了限制，分片数不得大于：" + TEN_THOUSAND_INT);
        }
        // Resolve (or lazily create) the shared upload entity for this file + user.
        String cacheKey = getCacheKey(context.getIdentifier(), context.getUserId());
        ChunkUploadEntity entity = getCache().get(cacheKey, ChunkUploadEntity.class);
        if (Objects.isNull(entity)) {
            entity = initChunkUpload(context.getFilename(), cacheKey);
        }

        UploadPartRequest request = new UploadPartRequest();
        request.setBucketName(config.getBucketName());
        request.setKey(entity.getObjectKey());
        request.setUploadId(entity.getUploadId());
        request.setInputStream(context.getInputStream());
        request.setPartSize(context.getCurrentChunkSize());
        request.setPartNumber(context.getChunkNumber());

        UploadPartResult result = ossClient.uploadPart(request);

        if (Objects.isNull(result)) {
            throw new RPanFrameworkException("文件分片上传失败");
        }

        // The part tag carries everything the merge step needs for this chunk.
        PartETag partETag = result.getPartETag();

        // Encode the chunk metadata as URL parameters appended to the object key.
        JSONObject params = new JSONObject();
        params.put(IDENTIFIER_KEY, context.getIdentifier());
        params.put(UPLOAD_ID_KEY, entity.getUploadId());
        params.put(USER_ID_KEY, context.getUserId());
        params.put(PART_NUMBER_KEY, partETag.getPartNumber());
        params.put(E_TAG_KEY, partETag.getETag());
        params.put(PART_SIZE_KEY, partETag.getPartSize());
        params.put(PART_CRC_KEY, partETag.getPartCRC());

        String realPath = assembleUrl(entity.getObjectKey(), params);

        context.setRealPath(realPath);
    }


    /**
     * Merge previously uploaded chunks into the final object.
     *
     * Steps: look up the cached upload entity for the global uploadId; parse every chunk
     * URL from the context back into {@link PartETag}s; issue the complete-multipart-upload
     * request; evict the cache entry; store the final object key in the context.
     *
     * @param context carries identifier, userId and the list of chunk URLs
     * @throws IOException declared by the template method
     */
    @Override
    protected void doMergeFile(MergeFileContext context) throws IOException {
        String cacheKey = getCacheKey(context.getIdentifier(), context.getUserId());
        ChunkUploadEntity entity = getCache().get(cacheKey, ChunkUploadEntity.class);
        if (Objects.isNull(entity)) {
            throw new RPanFrameworkException("文件分片合并失败，文件唯一标识为：" + context.getIdentifier());
        }
        List<String> chunkPaths = context.getRealPathList();
        List<PartETag> partETags = Lists.newArrayList();
        if (CollectionUtils.isNotEmpty(chunkPaths)) {
            // Rebuild each chunk's PartETag from the parameters encoded in its stored URL.
            partETags = chunkPaths.stream()
                    .filter(StringUtils::isNotBlank)
                    .map(this::analysisUrlParams)
                    .filter(Objects::nonNull)
                    .filter(jsonObject -> !jsonObject.isEmpty())
                    .map(jsonObject -> new PartETag(jsonObject.getIntValue(PART_NUMBER_KEY),
                            jsonObject.getString(E_TAG_KEY),
                            jsonObject.getLongValue(PART_SIZE_KEY),
                            jsonObject.getLong(PART_CRC_KEY))).collect(Collectors.toList());
        }
        CompleteMultipartUploadRequest request =
                new CompleteMultipartUploadRequest(config.getBucketName(), entity.getObjectKey(), entity.getUploadId(), partETags);
        CompleteMultipartUploadResult result = ossClient.completeMultipartUpload(request);
        if (Objects.isNull(result)) {
            throw new RPanFrameworkException("文件分片合并失败，文件唯一标识为：" + context.getIdentifier());
        }
        // The upload is finished; the cached entity is no longer needed.
        getCache().evict(cacheKey);
        // Persist the final object key as the file's real path.
        context.setRealPath(entity.getObjectKey());
    }

    /**
     * Read the object's content and copy it into the context's output stream.
     *
     * @param context carries the object key (real path) and the target output stream
     * @throws IOException declared by the template method
     */
    @Override
    protected void doReadFile(ReadFileContext context) throws IOException {
        OSSObject ossObject = ossClient.getObject(config.getBucketName(), context.getRealPath());
        if (Objects.isNull(ossObject)) {
            throw new RPanFrameworkException("读取文件内容失败");
        }
        FileUtil.writeStreamTOStreamNormal(ossObject.getObjectContent(), context.getOutputStream());
    }

    /***********************************************private***********************************************/

    /**
     * Initialize an OSS multipart upload and cache the resulting entity.
     *
     * Fix: the object key is built from the file's SUFFIX (consistent with
     * {@link #doStoreFile}); previously the whole filename was passed to
     * {@link #getFilePath(String)}, producing keys like {@code <uuid><fullFilename>}
     * and leaking the original filename into the object key.
     *
     * @param filename the original filename (only its suffix is used for the key)
     * @param cacheKey cache key under which the upload entity is stored
     * @return the cached entity holding the uploadId and object key
     */
    private ChunkUploadEntity initChunkUpload(String filename, String cacheKey) {
        String filePath = getFilePath(FileUtil.getFileSuffix(filename));
        InitiateMultipartUploadRequest request = new InitiateMultipartUploadRequest(config.getBucketName(), filePath);
        InitiateMultipartUploadResult result = ossClient.initiateMultipartUpload(request);
        if (Objects.isNull(result)) {
            throw new RPanFrameworkException("初始化文件分片上传失败");
        }
        ChunkUploadEntity entity = new ChunkUploadEntity();
        entity.setUploadId(result.getUploadId());
        entity.setObjectKey(filePath);
        // Share the entity across all chunk-upload threads via the cache.
        getCache().put(cacheKey, entity);
        return entity;
    }


    /**
     * Carrier of the global state produced by initializing a multipart upload:
     * the uploadId and the target object key shared by every chunk.
     */
    @AllArgsConstructor
    @NoArgsConstructor
    @Getter
    @Setter
    @EqualsAndHashCode
    @ToString
    public static class ChunkUploadEntity implements Serializable {

        /**
         * Globally unique uploadId of the multipart upload.
         */
        private String uploadId;

        /**
         * Object key (name) the chunks are uploaded under.
         */
        private String objectKey;
    }

    /**
     * Build the cache key for a file's upload entity.
     *
     * @param identifier the file's unique identifier
     * @param userId     the uploading user's id
     * @return formatted cache key
     */
    public static String getCacheKey(String identifier, Long userId) {
        return String.format(CACHE_KEY_TEMPLATE, identifier, userId);
    }

    /**
     * Assemble a URL of the form {@code baseUrl?k1=v1&k2=v2...} from the given parameters.
     *
     * @param baseUrl the object key
     * @param params  parameters to append; when null/empty the base URL is returned as-is
     * @return the assembled URL
     */
    private String assembleUrl(String baseUrl, JSONObject params) {
        if (Objects.isNull(params) || params.isEmpty()) {
            return baseUrl;
        }
        List<String> paramsList = params.entrySet().stream()
                .map(entry -> entry.getKey() + RPanConstants.EQUALS_MARK_STR + entry.getValue())
                .collect(Collectors.toList());
        return baseUrl
                + RPanConstants.QUESTION_MARK_STR
                + Joiner.on(RPanConstants.AND_MARK_STR).join(paramsList);
    }

    /**
     * Strip the parameter part of a URL, returning only the base (object key).
     *
     * @param url possibly-parameterized URL
     * @return the part before '?', the empty string for blank input, or the input unchanged
     */
    private String getBaseUrl(String url) {
        if (StringUtils.isBlank(url)) {
            return RPanConstants.EMPTY_STR;
        }
        if (checkHaveParams(url)) {
            return url.split(getSplitMark(RPanConstants.QUESTION_MARK_STR))[0];
        }
        return url;
    }

    /**
     * Check whether the URL carries appended parameters (contains a '?').
     *
     * @param url the URL to inspect
     * @return true when the URL is non-blank and contains a question mark
     */
    private boolean checkHaveParams(String url) {
        return StringUtils.isNotBlank(url) && url.indexOf(RPanConstants.QUESTION_MARK_STR) != RPanConstants.MINUS_ONE_INT;
    }

    /**
     * Wrap a split mark in a regex character class.
     * {@link String#split(String)} treats its argument as a regex, and characters such
     * as '?' are regex metacharacters; bracketing them makes the split literal.
     *
     * @param mark the literal separator
     * @return the separator wrapped as {@code [mark]}
     */
    private String getSplitMark(String mark) {
        return RPanConstants.LEFT_BRACKET_STR + mark + RPanConstants.RIGHT_BRACKET_STR;
    }


    /**
     * Build a date-partitioned object key: {@code yyyy/M/d/<uuid><suffix>}.
     * Hutool's {@code DateUtil.thisMonth()} is zero-based, hence the {@code + 1}.
     *
     * @param fileSuffix the file suffix (including the dot) appended to the UUID
     * @return the generated object key
     */
    private static String getFilePath(String fileSuffix) {
        return new StringBuilder()
                .append(DateUtil.thisYear())
                .append(RPanConstants.SLASH_STR)
                .append(DateUtil.thisMonth() + 1)
                .append(RPanConstants.SLASH_STR)
                .append(DateUtil.thisDayOfMonth())
                .append(RPanConstants.SLASH_STR)
                .append(UUIDUtil.getUUID())
                .append(fileSuffix)
                .toString();
    }

    /**
     * Parse the parameters appended to a URL into a JSONObject.
     * Always returns a non-null object; it is empty when the URL has no parameters
     * or every pair is malformed.
     *
     * @param url possibly-parameterized URL
     * @return parsed key/value pairs (never null)
     */
    private JSONObject analysisUrlParams(String url) {
        JSONObject result = new JSONObject();
        if (!checkHaveParams(url)) {
            return result;
        }
        String paramsPart = url.split(getSplitMark(RPanConstants.QUESTION_MARK_STR))[1];
        if (StringUtils.isNotBlank(paramsPart)) {
            List<String> paramPairList = Splitter.on(RPanConstants.AND_MARK_STR).splitToList(paramsPart);
            paramPairList.forEach(paramPair -> {
                String[] paramArr = paramPair.split(getSplitMark(RPanConstants.EQUALS_MARK_STR));
                // Only well-formed "key=value" pairs are kept; anything else is dropped.
                if (paramArr != null && paramArr.length == RPanConstants.TWO_INT) {
                    result.put(paramArr[0], paramArr[1]);
                }
            });
        }
        return result;
    }
}
