//package cn.sw.file.service.composite.impl;
//
//import cn.sw.api.system.file.FilesFragmentationApi;
//import cn.sw.common.database.domain.dto.form.FilesFragmentationFormDto;
//import cn.sw.common.database.domain.entity.FilesFragmentation;
//import cn.sw.common.http.HttpResponse;
//import cn.sw.common.utils.Mapper;
//import cn.sw.file.fragmentation.reactminiospring.common.R;
//import cn.sw.file.fragmentation.reactminiospring.enums.HttpCodeEnum;
//import cn.sw.file.fragmentation.reactminiospring.model.FileUploadInfo;
//import cn.sw.file.fragmentation.reactminiospring.model.UploadUrlsVO;
//import cn.sw.file.fragmentation.reactminiospring.util.MinioUtil;
//import cn.sw.file.fragmentation.reactminiospring.util.RedisUtil;
//import cn.sw.file.service.composite.CompositeFilesFragmentationService;
//import com.baomidou.mybatisplus.core.toolkit.Assert;
//import lombok.extern.slf4j.Slf4j;
//import org.springframework.stereotype.Service;
//
//import javax.annotation.Resource;
//import java.util.List;
//
//@Service
//@Slf4j
//public class CompositeFilesFragmentationServiceImpl implements CompositeFilesFragmentationService {
//
//    @Resource
//    RedisUtil redisUtil;
//
//    @Resource
//    MinioUtil  minioUtil;
//
//    @Resource
//    FilesFragmentationApi filesFragmentationApi;
//
//
//
//    @Override
//    public R<FileUploadInfo> checkFileByMd5(String md5) {
//        log.info("查询 <{}> 文件是否存在、是否进行断点续传", md5);
//        // 1.校验
//        Assert.notNull(md5, "md5 不能为空");
//
//        // 2.从Redis中查询
//        FileUploadInfo fileUploadInfo = (FileUploadInfo) redisUtil.get(md5);
//        if(fileUploadInfo != null){
//            List<Integer> listParts = minioUtil.getListParts(fileUploadInfo.getObject(), fileUploadInfo.getUploadId());//获取当前已上传的分片编号列表
//            fileUploadInfo.setListParts(listParts);
//            return R.http(HttpCodeEnum.UPLOADING, fileUploadInfo);
//        }
//
//        // 3.从Mysql中查询
//        HttpResponse<List<FilesFragmentation>> httpListFiles = (HttpResponse<List<FilesFragmentation>>) filesFragmentationApi.R(new FilesFragmentationFormDto(md5));
//        if("200".equals(httpListFiles.getCode()) && httpListFiles.getData() != null && httpListFiles.getData().size() > 0){
//            try {
//                FileUploadInfo uploadInfo = Mapper.map(httpListFiles.getData().get(0), FileUploadInfo.class);
//                return R.http(HttpCodeEnum.UPLOAD_SUCCESS, uploadInfo);
//            } catch (Exception e) {
//                throw new RuntimeException(e);
//            }
//        }
//
//        // 4.返回结果
//        return R.http(HttpCodeEnum.NOT_UPLOADED, null);
//    }
//
//    @Override
//    public R<UploadUrlsVO> initFragmentationUpload(FileUploadInfo fileUploadInfo) {
//        return null;
//    }
//}
package cn.sw.file.service.composite.impl;

import cn.hutool.core.date.DateUtil;
import cn.hutool.core.io.FileUtil;
import cn.hutool.core.util.StrUtil;
import cn.sw.api.system.file.FilesFragmentationApi;
import cn.sw.common.database.domain.dto.form.FilesFragmentationFormDto;
import cn.sw.common.database.domain.dto.query.FilesFragmentationQueryDto;
import cn.sw.common.database.domain.entity.FilesFragmentation;
import cn.sw.common.http.HttpResponse;
import cn.sw.common.utils.Mapper;
import cn.sw.file.config.MinIOConfigInfo;
import cn.sw.file.fragmentation.reactminiospring.common.R;
import cn.sw.file.fragmentation.reactminiospring.enums.HttpCodeEnum;
import cn.sw.file.fragmentation.reactminiospring.model.FileUploadInfo;
import cn.sw.file.fragmentation.reactminiospring.model.UploadUrlsVO;
import cn.sw.file.fragmentation.reactminiospring.util.MinioUtil;
import cn.sw.file.fragmentation.reactminiospring.util.RedisUtil;
import cn.sw.file.service.composite.CompositeFilesFragmentationService;
import com.baomidou.mybatisplus.core.toolkit.Assert;
import com.baomidou.mybatisplus.core.toolkit.CollectionUtils;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.common.collect.Lists;
import lombok.extern.slf4j.Slf4j;
import org.springframework.stereotype.Service;
import org.springframework.transaction.annotation.Transactional;

import javax.annotation.Resource;
import java.time.LocalDateTime;
import java.util.Collections;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.concurrent.TimeUnit;

@Service
@Slf4j
public class CompositeFilesFragmentationServiceImpl implements CompositeFilesFragmentationService {

    /**
     * Jackson mapper used to re-type OpenFeign's generic payloads ({@link LinkedHashMap}).
     * ObjectMapper is thread-safe once configured, so one shared instance is cached instead
     * of constructing a new one per request.
     */
    private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper();

    @Resource
    private RedisUtil redisUtil;

    @Resource
    private MinioUtil minioUtil;

    @Resource
    private MinIOConfigInfo minioConfigInfo;

    @Resource
    private FilesFragmentationApi filesFragmentationApi;

    /**
     * Converts one element of a remote-API response into a {@link FilesFragmentation}.
     * OpenFeign deserializes generic response bodies as {@link LinkedHashMap}, in which case
     * a field-name-based conversion is required (a bean mapper cannot match an untyped map);
     * otherwise the element is already the entity and can be cast directly.
     *
     * @param obj raw element from the remote response body; must not be {@code null}
     * @return the typed entity
     */
    private FilesFragmentation toFilesFragmentation(Object obj) {
        if (obj instanceof LinkedHashMap) {
            return OBJECT_MAPPER.convertValue(obj, FilesFragmentation.class);
        }
        return (FilesFragmentation) obj;
    }

    /**
     * Checks by MD5 whether a file is already fully uploaded, or has an in-progress
     * multipart upload that can be resumed.
     *
     * @param md5 content hash used as the lookup key; must not be null
     * @return UPLOADING (with the part numbers already uploaded), UPLOAD_SUCCESS
     *         (with the stored file info), or NOT_UPLOADED when the file is unknown
     */
    @Override
    public R<FileUploadInfo> checkFileByMd5(String md5) {
        log.info("查询 <{}> 文件是否存在、是否进行断点续传", md5);

        // 1. Validate input
        Assert.notNull(md5, "md5 不能为空");

        // 2. A Redis hit means an upload is in progress -> resume (断点续传)
        FileUploadInfo fileUploadInfo = (FileUploadInfo) redisUtil.get(md5);
        if (fileUploadInfo != null) {
            try {
                // Fetch the part numbers already uploaded so the client can skip them.
                List<Integer> listParts = minioUtil.getListParts(fileUploadInfo.getObject(), fileUploadInfo.getUploadId());
                fileUploadInfo.setListParts(listParts);
                log.info("文件 <{}> 已在 Redis 中找到，处于上传中状态", md5);
                return R.http(HttpCodeEnum.UPLOADING, fileUploadInfo);
            } catch (Exception e) {
                log.error("获取分片列表失败，md5={}", md5, e);
                throw new RuntimeException("获取分片列表失败", e);
            }
        }

        // 3. Fall back to MySQL via the remote API (the Feign payload must be re-typed,
        //    see toFilesFragmentation)
        try {
            HttpResponse<List<FilesFragmentation>> httpListFiles =
                    (HttpResponse<List<FilesFragmentation>>) filesFragmentationApi.R(new FilesFragmentationQueryDto(md5));

            if ("200".equals(httpListFiles.getCode()) && CollectionUtils.isNotEmpty(httpListFiles.getData())) {
                FilesFragmentation filesFragmentation = toFilesFragmentation(httpListFiles.getData().get(0));
                FileUploadInfo uploadInfo = Mapper.map(filesFragmentation, FileUploadInfo.class);
                log.info("文件 <{}> 在数据库中已存在，返回上传成功状态", md5);
                return R.http(HttpCodeEnum.UPLOAD_SUCCESS, uploadInfo);
            }
            log.info("文件 <{}> 在数据库中未找到，准备新上传", md5);
        } catch (Exception e) {
            log.error("调用接口或映射数据失败，md5={}", md5, e);
            throw new RuntimeException("调用接口或映射数据失败", e);
        }

        // 4. Unknown file: client should start a fresh upload
        return R.http(HttpCodeEnum.NOT_UPLOADED, null);
    }

    /**
     * Initializes an upload session: reuses any in-progress session found in Redis,
     * otherwise builds a unique object key (yyyy/MM/dd/&lt;name&gt;_&lt;md5&gt;.&lt;ext&gt;),
     * then produces presigned upload URL(s) — a single PUT URL for one chunk, or a
     * multipart-upload session for several chunks.
     *
     * @param fileUploadInfo upload metadata from the client (md5, original name,
     *                       content type, chunk count)
     * @return the presigned upload URL(s) and uploadId wrapped in {@code R.ok}
     */
    @Override
    public R<UploadUrlsVO> initFragmentationUpload(FileUploadInfo fileUploadInfo) {
        // 1. Reuse an existing session so a resumed upload keeps the same object key/uploadId.
        FileUploadInfo redisFileUploadInfo = (FileUploadInfo) redisUtil.get(fileUploadInfo.getMd5());
        String object;
        if (redisFileUploadInfo != null) {
            fileUploadInfo = redisFileUploadInfo;
            object = redisFileUploadInfo.getObject();
        } else {
            // 2. No session yet: build a unique storage path and record it on the upload info.
            String originFileName = fileUploadInfo.getOriginFileName();
            String suffix = FileUtil.extName(originFileName);
            String fileName = FileUtil.mainName(originFileName);
            // Rename with the md5 and store under a yyyy/MM/dd folder to avoid collisions.
            String nestFile = DateUtil.format(LocalDateTime.now(), "yyyy/MM/dd");
            object = nestFile + "/" + fileName + "_" + fileUploadInfo.getMd5() + "." + suffix;

            fileUploadInfo.setObject(object);
            fileUploadInfo.setType(suffix);
        }

        // 3. Choose the upload mode by chunk count.
        UploadUrlsVO urlsVO;
        if (fileUploadInfo.getChunkCount() == 1) {
            log.info("当前分片数量 <{}> 单文件上传", fileUploadInfo.getChunkCount());
            urlsVO = minioUtil.getUploadObjectUrl(fileUploadInfo.getContentType(), object);
        } else {
            log.info("当前分片数量 <{}> 分片上传", fileUploadInfo.getChunkCount());
            urlsVO = minioUtil.initMultiPartUpload(fileUploadInfo, object);
        }
        fileUploadInfo.setUploadId(urlsVO.getUploadId());

        // 4. Persist the session in Redis (single-chunk uploads are stored too, so that
        //    mergeMultipartUpload can later record them in the database).
        redisUtil.set(fileUploadInfo.getMd5(), fileUploadInfo, minioConfigInfo.getBreakpointTime(), TimeUnit.DAYS);

        // 5. Return the presigned URL information.
        return R.ok(urlsVO);
    }

    /**
     * Completes an upload: merges the MinIO multipart parts (skipped for single-chunk
     * uploads), records the file in the database via the remote API, and clears the
     * Redis session.
     *
     * @param md5 key of the upload session stored in Redis
     * @return SUCCESS with the final object URL, or UPLOAD_FILE_FAILED
     */
    @Override
    @Transactional
    public R<String> mergeMultipartUpload(String md5) {
        // The session must still exist in Redis; it may have expired (breakpointTime)
        // or never been initialized — guard instead of hitting an NPE below.
        FileUploadInfo redisFileUploadInfo = (FileUploadInfo) redisUtil.get(md5);
        if (redisFileUploadInfo == null) {
            log.warn("合并失败，Redis 中不存在 md5 <{}> 对应的上传记录", md5);
            return R.http(HttpCodeEnum.UPLOAD_FILE_FAILED, null);
        }

        String url = StrUtil.format("{}/{}/{}", minioConfigInfo.getEndpoint(), minioConfigInfo.getBucket(), redisFileUploadInfo.getObject());

        try {
            FilesFragmentationFormDto filesFragmentation = Mapper.map(redisFileUploadInfo, FilesFragmentationFormDto.class);
            filesFragmentation.setUrl(url);
            filesFragmentation.setBucket(minioConfigInfo.getBucket());

            // A single chunk needs no merge; otherwise merge and check the result.
            // Null-safe: Integer.valueOf(1).equals(null-able chunkCount) avoids unboxing an absent count.
            Integer chunkCount = redisFileUploadInfo.getChunkCount();
            boolean isSuccess = Integer.valueOf(1).equals(chunkCount)
                    || minioUtil.mergeMultipartUpload(redisFileUploadInfo.getObject(), redisFileUploadInfo.getUploadId());
            if (isSuccess) {
                filesFragmentationApi.C(filesFragmentation);
                redisUtil.del(md5);
                return R.http(HttpCodeEnum.SUCCESS, url);
            }
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
        return R.http(HttpCodeEnum.UPLOAD_FILE_FAILED, null);
    }

    /**
     * Lists file records matching the given query via the remote API, re-typing each
     * element of the Feign payload.
     *
     * @param query filter criteria forwarded to the remote API
     * @return the matching records, or an empty list when none were found
     */
    @Override
    public R<List<FilesFragmentation>> getFileList(FilesFragmentationQueryDto query) {
        HttpResponse<List<FilesFragmentation>> httpListFiles =
                (HttpResponse<List<FilesFragmentation>>) filesFragmentationApi.R(query);
        try {
            if ("200".equals(httpListFiles.getCode()) && CollectionUtils.isNotEmpty(httpListFiles.getData())) {
                List<FilesFragmentation> result = Lists.newArrayList();
                for (Object obj : httpListFiles.getData()) {
                    result.add(toFilesFragmentation(obj));
                }
                return R.ok(result);
            }
            // No data: return an empty list rather than null.
            return R.ok(Collections.emptyList());
        } catch (Exception e) {
            log.error("调用接口或映射数据失败", e);
            throw new RuntimeException("调用接口或映射数据失败", e);
        }
    }

    /**
     * Deletes a file record end to end: removes the object from MinIO, deletes the
     * database record via the remote API, and best-effort clears any Redis cache
     * entries keyed by id or md5.
     *
     * @param id primary key of the file record; must not be null
     * @return SUCCESS, or FILE_NOT_EXIST when the record cannot be found
     */
    @Override
    @Transactional
    public R<String> deleteFile(String id) {
        // 1. Validate
        Assert.notNull(id, "id 不能为空");

        // 2. Look up the record through the remote API.
        FilesFragmentation fragmentation = null;
        HttpResponse<FilesFragmentation> httpRById = (HttpResponse<FilesFragmentation>) filesFragmentationApi.RById(id);
        if ("200".equals(httpRById.getCode()) && httpRById.getData() != null) {
            try {
                // Handles both the LinkedHashMap payload and an already-typed entity
                // (the original code left fragmentation null for the latter -> NPE below).
                fragmentation = toFilesFragmentation(httpRById.getData());
            } catch (Exception e) {
                // Preserve the cause so the conversion failure is diagnosable.
                throw new RuntimeException("数据转化异常：" + e, e);
            }
        }
        if (fragmentation == null) {
            return R.http(HttpCodeEnum.FILE_NOT_EXIST, "文件不存在");
        }

        // 3. Remove the object from MinIO.
        try {
            minioUtil.removeObject(fragmentation.getObject());
        } catch (Exception e) {
            throw new RuntimeException("Minio文件删除异常：" + e, e);
        }

        // 4. Delete the database record.
        HttpResponse<?> httpDById = filesFragmentationApi.DById(fragmentation.getId());
        if (!("200".equals(httpDById.getCode()))) {
            throw new RuntimeException("数据库文件删除异常");
        }

        // 5. Best-effort Redis cleanup — missing keys do not affect the outcome.
        redisUtil.del(id);                    // cache entry keyed by record id (may not exist)
        redisUtil.del(fragmentation.getMd5()); // upload session keyed by md5 (may not exist)

        return R.http(HttpCodeEnum.SUCCESS, "文件删除成功");
    }
}
