package com.ruoyi.file.controller;

import cn.hutool.core.date.DateUtil;
import com.alibaba.fastjson2.JSONObject;
import com.ruoyi.common.config.RuoYiConfig;
import com.ruoyi.common.core.controller.BaseController;
import com.ruoyi.common.core.domain.AjaxResult;
import com.ruoyi.common.core.domain.model.LoginUser;
import com.ruoyi.common.core.redis.RedisCache;
import com.ruoyi.common.utils.StringUtils;
import com.ruoyi.common.utils.file.FileUtils;
import com.ruoyi.file.component.FileUploadProcess;
import com.ruoyi.file.component.PublishLayerProcess;
import com.ruoyi.file.domain.*;
import com.ruoyi.file.service.IFileChunkInfoService;
import com.ruoyi.file.service.ITblFileService;
import com.ruoyi.framework.minio.MinioFile;
import com.ruoyi.framework.minio.MinioService;
import com.ruoyi.framework.web.service.TokenService;
import org.redisson.api.RedissonClient;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.web.bind.annotation.*;
import org.springframework.web.multipart.MultipartFile;

import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import java.io.File;
import java.util.*;

/**
 * 上传下载文件
 *
 * @author 任伟伟
 * 文件上传接口
 * 该接口也支持大文件(GB级别以上)上传
 * 前端请使用BigFileUploader组件
 * 该请求建议根据参数hash到服务器上
 * minio 分片上传
 * https://blog.csdn.net/m0_67402970/article/details/126112680
 */
@RestController
@RequestMapping("/file/uploader/local")
public class FileLocalChunkController extends BaseController {

    private static final Logger logger = LoggerFactory.getLogger(FileLocalChunkController.class);

    /**
     * Wait (ms) after dispatching the upload message before the temporary
     * chunk directory is removed.
     * NOTE(review): this blocks the HTTP request thread for a full minute per
     * merge; an explicit completion ack from FileUploadProcess would be
     * preferable — the fixed sleep is kept to preserve existing behavior.
     */
    private static final long UPLOAD_MESSAGE_WAIT_MS = 60000L;

    @Autowired
    private RedissonClient redissonClient;
    @Autowired
    private IFileChunkInfoService fileChunkInfoService;
    @Autowired
    private TokenService tokenService;
    @Autowired
    private RuoYiConfig ruoYiConfig;
    @Autowired
    private RedisCache redisCache;
    @Autowired
    private ITblFileService tblFileService;
    @Autowired
    private MinioService minioService;
    @Autowired
    private FileUploadProcess fileUploadProcess;
    @Autowired
    private PublishLayerProcess publishLayerProcess;

    /**
     * Instant-upload ("秒传") / resume check.
     * <p>
     * If the fully assembled file already exists with the expected size, the
     * front end is told to skip the upload entirely. Otherwise the list of
     * chunks already persisted is returned so the client can resume and send
     * only the missing ones.
     *
     * @param chunk    chunk metadata (identifier, filename, totalSize, ...)
     * @param response unused; kept for signature compatibility
     * @return on instant upload: {@code skipUpload=true, state=2000};
     *         otherwise {@code uploaded} = list of chunk numbers already stored
     */
    @GetMapping("/chunk")
    public AjaxResult checkChunk(FileChunkInfo chunk, HttpServletResponse response) {
        File chunkFolder = new File(RuoYiConfig.getUploadPath(), chunk.getIdentifier());
        File file = new File(chunkFolder, chunk.getFilename());
        // Complete file already present with the right size -> instant upload.
        if (file.exists() && file.length() == chunk.getTotalSize()) {
            JSONObject data = new JSONObject();
            data.put("skipUpload", true);
            data.put("state", 2000);
            data.put("totalChunks", chunk.getTotalChunks());
            return AjaxResult.success("完整文件已存在，直接跳过上传，实现秒传", data);
        }
        // Otherwise ask the database which chunks have been uploaded so the
        // front end can skip them (resumable upload). A chunk is only recorded
        // in the database after it was uploaded successfully.
        ArrayList<Integer> list = fileChunkInfoService.checkChunk(chunk);
        JSONObject data = new JSONObject();
        data.put("uploaded", list); // chunk numbers already uploaded for this file
        // FIX: the original returned the "file already exists, skip upload"
        // message on this path too — use a message matching what happened.
        return AjaxResult.success("返回已上传分片，实现断点续传", data);
    }

    /**
     * Uploads a single file chunk.
     * <p>
     * The payload is written directly at its offset inside the final file via
     * random access, so no separate merge pass over chunk files is needed.
     * Example: /file/uploader/local/chunk?chunkNumber=1&chunkSize=2048000&currentChunkSize=2048000
     * &totalSize=6904810&identifier=19b01e74f140975ad6d95d77018949bc&filename=wgs84china.zip
     * &relativePath=wgs84china.zip&totalChunks=4
     *
     * @param chunk chunk metadata plus the multipart payload ({@code upfile})
     * @return {@code state=2000} when every chunk has arrived, {@code state=2001} to continue
     */
    @PostMapping("/chunk")
    public AjaxResult uploadChunk(FileChunkInfo chunk) {
        try {
            MultipartFile multipartFile = chunk.getUpfile();
            logger.info("multipartFile originName: {}, chunkNumber: {}", multipartFile.getOriginalFilename(), chunk.getChunkNumber());
            File chunkFolder = new File(RuoYiConfig.getUploadPath(), chunk.getIdentifier());
            byte[] bytes = multipartFile.getBytes();
            File file = new File(chunkFolder, chunk.getFilename());
            if (!file.exists()) {
                file.getParentFile().mkdirs();
                file.createNewFile();
            }
            // FIX: multiply in long arithmetic — the original (int * int)
            // overflows for offsets past 2 GB, which this endpoint supports.
            long seek = (chunk.getChunkNumber() - 1L) * chunk.getChunkSize();
            FileUtils.writeByteArraySeekToFile(file, bytes, seek); // direct random-access write
            if (fileChunkInfoService.saveChunk(chunk) < 0) {
                return AjaxResult.error("上传文件异常");
            }
            ArrayList<Integer> list = fileChunkInfoService.checkChunk(chunk);
            logger.info("分片上传成功.....当前分片：{},已上传分片：{}  总分片数：{} ",
                    chunk.getChunkNumber(), list.size(), chunk.getTotalChunks());
            JSONObject data = new JSONObject();
            if (chunk.getTotalChunks().intValue() == list.size()) {
                data.put("state", 2000); // all chunks received
                data.put("totalChunks", chunk.getTotalChunks());
            } else {
                data.put("state", 2001); // keep uploading
            }
            return AjaxResult.success("上传文件成功", data);
        } catch (Exception e) {
            // FIX: printStackTrace() replaced with proper SLF4J logging.
            logger.error("uploadChunk error", e);
            return AjaxResult.error("上传文件异常");
        }
    }

    /**
     * Finalizes an upload: verifies the assembled file, moves it to the local
     * MinIO directory, notifies the async upload processor, and records the
     * file (or, for folder uploads, the completed folder batch) in the database.
     * <p>
     * Chunks were written in place, so no byte-level merge is performed here —
     * only a size check of the assembled file.
     *
     * @param request servlet request (used to resolve the logged-in user)
     * @param chunk   upload descriptor for the finished file
     * @return bucket/objectName of the stored file; {@code reload=true} once persisted
     */
    @PostMapping("/mergeFile")
    public AjaxResult mergeFile(HttpServletRequest request,
                                @RequestBody TFileInfoLocalVO chunk) {
        try {
            logger.info("mergeFile...........................................");
            String taskId = chunk.getRefProjectId();
            logger.info("taskid...........{}", taskId);
            LoginUser loginUser = tokenService.getLoginUser(request);
            Long userId = loginUser.getUserId();
            File chunkFolder = new File(RuoYiConfig.getUploadPath(), chunk.getUniqueIdentifier());
            File file = new File(chunkFolder, chunk.getName());
            // The assembled file must exist with exactly the announced size.
            if (!file.exists() || file.length() != chunk.getSize()) {
                logger.info("mergeFile error file not exist or size not equal");
                return AjaxResult.error("上传文件失败");
            }
            // Destination is <minioLocalDir>/<bucket>/<objectName>; bucket
            // defaults to today's date when the client supplied none.
            String bucket = !StringUtils.isEmpty(chunk.getBucket()) ? chunk.getBucket() : DateUtil.format(new Date(), "yyyy-MM-dd");
            String objectName = StringUtils.isEmpty(chunk.getFolder()) ?
                    userId + "/" + chunk.getRelativePath() :
                    chunk.getFolder() + "/" + chunk.getRelativePath(); // relativePath has no leading '/'
            File bucketFolder = new File(ruoYiConfig.getMinioLocalDir(), bucket);
            File destFile = new File(bucketFolder, objectName);
            File parentFile = destFile.getParentFile();
            if (!parentFile.exists()) {
                parentFile.mkdirs();
            }
            if (destFile.exists()) {
                destFile.delete(); // overwrite a previous upload of the same object
            }
            FileUtils.moveFile(file, destFile);
            MinioFile minioFile = new MinioFile(bucket, objectName);
            fileChunkInfoService.deleteFileChunkInfoByIdentifier(chunk.getUniqueIdentifier());
            // Hand the assembled file to the asynchronous upload processor.
            fileUploadProcess.sendFileUploadMessage(destFile.getCanonicalPath(), minioFile.getBucket(), minioFile.getObjectName());
            // Fixed grace period for the async consumer before cleanup — see
            // UPLOAD_MESSAGE_WAIT_MS for why this should become an explicit ack.
            Thread.sleep(UPLOAD_MESSAGE_WAIT_MS);
            // Remove the temporary chunk directory.
            FileUtils.deleteDirectory(chunkFolder);
            MergeModel rootFile = chunk.getRootFile();
            JSONObject data = new JSONObject();
            data.put("bucket", minioFile.getBucket());
            data.put("objectName", minioFile.getObjectName());
            chunk.setBucket(minioFile.getBucket());
            chunk.setObjectName(minioFile.getObjectName());
            if (rootFile.getFolder()) {
                handleFolderUpload(loginUser, chunk, taskId, rootFile, data);
            } else {
                handleSingleFile(loginUser, chunk, taskId, data);
            }
            return AjaxResult.success("合并文件成功", data);
        } catch (InterruptedException e) {
            // FIX: restore the interrupt flag instead of silently swallowing it.
            Thread.currentThread().interrupt();
            logger.error("mergeFile", e);
            return AjaxResult.error("合并文件异常");
        } catch (Exception e) {
            logger.error("mergeFile", e);
            return AjaxResult.error("合并文件异常");
        }
    }

    /**
     * Accumulates the files of a folder upload in Redis; once every file of the
     * folder has arrived, persists the whole batch and publishes the layers.
     * <p>
     * NOTE(review): the get/put/set sequence on the Redis map is not atomic —
     * concurrent merges for the same actionId can race and lose entries. The
     * original code carried a commented-out Redisson lock for exactly this;
     * consider reinstating it around this method.
     */
    private void handleFolderUpload(LoginUser loginUser, TFileInfoLocalVO chunk, String taskId,
                                    MergeModel rootFile, JSONObject data) throws Exception {
        logger.info("mergeFile success then check the  folder while has all upload  ...............");
        Map<String, TFileInfoLocalVO> cacheMap = redisCache.getCacheMap(chunk.getActionId());
        if (cacheMap == null) {
            cacheMap = new HashMap<>();
        }
        cacheMap.put(chunk.getUniqueIdentifier(), chunk);
        redisCache.setCacheMap(chunk.getActionId(), cacheMap);
        logger.info("mergeFile get lock ok ActionId {}   Identifier {}  cache size {}",
                chunk.getActionId(), chunk.getUniqueIdentifier(), cacheMap.keySet().size());
        if (cacheMap.keySet().size() == rootFile.getFilesCount()) {
            logger.info("mergeFile success   the folder  has all upload  will save to db...............");
            FileAddDto fileAddDto = new FileAddDto();
            fileAddDto.setPid(chunk.getPid());
            fileAddDto.setDirectory(true);
            fileAddDto.setTaskId(taskId);
            fileAddDto.setFiles(new ArrayList<>(cacheMap.values()));
            saveAndPublish(loginUser, fileAddDto, taskId);
            redisCache.deleteObject(chunk.getActionId());
            data.put("reload", true);
        } else {
            logger.info("mergeFile success  but the folder  " +
                            "will continue up upload current {} cache size {} total count {}...............",
                    chunk.getRelativePath(),
                    cacheMap.keySet().size(), rootFile.getFilesCount());
        }
    }

    /** Persists a stand-alone (non-folder) upload and publishes its layers. */
    private void handleSingleFile(LoginUser loginUser, TFileInfoLocalVO chunk, String taskId,
                                  JSONObject data) throws Exception {
        logger.info("mergeFile success then add the file to db ...............");
        FileAddDto fileAddDto = new FileAddDto();
        fileAddDto.setPid(chunk.getPid());
        fileAddDto.setDirectory(false);
        fileAddDto.setTaskId(taskId);
        ArrayList<TFileInfoLocalVO> files = new ArrayList<>();
        files.add(chunk);
        fileAddDto.setFiles(files);
        saveAndPublish(loginUser, fileAddDto, taskId);
        data.put("reload", true);
    }

    /** Saves the files to the database, tags each publish item with the task id, and publishes layers. */
    private void saveAndPublish(LoginUser loginUser, FileAddDto fileAddDto, String taskId) throws Exception {
        List<FilePublishDto> filePublishDtos = tblFileService.saveFile(loginUser, fileAddDto);
        filePublishDtos.forEach(item -> item.setTaskId(taskId));
        publishLayerProcess.publishLayer(filePublishDtos);
    }
}
