package com.new1cloud.upload.server.service.impls;

import com.alibaba.fastjson.parser.ParserConfig;
import com.google.common.collect.Lists;
import com.new1cloud.file.config.UploadConfig;
import com.new1cloud.file.domain.FileUploadProcess;
import com.new1cloud.file.tools.ThatStringUtil;
import com.new1cloud.upload.server.service.FileUploadService;
import com.newcloud.iotp.common.core.redis.config.RedisCache;
import com.newcloud.iotp.common.core.redis.distributedlock.DistributedLock;
import com.newcloud.iotp.common.core.redis.distributedlock.ZLock;
import com.newcloud.iotp.common.core.utils.MyStringUtils;
import lombok.Cleanup;
import lombok.SneakyThrows;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.stereotype.Service;

import java.io.BufferedInputStream;
import java.io.BufferedOutputStream;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.*;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;
import java.util.stream.Stream;

@Service
public class FileUploadServiceImpl implements FileUploadService {
    protected final Logger log = LoggerFactory.getLogger(this.getClass());

    /**
     * Lock key prefix guarding upload-progress checks.
     * Full key: CHECK_PROCESS_LOCK_KEY + fileHash
     */
    private static final String CHECK_PROCESS_LOCK_KEY = "ul:process:check:";
    /**
     * Lock key prefix guarding chunk-file merges.
     * Full key: MERGE_LOCK_KEY + fileHash
     */
    private static final String MERGE_LOCK_KEY = "mer:cf:";

    private final RedisCache redisCache;
    private final ExecutorService myExecutorService;
    private final DistributedLock distributedLock;

    public FileUploadServiceImpl(RedisCache redisCache, ExecutorService myExecutorService,
                                 DistributedLock distributedLock) {
        this.redisCache = redisCache;
        this.myExecutorService = myExecutorService;
        this.distributedLock = distributedLock;
    }

    static {
        // Whitelist our domain package so fastjson can deserialize FileUploadProcess
        // from the Redis cache under its safe-mode accept list.
        ParserConfig.getGlobalInstance().addAccept("com.new1cloud.");
    }

    /**
     * Parses a chunk file name into its chunk number.
     *
     * @param name plain file name of a candidate chunk file
     * @return the chunk number, or {@code null} when the name is not a plain integer
     *         (e.g. an in-progress "doing" marker or any other stray file)
     */
    private static Integer parseChunkNumber(String name) {
        try {
            return Integer.valueOf(name);
        } catch (NumberFormatException ignored) {
            // Not a chunk file — callers filter these out.
            return null;
        }
    }

    /**
     * Lists the numbers of the chunk files already fully uploaded into the temp dir.
     * Only regular files whose name is a plain integer count; in-progress markers
     * and anything else are ignored.
     *
     * @param temDirPath per-file temp directory holding the uploaded chunks
     * @return chunk numbers found (unordered)
     * @throws IOException if the directory cannot be listed
     */
    private List<Integer> listCompletedChunkNumbers(Path temDirPath) throws IOException {
        // Files.walk keeps an open directory handle — must be closed via try-with-resources.
        try (Stream<Path> files = Files.walk(temDirPath, 1)) {
            return files.filter(Files::isRegularFile)
                    .map(path -> parseChunkNumber(path.getFileName().toString()))
                    .filter(Objects::nonNull)
                    .collect(Collectors.toList());
        }
    }

    /**
     * Core of {@link #checkUploadProcess}: resolves (or creates) the cached upload
     * progress for {@code fileHash} and reconciles it with what is actually on disk.
     *
     * @param fileHash   unique content hash identifying the upload
     * @param fileSize   total file size in bytes
     * @param fileName   original file name
     * @param uploadPath target directory for the final merged file
     * @return the reconciled progress record (never {@code null})
     */
    @SneakyThrows
    private FileUploadProcess checkUploadProcess_Sub(String fileHash, Long fileSize, String fileName, String uploadPath) {
        Object obj = redisCache.getCacheObject(fileHash);
        if (Objects.isNull(obj)) {
            // First time we see this hash: create and cache a fresh progress record.
            FileUploadProcess fileUploadProcess = new FileUploadProcess(fileName, fileSize, fileHash, 0, null,
                    UploadConfig.getChunkSize(), ThatStringUtil.urlConcat(uploadPath, fileName));
            redisCache.setCacheObject(fileHash, fileUploadProcess);
            return fileUploadProcess;
        }

        FileUploadProcess uploadProcess = (FileUploadProcess) obj;
        // Status 1 means "already merged" — trust it only if the merged file still exists.
        if (Objects.equals(1, uploadProcess.getUploadStatus())) {
            File remoteFile = new File(uploadProcess.getUploadPath());
            if (remoteFile.exists()) {
                return uploadProcess;
            }
            log.warn("文件(标识码: {})的聚合文件不存在，将进行分片文件确认.", fileHash);
            uploadProcess.setUploadStatus(0);
        }

        File temDir = new File(ThatStringUtil.urlConcat(UploadConfig.getTemPath(), fileHash));
        if (!temDir.exists()) {
            // No temp dir yet: create it and reset the progress to "nothing uploaded".
            Files.createDirectories(temDir.toPath());
            uploadProcess.setUploadStatus(0);
            uploadProcess.setCompletedChunkItem(Lists.newArrayList());
            redisCache.setCacheObject(fileHash, uploadProcess);
            return uploadProcess;
        }

        // Full refresh of completedChunkItem from the chunks actually present on disk.
        List<Integer> chunkNames = listCompletedChunkNumbers(temDir.toPath());
        uploadProcess.setCompletedChunkItem(chunkNames);
        uploadProcess.setUploadStatus(0);
        redisCache.setCacheObject(fileHash, uploadProcess);
        log.debug("本次checkUploadProcess_Sub执行完成.");
        return uploadProcess;
    }

    /**
     * Core of {@link #mergeChunkFiles}: concatenates all uploaded chunks (in chunk-number
     * order) into the final file, then asynchronously deletes the chunks and marks the
     * upload complete in the cache.
     *
     * @param fileHash      unique content hash identifying the upload
     * @param fileSize      total file size in bytes (used to derive the expected chunk count)
     * @param remoteDirPath directory the merged file is written into
     * @param fileName      name of the merged file
     * @return absolute/remote path of the merged file
     */
    @SneakyThrows
    private String mergeChunkFiles_Sub(String fileHash, Long fileSize, String remoteDirPath, String fileName) {
        Object obj = redisCache.getCacheObject(fileHash);
        if (Objects.isNull(obj)) {
            throw new RuntimeException("未获取到分片文件信息，无法进行分片文件聚合操作.");
        }
        FileUploadProcess uploadProcess = (FileUploadProcess) obj;
        // Already merged and the merged file still exists: idempotent fast path.
        if (Objects.equals(1, uploadProcess.getUploadStatus())) {
            File remoteFile = new File(uploadProcess.getUploadPath());
            if (remoteFile.exists()) {
                return uploadProcess.getUploadPath();
            }
        }

        // Expected number of chunks for this file size.
        int chunkNum = (int) Math.ceil(1.0 * fileSize / UploadConfig.getChunkSize());
        File temDir = new File(ThatStringUtil.urlConcat(UploadConfig.getTemPath(), fileHash));
        // Collect completed chunks sorted by chunk number. Non-numeric names (e.g.
        // in-progress "doing" markers) are skipped up front instead of letting
        // Integer.parseInt blow up inside the sort comparator.
        List<Path> chunkPaths;
        try (Stream<Path> files = Files.walk(temDir.toPath(), 1)) {
            chunkPaths = files.filter(Files::isRegularFile)
                    .filter(path -> parseChunkNumber(path.getFileName().toString()) != null)
                    .sorted(Comparator.comparingInt(path -> parseChunkNumber(path.getFileName().toString())))
                    .collect(Collectors.toList());
        }
        if (chunkNum != chunkPaths.size()) {
            // Incomplete chunk set — refuse to merge.
            log.error("文件({})已上传的分片文件数({})与实际总分片数({})不符，请重新上传.", fileName, chunkPaths.size(), chunkNum);
            throw new RuntimeException("文件已上传分片数与实际总分片数不符，请重新上传");
        }

        final List<File> chunkFiles = new ArrayList<>(chunkNum);
        String remoteFilePath = ThatStringUtil.urlConcat(remoteDirPath, fileName);
        // Make sure the target directory exists, and OVERWRITE (append=false): a retry
        // after a half-finished merge must not append duplicate bytes to a stale file.
        Files.createDirectories(new File(remoteDirPath).toPath());
        @Cleanup
        BufferedOutputStream out = new BufferedOutputStream(new FileOutputStream(remoteFilePath, false));
        for (Path chunkPath : chunkPaths) {
            log.debug("当前读取到的分片文件路径: {}", chunkPath);
            File chunkFile = chunkPath.toFile();
            chunkFiles.add(chunkFile);

            @Cleanup BufferedInputStream in = new BufferedInputStream(Files.newInputStream(chunkFile.toPath()));
            byte[] buffer = new byte[8192];
            int bytesRead;
            while ((bytesRead = in.read(buffer)) != -1) {
                out.write(buffer, 0, bytesRead);
            }
        }
        // Merge succeeded — delete the chunk files asynchronously, off the caller's thread.
        myExecutorService.submit(() -> chunkFiles.stream().filter(file -> !file.delete())
                .forEach(file -> log.warn("分片文件({})合并完成，但是删除失败.", file.getAbsolutePath())));

        uploadProcess.setUploadStatus(1);
        redisCache.setCacheObject(fileHash, uploadProcess);

        return remoteFilePath;
    }

    /**
     * {@inheritDoc}
     *
     * <p>Serialized per fileHash via a distributed lock; returns {@code null} when the
     * lock cannot be acquired in time or the check fails.
     */
    @Override
    public FileUploadProcess checkUploadProcess(String fileHash, Long fileSize, String fileName, String uploadPath) {
        ZLock zLock = null;
        try {
            log.debug("开始获取check_process_lock_key锁.");
            zLock = distributedLock.tryLock(CHECK_PROCESS_LOCK_KEY.concat(fileHash), 10000, 1000, TimeUnit.MILLISECONDS, false);
            if (MyStringUtils.isNull(zLock)) {
                log.error("在执行文件({})的checkUploadProcess时获取锁超时!", fileHash);
                return null;
            }
            log.debug("成功获取到check_process_lock_key锁.");
            return checkUploadProcess_Sub(fileHash, fileSize, fileName, uploadPath);
        } catch (Exception e) {
            log.error("在执行文件({})的checkUploadProcess时发生异常!", fileHash, e);
        } finally {
            try {
                if (MyStringUtils.isNotNull(zLock))
                    distributedLock.unlock(zLock);
            } catch (Exception e) {
                log.error("Release lock for checkUploadProcess failed", e);
            }
        }
        return null;
    }

    /**
     * {@inheritDoc}
     *
     * <p>Serialized per fileHash via a distributed lock; returns {@code null} when the
     * lock cannot be acquired in time or the merge fails.
     */
    @Override
    public String mergeChunkFiles(String fileHash, Long fileSize, String remoteDirPath, String fileName) {
        ZLock zLock = null;
        try {
            // NOTE(review): last arg `false` despite the original "reentrant" comment,
            // and wait/lease values (5, 30000) differ sharply from the check lock
            // (10000, 1000) — confirm against DistributedLock.tryLock's parameter order.
            zLock = distributedLock.tryLock(MERGE_LOCK_KEY.concat(fileHash), 5, 30000, TimeUnit.MILLISECONDS, false);
            if (MyStringUtils.isNull(zLock)) {
                log.error("在执行文件({})的mergeChunkFiles时获取锁超时!", fileHash);
                return null;
            }
            return mergeChunkFiles_Sub(fileHash, fileSize, remoteDirPath, fileName);
        } catch (Exception e) {
            // Fixed copy-paste: this is the merge path, not checkUploadProcess.
            log.error("在执行文件({})的mergeChunkFiles时发生异常!", fileHash, e);
        } finally {
            try {
                if (MyStringUtils.isNotNull(zLock))
                    distributedLock.unlock(zLock);
            } catch (Exception e) {
                log.error("Release lock for mergeChunkFiles failed", e);
            }
        }
        return null;
    }
}
