package cn.attackme.myuploader.service;

import cn.attackme.myuploader.config.UploadConfig;
import cn.attackme.myuploader.dao.FileDao;
import cn.attackme.myuploader.model.File;
import cn.attackme.myuploader.utils.FileUtils;
import cn.attackme.myuploader.utils.RedisUtil;
import cn.attackme.myuploader.utils.UpLoadConstant;
import cn.hutool.core.convert.Convert;
import cn.hutool.core.io.FileUtil;
import cn.hutool.core.util.StrUtil;
import com.github.tobato.fastdfs.domain.StorePath;
import com.github.tobato.fastdfs.service.AppendFileStorageClient;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import org.springframework.web.multipart.MultipartFile;

import java.io.IOException;
import java.util.Date;

import static cn.attackme.myuploader.utils.FileUtils.generateFileName;
import static cn.attackme.myuploader.utils.UploadUtils.*;

/**
 * File upload service.
 *
 * <p>Supports three upload strategies:
 * <ul>
 *   <li>{@link #upload}: whole file in a single request, written to local disk;</li>
 *   <li>{@link #uploadWithBlock}: fixed-size blocks written to local disk, completion
 *       tracked per-md5 via the helpers in {@code UploadUtils};</li>
 *   <li>{@link #uploadFastDfs}: sequential chunked upload to FastDFS, with chunk
 *       ordering and a best-effort lock coordinated through Redis.</li>
 * </ul>
 */
@Service
public class FileService {

    // Loggers are stateless and shared — declare static final per SLF4J convention.
    private static final Logger logger = LoggerFactory.getLogger(FileService.class);

    @Autowired
    private FileDao fileDao;

    /** FastDFS client supporting appender files (initial upload + in-place modify). */
    @Autowired
    private AppendFileStorageClient defaultAppendFileStorageClient;

    /**
     * Uploads a complete file in one request: writes the stream to local disk under
     * {@code UploadConfig.path} with a generated unique prefix, then records it.
     *
     * @param name original file name; appended to the generated prefix to form the path
     * @param md5  MD5 digest of the file content (stored for later duplicate checks)
     * @param file the uploaded multipart file
     * @throws IOException if reading the multipart stream or writing the file fails
     */
    public void upload(String name,
                       String md5,
                       MultipartFile file) throws IOException {
        String path = UploadConfig.path + generateFileName() + name;
        // Was System.out.println — route through the logger so output honours logging config.
        logger.debug("upload name={}, path={}", name, path);

        FileUtils.write(path, file.getInputStream());
        fileDao.save(new File(name, md5, path, new Date()));
    }

    /**
     * Uploads one block of a file split into fixed-size chunks. The write offset is
     * derived from the chunk index and the (constant) chunk size. Each received block
     * is recorded against the file's md5; once all {@code chunks} blocks are present,
     * the tracking entry is removed and the completed file is persisted.
     *
     * @param name   original file name
     * @param md5    MD5 digest identifying the whole file across block requests
     * @param size   size of each block in bytes (identical for every block)
     * @param chunks total number of blocks
     * @param chunk  zero-based index of the block carried by this request
     * @param file   the multipart part holding this block's bytes
     * @throws IOException if writing the block to disk fails
     */
    public void uploadWithBlock(String name,
                                String md5,
                                Long size,
                                Integer chunks,
                                Integer chunk,
                                MultipartFile file) throws IOException {
        // One stable on-disk name per md5 so all blocks land in the same file.
        String fileName = getFileName(md5, chunks) + name;
        // Was System.out.println — use parameterized logging instead.
        logger.debug("uploadWithBlock name={}, md5={}, chunks={}, chunk={}, size={}",
                name, md5, chunks, chunk, size);

        FileUtils.writeWithBlok(UploadConfig.path + fileName, size, file.getInputStream(), file.getSize(), chunks, chunk);
        // Record this block as received; the per-md5 map tracks which blocks are done.
        addChunk(md5, chunk);

        // When every block of this md5 has been received, drop the tracking entry
        // and persist the completed file record.
        if (isUploaded(md5)) {
            removeKey(md5);
            fileDao.save(new File(name, md5, UploadConfig.path + fileName, new Date()));
        }
    }

    /**
     * Checks by MD5 whether a file still needs uploading.
     *
     * @param md5 MD5 digest to look up
     * @return {@code true} if NO record with this md5 exists (i.e. the file has not
     *         been uploaded yet), {@code false} if a record was found
     */
    public boolean checkMd5(String md5) {
        File file = new File();
        file.setMd5(md5);
        return fileDao.getByFile(file) == null;
    }

    /**
     * Uploads one chunk of a file to FastDFS. Chunks must arrive strictly in order;
     * Redis tracks the next expected chunk index, the FastDFS path of the appender
     * file, and the byte offset written so far. A Redis counter acts as a best-effort
     * lock so only one request per md5 proceeds at a time.
     *
     * <p>Bug fixes versus the previous revision: failure paths now abort with
     * {@code return} instead of falling through (which caused NPEs on
     * {@code Convert.toInt(null)} and {@code path.getPath()}), and lock ownership is
     * only claimed when the lock was actually acquired, so the {@code finally} block
     * no longer releases a lock held by another request.
     *
     * @param fileName  original file name (its extension is used for FastDFS)
     * @param fileMd5   MD5 digest identifying the whole file across chunk requests
     * @param chunkSize size in bytes of each chunk (used to advance the write offset)
     * @param chunks    total number of chunks; defaults to 1 when {@code null}
     * @param chunk     zero-based index of this chunk; defaults to 0 when {@code null}
     * @param file      the multipart part holding this chunk's bytes
     * @throws IOException if reading the multipart stream fails
     */
    public void uploadFastDfs(String fileName,
                              String fileMd5,
                              Integer chunkSize,
                              Integer chunks,
                              Integer chunk,
                              MultipartFile file) throws IOException {

        // FastDFS path without the leading group name.
        String noGroupPath = "";
        logger.info("当前文件的Md5:{}", fileMd5);
        String chunkLockName = UpLoadConstant.chunkLock + fileMd5;

        // True only for the request that actually acquired the Redis lock;
        // only that request may release it in the finally block.
        boolean currOwner = false;
        Integer currentChunkInFront = 0;
        try {
            if (chunk == null) {
                chunk = 0;
            }
            if (chunks == null) {
                chunks = 1;
            }

            // Best-effort lock: INCR returns 1 for the first (winning) requester.
            Long lock = RedisUtil.incrBy(chunkLockName, 1);
            if (lock > 1) {
                logger.info("请求块锁失败");
                return; // lock held by another request — do NOT claim ownership
            }
            // We hold the lock from here on; finally will release it.
            currOwner = true;

            // Redis records the next chunk index expected for this md5 (0-based).
            String currentChunkKey = UpLoadConstant.chunkCurr + fileMd5;
            // Seeded externally before chunk 0 is uploaded.
            String currentChunkInRedisStr = RedisUtil.getString(currentChunkKey);
            Integer currentChunkSize = chunkSize;
            logger.info("当前块的大小:{}", currentChunkSize);
            if (StrUtil.isEmpty(currentChunkInRedisStr)) {
                logger.info("无法获取当前文件chunkCurr");
                return; // previously fell through and NPE'd on the unboxed comparison below
            }
            Integer currentChunkInRedis = Convert.toInt(currentChunkInRedisStr);
            currentChunkInFront = chunk;

            if (currentChunkInFront < currentChunkInRedis) {
                logger.info("当前文件块已上传");
                return; // duplicate chunk — already written
            } else if (currentChunkInFront > currentChunkInRedis) {
                logger.info("当前文件块需要等待上传,稍后请重试");
                return; // out-of-order chunk — caller must retry later
            }

            logger.info("***********开始上传第{}块**********", currentChunkInRedis);
            StorePath path = null;
            if (!file.isEmpty()) {
                try {
                    if (currentChunkInFront == 0) {
                        // Optimistically advance the expected-chunk counter; rolled back on failure.
                        RedisUtil.setString(currentChunkKey, Convert.toStr(currentChunkInRedis + 1));
                        logger.info("{}:redis块+1", currentChunkInFront);
                        try {
                            // First chunk: create the FastDFS appender file.
                            path = defaultAppendFileStorageClient.uploadAppenderFile(UpLoadConstant.DEFAULT_GROUP, file.getInputStream(),
                                    file.getSize(), FileUtil.extName(fileName));
                            // Record the write offset after the first chunk.
                            RedisUtil.setString(UpLoadConstant.historyUpload + fileMd5, String.valueOf(currentChunkSize));
                            logger.info("{}:更新完fastDfs", currentChunkInFront);
                        } catch (Exception e) {
                            // Roll back the counter so this chunk can be retried.
                            RedisUtil.setString(currentChunkKey, Convert.toStr(currentChunkInRedis));
                            logger.error("初次上传远程文件出错", e);
                            return;
                        }
                        if (path == null) {
                            RedisUtil.setString(currentChunkKey, Convert.toStr(currentChunkInRedis));
                            logger.info("获取远程文件路径出错");
                            return; // previously fell through to path.getPath() -> NPE
                        }
                        noGroupPath = path.getPath();
                        // Remember the appender file's path for subsequent chunks.
                        RedisUtil.setString(UpLoadConstant.fastDfsPath + fileMd5, path.getPath());
                        logger.info("上传文件 result = {}", path);
                    } else {
                        // Optimistically advance the expected-chunk counter; rolled back on failure.
                        RedisUtil.setString(currentChunkKey, Convert.toStr(currentChunkInRedis + 1));
                        logger.info("{}:redis块+1", currentChunkInFront);
                        // Path of the appender file created by chunk 0.
                        noGroupPath = RedisUtil.getString(UpLoadConstant.fastDfsPath + fileMd5);
                        if (noGroupPath == null) {
                            logger.info("无法获取已上传服务器文件地址");
                            return;
                        }
                        try {
                            // Byte offset written so far.
                            String alreadySize = RedisUtil.getString(UpLoadConstant.historyUpload + fileMd5);
                            // modifyFile (write-at-offset) instead of append: idempotent if the
                            // same chunk is retried, so the assembled file stays correct.
                            defaultAppendFileStorageClient.modifyFile(UpLoadConstant.DEFAULT_GROUP, noGroupPath, file.getInputStream(),
                                    file.getSize(), Long.parseLong(alreadySize));
                            // Advance the recorded write offset.
                            RedisUtil.setString(UpLoadConstant.historyUpload + fileMd5, String.valueOf(Long.parseLong(alreadySize) + currentChunkSize));
                            logger.info("{}:更新完fastdfs", currentChunkInFront);
                        } catch (Exception e) {
                            RedisUtil.setString(currentChunkKey, Convert.toStr(currentChunkInRedis));
                            logger.error("更新远程文件出错", e);
                            return;
                        }
                    }
                    if (currentChunkInFront + 1 == chunks) {
                        // Last chunk: clean up the per-md5 Redis bookkeeping. Persisting the
                        // finished file (url = DEFAULT_GROUP + "/" + fastDfsPath) is left to
                        // the caller / a future revision.
                        RedisUtil.delKeys(new String[]{UpLoadConstant.chunkCurr + fileMd5,
                                UpLoadConstant.fastDfsPath + fileMd5,
                                UpLoadConstant.currLocks + fileMd5,
                                UpLoadConstant.lockOwner + fileMd5
                        });
                        logger.info("***********正常结束**********");
                    }
                } catch (Exception e) {
                    logger.error("上传文件错误", e);
                    return;
                }
            }
        } finally {
            // Only the request that acquired the lock may release it.
            if (currOwner) {
                RedisUtil.setString(chunkLockName, "0");
            }
        }
        logger.info("***********第{}块上传成功**********", currentChunkInFront);
    }
}

