package com.xiangxiao.rpan.storage.service.impl;

import com.alibaba.fastjson.JSON;
import com.xiangxiao.rpan.storage.constant.GlobalConsts;
import com.xiangxiao.rpan.storage.constant.ResponseCode;
import com.xiangxiao.rpan.storage.constant.ResponseMessage;
import com.xiangxiao.rpan.storage.core.CacheManager;
import com.xiangxiao.rpan.storage.core.StorageManager;
import com.xiangxiao.rpan.storage.dao.TUploadedRecordMapper;
import com.xiangxiao.rpan.storage.dto.BigFileBean;
import com.xiangxiao.rpan.storage.dto.BigFileDto;
import com.xiangxiao.rpan.storage.dto.StorePathDto;
import com.xiangxiao.rpan.storage.entity.TUploadedRecord;
import com.xiangxiao.rpan.storage.exception.StorageExcetion;
import com.xiangxiao.rpan.storage.factory.ApplicationContextFactory;
import com.xiangxiao.rpan.storage.helper.UserInfoHelper;
import com.xiangxiao.rpan.storage.service.UploadService;
import com.xiangxiao.rpan.storage.threadpool.AsyncTaskWithThreadPool;
import com.xiangxiao.rpan.storage.threadpool.ThreadExecutorUtils;
import org.apache.commons.lang.ObjectUtils;
import org.apache.commons.lang.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.BeanUtils;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Qualifier;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.scheduling.annotation.Async;
import org.springframework.scheduling.concurrent.ThreadPoolTaskExecutor;
import org.springframework.stereotype.Service;
import org.springframework.util.NumberUtils;
import org.springframework.web.context.request.RequestContextHolder;
import org.springframework.web.context.request.ServletRequestAttributes;
import org.springframework.web.multipart.MultipartFile;

import javax.annotation.Resource;
import javax.servlet.http.HttpServletRequest;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Date;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;

/**
 * @author xiangxiao
 * @email 573768011@qq.com
 * @date 2023/9/6 14:54
 */
@Service
public class UploadServiceImpl implements UploadService {
  private final Logger logger = LoggerFactory.getLogger(UploadServiceImpl.class);
  private static final String SPLIT_FILE_URL_SEPARATOR = "/";
  private static final String POINT_STR = ".";
  private static final Integer MINUS_ONE_INT = -1;
  private static final String EMPTY_STR = "";

  /** URL prefix prepended to stored files (from configuration key {@code file.url.prefix}). */
  @Value("${file.url.prefix}")
  private String filePriex;

  @Autowired
  @Qualifier(value = "cacheManager")
  private CacheManager cacheManager;

  @Autowired
  @Qualifier(value = "storageManager")
  private StorageManager storageManager;

  @Resource(name = "asyncTaskExecutor")
  private ThreadPoolTaskExecutor asyncTaskExecutor;

  @Autowired
  private TUploadedRecordMapper tUploadedRecordMapper;

  @Autowired
  private UserInfoHelper userInfoHelper;

  /**
   * Uploads one chunk of a large file and records the resume position in the cache.
   *
   * @param file             the multipart payload carrying this chunk's bytes
   * @param identifier       the file's MD5, used as the storage/cache identifier
   * @param totalChunks      total number of chunks for the whole file
   * @param chunkNumber      1-based index of the chunk being uploaded
   * @param totalSize        total size of the complete file in bytes
   * @param redisFileDataKey cache key under which resume info (StorePathDto JSON) is kept
   * @return the index of the NEXT chunk to upload (chunkNumber + 1)
   * @throws StorageExcetion if the storage layer rejects the chunk or resume info is missing
   */
  private Integer uploadSingleChunk(MultipartFile file, String identifier, Integer totalChunks,
                                    Integer chunkNumber, Long totalSize, String redisFileDataKey) {
    // Widen to long BEFORE multiplying so large chunk counts cannot overflow int arithmetic.
    long startPos = (chunkNumber - 1L) * GlobalConsts.FILE_SPLIT_SIZE;
    // FIX: boxed Integers must be compared with equals(); '==' is a reference comparison and
    // fails for values outside the Integer cache, mis-sizing the final chunk of large files.
    long curPartSize = chunkNumber.equals(totalChunks)
        ? (totalSize - startPos)
        : GlobalConsts.FILE_SPLIT_SIZE;
    long endPos = startPos + curPartSize;
    StorePathDto storeMsg = new StorePathDto();
    if (chunkNumber == 1) {
      // First chunk: write from the beginning and create the resume record.
      try {
        String storePath = storageManager.storeWithChunk(file, identifier, totalChunks, chunkNumber,
            totalSize, curPartSize, redisFileDataKey);
        if (storePath == null) {
          throw new StorageExcetion(ResponseMessage.COMPRESSED_FILE_UPLOAD_FAIL, ResponseCode.COMPRESSED_FILE_UPLOAD_FAIL);
        }
        storeMsg.setPath(storePath);
        storeMsg.setCurrChunk(chunkNumber);
        storeMsg.setCurrByte(endPos);
        storeMsg.setTotalChunks(totalChunks);
        // Persist resume info only after the chunk is safely stored.
        cacheManager.put(redisFileDataKey, JSON.toJSONString(storeMsg), GlobalConsts.ONE_DAY_LONG);
        chunkNumber++;
      } catch (Exception e) {
        // Log with the full stack trace instead of printStackTrace(); the StorageExcetion
        // constructor only carries a message, so the cause is preserved in the log.
        logger.error("failed to upload first chunk, md5={}", identifier, e);
        throw new StorageExcetion(e.getMessage(), ResponseCode.BIG_FILE_UPLOAD_FAIL);
      }
    } else {
      // Subsequent chunk: require the resume record written by an earlier chunk.
      try {
        StorePathDto fileMsg = JSON.parseObject(String.valueOf(cacheManager.get(redisFileDataKey)), StorePathDto.class);
        if (fileMsg == null) {
          // Without resume info we cannot append safely; the client must restart the upload.
          throw new StorageExcetion("无法获取续传信息，请重新上传", ResponseCode.BIG_FILE_UPLOAD_FAIL);
        }
        String storePath = storageManager.storeWithChunk(file, identifier, totalChunks, chunkNumber,
            totalSize, curPartSize, redisFileDataKey);
        if (storePath == null) {
          throw new StorageExcetion(ResponseMessage.COMPRESSED_FILE_UPLOAD_FAIL, ResponseCode.COMPRESSED_FILE_UPLOAD_FAIL);
        }
        fileMsg.setCurrByte(endPos);
        fileMsg.setCurrChunk(chunkNumber);
        fileMsg.setTotalChunks(totalChunks);
        // FIX: copy the cached record first, THEN set the path returned for this chunk.
        // Previously copyProperties ran last and clobbered the fresh path with the cached one.
        BeanUtils.copyProperties(fileMsg, storeMsg);
        storeMsg.setPath(storePath);
        cacheManager.put(redisFileDataKey, JSON.toJSONString(storeMsg), GlobalConsts.ONE_DAY_LONG);
        chunkNumber++;
      } catch (Exception e) {
        logger.error("failed to upload chunk {}/{}, md5={}", chunkNumber, totalChunks, identifier, e);
        throw new StorageExcetion(e.getMessage(), ResponseCode.COMPRESSED_FILE_UPLOAD_FAIL);
      }
    }
    return chunkNumber;
  }

  /**
   * Uploads a large file in chunks, with instant completion ("秒传") when the same MD5 has
   * already been fully uploaded by the same user, and resume support via cached chunk state.
   *
   * @param bigFileBean request parameters (MD5, chunk indices, size, payloads, token)
   * @return a DTO carrying the final file URL and file id (MD5)
   * @throws StorageExcetion on lock contention, chunk mismatch, or any upload/merge failure
   */
  @Override
  public BigFileDto uploadBigFile(BigFileBean bigFileBean) {
    String fileMD5 = bigFileBean.getFileMd5();
    Integer currChunk = bigFileBean.getCurrChunk();
    Integer totalChunks = bigFileBean.getTotalChunks();
    Long fileSize = bigFileBean.getFileSize();
    List<MultipartFile> files = bigFileBean.getFiles();
    BigFileDto fileDto = new BigFileDto();
    String redisFileChunkKey = GlobalConsts.FILE_CHUNK_KEY + ":" + fileMD5;
    String redisFileLockKey = GlobalConsts.FILE_LOCK_KEY + ":" + fileMD5;
    // Resolve the caller from the gateway token (0 when no token is present).
    Long userId = getUserIdByToken(bigFileBean.getToken());

    // If this user already finished uploading this MD5, return the existing record directly.
    TUploadedRecord tUploadedRecord = tUploadedRecordMapper.selectByFileMD5(fileMD5);
    if (tUploadedRecord != null && userId.equals(tUploadedRecord.getUserId())) {
      if (GlobalConsts.FILE_UPLOAD_FINISH.equals(tUploadedRecord.getStatus())) {
        fileDto.setFileUrl(tUploadedRecord.getFilePath());
        fileDto.setFileId(tUploadedRecord.getFilemd5());
        BeanUtils.copyProperties(tUploadedRecord, fileDto);
        return fileDto;
      } else {
        // Resuming an unfinished upload: validate the requested chunk against the cached state.
        Object fileMsg = cacheManager.get(redisFileChunkKey);
        if (fileMsg == null) {
          throw new StorageExcetion(ResponseMessage.BIGFILE_UP_FAIL, ResponseCode.BIGFILE_UP_FAIL);
        }
        // FIX: the original compared against a freshly-constructed empty StorePathDto, so the
        // cached resume record was fetched but never consulted. Parse and compare it instead.
        StorePathDto cachedMsg = JSON.parseObject(String.valueOf(fileMsg), StorePathDto.class);
        if (cachedMsg == null || !currChunk.equals(cachedMsg.getCurrChunk())) {
          throw new StorageExcetion(ResponseMessage.FILE_UPLOAD_CHUNK_ERROR, ResponseCode.FILE_UPLOAD_CHUNK_ERROR);
        }
      }
    }

    // Take the per-file lock so two clients cannot upload the same MD5 concurrently.
    addFileLock(redisFileLockKey);

    try {
      for (int i = 0; i < files.size(); ++i) {
        MultipartFile file = files.get(i);
        // NOTE(review): mergeChunks is handed the whole original filename as "suffix";
        // confirm whether it expects only the extension (see getFileSuffix).
        String suffix = file.getOriginalFilename();
        while (currChunk <= totalChunks) {
          Integer usedCurrChunk = currChunk;
          Future<Integer> future = asyncTaskExecutor.submit(() ->
              uploadSingleChunk(file, fileMD5,
                  totalChunks, usedCurrChunk, fileSize, redisFileChunkKey)
          );
          // Block until the chunk finishes; get() is called once (it was called twice before).
          currChunk = future.get();
          logger.debug("next chunk index: {}", currChunk);
        }

        // After the final chunk, merge the parts and finalize the upload record.
        if (currChunk.equals(totalChunks + 1)) {
          String fileUrl = storageManager.mergeChunks(fileMD5, totalChunks, suffix);
          logger.info("merged file url: {}", fileUrl);
          fileDto.setFileUrl(fileUrl);
          fileDto.setFileId(fileMD5);
          // Clear resume state and release the lock now that the file is complete.
          cacheManager.delete(redisFileChunkKey);
          cacheManager.delete(redisFileLockKey);
          if (tUploadedRecord == null) {
            tUploadedRecord = this.saveUploadedRecord(bigFileBean, fileUrl, file, totalChunks, userId);
            BeanUtils.copyProperties(tUploadedRecord, fileDto);
          }
          // NOTE(review): status constants come from ResponseCode here but GlobalConsts in
          // saveUploadedRecord — confirm both denote the same "finished" value.
          tUploadedRecord.setCreated(bigFileBean.getOperDate());
          tUploadedRecord.setStatus(ResponseCode.FILE_UPLOAD_FINISH);
          BeanUtils.copyProperties(tUploadedRecord, fileDto);
          tUploadedRecordMapper.updateByPrimaryKey(tUploadedRecord);
        }
      }
    } catch (IOException e) {
      logger.error("big file upload failed, md5={}", fileMD5, e);
      throw new StorageExcetion(ResponseMessage.BIG_FILE_UPLOAD_FAIL, ResponseCode.BIG_FILE_UPLOAD_FAIL);
    } catch (ExecutionException e) {
      // FIX: previously this was swallowed and the loop state silently corrupted.
      logger.error("chunk task failed, md5={}", fileMD5, e);
      throw new StorageExcetion(ResponseMessage.BIG_FILE_UPLOAD_FAIL, ResponseCode.BIG_FILE_UPLOAD_FAIL);
    } catch (InterruptedException e) {
      // Restore the interrupt flag so callers/executors can observe the interruption.
      Thread.currentThread().interrupt();
      throw new StorageExcetion(ResponseMessage.BIG_FILE_UPLOAD_FAIL, ResponseCode.BIG_FILE_UPLOAD_FAIL);
    }
    // FIX: the original 'finally { return fileDto; }' silently discarded EVERY exception
    // thrown above (including the ones thrown by the catch blocks), reporting failures as
    // success. Return normally instead and let failures propagate.
    return fileDto;
  }

  /**
   * Builds and validates a {@link BigFileBean} from the current HTTP request and the
   * caller-supplied chunk parameters.
   *
   * @param file        the chunk payload (must be non-empty)
   * @param fileMD5     the whole file's MD5 (must be non-blank)
   * @param fileSize    total file size in bytes (must be >= 0)
   * @param totalChunks total chunk count (must be >= 1)
   * @param currChunks  current chunk index (must be >= 0)
   * @return the populated request bean
   * @throws StorageExcetion with PARAMETER_ERROR when any validation fails
   */
  @Override
  public BigFileBean getParametersFromRequest(MultipartFile file, String fileMD5, Long fileSize,
                                              int totalChunks, int currChunks) {
    try {
      HttpServletRequest request = ((ServletRequestAttributes) RequestContextHolder.getRequestAttributes()).getRequest();
      BigFileBean bigFile = new BigFileBean();
      String token = request.getHeader("Zfile-Token");
      bigFile.setToken(token);
      // FIX: 'Integer.valueOf(int) == null' can never be true — the null checks were dead code.
      if (currChunks < 0) {
        throw new StorageExcetion("currChunk不能为负数",
            ResponseCode.PARAMETER_ERROR);
      }
      if (totalChunks < 1) {
        throw new StorageExcetion("totalChunks不能为0或负数",
            ResponseCode.PARAMETER_ERROR);
      }
      if (StringUtils.isBlank(fileMD5)) {
        throw new StorageExcetion("参数fileMD5值不能为空",
            ResponseCode.PARAMETER_ERROR);
      }
      if (fileSize < 0) {
        throw new StorageExcetion("fileSize不能为负",
            ResponseCode.PARAMETER_ERROR);
      }
      if (file.isEmpty()) {
        throw new StorageExcetion("file文件不能为空", ResponseCode.PARAMETER_ERROR);
      }
      bigFile.setCurrChunk(currChunks);
      bigFile.setTotalChunks(totalChunks);
      bigFile.setFileMd5(fileMD5);
      bigFile.setFileSize(fileSize);
      bigFile.setOperDate(new Date());
      List<MultipartFile> files = new ArrayList<>();
      files.add(file);
      bigFile.setFiles(files);
      return bigFile;
    } catch (Exception e) {
      logger.error(e.getMessage(), e);
      throw new StorageExcetion(e.getMessage(), ResponseCode.PARAMETER_ERROR);
    }
  }

  /**
   * Inserts a new upload record marked as finished.
   *
   * @param fileBean  the original upload request
   * @param storePath the merged file's URL/path
   * @param file      the last uploaded chunk (supplies the original filename)
   * @param currChunk the number of chunks uploaded (total chunk count at completion)
   * @param userId    owner of the record
   * @return the persisted record
   */
  private TUploadedRecord saveUploadedRecord(BigFileBean fileBean, String storePath, MultipartFile file, Integer currChunk, Long userId) {
    TUploadedRecord tUploadedRecord = new TUploadedRecord();
    tUploadedRecord.setChunks(fileBean.getTotalChunks());
    tUploadedRecord.setCreated(fileBean.getOperDate());
    tUploadedRecord.setFilemd5(fileBean.getFileMd5());
    tUploadedRecord.setFileName(file.getOriginalFilename());
    tUploadedRecord.setFilePath(storePath);
    tUploadedRecord.setFileSize(fileBean.getFileSize());
    tUploadedRecord.setStatus(GlobalConsts.FILE_UPLOAD_FINISH);
    // NOTE(review): caller passes totalChunks here, so this stores totalChunks + 1 — confirm
    // whether uploadedChunks is meant to be a count or a "next chunk" index.
    tUploadedRecord.setUploadedChunks(currChunk + 1);
    tUploadedRecord.setUpdated(fileBean.getOperDate());
    tUploadedRecord.setUserId(userId);
    tUploadedRecordMapper.insertRecord(tUploadedRecord);
    return tUploadedRecord;
  }

  /**
   * Resolves the user ID from a gateway token.
   *
   * @param token the gateway-issued token (may be null/invalid)
   * @return the user ID, or 0 when the service is called without going through the gateway
   */
  private Long getUserIdByToken(String token) {
    try {
      return userInfoHelper.getUserIdByToken(token);
    } catch (Exception e) {
      // No/invalid token: treat as the anonymous user (ID 0) by design.
      return 0L;
    }
  }

  /**
   * Acquires the per-file upload lock, failing fast if another upload of the same file
   * is already in progress.
   *
   * NOTE(review): this get-then-put sequence is not atomic, so two concurrent callers can
   * both acquire the "lock" — a cache-side atomic set-if-absent would close the race.
   *
   * @param redisFileLockKey cache key of the lock
   * @throws StorageExcetion when the lock is already held
   */
  private void addFileLock(String redisFileLockKey) {
    String fileLock = (String) cacheManager.get(redisFileLockKey);
    int lock = Integer.parseInt((fileLock == null) ? "0" : fileLock);
    if (lock > 0) {
      // Lock present: the file is currently being uploaded by someone else.
      throw new StorageExcetion(ResponseMessage.FILE_EXIST, ResponseCode.GET_FILE_LOCK_FAIL);
    }
    // Lock absent: take it, with a one-day TTL so a crashed upload cannot wedge the file forever.
    cacheManager.put(redisFileLockKey, "1", GlobalConsts.ONE_DAY_LONG);
  }

  /**
   * Returns the lowercase file extension including the leading dot (e.g. ".zip"),
   * or the empty string when the name is blank or has no dot.
   */
  public String getFileSuffix(String filename) {
    if (StringUtils.isBlank(filename) || (filename.indexOf(POINT_STR) == MINUS_ONE_INT)) {
      return EMPTY_STR;
    }
    return filename.substring(filename.lastIndexOf(POINT_STR)).toLowerCase();
  }
}