import SparkMD5 from "spark-md5";

// Slice size used when hashing a file incrementally (2 MiB = 2097152 bytes).
export const CALC_HASH_CHUNK_SIZE = 2 * 1024 * 1024;
// Upload chunk size (5 MiB = 5242880 bytes).
export const CHUNK_SIZE = 5 * 1024 * 1024;
// Batch size: 40 upload chunks per batch (200 MiB = 209715200 bytes).
export const BATCH_CHUNK_SIZE = 40 * CHUNK_SIZE;

/**
 * Number of upload chunks the file will be split into.
 *
 * @param {File} file
 * @returns {Number} chunk count (0 for an empty file)
 */
export const getChunkCount = (file) => {
  return Math.ceil(file.size / CHUNK_SIZE);
};

/**
 * 获取分片信息
 *
 * @param {File} file
 * @returns 分片信息
 */
/**
 * Collect upload metadata for a file: name, size, sampled hash and extension.
 *
 * @param {File} file
 * @returns {Promise<Object>} { file, name, size, hash, suffix }
 */
export const getFileInfo = async (file) => {
  const hash = await calcHashSample(file);
  const { name, size } = file;
  // Text after the last dot; the whole name when there is no dot at all.
  const suffix = name.slice(name.lastIndexOf(".") + 1);
  return { file, name, size, hash, suffix };
};

/**
 * 分片创建
 *
 * @param {Blob} file 分片文件
 * @param {Number} index 分片索引
 * @returns Promise
 */
/**
 * Create one upload chunk and compute its MD5 hash.
 *
 * @param {Blob} file whole file to slice from
 * @param {Number} index 0-based chunk index
 * @returns {Promise<Object>} { chunkFile, startIndex, endIndex, index, hash }
 */
export const createChunk = (file, index) => {
  return new Promise((resolve, reject) => {
    const startIndex = index * CHUNK_SIZE;
    const endIndex = Math.min(startIndex + CHUNK_SIZE, file.size);
    const chunkFile = file.slice(startIndex, endIndex);

    const spark = new SparkMD5.ArrayBuffer(); // MD5 accumulator
    const fileReader = new FileReader();
    // Attach both handlers BEFORE starting the read, and reject on failure
    // so a read error cannot leave callers awaiting forever.
    fileReader.onload = (e) => {
      spark.append(e.target.result);
      resolve({ chunkFile, startIndex, endIndex, index, hash: spark.end() });
    };
    fileReader.onerror = () =>
      reject(fileReader.error ?? new Error("Failed to read chunk"));
    fileReader.readAsArrayBuffer(chunkFile);
  });
};

/**
 * 全量计算文件哈希值
 *
 * @param {File} file
 * @returns 文件哈希值
 */
/**
 * Compute the MD5 hash of the WHOLE file, reading it sequentially in
 * CALC_HASH_CHUNK_SIZE slices.
 *
 * @param {File} file
 * @param {Function} [onProgress] called with an integer percentage (0-100)
 * @returns {Promise<String>} hex MD5 digest of the full file content
 */
export const calcHashSync = (file, onProgress) => {
  return new Promise((resolve, reject) => {
    const sliceSize = CALC_HASH_CHUNK_SIZE;

    let progress = 0;
    const chunkCount = Math.ceil(file.size / sliceSize); // number of slices
    const handleProgress = () => {
      progress += 100 / chunkCount;
      // Clamp: floating-point accumulation can creep past 100.
      onProgress && onProgress(Math.min(100, Math.floor(progress)));
    };

    const spark = new SparkMD5.ArrayBuffer(); // MD5 accumulator
    const loadNext = (index = 0) => {
      const nextIndex = index + sliceSize;
      const chunkFile = file.slice(index, Math.min(nextIndex, file.size));
      const reader = new FileReader();
      // Handlers attached before the read starts; reject on failure so the
      // Promise always settles.
      reader.onload = (e) => {
        spark.append(e.target.result);
        handleProgress();
        nextIndex >= file.size ? resolve(spark.end()) : loadNext(nextIndex);
      };
      reader.onerror = () =>
        reject(reader.error ?? new Error("Failed to read file"));
      reader.readAsArrayBuffer(chunkFile);
    };

    loadNext();
  });
};

/**
 * 抽样计算文件哈希值
 *
 * 文件分片 = 文件首尾分片 + 其他多次分割分片
 *
 * @param {File} file
 * @returns 文件哈希值
 */
/**
 * Compute a SAMPLED MD5 hash of the file (fast, content-sensitive but not a
 * full-content digest).
 *
 * Sample = first slice in full + a few bytes from the start/middle/end of
 * every interior slice + the last slice in full. Files smaller than three
 * slices are hashed in full via calcHashSync.
 *
 * @param {File} file
 * @param {Function} [onProgress] called with an integer percentage (0-100)
 * @returns {Promise<String>} hex MD5 digest of the sampled bytes
 */
export const calcHashSample = (file, onProgress) => {
  return new Promise((resolve, reject) => {
    const sliceSize = CALC_HASH_CHUNK_SIZE;

    // Small files: hash the whole content. `return` is required — otherwise
    // the sampling code below would still run after resolving.
    if (Math.ceil(file.size / sliceSize) < 3) {
      resolve(calcHashSync(file, onProgress));
      return;
    }

    // Push Blob slices directly: wrapping them in objects would make the
    // Blob constructor stringify each part to "[object Object]", so the
    // hash would never see any actual file bytes.
    const samples = [];
    samples.push(file.slice(0, sliceSize)); // first slice in full
    for (let cur = sliceSize; cur < file.size - sliceSize; cur += sliceSize) {
      const middle = cur + sliceSize / 2;
      const end = cur + sliceSize;
      samples.push(file.slice(cur, cur + 2)); // 2 bytes at slice start
      samples.push(file.slice(middle - 1, middle + 1)); // 2 bytes at slice middle
      samples.push(file.slice(end - 2, end)); // 2 bytes at slice end
    }
    const start = Math.ceil((file.size - sliceSize) / sliceSize) * sliceSize;
    samples.push(file.slice(start, file.size)); // last slice in full

    const spark = new SparkMD5.ArrayBuffer(); // MD5 accumulator
    const reader = new FileReader();
    reader.onload = (e) => {
      spark.append(e.target.result);
      onProgress && onProgress(100);
      resolve(spark.end());
    };
    reader.onerror = () =>
      reject(reader.error ?? new Error("Failed to read file sample"));
    reader.readAsArrayBuffer(
      new Blob(samples, { type: "application/octet-stream" })
    );
  });
};

/**
 * 批处理分片任务
 *
 * @param {Object} param
 * @param {File} param.file 文件
 * @param {Number} param.uploadedSize 已上传大小
 * @param {Function} action 执行函数
 * @returns Promise
 */
/**
 * Run `action` sequentially over the not-yet-uploaded part of the file,
 * one BATCH_CHUNK_SIZE batch at a time, and return the flattened results.
 *
 * Unlike the previous callback-recursion version, an exception thrown by
 * `action` now rejects the returned Promise instead of becoming an
 * unhandled rejection that leaves the Promise pending forever.
 *
 * @param {Object} param
 * @param {File} param.file file to process
 * @param {Number} [param.uploadedSize=0] bytes already uploaded (resume offset)
 * @param {Function} action async callback receiving
 *   { file, start, end, batchIndex, batchCount }; its (array) result is
 *   collected. A falsy result contributes nothing.
 * @returns {Promise<Array>} concatenation of all action results
 */
export const handleBatchTask = async ({ file, uploadedSize = 0 }, action) => {
  const remaining = file.size - uploadedSize;
  // At least one batch, so `action` runs once even for an empty remainder
  // and batchIndex/batchCount stay consistent (0 of 1, not 0 of 0).
  const batchCount = Math.max(1, Math.ceil(remaining / BATCH_CHUNK_SIZE));

  const result = [];
  for (let index = 0; index < batchCount; index += 1) {
    const start = index * BATCH_CHUNK_SIZE + uploadedSize;
    const end = Math.min(start + BATCH_CHUNK_SIZE, file.size);
    const chunkFile = file.slice(start, end, file.type);

    const res = await action({
      file: chunkFile,
      start,
      end,
      batchIndex: index,
      batchCount,
    });
    result.push(res || []);
  }

  return result.flat();
};
