import SparkMD5 from 'spark-md5';
import { useThreadService } from './thread';

// Chunk size used when computing the file hash incrementally: 5 MB (5242880 bytes).
export const HASH_CHUNK_SIZE = 5 * 1024 * 1024;
// Upload chunk size — deliberately the same as the hash chunk size.
export const CHUNK_SIZE = HASH_CHUNK_SIZE;
// Cross-browser Blob/File slice (older Firefox/WebKit shipped vendor-prefixed versions).
export const blobSlice = File.prototype.slice || File.prototype.mozSlice || File.prototype.webkitSlice;

/**
 * Split a file into fixed-size chunks, starting after the already-uploaded prefix.
 *
 * @param {File} file File object to split
 * @param {Number} uploadSize Number of bytes already uploaded (chunking resumes after them)
 * @returns {Promise<Array<{startIndex: Number, endIndex: Number, chunkFile: Blob}>>}
 *   One record per chunk with its byte range and the sliced Blob.
 */
export async function createChunks(file, uploadSize = 0) {
  const remainingSize = file.size - uploadSize;
  const chunkCount = Math.ceil(remainingSize / CHUNK_SIZE);

  const chunks = [];
  for (let index = 0; index < chunkCount; index++) {
    const startIndex = uploadSize + index * CHUNK_SIZE;
    // Clamp the last chunk to the end of the file.
    const endIndex = Math.min(startIndex + CHUNK_SIZE, file.size);
    // Blob#slice is synchronous — the original `await` here wrapped a
    // non-Promise value and just cost one microtask per chunk.
    const chunkFile = blobSlice.call(file, startIndex, endIndex, file.type);
    chunks.push({ startIndex, endIndex, chunkFile });
  }
  return chunks;
}

/**
 * Compute the file's MD5 hash incrementally on the main thread,
 * reading one 5 MB slice at a time.
 *
 * @param {File} file File object to hash
 * @param {Function} [onProgress] Called as (processedBytes, totalBytes) after each slice
 * @returns {Promise<String>} Hex MD5 digest of the whole file; rejects on a read error
 */
export function computeHash(file, onProgress) {
  return new Promise((resolve, reject) => {
    const spark = new SparkMD5.ArrayBuffer();
    let progress = 0;

    /**
     * Read one slice, fold it into the hash, then recurse on the next slice.
     *
     * @param startIndex Byte offset where this slice starts
     * @returns void
     */
    const onloadNext = (startIndex = 0) => {
      // Clamp so the final slice's progress never overshoots file.size
      // (the original reported up to HASH_CHUNK_SIZE - 1 extra bytes).
      const endIndex = Math.min(startIndex + HASH_CHUNK_SIZE, file.size);
      const chunkFile = blobSlice.call(file, startIndex, endIndex);
      const reader = new FileReader();
      reader.onload = (e) => {
        spark.append(e.target.result);
        progress += endIndex - startIndex;
        onProgress?.(progress, file.size);
        endIndex >= file.size ? resolve(spark.end()) : onloadNext(endIndex);
      };
      // The original continued from `onloadend`, which also fires on failure,
      // so a failed read silently produced a corrupt hash. Reject instead.
      reader.onerror = () => reject(reader.error);
      reader.readAsArrayBuffer(chunkFile);
    };

    onloadNext();
  });
}

/**
 * Compute the file hash with worker threads.
 *
 * The thread service returns one partial hash per worker; a single partial
 * is already the final digest, while multiple partials are concatenated and
 * hashed once more to produce the combined digest.
 *
 * @param {File} file File object to hash
 * @param {Function} onProgress Progress callback forwarded to the thread service
 * @returns Promise<string>
 */
export async function threadComputeHash(file, onProgress) {
  const { chunkThreadTask } = useThreadService();

  const partials = await chunkThreadTask(file, onProgress);

  if (partials.length < 2) {
    return partials[0];
  }
  return SparkMD5.hash(partials.join(''));
}

/**
 * Compute the file hash in worker-thread batches, scheduled during browser
 * idle periods via requestIdleCallback so the main thread stays responsive.
 *
 * @param {File} file File object to hash
 * @param {Function} [onProgress] Called as (processedBytes, totalBytes)
 * @returns {Promise<string>} Hex MD5 digest; rejects if a thread task fails
 */
export function threadComputeHashByIdle(file, onProgress) {
  const { threadCount, chunkThreadTask } = useThreadService();
  // Each batch hands every worker thread a 20 MB slice.
  const batchChunkSize = threadCount * 20 * 1024 * 1024;
  // Number of batches needed to cover the whole file.
  const batchCount = Math.ceil(file.size / batchChunkSize);
  const result = [];
  let index = 0;
  let progress = 0;
  return new Promise((resolve, reject) => {
    const workLoop = async (deadline) => {
      try {
        // Run batches only while idle time remains AND batches are left.
        // The original looped solely on timeRemaining(), which (a) kept
        // slicing past end-of-file after the final batch resolved, and
        // (b) scheduled a second concurrent workLoop while this one kept
        // iterating, racing on the shared `index`.
        while (index < batchCount && deadline.timeRemaining() > 0) {
          const startIndex = index * batchChunkSize;
          const endIndex = Math.min(startIndex + batchChunkSize, file.size);
          const chunkFile = blobSlice.call(file, startIndex, endIndex, file.type);
          result[index] = await chunkThreadTask(chunkFile, (v) => onProgress?.(v + progress, file.size));
          progress += endIndex - startIndex;
          index++;
        }
        if (index >= batchCount) {
          // All batches done: combine the partial hashes exactly like
          // threadComputeHash does.
          const binary = result.flat();
          resolve(binary.length >= 2 ? SparkMD5.hash(binary.join('')) : binary[0]);
        } else {
          // Idle budget exhausted — resume in the next idle period. (The
          // original only rescheduled from inside the loop body, so the
          // promise hung forever when the deadline expired between batches.)
          requestIdleCallback(workLoop);
        }
      } catch (err) {
        // The original had no rejection path; a failed thread task left the
        // promise pending forever.
        reject(err);
      }
    };
    requestIdleCallback(workLoop);
  });
}
