import SparkMD5 from "spark-md5";

/**
 * 创建分片
 * @param file 文件对象
 * @param index 第几个分片
 * @param chunkSize 分片的大小
 * @returns 返回每一个分片的结果
 */
/**
 * Create one chunk of a file and compute its MD5 hash.
 *
 * NOTE: hashing is CPU-bound; since JS is single-threaded this is why the
 * work runs inside a Web Worker (see the `onmessage` handler below).
 *
 * @param {File|Blob} file - the file being split
 * @param {number} index - zero-based index of the chunk to create
 * @param {number} chunkSize - size of each chunk in bytes
 * @returns {Promise<{start: number, end: number, index: number, hash: string, blob: Blob}>}
 *   resolves with the chunk's byte range, index, MD5 hash, and Blob;
 *   rejects if the FileReader fails.
 */
const changeCreateHunk = (file, index, chunkSize) => {
  return new Promise((resolve, reject) => {
    const start = index * chunkSize;
    // BUG FIX: was `start * chunkSize`, which produced an empty blob for
    // chunk 0 and wildly wrong ranges for every other chunk. The end offset
    // is start + chunkSize, clamped to the file size for the last chunk.
    const end = Math.min(start + chunkSize, file.size);
    const spark = new SparkMD5.ArrayBuffer();
    const fileReader = new FileReader();
    const blob = file.slice(start, end);
    fileReader.onload = (e) => {
      spark.append(e.target?.result);
      resolve({
        start,
        end,
        index,
        hash: spark.end(),
        blob,
      });
    };
    // Without this, a read failure would leave the Promise pending forever
    // and the worker would never post a result.
    fileReader.onerror = () => {
      reject(new Error(`Failed to read chunk ${index} [${start}, ${end})`));
    };
    fileReader.readAsArrayBuffer(blob);
  });
};

/**
 * Worker entry point: build chunks `start` (inclusive) through `end`
 * (exclusive) of the given file in parallel, then post the resolved
 * chunk descriptors back to the main thread.
 *
 * @param {MessageEvent<{file: File, start: number, end: number, chunk_size: number}>} e
 */
onmessage = async (e) => {
  const { file, start, end, chunk_size } = e.data;
  // Kick off every chunk's hash computation at once; each entry is a Promise.
  const tasks = Array.from({ length: end - start }, (_, offset) =>
    changeCreateHunk(file, start + offset, chunk_size)
  );
  const chunks = await Promise.all(tasks);
  postMessage(chunks);
};
