import type { FileChunkResult } from "../worker/chunkFile";

/** Size of each file chunk in bytes (10 MiB). */
export const CHUNK_SIZE = 10 * 1024 * 1024;

/**
 * Number of worker threads to fan chunking out to.
 * `navigator.hardwareConcurrency` is unavailable in some environments
 * (SSR, older browsers) — fall back to 4 so downstream division
 * (`Math.ceil(chunkCount / THREAD_COUNT)`) never sees `undefined`.
 */
export const THREAD_COUNT =
  (typeof navigator !== "undefined" && navigator.hardwareConcurrency) || 4;

export const cutFile = async (file: File) => {
  return new Promise((resolve) => {
    const chunkCount = Math.ceil(file.size / CHUNK_SIZE);
    const threadChunkCount = Math.ceil(chunkCount / THREAD_COUNT);
    const finalUseThreadCount = Math.min(THREAD_COUNT, chunkCount);
    const result: any[] = [];
    let finishCount = 0;

    for (let i = 0; i < finalUseThreadCount; i++) {
      const work = new Worker("/src/utils/worker/chunkFile.ts", { type: "module" });
      const start = i * threadChunkCount;
      const end = Math.min((i + 1) * threadChunkCount, chunkCount);

      work.postMessage({
        file,
        start,
        end,
        chunk_size: CHUNK_SIZE,
      });

      work.onmessage = (e: MessageEvent) => {
        result[i] = e.data;
        work.terminate();
        finishCount++;
        if (finishCount === finalUseThreadCount) {
          resolve(result.flat());
        }
      };
    }
  });
};

/**
 * Computes the file's hash. Hashing every chunk's full content would be too
 * slow, so the following sampling strategy is used (implemented in the
 * worker):
 * 1. The first and last chunks participate in full.
 * 2. For each middle chunk, 2 bytes are taken from its front, middle and back.
 * This greatly reduces hash-computation time.
 * @param fileChunkList the array of chunks produced by slicing the file
 * @returns a promise resolving to the file's hash string
 */
export const getFileHash = async (fileChunkList: Array<FileChunkResult>): Promise<string> => {
  return new Promise((resolve, reject) => {
    const work = new Worker("/src/utils/worker/file.ts", { type: "module" });
    work.postMessage({ fileChunkList });
    work.onmessage = (e: MessageEvent<string>) => {
      // Terminate on every path — the original leaked the worker thread.
      work.terminate();
      const fileHash = e.data;
      if (fileHash) {
        resolve(fileHash);
      } else {
        // An empty hash previously left the promise pending forever;
        // surface it as an explicit failure instead.
        reject(new Error("getFileHash: worker returned an empty hash"));
      }
    };
    // A worker error previously hung the promise and leaked the thread.
    work.onerror = (e: ErrorEvent) => {
      work.terminate();
      reject(new Error(e.message));
    };
  });
};