import SparkMD5 from "spark-md5";

/**
 * Upload tuning constants.
 *
 * A plain `as const` object instead of `const enum`: const-enum inlining is
 * unsupported under `isolatedModules` and esbuild-based bundlers such as
 * Vite (which this file targets — see the worker-URL comment below).
 * Member access syntax and values are unchanged.
 */
const UPLOAD_SCHEMA = {
  /** Number of worker threads used to slice/hash chunks. */
  THREAD_COUNT: 4,
  /** Size of each chunk in bytes (1 MiB). */
  CHUNK_SIZE: 1 * 1024 * 1024,
  /** Hard cap on concurrent upload requests. */
  MAX_REQUEST: 6,
} as const;

/** One slice of the file being uploaded. */
interface IChunk {
  /** Byte offset where this chunk starts within the file. */
  start: number;
  /** Byte offset where this chunk ends — presumably exclusive; confirm against upload-worker.ts. */
  end: number;
  /** Zero-based position of this chunk in the overall chunk list. */
  index: number;
  /** Chunk name/hash; overwritten during upload as `<fileHash>-<index><suffix>`. */
  chunkHash: string;
  /** The chunk's binary data. */
  chunk: Blob;
}

/**
 * Split a large file into fixed-size chunks and hash them in parallel
 * across several Web Workers, then compute the whole-file hash.
 *
 * @param file        the file to fragment
 * @param chunkSize   size of each chunk in bytes (default 1 MiB)
 * @param threadCount number of worker threads to spawn (capped by
 *                    `navigator.hardwareConcurrency`)
 * @returns a promise resolving to `{ chunkList, fileHash }`; rejects if a
 *          worker errors out
 */
export function fileFragment(
  file: File,
  chunkSize: number = UPLOAD_SCHEMA.CHUNK_SIZE,
  threadCount: number = UPLOAD_SCHEMA.THREAD_COUNT
): Promise<{ chunkList: any[]; fileHash: string }> {
  return new Promise((resolve, reject) => {
    /** Collected chunks, indexed by their global chunk index. */
    const chunkList: any[] = [];
    /** Total number of chunks for this file. */
    const chunkCount = Math.ceil(file.size / chunkSize);

    /** Never spawn more workers than the browser can actually run. */
    const browserAvailableThreadCount =
      navigator.hardwareConcurrency || threadCount;
    threadCount = Math.min(threadCount, browserAvailableThreadCount);

    /** Number of chunks each worker is responsible for. */
    const threadHandlerChunksCount = Math.ceil(chunkCount / threadCount);

    /** spark-md5 incremental hasher. */
    const spark = new SparkMD5.ArrayBuffer();

    /** How many workers have delivered their chunks so far. */
    let finishedThreadCount = 0;

    for (let i = 0; i < threadCount; i++) {
      /**
       * ES modules require static import paths, so build an absolute URL
       * via `new URL(..., import.meta.url)` so Vite can resolve the worker.
       */
      const worker = new Worker(
        new URL("./upload-worker.ts", import.meta.url),
        { type: "module" }
      );

      /** This worker handles the [startIndex, endIndex) slice of chunks. */
      const startIndex = i * threadHandlerChunksCount;
      /** Clamp so the last worker never runs past the total chunk count. */
      const endIndex = Math.min(
        startIndex + threadHandlerChunksCount,
        chunkCount
      );

      /**
       * NOTE(review): `spark` is a class instance; structured cloning via
       * postMessage strips its prototype methods, so the worker most likely
       * cannot use it as a hasher — confirm against upload-worker.ts
       * whether this field is actually consumed.
       */
      worker.postMessage({
        type: "getChunks",
        file,
        chunkSize,
        startIndex,
        endIndex,
        spark,
      });

      /** Surface worker failures instead of hanging the promise forever. */
      worker.onerror = (err) => {
        worker.terminate();
        reject(err);
      };

      worker.onmessage = (e) => {
        const { type, chunks, fileHash } = e.data;

        /** Place this worker's chunks at their global positions. */
        if (type === "getChunks") {
          for (let j = startIndex; j < endIndex; j++) {
            chunkList[j] = chunks[j - startIndex];
          }
          finishedThreadCount++;
          if (finishedThreadCount === threadCount) {
            /** The last worker to finish computes the whole-file hash. */
            worker.postMessage({ type: "getFileHash", chunkList });
          } else {
            /** This worker is done; free its thread (previously leaked). */
            worker.terminate();
          }
        }

        /** Whole-file hash is ready: clean up, upload, and resolve. */
        if (type === "getFileHash") {
          worker.terminate();
          uploadFile(chunkList, fileHash, file);
          resolve({
            chunkList,
            fileHash,
          });
        }
      };
    }
  });
}

/**
 * Build one FormData per chunk and upload them with bounded concurrency.
 *
 * @param chunks   chunk list produced by fileFragment
 * @param fileHash md5 hash of the whole file
 * @param file     the original file (only its name/extension is used here)
 * @returns the per-chunk upload results from concurrenceRequest
 */
async function uploadFile(
  chunks: IChunk[],
  fileHash: string,
  file: File
): Promise<unknown> {
  // Full regex match keeps the leading dot, e.g. ".png". Fall back to ""
  // for extension-less names (previously `undefined` leaked into the
  // chunk name as the literal text "undefined").
  const suffix = /\.([a-zA-Z0-9]+)$/.exec(file.name)?.[0] ?? "";

  const formDatas = chunks.map((chunk) => {
    const formdata = new FormData();
    // e.g. "<fileHash>-3.png"
    chunk.chunkHash = `${fileHash}-${chunk.index}${suffix}`;

    formdata.append("file", chunk.chunk);
    formdata.append("fileName", chunk.chunkHash);
    formdata.append("md5Name", fileHash);
    return formdata;
  });

  // Previously fire-and-forget: the async function neither awaited nor
  // returned the uploads, so callers could not observe completion.
  return concurrenceRequest(formDatas);
}

/**
 * Upload request bodies with a bounded number of concurrent requests.
 *
 * Failures are caught per request and surfaced as in-place results
 * (best effort), so the returned promise never rejects because one
 * chunk failed.
 *
 * @param arr            request bodies (one FormData per chunk)
 * @param maxConcurrence upper bound on simultaneous requests, hard-capped
 *                       at UPLOAD_SCHEMA.MAX_REQUEST
 * @returns results (parsed JSON, or the caught error) for EVERY request,
 *          in input order
 */
async function concurrenceRequest(
  arr: any[],
  maxConcurrence: number = 6
): Promise<unknown[]> {
  /** Requests currently in flight — used only to gate concurrency. */
  const inFlight: Promise<unknown>[] = [];
  /**
   * Every request ever started. The previous version ran Promise.all over
   * the pool that finished requests had been spliced OUT of, so it resolved
   * with only the last still-pending results; this array keeps them all.
   */
  const allRequests: Promise<unknown>[] = [];

  const limit = Math.min(maxConcurrence, UPLOAD_SCHEMA.MAX_REQUEST);

  for (const body of arr) {
    const request = fetch("/api/small-contractor/sys/file/uploadChunk", {
      method: "POST",
      body,
    })
      .then((res) => res.json())
      // Best effort: hand the error back as a result instead of rejecting.
      .catch((err) => err)
      .finally(() => {
        // Free this request's concurrency slot.
        inFlight.splice(inFlight.indexOf(request), 1);
      });

    inFlight.push(request);
    allRequests.push(request);

    /** Pool is full — wait for any one request to settle before continuing. */
    if (inFlight.length >= limit) {
      await Promise.race(inFlight);
    }
  }

  return Promise.all(allRequests);
}
