import SparkMD5 from 'spark-md5'

/**
 * Compute the MD5 digest of a `File` by reading it in chunks, so large
 * files never need to be held in memory all at once.
 *
 * Chunks are read strictly sequentially: MD5 is a streaming hash, so
 * blocks must be appended in order — concurrent reads could never be
 * combined into a correct digest with a single hasher.
 *
 * @param file          The file to hash.
 * @param chunkSize     Size of each read, in bytes. Must be > 0.
 * @param maxConcurrent Kept for backward compatibility; ignored.
 *                      (A single FileReader only supports one read at a
 *                      time, and MD5 requires in-order appends, so the
 *                      previous implementation never achieved
 *                      concurrency either.)
 * @returns Promise resolving to the hex MD5 string; rejects on a read
 *          error or an invalid `chunkSize`.
 */
export const calculateFileMd5 = (
  file: File,
  chunkSize: number,
  maxConcurrent: number = 4
): Promise<string> => {
  void maxConcurrent // deliberately unused — see doc comment
  return new Promise((resolve, reject) => {
    if (!Number.isFinite(chunkSize) || chunkSize <= 0) {
      reject(new Error('chunkSize must be a positive number'))
      return
    }

    const spark = new SparkMD5.ArrayBuffer()
    const totalChunks = Math.ceil(file.size / chunkSize)

    // Empty file: nothing to read — resolve with the MD5 of zero bytes.
    // (The previous implementation hung forever in this case.)
    if (totalChunks === 0) {
      resolve(spark.end())
      return
    }

    let currentChunk = 0
    const reader = new FileReader()

    reader.onload = (e) => {
      const buffer = e.target?.result
      if (!(buffer instanceof ArrayBuffer)) {
        reject(new Error('文件读取错误'))
        return
      }
      spark.append(buffer)
      currentChunk++
      if (currentChunk < totalChunks) {
        loadNext() // read the next chunk only after this one is appended
      } else {
        resolve(spark.end())
      }
    }

    reader.onerror = () => {
      reject(new Error('文件读取错误'))
    }

    // Kick off the read for the chunk at index `currentChunk`.
    const loadNext = () => {
      const start = currentChunk * chunkSize
      const end = Math.min(start + chunkSize, file.size)
      reader.readAsArrayBuffer(file.slice(start, end))
    }

    loadNext()
  })
}
