// Generate a file hash (intended to run inside a Worker)
import SparkMD5 from 'spark-md5';

/**
 * Incrementally compute the MD5 hash of a File/Blob by reading it in
 * fixed-size chunks, so large files are never loaded into memory at once.
 *
 * @param {Blob} file - the file (or Blob) to hash
 * @param {number} CHUNK_SIZE - chunk size in bytes; must be > 0
 * @returns {Promise<string>} resolves with the hex MD5 digest of the file
 *   (for an empty file this is the MD5 of zero bytes); rejects if the
 *   underlying FileReader fails
 */
export const calculateHash = (file, CHUNK_SIZE) => {
  // new Promise is appropriate here: we are adapting the callback-based
  // FileReader API to a Promise.
  return new Promise((resolve, reject) => {
    const spark = new SparkMD5.ArrayBuffer();
    const reader = new FileReader();
    const chunks = Math.ceil(file.size / CHUNK_SIZE);
    let currentChunk = 0;

    reader.onload = (e) => {
      spark.append(e.target.result);
      currentChunk++;

      if (currentChunk < chunks) {
        loadNext();
      } else {
        resolve(spark.end());
      }
    };

    // Without this handler a read failure would leave the promise
    // pending forever (the original code never rejected).
    reader.onerror = () => {
      reject(reader.error ?? new Error('FileReader failed while hashing file'));
    };

    const loadNext = () => {
      const start = currentChunk * CHUNK_SIZE;
      // Clamp the final chunk to the end of the file.
      const end = Math.min(start + CHUNK_SIZE, file.size);
      reader.readAsArrayBuffer(file.slice(start, end));
    };

    loadNext();
  });
};