import SparkMD5 from 'spark-md5';
/**
 * A single slice of a file prepared for chunked upload.
 */
export interface FileChunk {
  /** The sliced Blob holding this chunk's bytes */
  file: Blob
  /** Zero-based position of this chunk in the sequence */
  index: number
  /** Byte offset where the chunk starts (inclusive) */
  start: number
  /** Byte offset where the chunk ends (exclusive) */
  end: number
  /** Chunk length in bytes (end - start) */
  size: number
}

/**
 * Split a file into fixed-size chunks.
 *
 * @param file      the source File to slice
 * @param chunkSize size of each chunk in bytes (must be a positive number)
 * @returns ordered chunk descriptors covering the whole file
 *          (empty array for an empty file)
 * @throws RangeError when chunkSize is not a positive finite number —
 *         without this guard the loop below would never terminate,
 *         since `current` would never advance past `file.size`
 */
export const createFileChunk = (file: File, chunkSize: number): FileChunk[] => {
  if (!Number.isFinite(chunkSize) || chunkSize <= 0) {
    throw new RangeError(`chunkSize must be a positive number, got ${chunkSize}`)
  }
  const chunks: FileChunk[] = []
  let current = 0
  while (current < file.size) {
    // Last chunk may be shorter than chunkSize; clamp to the file end.
    const end = Math.min(current + chunkSize, file.size)
    chunks.push({
      file: file.slice(current, end),
      index: chunks.length,
      start: current,
      end,
      size: end - current,
    })
    current = end
  }
  return chunks
}

/**
 * Compute the MD5 hash of a file incrementally using SparkMD5.
 *
 * The file is read in 2 MB slices so arbitrarily large files can be
 * hashed without loading them fully into memory.
 *
 * @param file the File to hash
 * @returns a promise resolving to the hex-encoded MD5 digest;
 *          rejects if the underlying FileReader fails
 */
export const calculateHash = (file: File): Promise<string> => {
  return new Promise((resolve, reject) => {
    const spark = new SparkMD5.ArrayBuffer()

    const reader = new FileReader()
    const chunkSize = 2 * 1024 * 1024 // 2MB per read keeps memory bounded
    const chunks = Math.ceil(file.size / chunkSize)
    let currentChunk = 0

    const loadNext = () => {
      const start = currentChunk * chunkSize
      const end = Math.min(start + chunkSize, file.size)
      reader.readAsArrayBuffer(file.slice(start, end))
    }

    reader.onload = (e) => {
      // On a successful readAsArrayBuffer, result is an ArrayBuffer.
      spark.append(e.target?.result as ArrayBuffer)
      currentChunk++

      if (currentChunk < chunks) {
        loadNext()
      } else {
        // All chunks appended; end() yields the hex digest.
        resolve(spark.end())
      }
    }

    // BUG FIX: the original never settled the promise on a read failure,
    // leaving callers awaiting forever. Surface the reader's error instead.
    reader.onerror = () => {
      reject(reader.error ?? new Error('Failed to read file for hashing'))
    }

    loadNext()
  })
}