import SparkMD5 from 'spark-md5'

/**
 * Splits a File/Blob into fixed-size chunks and computes an incremental
 * MD5 hash of the whole file using SparkMD5.
 */
export default class FileSlicer {
  // Default chunk size: 1.5 MB per slice.
  chunkSize = 1.5 * 1024 * 1024
  // Blob chunks produced by slice(); empty until slice() is called.
  chunkList = []
  // MD5 hex digest of the whole file; set once hash() completes.
  fileHash

  /**
   * @param {File|Blob} file - the file to slice and hash
   * @param {number} [chunkSize] - chunk size in bytes; falls back to the 1.5 MB default
   */
  constructor (file, chunkSize) {
    this.file = file
    this.chunkSize = chunkSize || this.chunkSize
  }

  /**
   * Slice the file into chunks of `chunkSize` bytes.
   * The last chunk may be smaller than `chunkSize`.
   * @param {number} [chunkSize] - chunk size in bytes
   * @returns {FileSlicer} this, for chaining (e.g. `slicer.slice().hash(cb)`)
   */
  slice (chunkSize = this.chunkSize) {
    const chunkList = []
    const chunkTotal = Math.ceil(this.file.size / chunkSize)
    for (let i = 0; i < chunkTotal; i++) {
      chunkList.push(this.file.slice(i * chunkSize, (i + 1) * chunkSize))
    }
    this.chunkList = chunkList
    return this
  }

  /**
   * Compute the file's MD5 hash incrementally, chunk by chunk.
   * Invokes `callback` with `{ percentage }` after each intermediate chunk
   * and finally with `{ percentage: 100, hash }`; on a read failure invokes
   * `callback(null)`.
   * @param {function(Object|null): void} callback - progress/result callback
   * @param {Blob[]} [chunkList] - chunks to hash; defaults to `this.chunkList`
   */
  hash (callback, chunkList = this.chunkList) {
    const spark = new SparkMD5.ArrayBuffer()

    // Guard: no chunks to read (zero-byte file, or hash() called before
    // slice()). Without this, loadNext() would call readAsArrayBuffer with
    // `undefined` and throw before the callback ever fired.
    if (chunkList.length === 0) {
      const hash = spark.end()
      this.fileHash = hash
      callback({ percentage: 100, hash })
      return
    }

    let currentChunk = 0
    let percentage = 0
    // A single FileReader is reused for every chunk; reads are sequential.
    const fileReader = new FileReader()

    fileReader.onload = e => {
      spark.append(e.target.result)
      if (currentChunk < chunkList.length) {
        // More chunks remain: report cumulative progress and continue.
        percentage += 100 / chunkList.length
        callback({ percentage })
        loadNext()
      } else {
        // Last chunk appended: finalize the digest and report completion.
        const hash = spark.end()
        this.fileHash = hash
        callback({ percentage: 100, hash })
      }
    }

    fileReader.onerror = function () {
      console.error('文件读取失败')
      callback(null)
    }

    // Reads the chunk at `currentChunk`, then advances the index; the
    // resulting onload event drives the next iteration.
    function loadNext () {
      fileReader.readAsArrayBuffer(chunkList[currentChunk++])
    }

    loadNext()
  }
}
