<template>
  <div>
    <input type="file" @change="getFile">
    <div>
      <div style="margin: 20px 0;" v-for="(chunk, index) in fileChunkList" :key="index">
        <input type="range" :value="chunk.percentage">
      </div>
    </div>
  </div>
</template>
<script>
import sparkMd5 from 'spark-md5'
import { uploadFile, mergeChunks } from './utils/request'

export default {
  data () {
    return {
      // Reactive chunk descriptors; each drives one progress bar in the template.
      fileChunkList: [],
      defaultChunkSize: 100 * 1024 * 1024 // 100 MB per chunk
    }
  },
  methods: {
    /**
     * Change handler for the file input: slices the chosen file, computes its
     * MD5 hash, then uploads every chunk.
     * @param {Event} e - native change event from <input type="file">
     */
    async getFile (e) {
      const file = e.target.files && e.target.files[0]
      // Guard: the user may dismiss the picker without choosing a file.
      if (!file) return
      this.currFile = file
      // Reset chunks left over from a previous selection so they are not
      // re-uploaded and the progress bars start fresh.
      this.fileChunkList = []
      try {
        const { fileHash } = await this.getFileChunk(file, this.defaultChunkSize)
        await this.uploadChunks(fileHash)
      } catch (err) {
        // Surface hashing/upload failures instead of leaving an unhandled rejection.
        console.error('file upload failed:', err)
      }
    },
    /**
     * Uploads every chunk in parallel, then asks the server to merge them.
     * @param {string} fileHash - MD5 of the whole file, used to key each chunk
     * @returns {Promise} resolves once the server has merged all chunks
     */
    uploadChunks (fileHash) {
      const requests = this.fileChunkList.map((item, index) => {
        const formData = new FormData()
        // Field name encodes file name, whole-file hash and chunk index so the
        // server can identify and reassemble the chunks in order.
        formData.append(`${this.currFile.name}-${fileHash}-${index}`, item.chunk)
        return uploadFile('/upload', formData, this.onUploadProgress(item))
      })
      // Return the promise so callers can await completion, and attach a
      // .catch so failures are not silently swallowed.
      return Promise.all(requests)
        .then(() => mergeChunks('/mergeChunks', {
          size: this.defaultChunkSize,
          filename: this.currFile.name
        }))
        .catch((err) => {
          console.error('chunk upload failed:', err)
          throw err
        })
    },
    /**
     * Builds a progress callback bound to one chunk entry.
     * @param {{percentage: number}} item - reactive chunk descriptor to update
     * @returns {function(ProgressEvent): void} handler writing item.percentage (0–100)
     */
    onUploadProgress (item) {
      return (e) => {
        item.percentage = (e.loaded / e.total) * 100
      }
    },
    /**
     * Slices `file` into `chunkSize` pieces (each pushed onto fileChunkList)
     * while incrementally computing the whole-file MD5 via spark-md5.
     * @param {File} file - file selected by the user
     * @param {number} chunkSize - bytes per chunk
     * @returns {Promise<{fileHash: string}>} resolves with the file's MD5 hash
     */
    getFileChunk (file, chunkSize) {
      const that = this
      return new Promise((resolve, reject) => {
        // Number of chunks, rounding up so the final partial chunk is included.
        const chunks = Math.ceil(file.size / chunkSize)
        // Index of the chunk currently being read.
        let currentChunk = 0
        // Incremental MD5 accumulator over raw ArrayBuffers.
        const spark = new sparkMd5.ArrayBuffer()
        // FileReader turns each Blob slice into an ArrayBuffer for hashing.
        const fileReader = new FileReader()

        // Read finished: fold the buffer into the hash and continue or resolve.
        fileReader.onload = function (e) {
          spark.append(e.target.result)
          currentChunk++
          if (currentChunk < chunks) {
            loadNext()
          } else {
            resolve({ fileHash: spark.end() })
          }
        }
        // Fix: reject instead of only warning, so callers are not left
        // awaiting a promise that never settles on a read error.
        fileReader.onerror = function () {
          reject(new Error(`failed to read chunk ${currentChunk} of "${file.name}"`))
        }

        // Slice out the next chunk, record it for the uploader, and start reading.
        function loadNext () {
          const start = currentChunk * chunkSize
          const end = Math.min(start + chunkSize, file.size)
          const chunk = file.slice(start, end)
          that.fileChunkList.push({
            chunk,
            size: chunk.size,
            name: file.name,
            percentage: 0
          })
          fileReader.readAsArrayBuffer(chunk)
        }

        // Kick off the first read. Both handlers above are attached
        // synchronously, before the async load event can possibly fire.
        loadNext()
      })
    }
  }
}
</script>
<style scoped lang='scss'>
</style>
