<template>
  <div class="home">
    <input type="file" @change="handleFileChange" />
  </div>
</template>

<script setup lang="ts">
import { ref } from 'vue'
import SpakeMd5 from 'spark-md5'

// Selected file; null until the user picks one.
// BUG FIX: was ref<File>(null) — `null` is not assignable to `File`
// under strictNullChecks; the type must include null.
const file = ref<File | null>(null)
// Ordered list of blobs produced by sliceFile.
const chunks = ref<Blob[]>([])
// Size of each chunk: 1 MiB = 1024 * 1024 bytes.
const chunkSize = 1024 * 1024
// Sampled MD5 digest identifying the file content.
const hash = ref<string>('')
// Original file name, sent to the server for the merge step.
const fileName = ref<string>('')

/**
 * Handles the file <input> change event: slices the chosen file,
 * hashes it, skips the upload if the server already has it ("instant
 * upload"), otherwise uploads every chunk and asks the server to merge.
 */
const handleFileChange = async (e: Event) => {
  chunks.value = []
  const target = e.target as HTMLInputElement
  const selected = target.files?.[0]
  // BUG FIX: the picker can be cancelled, leaving no file; the old code
  // passed undefined into sliceFile and crashed on `file.size`.
  if (!selected) return
  file.value = selected
  fileName.value = selected.name

  // Split the file into fixed-size chunks.
  sliceFile(selected, chunkSize)

  // Compute a sampled hash identifying the file content.
  hash.value = await calculateHash(chunks.value)

  // Ask the server whether this exact file was uploaded before.
  const res = await checkFile(hash.value, fileName.value)
  if (res?.isExist) {
    return alert('上传成功')
  }

  // Upload all chunks with bounded concurrency, then merge server-side.
  await uploadFile(chunks.value, hash.value)
  await mergeFile(hash.value, fileName.value)
  alert('上传成功')
}

// 对文件进行分块
/**
 * Splits `file` into consecutive blobs of at most `chunkSize` bytes
 * and appends them, in order, to the shared `chunks` ref.
 *
 * @param file      the file to partition
 * @param chunkSize maximum byte length of each blob
 */
const sliceFile = (file: File, chunkSize: number) => {
  for (let offset = 0; offset < file.size; offset += chunkSize) {
    // Blob.slice clamps the end, so the final chunk may be shorter.
    chunks.value.push(file.slice(offset, offset + chunkSize))
  }
}

// 计算文件哈希值
/**
 * Computes a fast, sampled MD5 over the chunk list.
 *
 * Hashing a huge file in full can freeze the browser, so only the first
 * and last chunks are hashed whole; every middle chunk contributes just
 * two bytes from its center. This trades exactness for speed, which is
 * acceptable for upload deduplication.
 *
 * @param chunks the blobs produced by sliceFile
 * @returns the hex MD5 digest of the sampled content
 */
const calculateHash = (chunks: Blob[]): Promise<string> => {
  return new Promise((resolve, reject) => {
    const target: Blob[] = [] // blobs that participate in the hash
    chunks.forEach((chunk, index) => {
      if (index === 0 || index === chunks.length - 1) {
        // First and last chunks are hashed in full.
        target.push(chunk)
      } else {
        // Middle chunks contribute two bytes from their center.
        target.push(chunk.slice(chunkSize / 2, chunkSize / 2 + 2))
      }
    })
    const md5 = new SpakeMd5.ArrayBuffer()
    const fileReader = new FileReader()
    fileReader.onload = (e) => {
      md5.append(e.target?.result as ArrayBuffer)
      // Let the caller store the digest; no hidden side effect here.
      resolve(md5.end())
    }
    // BUG FIX: a failed read previously left the promise pending forever.
    fileReader.onerror = () => reject(fileReader.error)
    fileReader.readAsArrayBuffer(new Blob(target))
  })
}

// 上传文件
/**
 * Uploads every chunk to the server, keeping at most `max` requests
 * in flight at once.
 *
 * @param chunks the blobs to upload
 * @param hash   the file-level hash used to group chunks server-side
 * @returns the responses of ALL upload requests
 */
const uploadFile = async (chunks: Blob[], hash: string) => {
  // Build one FormData per chunk.
  const bodies = chunks.map((chunk, index) => {
    const fd = new FormData()
    fd.append('chunk', chunk)
    fd.append('fileHash', hash)
    // BUG FIX: was `${hash + index}-${index}`, which concatenated the
    // index into the hash itself ("abc0-0"); the id is "<hash>-<index>".
    fd.append('chunkHash', `${hash}-${index}`)
    return fd
  })

  const max = 6 // maximum number of concurrent requests
  const taskPool: Promise<unknown>[] = [] // requests currently in flight
  const results: Promise<Response>[] = [] // every request, for the final await
  for (const body of bodies) {
    const task = fetch('http://localhost:3000/upload', {
      method: 'POST',
      body
    })
    results.push(task)
    // BUG FIX: each task removes *itself* from the pool when it settles.
    // The old code always spliced taskPool[0] after Promise.race, which
    // could evict a still-pending request and let concurrency degrade.
    const tracked: Promise<unknown> = task.finally(() => {
      taskPool.splice(taskPool.indexOf(tracked), 1)
    })
    taskPool.push(tracked)
    if (taskPool.length >= max) {
      // Pool is full — wait until any in-flight request settles.
      await Promise.race(taskPool)
    }
  }
  // BUG FIX: await every request, not just the last batch in the pool.
  return Promise.all(results)
}

// 合并文件
/**
 * Asks the server to merge the uploaded chunks back into one file.
 *
 * @param hash     file-level hash identifying the chunk group
 * @param fileName name the merged file should be stored under
 * @returns the raw fetch Response
 */
const mergeFile = async (hash: string, fileName: string) => {
  const payload = {
    fileHash: hash,
    fileName: fileName,
    size: chunkSize
  }
  return fetch('http://localhost:3000/merge', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify(payload)
  })
}

// 判断文件以前是否上传过
/**
 * Asks the server whether a file with this hash already exists,
 * enabling "instant upload" when it does.
 *
 * @param hash     file-level hash of the candidate file
 * @param fileName original name of the candidate file
 * @returns the server's JSON answer (e.g. `{ isExist: boolean }`)
 */
const checkFile = async (hash: string, fileName: string) => {
  // BUG FIX: the old version wrapped fetch in `new Promise` (explicit
  // promise-constructor anti-pattern) and never called reject, so any
  // network error left the promise pending forever.
  const res = await fetch('http://localhost:3000/check', {
    method: 'POST',
    headers: {
      'Content-Type': 'application/json'
    },
    body: JSON.stringify({
      fileHash: hash,
      fileName: fileName
    })
  })
  return res.json()
}
</script>

<style scoped></style>
