<script setup>
import SparkMD5 from 'spark-md5'

// Size of each upload slice: 1 MiB. Must match the `size` the backend uses
// when reassembling chunks (see mergeRequest below).
const CHUNK_SIZE = 1024 * 1024

// Split `file` (a File/Blob) into consecutive slices of at most
// `chunk_size` bytes; the final slice may be shorter. Returns an array of
// Blob slices in file order (empty array for an empty file).
const createFileChunks = (file, chunk_size) => {
  const pieces = []
  for (let offset = 0; offset < file.size; offset += chunk_size) {
    pieces.push(file.slice(offset, offset + chunk_size))
  }
  return pieces
}

// Compute a fast MD5 fingerprint of the file: hash the first and last
// chunks in full, plus 2-byte samples from the head / middle / tail of
// every interior chunk (trades exactness for speed on large files).
// Resolves with the hex digest string; rejects if the read fails.
const calculateHash = (chunks, chunk_size) => {
  return new Promise((resolve, reject) => {
    const target = []
    chunks.forEach((chunk, index) => {
      if (index === 0 || index === chunks.length - 1) {
        // Keep boundary chunks whole.
        target.push(chunk)
      } else {
        // Sample 2 bytes at the start, middle and end of interior chunks.
        const slice1 = chunk.slice(0, 2)
        const slice2 = chunk.slice(chunk_size / 2, chunk_size / 2 + 2)
        const slice3 = chunk.slice(chunk_size - 2, chunk_size)
        target.push(slice1, slice2, slice3)
      }
    })
    const fileReader = new FileReader()
    const spark = new SparkMD5.ArrayBuffer()
    // Register handlers before starting the read.
    fileReader.onload = (e) => {
      spark.append(e.target.result)
      resolve(spark.end())
    }
    // BUG FIX: the original declared `reject` but never called it and had no
    // onerror handler, so a failed read left this promise pending forever
    // and handleChange silently stalled.
    fileReader.onerror = () => reject(fileReader.error)
    fileReader.readAsArrayBuffer(new Blob(target))
  })
}

// Upload the file's chunks to the server, skipping chunks the server
// already has (`uploadedList` of chunk hashes), with at most
// `maxConcurrentRequests` requests in flight per batch.
// Throws if any batch contains a failed upload.
const uploadChunks = async (fileChunks, hash, uploadedList) => {
  // Describe every chunk (file hash, per-chunk hash, payload, original
  // index), then drop the ones the server reported as already uploaded.
  const data = fileChunks
    .map((chunk, index) => ({
      fileHash: hash,
      chunkHash: `${hash}-${index}`,
      chunk,
      index
    }))
    .filter((item) => !uploadedList.includes(item.chunkHash))

  const maxConcurrentRequests = 6 // max requests in flight per batch
  const taskPool = [] // pending fetch promises for the current batch

  // Await every request in the pool, verify the responses, then empty it.
  const drainPool = async () => {
    try {
      const responses = await Promise.all(taskPool)
      for (const response of responses) {
        if (!response.ok) {
          throw new Error('Upload failed')
        }
      }
    } catch (error) {
      console.error('Error uploading chunk:', error.message)
      throw error
    } finally {
      // Clear the pool whether the batch succeeded or failed.
      taskPool.length = 0
    }
  }

  for (const item of data) {
    const formData = new FormData()
    formData.append('fileHash', item.fileHash)
    formData.append('chunkHash', item.chunkHash)
    formData.append('chunk', item.chunk)
    taskPool.push(
      fetch('http://localhost:3000/upload', {
        method: 'POST',
        body: formData
      })
    )
    if (taskPool.length >= maxConcurrentRequests) {
      await drainPool()
    }
  }

  // BUG FIX: the original flushed the last batch only when
  // `item.index === data.length - 1`, but `index` is the position in the
  // UNFILTERED fileChunks array while `data` is the filtered list — so
  // whenever any chunk was skipped, the final batch was never awaited and
  // the function returned before all uploads finished (letting mergeRequest
  // fire too early). Unconditionally drain whatever is left here.
  if (taskPool.length > 0) {
    await drainPool()
  }
}
// Ask the backend to merge the uploaded chunks into the final file.
// Returns the fetch promise so callers MAY await completion — backward
// compatible with the existing fire-and-forget call site.
const mergeRequest = (hash, file) => {
  return fetch('http://127.0.0.1:3000/merge', {
    method: 'POST',
    headers: {
      'Content-Type': 'application/json'
    },
    body: JSON.stringify({
      size: CHUNK_SIZE,
      fileHash: hash,
      fileName: file.name
    })
  })
    .then((response) => {
      // BUG FIX: the original ignored HTTP error statuses, so a failed
      // merge still alerted "上传成功".
      if (!response.ok) {
        throw new Error('Merge failed')
      }
      return response.json()
    })
    .then(() => {
      alert('上传成功')
    })
    .catch((error) => {
      // BUG FIX: the original chain had no .catch, leaving network/merge
      // failures as unhandled promise rejections.
      console.error('Error merging chunks:', error.message)
    })
}

// Ask the server whether a file with this content hash already exists —
// if so the upload is skipped entirely ("instant upload"; renaming the
// file does not change its hash). Resolves with the parsed JSON body,
// which the caller reads as { data: { shouldUpload, existChunks } }.
const verifyUpload = async (fileHash, file) => {
  // Idiom fix: the function is async, so use await instead of a .then
  // chain that ended in a pointless `.then((data) => data)`.
  const response = await fetch('http://127.0.0.1:3000/verify', {
    method: 'POST',
    headers: {
      'Content-Type': 'application/json'
    },
    body: JSON.stringify({
      fileHash,
      fileName: file.name
    })
  })
  return response.json()
}

// Handle the <input type="file"> change event: slice the file, hash it,
// ask the server what is missing, upload missing chunks, then request a
// server-side merge.
const handleChange = async (e) => {
  const file = e.target.files[0]
  // BUG FIX: change also fires when the user cancels the file dialog —
  // without this guard, `file.size` throws on undefined inside
  // createFileChunks.
  if (!file) return

  // Slice the file into fixed-size chunks.
  const chunks = createFileChunks(file, CHUNK_SIZE)
  // Fingerprint the file (sampled MD5).
  const hash = await calculateHash(chunks, CHUNK_SIZE)

  // Instant upload: the server already has a file with this hash, so
  // nothing needs to be transferred. (Leftover debug console.log removed.)
  const res = await verifyUpload(hash, file)
  const { shouldUpload, existChunks } = res.data
  if (!shouldUpload) {
    alert('秒传：上传成功')
    return
  }

  // Upload only the chunks the server does not already have.
  await uploadChunks(chunks, hash, existChunks)
  // All chunks are on the server — ask it to assemble the file.
  mergeRequest(hash, file)
}
</script>

<template>
  <div>
    <!-- Upload starts as soon as a file is chosen (change event). -->
    <input type="file" @change="handleChange" />
    <!-- NOTE(review): this button has no click handler — presumably decorative
         since the input's change event drives the upload; confirm intent. -->
    <button>文件上传</button>
  </div>
</template>

<style scoped></style>
