<template>
  <!-- Selecting a file triggers handleUpload, which runs the whole
       chunk / hash / verify / upload pipeline defined in the script below. -->
  <div>
    <h1>大文件上传</h1>
    <input type="file" @change="handleUpload" />
  </div>
</template>

<script setup>
import SparkMD5 from "spark-md5"; //引入hash计算库
import { ref } from "vue";
//读取本地文件的逻辑
// Entry point for the file input's change event.
// Pipeline: read file -> split into chunks -> sampled hash -> server verify
// (instant-upload check) -> upload only the missing chunks.
const handleUpload = async (e) => {
  const files = e.target.files;
  // Guard: no FileList, or the user dismissed the picker without choosing.
  if (!files || files.length === 0) return;

  const file = files[0];
  filName.value = file.name;

  // Split the file into fixed-size chunks.
  const chunks = createChunks(file);

  // Compute the sampled MD5 hash that identifies the file server-side.
  const hash = await calculateHash(chunks);
  fileHash.value = hash;

  // Ask the server whether this file already exists ("秒传" / instant upload).
  const data = await verify();
  if (!data.shouldUpload) {
    alert('秒传：上传成功');
    return;
  }

  // Upload the chunks the server does not yet have, then request a merge.
  upLoadChunks(chunks, data.existChunks);
};

const CHUNK_SIZE = 1024 * 1024; // chunk size: 1 MB
const fileHash = ref(""); // sampled MD5 hash of the whole file
const filName = ref(""); // selected file's name (NOTE: "filName" spelling is part of the server payload — do not rename)

// 1 MB = 1024 KB = 1024 * 1024 B
/**
 * Split a File/Blob into fixed-size chunks.
 * @param {Blob} file - the file to split
 * @param {number} [chunkSize=CHUNK_SIZE] - chunk size in bytes (generalized
 *   from the hard-coded constant; defaults keep the old behavior)
 * @returns {Blob[]} chunk blobs in order; the last chunk may be smaller
 */
const createChunks = (file, chunkSize = CHUNK_SIZE) => {
  const chunks = [];
  // Blob.slice works on byte offsets; an end past EOF is clamped by the API.
  for (let cur = 0; cur < file.size; cur += chunkSize) {
    chunks.push(file.slice(cur, cur + chunkSize));
  }
  return chunks;
};

/**
 * Compute a sampled MD5 hash for the file.
 * Sampling strategy (speed over completeness for big files):
 *   1. the first and the last chunk are hashed in full;
 *   2. every middle chunk contributes only 2 bytes from its head, middle
 *      and tail (offsets are relative to the chunk, which is CHUNK_SIZE long).
 * @param {Blob[]} chunks
 * @returns {Promise<string>} resolves with the hex MD5 digest;
 *   rejects if the FileReader fails (previously it would hang forever).
 */
const calculateHash = (chunks) => {
  return new Promise((resolve, reject) => {
    const targets = []; // blobs that participate in the hash
    const spark = new SparkMD5.ArrayBuffer();
    const fileReader = new FileReader();

    chunks.forEach((chunk, index) => {
      if (index === 0 || index === chunks.length - 1) {
        // first / last chunk: hash everything
        targets.push(chunk);
      } else {
        // middle chunk: 2 bytes each from head, middle and tail
        targets.push(chunk.slice(0, 2));
        targets.push(chunk.slice(CHUNK_SIZE / 2, CHUNK_SIZE / 2 + 2));
        targets.push(chunk.slice(CHUNK_SIZE - 2, CHUNK_SIZE));
      }
    });

    // Register both handlers before starting the read.
    fileReader.onload = (e) => {
      spark.append(e.target.result);
      resolve(spark.end());
    };
    // Bug fix: without onerror the promise never settled on a failed read,
    // leaving handleUpload awaiting forever.
    fileReader.onerror = () => reject(fileReader.error);
    fileReader.readAsArrayBuffer(new Blob(targets));
  });
};

/**
 * Upload the not-yet-uploaded chunks with a bounded concurrency pool.
 * @param {Blob[]} chunks - all chunks of the file
 * @param {string[]} existChunks - chunkHash values the server already stores
 */
const upLoadChunks = async (chunks, existChunks) => {
  // Describe every chunk; chunkHash = fileHash + "-" + index.
  const data = chunks.map((chunk, index) => ({
    fileHash: fileHash.value,
    chunkHash: fileHash.value + "-" + index,
    chunk,
  }));

  // Skip chunks the server reported as uploaded, then build FormData bodies.
  const formDatas = data
    .filter((item) => !existChunks.includes(item.chunkHash))
    .map((item) => {
      const formData = new FormData();
      formData.append("fileHash", item.fileHash);
      formData.append("chunkHash", item.chunkHash);
      formData.append("chunk", item.chunk);
      return formData;
    });

  const max = 6; // browsers cap concurrent requests per origin, so pool at 6
  const taskPool = []; // requests currently in flight
  for (let index = 0; index < formDatas.length; index++) {
    const task = fetch("http://127.0.0.1:3000/upload", {
      method: "POST",
      body: formDatas[index],
    });

    // Bug fix: the original spliced with findIndex BEFORE pushing the task,
    // which always returned -1 and evicted an unrelated in-flight request
    // (splice(-1) removes the last element), breaking the concurrency cap.
    // Correct pattern: remove the task from the pool when IT settles.
    const removeFromPool = () => {
      const i = taskPool.indexOf(task);
      if (i !== -1) taskPool.splice(i, 1);
    };
    task.then(removeFromPool, removeFromPool);
    taskPool.push(task);

    // Pool is full: wait until any in-flight request settles before
    // launching the next one.
    if (taskPool.length === max) {
      await Promise.race(taskPool);
    }
  }
  // Wait for the remaining in-flight uploads to finish.
  await Promise.all(taskPool);

  // Every chunk is on the server — ask it to merge them into one file.
  mergeRequest();
};
// Ask the server to assemble the uploaded chunks into the final file.
// `size` is the chunk size so the server can compute each chunk's offset.
const mergeRequest = () => {
  fetch("http://127.0.0.1:3000/merge", {
    method: "POST",
    headers: {
      "content-type": "application/json",
    },
    body: JSON.stringify({
      fileHash: fileHash.value,
      filName: filName.value, // key spelling matches the server API
      size: CHUNK_SIZE,
    }),
  })
    .then((res) => {
      // Bug fix: only report success on an OK response; the original
      // alerted success even on a 4xx/5xx status.
      if (!res.ok) throw new Error(`merge failed with status ${res.status}`);
      alert("合并成功了！！！");
    })
    .catch((err) => {
      // Bug fix: the chain previously had no rejection handler at all.
      console.error("merge request failed:", err);
    });
};
// Ask the server whether this file already exists (instant-upload check).
// Resolves with the parsed JSON body; handleUpload reads `shouldUpload`
// and `existChunks` from it.
const verify = () => {
  const payload = {
    fileHash: fileHash.value,
    filName: filName.value,
  };
  const request = fetch("http://127.0.0.1:3000/verify", {
    method: "POST",
    headers: {
      "content-type": "application/json",
    },
    body: JSON.stringify(payload),
  });
  return request.then((res) => res.json());
};
</script>

<style lang="scss" scoped>
</style>