const express = require("express");
const multer = require("multer");
// Multer writes each incoming multipart file part to a temp file under uploads/.
const upload = multer({ dest: "uploads/" });
const fs = require("fs");
const bodyParser = require("body-parser");

const router = express.Router();

// Project helpers: path resolution, chunk listing, merge, and dedupe check.
const {
  getUploadUrl,
  findUploadFileList,
  mergeChunksMine,
  checkFileIsUploaded,
} = require("./utils");

// Configure body-parser for JSON request bodies.
// NOTE(review): the type "application/*+json" matches suffixed media types
// (e.g. application/ld+json) but NOT plain application/json. The /merge and
// /check-file routes below read req.body, so plain JSON is presumably parsed
// by app-level middleware — verify against the app setup.
router.use(bodyParser.json({ type: "application/*+json" }));

// Debug/health endpoint: echoes the request path plus a sample resolved
// upload path so directory resolution can be inspected from a browser.
router.get("/test", (req, res) => {
  const payload = {
    code: 200,
    msg: "success",
    path: req.url,
    uploadUrl: getUploadUrl("test.js"),
    __dirname,
  };
  res.send(payload);
});

// Receive one chunk of a file upload (multipart field "file"), persist it
// under a deterministic per-chunk name, and report progress. When the final
// chunk arrives, trigger the merge and report completion.
router.post("/upload", upload.single("file"), (req, res) => {
  // Temp file written by multer for this chunk.
  const file = req.file;

  const filename = req.body.filename;
  const fileHash = req.body.fileHash;

  // Total number of chunks and this chunk's index, as sent by the client.
  const totalChunks = parseInt(req.body.totalChunks, 10);
  const currentChunk = parseInt(req.body.currentChunk, 10);

  // Destination path for this chunk. NOTE: "$(unknown)" is a literal prefix
  // (not template interpolation) and must stay in sync with the /merge route.
  const chunkPath = getUploadUrl(
    `$(unknown)-${fileHash}-chunk-${currentChunk}-${totalChunks}`
  );

  // Copy multer's temp file into the chunk location.
  const chunkStream = fs.createReadStream(file.path);
  const writeStream = fs.createWriteStream(chunkPath);

  // BUGFIX: surface stream failures instead of leaving the request hanging.
  const fail = (err) => {
    if (!res.headersSent) {
      res.status(500).json({ success: false, error: String(err) });
    }
  };
  chunkStream.on("error", fail);
  writeStream.on("error", fail);

  chunkStream.pipe(writeStream);

  // BUGFIX: wait for the WRITE side to flush ("finish"), not for the read
  // side's "end" — otherwise the chunk file may still be partially written
  // when we report success (and when the final merge starts).
  writeStream.on("finish", () => {
    // Remove multer's temp file now that the chunk is safely stored
    // (best-effort; an orphaned temp file is not fatal).
    fs.unlink(file.path, () => {});

    const ratio = (currentChunk / totalChunks) * 100;
    const progress = ratio.toFixed(2); // percentage string for the client

    if (ratio >= 100) {
      // Last chunk: merge all chunks, then report completion.
      const mergeSuccess = () => {
        res.json({ progress: 100, isFinish: true, success: true });
      };
      mergeChunksMine(filename, fileHash, 0, totalChunks, mergeSuccess);
      return;
    }

    res.json({ progress, isFinish: false, success: true });
  });
});

// Merge all uploaded chunks of a file, in order, into the final file,
// deleting each chunk after it has been appended.
router.post("/merge", (req, res) => {
  const filename = req.body.filename;
  const totalChunks = parseInt(req.body.totalChunks, 10);
  const fileHash = req.body.fileHash;

  console.log(filename, totalChunks, fileHash);

  // Final destination path for the reassembled file.
  const mergedPath = getUploadUrl(filename);

  const writeStream = fs.createWriteStream(mergedPath);

  // Recursively append chunk `index`, then the next, until all are merged.
  const mergeChunks = (index) => {
    if (index === totalChunks) {
      // BUGFIX: respond only after the write stream has flushed to disk,
      // not immediately after calling end().
      writeStream.end(() => res.sendStatus(200));
      return;
    }

    // NOTE: "$(unknown)" is a literal prefix (not interpolation) and must
    // match the chunk naming used by the /upload route.
    const chunkPath = getUploadUrl(
      `$(unknown)-${fileHash}-chunk-${index}-${totalChunks}`
    );

    let chunk;
    try {
      // Synchronous read keeps ordering trivial; chunks are expected to be
      // modest in size.
      chunk = fs.readFileSync(chunkPath);
      fs.unlinkSync(chunkPath); // discard the chunk once consumed
    } catch (err) {
      // BUGFIX: a missing/unreadable chunk previously threw out of the
      // handler mid-merge, leaving a dangling write stream and (depending
      // on Express error handling) no useful response.
      writeStream.destroy();
      res.status(500).json({ success: false, error: String(err) });
      return;
    }

    // Append this chunk, then recurse to the next once the write is accepted.
    writeStream.write(chunk, () => mergeChunks(index + 1));
  };

  mergeChunks(0);
});

// Check whether a file (identified by content hash) is already fully
// uploaded; if not, report which chunks already exist so the client can
// resume an interrupted upload.
router.get("/check-file", async (req, res) => {
  const fileHash = req.query.fileHash;
  console.log(fileHash);

  // BUGFIX: guard the missing parameter instead of probing with undefined.
  if (!fileHash) {
    res.status(400).send({ code: 400, msg: "fileHash is required" });
    return;
  }

  // BUGFIX: Express 4 does not catch errors from async handlers — a rejected
  // checkFileIsUploaded() previously became an unhandled rejection and the
  // request hung with no response.
  try {
    if (await checkFileIsUploaded(fileHash)) {
      // The complete file already exists; the client can skip the upload.
      res.send({
        code: 200,
        msg: "文件已存在",
        status: "FILE_ALREADY_EXIST",
      });
    } else {
      // File not complete: list the chunks already on disk for resuming.
      const chunkList = findUploadFileList(`${fileHash}-chunk`);

      res.send({
        code: 200,
        msg: "文件不存在",
        status: "NOT_FILE_ALREADY_EXIST",
        data: {
          count: chunkList.length,
          chunkList,
        },
      });
    }
  } catch (err) {
    res.status(500).send({ code: 500, msg: "check failed", error: String(err) });
  }
});

module.exports = router;
