const multiparty = require('multiparty')
const path = require('path')
const fs = require('fs')

const uploadDir = path.join(__dirname, '../public/upload/')
exports.uploadFile = function (req, res, next) {
  // 二进制数据上传
  const form = new multiparty.Form()
  // req.query和req.params都是获取路由的参数对象
  // req.query是获取url上？后面的参数  req.params是获取路径后面的参数 比如user/1
  // express 4.x以前使用bodyparse中间件去获取post请求体 4.x以后可以直接通过req.body来获取 但是要配置express.json()和express.urlencoded()
  form.parse(req, (err, fields, files) => {
    if (err) {
      next(err)
      return
    }
    let pa = path.join(__dirname, '../public/upload/chunk/' + fields['filename'][0])
    //判断是否为断点续传
    if (fs.existsSync(pa) && parseInt(fields.index[0]) === 0) {
      console.log('是断点续传')
      //存在该目录
      //返回最大的索引
      let maxIndex = 0
      let arr = fs.readdirSync(pa)
      for (let i = 0; i < arr.length; i++) {
        let str = parseInt(arr[i].split('@')[1])
        console.log(str)
        if (str > maxIndex) {
          maxIndex = str
        }
      }
      res.send({
        code: 300,
        msg: '存在该目录，请继续上传',
        index: maxIndex
      })
    } else {
      //将每一次上传的数据进行统一的存储
      // console.log('不是断点续传')
      // console.log('fields', fields.extname[0])
      if (fs.existsSync(`${uploadDir}${fields.filename[0]}.${fields.extname[0]}`)) {
        res.json({
          code: 301,
          msg: '已经上传了该文件'
        })
        return
      }
      const oldName = files.chunk[0].path
      const newName = path.join(uploadDir, 'chunk', fields['filename'][0], fields['name'][0])
      // const newName = pa + '/' + fields['filename'][0] + '/' + fields['name'][0]
      // console.log('oldName', oldName)
      // console.log('newName', newName)
      // console.log('exit', fs.existsSync(oldName))
      // //创建临时存储目录
      try {
        fs.mkdirSync(uploadDir + 'chunk/' + fields['filename'][0], {
          recursive: true
        })
      } catch (error) {
        console.log('mkdirerror', error)
      }
      fs.copyFile(oldName, newName, err => {
        if (err) {
          console.log('copyerr', err)
        } else {
          // 删除源文件
          fs.unlink(oldName, err => {
            if (err) {
              console.error(err)
            } else {
              // console.log('文件复制和删除成功')
            }
          })
        }
      })
      res.json({
        code: 200,
        msg: '分片上传成功'
      })
    }
  })
}
exports.merge_chunk = (req, res, next) => {
  const fields = req.body
  thunkStreamMerge(`${uploadDir}` + `chunk\\` + fields.filename, uploadDir + fields.filename + '.' + fields.extname)
  res.send({
    code: 200,
    data: '/public/upload/' + fields.filename + '.' + fields.extname
  })
}

// Merge all stored chunks into the final target file.
function thunkStreamMerge(sourceFiles, targetFile) {
  // Chunk files are named "<base>@<index>"; read the directory, order the
  // entries by their numeric index, and hand the ordered list to the
  // stream-based merger.
  const entries = fs.readdirSync(sourceFiles)
  entries.sort((x, y) => Number(x.split('@')[1]) - Number(y.split('@')[1]))
  const fileList = entries.map(name => ({
    name,
    filePath: path.resolve(sourceFiles, name)
  }))
  const outputStream = fs.createWriteStream(targetFile)
  thunkStreamMergeProgress(fileList, outputStream, sourceFiles)
}
// NOTE(review): this module-level counter is never read or written anywhere
// in this file — looks like leftover debug state; candidate for removal.
let i = 0
// Merge each chunk, one at a time, into the target write stream.
function thunkStreamMergeProgress(fileList, fileWriteStream, sourceFiles) {
  // Recursively pipes fileList[0] into fileWriteStream (without closing it),
  // deletes the chunk file once appended, then recurses on the rest.
  // When the list is exhausted it closes the stream and removes the
  // now-empty chunk directory.
  if (!fileList.length) {
    // BUG FIX: the original called fileWriteStream.end('完成了'), which
    // appended that literal string to the merged file and corrupted it.
    // end() with no argument just flushes and closes.
    fileWriteStream.end()
    // Remove the chunk directory, best-effort.
    // NOTE(review): rmdirSync's `recursive` option is deprecated on modern
    // Node — fs.rmSync(dir, { recursive: true }) is the replacement.
    try {
      if (sourceFiles) {
        fs.rmdirSync(sourceFiles, { recursive: true })
      }
    } catch (error) {
      console.log('删除目录失败error', error.message)
    }
    return
  }
  const { filePath: chunkFilePath } = fileList.shift() // next chunk in order
  const currentReadStream = fs.createReadStream(chunkFilePath)
  // Append this chunk; end:false keeps the target stream open for the rest.
  currentReadStream.pipe(fileWriteStream, { end: false })
  // Propagate read failures instead of hanging the merge forever.
  currentReadStream.on('error', err => {
    fileWriteStream.destroy(err)
  })
  currentReadStream.on('end', () => {
    // unlinkSync is synchronous — the original pointlessly awaited it; guard
    // it so a failed delete doesn't abort the remaining merge.
    try {
      fs.unlinkSync(chunkFilePath)
    } catch (err) {
      console.error(err)
    }
    // Continue with the next chunk.
    thunkStreamMergeProgress(fileList, fileWriteStream, sourceFiles)
  })
}
