import axios from 'axios'
import { ElMessage } from 'element-plus'
import OSS from 'ali-oss'
import { formatDate } from '@/utils'
import SparkMD5 from 'spark-md5'
import FileWorker from '@/utils/file.worker'

// OSS client configuration, sourced from build-time environment variables.
// NOTE(review): bundling accessKeyId/accessKeySecret into a browser build
// ships long-lived credentials to every client — STS temporary tokens are
// the usual alternative; confirm this is intentional.
const config = {
  region: process.env.VUE_APP_OSS_REGION,
  accessKeyId: process.env.VUE_APP_OSS_ACCESS_KEY_ID,
  accessKeySecret: process.env.VUE_APP_OSS_ACCESS_KEY_SECRET,
  bucket: process.env.VUE_APP_OSS_BUCKET
}

// File-upload tuning values, sourced from build-time environment variables.
// NOTE(review): process.env values are strings — numeric fields must be
// parsed by consumers (useChunk parses fileChunkSize with parseInt).
export const fileUploadConfig = {
  // Maximum allowed size for an uploaded file.
  fileMaxSize: process.env.VUE_APP_FILE_MAX_SIZE,
  // Files larger than this use chunked upload (backend supports resumable upload).
  fileUploadSize: process.env.VUE_APP_FILE_UPLOAD_SIZE,
  // Size of each chunk.
  fileChunkSize: process.env.VUE_APP_FILE_CHUNK_SIZE,
  // Upload endpoint URL.
  fileUploadUrl: process.env.VUE_APP_FILE_UPLOAD_URL
}

/*
 * File-upload composable backed by Alibaba Cloud OSS.
 * file.status values: 0 = uploaded successfully, 1 = waiting to upload,
 * 2 = currently uploading.
 */
export function useUploadFile() {
  // OSS client built from the environment-driven config above.
  const ossClient = new OSS(config)
  // Files that have been tagged for upload and not yet completed.
  const uploadQueue = []
  // Chunking helpers hold no per-file state; create them once, not per file.
  const { cutFile } = useChunk()

  /**
   * Upload a list of files sequentially: tag each one, slice and hash it
   * into chunks, then start the OSS multipart upload.
   * @param {Array<Object>} filesList - descriptors with at least
   *   { name, type, fileObj, size, status, uploaded }
   */
  const uploadMultipleFiles = async (filesList) => {
    for (const file of filesList) {
      setUploadingFlag(file)

      console.time('cutFile')
      const chunks = await cutFile(file)
      console.timeEnd('cutFile')
      // NOTE(review): chunk hashes are computed and logged but never sent
      // anywhere — presumably groundwork for resumable uploads; confirm.
      console.log(chunks)
      // BUG FIX: the guard used to be `ossClient == null`, meaning the upload
      // only ran when the client was MISSING — i.e. never, since ossClient is
      // constructed unconditionally above. Upload whenever the client exists.
      if (ossClient !== null) {
        uploadAndObserve(file)
      }
    }
  }

  // Tag a pending file with an OSS object key plus an axios cancel token and
  // push it onto the queue. Already-uploaded or in-flight files are skipped.
  const setUploadingFlag = (file) => {
    if (file.uploaded || file.status === 2) return
    // Object key layout: ali_upload/<date>/<name>.<type>
    // NOTE(review): file.type is usually a MIME string ("image/png"), which
    // yields keys like "pic.png.image/png" — confirm the intended suffix.
    file.key = `ali_upload/${formatDate(2)}/${file.name}.${file.type}`
    // Cancel token so the upload can be aborted from cancelUpload().
    file.cancelTokenSource = axios.CancelToken.source()
    uploadQueue.push(file)
  }

  // Start the multipart upload for one file and track its progress on the
  // file object itself (file.process, file.status, file.uploaded).
  const uploadAndObserve = (file) => {
    if (file.uploaded || file.status === 2) return
    file.status = 2 // mark as uploading
    ossClient
      .multipartUpload(file.key, file.fileObj, {
        // Progress ratio (0..1) -> integer percentage on the file row.
        progress: function (percentage) {
          file.process = Math.round(percentage * 100)
        },
        storageClass: 'Standard', // standard storage class
        // NOTE(review): ali-oss cancellation normally goes through
        // client.cancel(); confirm multipartUpload honors an axios cancel
        // token passed this way.
        cancelToken: file.cancelTokenSource.token
      })
      .then((res) => {
        // Drop the finished file from the queue.
        const index = uploadQueue.findIndex((item) => item.key === file.key)
        if (index !== -1) {
          uploadQueue.splice(index, 1)
        }
        file.status = 0
        file.uploaded = 'success'
        // Public URL of the uploaded object (query string stripped).
        console.log(res.res.requestUrls[0].split('?')[0])
      })
      .catch((error) => {
        if (axios.isCancel(error)) {
          // NOTE(review): status stays 2 after a cancel (original behavior) —
          // verify the UI resets it before a retry is possible.
          ElMessage.warning(`${file.name} 上传被取消`)
        } else {
          ElMessage.error(`${file.name} 上传出错`)
          // Back to the "waiting" state so the file can be retried.
          file.status = 1
        }
      })
  }

  // Cancel an in-flight upload identified by its OSS object key.
  const cancelUpload = (rowObj) => {
    const fileToCancel = uploadQueue.find((item) => item.key === rowObj.key)
    if (fileToCancel) {
      console.log(fileToCancel)
      fileToCancel.cancelTokenSource.cancel('文件上传已取消')
      ElMessage.warning(`${rowObj.name} 上传取消中`)
    }
  }

  return { uploadMultipleFiles, cancelUpload }
}

/**
 * Composable for slicing a file into fixed-size chunks and hashing them
 * (MD5 via SparkMD5) in parallel across Web Workers.
 */
export function useChunk() {
  // Worker threads available for parallel chunk hashing.
  const THREAD_COUNT = navigator.hardwareConcurrency || 4
  // Chunk size in bytes. Env values are strings; parse with explicit radix.
  const CHUNK_SIZE = Number.parseInt(fileUploadConfig.fileChunkSize, 10)

  // Build a Worker from a function's source text so no separate worker
  // script URL has to be served.
  const createWorker = (workerScript) => {
    const blob = new Blob(['(' + workerScript.toString() + ')()'], {
      type: 'text/javascript'
    })
    const url = window.URL.createObjectURL(blob)
    return new Worker(url)
  }

  /**
   * Slice `file` into CHUNK_SIZE pieces and hash them across workers.
   * @param {File} file - the file to split.
   * @returns {Promise<Array>} chunk descriptors in global chunk order.
   */
  const cutFile = async (file) => {
    return new Promise((resolve) => {
      const result = []
      const chunkCount = Math.ceil(file.size / CHUNK_SIZE)
      // ROBUSTNESS FIX: a 0-byte file has no chunks; the original spawned
      // workers anyway and the resolve condition could never be satisfied
      // cleanly. Resolve with an empty result immediately.
      if (chunkCount === 0) {
        resolve(result)
        return
      }
      // Number of chunks each thread is responsible for.
      const workerChunkCount = Math.ceil(chunkCount / THREAD_COUNT)
      // ROBUSTNESS FIX: when the file has fewer chunks than threads, the
      // original still spawned THREAD_COUNT workers, some with empty ranges,
      // and waited for all of them. Spawn only the threads actually needed.
      const threadsNeeded = Math.ceil(chunkCount / workerChunkCount)
      // How many threads have reported back.
      let finishCount = 0
      for (let i = 0; i < threadsNeeded; i++) {
        const worker = createWorker(FileWorker)
        // Global chunk index range [startIndex, endIndex) for this thread.
        const startIndex = i * workerChunkCount
        const endIndex = Math.min(startIndex + workerChunkCount, chunkCount)
        // NOTE(review): JSON.stringify serializes a File object to "{}" —
        // confirm FileWorker actually receives usable file data this way
        // (structured clone of the raw object would preserve the File).
        worker.postMessage(
          JSON.stringify({
            file,
            CHUNK_SIZE,
            startIndex,
            endIndex
          })
        )

        // Place this thread's chunks at their global positions, then
        // tear the worker down.
        worker.onmessage = (e) => {
          for (let j = startIndex; j < endIndex; j++) {
            result[j] = e.data[j - startIndex]
          }
          worker.terminate()
          finishCount++
          if (finishCount === threadsNeeded) {
            resolve(result)
          }
        }
      }
    })
  }

  /**
   * Read one chunk of `file` and compute its MD5 hash.
   * @param {File} file - source file.
   * @param {number} index - zero-based chunk index.
   * @param {number} chunkSize - chunk size in bytes.
   * @returns {Promise<{start:number,end:number,index:number,hash:string}>}
   */
  const createChunk = (file, index, chunkSize) => {
    return new Promise((resolve) => {
      const start = index * chunkSize
      // BUG FIX: clamp `end` to the file size so the last chunk's reported
      // extent matches the bytes actually read (file.slice already clamps
      // the read itself, but the metadata previously overstated it).
      const end = Math.min(start + chunkSize, file.size)
      const spark = new SparkMD5.ArrayBuffer()
      const fileReader = new FileReader()
      fileReader.onload = (e) => {
        spark.append(e.target.result)
        resolve({
          start,
          end,
          index,
          hash: spark.end()
        })
      }
      fileReader.readAsArrayBuffer(file.slice(start, end))
    })
  }

  return { cutFile, createChunk }
}
