import { notification } from 'ant-design-vue'
import UploadList from '@/components/upload/UploadList.vue'
import UploadTitle from '@/components/upload/UploadTitle.vue'
import SparkMD5 from 'spark-md5'
import store from '@/store';

// Chunk size for incremental file reads: 2 MiB (2 * 1024 * 1024 bytes).
// NOTE(review): name looks like a typo for CHUNK_SIZE — kept as-is because
// computeMD5 below references it by this name.
const CHUCK_SIZE = 2097152;

/**
 * Show the file-upload progress popup as a persistent bottom-right notification.
 * Closing the popup dispatches `cancelUpload` to the store.
 * @param {Array<{uploadSize: number, fileSize: number}>} list - upload entries;
 *        an entry is considered complete when uploadSize equals fileSize
 */
export function showUploadProcess (list) {
  // Count of files still in flight vs. files fully uploaded.
  // (filter().length is always a number, so no `|| 0` fallback is needed.)
  const uploadSize = list.filter(i => i.uploadSize < i.fileSize).length
  const completeSize = list.filter(i => i.uploadSize === i.fileSize).length
  notification.open({
    key: 'UPLOAD_LIST_PROCESS',
    placement: 'bottomRight',
    message: (h) => h(UploadTitle, {
      props: {
        uploadSize,
        completeSize
      }
    }),
    description: (h) => h(UploadList, {
      props: {
        list
      }
    }),
    duration: null, // stay open until the user closes it
    onClose() {
      // Treat dismissal of the popup as a request to abort pending uploads.
      store.dispatch('cancelUpload')
    }
  });
}

/**
 * Convert a keyed object of upload entries into an array, attaching each
 * entry's object key as a `key` property on the element.
 * @param {Object<string, Object>} obj - map of key -> entry object
 * @returns {Array<Object>} own-enumerable entries spread with their `key`;
 *          empty array for non-object input (including null)
 */
export function fileObj2Arr (obj) {
  // Guard explicitly against null: `typeof null === 'object'`.
  if (obj === null || typeof obj !== 'object') return []
  // Object.entries yields exactly the own enumerable string-keyed properties,
  // matching the previous for...in + hasOwnProperty filter.
  return Object.entries(obj).map(([key, element]) => ({
    ...element,
    key
  }))
}


/**
 * Compute the MD5 digest of a File by reading it in CHUCK_SIZE slices,
 * used to support resumable and instant ("already uploaded") uploads.
 * @param {File} file - browser File/Blob to hash
 * @returns {Promise<string>} resolves with the file's MD5 hex digest;
 *          rejects with an Error if any chunk read fails
 */
export function computeMD5(file) {
  return new Promise((resolve, reject) => {
    let currentChunk = 0;
    const chunks = Math.ceil(file.size / CHUCK_SIZE);
    // Vendor-prefixed fallbacks for older browsers.
    const blobSlice =
      File.prototype.slice ||
      File.prototype.mozSlice ||
      File.prototype.webkitSlice;
    const spark = new SparkMD5.ArrayBuffer();
    const fileReader = new FileReader();

    const loadNext = () => {
      const start = currentChunk * CHUCK_SIZE;
      const end = Math.min(start + CHUCK_SIZE, file.size);

      // Read only one slice at a time so huge files never have to be
      // held in memory all at once.
      fileReader.readAsArrayBuffer(blobSlice.call(file, start, end));
    };

    fileReader.onload = (e) => {
      spark.append(e.target.result);
      currentChunk++;

      if (currentChunk < chunks) loadNext();
      else resolve(spark.end()); // hex digest of the complete file
    };

    fileReader.onerror = () => {
      // Reject with an Error (not a bare string) so callers get a proper
      // stack trace and `instanceof Error` checks work.
      reject(new Error('Calculating file checksum failed'));
    };

    loadNext();
  });
}