import SparkMD5 from 'spark-md5';

/**
 * Incrementally compute the MD5 of an entire file by reading and hashing
 * it in fixed-size chunks, keeping memory usage low for large files.
 * @param {File} file - the file (Blob) to hash
 * @returns {Promise<string>} resolves with the hex MD5 digest of the whole
 *   file; rejects with the FileReader error event on a read failure
 */
export const chunkHash = (file) => {
  return new Promise((resolve, reject) => {
    const blobSlice = File.prototype.slice || File.prototype.mozSlice || File.prototype.webkitSlice;
    // 2 MB per chunk. The previous value (2097152 * 1024) was 2 GB, which
    // defeated the purpose of chunking and could exhaust memory on big files.
    const chunkSize = 2097152;
    const chunks = Math.ceil(file.size / chunkSize); // total number of chunks
    let currentChunk = 0;
    const spark = new SparkMD5.ArrayBuffer();
    const fileReader = new FileReader();

    fileReader.onload = (e) => {
      spark.append(e.target.result); // fold this chunk into the running hash
      currentChunk++;

      if (currentChunk < chunks) {
        loadNext();
      } else {
        const md5 = spark.end(); // final digest
        spark.destroy(); // release internal buffers
        resolve(md5);
      }
    };

    fileReader.onerror = (e) => {
      reject(e);
    };

    // Read the next chunk; onload continues the loop until all chunks are hashed.
    function loadNext() {
      const start = currentChunk * chunkSize;
      const end = Math.min(start + chunkSize, file.size);
      fileReader.readAsArrayBuffer(blobSlice.call(file, start, end));
    }

    loadNext();
  });
};

/**
 * Serialize a plain object to JSON and encode it as UTF-8 bytes.
 * @param {Object} obj - a JSON-serializable object
 * @returns {ArrayBuffer} the UTF-8 encoding of JSON.stringify(obj)
 */
const objectToArrayBuffer = (obj) => {
  // TextEncoder emits proper UTF-8. The previous charCodeAt -> Uint8Array
  // approach silently truncated any code unit > 0xFF, corrupting non-ASCII
  // input. For pure-ASCII input (the typical size/type metadata) the output
  // bytes are identical to before.
  const bytes = new TextEncoder().encode(JSON.stringify(obj));
  // Copy into an exact-size ArrayBuffer in case the encoder's backing
  // buffer is larger than the encoded length.
  const buffer = new ArrayBuffer(bytes.byteLength);
  new Uint8Array(buffer).set(bytes);
  return buffer;
};

/**
 * Fast fingerprint: MD5 over the file's head chunk + tail chunk plus its
 * metadata (size and MIME type), avoiding a full read of large files.
 * NOTE: this is a heuristic fingerprint, not the MD5 of the whole file,
 * so it is suitable for dedupe/resume hints but not integrity checks.
 * @param {File} file - the file (Blob) to fingerprint
 * @returns {Promise<string>} resolves with the hex MD5 digest; rejects
 *   with the FileReader error event on a read failure
 */
export const fastHash = (file) => {
  return new Promise((resolve, reject) => {
    const blobSlice = File.prototype.slice || File.prototype.mozSlice || File.prototype.webkitSlice;
    const chunkSize = 1048576 * 2; // 2 MB sampled from each end (old comment wrongly said 1 MB)
    // Byte ranges to hash: the whole file when small, otherwise head + tail.
    // For sizes between chunkSize and 2*chunkSize the two ranges overlap;
    // that is harmless — the overlap is hashed twice, deterministically.
    const list = file.size <= chunkSize
      ? [[0, file.size]]
      : [
          [0, chunkSize],
          [file.size - chunkSize, file.size],
        ];

    const spark = new SparkMD5.ArrayBuffer();
    const fileReader = new FileReader();

    // Mix file metadata into the digest so files sharing the same head/tail
    // but differing in size or type do not collide.
    spark.append(objectToArrayBuffer({ size: file.size, type: file.type }));

    fileReader.onload = (e) => {
      spark.append(e.target.result);
      if (list.length) {
        loadNext();
      } else {
        const md5 = spark.end();
        spark.destroy(); // release internal buffers
        resolve(md5);
      }
    };

    fileReader.onerror = (e) => {
      reject(e);
    };

    // Read the next pending range; loadNext is only invoked while list is
    // non-empty (initial call + the onload guard), so shift() never misses.
    function loadNext() {
      const [start, end] = list.shift();
      fileReader.readAsArrayBuffer(blobSlice.call(file, start, end));
    }

    loadNext();
  });
};