self.importScripts("../lib/spark-md5.js"); // 导入脚本

/**
 * Sampling-based hash payload builder.
 * Instead of hashing every chunk, pick a subset — the first chunk, the
 * last chunk, and geometrically spaced samples in between — so the hash
 * can be computed over far less data.
 * To improve accuracy, the overall file size can be mixed in as a final
 * pseudo-chunk, e.g.:
 *   result.push({ file: new Blob([7832], { type: 'text/plain' }) });
 * which lowers the collision risk between files of different sizes.
 * @param {Array} listChunk - full list of { file: Blob } chunk wrappers
 * @param {Number} length - total number of chunks in listChunk
 * @param {Number} size - overall file size; mixed in when truthy
 * @returns {Array} the sampled chunk list
 */
const createNewPayload = (listChunk, length, size) => {
  // Small lists sample every power of 2, larger ones every power of 4,
  // so hashing time stays bounded as the chunk count grows.
  const step = length <= 256 ? 2 : 4;
  const sampled = [listChunk[0]];
  for (let cursor = step; cursor < length; cursor *= step) {
    sampled.push(listChunk[cursor]);
  }
  // Always include the final chunk in the sample.
  sampled.push(listChunk[length - 1]);
  if (size) {
    // Fold the file size in as a tiny text blob.
    sampled.push({ file: new Blob([size], { type: 'text/plain' }) });
  }
  return sampled;
};

/**
 * Worker entry point: hash a list of file chunks with incremental MD5.
 * Expects message.data = { listFileChunk, limit = 20, size }, where each
 * chunk is a { file: Blob } wrapper. Posts { percentage } progress
 * messages after every chunk, then { percentage: "100.00", hash } when
 * done, or { error } if a chunk fails to read.
 */
self.onmessage = message => {
  const {
    listFileChunk = [], limit = 20, size
  } = message.data;
  const length = listFileChunk.length;
  // Use sampling when there are many chunks, to cut hashing time.
  const WrapperFileChunk = length <= limit ? listFileChunk : createNewPayload(listFileChunk, length, size);

  // BUG FIX: the accumulator must be created ONCE and shared across all
  // chunks. Previously a fresh SparkMD5.ArrayBuffer was built on every
  // recursive call, so the final hash covered only the last chunk.
  const spark = new self.SparkMD5.ArrayBuffer();
  // Renamed from `length` to avoid shadowing the outer chunk count.
  const total = WrapperFileChunk.length;

  // Read and hash chunks sequentially, reporting progress after each.
  const dataSolve = (count = 0) => {
    const dataAccept = WrapperFileChunk[count];
    if (!dataAccept) return; // empty payload: post nothing, same as before
    const reader = new FileReader();
    reader.onerror = () => {
      // Surface read failures instead of silently stalling the worker.
      self.postMessage({ error: `failed to read chunk ${count}` });
    };
    reader.onload = (e) => {
      spark.append(e.target.result);
      const done = count + 1;
      if (done === total) {
        self.postMessage({
          percentage: "100.00",
          hash: spark.end()
        });
      } else {
        self.postMessage({
          percentage: (done * 100 / total).toFixed(2)
        });
        dataSolve(done);
      }
    };
    reader.readAsArrayBuffer(dataAccept.file);
  };

  dataSolve(0);
};