import sparkMD5 from "spark-md5";
import { chunkHashType } from "@/type/file"
/**
 * Render a blob's bytes as an uppercase, space-separated hex string
 * (e.g. "47 49 46"), for comparison against file-format magic numbers.
 *
 * Fixes over the previous version:
 *  - every byte is zero-padded to two hex digits, so 0x0D renders as "0D"
 *    instead of the ambiguous "D";
 *  - uses Blob.arrayBuffer() instead of the deprecated
 *    FileReader.readAsBinaryString().
 * @param blob bytes to render (callers pass small header/tail slices)
 * @returns promise resolving to the hex representation
 */
function blobToString(blob: Blob): Promise<string> {
      return blob.arrayBuffer().then(buffer =>
            Array.from(new Uint8Array(buffer))
                  .map(byte => byte.toString(16).toUpperCase().padStart(2, "0"))
                  .join(" ")
      );
}


/**
 * Check whether a blob starts with a GIF signature.
 * @param blob candidate file
 * @returns true when the first 6 bytes spell "GIF89a" or "GIF87a"
 */
export async function isGif(blob: Blob) {
      // The signature occupies the first 6 bytes of the file.
      const signature = await blobToString(blob.slice(0, 6));
      if (signature === "47 49 46 38 39 61") return true;   // "GIF89a"
      return signature === "47 49 46 38 37 61";             // "GIF87a"
}


/**
 * Check whether a blob starts with the 8-byte PNG signature
 * (89 50 4E 47 0D 0A 1A 0A).
 *
 * BUG FIX: the first comparison used loose equality (==); both now use ===.
 * The second, unpadded form ("89 50 4E 47 D A 1A A") is kept for
 * compatibility with a blobToString that does not zero-pad hex bytes.
 * @param blob candidate file
 */
export async function isPng(blob: Blob) {
      const res = await blobToString(blob.slice(0, 8))
      return (res === "89 50 4E 47 0D 0A 1A 0A") || (res === "89 50 4E 47 D A 1A A");
}



/**
 * Check whether a blob looks like a JPEG: it must begin with the SOI marker
 * (FF D8) and end with the EOI marker (FF D9).
 * @param blob candidate file
 */
export async function isJpg(blob: Blob) {
      const total = blob.size;
      const head = await blobToString(blob.slice(0, 2));
      // Negative start in Blob.slice counts back from the end of the blob.
      const tail = await blobToString(blob.slice(-2, total));
      return head === "FF D8" && tail === "FF D9";
}


// NOTE: fs-extra's createWriteStream requires an integer `start` option —
// "The value of "start" is out of range. It must be an integer. Received 104857.6"
// — so pick a chunk size that keeps every chunk offset a whole number of
// bytes: 0.5 MiB = 524288.
export const DEFAULT_CHUNK_SIZE = 0.5 * 1024 * 1024;


/**
 * Split a blob into consecutive fixed-size chunks.
 * @param blob source blob
 * @param size chunk size in bytes (default DEFAULT_CHUNK_SIZE); must be > 0
 * @returns array of { index, file } where `index` is the chunk's byte offset
 *          within `blob` and `file` is the slice (the last one may be short)
 * @throws RangeError when size is not positive (would otherwise loop forever)
 */
function createdFileChunk(blob: Blob, size: number = DEFAULT_CHUNK_SIZE) {
      // Guard: a zero or negative size would never advance the loop below.
      if (size <= 0) {
            throw new RangeError(`chunk size must be positive, got ${size}`);
      }
      const chunks: { index: number; file: Blob }[] = [];
      let currentSize = 0;
      while (currentSize < blob.size) {
            chunks.push({ index: currentSize, file: blob.slice(currentSize, currentSize + size) });
            currentSize += size;
      }
      return chunks;
}

/**
 * Compute the file hash in a Web Worker so the main thread stays responsive.
 * The worker script at /hash.js receives the chunk list and posts progress
 * messages; the message with progress === 100 carries the final hash.
 * @param file File/Blob to hash
 * @param callback invoked with every progress message from the worker
 * @returns promise resolving to { chunks, ...finalMessage }
 */
export function calculateHashWorker(file: any, callback: Function): Promise<chunkHashType> {
      const chunks = createdFileChunk(file);
      return new Promise(resolve => {
            const myWorker = new Worker("/hash.js");
            myWorker.postMessage({ chunks });
            myWorker.onmessage = (ev: any) => {
                  callback(ev.data);
                  if (ev.data.progress === 100) {
                        // BUG FIX: release the worker thread once hashing is
                        // done — the original leaked one worker per call.
                        myWorker.terminate();
                        resolve({ chunks, ...ev.data })
                  }
            }
            // NOTE(review): no onerror handler, so this promise never settles
            // if /hash.js fails to load or throws — consider adding rejection.
      })
}


/**
 * Compute the file hash on the main thread during browser idle time
 * (requestIdleCallback), reporting progress through `callback`.
 * @param file File/Blob to hash
 * @param callback invoked with { progress } updates and the final
 *                 { progress: 100, hash } object
 * @returns promise resolving to { progress: 100, hash }
 */
export function calculateHashIdle(file: any, callback: Function) {
      const chunks = createdFileChunk(file);
      return new Promise(resolve => {
            const spark = new sparkMD5.ArrayBuffer();
            let count = 0;

            // Read one chunk and feed its bytes into the incremental MD5.
            const appendToSpark = (chunk: Blob) => {
                  return new Promise(sparkResolve => {
                        const reader = new FileReader();
                        reader.readAsArrayBuffer(chunk);
                        reader.onload = (e) => {
                              const arrayBuffer = e.target?.result as ArrayBuffer;
                              spark.append(arrayBuffer);
                              sparkResolve(true);
                        }
                  })
            }

            const workLoop = async (deadline: IdleDeadline) => {
                  const len = chunks.length;
                  // The deadline may already be stale after an await; the
                  // check is best-effort, not a hard real-time guarantee.
                  while (count < len && deadline.timeRemaining() > 1) {
                        await appendToSpark(chunks[count].file);
                        count++;
                        if (count < len) {
                              callback({ progress: Number((count * 100 / len).toFixed(2)) })
                        } else {
                              const endResult = { progress: 100, hash: spark.end() };
                              callback(endResult)
                              resolve(endResult);
                        }
                  }

                  // BUG FIX: only reschedule while work remains; the original
                  // rescheduled unconditionally, firing idle callbacks forever
                  // after the hash was already resolved.
                  if (count < len) {
                        window.requestIdleCallback(workLoop);
                  }
            }

            window.requestIdleCallback(workLoop);
      })
}


/**
 * Sampled MD5 ("bloom-filter style"): hash a subset of the file instead of
 * every byte, trading accuracy for speed.
 *
 * Sampling rule:
 *  - the whole first 2 MiB;
 *  - for each following 2 MiB window: its first two bytes, the two bytes at
 *    its midpoint, and its last two bytes;
 *  - the final partial window is included in full.
 *
 * Pro: little data to hash, so it is fast.
 * Con: different files can collide on the sampled hash.
 *
 * NOTE: hashes produced after this fix differ from ones the buggy version
 * stored, since the middle samples now actually contribute bytes.
 * @param file File/Blob to sample
 * @returns promise resolving to { progress: 100, hash }
 */
export function calculateHashSample(file: any) {
      return new Promise(resolve => {
            const offset = 2 * 1024 * 1024;
            const start = file.slice(0, offset);

            const chunks = [start];
            let sliceStart = offset;
            const size = file.size;

            while (sliceStart < size) {
                  if (sliceStart + offset >= size) {
                        // Last (possibly partial) window: take it whole.
                        chunks.push(file.slice(sliceStart, size))
                  } else {
                        const end = sliceStart + offset;
                        const mid = sliceStart + offset / 2;

                        // BUG FIX: Blob.slice takes (start, end), not
                        // (start, length). The old slice(sliceStart, 2) and
                        // slice(mid, 2) returned empty blobs, so the front and
                        // middle samples were never part of the hash.
                        chunks.push(file.slice(sliceStart, sliceStart + 2));
                        chunks.push(file.slice(mid, mid + 2));
                        chunks.push(file.slice(end - 2, end));
                  }
                  sliceStart += offset;
            }

            const chunkBlob = new Blob(chunks);
            const reader = new FileReader();
            const spark = new sparkMD5.ArrayBuffer();

            reader.readAsArrayBuffer(chunkBlob);

            reader.onload = (e) => {
                  const result = e.target?.result as ArrayBuffer;
                  spark.append(result);

                  resolve({ progress: 100, hash: spark.end() });
            }
      })
}