/**
 * Convert a base64 data URL (`data:<mime>;base64,<payload>`) into a Blob.
 *
 * @param base64Buf - A full data URL string, e.g. `data:image/png;base64,iVBOR…`.
 * @returns A Blob whose `type` is the MIME type embedded in the data URL.
 * @throws Error if the input is not a well-formed data URL (no MIME section
 *   or no comma-separated payload). The original code crashed here with an
 *   opaque TypeError via a non-null assertion.
 */
export function base64toBlob(base64Buf: string): Blob {
  const [meta, payload] = base64Buf.split(',')
  const mimeMatch = meta.match(/:(.*?);/)
  if (!mimeMatch || payload === undefined) {
    throw new Error('base64toBlob: input is not a valid base64 data URL')
  }
  const mime = mimeMatch[1]
  // Global atob works in browsers, web workers and Node >= 16;
  // window.atob is browser-main-thread only.
  const bstr = atob(payload)
  let n = bstr.length
  const u8arr = new Uint8Array(n)
  while (n--) {
    u8arr[n] = bstr.charCodeAt(n)
  }
  return new Blob([u8arr], { type: mime })
}


import SparkMd5 from 'spark-md5'
export async function generateFileHash(file:File):Promise<{hash:string,suffix:string}> {
  const fileReader = new FileReader()
  const computedFile = sample(file,file.size)
  fileReader.readAsArrayBuffer(computedFile)
  return new Promise(resolve => {
    fileReader.onload = e => {
      const buffer:any = e.target.result
      const spark = new SparkMd5.ArrayBuffer()
      spark.append(buffer)
      const hash = spark.end()
      resolve({
        hash,
        suffix:file.name.split('.').at(-1)
      })
    }
  })
}

/**
 * Build a small sample of a blob for fast fingerprinting: the first,
 * middle and last `chunkSize` bytes concatenated. Slices are clamped by
 * Blob.slice, so small files simply yield overlapping/shorter pieces.
 *
 * @param file - The blob to sample.
 * @param end - The logical end offset (normally `file.size`).
 * @param chunkSize - Bytes taken from each of the three positions
 *   (default 2 KB, matching the original hard-coded value).
 * @returns A Blob containing up to `3 * chunkSize` sampled bytes.
 */
function sample(file: Blob, end: number, chunkSize = 1024 * 2): Blob {
  const pre = file.slice(0, chunkSize)
  const center = file.slice(end / 2, end / 2 + chunkSize)
  const after = file.slice(end - chunkSize, end)
  return new Blob([pre, center, after])
}


/**
 * Split a file into fixed-size chunks for resumable/parallel upload.
 *
 * Each chunk is named `<hash>_<index>.<suffix>` where `hash` comes from
 * `generateFileHash` (sampled MD5), so all chunks of the same file share
 * a stable, content-derived prefix.
 *
 * @param file - The file to split.
 * @param oneSize - Chunk size in bytes (default 5 MB).
 * @returns Chunks in order; the last one may be smaller than `oneSize`.
 *   Empty files produce an empty array.
 */
export async function generateChunk(file: File, oneSize = 1024 * 1024 * 5): Promise<{ file: Blob, filename: string }[]> {
  // NOTE: removed leftover console.time('时间') debug instrumentation.
  const { hash, suffix } = await generateFileHash(file)
  const maxCount = Math.ceil(file.size / oneSize)
  const chunkList: { file: Blob, filename: string }[] = []
  for (let index = 0; index < maxCount; index++) {
    chunkList.push({
      // Blob.slice clamps the final chunk to the file's end.
      file: file.slice(index * oneSize, (index + 1) * oneSize),
      filename: `${hash}_${index}.${suffix}`,
    })
  }
  return chunkList
}




