import SparkMD5 from 'spark-md5'

/**
 * Worker entry point: computes a sampled MD5 fingerprint of a file.
 *
 * Incoming message data:
 *   chunks     - array of Blob slices of the file
 *   CHUNK_SIZE - nominal byte size of each middle chunk
 *
 * Sampling strategy: the first and last chunks are hashed in full;
 * every middle chunk contributes only its first two, middle two, and
 * last two bytes. This keeps the hash cheap for large files while
 * still being sensitive to edits anywhere in the file.
 *
 * Posts the resulting MD5 hex string back via postMessage.
 */
onmessage = (e) => {
	const { chunks, CHUNK_SIZE } = e.data

	// Collect the byte ranges that participate in the hash.
	const targets = []
	chunks.forEach((item, index) => {
		if (index === 0 || index === chunks.length - 1) {
			// First and last chunks: include entirely.
			targets.push(item)
		} else {
			// Middle chunks: sample 2 bytes from the start, middle, and end.
			targets.push(item.slice(0, 2))
			targets.push(item.slice(CHUNK_SIZE / 2, CHUNK_SIZE / 2 + 2))
			targets.push(item.slice(CHUNK_SIZE - 2, CHUNK_SIZE))
		}
	})

	const spark = new SparkMD5.ArrayBuffer()
	const fileReader = new FileReader()

	// Register handlers before starting the read.
	fileReader.onload = (ev) => {
		spark.append(ev.target.result)
		postMessage(spark.end())
	}
	fileReader.onerror = () => {
		// Don't swallow read failures silently (the caller would wait
		// forever for a hash); rethrow so the worker's error event fires.
		throw fileReader.error
	}

	// Merge the sampled slices into one Blob and hash it in a single pass.
	fileReader.readAsArrayBuffer(new Blob(targets))
}
