<!DOCTYPE html>
<html lang="zh-CN">
<head>
    <meta charset="UTF-8">
    <meta http-equiv="X-UA-Compatible" content="IE=edge">
    <meta name="viewport" content="width=device-width, initial-scale=1.0">
    <title>大文件上传</title>
    <script src="spark-md5.min.js"></script>
</head>
<body>
    <input type="file">
</body>
<script>
    // Wire up the file input: when the user picks a file, split it into
    // fixed-size chunks and start the incremental MD5 computation.
    const inp = document.querySelector('input')
    inp.onchange = (e) => {
        const [file] = inp.files
        // Nothing selected (e.g. the user cancelled the dialog) — bail out.
        if (!file) return
        // Partition the file into 10 MiB slices so hashing never needs the
        // whole file in memory at once.
        const chunks = creatChunks(file, 10 * 1024 * 1024)
        console.log(chunks);
        hash(chunks)       // compute the combined MD5 of all chunks
    }
    /**
     * Incrementally compute the MD5 digest of a list of Blob chunks.
     *
     * Chunks are read one at a time with FileReader so only a single chunk
     * is ever held in memory; each ArrayBuffer is folded into the running
     * digest. (This is why we hash per-chunk rather than the whole file.)
     *
     * @param {Blob[]} chunks - file slices, e.g. from creatChunks()
     * @returns {Promise<string>} resolves with the hex MD5 of the whole file;
     *                            rejects if any chunk fails to read
     */
    function hash(chunks){
        return new Promise((resolve, reject) => {
            // BUGFIX: binary input must use SparkMD5.ArrayBuffer — the plain
            // SparkMD5 class expects strings, so appending ArrayBuffers to it
            // produces an incorrect digest.
            const spark = new SparkMD5.ArrayBuffer()
            _read(0)
            // Read chunk i, fold it into the digest, then recurse to i+1.
            function _read(i){
                if(i >= chunks.length){
                    const res = spark.end()   // finalize: hex digest of all chunks
                    console.log(res)
                    resolve(res)              // expose the result to callers
                    return
                }
                const reader = new FileReader()
                reader.onload = e => {
                    spark.append(e.target.result)   // fold this chunk's bytes in
                    _read(i + 1)
                }
                // Without this, a failed read would leave the promise (and the
                // original callback chain) hanging forever.
                reader.onerror = () => reject(reader.error)
                reader.readAsArrayBuffer(chunks[i])  // read the chunk's bytes
            }
        })
    }
    /**
     * Split a Blob/File into consecutive slices of at most chunkSize bytes.
     *
     * Blob.slice() only creates views over the underlying data, so this is
     * cheap even for very large files. (Name kept as-is — callers in this
     * file use `creatChunks`.)
     *
     * @param {Blob} file - the blob to partition
     * @param {number} chunkSize - maximum bytes per slice; must be > 0
     * @returns {Blob[]} the slices in order; empty array for an empty file
     * @throws {RangeError} if chunkSize is not a positive number
     */
    function creatChunks(file, chunkSize){
        // Guard: a zero/negative/NaN step would make the loop spin forever,
        // since `i += chunkSize` never advances past file.size.
        if (!(chunkSize > 0)) {
            throw new RangeError('chunkSize must be a positive number')
        }
        const result = []
        for (let i = 0; i < file.size; i += chunkSize) {
            result.push(file.slice(i, i + chunkSize))  // bytes [i, i+chunkSize)
        }
        return result
    }

</script>
</html>