import SparkMD5 from 'spark-md5/spark-md5.min.js'

/**
 * MD5 helper: computes the MD5 digest of a File by reading it in
 * 2 MiB chunks with FileReader and feeding them to SparkMD5.
 * Exported as a singleton object `{ getFromFile }`.
 */
export default (function () {
    // Blob.slice with vendor-prefixed fallbacks for older browsers.
    const blobSlice = File.prototype.slice || File.prototype.mozSlice || File.prototype.webkitSlice;
    const CHUNK_SIZE = 2097152; // 2 MiB per FileReader read

    /**
     * Compute the MD5 hash of a File by reading it in 2 MiB chunks.
     *
     * FIX: all per-hash state (SparkMD5 accumulator, FileReader, chunk
     * counters) used to live at module level, so a second call — or two
     * concurrent calls — reused a finalized/shared hasher and produced a
     * wrong digest. Each invocation now owns fresh state.
     *
     * @param {File} target - file to hash
     * @returns {Promise<string>} resolves with the hex MD5 digest;
     *     rejects with the string '【md5】 计算失败' on a read error
     *     (kept as a string, not an Error, for backward compatibility).
     */
    const getFromFile = (target) => {
        const chunks = Math.ceil(target.size / CHUNK_SIZE);
        const spark = new SparkMD5.ArrayBuffer();
        const fileReader = new FileReader();
        let currentChunk = 0;

        return new Promise((resolve, reject) => {
            const loadNext = () => {
                const start = currentChunk * CHUNK_SIZE;
                const end = Math.min(start + CHUNK_SIZE, target.size);
                fileReader.readAsArrayBuffer(blobSlice.call(target, start, end));
            };

            fileReader.onload = (e) => {
                spark.append(e.target.result); // fold this chunk into the digest
                currentChunk++;

                if (currentChunk < chunks) {
                    loadNext();
                } else {
                    resolve(spark.end()); // finalize → hex digest
                }
            };

            fileReader.onerror = () => {
                console.warn('【md5】 计算失败');
                reject('【md5】 计算失败');
            };

            loadNext();
        });
    };

    return { getFromFile };
})();



