/* eslint-disable no-unused-vars */
import pLimit from 'p-limit';
import {retryPromiseWithArgs} from './util';
import axios from 'axios';
import SparkMD5 from 'spark-md5';

export default class SingleUpload {
    /**
     * Chunked file uploader: splits a file into slices, hashes them in a
     * pool of Web Workers, asks the server whether the file already exists
     * (instant "upload"), then uploads the chunks concurrently and finally
     * asks the server to merge them.
     *
     * @param {File} file file to upload
     * @param {object} options
     * @param {string} options.checkUrl URL used to ask whether the file is already on the server
     * @param {string} options.uploadUrl URL that receives individual chunks
     * @param {string} options.mergeUrl URL that merges uploaded chunks into one file
     * @param {number} options.size chunk size in bytes
     * @param {number} options.concurrency maximum number of chunks uploaded in parallel
     * @param {number} options.retries retry count for a failed chunk upload
     * @param {function(number)} options.handleHashProgress callback receiving hash progress (0-100)
     * @param {function(number): function(number)} options.handleUploadProgress curried callback: chunk index -> per-chunk upload-progress handler
     */
    constructor(file, options) {
        this.file = file;
        this.options = options;
        /**
         * @type {Array<number>}
         * @description per-chunk upload progress (kept for external consumers)
         */
        this.uploaded = [];
    }

    /**
     * Entry point, triggered by the upload button.
     * @param {function({data, status: number}): boolean} afterCheck called with the check response; a truthy return means the file already exists and the upload is skipped
     * @param {function(number, {data, status: number})} afterUpload called after merge with the elapsed upload time (ms) and the merge response
     * @returns {Promise<void>}
     */
    async start(afterCheck, afterUpload) {
        this.chunks = SingleUpload.splitFileIntoChunks(
            this.file,
            this.options.size
        );
        const hashStart = Date.now();
        this.hash = await this.computeHash(this.options.handleHashProgress);
        console.log('hash computed time:' + (Date.now() - hashStart));

        // Name each chunk with its index plus the whole-file hash so chunk
        // names are unique and stable across retries.
        this.chunks.forEach((chunk, index) => {
            chunk.chunkName = `${index}-${this.hash}`;
        });
        const checkRes = await this.check();
        const shouldStop = afterCheck(checkRes);
        if (shouldStop) {
            return;
        }
        const start = Date.now();
        await this.upload(this.options.handleUploadProgress);
        const mergeRes = await this.merge();
        afterUpload(Date.now() - start, mergeRes);
    }

    /**
     * Ask the server to merge all uploaded chunks into a single file.
     * @returns {Promise<{data, status: number}>}
     */
    merge() {
        return axios.post(this.options.mergeUrl, {
            filename: this.file.name,
            length: this.chunks.length,
            hash: this.hash
        });
    }

    /**
     * Upload all chunks concurrently, at most `options.concurrency` in
     * flight at a time; each chunk is retried `options.retries` times.
     * @param {function(number): function(number)} handleUploadProgress curried progress callback: chunk index -> per-chunk progress handler
     * @returns {Promise<void>} resolves when every chunk has been uploaded
     */
    async upload(handleUploadProgress) {
        const limit = pLimit(this.options.concurrency);
        const requestList = this.chunks
            .map(({file, chunkHash, chunkName}, index) => {
                const formData = new FormData();
                formData.append('chunk-' + index, file);
                formData.append('chunkHash', chunkHash);
                formData.append('chunkName', chunkName);
                formData.append('hash', this.hash);
                return formData;
            })
            .map((formData, index) =>
                limit(() =>
                    retryPromiseWithArgs(axios.post, this.options.retries, this.options.uploadUrl, formData, {
                        headers: {
                            'Content-Type': 'multipart/form-data'
                        },
                        onUploadProgress: handleUploadProgress(index)
                    })
                )
            );
        await Promise.all(requestList);
    }

    /**
     * Ask the server whether this file (identified by hash) was already uploaded.
     * @returns {Promise<{data, status: number}>}
     */
    check() {
        return axios.post(this.options.checkUrl, {
            hash: this.hash,
            fileName: this.file.name
        });
    }

    /**
     * Split a file into `{file: Blob}` chunk descriptors of at most `size` bytes.
     *
     * FIX: the original returned `[file]` (a bare File) when `size > file.size`,
     * which broke every caller expecting the `{file}` wrapper shape
     * (`chunk.file`, `chunk.chunkHash`, `chunk.chunkName`). It also looped
     * forever on a non-positive `size`.
     *
     * @param {File} file
     * @param {number} size chunk size in bytes
     * @returns {Array<{file: Blob}>} always non-empty, always wrapper objects
     */
    static splitFileIntoChunks(file, size) {
        // Whole file fits in one chunk (or size is invalid): single wrapper.
        if (!(size > 0) || size > file.size) {
            return [{file}];
        }
        const fileChunkList = [];
        let cur = 0;
        while (cur < file.size) {
            fileChunkList.push({file: file.slice(cur, cur + size)});
            cur += size;
        }
        return fileChunkList;
    }

    /**
     * Compute the whole-file MD5 by hashing chunks in a Web Worker pool and
     * appending the per-chunk hashes in chunk order (so the result is
     * deterministic regardless of worker completion order).
     *
     * @param {function(number)} handleHashProgress callback receiving hash progress (0-100)
     * @returns {Promise<string>} the combined file hash
     */
    computeHash(handleHashProgress) {
        return new Promise((resolve, reject) => {
            const spark = new SparkMD5();
            const total = this.chunks.length;
            // FIX: navigator.hardwareConcurrency is undefined in some
            // browsers; without a fallback no workers were created and the
            // promise never settled. Also cap the pool at the chunk count.
            const cpus = navigator.hardwareConcurrency || 4;
            const workerCount = Math.max(1, Math.min(cpus, total));
            const workers = [];
            let done = 0;
            let nextHashIndex = 0;

            // Terminate the pool on failure so workers don't leak.
            const fail = err => {
                workers.forEach(w => w.terminate());
                reject(err);
            };

            const handleMessage = e => {
                const {chunkHash, index} = e.data;
                done++;
                handleHashProgress((done / total) * 100);
                this.chunks[index].chunkHash = chunkHash;

                // Workers finish out of order; only append hashes once every
                // earlier chunk's hash is known, keeping the MD5 input ordered.
                while (nextHashIndex < total && this.chunks[nextHashIndex].chunkHash !== undefined) {
                    spark.append(this.chunks[nextHashIndex].chunkHash);
                    nextHashIndex++;
                }

                if (done === total) {
                    const hash = spark.end();
                    workers.forEach(w => w.terminate());
                    resolve(hash);
                }
            };

            for (let i = 0; i < workerCount; i++) {
                const worker = new Worker(new URL('./hash.js', import.meta.url));
                // FIX: install handlers once per worker; the original
                // re-assigned onmessage on every chunk iteration.
                worker.onmessage = handleMessage;
                worker.onmessageerror = fail;
                worker.onerror = fail;
                workers.push(worker);
            }

            // Distribute chunks round-robin across the pool.
            this.chunks.forEach((chunk, i) => {
                workers[i % workerCount].postMessage({chunk: chunk.file, index: i});
            });
        });
    }
}
