// Chunk size: 5 MiB. Files are split into parts of this size for multipart upload.
const chunkSize = 5 * 1024 * 1024;
let SparkMD5 = require("spark-md5")
import axios from 'axios'
import service from "@/api/service";

// Public API: chunked-upload entry point plus a reset for module bookkeeping.
export default {
    uploadFile,
    empty
}
// Per-file count of chunks uploaded in this session (indexed like scheduleList)
let total = []
// Per-file array of per-chunk uploaded byte counts, used for the progress bar
let complete = []
// Calling Vue component; must expose `scheduleList` and `$set`
let _self
// Bearer token, read from sessionStorage when an upload starts
let token = ''

/**
 * Reset the module-level upload bookkeeping (chunk counters and per-chunk
 * progress). Call before starting a fresh batch of uploads.
 */
function empty() {
    total = []
    complete = []
}

/**
 * 注意：本测试Demo不受分片顺序影响
 * 关于上传文件成功后的处理：配置minio监听指定存储桶指定格式文件上传成功后，push通知到mq,后端程序监听并消费即可
 * （建议上传mp4，成功后可以直接在页面看到效果）
 * 测试分片上传
 *      运行页面 > 打开控制台 > console > 选择上传的文件 > 观察打印的信息
 * 测试秒传
 *      在上一个测试的基础上，刷新一下页面，选择上一次上传的文件
 * 测试断点续传
 *      重新选择一个文件(如果你没有多的测试文件，就重启一下后台服务) > 手动模拟上传了一部分失败的场景(在所有分片未上传完成时关掉页面 或 注释掉合并文件代码，然后去 minio chunk桶 删除几个分片)
 *      > 再选择刚选择的文件上传 > 观察打印的信息是否从缺失的分片开始上传
 */
/**
 * Entry point for chunked upload of one file.
 * Computes the file's MD5, asks the backend to initialise a multipart upload
 * (the backend answers with presigned URLs for the chunks still missing —
 * this is what enables instant upload and resume), then starts up to 5
 * concurrent chunk uploads; each finished chunk pulls the next (see upload()).
 *
 * @param file       File chosen by the user
 * @param parentIds  parent directory id(s) forwarded to the backend
 * @param categories file categories forwarded to the backend
 * @param self       calling Vue component; must expose `scheduleList` and `$set`
 * @returns {Promise<void>} resolves once initialisation is done (instant
 *          upload finished, chunk uploads started, or init failed)
 */
function uploadFile(file, parentIds, categories, self) {
    _self = self
    return new Promise((resolve) => {
        const fileSize = file.size

        // Informational only: small files still go through the chunk path here.
        if (fileSize <= chunkSize) {
            console.log("上传的文件大于5m才能分片上传")
        }

        // Number of chunks needed for this file
        const chunkCount = Math.ceil(fileSize / chunkSize)
        total.push(0)
        console.log("文件大小：", file.size, "分片数：", chunkCount)
        token = sessionStorage.getItem("access_token")
        getFileMd5(file).then(fileMd5 => {
            console.log("文件md5：", fileMd5)
            console.log("向后端请求本次分片上传初始化")
            // Initialise this multipart upload on the backend.
            axios({
                baseURL: service.baseURL,
                url: "/small-service-os/minio/init-chunk-upload",
                method: 'POST',
                headers: {
                    'Authorization': 'Bearer ' + token,
                },
                data: {
                    chunkCount: chunkCount,
                    fileMd5: fileMd5,
                    parentIds: parentIds,
                    categories: categories,
                    fileName: file.name,
                    size: fileSize
                },
            }).then(r => {
                let res = r.data
                // Presigned upload URLs for the chunks that are still missing
                let chunkUploadUrls = res.data.mysqlFileData
                // Register this file in the UI list
                const whichNumber = _self.scheduleList.length
                complete.push(new Array(chunkCount))
                _self.scheduleList.push({
                    fileName: file.name,
                    message: '分片上传:文件大小' + (file.size / 1024 / 1024).toFixed(2) + "Mb" + "分片数：" + chunkCount,
                    isShow: 0,
                    schedule: 0,
                    chunkUploadUrls
                })
                // Instant upload: the whole file already exists on the server.
                // Guard UPLOAD_SUCCESS before reading `.code` — it may be absent.
                const uploadFlag = res.data.UPLOAD_SUCCESS
                if (res.status === 200 && (uploadFlag === 1 || (uploadFlag && uploadFlag.code === 1))) {
                    console.log("当前文件上传情况：秒传")
                    console.log("合并文件完成", res.data)
                    composeFileF(whichNumber, file.name)
                    resolve()
                    return
                }
                console.log("当前文件上传情况：初次上传 或 断点续传")
                // `chunkLength` is the number of chunks this session must send;
                // upload() triggers the merge once that many have completed.
                let chunkLength = chunkUploadUrls.length
                // Kick off at most 5 concurrent chunk uploads; completion of
                // each one shifts the next pending chunk off the queue.
                for (let i = 0; i < 5; i++) {
                    let item = _self.scheduleList[whichNumber].chunkUploadUrls.shift()
                    if (item) {
                        upload(item.uploadUrl, file, item.partNumber, chunkLength, fileMd5, parentIds, whichNumber)
                    }
                }
                resolve()
            }).catch(err => {
                // Surface init failures instead of leaving an unhandled rejection.
                console.error("分片上传初始化失败", err)
                resolve()
            })
        });

    })

}

/**
 * 上传文件接口
 * @param url 上传路径
 * @param file 文件
 * @param partNumber 第几个分片
 * @param chunkLength 总长度
 * @param fileMd5 编码
 * @param parentIds 父级ID
 * @param whichNumber 第几个文件
 */
/**
 * Upload one chunk to its presigned URL, then pull the next pending chunk off
 * the queue (keeping at most 5 uploads in flight). Once `chunkLength` chunks
 * have completed for this file, ask the backend to merge them.
 * @param url         presigned PUT URL for this chunk
 * @param file        the file being uploaded
 * @param partNumber  1-based chunk index
 * @param chunkLength number of chunks this session must upload
 * @param fileMd5     file MD5 (needed by the merge request)
 * @param parentIds   parent directory id(s)
 * @param whichNumber index of the file in scheduleList
 */
function upload(url, file, partNumber, chunkLength, fileMd5, parentIds, whichNumber) {
    // Byte range of this chunk (partNumber is 1-based)
    let start = (partNumber - 1) * chunkSize
    let end = Math.min(file.size, start + chunkSize)
    // Slice the chunk out of the file without copying the whole file
    let _chunkFile = file.slice(start, end)
    axios({
        url: url,
        method: 'PUT',
        data: _chunkFile,
        onUploadProgress: progressEvent => {
            testUp(whichNumber, partNumber, progressEvent.loaded, file.size)
        },
    }).then(res => {
        // Chain the next pending chunk so the concurrency level is maintained
        let m = _self.scheduleList[whichNumber]
        let arr = m.chunkUploadUrls
        if (arr.length) {
            let item = arr.shift()
            upload(item.uploadUrl, file, item.partNumber, chunkLength, fileMd5, parentIds, whichNumber)
        }
        total[whichNumber]++
        console.log('请求后端合并文件', total[whichNumber], chunkLength)
        // All chunks for this session are in — request the server-side merge
        if (total[whichNumber] === chunkLength) {
            composeFile(fileMd5, file.name, file.size, parentIds, whichNumber)
        }
    }).catch(err => {
        // Log the failure; previously the error object was silently dropped.
        console.error('分片上传失败 partNumber=' + partNumber, err)
        // NOTE(review): a failed chunk only flags the UI (isShow = 4); queued
        // chunks are neither retried nor drained — confirm this is intended.
        let m = _self.scheduleList[whichNumber]
        m['isShow'] = 4
        _self.$set(_self.scheduleList, whichNumber, m)
    })
}

/**
 * 请求后端合并文件
 * @param fileMd5 md5
 * @param fileName 文件名
 * @param size 大小
 * @param parentIds 父级ID
 * @param whichNumber
 */
/**
 * Ask the backend to merge all uploaded chunks into the final object.
 * Flips the UI entry to "merging" first, then to "merged" on success or to
 * the error state on failure.
 * @param fileMd5     file MD5 identifying the chunk set
 * @param fileName    file name
 * @param size        file size in bytes
 * @param parentIds   parent directory id(s)
 * @param whichNumber index of the file in scheduleList
 */
function composeFile(fileMd5, fileName, size, parentIds, whichNumber) {
    startComposeFile(fileName, whichNumber)
    console.log("开始请求后端合并文件")
    // Note: the backend must be configured with your own bucket name.
    axios({
        baseURL: service.baseURL,
        url: "/small-service-os/minio/compose-file",
        method: 'post',
        headers: {
            'Authorization': 'Bearer ' + token,
        },
        data: {
            fileMd5: fileMd5,
            fileName: fileName,
            parentIds: parentIds,
            size: size
        }
    }).then(res => {
        composeFileF(whichNumber)
        console.log("合并文件完成", res.data)
    }).catch(err => {
        // Previously unhandled: a merge failure left the UI stuck at "开始合并"
        // and produced an unhandled promise rejection.
        console.error("合并文件失败", err)
        let m = _self.scheduleList[whichNumber]
        m['isShow'] = 4
        _self.$set(_self.scheduleList, whichNumber, m)
    })
}


/**
 * 获取文件MD5
 * @param file
 * @returns {Promise<unknown>}
 */
/**
 * Compute the MD5 of a File incrementally, one chunk at a time.
 * Replaces the previous implementation, which used the deprecated
 * FileReader.readAsBinaryString, loaded the entire file into memory at once,
 * and never settled the promise if the read failed. Hashing chunk-by-chunk
 * with SparkMD5.ArrayBuffer produces the same digest with bounded memory.
 * @param file {File}
 * @returns {Promise<string>} hex MD5 digest; rejects on read error
 */
function getFileMd5(file) {
    return new Promise((resolve, reject) => {
        const spark = new SparkMD5.ArrayBuffer()
        const reader = new FileReader()
        let offset = 0
        const readNext = () => {
            reader.readAsArrayBuffer(file.slice(offset, Math.min(file.size, offset + chunkSize)))
        }
        reader.onload = (e) => {
            spark.append(e.target.result)
            offset += chunkSize
            if (offset < file.size) {
                readNext()
            } else {
                resolve(spark.end())
            }
        }
        // Settle the promise on failure instead of hanging forever
        reader.onerror = () => reject(reader.error)
        readNext()
    })
}

/**
 * 跟新进度
 * @param whichNumber 第几个文件
 * @param partNumber 第几个分片
 * @param persent 上传大小
 * @param totalSize 总大小
 */
/**
 * Update the progress bar for one file.
 * Records the bytes uploaded so far for the given chunk, then recomputes the
 * file's overall percentage from all of its chunks.
 * @param whichNumber index of the file in scheduleList
 * @param partNumber  1-based chunk index
 * @param persent     bytes of this chunk uploaded so far (progressEvent.loaded)
 * @param totalSize   total file size in bytes
 */
function testUp(whichNumber, partNumber, persent, totalSize) {
    complete[whichNumber][partNumber] = persent
    // Sum uploaded bytes across all chunks of this file
    // (forEach skips the holes left by `new Array(chunkCount)`)
    let uploaded = 0
    complete[whichNumber].forEach(v => {
        uploaded += v
    })
    const percent = (uploaded / totalSize * 100 | 0)
    // Removed leftover debug console.log that dumped the whole `complete`
    // matrix on every progress event.
    const entry = _self.scheduleList[whichNumber]
    entry['schedule'] = percent
    _self.$set(_self.scheduleList, whichNumber, entry)
}

/**
 * 开始合并
 * @param fileName 文件名
 * @param whichNumber
 */
/**
 * Switch the given file's UI entry into the "merging" state.
 * @param fileName    file name (kept for signature compatibility; unused here)
 * @param whichNumber index of the file in scheduleList
 */
function startComposeFile(fileName, whichNumber) {
    const entry = _self.scheduleList[whichNumber]
    entry.message = '开始合并'
    entry.isShow = 1
    _self.$set(_self.scheduleList, whichNumber, entry)
}

/**
 * 合并完成
 * @param whichNumber 第几个文件
 * @param fileName 文件名称
 */
/**
 * Switch the given file's UI entry into the "merge finished" state.
 * @param whichNumber index of the file in scheduleList
 * @param fileName    optional display name override (used on instant upload)
 */
function composeFileF(whichNumber, fileName) {
    const entry = _self.scheduleList[whichNumber]
    if (fileName) {
        entry.fileName = fileName
    }
    entry.message = '合并完成'
    entry.isShow = 2
    _self.$set(_self.scheduleList, whichNumber, entry)
}