<!doctype html>
<html lang="zh-CN">
	<head>
		<meta charset="utf-8" />
		<meta name="viewport" content="width=device-width, initial-scale=1" />
		<title>大文件分片</title>
		<script src="https://cdn.bootcdn.net/ajax/libs/spark-md5/3.0.2/spark-md5.min.js"></script>
		<style>
			.box {
				border: 1px solid red;

				.inner {
					height: 100px;
					width: 100px;
					background: teal;
				}
			}
		</style>
	</head>

	<body>
		<div class="box">
			<div class="inner"></div>
			<input id="file" type="file" />
		</div>
		<script>
			const fileEl = document.getElementById('file');

			// When the user picks a file: split it into chunks, compute its MD5,
			// then upload the chunks sequentially.
			fileEl.onchange = async function (e) {
				const file = e.target.files[0];
				if (!file) return; // selection was cancelled — nothing to do

				// ~1 KB chunks: deliberately tiny so the demo produces many pieces.
				const chunks = createChunks(file, 1024 * 1024 * 0.001);

				// Content hash identifies the file across requests (enables the
				// server to group chunks / support resumable uploads).
				const hash = await createHash(chunks);
				console.log(chunks, hash);

				// Upload one chunk per request, in order.
				for (let i = 0; i < chunks.length; i++) {
					const url = 'https://httpbin.org/post';
					const fd = new FormData();
					fd.append('chunkData', chunks[i]);
					fd.append('hash', hash); // lets the server associate chunks with the file
					fd.append('index', String(i)); // chunk position for reassembly
					let res = await fetch(url, { method: 'post', body: fd }).then(res => res.text());
					// TODO: after the last chunk, notify the server that upload is complete
				}
			};

			/**
			 * @description: Split a file into fixed-size slices.
			 * @param {Blob|File} file source file
			 * @param {number} chunkSize byte size of each slice (default 10 MB)
			 * @return {Blob[]} slices covering the whole file, in order
			 * Note: Blob.slice() only records byte ranges — no file data is read
			 * here; a FileReader is needed later to load the actual bytes.
			 */
			function createChunks(file, chunkSize = 1024 * 1024 * 10) {
				const pieces = [];
				let offset = 0;
				while (offset < file.size) {
					pieces.push(file.slice(offset, offset + chunkSize, file.type));
					offset += chunkSize;
				}
				return pieces;
			}

			/**
			 * @description: Compute the whole file's MD5 with spark-md5 by reading
			 * each chunk sequentially and feeding it to an incremental hasher.
			 * @param {Blob[]} chunks slices produced by createChunks
			 * @return {Promise<string>} resolves with the hex MD5 digest of the
			 * concatenated chunks; rejects if any chunk fails to read
			 */
			function createHash(chunks) {
				return new Promise((resolve, reject) => {
					// SparkMD5.ArrayBuffer is required for binary input; the plain
					// SparkMD5 hasher expects strings, so appending an ArrayBuffer
					// to it would hash the string "[object ArrayBuffer]" instead of
					// the file's bytes.
					const spark = new SparkMD5.ArrayBuffer();

					// Read chunks one at a time; FileReader is async, so chain the
					// next read from the previous chunk's onload callback.
					function _read(i) {
						if (i >= chunks.length) {
							resolve(spark.end());
							return;
						}
						const reader = new FileReader();
						reader.onload = function (e) {
							spark.append(e.target.result);
							_read(i + 1);
						};
						// Surface read failures instead of leaving the promise
						// pending forever.
						reader.onerror = () => reject(reader.error);
						reader.readAsArrayBuffer(chunks[i]);
					}

					// Start from the FIRST chunk — the original started at index 1
					// and silently skipped chunk 0, producing a wrong hash.
					_read(0);
				});
			}
		</script>
	</body>
</html>
