export function record(callback) {

	// Older browsers may not implement mediaDevices at all, so start with an empty object.
	if (navigator.mediaDevices === undefined) {
		navigator.mediaDevices = {};
	}
	// Some browsers only partially support mediaDevices. We can't just assign
	// getUserMedia to the object, since that could overwrite an existing property.
	// Only add it when no getUserMedia property is present.
	if (navigator.mediaDevices.getUserMedia === undefined) {
		navigator.mediaDevices.getUserMedia = function(constraints) {
			// First grab the prefixed legacy getUserMedia if one exists.
			var getUserMedia = navigator.webkitGetUserMedia || navigator.mozGetUserMedia;
			// Some browsers don't implement it at all - return a rejected promise
			// with an error so callers still see a consistent interface.
			if (!getUserMedia) {
				return Promise.reject(new Error('getUserMedia is not implemented in this browser'));
			}
			// Otherwise wrap the legacy callback-style navigator.getUserMedia in a Promise.
			return new Promise(function(resolve, reject) {
				getUserMedia.call(navigator, constraints, resolve, reject);
			});
		};
	}

	navigator.mediaDevices.getUserMedia({
		audio: true
	}).then(mediaStream => {
		console.log(mediaStream);
		// Keep the stream around so recordClose() can stop its track later.
		window.mediaStream = mediaStream;
		callback(); // update the "recording" UI
		beginRecord(mediaStream);
	}).catch(err => {
		// Rejects when there is no microphone, the user denies permission, the
		// device fails to connect, etc.; err.name identifies which case it was.
		console.error(err);
	});
}
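
// Usage sketch: the callback fires once the microphone stream is live. The
// ".record-btn" selector and label below are illustrative, not part of this module.
function recordUsageExample() {
	record(function () {
		document.querySelector('.record-btn').textContent = 'Recording...';
	});
}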


export function oldRecord(callback) {
	navigator.getUserMedia = navigator.getUserMedia || navigator.webkitGetUserMedia || navigator.mozGetUserMedia || navigator.msGetUserMedia;
	if (navigator.getUserMedia) {
		navigator.getUserMedia({
				audio: true
			},
			mediaStream => {
				console.log('recording stream', mediaStream);
				// Keep the stream around so recordClose() can stop its track later.
				window.mediaStream = mediaStream;
				callback(); // update the "recording" UI
				beginRecord(mediaStream);
			},
			err => {
				// Fires when there is no microphone, the user denies permission, the
				// device fails to connect, etc.; err.name identifies which case it was.
				console.error('recording error', err);
			}
		);
	} else {
		console.log('Recording is not supported on this device');
	}

}



export function beginRecord(mediaStream) {
	let audioContext = new (window.AudioContext || window.webkitAudioContext)();
	let mediaNode = audioContext.createMediaStreamSource(mediaStream);
	console.log(mediaNode);
	window.mediaNode = mediaNode;
	// Connecting the source straight to the destination would play the
	// microphone input back out of the speakers:
	// mediaNode.connect(audioContext.destination);
	// Reset the capture buffers so a new recording doesn't append to the last one.
	leftDataList = [];
	rightDataList = [];
	// Create a ScriptProcessorNode (jsNode) to tap the raw PCM data.
	let jsNode = createJSNode(audioContext);
	window.jsNode = jsNode;
	// The node must be connected to the destination so its outputBuffer gets
	// consumed; otherwise the onaudioprocess callback never fires. Since we
	// never write to outputBuffer, the speakers stay silent.
	jsNode.connect(audioContext.destination);
	jsNode.onaudioprocess = onAudioProcess;
	// Feed the microphone stream into the jsNode.
	mediaNode.connect(jsNode);
}
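
// Note: createWavFile() below hardcodes a 44100 Hz sample rate, but the
// AudioContext created above may run at a different rate (48000 Hz is common).
// If playback sounds pitch-shifted, write audioContext.sampleRate into the
// WAV header instead; that change is deliberately not made here.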

export function createJSNode(audioContext) {
	const BUFFER_SIZE = 4096;
	const INPUT_CHANNEL_COUNT = 2;
	const OUTPUT_CHANNEL_COUNT = 2;
	// createJavaScriptNode is the deprecated old name for createScriptProcessor;
	// fall back to it on very old browsers.
	let creator = audioContext.createScriptProcessor || audioContext.createJavaScriptNode;
	creator = creator.bind(audioContext);
	return creator(BUFFER_SIZE, INPUT_CHANNEL_COUNT, OUTPUT_CHANNEL_COUNT);
}
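
// Note: ScriptProcessorNode is itself deprecated in favour of AudioWorklet.
// A rough sketch of the modern equivalent (the 'recorder-processor' name and
// module file are hypothetical, not part of this project):
//
//   await audioContext.audioWorklet.addModule('recorder-processor.js');
//   const workletNode = new AudioWorkletNode(audioContext, 'recorder-processor');
//   workletNode.port.onmessage = e => { /* collect PCM chunks here */ };
//   mediaNode.connect(workletNode);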
// PCM chunks captured per channel by onAudioProcess.
let leftDataList = [],
	rightDataList = [];

export function onAudioProcess(event) {
	let audioBuffer = event.inputBuffer;
	let leftChannelData = audioBuffer.getChannelData(0),
		rightChannelData = audioBuffer.getChannelData(1);
	// getChannelData returns a view into memory the browser reuses between
	// callbacks, so clone each chunk before storing it.
	leftDataList.push(leftChannelData.slice(0));
	rightDataList.push(rightChannelData.slice(0));
}

// Assemble the captured PCM chunks into a complete WAV file buffer.
export function recordFile() {
	let leftData = mergeArray(leftDataList),
		rightData = mergeArray(rightDataList);
	let allData = interleaveLeftAndRight(leftData, rightData);
	let wavBuffer = createWavFile(allData);
	return wavBuffer;
}

// Wrap the WAV ArrayBuffer in a blob URL that an <audio> element can play.
export function recordUrl(arrayBuffer) {
	window.URL = window.URL || window.webkitURL;
	let blob = new Blob([new Uint8Array(arrayBuffer)], { type: 'audio/wav' });
	let blobUrl = window.URL.createObjectURL(blob);
	// e.g. document.querySelector('.audio-node').src = blobUrl;
	return blobUrl;
}

export function stopRecord() {
	// Stop recording: disconnecting the nodes stops onaudioprocess from firing.
	window.mediaNode.disconnect();
	window.jsNode.disconnect();
	console.log('recording stopped');
}

export function recordClose() {
	// Release the microphone by stopping its audio track.
	window.mediaStream.getAudioTracks()[0].stop();
	console.log('microphone released');
}
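
// End-to-end sketch tying the exports together; the <audio class="audio-node">
// element is assumed to exist in the page (it matches the commented example
// inside recordUrl above).
function finishRecordingExample() {
	stopRecord();                   // stop capturing new PCM chunks
	recordClose();                  // release the microphone
	let wavBuffer = recordFile();   // merge the chunks into a WAV ArrayBuffer
	let url = recordUrl(wavBuffer); // wrap it in a blob URL
	document.querySelector('.audio-node').src = url;
}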

// Concatenate a list of equal-length Float32Array chunks into one contiguous array.
function mergeArray(list) {
	// Every chunk has BUFFER_SIZE samples, so the total length is simply the product.
	let length = list.length * list[0].length;
	let data = new Float32Array(length),
		offset = 0;
	for (let i = 0; i < list.length; i++) {
		data.set(list[i], offset);
		offset += list[i].length;
	}
	return data;
}

function interleaveLeftAndRight(left, right) {
	// Interleave the two channels sample by sample: L0, R0, L1, R1, ...
	let totalLength = left.length + right.length;
	let data = new Float32Array(totalLength);
	for (let i = 0; i < left.length; i++) {
		let k = i * 2;
		data[k] = left[i];
		data[k + 1] = right[i];
	}
	return data;
}

function createWavFile(audioData) {
	const WAV_HEAD_SIZE = 44;
	let buffer = new ArrayBuffer(audioData.length * 2 + WAV_HEAD_SIZE),
		// A DataView is needed to write typed values into the raw buffer.
		view = new DataView(buffer);
	// Write the 44-byte WAV header.
	// RIFF chunk descriptor/identifier
	writeUTFBytes(view, 0, 'RIFF');
	// RIFF chunk length (total file size minus the 8 bytes already written)
	view.setUint32(4, 36 + audioData.length * 2, true);
	// RIFF type
	writeUTFBytes(view, 8, 'WAVE');
	// "fmt " sub-chunk identifier
	writeUTFBytes(view, 12, 'fmt ');
	// format chunk length
	view.setUint32(16, 16, true);
	// sample format (1 = integer PCM)
	view.setUint16(20, 1, true);
	// stereo (2 channels)
	view.setUint16(22, 2, true);
	// sample rate (hardcoded 44100 Hz; see the note after beginRecord above)
	view.setUint32(24, 44100, true);
	// byte rate (sample rate * block align = 44100 * 4 for 16-bit stereo)
	view.setUint32(28, 44100 * 4, true);
	// block align (channel count * bytes per sample)
	view.setUint16(32, 2 * 2, true);
	// bits per sample
	view.setUint16(34, 16, true);
	// "data" sub-chunk identifier
	writeUTFBytes(view, 36, 'data');
	// data chunk length
	view.setUint32(40, audioData.length * 2, true);
	// Write the PCM samples, converting float [-1, 1] to signed 16-bit integers.
	let length = audioData.length;
	let index = 44;
	let volume = 1;
	for (let i = 0; i < length; i++) {
		// Clamp to [-1, 1] first so loud samples can't overflow the int16 range.
		let sample = Math.max(-1, Math.min(1, audioData[i]));
		view.setInt16(index, sample * (0x7FFF * volume), true);
		index += 2;
	}
	return buffer;
}
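
// Sketch: trigger a browser download of the finished WAV. The file name is
// arbitrary and nothing here is required by the functions above.
function downloadWavExample() {
	let blob = new Blob([recordFile()], { type: 'audio/wav' });
	let link = document.createElement('a');
	link.href = URL.createObjectURL(blob);
	link.download = 'recording.wav';
	link.click();
}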

// Write an ASCII string into the view, one byte per character, at the given offset.
function writeUTFBytes(view, offset, string) {
	var lng = string.length;
	for (var i = 0; i < lng; i++) {
		view.setUint8(offset + i, string.charCodeAt(i));
	}
}

