<!-- Talkback (intercom) over WebSocket: captures microphone audio, converts it to 16-bit PCM and streams it to the device. -->
<template>
	<div>
		<!-- Normal UI: shown when getUserMedia is available (secure context or whitelisted origin). -->
		<template v-if="state.isTalkSet">
			<!-- Muted local monitor element for the microphone stream (muted to avoid feedback). -->
			<audio class="myvideo" ref="myaudio" controls autoplay muted></audio>

			<!-- Toggle button: microphone icon starts a talk session, mute icon stops it. -->
			<div class="circleBtn" v-if="!state.isTalk" @click="startTalk" type="success">
				<el-icon :size="40"><Microphone /></el-icon>
			</div>
			<div class="circleBtn" v-else @click="stopAudio" type="danger">
				<el-icon :size="40"><Mute /></el-icon>
			</div>
			<!-- <el-button type="" @click="closeStream">stop</el-button> -->
		</template>
		<!-- Fallback: step-by-step instructions for whitelisting this insecure (http)
		     origin in Chrome so getUserMedia becomes available. -->
		<div class="noTalkSet" v-else>
			<h2>打开语音通话</h2>

			<el-timeline>
				<el-timeline-item>
					<div class="step">
						<p>新窗口打开设置地址</p>
						<p>chrome://flags/#unsafely-treat-insecure-origin-as-secure</p>
						<el-text type="primary" @click="copyText('chrome://flags/#unsafely-treat-insecure-origin-as-secure')">快捷复制</el-text>
					</div>
				</el-timeline-item>
				<el-timeline-item><div class="step">将（黄色标签）右侧状态切换成 enable（启用） 状态</div></el-timeline-item>
				<el-timeline-item
					><div class="step">
						<p>输入框中填写需要开启的域名，多个以逗号分隔。</p>
						<el-text type="primary" @click="copyText('http://' + state.ourHost)">复制本网站域名</el-text>
					</div></el-timeline-item
				>
				<el-timeline-item><div class="step">页面空白处点击，点击右下角重启 后生效。</div></el-timeline-item>
			</el-timeline>
		</div>
	</div>
</template>

<script lang="ts" setup name="Talkback">
import { nextTick, onBeforeUnmount, onMounted, reactive, ref } from 'vue';
import { getUrl } from '/@/utils/hm';
import { Microphone, Mute } from '@element-plus/icons-vue';
import { ElMessage } from 'element-plus';
import commonFunction from '/@/utils/commonFunction';
const { copyText } = commonFunction();
// WebSocket carrying the outgoing audio stream (created in initStreamWs).
let ws: WebSocket;

// ScriptProcessor node used to pull raw PCM frames from the microphone stream.
let recorder: any = null;
// Capture parameters: mono, 8 kHz, 16-bit — presumably what the device expects;
// TODO(review): confirm against the device-side stream handler.
const AUDIOOPTS = {
	channelCount: 1,
	sampleRate: 8000,
	sampleSize: 16,
};

// Template ref to the (muted) <audio> monitor element.
const myaudio = ref();
const props = defineProps({
	// Current alarm/device descriptor; only `deviceCode` is read in this component.
	curAlarm: {
		type: Object,
		default: function () {
			return {};
		},
	},
});

const state = reactive({
	isTalkSet: true, // whether getUserMedia is available in this (possibly http) context
	wsFlag: false, // true while the stream WebSocket is open and usable
	stream: <any>null, // active MediaStream returned by getUserMedia
	isTalk: false, // true while a talk session is running
	ourHost: location.host, // shown in the setup instructions for copy/paste
});

onMounted(() => {
	// mediaDevices is undefined on insecure origins — switch to the setup instructions UI.
	if (!window.navigator.mediaDevices) {
		state.isTalkSet = false;
	}
});

/**
 * Tear down the current talk session: stop playback, release the microphone
 * tracks, detach the PCM processor and close the stream WebSocket.
 * Safe to call repeatedly (idempotent) and during component unmount.
 */
function stopAudio() {
	if (!state.isTalkSet) return;

	state.isTalk = false;
	// Guard the template ref: the <audio> element may already be gone when this
	// runs from onBeforeUnmount.
	if (myaudio.value) {
		myaudio.value.srcObject = null;
	}
	state.stream && state.stream.getTracks().forEach((track: any) => track.stop());
	if (recorder) {
		recorder.disconnect();
		recorder.onaudioprocess = null;
		// Null the node so a second stopAudio() call is a no-op instead of
		// disconnecting an already-dead processor.
		recorder = null;
		closeStream();
	}

	state.wsFlag = false;
}

/**
 * Ask the device to stop the audio stream, then close the socket after a
 * grace period so the close command has time to reach the server.
 */
function closeStream() {
	if (!state.wsFlag) return;

	const closeCmd = {
		cmdType: 'closeAudioStream',
		deviceCode: props.curAlarm.deviceCode,
		data: null,
	};
	ws.send(JSON.stringify(closeCmd));
	setTimeout(() => ws.close(), 3000);
}

/**
 * Start a talk session: open the stream WebSocket, grab the microphone, and
 * pump 16-bit little-endian PCM frames to the device over the socket.
 */
function startTalk() {
	initStreamWs();

	window.navigator.mediaDevices
		.getUserMedia({
			audio: {
				echoCancellation: true,
				autoGainControl: true,
				noiseSuppression: true,
				...AUDIOOPTS,
			},
		})
		.then((mediaStream) => {
			state.stream = mediaStream;
			// The monitor element is muted in the template, so no feedback loop.
			myaudio.value.srcObject = mediaStream;
			state.isTalk = true;

			let audioContext = new window.AudioContext(AUDIOOPTS);

			let mediaNode = audioContext.createMediaStreamSource(mediaStream);
			// NOTE(review): ScriptProcessorNode is deprecated in favour of
			// AudioWorklet, but kept here as it works in this http setup.
			recorder = audioContext.createScriptProcessor(4096, 1, 1);
			recorder.connect(audioContext.destination);
			mediaNode.connect(recorder);
			recorder.onaudioprocess = (e: any) => {
				// Float32 samples, nominally in [-1, 1]; convert to 16-bit PCM.
				const audioData = e.inputBuffer.getChannelData(0);

				let buffer = new ArrayBuffer(audioData.length * 2);
				let view = new DataView(buffer);
				let offset = 0;

				for (let i = 0; i < audioData.length; i++, offset += 2) {
					// Clamp before scaling: samples can exceed [-1, 1] (e.g. after
					// gain), and DataView.setInt16 silently wraps out-of-range
					// values, producing loud clicks in the stream.
					const s = Math.max(-1, Math.min(1, audioData[i]));
					view.setInt16(offset, s < 0 ? s * 0x8000 : s * 0x7fff, true);
				}

				// Ship the PCM frame (preceded by its JSON header) over the socket.
				sentWsData(view);
			};

			// Local recording for debugging:
			// recordVideo(mediaStream);
		})
		.catch((err) => {
			// Typically NotAllowedError (permission denied) or NotFoundError (no mic).
			console.log(err);
		});
}

/**
 * Build the JSON header that precedes each binary PCM frame on the socket,
 * describing the payload: mono audio at the AUDIOOPTS rate and bit depth.
 */
function audioJson() {
	const head = {
		type: 'audio',
		binary_num: 1,
		channel_id: 0,
		pts: Date.now(),
	};
	const body = {
		codec: 9, // codec id is protocol-defined; presumably G.711 family — verify server-side
		channel_num: 1,
		sampling_bit: AUDIOOPTS.sampleSize,
		sampling_rate: AUDIOOPTS.sampleRate,
	};
	return JSON.stringify({
		cmdType: 'audioJson',
		deviceCode: props.curAlarm.deviceCode,
		data: { head, body },
	});
}

/**
 * Send one audio frame: a JSON header describing the payload, immediately
 * followed by the binary PCM data. Dropped silently while the socket is down.
 */
function sentWsData(audioData: any) {
	if (!state.wsFlag) return;
	ws.send(audioJson());
	ws.send(audioData);
}

/**
 * Open (or re-open) the audio stream WebSocket for the current device and
 * wire up its lifecycle handlers. On open, immediately subscribes the device
 * to the audio stream.
 */
function initStreamWs() {
	// If a previous socket is still connecting/open (e.g. getUserMedia was
	// rejected after an earlier attempt and the user retries), close it first
	// so connections are never leaked.
	if (ws && (ws.readyState === WebSocket.CONNECTING || ws.readyState === WebSocket.OPEN)) {
		ws.close();
	}

	let wsUrl = getUrl('audioStreamWs');

	ws = new WebSocket(`${wsUrl}?deviceCode=${props.curAlarm.deviceCode}`);

	// ws.binaryType = 'arraybuffer';

	// Connection established: mark the socket usable and subscribe to audio.
	ws.onopen = function () {
		showInfo('streamWs 连接成功！');
		state.wsFlag = true;

		ws.send(
			JSON.stringify({
				cmdType: 'openAudioStream',
				deviceCode: props.curAlarm.deviceCode,
				data: null,
			})
		);
	};

	// Closed (by us or the server): stop sending frames.
	ws.onclose = function () {
		state.wsFlag = false;
		showInfo('streamWs 连接关闭 close！');
	};

	// Transport error: stop sending frames.
	ws.onerror = function () {
		state.wsFlag = false;
		showInfo('streamWs 连接错误 err');
	};

	// Incoming messages are currently ignored (send-only stream).
	ws.onmessage = function (result: any) {};
}

// Lightweight debug logger for WebSocket lifecycle events.
function showInfo(msg: any) {
	console.log(msg);
}

// Local recording helper (currently unused — see the commented-out call in
// startTalk). Records the microphone stream with MediaRecorder and triggers
// a file download when recording stops.
let mediaRecorder = null;
function recordVideo(mediaStream: any) {
	let chunks: any = [];
	mediaRecorder = new MediaRecorder(mediaStream);
	mediaRecorder.start();

	// Fired when recording starts (i.e. right after mediaRecorder.start()).
	mediaRecorder.onstart = () => {
		console.log('record start');
	};

	// Fired after mediaRecorder.stop(), before onstop: collects the recorded data.
	mediaRecorder.ondataavailable = (e) => {
		console.log('dataavailable');
		console.log(e);
		chunks.push(e.data);
	};

	// Fired after mediaRecorder.stop() and ondataavailable.
	mediaRecorder.onstop = () => {
		console.log('record end');
		// Assemble the recorded chunks into a single blob.
		let blob = new Blob(chunks, { type: 'audio/webm;codecs=opus' });
		console.log('🚀 ~ file: record.js:27 ~ navigator.mediaDevices.getUserMedia ~ blob:', blob);

		// Create an object URL for the blob…
		let url = window.URL.createObjectURL(blob);
		// …and download it through a temporary <a download> element.
		// NOTE(review): the file is saved with a .wav extension but the blob is
		// webm/opus — the extension is misleading; confirm before enabling.
		let link = document.createElement('a');
		link.href = url;
		link.setAttribute('download', '录音.wav');
		link.click();

		// To upload instead of download, convert the blob to a File:
		// let file = new window.File([blob], "record.webm");

		// Or create a URL for in-page playback via an <audio> element:
		// let url = (window.URL || webkitURL).createObjectURL(blob);
	};
}

// Release the microphone and close the socket when the component unmounts.
onBeforeUnmount(() => {
	stopAudio();
});
</script>
<style lang="scss" scoped>
// Local audio monitor element, centered below the header.
.myvideo {
	display: block;
	margin: 50px auto;
}

// Round talk/stop button; background colour is driven by the `type` attribute.
.circleBtn {
	width: 120px;
	height: 120px;
	margin: 100px auto 0;
	color: #fff;
	border-radius: 50%;
	// flex centering for the icon (the earlier duplicate `display: block` was dead and has been removed)
	display: flex;
	justify-content: center;
	flex-direction: column;
	align-items: center;
	cursor: pointer;

	&[type='success'] {
		background-color: #67c23a;
	}

	&[type='danger'] {
		background-color: #f56c6c;
	}
}

// Setup-instructions panel shown when getUserMedia is unavailable.
.noTalkSet {
	padding: 10px;

	h2 {
		font-weight: 500;
		color: #f56c6c;
		margin-bottom: 20px;
	}

	.el-text:hover {
		cursor: pointer;
	}
}
</style>
