<template>
	<!-- Recorder / ASR / TTS / AI-assistant demo page -->
	<div class="recorder-container">
		<!-- Control bar: auto-record / auto-recognize switches plus manual record, download, recognize and reset buttons -->
		<div class="recorder-controls">
			<el-switch v-model="autoRecordEnabled" active-text="自动录音" inactive-text="手动录音" style="margin-right: 20px" @change="handleAutoRecordChange" />
			<el-switch v-model="autoRecognizeEnabled" active-text="自动识别" inactive-text="手动识别" style="margin-right: 20px" />
			<el-button type="primary" @click="handleRecordClick" :disabled="isPlaying || autoRecordEnabled">
				{{ isRecording ? '停止' : '录音' }}
			</el-button>
			<el-button type="info" @click="handleDownload" :disabled="!audioUrl || isRecording"> 下载 </el-button>
			<el-button type="success" @click="handleRecognize" :disabled="!audioUrl || isRecording || recognizing" :loading="recognizing"> 识别 </el-button>
			<el-button type="danger" @click="reset"> 重置 </el-button>
		</div>
		<!-- Last speech-recognition result -->
		<div v-if="recognitionResult" class="recognition-result">
			<h3>识别结果：</h3>
			<p>{{ recognitionResult }}</p>
		</div>
		<!-- Live recording feedback: elapsed time and input level in dB -->
		<div v-if="isRecording" class="recording-info">
			<div class="recording-time">录音时长: {{ recordingTime }} 秒</div>
			<div class="decibel-value" :class="{ high: currentDecibels > -20, medium: currentDecibels <= -20 && currentDecibels > -40 }">当前音量: {{ currentDecibels.toFixed(1) }} dB</div>
		</div>
		<!-- Playback of the last finished recording -->
		<div v-if="audioUrl" class="audio-player">
			<audio :src="audioUrl" controls @play="isPlaying = true" @pause="isPlaying = false" @ended="isPlaying = false" ref="audioRef"></audio>
		</div>

		<el-divider content-position="left">TTS</el-divider>
		<!-- Text-to-speech synthesis input -->
		<div>
			<el-input v-model="state.textareaTxt" style="width: 400px; margin-right: 10px" :rows="3" type="textarea" placeholder="请填写需要合成的文字" />
			<el-button type="primary" @click="handleTTS"> 合成 </el-button>
		</div>

		<el-divider content-position="left">AI语音助手</el-divider>
		<!-- Customer message input; response time of the last request is shown inline -->
		<div>
			<div>
				客户的话： <span v-if="state.responseTime !== null" class="response-time">响应时间：{{ state.responseTime }}秒</span>
			</div>
			<div style="display: flex; align-items: flex-start; gap: 10px">
				<el-input v-model="state.customer" style="width: 600px" :rows="3" type="textarea" placeholder="" @input="handleCustomerChange" />

				<el-button type="primary" @click="handleSendMessage" :loading="state.isLoading"> 发送 </el-button>
			</div>
		</div>
		<!-- Streamed AI assistant reply -->
		<div>
			<div>AI语音助手的话：</div>
			<el-input v-model="state.ai" style="width: 600px; margin-right: 10px" :rows="3" type="textarea" placeholder="" />
		</div>
		<!-- Build/version tag -->
		<div class="tc">v1.0.1.20250407-2</div>
	</div>
</template>

<script setup lang="ts">
import { ref, onUnmounted, reactive, onMounted } from 'vue';
import { ElMessage } from 'element-plus';
import { recognizeSpeech } from '/@/api/speech';
import { resampleAudio } from '/@/utils/audioProcessor';
import { anythingllmApi } from '/@/api/anythingllm';

// Reactive state for the TTS box and the AI-assistant conversation panel.
let state = reactive({
	textareaTxt: '', // TTS input text / welcome prompt text
	customer: '如何使用微信缴费？', // customer utterance (pre-filled demo question)
	ai: '', // streamed AI reply, appended chunk by chunk
	isLoading: false, // true until the first AI response chunk arrives
	responseTime: null as number | null, // seconds until first response chunk
	startTime: null as number | null, // request start timestamp (ms)
	timerInterval: null as number | null, // interval id updating responseTime
	hasPlayedWelcome: false, // whether the welcome prompt was already spoken
});

// Recorder UI state.
const isRecording = ref(false); // a recording is in progress
const isPlaying = ref(false); // the playback <audio> element is playing
const recordingTime = ref(0); // elapsed recording time in seconds
const audioUrl = ref(''); // object URL of the last finished recording
const currentDecibels = ref(-Infinity); // live input level shown in the UI (dB)
const audioRef = ref<HTMLAudioElement | null>(null); // playback element
const recognizing = ref(false); // a speech-recognition request is in flight
const recognitionResult = ref(''); // last ASR result text
const autoRecordEnabled = ref(true); // start recording automatically on sound
const autoRecognizeEnabled = ref(true); // run ASR automatically after recording
let soundMonitorTimer: number | null = null; // interval id for idle sound monitoring

let mediaRecorder: MediaRecorder | null = null; // active recorder, if any
let audioChunks: Blob[] = []; // chunks of the current recording
let timer: number | null = null; // interval id for the recording-time counter
let audioContext: AudioContext | null = null; // shared analysis context
let analyser: AnalyserNode | null = null; // FFT analyser fed by the mic
let silenceTimer: number | null = null; // interval id for silence detection while recording
let lastSoundTime = 0; // timestamp (ms) when sound was last detected

// Watchdog: stops a recording after 10 s if no sound was ever detected.
let noSound10sTimer: number | null = null;

// Whether any sound above the threshold was heard in the current recording.
let hasSoundDetected = false;

const SILENCE_THRESHOLD = -9; // silence threshold while recording (dB)
const AUTO_RECORD_THRESHOLD = -3; // level that triggers auto-record (dB)
const SILENCE_DURATION = 500; // silence duration before auto-stop (ms)

// Queue of text segments waiting to be spoken via speechSynthesis.
const speechQueue: string[] = [];
let isSpeaking = false; // an utterance is currently being spoken

// AbortController of the in-flight assistant request, if any.
let currentAbortController: AbortController | null = null;

// Monotonic id used to ignore stream callbacks from superseded requests.
let currentRequestId = 0;

// 处理自动录音模式切换
// Toggle handler for the auto-record switch: start idle sound monitoring
// when enabled, release the monitoring resources when disabled.
const handleAutoRecordChange = async (enabled: boolean) => {
	if (!enabled) {
		cleanupAudioResources();
		return;
	}
	await startSoundMonitoring();
};

// 清理音频资源
const cleanupAudioResources = () => {
	if (soundMonitorTimer) {
		clearInterval(soundMonitorTimer);
		soundMonitorTimer = null;
	}
	if (audioContext) {
		audioContext.close();
		audioContext = null;
	}
	if (analyser) {
		analyser = null;
	}
	currentDecibels.value = -Infinity;
};

// 计算分贝值
const calculateDecibels = (dataArray: Uint8Array): number => {
	let sum = 0;
	let maxValue = 0;
	for (let i = 0; i < dataArray.length; i++) {
		sum += dataArray[i];
		maxValue = Math.max(maxValue, dataArray[i]);
	}
	const average = sum / dataArray.length;
	const value = Math.max(average, maxValue);
	const minValue = 0.1;
	const db = 20 * Math.log10(Math.max(value, minValue) / 255);
	return Math.max(Math.min(db, 0), -60);
};

// 检测声音
const detectSound = (dataArray: Uint8Array) => {
	const db = calculateDecibels(dataArray);
	currentDecibels.value = db;

	if (db > SILENCE_THRESHOLD) {
		lastSoundTime = Date.now();
		hasSoundDetected = true;
	} else {
		const silenceDuration = Date.now() - lastSoundTime;
		if (silenceDuration >= SILENCE_DURATION && hasSoundDetected) {
			stopRecording();
			ElMessage.info('检测到1秒无声，已自动停止录音');
		}
	}
};

// 停止录音
const stopRecording = () => {
	if (mediaRecorder?.state === 'recording') {
		mediaRecorder.stop();
		mediaRecorder.stream.getTracks().forEach((track) => track.stop());
		isRecording.value = false;

		if (timer) {
			clearInterval(timer);
			timer = null;
		}
		if (silenceTimer) {
			clearInterval(silenceTimer);
			silenceTimer = null;
		}
		if (noSound10sTimer) {
			clearTimeout(noSound10sTimer);
			noSound10sTimer = null;
		}

		isPlaying.value = false;
		recordingTime.value = 0;

		if (!autoRecordEnabled.value) {
			cleanupAudioResources();
		}
	}
};

// 开始声音监测
const startSoundMonitoring = async () => {
	try {
		if (soundMonitorTimer) {
			clearInterval(soundMonitorTimer);
			soundMonitorTimer = null;
		}

		if (!audioContext || audioContext.state === 'closed') {
			const stream = await navigator.mediaDevices.getUserMedia({
				audio: {
					sampleRate: 8000,
					channelCount: 1,
					echoCancellation: true,
					noiseSuppression: true,
				},
			});

			audioContext = new AudioContext();
			analyser = audioContext.createAnalyser();
			const source = audioContext.createMediaStreamSource(stream);
			source.connect(analyser);
			analyser.fftSize = 2048;
		} else if (audioContext.state === 'suspended') {
			await audioContext.resume();
		}

		const bufferLength = analyser!.frequencyBinCount;
		const dataArray = new Uint8Array(bufferLength);

		soundMonitorTimer = window.setInterval(() => {
			if (analyser && !isRecording.value && autoRecordEnabled.value) {
				analyser.getByteFrequencyData(dataArray);
				const db = calculateDecibels(dataArray);
				currentDecibels.value = db;

				if (db > AUTO_RECORD_THRESHOLD) {
					handleRecordClick();
				}
			}
		}, 50);
	} catch (error) {
		console.error('Failed to start sound monitoring:', error);
		ElMessage.error('无法访问麦克风，请确保已授予权限');
		autoRecordEnabled.value = false;
		cleanupAudioResources();
	}
};

// 开始/停止录音
const handleRecordClick = async () => {
	if (!isRecording.value) {
		try {
			const stream = await navigator.mediaDevices.getUserMedia({
				audio: {
					sampleRate: 8000,
					channelCount: 1,
					echoCancellation: true,
					noiseSuppression: true,
				},
			});

			mediaRecorder = new MediaRecorder(stream, {
				audioBitsPerSecond: 8000,
			});
			audioChunks = [];

			if (!audioContext || audioContext.state === 'closed') {
				audioContext = new AudioContext();
				analyser = audioContext.createAnalyser();
				const source = audioContext.createMediaStreamSource(stream);
				source.connect(analyser);
				analyser.fftSize = 2048;
			}

			const bufferLength = analyser!.frequencyBinCount;
			const dataArray = new Uint8Array(bufferLength);

			lastSoundTime = Date.now();
			if (silenceTimer) {
				clearInterval(silenceTimer);
			}
			silenceTimer = window.setInterval(() => {
				if (analyser) {
					analyser.getByteFrequencyData(dataArray);
					detectSound(dataArray);
				}
			}, 50);

			// 修改 handleRecordClick 中的10秒无声定时器逻辑
			// 在录音开始时重置 hasSoundDetected
			hasSoundDetected = false;

			// 新增：启动10秒无声定时器（录音一开始就计时）
			if (noSound10sTimer) clearTimeout(noSound10sTimer);
			noSound10sTimer = window.setTimeout(() => {
				if (isRecording.value && !hasSoundDetected) {
					stopRecording();
					ElMessage.info('检测到10秒无声，已自动停止录音');
				}
			}, 10000);

			mediaRecorder.ondataavailable = (event) => {
				audioChunks.push(event.data);
			};

			mediaRecorder.onstop = async () => {
				const audioBlob = new Blob(audioChunks, { type: 'audio/wav' });
				if (audioUrl.value) {
					URL.revokeObjectURL(audioUrl.value);
				}
				audioUrl.value = URL.createObjectURL(audioBlob);
				recordingTime.value = 0;

				if (timer) clearInterval(timer);
				if (silenceTimer) clearInterval(silenceTimer);
				if (noSound10sTimer) {
					clearTimeout(noSound10sTimer);
					noSound10sTimer = null;
				}

				// 只有当检测到声音时才发送语音识别请求
				if (hasSoundDetected && autoRecognizeEnabled.value) {
					await handleRecognize();
				}

				// 自动录音模式下，停止后立即重启
				if (autoRecordEnabled.value) {
					setTimeout(() => {
						handleRecordClick();
					}, 100);
				}
			};

			mediaRecorder.start();
			isRecording.value = true;
			recordingTime.value = 0;

			if (timer) clearInterval(timer);
			timer = window.setInterval(() => {
				recordingTime.value++;
			}, 1000);
		} catch (error: any) {
			console.error('Recording error:', error);
			ElMessage.error('无法访问麦克风，请确保已授予权限');
		}
	} else {
		stopRecording();
	}
};

// 语音识别
const handleRecognize = async () => {
	if (!audioUrl.value) return;

	try {
		recognizing.value = true;
		const response = await fetch(audioUrl.value);
		const audioBlob = await response.blob();

		ElMessage.info('正在处理音频...');
		const resampledBlob = await resampleAudio(audioBlob, 8000);

		const result = await recognizeSpeech(resampledBlob);
		if (result.Response?.Result) {
			recognitionResult.value = result.Response.Result;
			// 将识别结果填入客户输入框
			state.customer = result.Response.Result;
			// 自动发送消息
			await handleSendMessage();
		} else {
			throw new Error('识别结果为空');
		}
	} catch (error: any) {
		console.error('Recognition error:', error);
		ElMessage.error('语音识别失败，请重试');
		recognitionResult.value = '';
	} finally {
		recognizing.value = false;
	}
};

// 下载录音
const handleDownload = () => {
	if (audioUrl.value) {
		const a = document.createElement('a');
		a.href = audioUrl.value;
		a.download = `recording-${new Date().toISOString()}.wav`;
		document.body.appendChild(a);
		a.click();
		document.body.removeChild(a);
	}
};

// 修改语音合成函数
const speakText = (text: string) => {
	if (!text.trim()) return;

	if ('speechSynthesis' in window) {
		const synth = window.speechSynthesis;
		const utterance = new SpeechSynthesisUtterance(text);
		utterance.lang = 'zh-CN';
		utterance.rate = 1.5; // 提高语速到2.0
		utterance.pitch = 1.0; // 音高
		utterance.volume = 1.0; // 音量

		// 设置更短的停顿时间
		utterance.onboundary = (event) => {
			if (event.name === 'sentence') {
				// 在句子边界处减少停顿时间
				utterance.rate = 1.5; // 保持较快的语速
			}
		};

		utterance.onstart = () => {
			isSpeaking = true;
		};

		utterance.onend = () => {
			isSpeaking = false;
			// 立即处理队列中的下一段，减少等待时间
			setTimeout(processNextInQueue, 50);
		};

		utterance.onerror = () => {
			isSpeaking = false;
			setTimeout(processNextInQueue, 50);
		};

		synth.speak(utterance);
	}
};

// 修改处理语音队列的函数
const processNextInQueue = () => {
	if (!isSpeaking && speechQueue.length > 0) {
		const nextText = speechQueue.shift();
		if (nextText) {
			// 减少队列处理延迟
			setTimeout(() => {
				speakText(nextText);
			}, 50);
		}
	}
};

// 修改添加文本到语音队列的函数
const addToSpeechQueue = (text: string) => {
	if (!text.trim()) return;

	// 如果当前没有在播放，直接播放
	if (!isSpeaking) {
		speakText(text);
	} else {
		// 否则加入队列
		speechQueue.push(text);
		// 如果队列中只有一段文本，立即开始处理
		if (speechQueue.length === 1) {
			setTimeout(processNextInQueue, 50);
		}
	}
};

// 修改语音提示函数
const speakPrompt = () => {
	if ('speechSynthesis' in window) {
		const synth = window.speechSynthesis;
		const utterance = new SpeechSynthesisUtterance('请稍候，正在为您查询...');
		utterance.lang = 'zh-CN';
		utterance.rate = 1.5; // 提高语速到2.0
		utterance.pitch = 1.0;
		synth.speak(utterance);
	}
};

// 添加更新响应时间的函数
const updateResponseTime = () => {
	if (state.startTime) {
		state.responseTime = Number(((Date.now() - state.startTime) / 1000).toFixed(2));
	}
};

// 修改发送消息的处理函数
const handleSendMessage = async () => {
	if (!state.customer.trim()) {
		ElMessage.warning('请输入客户的话');
		return;
	}

	try {
		// 终止之前的网络请求
		if (currentAbortController) {
			currentAbortController.abort();
			currentAbortController = null;
		}
		// 创建新的 AbortController
		currentAbortController = new AbortController();

		// 清空所有未播放的语音
		clearAllSpeech();

		// 重置所有状态
		state.isLoading = true;
		state.ai = ''; // 清空之前的回复
		state.responseTime = null; // 重置响应时间
		state.startTime = Date.now(); // 记录开始时间

		// 开始实时更新响应时间
		state.timerInterval = window.setInterval(updateResponseTime, 100);

		// 先播放提示语音
		speakPrompt();

		let currentText = ''; // 用于累积当前文本
		let isFirstResponse = true; // 标记是否是第一个响应
		const requestId = ++currentRequestId; // 生成新的请求ID

		await anythingllmApi.sendMessage(state.customer, (text) => {
			// 如果不是当前请求的响应，直接返回
			if (requestId !== currentRequestId) {
				return;
			}

			// 如果是第一个响应，停止计时器
			if (isFirstResponse) {
				if (state.timerInterval) {
					clearInterval(state.timerInterval);
					state.timerInterval = null;
				}
				isFirstResponse = false;
				state.isLoading = false;
			}

			// 将新的文本追加到现有回复中
			state.ai += text;
			currentText += text;

			// 当遇到标点符号时进行语音播报
			if (/[。！？，；：]$/.test(currentText)) {
				addToSpeechQueue(currentText);
				currentText = ''; // 重置累积文本
			}
		});

		// 如果不是当前请求，不处理剩余的文本
		if (requestId === currentRequestId && currentText) {
			addToSpeechQueue(currentText);
		}

		ElMessage.success('发送成功');
	} catch (error: any) {
		// 如果是主动取消请求，不显示错误提示
		if (error.name === 'AbortError') {
			return;
		}
		console.error('发送消息失败:', error);
		ElMessage.error('发送消息失败，请重试');
		state.ai = '';
		state.responseTime = null;
	} finally {
		state.isLoading = false;
		state.startTime = null;
		if (state.timerInterval) {
			clearInterval(state.timerInterval);
			state.timerInterval = null;
		}
		currentAbortController = null;
	}
};

// 修改清空语音的函数
const clearAllSpeech = () => {
	// 停止当前正在播放的语音
	if ('speechSynthesis' in window) {
		window.speechSynthesis.cancel();
	}
	// 清空语音队列
	speechQueue.length = 0;
	isSpeaking = false;
	// 重置语音合成状态
	if ('speechSynthesis' in window) {
		window.speechSynthesis.cancel();
		// 强制结束所有语音合成
		window.speechSynthesis.getVoices().forEach((voice) => {
			window.speechSynthesis.cancel();
		});
	}
	// 重置当前请求ID
	currentRequestId++;
};

// 修改客户输入变化的监听
const handleCustomerChange = () => {
	// 如果正在播放语音，先停止
	if ('speechSynthesis' in window) {
		window.speechSynthesis.cancel();
	}
	// 清空语音队列
	speechQueue.length = 0;
	isSpeaking = false;
	// 重置响应时间和计时器
	state.responseTime = null;
	if (state.timerInterval) {
		clearInterval(state.timerInterval);
		state.timerInterval = null;
	}
};

// Reset the whole page to its initial state: stop recording/playback,
// release audio resources and object URLs, abort network requests, cancel
// speech, clear all timers, and empty every input/result field.
const reset = () => {
	// Stop an in-progress recording and release the mic.
	if (mediaRecorder?.state === 'recording') {
		mediaRecorder.stop();
		mediaRecorder.stream.getTracks().forEach((track) => track.stop());
	}

	// Pause playback.
	if (audioRef.value) {
		audioRef.value.pause();
	}

	// Free the recording's object URL.
	if (audioUrl.value) {
		URL.revokeObjectURL(audioUrl.value);
		audioUrl.value = '';
	}

	// Close the AudioContext and stop the idle sound monitor.
	cleanupAudioResources();

	// Abort the in-flight assistant request, if any.
	if (currentAbortController) {
		currentAbortController.abort();
		currentAbortController = null;
	}

	// Cancel all speech output and queued segments.
	clearAllSpeech();

	// Reset recorder/UI flags.
	isRecording.value = false;
	isPlaying.value = false;
	recordingTime.value = 0;
	currentDecibels.value = -Infinity;
	// autoRecordEnabled.value = false;
	// autoRecognizeEnabled.value = false;
	recognizing.value = false;
	recognitionResult.value = '';

	// Reset the response-time display and its updater.
	state.responseTime = null;
	if (state.timerInterval) {
		clearInterval(state.timerInterval);
		state.timerInterval = null;
	}

	// Clear the text fields.
	state.customer = '';
	state.ai = '';
	state.textareaTxt = '';

	// Clear every remaining timer.
	if (timer) clearInterval(timer);
	if (silenceTimer) clearInterval(silenceTimer);
	if (soundMonitorTimer) clearInterval(soundMonitorTimer);
	if (noSound10sTimer) clearTimeout(noSound10sTimer);

	timer = null;
	silenceTimer = null;
	soundMonitorTimer = null;
	noSound10sTimer = null;
	mediaRecorder = null;
	audioChunks = [];
	audioContext = null;
	analyser = null;
	lastSoundTime = 0;

	// Allow the welcome prompt to play again.
	state.hasPlayedWelcome = false;

	// Reset the per-recording sound flag.
	hasSoundDetected = false;

	ElMessage.success('已重置所有状态');
};

// 添加全局方法
const telHoldOff = () => {
	// 先执行重置
	reset();

	// 设置欢迎语
	state.textareaTxt = '您好，韶关市水投集团AI客服为您服务，您可以直接向我提问，为保障您的权益，您的通话可能会被录音。';

	// 播放欢迎语
	speakText(state.textareaTxt);
	state.hasPlayedWelcome = true;

	console.log('已执行telHoldOff方法');
};

onMounted(() => {
	// Expose telHoldOff globally so the hosting telephony page can call it.
	(window as any).telHoldOff = telHoldOff;

	// Begin idle sound monitoring for auto-record mode right away.
	startSoundMonitoring();
});

// 组件卸载时清理资源
onUnmounted(() => {
	if (timer) clearInterval(timer);
	if (silenceTimer) clearInterval(silenceTimer);
	if (soundMonitorTimer) clearInterval(soundMonitorTimer);
	if (noSound10sTimer) clearTimeout(noSound10sTimer);
	if (mediaRecorder?.state === 'recording') {
		mediaRecorder.stop();
		mediaRecorder.stream.getTracks().forEach((track) => track.stop());
	}
	if (audioRef.value) {
		audioRef.value.pause();
	}
	if (audioUrl.value) {
		URL.revokeObjectURL(audioUrl.value);
		audioUrl.value = '';
	}
	cleanupAudioResources();

	// 终止当前的网络请求
	if (currentAbortController) {
		currentAbortController.abort();
		currentAbortController = null;
	}

	// 重置所有状态
	isRecording.value = false;
	isPlaying.value = false;
	recordingTime.value = 0;
	currentDecibels.value = -Infinity;
	autoRecordEnabled.value = false;
	autoRecognizeEnabled.value = false;

	// 清理语音合成
	if ('speechSynthesis' in window) {
		window.speechSynthesis.cancel();
	}
	speechQueue.length = 0;
	isSpeaking = false;

	// 清理响应时间计时器
	if (state.timerInterval) {
		clearInterval(state.timerInterval);
		state.timerInterval = null;
	}

	// 清理全局方法
	delete (window as any).telHoldOff;

	// 在 onUnmounted 中
	hasSoundDetected = false;
});
</script>

<style lang="scss" scoped>
.recorder-container {
	padding: 20px;

	// Top control bar (switches + buttons).
	.recorder-controls {
		display: flex;
		gap: 10px;
		margin-bottom: 20px;
	}

	// Live feedback shown while recording.
	.recording-info {
		margin-top: 10px;
		margin-bottom: 20px;

		.recording-time {
			color: #f56c6c;
			font-size: 16px;
			margin-bottom: 8px;
		}

		// dB readout: green by default, amber/red via .medium/.high
		// (classes bound to the current level in the template).
		.decibel-value {
			font-size: 16px;
			font-weight: 500;
			padding: 8px 12px;
			border-radius: 4px;
			background-color: #f0f0f0;
			display: inline-block;
			transition: all 0.3s ease;
			color: #67c23a;

			&.medium {
				color: #e6a23c;
			}

			&.high {
				color: #f56c6c;
				animation: pulse 1s infinite;
			}
		}
	}

	// Playback element for the finished recording.
	.audio-player {
		margin-top: 20px;

		audio {
			width: 100%;
			max-width: 500px;
			border-radius: 4px;
			outline: none;
		}
	}

	// Inline badge showing the assistant's first-chunk response time.
	.response-time {
		padding: 8px 12px;
		background-color: #f0f0f0;
		border-radius: 4px;
		color: #409eff;
		font-weight: 500;
		white-space: nowrap;
	}
}
// Pulse animation for the .high volume state.
@keyframes pulse {
	0% {
		transform: scale(1);
	}
	50% {
		transform: scale(1.05);
	}
	100% {
		transform: scale(1);
	}
}
</style>
