<template>
  <!-- Voice recording + speech recognition demo:
       local recording via RecordRTC, browser Web Speech API recognition,
       and iFlytek streaming recognition over a WebSocket. -->
  <div class="voice-recorder">
    <h2>语音录音与识别</h2>

    <!-- Action buttons; recording and recognition are mutually exclusive,
         enforced via the :disabled bindings. -->
    <div class="controls">
      <button @click="toggleRecording" :disabled="isRecognizing">
        {{ isRecording ? "停止录音" : "开始录音" }}
      </button>
      <button @click="toggleSpeechRecognition" :disabled="isRecording">
        {{ isRecognizing ? "停止识别" : "语音识别" }}
      </button>
      <button @click="toggleIFlytekRecognition" :disabled="isRecording">
        {{ isIFlytekRecognizing ? "停止讯飞识别" : "讯飞语音识别" }}
      </button>
      <button @click="clearAll">清空</button>
    </div>

    <!-- Shown only when neither recognition backend is usable. -->
    <div
      v-if="!isSpeechRecognitionSupported && !isIFlytekAvailable"
      class="warning"
    >
      <p>您的浏览器不支持语音识别功能，建议使用Chrome浏览器进行测试</p>
    </div>

    <!-- Playback of the last finished recording (blob URL). -->
    <div v-if="audioUrl" class="audio-player">
      <h3>录音回放:</h3>
      <audio :src="audioUrl" controls></audio>
    </div>

    <!-- Transcript produced by the browser's Web Speech API. -->
    <div v-if="transcript || isRecognizing" class="transcript">
      <h3>浏览器识别文字:</h3>
      <div class="transcript-content">
        <p>{{ transcript }}</p>
        <p v-if="isRecognizing" class="listening">正在倾听...请说话</p>
      </div>
    </div>

    <!-- Transcript produced by the iFlytek streaming service. -->
    <div v-if="iFlytekTranscript || isIFlytekRecognizing" class="transcript">
      <h3>讯飞识别文字:</h3>
      <div class="transcript-content">
        <p>{{ iFlytekTranscript }}</p>
        <p v-if="isIFlytekRecognizing" class="listening">正在倾听...请说话</p>
      </div>
    </div>

    <!-- One-line status / error feedback. -->
    <div class="status" v-if="recognitionStatus">
      <p>{{ recognitionStatus }}</p>
    </div>

    <!-- Static usage instructions. -->
    <div class="instructions">
      <h3>使用说明:</h3>
      <ul>
        <li>点击"开始录音"录制音频，点击"停止录音"结束并播放</li>
        <li>点击"语音识别"进行实时语音转文字（需要Chrome浏览器）</li>
        <li>点击"讯飞语音识别"使用科大讯飞语音转写服务</li>
        <li>录音和语音识别不能同时进行</li>
        <li>首次使用时请允许浏览器访问麦克风</li>
      </ul>
    </div>
  </div>
</template>

<script setup>
import { ref, onMounted, onBeforeUnmount } from "vue";
import RecordRTC from "recordrtc";
import CryptoJS from "crypto-js";

// --- Reactive UI state ---
const isRecording = ref(false);             // recording in progress
const isRecognizing = ref(false);           // browser speech recognition active
const isIFlytekRecognizing = ref(false);    // iFlytek recognition active
const isSpeechRecognitionSupported = ref(false); // native Web Speech API available (detected in onMounted)
const isIFlytekAvailable = ref(true);       // iFlytek service assumed reachable (never re-checked in this file)
const audioUrl = ref(null);                 // blob URL of the last recording
const transcript = ref("");                 // text produced by browser recognition
const iFlytekTranscript = ref("");          // text produced by iFlytek recognition
const recognitionStatus = ref("");          // human-readable status / error line

// --- Non-reactive handles for audio / recognition resources ---
let recorder = null;        // RecordRTC recorder instance
let recognition = null;     // browser SpeechRecognition instance
let iFlytekWebSocket = null; // WebSocket to the iFlytek IAT endpoint
let stream = null;          // getUserMedia stream (shared by recording AND iFlytek capture)
let audioContext = null;    // Web Audio context used for iFlytek streaming
let scriptProcessor = null; // ScriptProcessorNode pushing PCM frames to the socket
let mediaStreamSource = null; // source node wrapping the media stream

// iFlytek IAT (speech-to-text) configuration.
// SECURITY NOTE(review): apiKey/apiSecret are embedded in client-side code and
// are therefore visible to every visitor; the signed URL should be produced by
// a server-side endpoint instead — confirm and move these off the client.
const IFlytekConfig = {
  appId: "1d9812c2", // APPID
  apiKey: "8fd215cd5d704ff90a2f4d69a86ed22c", // APIKey
  apiSecret: "Mjg0ZTg4Mjg1NjI1NmE3YjVhM2ZiY2Fj", // APISecret
  host: "iat-api.xfyun.cn",  // iFlytek IAT API host
  uri: "/v2/iat",   // iFlytek IAT API path
};

// On mount, probe for the (possibly vendor-prefixed) Web Speech API.
onMounted(() => {
  const SpeechRecognitionImpl =
    window.SpeechRecognition || window.webkitSpeechRecognition;
  isSpeechRecognitionSupported.value = Boolean(SpeechRecognitionImpl);
});

// Start/stop microphone recording with RecordRTC.
// Fixes over the original: the previous recording's blob URL is revoked
// before being replaced (the original leaked one object URL per recording),
// and the media-stream reference is cleared after its tracks are stopped so
// later cleanup paths do not act on a dead stream.
const toggleRecording = async () => {
  if (!isRecording.value) {
    try {
      // Ask for microphone access (prompts the user on first use).
      stream = await navigator.mediaDevices.getUserMedia({ audio: true });

      // Configure RecordRTC for 16 kHz audio output.
      recorder = new RecordRTC(stream, {
        type: "audio",              // record audio only
        mimeType: "audio/webm",     // container format
        sampleRate: 44100,          // capture sample rate
        desiredSampRate: 16000,     // RecordRTC resamples down to 16 kHz
      });

      recorder.startRecording();
      isRecording.value = true;

      // Release the blob URL of any previous take before discarding it.
      if (audioUrl.value) {
        URL.revokeObjectURL(audioUrl.value);
      }
      audioUrl.value = null;
      transcript.value = "";
    } catch (error) {
      console.error("录音初始化失败:", error);
      alert("录音初始化失败: " + (error.message || "请检查麦克风权限设置"));
    }
  } else if (recorder) {
    recorder.stopRecording(() => {
      audioUrl.value = URL.createObjectURL(recorder.getBlob());
      isRecording.value = false;

      // Stop all tracks and drop the reference so unmount / iFlytek cleanup
      // paths don't try to stop an already-released stream.
      if (stream) {
        stream.getTracks().forEach((track) => track.stop());
        stream = null;
      }
    });
  }
};

// Entry point for the "语音识别" button: start or stop browser recognition,
// after verifying the Web Speech API is available.
const toggleSpeechRecognition = () => {
  if (!isSpeechRecognitionSupported.value) {
    alert("您的浏览器不支持语音识别功能，请使用Chrome浏览器");
    return;
  }

  isRecognizing.value ? stopSpeechRecognition() : startSpeechRecognition();
};

// Initialize and start the browser's built-in speech recognition, streaming
// the user's speech to text. Finalized segments are appended to `transcript`;
// a periodic auto-restart in onend simulates continuous recognition.
const startSpeechRecognition = () => {
  // Resolve the (possibly vendor-prefixed) constructor.
  const SpeechRecognition =
    window.SpeechRecognition || window.webkitSpeechRecognition;

  if (!SpeechRecognition) {
    alert("您的浏览器不支持语音识别功能");
    return;
  }

  try {
    recognition = new SpeechRecognition(); // create the recognizer
    recognition.continuous = true; // keep listening across utterances
    recognition.interimResults = true; // also emit partial results
    recognition.lang = "zh-CN"; // recognize Mandarin Chinese

    recognition.onstart = () => {
      isRecognizing.value = true;
      transcript.value = "";
      recognitionStatus.value = "语音识别已启动，请说话...";
    };

    recognition.onresult = (event) => {
      recognitionStatus.value = "";

      // Collect only finalized segments. Interim results are delivered too
      // (interimResults = true) but are deliberately not accumulated: they
      // repeat on every event and would duplicate text. (The original code
      // built an interim string it never used — removed as dead code.)
      let finalTranscript = "";
      for (let i = event.resultIndex; i < event.results.length; i++) {
        const result = event.results[i];
        if (result.isFinal) {
          finalTranscript += result[0].transcript;
        }
      }

      if (finalTranscript) {
        transcript.value += finalTranscript;
      }
    };

    recognition.onerror = (event) => {
      console.error("语音识别错误:", event.error);
      recognitionStatus.value = `语音识别错误: ${event.error}`;

      // Map the most common error codes to friendlier messages.
      if (event.error === "network") {
        recognitionStatus.value =
          "网络错误：无法连接到语音识别服务。请检查网络连接或尝试使用最新版Chrome浏览器。";
      } else if (event.error === "not-allowed") {
        recognitionStatus.value = "权限错误：请允许浏览器访问麦克风。";
      } else if (event.error === "no-speech") {
        recognitionStatus.value = "未检测到语音：请说话或检查麦克风设置。";
      }

      isRecognizing.value = false;
    };

    recognition.onend = () => {
      if (isRecognizing.value) {
        recognitionStatus.value = "语音识别已停止";
        // The engine stops itself periodically; restart shortly afterwards
        // to give the user a continuous-recognition experience.
        setTimeout(() => {
          if (isRecognizing.value) {
            try {
              recognition.start();
              recognitionStatus.value = "语音识别继续中，请说话...";
            } catch (e) {
              console.error("重新启动识别失败:", e);
            }
          }
        }, 100);
      } else {
        recognitionStatus.value = "";
      }
    };

    recognition.start();
  } catch (error) {
    console.error("启动语音识别失败:", error);
    recognitionStatus.value = "启动语音识别失败: " + error.message;
  }
};

// Tear down the browser speech-recognition session and clear the status line.
const stopSpeechRecognition = () => {
  recognitionStatus.value = "";
  if (!recognition) {
    return;
  }
  isRecognizing.value = false;
  try {
    recognition.stop();
  } catch (err) {
    console.error("停止语音识别时出错:", err);
  }
  recognition = null;
};

// Entry point for the iFlytek button: start or stop the streaming session.
const toggleIFlytekRecognition = async () => {
  if (isIFlytekRecognizing.value) {
    stopIFlytekRecognition();
    return;
  }
  await startIFlytekRecognition();
};

// iFlytek auth: build the signed WebSocket URL per the iFlytek IAT
// authentication scheme (HMAC-SHA256 over "host / date / request-line").
// Fixes: uses toUTCString() instead of the deprecated toGMTString(), and
// URL-encodes the query values — the RFC 1123 date contains spaces, commas
// and colons, and the Base64 authorization may contain '+', '/' and '='.
const getIFlytekAuthUrl = () => {
  // RFC 1123 timestamp required by the signature scheme.
  const date = new Date().toUTCString();

  // Signature origin string: host, date and the request line.
  const signatureOrigin = `host: ${IFlytekConfig.host}\ndate: ${date}\nGET ${IFlytekConfig.uri} HTTP/1.1`;

  // HMAC-SHA256 with the API secret.
  const signature = CryptoJS.HmacSHA256(
    signatureOrigin,
    IFlytekConfig.apiSecret
  );

  // Assemble the authorization header, then Base64-encode the whole thing.
  const authorizationOrigin = `api_key="${
    IFlytekConfig.apiKey
  }", algorithm="hmac-sha256", headers="host date request-line", signature="${CryptoJS.enc.Base64.stringify(
    signature
  )}"`;
  const authorization = CryptoJS.enc.Base64.stringify(
    CryptoJS.enc.Utf8.parse(authorizationOrigin)
  );

  // Final wss:// URL with properly escaped query parameters.
  return `wss://${IFlytekConfig.host}${IFlytekConfig.uri}?authorization=${encodeURIComponent(
    authorization
  )}&date=${encodeURIComponent(date)}&host=${IFlytekConfig.host}`;
};

// Start a streaming iFlytek recognition session: open the microphone,
// connect the authenticated WebSocket, and wire up result handling.
const startIFlytekRecognition = async () => {
  try {
    // Acquire the microphone stream (stored in the shared module handle).
    stream = await navigator.mediaDevices.getUserMedia({ audio: true });

    // Open the WebSocket using the signed URL.
    const url = getIFlytekAuthUrl();
    iFlytekWebSocket = new WebSocket(url);

    iFlytekWebSocket.onopen = () => {
      isIFlytekRecognizing.value = true;
      iFlytekTranscript.value = "";
      recognitionStatus.value = "讯飞语音识别已启动，请说话...";

      // Begin capturing and streaming PCM frames.
      initAudioProcessing();
    };

    iFlytekWebSocket.onmessage = (event) => {
      const data = JSON.parse(event.data);

      console.log("数据---", data);

      // A non-zero code means the server rejected the session or frame.
      if (data.code !== 0) {
        recognitionStatus.value = `讯飞识别错误: ${data.message || data.code}`;
        stopIFlytekRecognition();
        return;
      }

      if (data.data.result) {
        const result = data.data.result.ws;
        let transcriptText = "";

        // Concatenate the words of this result chunk.
        result.forEach((item) => {
          // Each item carries one or more candidate words.
          if (item.cw && item.cw.length > 0) {
            // Take the first candidate (highest confidence).
            transcriptText += item.cw[0].w;
          }
        });

        // NOTE(review): the session enables dwa:"wpgs" (dynamic correction);
        // under that mode the server can re-send corrected segments, so plain
        // appending may duplicate text — verify against the result's pgs/rg
        // fields before trusting this accumulation.
        if (transcriptText) {
          iFlytekTranscript.value += transcriptText;
        }
      }

      // status === 2 marks the server's final result for this session.
      if (data.data.status === 2) {
        // Recognition finished on the server side.
        console.log("讯飞语音识别完成");
      }
    };

    iFlytekWebSocket.onerror = (error) => {
      console.error("讯飞WebSocket错误:", error);
      recognitionStatus.value = "讯飞语音识别连接错误";
      stopIFlytekRecognition();
    };

    iFlytekWebSocket.onclose = () => {
      isIFlytekRecognizing.value = false;
      recognitionStatus.value = "讯飞语音识别已关闭";
      console.log("ws断开链接--");
    };
  } catch (error) {
    // NOTE(review): if getUserMedia succeeds but a later step throws, the
    // acquired stream keeps running — consider stopping its tracks here.
    console.error("启动讯飞语音识别失败:", error);
    recognitionStatus.value = "启动讯飞语音识别失败: " + error.message;
  }
};

// Set up Web Audio capture and stream 16 kHz PCM frames to the iFlytek socket.
// NOTE(review): ScriptProcessorNode is deprecated in favor of AudioWorklet;
// it still works in current browsers but should be migrated eventually.
const initAudioProcessing = () => {
  audioContext = new (window.AudioContext || window.webkitAudioContext)();
  mediaStreamSource = audioContext.createMediaStreamSource(stream);
  scriptProcessor = audioContext.createScriptProcessor(4096, 1, 1);

  mediaStreamSource.connect(scriptProcessor);
  // Connected to the destination so onaudioprocess keeps firing; no audio is
  // written to the output buffer.
  scriptProcessor.connect(audioContext.destination);

  // Tracks whether the next frame is the session's first (status 0).
  let isSendingFirstFrame = true;

  scriptProcessor.onaudioprocess = (event) => {
    // Drop frames once recognition stops or the socket is not open.
    if (!isIFlytekRecognizing.value || !iFlytekWebSocket || iFlytekWebSocket.readyState !== WebSocket.OPEN) {
      return;
    }

    // Mono channel 0 → downsample to 16 kHz PCM16 → Base64.
    const audioData = event.inputBuffer.getChannelData(0);
    const audioBuffer = downsampleBuffer(audioData, audioContext.sampleRate, 16000);
    const audioBase64 = arrayBufferToBase64(audioBuffer);

    // NOTE(review): common/business are sent with EVERY frame; the iFlytek
    // IAT protocol expects them on the first frame only — confirm the server
    // tolerates the repetition.
    const frame = {
      business: {
        language: "zh_cn",      // language: Mandarin Chinese
        domain: "iat",          // domain ("iat" = speech dictation)
        accent: "mandarin",     // accent
        vad_eos: 10000,         // end-of-speech silence timeout (ms)
        dwa: "wpgs"             // dynamic correction
      },
      common: {
        app_id: IFlytekConfig.appId  // application ID
      },
      data: {
        format: "audio/L16;rate=16000", // 16-bit linear PCM at 16 kHz
        encoding: "raw",                // raw (uncompressed) encoding
        audio: audioBase64              // Base64-encoded PCM payload
      }
    };

    // Frame status: 0 = first frame of the session, 1 = continuation frame.
    if (isSendingFirstFrame) {
      frame.data.status = 0; // first frame
      isSendingFirstFrame = false;
    } else {
      frame.data.status = 1; // intermediate frame
    }

    // Ship the frame to the server.
    iFlytekWebSocket.send(JSON.stringify(frame));
  };
};

// Downsample a Float32 PCM buffer to `outSampleRate` and convert it to
// 16-bit signed PCM, returning the backing ArrayBuffer of an Int16Array.
// Fixes over the original:
//  - samples are clamped on BOTH sides of [-1, 1]; the original only applied
//    Math.min(1, ...), so samples below -1 overflowed the Int16 range;
//  - the equal-rate case also converts to Int16 — the original returned the
//    raw Float32Array, whose bytes would have been Base64-encoded as floats
//    instead of PCM16;
//  - an empty averaging window yields 0 instead of NaN.
const downsampleBuffer = (buffer, sampleRate, outSampleRate) => {
  const sampleRateRatio = sampleRate / outSampleRate;
  const newLength = Math.round(buffer.length / sampleRateRatio);
  const result = new Int16Array(newLength);
  let offsetResult = 0;
  let offsetBuffer = 0;

  while (offsetResult < result.length) {
    const nextOffsetBuffer = Math.round((offsetResult + 1) * sampleRateRatio);
    let accum = 0;
    let count = 0;

    // Average all source samples that map onto this output sample.
    for (let i = offsetBuffer; i < nextOffsetBuffer && i < buffer.length; i++) {
      accum += buffer[i];
      count++;
    }

    const sample = count > 0 ? Math.max(-1, Math.min(1, accum / count)) : 0;
    result[offsetResult] = sample * 0x7fff;
    offsetResult++;
    offsetBuffer = nextOffsetBuffer;
  }

  return result.buffer;
};

// Encode the bytes of an ArrayBuffer as a Base64 string.
const arrayBufferToBase64 = (buffer) => {
  const bytes = new Uint8Array(buffer);
  const chars = [];
  for (const byte of bytes) {
    chars.push(String.fromCharCode(byte));
  }
  return btoa(chars.join(""));
};

// Stop the iFlytek session: send the end-of-stream frame, tear down the
// Web Audio graph, and release the microphone. The server replies with the
// final result and then closes the socket (handled by the onclose handler).
const stopIFlytekRecognition = () => {
  isIFlytekRecognizing.value = false;

  // Tell the server the audio stream is finished.
  if (iFlytekWebSocket && iFlytekWebSocket.readyState === WebSocket.OPEN) {
    const frame = {
      data: {
        // Fix: the iFlytek IAT protocol defines data.status 2 as the
        // last-frame marker; the original sent 4, which is undefined.
        status: 2,
        audio: "",
      },
    };
    iFlytekWebSocket.send(JSON.stringify(frame));
  }

  // Dismantle the audio pipeline.
  if (scriptProcessor) {
    scriptProcessor.disconnect();
    scriptProcessor = null;
  }

  if (mediaStreamSource) {
    mediaStreamSource.disconnect();
    mediaStreamSource = null;
  }

  if (audioContext) {
    audioContext.close();
    audioContext = null;
  }

  // Release the microphone.
  if (stream) {
    stream.getTracks().forEach((track) => track.stop());
    stream = null;
  }

  recognitionStatus.value = "";
};

// Reset all transcripts, the recording, and the status line.
// Fix: the blob URL is revoked before the reference is dropped — the
// original nulled audioUrl without URL.revokeObjectURL, leaking the blob.
const clearAll = () => {
  transcript.value = "";
  iFlytekTranscript.value = "";
  if (audioUrl.value) {
    URL.revokeObjectURL(audioUrl.value);
  }
  audioUrl.value = null;
  recognitionStatus.value = "";
};

// Release every resource when the component unmounts: in-flight recording,
// either recognition session, the media stream, and the recording's blob URL
// (the original never revoked it, leaking the blob on unmount).
onBeforeUnmount(() => {
  if (isRecording.value && recorder) {
    recorder.stopRecording();
  }
  if (isRecognizing.value) {
    stopSpeechRecognition();
  }
  if (isIFlytekRecognizing.value) {
    stopIFlytekRecognition();
  }
  // Stop the microphone stream if any path left it running.
  if (stream) {
    stream.getTracks().forEach((track) => track.stop());
    stream = null;
  }
  // Free the last recording's object URL.
  if (audioUrl.value) {
    URL.revokeObjectURL(audioUrl.value);
  }
});
</script>

<style scoped>
/* Page container */
.voice-recorder {
  padding: 20px;
  max-width: 700px;
  margin: 0 auto;
  font-family: Arial, sans-serif;
}

.voice-recorder h2 {
  text-align: center;
  color: #333;
}

/* Button row */
.controls {
  margin-bottom: 20px;
  text-align: center;
}

.controls button {
  margin: 0 5px;
  padding: 10px 15px;
  font-size: 16px;
  border: none;
  border-radius: 4px;
  background-color: #007bff;
  color: white;
  cursor: pointer;
  transition: background-color 0.3s;
}

.controls button:hover:not(:disabled) {
  background-color: #0056b3;
}

.controls button:disabled {
  opacity: 0.6;
  cursor: not-allowed;
}

/* "Browser unsupported" banner */
.warning {
  background-color: #fff3cd;
  color: #856404;
  padding: 10px;
  border-radius: 4px;
  margin-bottom: 20px;
  text-align: center;
}

/* Shared card styling for playback, transcripts and status */
.audio-player,
.transcript,
.status {
  margin-top: 20px;
  padding: 15px;
  border: 1px solid #ddd;
  border-radius: 5px;
  background-color: #f8f9fa;
}

.transcript-content {
  min-height: 100px;
  padding: 10px;
  background-color: white;
  border-radius: 4px;
  border: 1px solid #eee;
}

.transcript h3,
.audio-player h3,
.status h3 {
  margin-top: 0;
  color: #333;
}

.transcript p {
  font-size: 18px;
  line-height: 1.5;
  white-space: pre-wrap;
  margin: 0 0 10px 0;
}

/* "Listening..." hint inside transcripts */
.listening {
  color: #007bff;
  font-style: italic;
}

.status p {
  color: #666;
  margin: 0;
}

/* Usage instructions panel */
.instructions {
  margin-top: 30px;
  padding: 15px;
  background-color: #e9ecef;
  border-radius: 5px;
}

.instructions h3 {
  margin-top: 0;
  color: #333;
}

.instructions ul {
  padding-left: 20px;
}

.instructions li {
  margin-bottom: 8px;
  line-height: 1.5;
}
</style>
