/**
 * 语音聊天组件 - 重构版
 * 
 * 主要功能：
 * 1. 实现语音录制和识别
 * 2. 与自定义WebSocket语音API交互
 * 3. 处理语音指令
 * 4. 提供语音合成功能(TTS)
 * 
 * 组件特点：
 * - 使用Web Audio API处理音频流
 * - 通过WebSocket与语音识别服务通信
 * - 支持语音指令识别和响应
 * - 集成Web Speech API实现文本转语音
 * - 完善的错误处理和资源清理
 */

import {
  useState,
  useEffect,
  useRef,
  forwardRef,
  useImperativeHandle,
} from "react";
import styles from "./style.module.scss";
import { Button, message } from "antd";
import { AudioOutlined } from "@ant-design/icons";
import CryptoJS from "crypto-js";
import { useTranslation } from 'react-i18next';

// ==================== 类型定义 ====================

/**
 * Chat message — one entry in the conversation history.
 */
interface Message {
  type: "user" | "ai"; // message author: the user or the AI
  content: string;    // message text
  timestamp: number;  // creation time (ms since epoch, from Date.now())
}

/**
 * Shape of a message received from the speech-recognition WebSocket API
 * (iFlytek IAT response format).
 */
interface VoiceRecognitionResponse {
  code: number;      // status code; non-zero is treated as an error by the caller
  message: string;   // status / error message
  data: {
    result: {
      ws: Array<{    // recognized segments
        cw: Array<{  // candidate words for a segment (only cw[0] is used)
          w: string; // recognized word text
        }>;
      }>;
    };
    status: number;  // recognition status; 2 means the final result
  };
}

// ==================== 常量配置 ====================

/**
 * Speech-recognition (iFlytek IAT) service configuration.
 *
 * SECURITY NOTE(review): the API key and secret are hard-coded in
 * client-side source, so they ship to every browser. They should be moved
 * to a server-side proxy or injected from environment configuration —
 * TODO confirm with the service owner and rotate these credentials.
 */
const VOICE_RECOGNITION_CONFIG = {
  APPID: "28d467b2",                            // application ID
  APIKey: "2f019dc099b0269f7758f61b4b4bf660",  // API key (goes into the auth header)
  APISecret: "NDQ3MTM1OWYzZjA5MjZiNThiMDg0MGIx", // API secret (HMAC-SHA256 signing key)
  APIUrl: "wss://iat-api.xfyun.cn/v2/iat", // WebSocket endpoint for recognition
};

/**
 * Voice-command → action-identifier map.
 *
 * "大元宝" / "小强" are wake words; the remaining phrases trigger actions
 * once the assistant is in listening mode (see handleAICommand).
 *
 * NOTE(review): "走一下" ("walk a bit") maps to "dance" while "跺脚"
 * ("stomp") maps to "walk" — this looks swapped; confirm against the
 * intended animation names before relying on it.
 *
 * Declared `as const` so the values keep their literal types
 * ("wake" | "dance" | ...) and the object is readonly, which makes
 * lookups into RESPONSE_TEXTS / EMOTION_MAP type-safe.
 */
const ACTION_COMMANDS = {
  "大元宝": "wake",
  "小强": "wake",
  "走一下": "dance",
  "跺脚": "walk",
  "哭一下": "crying",
  "无语一下": "break"
} as const;

/**
 * Spoken/displayed response text for each action identifier.
 * `as const` freezes the key set and keeps each value as a literal string
 * type, so indexed lookups are precisely typed.
 */
const RESPONSE_TEXTS = {
  wake: "你好，我是小助理，有什么可以帮你的吗？",
  dance: "好的，我来跳支舞给你看",
  walk: "好的，我来跺跺脚",
  crying: "呜呜呜...",
  break: "无语中..."
} as const;

/**
 * Facial-expression identifier for each action identifier, dispatched to
 * the avatar via the "changeAnimation" CustomEvent.
 * `as const` keeps the values as literal types and the object readonly.
 */
const EMOTION_MAP = {
  wake: "happy",
  dance: "happy",
  walk: "neutral",
  crying: "sad",
  break: "surprised"
} as const;

// ==================== Component implementation ====================

/**
 * Props accepted by the VoiceChat component.
 */
interface VoiceChatProps {
  onClose?: () => void; // invoked when the close ("×") button is clicked
}

/**
 * VoiceChat — main voice-chat component.
 *
 * Records microphone audio, streams it over a WebSocket to the iFlytek IAT
 * speech-recognition service, matches the recognized text against wake
 * words and action commands, dispatches "changeAnimation" CustomEvents for
 * the avatar, and speaks responses through the Web Speech API.
 *
 * Exposes `startRecording()` to the parent through the forwarded ref.
 */
const VoiceChat = forwardRef<{ startRecording: () => void }, VoiceChatProps>(
  ({ onClose }, ref) => {
    const { t } = useTranslation();
    // =============== State ===============
    const [messages, setMessages] = useState<Message[]>([]);       // chat history
    const [isRecording, setIsRecording] = useState(false);         // currently recording?
    const [transcript, setTranscript] = useState("");              // NOTE(review): never written by recognition results — see handleSend
    const [isProcessing, setIsProcessing] = useState(false);       // recognition round-trip in progress?
    const [isListening, setIsListening] = useState(false);         // awaiting an action command after a wake word
    
    // =============== Refs ===============
    const messagesEndRef = useRef<HTMLDivElement>(null);           // scroll target (attached to the messages container)
    const audioContextRef = useRef<AudioContext | null>(null);     // Web Audio context
    const wsRef = useRef<WebSocket | null>(null);                  // WebSocket connection
    const stopTimerRef = useRef<NodeJS.Timeout | null>(null);      // auto-stop timer
    const streamRef = useRef<MediaStream | null>(null);            // microphone MediaStream
    const synthRef = useRef<SpeechSynthesis | null>(null);         // speech-synthesis handle

    // =============== Imperative API ===============
    /**
     * Methods exposed to the parent component via the forwarded ref.
     */
    useImperativeHandle(ref, () => ({
      startRecording: () => {
        if (!isRecording) {
          startRecording();
        }
      },
    }));

    // =============== Lifecycle ===============
    /**
     * Grab the SpeechSynthesis handle on mount; cancel any queued
     * utterances on unmount.
     */
    useEffect(() => {
      synthRef.current = window.speechSynthesis;
      return () => {
        // Cancel all pending speech synthesis when the component unmounts
        if (synthRef.current) {
          synthRef.current.cancel();
        }
      };
    }, []);

    /**
     * Auto-scroll whenever the message list changes.
     */
    useEffect(() => {
      scrollToBottom();
    }, [messages]);

    /**
     * Release resources on unmount.
     * NOTE(review): stopRecording() closes the AudioContext without
     * clearing audioContextRef, so close() here can run a second time on an
     * already-closed context — confirm and guard against the rejection.
     */
    useEffect(() => {
      return () => {
        // Clear the auto-stop timer
        if (stopTimerRef.current) {
          clearTimeout(stopTimerRef.current);
        }
        // Close the WebSocket connection
        if (wsRef.current) {
          wsRef.current.close();
        }
        // Close the audio context
        if (audioContextRef.current) {
          audioContextRef.current.close();
        }
        // Stop all audio tracks
        if (streamRef.current) {
          streamRef.current.getTracks().forEach((track) => track.stop());
        }
      };
    }, []);

    // =============== Core methods ===============

    /**
     * Text-to-speech: speak the given text via the Web Speech API.
     * @param text Text to convert to audio
     */
    const speakAIResponse = (text: string) => {
      if (synthRef.current) {
        const utterance = new SpeechSynthesisUtterance(text);
        utterance.lang = "zh-CN"; // Mandarin Chinese voice
        utterance.rate = 1;       // normal speaking rate
        utterance.pitch = 1;      // normal pitch
        utterance.volume = 1;     // full volume
        synthRef.current.speak(utterance); // queue the utterance
      }
    };

    /**
     * Parse recognized text and perform the matching command, if any.
     *
     * Wake words flip the component into listening mode; in listening mode
     * the first matching ACTION_COMMANDS phrase fires its animation/emotion
     * event and exits listening mode.
     *
     * NOTE(review): this closure is captured by ws.onmessage at the time
     * the socket is created, so the `isListening` value read here may be
     * stale relative to the latest render — consider mirroring it in a
     * ref. TODO confirm against the recording lifecycle.
     *
     * @param text Recognized speech text
     * @returns The AI response text, or null if nothing matched
     */
    const handleAICommand = (text: string) => {
      // Check for wake words
      if (text.includes("大元宝") || text.includes("小强")) {
        setIsListening(true);
        const response = RESPONSE_TEXTS.wake;
        speakAIResponse(response);
        
        // Dispatch the animation/emotion event for the avatar
        const event = new CustomEvent("changeAnimation", {
          detail: {
            action: "wake",
            emotion: EMOTION_MAP.wake,
            text: response
          }
        });
        window.dispatchEvent(event);
        
        return response;
      }

      // When in listening mode, check for action commands
      if (isListening) {
        for (const [key, value] of Object.entries(ACTION_COMMANDS)) {
          if (text.includes(key)) {
            const response = RESPONSE_TEXTS[value as keyof typeof RESPONSE_TEXTS];
            speakAIResponse(response);
            
            // Dispatch the animation/emotion event for the avatar
            const event = new CustomEvent("changeAnimation", {
              detail: {
                action: value,
                emotion: EMOTION_MAP[value as keyof typeof EMOTION_MAP],
                text: response
              }
            });
            window.dispatchEvent(event);
            
            setIsListening(false);
            return response;
          }
        }

        // No command matched — apologize and leave listening mode
        setIsListening(false);
        const response = "抱歉，我没有理解您的指令";
        speakAIResponse(response);
        
        // Show a confused expression
        const event = new CustomEvent("changeAnimation", {
          detail: {
            emotion: "surprised",
            text: response
          }
        });
        window.dispatchEvent(event);
        
        return response;
      }

      return null;
    };

    /**
     * Build the authenticated WebSocket URL (HMAC-SHA256 over
     * host/date/request-line, per the service's auth scheme).
     *
     * NOTE(review): the request-line "GET /v2/iat HTTP/1.1" is hard-coded;
     * it must stay in sync with the path in APIUrl.
     *
     * @returns The WebSocket URL with authorization query parameters
     */
    const getWebsocketUrl = () => {
      const url = VOICE_RECOGNITION_CONFIG.APIUrl;
      const host = url.replace("wss://", "").split("/")[0];
      const date = new Date().toUTCString();
      const algorithm = "hmac-sha256";
      const headers = "host date request-line";
      
      // Build the string to sign
      const signatureOrigin = `host: ${host}\ndate: ${date}\nGET /v2/iat HTTP/1.1`;
      
      // Compute the HMAC-SHA256 signature
      const signatureSha = CryptoJS.HmacSHA256(
        signatureOrigin,
        VOICE_RECOGNITION_CONFIG.APISecret
      );
      const signature = CryptoJS.enc.Base64.stringify(signatureSha);
      
      // Assemble and base64-encode the authorization header value
      const authorizationOrigin = `api_key="${VOICE_RECOGNITION_CONFIG.APIKey}", algorithm="${algorithm}", headers="${headers}", signature="${signature}"`;
      const authorization = btoa(authorizationOrigin);

      // Return the fully-parameterized WebSocket URL
      return `${url}?authorization=${authorization}&date=${date}&host=${host}`;
    };

    /**
     * Open the recognition WebSocket and wire up its event handlers.
     * Sends the start frame (status 0) on open; audio frames are sent from
     * the ScriptProcessor callback in startRecording.
     */
    const initWebSocket = () => {
      const url = getWebsocketUrl();
      const ws = new WebSocket(url);
      wsRef.current = ws;

      // Connection established: send the start frame
      ws.onopen = () => {
        console.log("WebSocket连接已建立");
        
        // Build and send the start frame (status 0 = begin)
        const startParams = {
          common: {
            app_id: VOICE_RECOGNITION_CONFIG.APPID,
          },
          business: {
            language: "zh_cn",
            domain: "iat",
            accent: "mandarin",
            sample_rate: "16000",
            vad_eos: 3000,
          },
          data: {
            status: 0,  // 0 = start of audio
            format: "audio/L16;rate=16000",
            encoding: "raw",
            audio: "",
          },
        };
        ws.send(JSON.stringify(startParams));
      };

      // Recognition result received
      ws.onmessage = (e) => {
        const response = JSON.parse(e.data) as VoiceRecognitionResponse;
        
        // Non-zero code means the service reported an error
        if (response.code !== 0) {
          message.error(`语音识别失败：${response.message}`);
          setIsProcessing(false);
          return;
        }

        // Handle a recognition result
        const result = response.data.result;
        if (result && result.ws) {
          // Concatenate the top candidate of each segment.
          // NOTE(review): every partial result is appended as a separate
          // user message rather than replacing the previous partial —
          // confirm whether intermediate results should accumulate.
          const text = result.ws.map((item) => item.cw[0].w).join("");
          if (text) {
            // Append the user's utterance to the chat history
            const userMessage: Message = {
              type: "user",
              content: text,
              timestamp: Date.now(),
            };
            setMessages((prev) => [...prev, userMessage]);

            // Run command matching on the recognized text
            const aiResponse = handleAICommand(text);
            if (aiResponse) {
              // Append the AI response to the chat history
              const aiMessage: Message = {
                type: "ai",
                content: aiResponse,
                timestamp: Date.now(),
              };
              setMessages((prev) => [...prev, aiMessage]);
            }
          }
        }

        // status 2 = final result; recognition is finished
        if (response.data.status === 2) {
          setIsProcessing(false);
          ws.close();
        }
      };

      // Transport error
      ws.onerror = (e) => {
        console.error("WebSocket错误:", e);
        message.error("语音识别服务连接失败");
        setIsProcessing(false);
      };

      // Connection closed
      ws.onclose = () => {
        console.log("WebSocket连接已关闭");
        setIsProcessing(false);
      };
    };

    /**
     * Scroll the message list to the bottom.
     * NOTE(review): messagesEndRef is attached to the messages CONTAINER,
     * so scrollIntoView scrolls the container itself into view rather than
     * to its last message — a bottom sentinel <div> was likely intended.
     */
    const scrollToBottom = () => {
      messagesEndRef.current?.scrollIntoView({ behavior: "smooth" });
    };

    /**
     * Start recording: capture the microphone, pump PCM frames to the
     * recognition WebSocket, and auto-stop after 2 seconds.
     * NOTE(review): createScriptProcessor is deprecated in favor of
     * AudioWorklet — consider migrating.
     */
    const startRecording = async () => {
      try {
        // Request microphone access
        const stream = await navigator.mediaDevices.getUserMedia({
          audio: {
            echoCancellation: true,   // enable echo cancellation
            noiseSuppression: true,  // enable noise suppression
            autoGainControl: true,   // enable automatic gain control
            sampleRate: 16000,       // 16 kHz sample rate
            channelCount: 1,         // mono
          },
        });

        streamRef.current = stream;

        // Create the audio context
        const audioContext = new AudioContext({
          sampleRate: 16000,
          latencyHint: "interactive",
        });
        audioContextRef.current = audioContext;

        // Create the audio processing nodes
        const source = audioContext.createMediaStreamSource(stream);
        const processor = audioContext.createScriptProcessor(4096, 1, 1);

        // Wire the graph: mic -> processor -> destination
        source.connect(processor);
        processor.connect(audioContext.destination);

        // Open the recognition WebSocket
        initWebSocket();

        // Per-buffer audio callback: convert and ship PCM to the service
        processor.onaudioprocess = (e) => {
          if (wsRef.current && wsRef.current.readyState === WebSocket.OPEN) {
            // Grab the raw float samples
            const inputData = e.inputBuffer.getChannelData(0);
            
            // Convert Float32Array [-1, 1] to 16-bit signed PCM
            const pcmData = new Int16Array(inputData.length);
            for (let i = 0; i < inputData.length; i++) {
              pcmData[i] = inputData[i] * 0x7fff; // scale to int16 range
            }

            // Base64-encode the PCM bytes
            const uint8Array = new Uint8Array(pcmData.buffer);
            const base64Audio = btoa(
              Array.from(uint8Array)
                .map((byte) => String.fromCharCode(byte))
                .join("")
            );

            // Build and send the audio frame (status 1 = audio continuing)
            const audioParams = {
              common: {
                app_id: VOICE_RECOGNITION_CONFIG.APPID,
              },
              business: {
                language: "zh_cn",
                domain: "iat",
                accent: "mandarin",
                sample_rate: "16000",
                vad_eos: 3000,
                nbest: 1,
                wbest: 1,
                ptt: 0,
                pd: "speech",
                rlang: "cn",
              },
              data: {
                status: 1,  // 1 = intermediate audio frame
                format: "audio/L16;rate=16000",
                encoding: "raw",
                audio: base64Audio,
              },
            };
            wsRef.current.send(JSON.stringify(audioParams));
          }
        };

        // Update UI state
        setIsRecording(true);
        setIsProcessing(true);

        // Auto-stop after 2 seconds
        // NOTE(review): handleSend reads `transcript`, which is never set
        // from recognition results, so the delayed handleSend call below is
        // effectively a no-op — confirm intent.
        stopTimerRef.current = setTimeout(() => {
          stopRecording();
          // Give the last frames a moment to be processed
          setTimeout(() => {
            handleSend();
          }, 500);
        }, 2000);
      } catch (error) {
        console.error("录音失败:", error);
        message.error("无法访问麦克风，请检查权限设置");
        setIsProcessing(false);
      }
    };

    /**
     * Stop recording: tear down the timer, the microphone stream and the
     * audio context, then send the end frame (status 2) to the service.
     */
    const stopRecording = () => {
      // Clear the auto-stop timer
      if (stopTimerRef.current) {
        clearTimeout(stopTimerRef.current);
        stopTimerRef.current = null;
      }

      // Stop the microphone stream
      if (streamRef.current) {
        streamRef.current.getTracks().forEach((track) => track.stop());
        streamRef.current = null;
      }

      // Close the audio context
      // NOTE(review): audioContextRef is not cleared here, so the unmount
      // cleanup may call close() again on a closed context.
      if (audioContextRef.current) {
        audioContextRef.current.close();
      }

      // Send the end frame
      if (wsRef.current && wsRef.current.readyState === WebSocket.OPEN) {
        const endParams = {
          common: {
            app_id: VOICE_RECOGNITION_CONFIG.APPID,
          },
          business: {
            language: "zh_cn",
            domain: "iat",
            accent: "mandarin",
            sample_rate: "16000",
            vad_eos: 3000,
            nbest: 1,
            wbest: 1,
            ptt: 0,
            pd: "speech",
            rlang: "cn",
          },
          data: {
            status: 2,  // 2 = end of audio
            format: "audio/L16;rate=16000",
            encoding: "raw",
            audio: "",
          },
        };
        wsRef.current.send(JSON.stringify(endParams));
      }

      // Update UI state
      setIsRecording(false);
    };

    /**
     * Commit the current transcript as a user message and run command
     * matching on it.
     * NOTE(review): `transcript` is only ever cleared (setTranscript(""))
     * and never populated from recognition results, so this currently
     * cannot fire — confirm whether transcript wiring was lost in a
     * refactor.
     */
    const handleSend = () => {
      if (transcript.trim()) {
        // Append the user message
        const newMessage: Message = {
          type: "user",
          content: transcript,
          timestamp: Date.now(),
        };
        setMessages([...messages, newMessage]);
        setTranscript("");

        // Run command matching
        const aiResponse = handleAICommand(transcript);
        if (aiResponse) {
          // Append the AI response
          const aiMessage: Message = {
            type: "ai",
            content: aiResponse,
            timestamp: Date.now(),
          };
          setMessages((prev) => [...prev, aiMessage]);
        }
      }
    };

    /**
     * Forward the close action to the parent, if a handler was provided.
     */
    const handleClose = () => {
      if (onClose) {
        onClose();
      }
    };

    // =============== Render ===============
    return (
      <div className={styles.voiceChatContainer}>
        <div className={styles.header}>
          <h3>{t('chat.voiceChat')}</h3>
          <Button onClick={handleClose}>×</Button>
        </div>
        <div className={styles.messagesContainer} ref={messagesEndRef}>
          {messages.map((msg, index) => (
            <div
              key={index}
              className={`${styles.message} ${
                msg.type === "user" ? styles.userMessage : styles.aiMessage
              }`}
            >
              {msg.content}
            </div>
          ))}
        </div>
        <div className={styles.controls}>
          <Button
            type={isRecording ? "primary" : "default"}
            onClick={isRecording ? stopRecording : startRecording}
            icon={<AudioOutlined />}
          >
            {isRecording ? t('chat.stopRecording') : t('chat.startRecording')}
          </Button>
        </div>
      </div>
    );
  }
);

export default VoiceChat;