/**
 * 页面：数字人演示台（视频 + 3D 模型 + 语音/对话）
 * 功能：
 *  - R3F 渲染 3D 数字人模型，支持基础动画/轨迹控制
 *  - 中央视频播放（自定义控制条）+ 弹幕
 *  - 语音转文字功能与科大讯飞 TTS 语音播报
 *  - 右侧 AI 对话面板，支持附件（图片 base64、本地文件模拟分片上传）
 * 主要技术：React Three Fiber、GLTFLoader、科大讯飞 TTS、Antd-less 自定义样式
 */
import React, { useEffect, useRef, useState } from "react";
import { Link } from "react-router-dom";
import "./Index.css";
import { useFrame, useLoader } from "@react-three/fiber";
import { GLTFLoader } from "three/examples/jsm/loaders/GLTFLoader";
import { Canvas, extend, useThree } from "@react-three/fiber";
import type { Mesh } from "three";
import type { GLTF } from "three/examples/jsm/loaders/GLTFLoader";
import * as THREE from "three";
import { speakStreamFrontOnly } from "../../utils/tts-xf";
import { useSpeechRecognition } from "../../hooks/useSpeechRecognition";
import { XfAsrRecognition } from "../../utils/asr-xf";
import { OrbitControls } from "three/examples/jsm/controls/OrbitControls";

// import { getDanmaku, addDanmaku, deleteDanmaku } from "../../api/danmaku";

// Register three.js OrbitControls as an R3F JSX element (<orbitControls />).
extend({ OrbitControls });

// Teach TypeScript about the lowercase <orbitControls> intrinsic element
// created by the extend() call above.
declare global {
  namespace JSX {
    interface IntrinsicElements {
      orbitControls: any;
    }
  }
}

const Orbit = () => {
  // Attach OrbitControls to the active R3F camera and the canvas element.
  const three = useThree();
  // eslint-disable-next-line react/no-unknown-property
  return <orbitControls args={[three.camera, three.gl.domElement]} />;
};

function AvatarModel({ model, selected }: { model: GLTF; selected: string }) {
  // Renders a GLTF avatar. When the model ships animation clips they are
  // driven by an AnimationMixer; otherwise a lightweight procedural motion
  // (bob + sway keyed by the selected emotion) runs every frame.
  const groupRef = useRef<THREE.Group>(null);
  const mixerRef = useRef<THREE.AnimationMixer | null>(null);
  const currentActionRef = useRef<THREE.AnimationAction | null>(null);
  // Accumulated time, used only by the procedural fallback motion.
  const tRef = useRef(0);

  // (Re)initialize the AnimationMixer whenever the model changes.
  useEffect(() => {
    if (!model?.scene) return;
    if (model.animations && model.animations.length > 0) {
      const mixer = new THREE.AnimationMixer(model.scene);
      mixerRef.current = mixer;
      // Play the first clip by default.
      const action = mixer.clipAction(model.animations[0]);
      action.play();
      currentActionRef.current = action;
    } else {
      mixerRef.current = null;
    }
    // FIX: release the mixer when the model changes or the component
    // unmounts. Without this cleanup the previous mixer kept its cached
    // actions and old scene graph alive (memory leak) and could keep
    // animating a scene that is no longer rendered.
    return () => {
      const mixer = mixerRef.current;
      if (mixer) {
        mixer.stopAllAction();
        mixer.uncacheRoot(model.scene);
      }
      mixerRef.current = null;
      currentActionRef.current = null;
    };
  }, [model]);

  // Cross-fade to the clip whose name contains `selected` (case-insensitive),
  // falling back to the first clip.
  useEffect(() => {
    const mixer = mixerRef.current;
    if (!mixer) return;
    const clip =
      model.animations.find((c) =>
        c.name.toLowerCase().includes(selected.toLowerCase())
      ) || model.animations[0];
    if (!clip) return;
    const next = mixer.clipAction(clip);
    const prev = currentActionRef.current;
    if (prev !== next) {
      next.reset().fadeIn(0.3).play();
      if (prev) prev.fadeOut(0.3);
      currentActionRef.current = next;
    }
  }, [selected, model]);

  // Per-frame update: advance the mixer, or run the procedural motion when
  // the model has no built-in animations.
  useFrame((_, delta) => {
    const mixer = mixerRef.current;
    if (mixer) {
      mixer.update(delta);
      return;
    }
    tRef.current += delta;
    const g = groupRef.current;
    if (!g) return;
    const t = tRef.current;
    // Gentle vertical bob shared by all emotions (Excited overrides it).
    const baseBob = 0.02 * Math.sin(t * 1.4);
    g.position.y = baseBob;
    if (selected === "Happy") {
      g.rotation.y = Math.sin(t * 1.5) * 0.15;
      g.rotation.x = Math.sin(t * 2.0) * 0.03;
    } else if (selected === "Angry") {
      g.rotation.y = Math.sin(t * 3.0) * 0.25;
      g.rotation.x = Math.sin(t * 2.5) * 0.06;
    } else if (selected === "Sad") {
      g.rotation.y = Math.sin(t * 0.8) * 0.08;
      g.rotation.x = -0.1 + Math.sin(t * 0.6) * 0.02;
    } else if (selected === "Excited") {
      g.rotation.y = Math.sin(t * 4.0) * 0.3;
      g.rotation.x = Math.sin(t * 3.5) * 0.08;
      g.position.y = 0.05 * Math.sin(t * 6.0);
    } else {
      // Neutral
      g.rotation.x = Math.sin(t * 1.2) * 0.02;
      g.rotation.y = Math.sin(t * 1.0) * 0.05;
    }
  });

  return (
    <group ref={groupRef}>
      <primitive object={model.scene} scale={0.7} />
    </group>
  );
}

// Maps the UI action label to the .glb asset path (served from /public).
const actionToModelMap: Record<string, string> = {
  待机: "/待机.glb",
  跳舞: "/模型_卖萌.glb",
};

// Loads the .glb that matches the chosen action (falling back to the idle
// model) and renders it via AvatarModel. useLoader suspends while loading,
// so callers must wrap this component in <Suspense>.
function DynamicModelLoader({ action, selected }: { action: string; selected: string }) {
  const modelPath = actionToModelMap[action] || actionToModelMap["待机"];
  const model = useLoader<GLTF>(GLTFLoader, modelPath);
  return <AvatarModel model={model} selected={selected} />;
}

function Index() {
  // Ref to the mesh that wraps the 3D avatar inside the Canvas.
  const mySelfRef = useRef<Mesh>(null);
  // Which .glb the avatar currently plays (drives DynamicModelLoader).
  const [selectedAction, setSelectedAction] = useState<"待机" | "跳舞">("待机");
  const [selectedAnimation] = useState("Angry"); // kept for AvatarModel's `selected` prop
  // Custom video player state.
  const [isPlaying, setIsPlaying] = useState(false);
  const [currentTime, setCurrentTime] = useState(0);
  const [duration, setDuration] = useState(0);
  const [isPictureInPicture, setIsPictureInPicture] = useState(false);
  const videoRef = useRef<HTMLVideoElement>(null);
  // Progress-bar element and drag flag for scrubbing.
  const barRef = useRef<HTMLDivElement>(null);
  const draggingRef = useRef(false);
  // AI chat state
  // A staged chat attachment: images carry a base64 data URL, other files
  // carry an uploaded (or object) URL.
  type Attachment = {
    id: string;
    name: string;
    size: number;
    type: "image" | "file";
    mime: string;
    url: string;
  };
  type ChatMsg = {
    role: "user" | "assistant" | "system";
    content: string;
    attachments?: Attachment[];
  };
  // Conversation history, seeded with the system prompt.
  const [chatMsgs, setChatMsgs] = useState<ChatMsg[]>([
    { role: "system", content: "你是数字展馆的讲解助手，请用简洁中文回答。" },
  ]);
  const [chatInput, setChatInput] = useState("");
  const [chatLoading, setChatLoading] = useState(false);
  const [isStreaming, setIsStreaming] = useState(false); // true while an SSE response is streaming in
  const [showChat, setShowChat] = useState(false);
  // Attachments staged for the next message, plus upload progress UI state.
  const [pendingAttachments, setPendingAttachments] = useState<Attachment[]>(
    []
  );
  const [uploading, setUploading] = useState(false);
  const [uploadProgress, setUploadProgress] = useState(0);
  const imageInputRef = useRef<HTMLInputElement>(null);
  const fileInputRef = useRef<HTMLInputElement>(null);
  
  // Browser-native speech recognition (used by the bottom 💬 button).
  const { 
    isListening: isSpeechListening,
    transcript, 
    error: speechError,
    startListening,
    stopListening,
    resetTranscript
  } = useSpeechRecognition();

  // iFlytek ASR speech recognition (used by the AI chat panel).
  const asrRef = useRef<XfAsrRecognition | null>(null);
  const [isAsrListening, setIsAsrListening] = useState(false);
  const [asrError, setAsrError] = useState<string | null>(null);

  // iFlytek (Xunfei) API credentials, shared by TTS and ASR.
  // SECURITY NOTE(review): hard-coding credentials in client-side source
  // ships them to every visitor; they should be moved behind a backend
  // proxy or injected from server-side configuration.
  const XF_API_KEY = "882935308ac20f8c16bbdf26cd7c1270";
  const XF_API_SECRET = "Y2E4YmJiNzkzOWIwYjRhZGQzNzU0OGNj";
  const XF_APP_ID = "294a03c9";
  
  // Attachment size limit: 100 MB.
  const MAX_FILE_SIZE = 100 * 1024 * 1024;
  // MIME allow-list for attachments (images + common document formats).
  const ALLOWED_MIME = [
    "image/jpeg",
    "image/png",
    "image/gif",
    "image/webp",
    "image/svg+xml",
    "application/pdf",
    "text/plain",
    "application/zip",
    "application/x-zip-compressed",
    "application/msword",
    "application/vnd.openxmlformats-officedocument.wordprocessingml.document",
    "application/vnd.ms-excel",
    "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
    "application/vnd.ms-powerpoint",
    "application/vnd.openxmlformats-officedocument.presentationml.presentation",
  ];

  // Moonshot API key, persisted in localStorage (plain text — acceptable
  // only for a demo page).
  const [apiKey, setApiKey] = useState(
    localStorage.getItem("moonshot_api_key") || ""
  );
  // Draft value edited in the settings UI; committed via saveApiKey.
  const [apiKeyTemp, setApiKeyTemp] = useState(apiKey);
  // Persist the draft key and make it the active key.
  const saveApiKey = () => {
    localStorage.setItem("moonshot_api_key", apiKeyTemp);
    setApiKey(apiKeyTemp);
  };

  // A single on-screen danmaku (bullet comment).
  interface DanmakuItem {
    id: number;
    text: string;
    track: number; // vertical track index, 0..DANMAKU_TRACK_COUNT-1
    color: string; // danmaku text color
  }
  // Preset messages emitted automatically while the video is playing.
  const danmakuPresets = [
    "欢迎来到数字人展馆！",
    "AI 驱动，未来已来～",
    "高能预警：精彩内容持续上线",
    "点个赞再走不迷路 👍",
    "互动弹幕测试中……",
  ];
  const [danmakus, setDanmakus] = useState<DanmakuItem[]>([]);
  // Monotonic id generator and rotating preset index (survive re-renders).
  const nextIdRef = useRef(1);
  const presetIdxRef = useRef(0);
  const [danmakuInput, setDanmakuInput] = useState(""); // danmaku input-box content

  // Pick a random danmaku color from a fixed palette of bright tones that
  // stay readable over the video.
  const getRandomColor = (): string => {
    const palette = [
      "#ffffff", "#00ffff", "#ff00ff", "#ffff00", "#00ff00",
      "#ff0080", "#0080ff", "#80ff00", "#ff8000", "#8000ff",
      "#ff8040", "#40ff80", "#8040ff", "#ff4080", "#80ff40",
      "#ffc0c0", "#c0ffc0", "#c0c0ff", "#ffffc0", "#ffc0ff",
    ];
    const idx = Math.floor(Math.random() * palette.length);
    return palette[idx];
  };

  // Danmaku track layout: tracks start 30px from the top, one every 36px,
  // 10 tracks in total.
  const DANMAKU_TRACK_COUNT = 10;
  const DANMAKU_START_TOP = 30;
  const DANMAKU_TRACK_SPACING = 36;
  // Danmaku auto-destroy delay: 10 seconds.
  const DANMAKU_LIFETIME = 10000; // 10s = 10000ms

  // Schedule removal of a danmaku item after DANMAKU_LIFETIME.
  // NOTE(review): the timeout is never cancelled, so it may fire after
  // unmount; the functional setState keeps it harmless, but confirm.
  const scheduleDanmakuDestruction = (id: number) => {
    setTimeout(() => {
      setDanmakus((prev) => prev.filter((d) => d.id !== id));
    }, DANMAKU_LIFETIME);
  };

  // While the video plays, emit one preset danmaku every 7.2s, cycling
  // through the presets on a random track with a random color.
  // NOTE(review): deps are intentionally limited to isPlaying; the captured
  // danmakuPresets/getRandomColor are pure, so stale closures are harmless.
  useEffect(() => {
    if (!isPlaying) return;
    const interval = setInterval(() => {
      const text = danmakuPresets[presetIdxRef.current % danmakuPresets.length];
      presetIdxRef.current++;
      const id = nextIdRef.current++;
      const track = Math.floor(Math.random() * DANMAKU_TRACK_COUNT); // one of 10 tracks
      const color = getRandomColor(); // random color
      const item: DanmakuItem = { id, text, track, color };
      setDanmakus((prev) => [...prev, item]);
      // Auto-destroy after 10 seconds.
      scheduleDanmakuDestruction(id);
    }, 7200);
    return () => clearInterval(interval);
  }, [isPlaying]);

  // Send the user's danmaku: trim the input, assign a random track and
  // color, append it to the list, and schedule its removal.
  const sendDanmaku = () => {
    const content = danmakuInput.trim();
    if (!content) return;
    const newItem: DanmakuItem = {
      id: nextIdRef.current++,
      text: content,
      track: Math.floor(Math.random() * DANMAKU_TRACK_COUNT),
      color: getRandomColor(),
    };
    setDanmakus((prev) => prev.concat(newItem));
    setDanmakuInput(""); // clear the input box
    scheduleDanmakuDestruction(newItem.id); // auto-destroy after 10s
  };

  // Remove the danmaku with the given id from the screen.
  const deleteDanmaku = (id: number) => {
    setDanmakus((current) => current.filter((item) => item.id !== id));
  };

  // Options for the action <select> in the floating toolbar.
  const actionOptions = [
    { value: "待机", label: "待机" },
    { value: "跳舞", label: "跳舞" },
  ];

  // Switch the avatar's action; triggers DynamicModelLoader to load the
  // corresponding .glb.
  const handleActionChange = (e: React.ChangeEvent<HTMLSelectElement>) => {
    const newAction = e.target.value as "待机" | "跳舞";
    setSelectedAction(newAction);
  };

  // On mount: start the video muted so autoplay passes browser policies;
  // a rejected play() promise (autoplay blocked) is deliberately ignored.
  useEffect(() => {
    const v = videoRef.current;
    if (!v) return;
    // Mute first — unmuted autoplay is blocked by most browsers.
    v.muted = true;
    v.play()
      .then(() => setIsPlaying(true))
      .catch(() => {});
  }, []);

  // Toggle video playback. play() returns a promise that may reject under
  // autoplay policies; rejections are swallowed silently.
  const togglePlay = () => {
    const video = videoRef.current;
    if (!video) return;
    if (!video.paused) {
      video.pause();
      setIsPlaying(false);
      return;
    }
    video
      .play()
      .then(() => setIsPlaying(true))
      .catch(() => {});
  };

  // Toggle Picture-in-Picture: enter PiP for the video, or exit if the
  // video is already the PiP element. Alerts when unsupported or on error.
  const togglePictureInPicture = async () => {
    const video = videoRef.current;
    if (!video) return;

    // Browsers without the PiP API get a plain alert.
    if (!document.pictureInPictureEnabled) {
      alert("您的浏览器不支持画中画功能");
      return;
    }

    const alreadyInPip = document.pictureInPictureElement === video;
    try {
      if (alreadyInPip) {
        await document.exitPictureInPicture();
        setIsPictureInPicture(false);
      } else {
        await video.requestPictureInPicture();
        setIsPictureInPicture(true);
      }
    } catch (error) {
      console.error("画中画功能出错:", error);
      alert("无法启用画中画功能");
    }
  };

  // Keep isPictureInPicture in sync with the browser's PiP events — this
  // covers the case where the user closes the floating window directly.
  useEffect(() => {
    const v = videoRef.current;
    if (!v) return;

    const handleEnterPictureInPicture = () => {
      setIsPictureInPicture(true);
    };

    const handleLeavePictureInPicture = () => {
      setIsPictureInPicture(false);
    };

    v.addEventListener("enterpictureinpicture", handleEnterPictureInPicture);
    v.addEventListener("leavepictureinpicture", handleLeavePictureInPicture);

    return () => {
      v.removeEventListener("enterpictureinpicture", handleEnterPictureInPicture);
      v.removeEventListener("leavepictureinpicture", handleLeavePictureInPicture);
    };
  }, []);

  // Convert a pointer X coordinate into a progress ratio in [0, 1] along
  // the seek bar; returns 0 when the bar is missing or duration is unknown.
  const calcRatio = (clientX: number) => {
    const bar = barRef.current;
    if (!bar || duration === 0) return 0;
    const { left, width } = bar.getBoundingClientRect();
    const clamped = Math.max(0, Math.min(clientX - left, width));
    return clamped / width;
  };

  // Seek the video to the position corresponding to a pointer X coordinate
  // on the progress bar, keeping currentTime state in sync.
  const seekToClientX = (clientX: number) => {
    const video = videoRef.current;
    if (!video || duration === 0) return;
    const target = calcRatio(clientX) * duration;
    video.currentTime = target;
    setCurrentTime(target);
  };

  // Mouse scrubbing: seek on mousedown, then track movement on window so
  // the drag keeps working even when the cursor leaves the bar.
  const onBarMouseDown = (e: React.MouseEvent<HTMLDivElement>) => {
    draggingRef.current = true;
    seekToClientX(e.clientX);
    window.addEventListener("mousemove", onMouseMove);
    window.addEventListener("mouseup", onMouseUp);
  };

  const onMouseMove = (e: MouseEvent) => {
    if (draggingRef.current) seekToClientX(e.clientX);
  };

  // End of drag: detach the window-level listeners added on mousedown.
  const onMouseUp = () => {
    draggingRef.current = false;
    window.removeEventListener("mousemove", onMouseMove);
    window.removeEventListener("mouseup", onMouseUp);
  };

  // Touch scrubbing — mirrors the mouse handlers above.
  const onBarTouchStart = (e: React.TouchEvent<HTMLDivElement>) => {
    draggingRef.current = true;
    const first = e.touches[0];
    if (first) seekToClientX(first.clientX);
    window.addEventListener("touchmove", onTouchMove);
    window.addEventListener("touchend", onTouchEnd);
  };

  const onTouchMove = (e: TouchEvent) => {
    if (!draggingRef.current) return;
    const first = e.touches[0];
    if (first) seekToClientX(first.clientX);
  };

  // End of touch drag: detach the window-level listeners.
  const onTouchEnd = () => {
    draggingRef.current = false;
    window.removeEventListener("touchmove", onTouchMove);
    window.removeEventListener("touchend", onTouchEnd);
  };

  // Close the chat panel: stop any running ASR session and resume video
  // playback (play() rejections are ignored, matching autoplay handling).
  const closeChat = () => {
    setShowChat(false);
    if (isAsrListening) stopAsrRecognition();
    const video = videoRef.current;
    if (!video) return;
    video
      .play()
      .then(() => setIsPlaying(true))
      .catch(() => {});
  };

  // Start iFlytek ASR recognition (acts as a toggle: stops when already
  // listening). Recognized text streams into the chat input; the chat
  // panel opens on the final result or when recognition ends with text.
  const startAsrRecognition = async () => {
    if (isAsrListening) {
      stopAsrRecognition();
      return;
    }

    try {
      setAsrError(null);

      // FIX: the callbacks below used to read `showChat`/`chatInput` from
      // the closure created here, which are stale snapshots by the time the
      // callbacks fire. Track the latest recognized text locally and open
      // the panel idempotently instead of consulting stale state.
      let lastText = "";

      // Open the chat panel (setShowChat(true) is idempotent) and pause the
      // video if it is playing.
      const openChatPanel = () => {
        setShowChat(true);
        const v = videoRef.current;
        if (v && !v.paused) {
          v.pause();
          setIsPlaying(false);
        }
      };

      // Create the ASR instance.
      asrRef.current = new XfAsrRecognition({
        apiKey: XF_API_KEY,
        apiSecret: XF_API_SECRET,
        appId: XF_APP_ID,
        onStart: () => {
          setIsAsrListening(true);
          setAsrError(null);
        },
        onResult: (text: string, isFinal: boolean) => {
          console.log("[ASR-Callback] onResult called:", { text, isFinal, length: text.length });

          // Stream interim results into the chat input.
          if (text && text.trim().length > 0) {
            lastText = text;
            setChatInput(text);

            if (isFinal) {
              console.log("[ASR] Final result received, opening chat panel if needed");
              openChatPanel();
            }
          } else {
            console.log("[ASR] Empty text received, ignoring");
          }
        },
        onError: (error: Error | string) => {
          setIsAsrListening(false);
          const errorMsg = typeof error === 'string' ? error : error.message;
          setAsrError(errorMsg);

          // Map common failure categories to user-facing guidance.
          if (errorMsg.includes('网络') || errorMsg.includes('WebSocket')) {
            alert("语音识别网络错误，请检查网络连接后重试");
          } else if (errorMsg.includes('权限') || errorMsg.includes('NotAllowed')) {
            alert("麦克风权限被拒绝，请在浏览器设置中允许访问麦克风");
          } else if (errorMsg.includes('NotFound')) {
            alert("未检测到麦克风设备，请检查设备连接");
          } else {
            alert(`语音识别失败：${errorMsg}`);
          }
        },
        onEnd: () => {
          console.log("[ASR] Recognition ended");
          setIsAsrListening(false);

          // FIX: use the locally tracked text rather than the stale
          // `chatInput` closure value captured when recognition started.
          if (lastText.trim().length > 0) {
            console.log("[ASR] Opening chat panel after recognition end");
            openChatPanel();
          }
        },
      });

      // Begin recognition (may throw on mic/permission failures).
      await asrRef.current.start();
    } catch (error: any) {
      console.error("[ASR] Start error:", error);
      setIsAsrListening(false);
      setAsrError(error?.message || "启动语音识别失败");

      if (error?.name === "NotAllowedError" || error?.name === "PermissionDeniedError") {
        alert("麦克风权限被拒绝，请在浏览器设置中允许访问麦克风");
      } else if (error?.name === "NotFoundError" || error?.name === "DevicesNotFoundError") {
        alert("未检测到麦克风设备，请检查设备连接");
      } else {
        alert(`启动语音识别失败：${error?.message || error}`);
      }
    }
  };

  // Stop iFlytek ASR recognition and release the instance, resetting the
  // listening flag and any displayed error.
  const stopAsrRecognition = () => {
    const asr = asrRef.current;
    if (asr) {
      asr.stop();
      asrRef.current = null;
    }
    setIsAsrListening(false);
    setAsrError(null);
  };

  // Request microphone permission. The acquired stream is released
  // immediately — we only need the permission grant, not the audio.
  const requestMicrophonePermission = async (): Promise<boolean> => {
    const constraints: MediaStreamConstraints = {
      audio: {
        channelCount: 1,
        echoCancellation: true,
        noiseSuppression: true,
        sampleRate: 16000,
      },
      video: false,
    };
    try {
      const stream = await navigator.mediaDevices.getUserMedia(constraints);
      for (const track of stream.getTracks()) track.stop();
      return true;
    } catch (error: any) {
      console.error("麦克风权限请求失败:", error);
      const name = error.name;
      if (name === "NotAllowedError" || name === "PermissionDeniedError") {
        alert("麦克风权限被拒绝，请在浏览器设置中允许访问麦克风");
      } else if (name === "NotFoundError" || name === "DevicesNotFoundError") {
        alert("未检测到麦克风设备，请检查设备连接");
      } else {
        alert(`无法访问麦克风：${error.message || error.name}`);
      }
      return false;
    }
  };

  // Handle a finished recognition: copy the transcript into the chat input
  // and open the chat panel (pausing the video) if it is not already open.
  // Memoized on transcript/showChat so callers always get a fresh closure.
  const handleRecognitionResult = React.useCallback(() => {
    if (transcript.trim()) {
      setChatInput(transcript);
      // Auto-open the chat panel when it is closed.
      if (!showChat) {
        setShowChat(true);
        const v = videoRef.current;
        if (v && !v.paused) {
          v.pause();
          setIsPlaying(false);
        }
      }
    }
  }, [transcript, showChat]);

  // Toggle browser-native speech-to-text. When listening, stop manually and
  // process the result; otherwise request mic permission, then start.
  const toggleSpeechToText = async () => {
    if (!isSpeechListening) {
      // Not listening: reset, ask for permission, then start recognizing.
      resetTranscript();
      const granted = await requestMicrophonePermission();
      if (granted) {
        startListening();
      }
      return;
    }
    // Listening: mark as a manual stop so the auto-stop effect skips it.
    isManualStopRef.current = true;
    stopListening();
    // Delay so the final transcript has been committed before reading it.
    setTimeout(() => {
      handleRecognitionResult();
      resetTranscript();
    }, 200);
  };

  // Allow closing the chat panel with the Escape key while it is open.
  useEffect(() => {
    if (!showChat) return;
    const onKey = (e: KeyboardEvent) => {
      if (e.key === "Escape") closeChat();
    };
    window.addEventListener("keydown", onKey);
    return () => window.removeEventListener("keydown", onKey);
  }, [showChat]);

  // Live speech-to-text: mirror interim recognition results into the chat
  // input while the browser recognizer is active.
  useEffect(() => {
    if (transcript && isSpeechListening) {
      setChatInput(transcript);
    }
  }, [transcript, isSpeechListening]);

  // Cleanup on unmount: stop any in-flight iFlytek ASR session and drop
  // the instance so its WebSocket/mic resources are released.
  useEffect(() => {
    return () => {
      // Stop ASR and release the instance when the component unmounts.
      if (asrRef.current) {
        asrRef.current.stop();
        asrRef.current = null;
      }
    };
  }, []);

  // Handle the recognizer's automatic stop (browsers stop on their own
  // after a period of silence): when listening flips true -> false,
  // process the final transcript unless the stop was user-initiated.
  const prevListeningRef = useRef(isSpeechListening);
  const isManualStopRef = useRef(false);
  
  useEffect(() => {
    // Transition from "listening" to "stopped".
    if (prevListeningRef.current && !isSpeechListening) {
      // Small delay so the final transcript value has settled.
      // NOTE(review): this timeout is deliberately not cleared — the effect
      // re-runs when handleRecognitionResult's identity changes, and
      // clearing here would cancel the pending processing. Confirm before
      // adding a cleanup.
      setTimeout(() => {
        // Manual stops are already handled by toggleSpeechToText.
        if (!isManualStopRef.current) {
          handleRecognitionResult();
        }
        isManualStopRef.current = false; // reset the flag
      }, 300);
    }
    prevListeningRef.current = isSpeechListening;
  }, [isSpeechListening, handleRecognitionResult]);

  // Surface speech-recognition errors with user-friendly remediation hints.
  const prevSpeechErrorRef = useRef<string | null>(null);
  useEffect(() => {
    // Only alert when the error is new (changed since the last render).
    if (speechError && speechError !== prevSpeechErrorRef.current) {
      console.error("语音识别错误:", speechError);
      
      // Make sure we are no longer in the listening state.
      if (isSpeechListening) {
        stopListening();
      }
      
      // Pick a remediation hint based on the error category.
      let errorHint = '';
      if (speechError.includes('网络')) {
        errorHint = '\n\n解决方案：\n1. 检查网络连接是否正常\n2. 如果使用Chrome浏览器，确保可以访问Google服务\n3. 请使用HTTPS协议访问页面（语音识别需要安全连接）\n4. 某些地区可能需要VPN才能使用语音识别服务';
      } else if (speechError.includes('权限')) {
        errorHint = '\n\n解决方案：\n1. 在浏览器设置中允许麦克风权限\n2. 刷新页面后重试';
      } else if (speechError.includes('麦克风')) {
        errorHint = '\n\n解决方案：\n1. 检查麦克风设备是否连接\n2. 检查系统麦克风权限设置\n3. 尝试重新授权麦克风权限';
      } else {
        errorHint = '\n\n请尝试：\n1. 刷新页面后重试\n2. 检查浏览器是否支持语音识别功能\n3. 尝试使用Chrome或Edge浏览器';
      }
      
      // Use confirm() so the user acknowledges before the transcript resets.
      const userMessage = `语音识别错误：${speechError}${errorHint}\n\n点击确定后可以重新尝试。`;
      if (window.confirm(userMessage)) {
        // After confirmation, clear the transcript for a retry.
        resetTranscript();
      }
      
      prevSpeechErrorRef.current = speechError;
    } else if (!speechError) {
      // Reset the stored error once it clears.
      prevSpeechErrorRef.current = null;
    }
  }, [speechError, isSpeechListening, stopListening, resetTranscript]);

  // Send the current chat input (plus staged attachments) to the Moonshot
  // chat-completions API with SSE streaming. The last assistant message is
  // updated incrementally while the stream arrives, and the final answer is
  // spoken via iFlytek TTS without blocking the UI.
  const sendChat = async () => {
    const text = chatInput.trim();
    if ((!text && pendingAttachments.length === 0) || chatLoading) return;
    if (!apiKey) {
      setChatMsgs((prev) => [
        ...prev,
        { role: "assistant", content: "请先设置 API Key 后再发送。" },
      ]);
      return;
    }
    // NOTE(review): attachments are displayed in the UI, but only text is
    // sent to the model (the `messages` payload below strips them).
    const content = text;
    const newMsgs: ChatMsg[] = [
      ...chatMsgs,
      {
        role: "user" as const,
        content,
        attachments: pendingAttachments.length
          ? [...pendingAttachments]
          : undefined,
      },
    ];
    setChatMsgs(newMsgs);
    setChatInput("");
    setPendingAttachments([]);
    setChatLoading(true);
    // FIX: removed `console.log(apiKey)` — never log credentials.

    // FIX: the previous code mutated the trailing assistant message object
    // inside the state array (`lastMsg.content = …`), which violates React's
    // immutable-state contract. This helper rewrites it immutably instead.
    const patchLastAssistant = (value: string) =>
      setChatMsgs((prev) =>
        prev.map((m, i) =>
          i === prev.length - 1 && m.role === "assistant"
            ? { ...m, content: value }
            : m
        )
      );

    // Append an empty assistant placeholder for the stream to fill in.
    setChatMsgs((prev) => [...prev, { role: "assistant", content: "" }]);
    setIsStreaming(true); // streaming begins
    let fullAnswer = "";

    try {
      const resp = await fetch("https://api.moonshot.cn/v1/chat/completions", {
        method: "POST",
        headers: {
          Authorization: `Bearer ${apiKey}`,
          "Content-Type": "application/json",
        },
        body: JSON.stringify({
          model: "moonshot-v1-8k",
          messages: newMsgs.map((m) => ({ role: m.role, content: m.content })),
          temperature: 0.7,
          max_tokens: 1000,
          stream: true, // enable SSE streaming
        }),
      });

      if (!resp.ok) {
        const errorText = await resp.text();
        throw new Error(errorText);
      }

      // Read the streaming body.
      const reader = resp.body?.getReader();
      if (!reader) {
        throw new Error("无法读取响应流");
      }

      const decoder = new TextDecoder();
      let buffer = "";

      // Consume the SSE stream: each record is a "data: {json}" line.
      while (true) {
        const { done, value } = await reader.read();
        if (done) break;

        buffer += decoder.decode(value, { stream: true });

        const lines = buffer.split("\n");
        buffer = lines.pop() || ""; // keep the trailing partial line

        for (const line of lines) {
          const trimmedLine = line.trim();
          if (!trimmedLine || trimmedLine === "data: [DONE]") continue;

          if (trimmedLine.startsWith("data: ")) {
            try {
              const jsonStr = trimmedLine.slice(6); // strip the "data: " prefix
              const data = JSON.parse(jsonStr);

              // Accumulate the delta content and refresh the placeholder.
              const delta = data.choices?.[0]?.delta?.content;
              if (delta) {
                fullAnswer += delta;
                patchLastAssistant(fullAnswer);
              }
            } catch (parseError) {
              console.warn("[Stream] Failed to parse SSE data:", trimmedLine, parseError);
            }
          }
        }
      }

      // Stream finished: settle the final assistant content.
      const answer = fullAnswer.trim() || "（无响应）";
      patchLastAssistant(answer);
      setIsStreaming(false); // streaming ends

      // Speak the answer asynchronously so TTS never blocks the chat UI.
      if (answer && answer !== "（无响应）" && answer.trim().length > 0) {
        setTimeout(async () => {
          try {
            console.log("[TTS] Starting speech synthesis for answer, length:", answer.length);
            await speakStreamFrontOnly({
              apiKey: XF_API_KEY,
              apiSecret: XF_API_SECRET,
              appId: XF_APP_ID,
              text: answer.trim(),
              vcn: "x5_lingxiaoxuan_flow",
            });
            console.log("[TTS] Speech synthesis completed successfully");
          } catch (ttsError: any) {
            // TTS failure must not affect chat; log and fail silently.
            console.error("[TTS] Speech synthesis failed:", ttsError);
          }
        }, 100); // small delay so the UI update lands first
      }
    } catch (e: any) {
      setIsStreaming(false); // stop the streaming state on error too
      // Surface the failure in the (possibly still empty) placeholder,
      // appending a new message if no assistant placeholder exists.
      setChatMsgs((prev) => {
        const last = prev[prev.length - 1];
        const msg = `请求失败：${e.message || e}`;
        if (last && last.role === "assistant") {
          return prev.map((m, i) =>
            i === prev.length - 1 ? { ...m, content: msg } : m
          );
        }
        return [...prev, { role: "assistant", content: msg }];
      });
    } finally {
      setChatLoading(false);
    }
  };

  // Open the hidden file inputs programmatically.
  const openImagePicker = () => imageInputRef.current?.click();
  const openFilePicker = () => fileInputRef.current?.click();

  // Process picked files one at a time (sequential awaits keep the shared
  // upload-progress indicator coherent).
  const onFilesPicked = async (
    files: FileList | null,
    kind: "image" | "file"
  ) => {
    if (!files?.length) return;
    for (const file of Array.from(files)) {
      await handleUpload(file, kind);
    }
  };

  // Validate a picked file (size limit + MIME allow-list) and stage it as a
  // pending attachment. Images are embedded as base64 data URLs; other files
  // go through chunked upload (or the local simulation). Failures are
  // reported in the chat instead of escaping as unhandled rejections.
  const handleUpload = async (file: File, kind: "image" | "file") => {
    if (file.size > MAX_FILE_SIZE) {
      // FIX: closing bracket of the message was a half-width ')' mismatched
      // with the full-width '（'.
      setChatMsgs((prev) => [
        ...prev,
        { role: "assistant", content: `文件过大：${file.name}（最大 100MB）` },
      ]);
      return;
    }
    if (!ALLOWED_MIME.includes(file.type)) {
      setChatMsgs((prev) => [
        ...prev,
        {
          role: "assistant",
          content: `不支持的文件类型：${file.name}（${
            file.type || "未知类型"
          }）`,
        },
      ]);
      return;
    }
    // Reasonably unique client-side attachment id.
    const makeId = () => `${Date.now()}_${Math.random().toString(36).slice(2, 8)}`;
    // Images: read as a base64 data URL so they can be previewed inline.
    if (kind === "image") {
      try {
        setUploading(true);
        setUploadProgress(0);
        const dataUrl: string = await new Promise((resolve, reject) => {
          const reader = new FileReader();
          reader.onload = () => resolve(reader.result as string);
          reader.onerror = reject;
          reader.readAsDataURL(file);
        });
        const att: Attachment = {
          id: makeId(),
          name: file.name,
          size: file.size,
          type: kind,
          mime: file.type,
          url: dataUrl,
        };
        setPendingAttachments((prev) => [...prev, att]);
      } catch {
        // FIX: a FileReader failure previously escaped the try/finally as an
        // unhandled rejection; report it in the chat instead.
        setChatMsgs((prev) => [
          ...prev,
          { role: "assistant", content: `读取文件失败：${file.name}` },
        ]);
      } finally {
        setUploading(false);
        setUploadProgress(0);
      }
      return;
    }
    // Non-images: chunked upload (or local object-URL simulation).
    setUploading(true);
    setUploadProgress(0);
    try {
      const result = await uploadFileChunked(file);
      const att: Attachment = {
        id: makeId(),
        name: file.name,
        size: file.size,
        type: kind,
        mime: file.type,
        url: result.url,
      };
      setPendingAttachments((prev) => [...prev, att]);
    } catch (uploadError: any) {
      // FIX: upload failures previously propagated as unhandled rejections.
      setChatMsgs((prev) => [
        ...prev,
        {
          role: "assistant",
          content: `上传失败：${file.name}（${uploadError?.message || uploadError}）`,
        },
      ]);
    } finally {
      setUploading(false);
      setUploadProgress(0);
    }
  };

  // Remove one staged attachment before the message is sent.
  const removePendingAttachment = (id: string) => {
    setPendingAttachments((current) => current.filter((att) => att.id !== id));
  };

  // Chunked file upload. When no UPLOAD_ENDPOINT is configured on window,
  // the upload is simulated with a size-proportional delay and a local
  // object URL is returned instead.
  const uploadFileChunked = async (file: File): Promise<{ url: string }> => {
    const endpoint = (window as any).UPLOAD_ENDPOINT as string | undefined;
    const CHUNK_SIZE = 5 * 1024 * 1024; // 5 MB per chunk

    // Local simulation path: delay scales with file size, capped at 4s.
    if (!endpoint) {
      const delayMs = Math.min(1000 + Math.floor(file.size / 50000), 4000);
      await new Promise((resolve) => setTimeout(resolve, delayMs));
      return { url: URL.createObjectURL(file) };
    }

    // 1) Initialize the upload session.
    const initResp = await fetch(`${endpoint}/init`, {
      method: "POST",
      headers: { "Content-Type": "application/json" },
      body: JSON.stringify({
        filename: file.name,
        size: file.size,
        mime: file.type,
      }),
    });
    if (!initResp.ok) throw new Error("无法初始化上传");
    const { uploadId } = await initResp.json();

    // 2) Upload each chunk sequentially, updating the progress indicator.
    const totalChunks = Math.ceil(file.size / CHUNK_SIZE);
    for (let index = 0; index < totalChunks; index++) {
      const offset = index * CHUNK_SIZE;
      const piece = file.slice(offset, Math.min(offset + CHUNK_SIZE, file.size));
      const form = new FormData();
      form.append("uploadId", uploadId);
      form.append("index", String(index));
      form.append("chunk", piece);
      const chunkResp = await fetch(`${endpoint}/chunk`, {
        method: "POST",
        body: form,
      });
      if (!chunkResp.ok) throw new Error(`分片上传失败: ${index}`);
      setUploadProgress(Math.round(((index + 1) / totalChunks) * 100));
    }

    // 3) Ask the server to assemble the chunks.
    const completeResp = await fetch(`${endpoint}/complete`, {
      method: "POST",
      headers: { "Content-Type": "application/json" },
      body: JSON.stringify({ uploadId }),
    });
    if (!completeResp.ok) throw new Error("完成上传失败");
    const data = await completeResp.json();
    return { url: data.url as string };
  };

  // Format a duration in seconds as "m:ss"; invalid or negative values
  // render as "0:00".
  const formatTime = (sec: number) => {
    if (!isFinite(sec) || sec < 0) return "0:00";
    const totalSeconds = Math.floor(sec);
    const minutes = Math.floor(totalSeconds / 60);
    const seconds = totalSeconds - minutes * 60;
    return `${minutes}:${String(seconds).padStart(2, "0")}`;
  };

  return (
    <div className="digital-showcase">
      {/* Top chip-style action bar */}
      <header className="top-nav">
        <div className="chip-bar">
          <Link to="/mode" className="chip primary">
            进入3D展馆
          </Link>
          <button
            className="chip"
            onClick={() => {
              // Open the chat overlay and pause the video first so TTS/ASR
              // audio does not compete with video playback.
              setShowChat(true);
              const v = videoRef.current;
              if (v && !v.paused) {
                v.pause();
                setIsPlaying(false);
              }
            }}
          >
            AI对话
          </button>
          <button className="chip">轮播模式</button>
          <button className="chip">中文/English</button>
          <div className="chip">选择视角：跟随</div>
        </div>
      </header>

      {/* Top-left green toolbar: selects the 3D avatar's animation/action */}
      <div className="floating-toolbar green">
        <div className="toolbar-row">
          <span className="toolbar-label">选择动作：</span>
          <select
            value={selectedAction}
            onChange={handleActionChange}
            className="toolbar-select"
          >
            {actionOptions.map((o) => (
              <option key={o.value} value={o.value}>
                {o.label}
              </option>
            ))}
          </select>
        </div>
      </div>

      {/* Main content area */}
      <main className="main-content">
        {/* Left: digital-human avatar rendered as a 3D model */}
        <div
          className="left-avatar"
          style={{ marginTop: "200px", width: "30vw", height: "100%" }}
        >
          <div style={{ width: "100%", height: "100%", minHeight: 360 }}>
            <Canvas
              style={{ backgroundColor: "transparent" }}
              camera={{ position: [1, 1, 2] }}
            >
              <Orbit />
              <mesh ref={mySelfRef}>
                <hemisphereLight intensity={0.15} groundColor="black" />
                <ambientLight />
                {/* Model loads lazily; render nothing while pending */}
                <React.Suspense fallback={null}>
                  <DynamicModelLoader action={selectedAction} selected={selectedAnimation} />
                </React.Suspense>
              </mesh>
            </Canvas>
          </div>
        </div>

        {/* Center: video playback area */}
        <section className="video-section">
          <div className="video-container">
            <div
              className={`video-player neon-frame ${
                !isPlaying ? "paused" : ""
              }`}
            >
              <div className="video-content">
                <video
                  ref={videoRef}
                  className="video-tag"
                  src="/ke.mp4"
                  playsInline
                  loop
                  onPlay={() => setIsPlaying(true)}
                  onPause={() => setIsPlaying(false)}
                  // Native controls are hidden; playback is driven by the
                  // custom control bar below. `currentTarget` is already
                  // typed as HTMLVideoElement, so no type assertion is needed.
                  onTimeUpdate={(e) => setCurrentTime(e.currentTarget.currentTime)}
                  onLoadedMetadata={(e) => setDuration(e.currentTarget.duration)}
                />
                {/* Danmaku (bullet comment) layer */}
                <div className="danmaku-layer">
                  {danmakus.map((d) => (
                    <div
                      key={d.id}
                      className="danmaku-item"
                      style={{
                        top: `${DANMAKU_START_TOP + d.track * DANMAKU_TRACK_SPACING}px`,
                        color: d.color
                      }}
                    >
                      <span className="danmaku-text">{d.text}</span>
                      <button
                        className="danmaku-delete"
                        onClick={(e) => {
                          // Keep the click from reaching the layers underneath.
                          e.stopPropagation();
                          deleteDanmaku(d.id);
                        }}
                        title="删除弹幕"
                      >
                        ×
                      </button>
                    </div>
                  ))}
                </div>
                {/* The centered overlay play button was removed; playback is
                    controlled by the leftmost button of the control bar. */}
              </div>
              <div className="video-controls">
                <button
                  className={`play-control ${isPlaying ? "playing" : ""}`}
                  onClick={togglePlay}
                  aria-label={isPlaying ? "暂停" : "播放"}
                  title={isPlaying ? "暂停" : "播放"}
                >
                  {isPlaying ? "⏸️" : "▶️"}
                </button>
                <div
                  className="progress-bar"
                  ref={barRef}
                  onMouseDown={onBarMouseDown}
                  onTouchStart={onBarTouchStart}
                >
                  <div
                    className="progress"
                    style={{
                      width: duration
                        ? `${(currentTime / duration) * 100}%`
                        : "0%",
                    }}
                  ></div>
                </div>
                <div className="time-display">
                  {formatTime(currentTime)} / {formatTime(duration)}
                </div>
                <div className="control-buttons">
                  <button className="control-btn">🔊</button>
                  <button
                    className={`control-btn ${isPictureInPicture ? "active" : ""}`}
                    onClick={togglePictureInPicture}
                    title={isPictureInPicture ? "退出画中画" : "开启画中画"}
                    aria-label={isPictureInPicture ? "退出画中画" : "开启画中画"}
                  >
                    ⛶
                  </button>
                </div>
              </div>
              {/* Danmaku input row */}
              <div className="danmaku-input-container">
                <input
                  type="text"
                  className="danmaku-input"
                  placeholder="输入弹幕..."
                  value={danmakuInput}
                  onChange={(e) => setDanmakuInput(e.target.value)}
                  onKeyDown={(e) => {
                    // Ignore the Enter that confirms an IME composition
                    // (e.g. Chinese pinyin input); only send on a real Enter.
                    if (e.key === "Enter" && !e.nativeEvent.isComposing) {
                      sendDanmaku();
                    }
                  }}
                />
                <button className="danmaku-send-btn" onClick={sendDanmaku}>
                  发送
                </button>
              </div>
              {/* Chat overlay (covers the whole .video-player, controls included) */}
              {showChat && (
                <div className="video-overlay-chat" onClick={closeChat}>
                  <div
                    className="chat-panel"
                    onClick={(e) => e.stopPropagation()}
                    onDragOver={(e) => {
                      e.preventDefault();
                    }}
                    onDrop={(e) => {
                      e.preventDefault();
                      const files = e.dataTransfer?.files;
                      if (files && files.length) {
                        // Upload sequentially. The previous forEach(async)
                        // never awaited anything, so concurrent uploads raced
                        // on the single shared uploadProgress display below.
                        void (async () => {
                          for (const f of Array.from(files)) {
                            const kind = f.type.startsWith("image/")
                              ? "image"
                              : "file";
                            await handleUpload(f, kind);
                          }
                        })();
                      }
                    }}
                  >
                    <div className="chat-header">
                      <div>AI 对话</div>
                      <button
                        className="chat-close"
                        onClick={closeChat}
                        aria-label="关闭"
                        title="关闭"
                      >
                        ✕
                      </button>
                    </div>
                    <div className="chat-messages">
                      {chatMsgs
                        .filter((m) => m.role !== "system")
                        .map((m, idx, filteredArray) => {
                          // Show a typing cursor only on the last assistant
                          // message while tokens are still streaming in.
                          const isLastAssistantMsg = m.role === "assistant" && idx === filteredArray.length - 1;
                          const showCursor = isLastAssistantMsg && isStreaming;
                          return (
                          <div key={idx} className={`chat-msg ${m.role}`}>
                            <div>
                              {m.content}
                              {showCursor && <span className="typing-cursor">|</span>}
                            </div>
                            {m.attachments && m.attachments.length > 0 && (
                              <div className="chat-attachments">
                                {m.attachments.map((att) => (
                                  <div key={att.id} className="chat-attachment">
                                    {att.type === "image" ? (
                                      <img
                                        src={att.url}
                                        alt={att.name}
                                        className="chat-attachment-img"
                                      />
                                    ) : (
                                      <a
                                        href={att.url}
                                        target="_blank"
                                        rel="noreferrer"
                                        className="chat-attachment-file"
                                      >
                                        {att.name}
                                      </a>
                                    )}
                                  </div>
                                ))}
                              </div>
                            )}
                          </div>
                          );
                        })}
                    </div>
                    {pendingAttachments.length > 0 && (
                      <div className="pending-attachments">
                        <div className="pending-title">待发送附件：</div>
                        <div className="chat-attachments">
                          {pendingAttachments.map((att) => (
                            <div key={att.id} className="chat-attachment">
                              {att.type === "image" ? (
                                <img
                                  src={att.url}
                                  alt={att.name}
                                  className="chat-attachment-img"
                                />
                              ) : (
                                <a
                                  href={att.url}
                                  target="_blank"
                                  rel="noreferrer"
                                  className="chat-attachment-file"
                                >
                                  {att.name}
                                </a>
                              )}
                              <button
                                className="pending-remove"
                                onClick={() => removePendingAttachment(att.id)}
                                title="移除"
                              >
                                ✕
                              </button>
                            </div>
                          ))}
                        </div>
                      </div>
                    )}
                    {/* Key entry shown until a key is saved (stored locally only) */}
                    {!apiKey && (
                      <div className="chat-input-row">
                        <input
                          value={apiKeyTemp}
                          onChange={(e) => setApiKeyTemp(e.target.value)}
                          placeholder="请输入 API Key（仅保存在本地）"
                        />
                        <button
                          onClick={saveApiKey}
                          disabled={!apiKeyTemp.trim()}
                        >
                          保存密钥
                        </button>
                      </div>
                    )}
                    <div className="chat-input-row">
                      <div className="chat-upload-btns">
                        {/* <button
                          className="upload-icon"
                          onClick={openFilePicker}
                          title="上传文件"
                          disabled={uploading}
                        >
                          <img
                            className="upload-icon-img"
                            src="/images/file-icon.png"
                            alt="文件"
                          />
                        </button> */}
                        {/* <button
                          className="upload-icon"
                          onClick={openImagePicker}
                          title="上传图片"
                          disabled={uploading}
                        >
                          🖼️
                        </button> */}
                        <button
                          className={`upload-icon ${isAsrListening ? "asr-recording" : ""}`}
                          onClick={startAsrRecognition}
                          title={isAsrListening ? "停止语音输入" : "语音输入"}
                          disabled={uploading || chatLoading}
                        >
                          {isAsrListening ? "🔴" : "🎤"}
                        </button>
                      </div>
                      <input
                        value={chatInput}
                        onChange={(e) => setChatInput(e.target.value)}
                        placeholder="请输入问题..."
                        onKeyDown={(e) => {
                          // Don't send while an IME composition is in progress.
                          if (e.key === "Enter" && !e.nativeEvent.isComposing) sendChat();
                        }}
                      />
                      {uploading ? (
                        <div className="upload-progress">{uploadProgress}%</div>
                      ) : (
                        <button
                          onClick={sendChat}
                          disabled={chatLoading || !apiKey}
                        >
                          发送
                        </button>
                      )}
                      <input
                        ref={imageInputRef}
                        type="file"
                        accept="image/*"
                        style={{ display: "none" }}
                        onChange={(e) => {
                          onFilesPicked(e.target.files, "image");
                          // Reset so re-picking the same file fires onChange again.
                          if (e.target) e.target.value = "";
                        }}
                        multiple
                      />
                      <input
                        ref={fileInputRef}
                        type="file"
                        style={{ display: "none" }}
                        onChange={(e) => {
                          onFilesPicked(e.target.files, "file");
                          // Reset so re-picking the same file fires onChange again.
                          if (e.target) e.target.value = "";
                        }}
                        multiple
                      />
                    </div>
                  </div>
                </div>
              )}
            </div>
          </div>

          {/* Decorative background (removed) */}
        </section>
      </main>
      {/* Bottom interaction panel */}
      <div className="bottom-panel">
        <div className="interaction-center">
          <div className="glowing-orb">
            <div className="orb-core"></div>
            <div className="orb-ring"></div>
          </div>
        </div>
        <div className="action-buttons">
          <button
            className={`action-btn ${isSpeechListening ? "active" : ""}`}
            onClick={toggleSpeechToText}
            title={isSpeechListening ? "点击停止语音识别" : "点击开始语音转文字"}
            aria-label={isSpeechListening ? "停止语音识别" : "开始语音转文字"}
          >
            {isSpeechListening ? "🎤" : "💬"}
          </button>
          <button className="action-btn">⚙️</button>
        </div>
      </div>
    </div>
  );
}

// Default export of the digital-human showcase page component.
export default Index;
