import { Button, Input, Tooltip, message } from "antd";
import { useImperativeHandle, useRef, useState } from "react";
import { useAppDispatch, useAppSelector } from "@/store";
import { updateModel, setLoading } from "@/store/modules/chat";
import { LoadingOutlined, PauseCircleOutlined, SendOutlined, AudioOutlined, SoundOutlined } from "@ant-design/icons";
import type { IMyInputChildMethods, TEventSource, TState } from "..";
import { post } from "@/api/config";

const { TextArea } = Input;

// Props for the chat input component.
interface IMyInputProps {
  // Sends the given text to the conversation (provided by the parent).
  sendMessage: (message: string) => void;
  // Active SSE connection; closed when the user stops an in-flight answer.
  eventSource: TEventSource;
  // Ref through which the parent can drive the send-button state
  // (exposed via useImperativeHandle below).
  ref: React.Ref<IMyInputChildMethods>;
}
// Props for the send / loading / stop icon rendered next to the toolbar.
interface IMyInputIconProps {
  // Current send-button state: "default" | "loading" | "answering" (TState).
  state: TState;
  // Current textarea content; an empty value disables the send icon.
  value: string;
  handleStopAnswer: () => void;
  handleSendMessage: () => void;
  // While recording, sending is disabled — TODO confirm intended UX.
  isRecording?: boolean;
}

/**
 * Renders the action icon matching the current send-button state:
 * a send arrow ("default"), a spinner ("loading"), or a pause button
 * ("answering") that stops the streamed answer.
 */
const MyInputIcon = ({ state, value, handleStopAnswer, handleSendMessage, isRecording = false }: IMyInputIconProps) => {
  if (state === "loading") {
    return <LoadingOutlined className="text-20 !text-[#4096ff]" />;
  }

  if (state === "answering") {
    return (
      <Tooltip title="停止回答">
        <PauseCircleOutlined className="text-20 cursor-pointer !text-[#4096ff]" onClick={handleStopAnswer} />
      </Tooltip>
    );
  }

  // "default" state: send icon, clickable only with non-empty input,
  // no recording in progress, and no request in flight.
  const clickable = value && !isRecording;
  const sendClass = clickable ? "cursor-pointer !text-[#4096ff]" : "cursor-not-allowed !text-[#999]";
  const tooltipTitle = isRecording ? "正在录音，请稍候" : (value || "请输入问题");

  return (
    <Tooltip title={tooltipTitle}>
      <SendOutlined
        className={`text-20 ${sendClass}`}
        onClick={() => {
          if (value && !["loading", "answering"].includes(state) && !isRecording) {
            handleSendMessage();
          }
        }}
      />
    </Tooltip>
  );
};

/**
 * Chat input box with deep-thinking toggle, voice input (speech-to-text via
 * MediaRecorder upload) and whole-conversation text-to-speech.
 * Exposes `setSendBtnState` to the parent through `ref`.
 */
const MyInput = ({ sendMessage, eventSource, ref }: IMyInputProps) => {
  const [value, setValue] = useState(""); // current textarea content
  const [sendBtnState, setSendBtnState] = useState<TState>("default"); // send-button state
  const [deepThinking, setDeepThinking] = useState(false); // deep-thinking toggle (switches model)
  const dispatch = useAppDispatch();
  const [messageApi, contextHolder] = message.useMessage();
  const isComposing = useRef(false); // true while an IME composition session is active
  const [isRecording, setIsRecording] = useState(false); // microphone recording in progress
  const mediaRecorderRef = useRef<MediaRecorder | null>(null);
  const audioChunksRef = useRef<Blob[]>([]); // audio chunks of the current recording
  const [isGeneratingTTS, setIsGeneratingTTS] = useState(false); // global TTS request in flight
  const { messages } = useAppSelector(state => state.chat);

  // Let the parent reset/advance the send-button state (e.g. when a stream ends).
  useImperativeHandle(ref, () => ({ setSendBtnState }));

  /**
   * Insert a newline at the caret of the textarea and restore the caret after
   * React re-renders the controlled value (hence the setTimeout).
   */
  const insertNewlineAtCursor = (target: HTMLTextAreaElement) => {
    const lastIdx = value.length;
    const startIdx = target.selectionStart || lastIdx;
    const endIdx = target.selectionEnd || lastIdx;
    setValue(val => val.slice(0, startIdx) + "\n" + val.slice(endIdx));
    setTimeout(() => {
      target.selectionStart = startIdx + 1;
      target.selectionEnd = startIdx + 1;
    }, 0);
  };

  /**
   * Enter-key handler for the textarea.
   * 1. Empty input, or loading/answering state, blocks the key.
   *    (BUGFIX: the "回答输出中，请稍等" hint was unreachable — it sat after an
   *    earlier unconditional return for loading/answering; it now fires.)
   * 2. Key events fired while an IME composition is active are ignored.
   * 3. While recording, Enter stops the recording.
   * 4. Otherwise Enter and Shift+Enter both insert a newline; sending happens
   *    via the send icon (or automatically after voice recognition).
   */
  const handleConfirm = (event: React.KeyboardEvent<HTMLTextAreaElement>) => {
    if (value.trim() === "") {
      event.preventDefault();
      return;
    }
    if (["loading", "answering"].includes(sendBtnState)) {
      event.preventDefault();
      if (sendBtnState === "loading") {
        messageApi.error("回答输出中，请稍等");
      }
      return;
    }
    // Avoid the Enter-vs-IME conflict: confirming a composition also fires Enter.
    if (isComposing.current) return;

    // While recording, Enter ends the recording instead of editing text.
    if (isRecording) {
      event.preventDefault();
      mediaRecorderRef.current?.stop();
      return;
    }

    if (event.code === "Enter") {
      // Plain Enter must suppress the browser's own newline; Shift+Enter keeps
      // the default behaviour (matches the original implementation).
      if (!event.shiftKey) {
        event.preventDefault();
      }
      // BUGFIX: was cast to HTMLInputElement, which has no multi-line selection.
      insertNewlineAtCursor(event.target as HTMLTextAreaElement);
    }
  };

  // Toggle deep thinking: switches the chat model between r1 and v3.
  const handleDeepThinking = () => {
    const newDeepThinking = !deepThinking;
    dispatch(updateModel(newDeepThinking ? "deepseek-r1" : "deepseek-v3"));
    setDeepThinking(newDeepThinking);
  };

  // Abort the current streamed answer and reset the send button.
  const handleStopAnswer = () => {
    eventSource?.close();
    setSendBtnState("default");
  };

  // Send the current input unless it is empty or a request is already running.
  const handleSendMessage = () => {
    if (value.trim() === "" || ["loading", "answering"].includes(sendBtnState)) {
      return;
    }
    setSendBtnState("loading");
    sendMessage(value);
    setValue("");
  };

  /**
   * Toggle microphone recording. When the recording stops, the audio is
   * uploaded to the speech-recognition endpoint; the recognized text is put
   * into the input box and auto-sent after a short delay.
   */
  const handleVoiceInput = async () => {
    console.log('[语音调试] 当前 isRecording:', isRecording);
    if (isRecording) {
      console.log('[语音调试] 停止录音');
      mediaRecorderRef.current?.stop();
      setIsRecording(false);
      return;
    }
    // Start recording — bail out early when the browser has no microphone API.
    if (!navigator.mediaDevices || !navigator.mediaDevices.getUserMedia) {
      messageApi.error('当前浏览器不支持麦克风录音');
      console.error('[语音调试] navigator.mediaDevices 或 getUserMedia 不可用');
      return;
    }
    try {
      console.log('[语音调试] 尝试获取麦克风权限...');
      const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
      console.log('[语音调试] 获取到音频流:', stream);
      const mediaRecorder = new MediaRecorder(stream, { mimeType: 'audio/webm' });
      audioChunksRef.current = [];
      mediaRecorderRef.current = mediaRecorder;
      setIsRecording(true);

      messageApi.info('开始录音，请说话...');

      mediaRecorder.ondataavailable = (event) => {
        console.log('[语音调试] ondataavailable:', event);
        if (event.data.size > 0) {
          audioChunksRef.current.push(event.data);
        }
      };
      mediaRecorder.onstop = async () => {
        setIsRecording(false);
        console.log('[语音调试] 录音停止，准备上传');

        // Release the microphone.
        stream.getTracks().forEach(track => track.stop());

        const audioBlob = new Blob(audioChunksRef.current, { type: 'audio/webm' });
        console.log('[语音调试] 生成的音频Blob:', audioBlob);

        messageApi.loading({ content: '正在识别语音...', key: 'speech-recognition', duration: 0 });

        const formData = new FormData();
        formData.append('file', audioBlob, 'audio.webm');
        try {
          // fetch is used directly for the multipart upload; the browser sets
          // the Content-Type header (with boundary) itself.
          const response = await fetch('/dev-api/user/speech', {
            method: 'POST',
            body: formData,
          });

          if (!response.ok) {
            throw new Error(`HTTP ${response.status}: ${response.statusText}`);
          }

          const responseData = await response.json();
          console.log('[语音调试] 后端返回:', responseData);

          if (responseData && responseData.data && typeof responseData.data.text === 'string') {
            const recognizedText = responseData.data.text;
            console.log('[语音调试] 识别文本:', recognizedText);
            if (recognizedText.trim().length > 0) {
              // Show the recognized text in the input box first.
              setValue(recognizedText);
              messageApi.success({ content: '语音识别成功，即将发送消息', key: 'speech-recognition' });

              // Auto-send after 1s so the user can see the recognized text.
              // NOTE(review): sendBtnState is captured from the render that
              // created this recorder and may be stale here — verify.
              setTimeout(() => {
                if (recognizedText.trim() && !["loading", "answering"].includes(sendBtnState)) {
                  setSendBtnState("loading");
                  sendMessage(recognizedText);
                  setValue("");
                }
              }, 1000);
            } else {
              messageApi.warning({ content: '未识别到有效语音，请靠近麦克风或提高音量', key: 'speech-recognition' });
            }
          } else {
            console.log('[语音调试] 识别失败，responseData:', responseData);
            messageApi.error({ content: '语音识别失败', key: 'speech-recognition' });
          }
        } catch (err) {
          console.error('[语音调试] 上传或识别失败:', err);
          messageApi.error({ content: '语音识别请求失败', key: 'speech-recognition' });
        }
      };
      mediaRecorder.onerror = (event) => {
        console.error('[语音调试] mediaRecorder 错误:', event);
        messageApi.error('录音出错');
        setIsRecording(false);
      };
      mediaRecorder.start();
      console.log('[语音调试] 开始录音');
    } catch (err) {
      console.error('[语音调试] getUserMedia 捕获异常:', err);
      messageApi.error('无法访问麦克风');
    }
  };

  /**
   * Strip Markdown/HTML syntax from a message so it reads naturally when
   * converted to speech.
   */
  const cleanMarkdownForTTS = (text: string): string => {
    return text
      // ATX headings at line start (# … ######)
      .replace(/^#{1,6}\s+/gm, '')
      // bold (**text** / __text__)
      .replace(/\*\*(.*?)\*\*/g, '$1')
      .replace(/__(.*?)__/g, '$1')
      // italics (*text* / _text_)
      .replace(/\*([^*]+)\*/g, '$1')
      .replace(/_([^_]+)_/g, '$1')
      // fenced code blocks (dropped entirely)
      .replace(/```[\s\S]*?```/g, '')
      // inline code — keep the content
      .replace(/`([^`]+)`/g, '$1')
      // links — keep the link text
      .replace(/\[([^\]]+)\]\([^)]+\)/g, '$1')
      // images — keep the alt text
      .replace(/!\[([^\]]*)\]\([^)]+\)/g, '$1')
      // blockquote markers
      .replace(/^>\s+/gm, '')
      // bullet list markers
      .replace(/^[\s]*[-*+]\s+/gm, '')
      // numbered list markers
      .replace(/^[\s]*\d+\.\s+/gm, '')
      // horizontal rules
      .replace(/^[\s]*[-*_]{3,}[\s]*$/gm, '')
      // table pipes
      .replace(/\|/g, ' ')
      // HTML tags
      .replace(/<[^>]+>/g, '')
      // leftover bold markers
      .replace(/\*\*/g, '')
      // BUGFIX: residual heading markers used to be removed shortest-first
      // (## before ### …), so '#####' degraded to '#' and odd-length runs
      // survived; one pass over runs of 2+ '#' removes them all.
      .replace(/#{2,}/g, '')
      // collapse blank lines
      .replace(/\n\s*\n/g, '\n')
      .trim();
  };

  /**
   * Convert the whole conversation to speech via the TTS endpoint. The canned
   * assistant greeting is excluded and Markdown is stripped first.
   */
  const handleGlobalTextToSpeech = async () => {
    if (messages.length <= 1) {
      messageApi.warning('暂无对话内容可转换');
      return;
    }

    const loadingKey = 'global-tts-loading';
    try {
      setIsGeneratingTTS(true);

      // Build the payload, skipping the canned assistant greeting.
      const conversationData = messages
        .filter(msg => msg.role !== 'assistant' || msg.content !== 'Hi~ 我是`鼎盛` 您身边的智能助手，可以为你答疑解惑、精读文档、尽情创作 让鼎盛助您轻松工作，多点生活。')
        .map(msg => ({
          role: msg.role,
          content: cleanMarkdownForTTS(msg.content),
          // Include cleaned reasoning content when present.
          ...(msg.reasoningContent ? { reasoningContent: cleanMarkdownForTTS(msg.reasoningContent) } : {}),
        }));

      messageApi.loading({
        content: `正在生成语音...（共${conversationData.length}条消息，预计需要${Math.ceil(conversationData.length * 0.5)}-${Math.ceil(conversationData.length * 1.5)}分钟）`,
        key: loadingKey,
        duration: 0 // keep open until replaced or destroyed
      });

      const response = await post<{success: boolean; audioUrl?: string; error?: string}>('/user/tts', {
        conversation: conversationData
      }, {
        timeout: 600000 // 10-minute timeout — enough for long conversations
      });

      if (response.data?.success) {
        // BUGFIX: the duration-0 loading toast was never closed on success,
        // so it stayed on screen forever. Success itself stays silent.
        messageApi.destroy(loadingKey);
      } else {
        messageApi.error({
          content: '语音生成失败，请稍后重试',
          key: loadingKey
        });
      }
    } catch (error) {
      console.error('全局TTS错误:', error);

      // Map the different error shapes onto a user-facing message.
      let errorMessage = '语音生成失败，请稍后重试';
      if (error instanceof Error) {
        errorMessage = error.name === 'AbortError'
          ? '请求超时，请稍后重试'
          : (error.message || '语音生成失败，请稍后重试');
      } else if (typeof error === 'string') {
        errorMessage = error;
      }

      messageApi.error({
        content: errorMessage,
        key: loadingKey
      });
    } finally {
      setIsGeneratingTTS(false);
    }
  };

  return (
    <>
      {contextHolder}
      <div className="border border-gray-300 p-8 rounded-8 hover:border-[#4096ff] transition-all duration-200">
        <TextArea
          autoSize={{ minRows: 2, maxRows: 6 }}
          placeholder="有问题，尽管问，shift+enter换行"
          className="!border-none !shadow-none"
          value={value}
          onChange={e => setValue(e.target.value)}
          onPressEnter={handleConfirm}
          onCompositionStart={() => isComposing.current = true}
          onCompositionEnd={() => isComposing.current = false}
        />
        <div className="flex items-center justify-between">
          <div className="flex items-center gap-8">
            <Button color={deepThinking ? "primary" : "default"} variant="outlined" onClick={handleDeepThinking}>
              深度思考
            </Button>
            <Tooltip title={isRecording ? "点击停止录音" : "点击开始录音，说话后自动识别并发送"}>
              <Button
                icon={<AudioOutlined />}
                type={isRecording ? 'primary' : 'default'}
                danger={isRecording}
                onClick={handleVoiceInput}
              >
                {isRecording ? '停止语音' : '语音输入'}
              </Button>
            </Tooltip>
            <Button
              icon={<SoundOutlined />}
              loading={isGeneratingTTS}
              onClick={handleGlobalTextToSpeech}
              disabled={messages.length <= 1}
              title="将整个对话转换为语音（用户男声，AI女声）"
            >
              文字转语音
            </Button>
          </div>
          <MyInputIcon state={sendBtnState} value={value} handleStopAnswer={handleStopAnswer} handleSendMessage={handleSendMessage} isRecording={isRecording} />
        </div>
      </div>
    </>
  );
};

export default MyInput;
