import { View, Text, Input, Image, ScrollView, Button } from '@tarojs/components'
import { useState, useEffect, useRef, useCallback } from 'react'
import Taro from '@tarojs/taro'
import { ui } from '../../utils/platform'
import { useAuth } from '../../contexts/AuthContext'
import UserProfile from '../../components/userprofile'
import EmotionChart from '../../components/EmotionChart'

import { getHistoryMessages, sendMessage } from '../../services/api'
import { BASE_URL } from '../../config/env'
import './index.less'
import defaultAvatar from '../../assets/images/default-avatar.png'
import decorationImage from '../../assets/decoration.jpg'

// Platform speech-capability detection (module scope, evaluated once).
// WeChat mini-program: the "WechatSI" simultaneous-interpretation plugin
// supplies recognition (manager) and synthesis. H5: fall back to the
// browser's Web Speech APIs when present.
let plugin = null
let manager = null
let webSpeechRecognition = null
let webSpeechSynthesis = null

if (Taro.getEnv() === Taro.ENV_TYPE.WEAPP) {
  plugin = requirePlugin('WechatSI')
  manager = plugin.getRecordRecognitionManager()
} else if (Taro.getEnv() === Taro.ENV_TYPE.WEB) {
  // H5: Web Speech API (vendor-prefixed in some browsers).
  if (typeof window !== 'undefined') {
    webSpeechRecognition = window.SpeechRecognition || window.webkitSpeechRecognition
    webSpeechSynthesis = window.speechSynthesis
  }
}

export default function Index() {
  // Auth context: logged-in user info, auth flag, and logout action.
  const { userInfo, isLoggedIn, logout } = useAuth()
  
  // Debug logging (left in during development).
  console.log('Index页面 - userInfo:', userInfo);
  console.log('Index页面 - isLoggedIn:', isLoggedIn);
  
  // Text currently in the input box (typed or from voice recognition).
  const [searchText, setSearchText] = useState('')
  // Overflow menu visibility.
  const [menuVisible, setMenuVisible] = useState(false)
  // User-profile drawer visibility.
  const [userProfileVisible, setUserProfileVisible] = useState(false)
  // Emotion-analysis drawer visibility.
  const [emotionChartVisible, setEmotionChartVisible] = useState(false)

  // Chat message list (user + AI messages, chronological).
  const [chatHistory, setChatHistory] = useState([])
  // True while an older page of history is being fetched.
  const [isLoadingMore, setIsLoadingMore] = useState(false)
  // Paging offset for the next history fetch.
  const [currentBegin, setCurrentBegin] = useState(0)
  // True once the server has no more history to return.
  const [hasLoadedAll, setHasLoadedAll] = useState(false)
  // Text of the AI reply currently being streamed.
  const [streamingResponse, setStreamingResponse] = useState('')
  // True while a streamed reply is in progress.
  const [isStreaming, setIsStreaming] = useState(false)
  // Id of the placeholder AI message the current stream belongs to.
  const [currentMessageId, setCurrentMessageId] = useState(null)
  // Number of messages the user has sent this session (adjusts paging).
  const [countMyMessage, setCountMyMessage] = useState(0)
  // True when the user manually scrolled away from the bottom.
  const [userScrolledUp, setUserScrolledUp] = useState(false)
  // Ref to the chat ScrollView container (H5 scrolling).
  const scrollViewRef = useRef(null)
  // True when the user interrupted a streaming reply.
  const [isInterrupted, setIsInterrupted] = useState(false)
  // Voice-recognition state.
  const [isRecording, setIsRecording] = useState(false)
  const [voiceText, setVoiceText] = useState('')
  const [recordingStatus, setRecordingStatus] = useState(0) // 0: idle, 1: recording, 2: recognizing
  
  // Speech-playback state.
  const [playingMessageId, setPlayingMessageId] = useState(null)
  const [isPlaying, setIsPlaying] = useState(false)
  const [audioContext, setAudioContext] = useState(null)
  // Load the first page of chat history once the user is logged in.
  useEffect(() => {
    if (isLoggedIn) {
      fetchChatHistory(0)
    }
  }, [isLoggedIn])
  
  // Wire up the WeChat voice-recognition manager's lifecycle callbacks.
  // Runs once on mount; no-op on H5 where `manager` is null.
  useEffect(() => {
    if (manager) {
      // Fired when recording/recognition starts.
      manager.onStart = (res) => {
        console.log('语音识别开始:', res)
        setRecordingStatus(1)
        setVoiceText('正在聆听中...')
      }
      
      // Fired when recognition finishes with a final result.
      // NOTE(review): handleSearch here is the first-render closure (deps []),
      // so it likely reads the initial searchText rather than the recognized
      // text set just above — verify the recognized text is actually sent.
      manager.onStop = (res) => {
        console.log('语音识别结束:', res)
        setIsRecording(false)
        setRecordingStatus(0)
        
        if (res.result && res.result.trim()) {
          setSearchText(res.result)
          setVoiceText('')
          // Auto-send the recognized text after the state update settles.
          setTimeout(() => {
            if (res.result.trim()) {
              setSearchText(res.result)
              handleSearch()
            }
          }, 100)
        } else {
          setVoiceText('未识别到语音内容')
          setTimeout(() => setVoiceText(''), 2000)
        }
      }
      
      // Fired on recognition failure; maps plugin retcodes to user-facing messages.
      manager.onError = (res) => {
        console.error('语音识别错误:', res)
        setIsRecording(false)
        setRecordingStatus(0)
        setVoiceText('')
        
        let errorMsg = '语音识别失败'
        let showRetry = true
        
        switch (res.retcode) {
          case -30001:
            errorMsg = '录音接口出错，请检查设备麦克风'
            break
          case -30002:
            errorMsg = '录音被暂停'
            showRetry = false
            break
          case -30003:
            errorMsg = '录音数据传输失败，请重试'
            break
          case -30004:
            errorMsg = '网络异常，请检查网络连接后重试'
            break
          case -30005:
            errorMsg = '语音识别服务暂时不可用'
            break
          case -30006:
            errorMsg = '识别超时，请说话时间控制在30秒内'
            break
          case -30007:
            errorMsg = '启动参数错误，请重新尝试'
            break
          case -30008:
            errorMsg = '网络请求失败，请检查网络连接'
            break
          case -30011:
            // "Recognition in progress" is a normal state — suppress the prompt.
            console.log('正在识别中，忽略此状态提示')
            return
          case -30012:
            // "No active recognition task" is a normal transition — suppress it.
            console.log('当前无识别任务，忽略此错误')
            return
          case -40001:
            errorMsg = '调用频率过高，请稍后再试'
            break
          default:
            errorMsg = `语音识别失败，错误码: ${res.retcode}`
        }
        
        if (showRetry) {
          ui.showModal({
          title: '语音识别失败',
          content: errorMsg,
          showCancel: true,
          cancelText: '取消',
          confirmText: '重试',
          success: (modalRes) => {
            if (modalRes.confirm) {
              // Delay the retry to avoid hammering the plugin.
              setTimeout(() => {
                startVoiceRecognition()
              }, 1000)
            }
          }
        })
        } else {
          ui.showToast({
            title: errorMsg,
            icon: 'none',
            duration: 2000
          })
        }
      }
    }
  }, [])
  
  // Cleanup: cancel the load-more debounce timer, stop any active recording,
  // and release audio-playback resources.
  // NOTE(review): deps are [isPlaying, isRecording], so this cleanup also runs
  // on every change of those flags, not only on unmount — confirm the
  // repeated stop()/destroy() calls are harmless.
  useEffect(() => {
    return () => {
      if (scrollToUpperTimeoutRef.current) {
        clearTimeout(scrollToUpperTimeoutRef.current)
      }
      // Only stop recognition when a recording is actually in progress.
      if (manager && isRecording) {
        try {
          manager.stop()
        } catch (error) {
          console.log('清理语音识别资源时出错:', error)
        }
      }
      // Stop playback and release the audio context.
       if (audioContext) {
         try {
           audioContext.stop()
           audioContext.destroy()
         } catch (error) {
           console.log('清理音频资源时出错:', error)
         }
       }
       if (isPlaying) {
         setIsPlaying(false)
         setPlayingMessageId(null)
       }
    }
  }, [isPlaying, isRecording])
  
  // Auto-scroll to the bottom when messages change, unless the user has
  // scrolled up; always follow the stream while a reply is being received.
  useEffect(() => {
    // Auto-scroll only when:
    // 1. there are messages, no history load is running, and the user has
    //    not manually scrolled up; or
    // 2. a streamed reply is in progress (forced follow).
    if ((chatHistory.length > 0 && !isLoadingMore && !userScrolledUp) || isStreaming) {
      // Small delay so the DOM has updated before measuring.
      setTimeout(() => scrollToBottom(), 100)
    }
  }, [chatHistory, streamingResponse, isStreaming, isLoadingMore])
  
  // Scroll handler: track whether the user has scrolled away from the
  // bottom, so auto-scroll on new messages can be suppressed.
  const handleScroll = (e) => {
    if (!scrollViewRef.current) return

    const { scrollTop, scrollHeight, clientHeight } = e.detail

    console.log('滚动位置:', scrollTop, '容器高度:', scrollHeight, '可视区域:', clientHeight)

    // How far above the bottom edge the viewport currently sits.
    const distanceFromBottom = scrollHeight - (scrollTop + clientHeight)

    // More than 50px above the bottom counts as a manual scroll-up.
    setUserScrolledUp(distanceFromBottom > 50)

    // Within 10px of the bottom: definitely back at the bottom.
    if (distanceFromBottom <= 10) {
      setUserScrolledUp(false)
    }
  }
  
  // Scroll the chat to the bottom (platform-specific implementation).
  const scrollToBottom = useCallback(() => {
    // Double requestAnimationFrame: wait until after the next paint so the
    // measured heights include the newest message.
    requestAnimationFrame(() => {
      requestAnimationFrame(() => {
        if (Taro.getEnv() === Taro.ENV_TYPE.WEAPP) {
          // WeChat mini-program: measure the chat list and viewport, then
          // scroll the page to the list's full height.
          const query = Taro.createSelectorQuery()
          query.select('#chat-scroll-view')
            .boundingClientRect()
          query.selectViewport()
            .scrollOffset()
          query.exec((res) => {
            if (res && res[0] && res[1]) {
              const scrollHeight = res[0].height
              const currentScrollTop = res[1].scrollTop
              const viewportHeight = res[1].height
              
              console.log('微信滚动:', scrollHeight, currentScrollTop, viewportHeight)
              
              // Scroll the page to the bottom of the chat list.
              Taro.pageScrollTo({
                scrollTop: scrollHeight,
                duration: 200
              })
            }
          })
        } else if (scrollViewRef && scrollViewRef.current) {
          // H5: scroll the container element directly.
          const scrollView = scrollViewRef.current
          console.log('H5滚动容器:', scrollView, scrollView?.scrollHeight)
          
          if (scrollView && typeof scrollView.scrollTo === 'function') {
            scrollView.scrollTo({
              top: scrollView.scrollHeight,
              behavior: isStreaming ? 'auto' : 'smooth' // 'auto' keeps up better during streaming
            })
          } else if (scrollView) {
            // Fallback for elements without scrollTo().
            scrollView.scrollTop = scrollView.scrollHeight
          }
        }
      })
    })
  }, [isStreaming])

  // WeChat-mini-program-only fallback: measure the chat list and scroll the
  // page to its full height after a short delay so the DOM can settle.
  const weappScrollToBottom = useCallback(() => {
    if (Taro.getEnv() !== Taro.ENV_TYPE.WEAPP) return

    setTimeout(() => {
      const query = Taro.createSelectorQuery()
      query.select('#chat-scroll-view').boundingClientRect()
      query.selectViewport().scrollOffset()
      query.exec((res) => {
        const listRect = res && res[0]
        const viewport = res && res[1]
        if (listRect && viewport) {
          Taro.pageScrollTo({
            scrollTop: listRect.height,
            duration: 200
          })
        }
      })
    }, 150)
  }, [])

  // Fetch a page of chat history, merging AI and user messages.
  // begin: paging offset; 0 = initial load (replaces the list and scrolls
  // to the bottom), >0 = "load older" (prepends to the list).
  const fetchChatHistory = async (begin = 0) => {
    if (hasLoadedAll && begin > 0) return

    // Declared before try so the finally block can also enforce the minimum
    // loading-animation duration (previously declared inside try, which made
    // the catch path throw a ReferenceError on `startTime`).
    const startTime = Date.now()
    const minLoadingTime = 1000 // keep the loading animation visible ≥ 1s

    try {
      setIsLoadingMore(true)

      // AI (type 2) and user (type 1) messages are independent — fetch in parallel.
      const [aiResult, userResult] = await Promise.all([
        getHistoryMessages(begin, 10, 2),
        getHistoryMessages(begin, 10, 1)
      ])

      if (aiResult.code === 100200 && userResult.code === 100200) {
        // Merge both lists; AI messages get pre-parsed display content.
        const combinedMessages = [
          ...aiResult.data.map(msg => ({
            ...msg,
            parsedContent: parseAIMessage(msg.sentence)
          })),
          ...userResult.data
        ]

        // Chronological order.
        combinedMessages.sort((a, b) => a.time - b.time)

        if (begin > 0) {
          // Loading older history: prepend to the existing list.
          setChatHistory(prev => [...combinedMessages, ...prev])
        } else {
          // Initial load: replace the list and scroll to the bottom.
          setChatHistory(combinedMessages)
          setUserScrolledUp(false) // reset the manual-scroll flag
          setTimeout(() => {
            scrollToBottom()
            // Extra nudge for the mini-program.
            if (Taro.getEnv() === Taro.ENV_TYPE.WEAPP) {
              weappScrollToBottom()
            }
          }, 200)
        }

        // Advance the paging cursor, accounting for messages the user sent
        // during this session.
        setCurrentBegin(begin + 10 + countMyMessage)

        // Fewer than a full page means there is no more history.
        if (combinedMessages.length < 10) {
          setHasLoadedAll(true)
        }
      }
    } catch (error) {
      ui.showToast({
        title: '获取历史对话失败',
        icon: 'none'
      })
      console.error('获取历史对话失败', error)
    } finally {
      // On both success and failure, keep the animation up for at least
      // minLoadingTime before clearing the loading flag.
      const elapsedTime = Date.now() - startTime
      if (elapsedTime < minLoadingTime) {
        await new Promise(resolve => setTimeout(resolve, minLoadingTime - elapsedTime))
      }
      setIsLoadingMore(false)
    }
  }
  
  // Normalize a raw AI message into the shape the chat UI renders.
  // Escaped newline sequences in the payload are converted to real newlines.
  const parseAIMessage = (message) => {
    const base = { isJson: false, emotion: '中性', score: '0' };

    if (!message) {
      return { ...base, response: '' };
    }

    // Unescape newlines: double-escaped (\\n) before single-escaped (\n),
    // and paragraph breaks before single breaks — same order as cleanStreamData.
    const response = message
      .replace(/\\\\n\\\\n/g, '\n\n')
      .replace(/\\\\n/g, '\n')
      .replace(/\\n\\n/g, '\n\n')
      .replace(/\\n/g, '\n');

    console.log('原始消息:', message);
    console.log('处理后消息:', response);

    return { ...base, response };
  }
  
  // Strip SSE framing from an accumulated stream buffer: drop the "data:"
  // prefix from each line, concatenate the payload fragments, and unescape
  // newline sequences.
  const cleanStreamData = (text) => {
    console.log('原始数据:', text);

    if (!text) return '';

    // Remove the leading "data:" marker from every line, then glue the
    // fragments back together — real line breaks arrive encoded as escaped
    // \n sequences inside the payload, not as transport newlines.
    const joined = text
      .split('\n')
      .map(line => (line.startsWith('data:') ? line.slice(5) : line))
      .join('');

    // Unescape newlines: double-escaped before single-escaped, paragraph
    // breaks before single breaks (same order as parseAIMessage).
    const result = joined
      .replace(/\\\\n\\\\n/g, '\n\n')
      .replace(/\\\\n/g, '\n')
      .replace(/\\n\\n/g, '\n\n')
      .replace(/\\n/g, '\n');

    console.log('清理后数据:', result);

    return result;
  };
  
  // Debounce handle for the scroll-to-top "load more" trigger.
  const scrollToUpperTimeoutRef = useRef(null)

  // When the user reaches the top of the chat list, load older history.
  // Debounced (800ms) and guarded so it never fires while history is
  // already loading, fully loaded, or a reply is streaming.
  const handleScrollToUpper = useCallback(() => {
    if (isLoadingMore || hasLoadedAll || isStreaming) return

    // Restart the debounce window on every trigger.
    if (scrollToUpperTimeoutRef.current) {
      clearTimeout(scrollToUpperTimeoutRef.current)
    }

    scrollToUpperTimeoutRef.current = setTimeout(() => {
      // Re-check the guards — state may have changed during the debounce window.
      const canLoad = !isLoadingMore && !hasLoadedAll && !isStreaming && chatHistory.length > 0
      if (canLoad) {
        console.log('触发下拉刷新加载历史消息')
        fetchChatHistory(currentBegin)
      }
    }, 800)
  }, [isLoadingMore, hasLoadedAll, currentBegin, isStreaming, chatHistory.length])
  
  // On every page show: redirect to the login page when not authenticated.
  Taro.useDidShow(() => {
    console.log('Page showed.')
    
    // Not logged in — go to the login page.
    if (!isLoggedIn) {
      ui.navigateTo({
        url: '/pages/login/index'
      })
    }
    // Intentionally no auto-scroll here, to avoid jumps during menu actions.
  })

  // Return false (and show a toast) when the device is offline; true
  // otherwise. A failed probe resolves to true so the caller is never
  // blocked by the check itself.
  const checkNetworkStatus = async () => {
    const showOfflineToast = () => {
      ui.showToast({
        title: '网络连接异常，请检查网络设置',
        icon: 'none',
        duration: 2000
      })
    }

    try {
      if (Taro.getEnv() === Taro.ENV_TYPE.WEB) {
        // H5: navigator.onLine is the best available signal.
        if (typeof navigator !== 'undefined' && !navigator.onLine) {
          showOfflineToast()
          return false
        }
        return true
      }

      // Mini-program: ask the platform for the current network type.
      const { networkType } = await Taro.getNetworkType()
      if (networkType === 'none') {
        showOfflineToast()
        return false
      }
      return true
    } catch (error) {
      console.error('检查网络状态失败:', error)
      return true // on probe failure, let the caller proceed
    }
  }

  // Play a message aloud via text-to-speech.
  // - WeChat mini-program: WechatSI plugin synthesis + InnerAudioContext playback.
  // - H5: Web Speech Synthesis API.
  // Tapping the currently-playing message stops it; tapping a different
  // message stops the current one and starts the new one.
  const playTextToSpeech = async (text, messageId) => {
    // Guard the window access: `window` does not exist in the WeChat
    // mini-program runtime (same guard as the module-level detection),
    // so the previous unguarded read could throw there.
    const speechSynthesis = typeof window !== 'undefined'
      ? (window.speechSynthesis || window.webkitSpeechSynthesis)
      : null

    // Bail out when no synthesis backend exists or there is nothing to say.
    if ((!plugin && !speechSynthesis) || !text || text.trim() === '') {
      ui.showToast({
        title: '当前浏览器不支持语音播放',
        icon: 'none'
      })
      console.log('语音播放检测结果:', {
        plugin: !!plugin,
        speechSynthesis: !!speechSynthesis,
        webSpeechSynthesis: !!webSpeechSynthesis,
        text: text
      })
      return
    }

    try {
      // Another message is playing: stop it before starting this one.
      if (isPlaying && playingMessageId !== messageId) {
        stopTextToSpeech()
      }

      // This message is already playing: treat the tap as "stop".
      if (isPlaying && playingMessageId === messageId) {
        stopTextToSpeech()
        return
      }

      setIsPlaying(true)
      setPlayingMessageId(messageId)

      if (plugin) {
        // Mini-program: synthesize speech to an audio file, then play it.
        plugin.textToSpeech({
          lang: 'zh_CN',
          content: text,
          success: (res) => {
            console.log('语音合成成功', res)
            if (res.retcode === 0 && res.filename) {
              const innerAudioContext = Taro.createInnerAudioContext()
              // Keep a reference so stopTextToSpeech/unmount can clean it up.
              setAudioContext(innerAudioContext)

              innerAudioContext.src = res.filename
              innerAudioContext.autoplay = true

              innerAudioContext.onPlay(() => {
                console.log('开始播放语音')
              })

              innerAudioContext.onEnded(() => {
                console.log('语音播放结束')
                setIsPlaying(false)
                setPlayingMessageId(null)
                innerAudioContext.destroy()
                setAudioContext(null)
              })

              innerAudioContext.onError((err) => {
                console.error('音频播放错误:', err)
                ui.showToast({
                  title: '语音播放失败',
                  icon: 'none'
                })
                setIsPlaying(false)
                setPlayingMessageId(null)
                innerAudioContext.destroy()
                setAudioContext(null)
              })
            } else {
              console.error('语音合成失败:', res)
              ui.showToast({
                title: '语音合成失败',
                icon: 'none'
              })
              setIsPlaying(false)
              setPlayingMessageId(null)
            }
          },
          fail: (err) => {
            console.error('语音合成失败', err)
            ui.showToast({
              title: '语音播放失败',
              icon: 'none'
            })
            setIsPlaying(false)
            setPlayingMessageId(null)
          }
        })
      } else if (speechSynthesis) {
        // H5: speak directly with the Web Speech API.
        const utterance = new SpeechSynthesisUtterance(text)
        utterance.lang = 'zh-CN'
        utterance.rate = 1
        utterance.pitch = 1
        utterance.volume = 1

        utterance.onstart = () => {
          console.log('开始播放语音')
        }

        utterance.onend = () => {
          console.log('语音播放结束')
          setIsPlaying(false)
          setPlayingMessageId(null)
        }

        utterance.onerror = (err) => {
          console.error('语音播放错误:', err)
          // 'interrupted' means the user cancelled playback — not an error.
          if (err.error !== 'interrupted') {
            ui.showToast({
              title: '语音播放失败',
              icon: 'none'
            })
          }
          setIsPlaying(false)
          setPlayingMessageId(null)
        }

        console.log('开始语音合成播放:', text)
        speechSynthesis.speak(utterance)
      }
    } catch (error) {
      console.error('语音播放错误:', error)
      ui.showToast({
        title: '语音播放出错',
        icon: 'none'
      })
      setIsPlaying(false)
      setPlayingMessageId(null)
    }
  }

  // Stop any in-progress speech playback (mini-program audio or H5
  // synthesis) and reset the playback state.
  const stopTextToSpeech = () => {
    // Mini-program path: stop and release the InnerAudioContext.
    if (audioContext) {
      try {
        audioContext.stop()
        audioContext.destroy()
        setAudioContext(null)
      } catch (error) {
        console.error('停止音频播放错误:', error)
      }
    }

    // H5 path: cancel Web Speech synthesis. Guard the window access —
    // `window` does not exist in the WeChat mini-program runtime, so the
    // previous unguarded read could throw there.
    const speechSynthesis = typeof window !== 'undefined'
      ? (window.speechSynthesis || window.webkitSpeechSynthesis)
      : null
    if (speechSynthesis && speechSynthesis.speaking) {
      console.log('停止语音合成播放')
      speechSynthesis.cancel()
    }

    setIsPlaying(false)
    setPlayingMessageId(null)
  }

  // Verify microphone/recording permission for the current platform.
  // Returns true when recording may proceed, false otherwise.
  const checkRecordPermission = async () => {
    try {
      // --- H5: probe getUserMedia to confirm microphone access ---
      if (Taro.getEnv() === Taro.ENV_TYPE.WEB) {
        const mediaDevices = typeof navigator !== 'undefined' ? navigator.mediaDevices : undefined
        if (!mediaDevices || !mediaDevices.getUserMedia) {
          ui.showToast({
            title: '当前浏览器不支持麦克风访问',
            icon: 'none',
            duration: 2000
          })
          return false
        }
        try {
          // Request the mic, then release it immediately — this is only a
          // permission probe, not an actual recording.
          const stream = await mediaDevices.getUserMedia({ audio: true })
          stream.getTracks().forEach(track => track.stop())
          return true
        } catch (error) {
          console.error('麦克风权限被拒绝:', error)
          ui.showToast({
            title: '需要麦克风权限才能使用语音识别',
            icon: 'none',
            duration: 2000
          })
          return false
        }
      }

      // --- WeChat mini-program: consult the recording scope setting ---
      const { authSetting } = await Taro.getSetting()
      const recordAuth = authSetting['scope.record']

      if (recordAuth === false) {
        // Previously denied — guide the user to the settings page.
        ui.showModal({
          title: '需要录音权限',
          content: '请在设置中开启录音权限以使用语音输入功能',
          showCancel: false,
          confirmText: '去设置',
          success: () => {
            Taro.openSetting()
          }
        })
        return false
      }

      if (recordAuth === undefined) {
        // Never asked — request authorization now.
        try {
          await Taro.authorize({ scope: 'scope.record' })
          return true
        } catch (error) {
          console.log('用户拒绝了录音权限')
          ui.showToast({
            title: '需要录音权限才能使用语音输入',
            icon: 'none',
            duration: 2000
          })
          return false
        }
      }

      // Already granted.
      return true
    } catch (error) {
      console.error('检查录音权限失败:', error)
      return false
    }
  }

  // Start voice recognition on the current platform.
  // WeChat mini-program: delegates to the WechatSI manager (results arrive
  // via the callbacks wired up in the mount effect).
  // H5: creates a one-shot Web Speech Recognition session with inline handlers.
  const startVoiceRecognition = async () => {
    if (!manager && !webSpeechRecognition) {
      ui.showToast({
        title: '当前浏览器不支持语音识别',
        icon: 'none'
      })
      return
    }
    
    // Ignore re-entrant taps while already recording.
    if (isRecording) {
      return
    }
    
    // Abort early when offline.
    const hasNetwork = await checkNetworkStatus()
    if (!hasNetwork) {
      return
    }
    
    // Abort when microphone permission is missing or denied.
    const hasPermission = await checkRecordPermission()
    if (!hasPermission) {
      return
    }
    
    try {
      setIsRecording(true)
      setRecordingStatus(1)
      setVoiceText('正在聆听中...')
      
      if (manager) {
        // Mini-program: start plugin recognition.
        manager.start({
          duration: 30000, // cap at 30 seconds
          lang: 'zh_CN' // Mandarin
        })
      } else if (webSpeechRecognition) {
        // H5: Web Speech Recognition session.
        const recognition = new webSpeechRecognition()
        recognition.lang = 'zh-CN'
        recognition.continuous = false
        recognition.interimResults = true  // report interim transcripts
        recognition.maxAlternatives = 1
        
        // Optional service endpoint (only set where the property exists).
        if ('serviceURI' in recognition) {
          recognition.serviceURI = 'wss://speech.platform.bing.com/speech/recognition/interactive/cognitiveservices/v1'
        }
        
        // Watchdog state: abort when no speech is detected in time.
        let speechTimeout = null
        let hasReceivedSpeech = false
        
        recognition.onstart = () => {
          console.log('语音识别开始')
          setRecordingStatus(1)
          setVoiceText('正在聆听中...')
          hasReceivedSpeech = false
          
          // 15-second no-speech timeout.
          speechTimeout = setTimeout(() => {
            if (!hasReceivedSpeech) {
              recognition.stop()
              ui.showToast({
                title: '请说话，未检测到语音输入',
                icon: 'none',
                duration: 2000
              })
            }
          }, 15000)
        }
        
        recognition.onspeechstart = () => {
          console.log('检测到语音输入')
          hasReceivedSpeech = true
          setVoiceText('正在识别中...')
          if (speechTimeout) {
            clearTimeout(speechTimeout)
            speechTimeout = null
          }
        }
        
        // NOTE(review): handleSearch below reads searchText from this render's
        // closure, so it may not see the transcript set just before it —
        // verify the recognized text is actually what gets sent.
        recognition.onresult = (event) => {
          console.log('语音识别结果:', event)
          hasReceivedSpeech = true
          
          if (event.results.length > 0) {
            const result = event.results[event.results.length - 1]
            const transcript = result[0].transcript
            console.log('识别到的文本:', transcript)
            
            if (result.isFinal) {
              setSearchText(transcript)
              setVoiceText('')
              // Auto-send the recognized text after the state update settles.
              setTimeout(() => {
                if (transcript.trim()) {
                  setSearchText(transcript)
                  handleSearch()
                }
              }, 100)
            } else {
              // Show the interim transcript while speaking.
              setVoiceText(`正在识别: ${transcript}`)
            }
          }
        }
        
        // Map Web Speech errors to user-facing messages.
        recognition.onerror = (event) => {
          console.error('语音识别错误:', event)
          setIsRecording(false)
          setRecordingStatus(0)
          setVoiceText('')
          
          // Cancel the no-speech watchdog.
          if (speechTimeout) {
            clearTimeout(speechTimeout)
            speechTimeout = null
          }
          
          let errorMsg = '语音识别失败'
          switch (event.error) {
            case 'no-speech':
              // Different hint depending on whether any speech was ever detected.
              errorMsg = hasReceivedSpeech ? '语音识别失败，请重试' : '未检测到语音，请靠近麦克风重试'
              break
            case 'audio-capture':
              errorMsg = '无法访问麦克风，请检查权限'
              break
            case 'not-allowed':
              errorMsg = '麦克风权限被拒绝'
              break
            case 'network':
              errorMsg = '网络错误，请检查网络连接'
              break
            case 'aborted':
              // User-initiated stop — no error message.
              return
            default:
              errorMsg = `语音识别失败: ${event.error}`
          }
          
          ui.showToast({
            title: errorMsg,
            icon: 'none',
            duration: 2000
          })
        }
        
        recognition.onend = () => {
          console.log('语音识别结束')
          
          // Cancel the no-speech watchdog.
          if (speechTimeout) {
            clearTimeout(speechTimeout)
            speechTimeout = null
          }
          setIsRecording(false)
          setRecordingStatus(0)
        }
        
        recognition.start()
      }
    } catch (error) {
      console.error('启动语音识别失败:', error)
      setIsRecording(false)
      setRecordingStatus(0)
      setVoiceText('')
      ui.showToast({
        title: '启动语音识别失败',
        icon: 'none'
      })
    }
  }
  
  // Stop an in-progress recording session (WeChat plugin only — the H5
  // recognizer ends via its own onend/onerror callbacks).
  const stopVoiceRecognition = () => {
    if (!manager || !isRecording) return

    try {
      // Enter the "recognizing" phase while the plugin finalizes the result.
      setRecordingStatus(2)
      setVoiceText('正在识别中...')
      manager.stop()
    } catch (error) {
      console.error('停止语音识别失败:', error)
      setIsRecording(false)
      setRecordingStatus(0)
      setVoiceText('')
      ui.showToast({
        title: '停止语音识别失败',
        icon: 'none'
      })
    }
  }

  // Send the current input as a user message and stream the AI reply.
  // H5 consumes the response body as a stream via fetch(); the mini-program
  // falls back to a single request whose body is then typed out by
  // simulateStreamingDisplay.
  const handleSearch = async () => {
    if (searchText.trim()) {
      try {
        // Count this user message (feeds the history paging offset).
        setCountMyMessage(prev => prev + 1)
        
        // Optimistically append the user's message to the chat.
        const userMessage = {
          id: Date.now(), // temporary id
          isUser: true,
          sentence: searchText,
          time: Date.now()
        }
        
        // Append to the chat list.
        setChatHistory(prev => [...prev, userMessage])
        
        // Clear the input box, keeping the text for the request.
        const sentText = searchText
        setSearchText('')
        
        // Placeholder AI message that the stream will fill in.
        const newMessageId = Date.now() + 1
        setCurrentMessageId(newMessageId)
        
        // Empty AI message used to render the streaming reply.
        const aiMessage = {
          id: newMessageId,
          isUser: false,
          isStreaming: true,
          sentence: '',
          time: Date.now(),
          parsedContent: {
            isJson: false,
            emotion: '中性',
            score: '0',
            response: ''
          }
        }
        
        setChatHistory(prev => [...prev, aiMessage])
        
        // Enter streaming mode.
        setIsStreaming(true)
        setStreamingResponse('')
        setIsInterrupted(false) // reset the interrupt flag
        setUserScrolledUp(false) // force auto-scroll back on
        

        
        // Ask the API layer for the streaming request configuration.
        const streamConfig = await sendMessage(sentText, true)
        
        // NOTE(review): taskId is generated but never used — confirm whether
        // it was meant to be sent with the request.
        const taskId = Math.random().toString(36).substring(2, 15)
        let responseText = ''
        
        // H5: consume the response body incrementally via fetch.
        if (Taro.getEnv() === Taro.ENV_TYPE.WEB) {
          try {
            // fetch API (H5 only)
            const response = await fetch(streamConfig.url, {
              method: streamConfig.method || 'POST',
              headers: streamConfig.header
            })
            
            if (!response.ok) {
              throw new Error(`HTTP error! status: ${response.status}`)
            }
            
            const reader = response.body.getReader()
            const decoder = new TextDecoder("utf-8")
            
            while (true) {
              const { done, value } = await reader.read()
              
              if (done) {
                break
              }
              
              // NOTE(review): isInterrupted here is the value captured by this
              // render's closure, so a later interrupt may never be observed
              // inside this loop — consider a ref if interruption matters.
              if (isInterrupted) {
                console.log('流式响应被打断')
                break
              }
              
              // Decode this chunk and append it to the running response.
              const chunk = decoder.decode(value, { stream: true })
              responseText += chunk
              
              // END-marker handling intentionally disabled; raw text is shown.
              // if (responseText.includes('END')) {
              //   setIsStreaming(false)
              //   // 确保立即移除END标记
              //   responseText = responseText.replace(/\s*\}?\s*END\s*$/g, '')
              // }
              
              // Strip SSE framing / unescape newlines for display.
              const displayText = cleanStreamData(responseText)
              
              // Update the live streaming text.
              setStreamingResponse(displayText)
              
              // Mirror the text into the placeholder AI message.
              setChatHistory(prev => prev.map(msg => 
                msg.id === newMessageId 
                  ? { 
                      ...msg, 
                      sentence: displayText,
                      parsedContent: { 
                        ...msg.parsedContent, 
                        response: displayText 
                      } 
                    } 
                  : msg
              ))
              
              // Keep auto-scroll following the stream.
              setUserScrolledUp(false)
              

            }
          } catch (error) {
            console.error('流式请求失败', error)
            ui.showToast({
              title: '响应失败，请重试',
              icon: 'none'
            })
            setIsStreaming(false)
          } finally {
            // Stream finished (or failed): leave streaming mode.
            setIsStreaming(false)
            setStreamingResponse('')
            
            // Mark the AI message as no longer streaming.
            setChatHistory(prev => prev.map(msg => 
              msg.id === newMessageId 
                ? { ...msg, isStreaming: false } 
                : msg
            ))
            
            // Final scroll to the bottom.
            setTimeout(() => {
              scrollToBottom()
              // Extra nudge for the mini-program.
              if (Taro.getEnv() === Taro.ENV_TYPE.WEAPP) {
                weappScrollToBottom()
              }
            }, 150)
          }
        } else {
          // Mini-program: Taro.request cannot stream, so fetch the whole
          // reply and simulate the typing effect.
          try {
            const response = await Taro.request({
              url: streamConfig.url,
              header: streamConfig.header,
              method: 'POST'
            })
            
            if (response.statusCode === 200 && response.data) {
              // Process the full response body.
              let content = response.data
              
              // END-marker handling intentionally disabled; raw content is used.
              // if (content.includes('END')) {
              //   content = content.replace(/\s*\}?\s*END\s*$/g, '')
              // }
              
              // Strip SSE framing / unescape newlines.
              content = cleanStreamData(content)
              
              // Type the reply out character by character.
              simulateStreamingDisplay(content, newMessageId)
            }
          } catch (error) {
            console.error('请求失败', error)
            ui.showToast({
              title: '响应失败，请重试',
              icon: 'none'
            })
            setIsStreaming(false)
          }
        }
      } catch (error) {
        console.error('发送消息失败', error)
        ui.showToast({
          title: '发送失败，请重试',
          icon: 'none'
        })
        setIsStreaming(false)
      }
    }
  }
  
  // Simulate a streaming reply: reveal `fullContent` one character at a time
  // (every 30ms) into both the transient streaming state and the chat-history
  // entry identified by `messageId`.
  const simulateStreamingDisplay = (fullContent, messageId) => {
    setIsStreaming(true)
    // Reset the scrolled-up flag so auto-scroll stays active while typing
    setUserScrolledUp(false)
    let index = 0
    
    // Use the raw text as-is; substring() below keeps character encoding intact
    const totalLength = fullContent.length
    
    // Reveal one character every 30ms (slightly faster display speed)
    const typingInterval = setInterval(() => {
      // NOTE(review): `isInterrupted` is captured once by this closure, so a
      // later state update may not be visible here — the interrupt path also
      // clears this interval directly via currentTypingInterval (confirm).
      if (isInterrupted) {
        clearInterval(typingInterval)
        return
      }
      
      if (index <= totalLength) {
        // String slicing rather than array joins, so multi-byte characters
        // are never split mid-sequence
        const partialContent = fullContent.substring(0, index)
        setStreamingResponse(partialContent)
        
        // Mirror the partial text into the persistent chat-history message
        setChatHistory(prev => prev.map(msg => 
          msg.id === messageId 
            ? { 
                ...msg, 
                sentence: partialContent,
                parsedContent: { 
                  ...msg.parsedContent, 
                  response: partialContent 
                },
                isStreaming: index < totalLength // streaming ends once fully revealed
              } 
            : msg
        ))
        
        // Scroll only every 10 characters to avoid scroll thrashing
        if (index % 10 === 0) {
          scrollToBottom()
          // Extra call needed in the WeChat environment
          if (Taro.getEnv() === Taro.ENV_TYPE.WEAPP) {
            weappScrollToBottom()
          }
        }
        
        index++
      } else {
        clearInterval(typingInterval)
        setIsStreaming(false)
        
        // Make sure the message's isStreaming flag is cleared
        setChatHistory(prev => prev.map(msg => 
          msg.id === messageId 
            ? { ...msg, isStreaming: false } 
            : msg
        ))
        
        // Final scroll to bottom once rendering has settled
        setTimeout(() => {
          scrollToBottom()
          // Extra call needed in the WeChat environment
          if (Taro.getEnv() === Taro.ENV_TYPE.WEAPP) {
            weappScrollToBottom()
          }
        }, 150)
      }
    }, 30)
    
    // Keep a global handle so handleInterrupt can cancel the typing effect.
    // Guarded: `window` does not exist in the WeChat mini-program runtime,
    // and this function IS called from the WEAPP branch — the previous
    // unguarded assignment would throw there.
    if (typeof window !== 'undefined') {
      window.currentTypingInterval = typingInterval
    }
  }

  // Handle the hamburger-menu tap: toggle the side menu's visibility
  const handleMenuClick = () => {
    // Functional updater avoids toggling from a stale `menuVisible` closure
    // if the handler fires again before the next render
    setMenuVisible(visible => !visible)
  }
  
  // Hide the side menu
  const closeMenu = () => setMenuVisible(false)

  // Show the user profile drawer and dismiss the side menu
  const openUserProfile = () => {
    closeMenu()
    setUserProfileVisible(true)
  }
  
  // Hide the user profile drawer
  const closeUserProfile = () => setUserProfileVisible(false)

  // Show the emotion analysis drawer and dismiss the side menu
  const openEmotionChart = () => {
    closeMenu()
    setEmotionChartVisible(true)
  }
  
  // Hide the emotion analysis drawer
  const closeEmotionChart = () => setEmotionChartVisible(false)
  




  // Render a timestamp as a zero-padded local "HH:MM" string
  const formatTime = (timestamp) => {
    const pad = (value) => String(value).padStart(2, '0')
    const d = new Date(timestamp)
    return `${pad(d.getHours())}:${pad(d.getMinutes())}`
  }

  // Map an emotion label to its CSS class; anything unrecognized is neutral
  const getEmotionClass = (emotion) => {
    if (emotion === '积极') return 'emotion-positive'
    if (emotion === '消极') return 'emotion-negative'
    return 'emotion-neutral'
  }

  // Interrupt an in-progress AI reply: cancel the typing effect, mark the
  // streaming message as interrupted, and reset all streaming state.
  const handleInterrupt = () => {
    if (isStreaming) {
      setIsInterrupted(true)
      setIsStreaming(false)
      setStreamingResponse('')
      
      // Cancel the character-by-character typing timer. Guarded because
      // `window` does not exist in the WeChat mini-program runtime and the
      // interrupt button is rendered in every environment.
      if (typeof window !== 'undefined' && window.currentTypingInterval) {
        clearInterval(window.currentTypingInterval)
        window.currentTypingInterval = null
      }
      
      // Append the interrupted marker to the message that was streaming
      if (currentMessageId) {
        setChatHistory(prev => prev.map(msg => 
          msg.id === currentMessageId 
            ? { 
                ...msg, 
                isStreaming: false,
                // `?? ''` guards a message whose text was never populated,
                // which previously rendered "undefined [已打断]" or threw on
                // a missing parsedContent
                sentence: (msg.sentence ?? '') + ' [已打断]',
                parsedContent: {
                  ...msg.parsedContent,
                  response: (msg.parsedContent?.response ?? '') + ' [已打断]'
                }
              } 
            : msg
        ))
      }
      
      ui.showToast({
        title: '已打断AI回答',
        icon: 'success',
        duration: 1500
      })
    }
  }

  return (
    <View className='index'>
      {/* Overlay behind the side menu; tapping it closes the menu */}
      {menuVisible && (
        <View className='menu-overlay' onClick={closeMenu}></View>
      )}
      
      {/* Slide-in side menu: user header, actions, decoration */}
      <View className={`side-menu ${menuVisible ? 'visible' : ''}`}>
        <View className='menu-header'>
          <View className='user-avatar'>
            <Image 
              className='avatar-image' 
              src={userInfo?.photo || defaultAvatar} 
            />
          </View>
          <Text className='user-name'>{
            userInfo?.name || 
            (userInfo?.loginType === 'wechat' ? '微信用户' : '未登录')
          }</Text>
        </View>
        
        <View className='menu-items'>
          <View className='menu-item' onClick={openUserProfile}>
            <Text>个人信息</Text>
          </View>
          <View className='menu-item' onClick={openEmotionChart}>
            <Text>情绪得分报告</Text>
          </View>

          <View className='menu-item' onClick={logout}>
            <Text>退出登录</Text>
          </View>
        </View>
        
        {/* Decoration area at the bottom of the menu */}
        <View className='menu-decoration'>
          <Text className='decoration-text'>Soul Talk</Text>
          <Image 
            className='decoration-image' 
            src={decorationImage} 
          />
        </View>
      </View>

      {/* User profile drawer */}
      <UserProfile visible={userProfileVisible} onClose={closeUserProfile} />

      {/* Emotion analysis drawer */}
      <EmotionChart visible={emotionChartVisible} onClose={closeEmotionChart} />



      {/* Top navigation bar: hamburger icon + greeting title */}
      <View className='header'>
        <View className='menu-icon' onClick={handleMenuClick}>
          <View className='menu-line'></View>
          <View className='menu-line'></View>
          <View className='menu-line'></View>
        </View>
        
        <View className='header-title'>
          <Text className='app-name'>我是soul talk，很高兴见到你！</Text>
        </View>
        
      </View>
      
      {/* Scrollable chat area; pull-to-refresh loads older messages */}
      <ScrollView
        className='chat-container'
        scrollY
        scrollWithAnimation
        enhanced
        showScrollbar={false}
        enablePassive
        bounces
        ref={node => {
          scrollViewRef.current = node
          {/* Extra handling needed for the WeChat mini-program */}
          // NOTE(review): assigns `node` onto the ref value that was just set
          // to `node` itself — looks redundant; confirm it is intentional
          if (Taro.getEnv() === Taro.ENV_TYPE.WEAPP && node) {
            scrollViewRef.current.node = node
          }
        }}
        upperThreshold={150}
         refresherEnabled
         refresherThreshold={180}
        refresherDefaultStyle="black"
        refresherBackground="#ffffff"
        refresherTriggered={isLoadingMore}
        onRefresherRefresh={handleScrollToUpper}
        onScroll={handleScroll}
        id="chat-scroll-view"
        scrollIntoView={isStreaming ? 'bottom-anchor' : ''}
      >
        {/* "Loading more" indicator while older messages are fetched */}
        {isLoadingMore && (
          <View className='loading-more'>
            <View className='loading-spinner'></View>
            <Text>加载更多历史消息...</Text>
          </View>
        )}
        
        {/* Notice shown once the whole history has been loaded */}
        {hasLoadedAll && chatHistory.length > 0 && (
          <View className='no-more-messages'>
            <Text>没有更多历史消息了</Text>
          </View>
        )}

        {/* Chat message list: user messages right, AI messages left */}
        {chatHistory.map((message) => (
          <View key={message.id} className={`message-item ${message.isUser ? 'user-message' : 'ai-message'}`}>
            {/* Message timestamp */}
            <Text className='message-time'>
              {formatTime(message.time)}
            </Text>
            
            {/* Message body */}
            <View className='message-content'>
              {message.isUser ? (
                <Text>{message.sentence}</Text>
              ) : (
                <>
                  <Text className={message.isStreaming ? 'typing' : ''}>{message.parsedContent.response}</Text>
                  {/* Voice playback button for finished AI messages */}
                  {!message.isStreaming && message.parsedContent.response && (
                    <View className='voice-play-container'>
                      <View 
                        className={`voice-play-button ${
                          isPlaying && playingMessageId === message.id ? 'playing' : ''
                        }`}
                        onClick={() => playTextToSpeech(message.parsedContent.response, message.id)}
                      >
                        <Text className='voice-play-icon'>
                          {isPlaying && playingMessageId === message.id ? '⏸️' : '🔊'}
                        </Text>
                      </View>
                    </View>
                  )}
                </>
              )}
            </View>
          </View>
        ))}
        
        {chatHistory.length === 0 && !isLoadingMore && (
          <View className='empty-chat'>
            <Text>没有聊天记录，开始新的对话吧！</Text>
          </View>
        )}
        
        {/* Bottom anchor element targeted by scrollIntoView */}
        <View id="bottom-anchor" style={{height: 1, width: '100%'}} />
      </ScrollView>
      
      {/* Bottom input area: interrupt, text input, send, voice */}
      <View className='input-container'>
        <View className='search-bar'>
          {/* Interrupt button — only shown while the AI is replying */}
          {isStreaming && (
            <View className='interrupt-button' onClick={handleInterrupt}>
              <Text className='interrupt-text'>打断</Text>
            </View>
          )}
          
          <View className='voice-icon'>
            <Text className='icon-circle'></Text>
          </View>
          <Input
            className='search-input'
            placeholder='和我一起聊天吧'
            value={searchText}
            onInput={e => setSearchText(e.detail.value)}
            onConfirm={handleSearch}
            disabled={isStreaming || isRecording}
          />
          
          {/* Send button */}
          <View className='send-button-container'>
            <Button 
              className={`send-button ${searchText.trim() ? 'active' : ''}`}
              onClick={handleSearch}
              disabled={isStreaming || isRecording || !searchText.trim()}
            >
              发送
            </Button>
          </View>
          
          {/* Voice input button: press to record, release to stop */}
          <View className='voice-button-container'>
            {!isRecording ? (
              <Button 
                className='voice-button'
                onTouchStart={startVoiceRecognition}
                disabled={isStreaming}
              >
                🎤
              </Button>
            ) : (
              <Button 
                className='voice-button recording'
                onTouchEnd={stopVoiceRecognition}
              >
                {recordingStatus === 1 ? '🔴' : '⏳'}
              </Button>
            )}
          </View>
        </View>
        
        {/* Live voice-recognition status text */}
        {voiceText && (
          <View className='voice-status'>
            <Text className='voice-status-text'>{voiceText}</Text>
          </View>
        )}
        
        {/* AI-generated content disclaimer */}
        <View className='ai-disclaimer'>
          <Text>内容由 AI 生成，仅供参考</Text>
        </View>
      </View>
      
    </View>
  )
}
