import { create } from 'zustand';
import { SpeechRecognitionManager, RecognitionResult, RecognitionStatus, RecognitionHistoryItem } from '../utils/speechRecognition';
import { dbManager } from '../utils/database';
import { requestPermissions } from '../utils/permissions';
import { Audio, InterruptionModeIOS } from 'expo-av';
import { soundPlayer } from '../utils/sound';

interface AIState {
  /** High-level assistant state machine driving the UI. */
  status: 'inactive' | 'active' | 'voiceinput' | 'listening' | 'speaking' | 'thinking' | 'error';
  /** Current microphone input level (for visualizations). */
  audioLevel: number;
  isOverlayVisible: boolean;
  recognitionStatus: RecognitionStatus;
  /** Live transcript of the utterance currently being recognized. */
  currentRecognitionText: string;
  recognitionHistory: RecognitionHistoryItem[];
  permissionsGranted: boolean;
  
  // Speech recognition manager (created lazily by initSpeechManager)
  speechManager: SpeechRecognitionManager | null;
  
  // Actions
  initSpeechManager: (appKey: string, accessKey: string) => void;
  initPermissions: () => Promise<void>;
  startRecognition: () => Promise<void>;
  stopRecognition: () => Promise<void>;
  loadHistory: () => Promise<void>;
  setOverlayVisible: (visible: boolean) => void;
  handleLongPress: () => Promise<void>;
  handlePressOut: () => Promise<void>;
  // Implementation is async; typed Promise<void> so callers may await it
  // (was `() => void`, which hid the returned promise from callers).
  handleOverlayDismiss: () => Promise<void>;
  currentAudio: Audio.Sound | null;
  
  // Audio playback controls
  playAudio: (audioSource: number | { uri: string }) => Promise<void>;
  stopAudio: () => Promise<void>;
  botResponse: {
    text: string;
    audioUrl: string;
  } | null;
  /** Text revealed so far by the typewriter effect. */
  displayedText: string;
  botConfig: {
    apiUrl: string;
    headers: Record<string, string>;
    workflowId: string;
  };
  
  setBotConfig: (config: Partial<AIState['botConfig']>) => void;
  sendToBotAndPlay: (text: string) => Promise<void>;
  updateDisplayText: (text: string) => void;
  configureAudioSession: () => Promise<void>;
  initsound: () => Promise<void>;
  startListening: () => Promise<void>;
  stopListening: () => Promise<void>;
}

// Global Zustand store for the voice assistant: speech recognition, bot
// round-trips, and audio playback state all live here.
export const useAIStore = create<AIState>((set, get) => ({
  // ---- initial state ----
  status: 'inactive',
  audioLevel: 0,
  isOverlayVisible: false,
  recognitionStatus: 'idle',
  currentRecognitionText: '',
  recognitionHistory: [],
  speechManager: null,
  permissionsGranted: false,
  currentAudio: null,
  botResponse: null,
  displayedText: '',
  // Coze workflow endpoint configuration (mutable via setBotConfig).
  botConfig: {
    apiUrl: 'https://api.coze.cn/v1/workflow/run',
    headers: {
      'Content-Type': 'application/json',
      // SECURITY(review): hard-coded bearer token committed to source —
      // rotate this credential and load it from secure config instead.
      'Authorization': 'Bearer pat_UDWqItdRhucBMZ8sJA1fV5HwrRv6vI6Har0ZAO22ssRhrJ74uxv88Bz5ZQhjDwnN',
      'Accept': '*/*',
    },
    workflowId: '7447469597647192076',
  },

  initPermissions: async () => {
    // Ask the OS for the permissions this feature needs and mirror the
    // outcome into `permissionsGranted`.
    let granted = false;
    try {
      await requestPermissions();
      granted = true;
      set({ permissionsGranted: granted });
    } catch (error) {
      console.error('初始化权限失败:', error);
      set({ permissionsGranted: granted });
    }
  },

  initSpeechManager: (appKey, accessKey) => {
    // Closure state that survives across streaming recognition callbacks.
    let previousText = '';         // last stable display text
    let lastDefiniteText = '';     // longest "definite" (finalized) text seen so far
    
    const manager = new SpeechRecognitionManager(
      appKey,
      accessKey,
      // Streaming result callback: merges definite and tentative utterances
      // into a single display string written to `currentRecognitionText`.
      (result) => {
        if (result.result?.utterances) {
          const utterances = result.result.utterances;
          let displayText = '';
          
          // Find the longest definite text in this batch.
          // NOTE(review): when utterance.text is longer than
          // longestDefiniteText, includes() can never be true, so the
          // negated check is effectively redundant — kept as-is.
          let longestDefiniteText = '';
          utterances.forEach(utterance => {
            if (utterance.definite && 
                utterance.text.length > longestDefiniteText.length &&
                !longestDefiniteText.includes(utterance.text)) {
              longestDefiniteText = utterance.text;
            }
          });

          // If a new definite text arrived, it becomes the display text.
          if (longestDefiniteText && longestDefiniteText !== lastDefiniteText) {
            lastDefiniteText = longestDefiniteText;
            previousText = longestDefiniteText;
            displayText = longestDefiniteText;
          } else {
            // Otherwise look at the last (possibly tentative) utterance.
            const lastUtterance = utterances[utterances.length - 1];
            if (!lastUtterance.definite) {
              const tentativeText = lastUtterance.text;
              // Append the tentative text only if it is not already part of
              // what is being displayed.
              if (!previousText.includes(tentativeText)) {
                displayText = previousText + tentativeText;
              } else {
                displayText = previousText;
              }
            } else {
              displayText = previousText;
            }
          }

          if (displayText) {
            set({ currentRecognitionText: displayText });
          }
        }
      },
      // Any recognizer error resets the assistant to idle.
      (error) => {
        console.error('Speech recognition error:', error);
        set({ 
          status: 'inactive',
          recognitionStatus: 'error',
        });
      }
    );
    set({ speechManager: manager });
  },

  startRecognition: async () => {
    // Begin streaming audio to the recognizer; a no-op if the manager has
    // not been initialized yet.
    const manager = get().speechManager;
    if (!manager) return;

    try {
      set({ recognitionStatus: 'connecting' });
      await manager.startRecording();
      set({ recognitionStatus: 'recording' });
    } catch (error) {
      console.error('Failed to start recognition:', error);
      set({
        status: 'inactive',
        recognitionStatus: 'error',
      });
    }
  },

  stopRecognition: async () => {
    // Stop the recognizer, persist the final transcript, forward it to the
    // bot, and refresh the history list.
    const { speechManager } = get();
    if (!speechManager) return;

    try {
      set({ recognitionStatus: 'processing' });
      await speechManager.stopRecording();

      // Read the text AFTER stopRecording: the final definite utterance may
      // only be delivered while the stream is closing, so a value captured
      // before the await could drop the end of the sentence.
      const { currentRecognitionText } = get();

      if (currentRecognitionText) {
        // Persist the utterance to local history.
        await dbManager.saveRecognition({
          text: currentRecognitionText,
          timestamp: Date.now(),
        });

        // Forward to the bot; intentionally keeps the current text visible.
        await get().sendToBotAndPlay(currentRecognitionText);

        // Refresh the history list shown in the UI.
        await get().loadHistory();
      }

      set({ recognitionStatus: 'idle' });
    } catch (error) {
      console.error('Failed to stop recognition:', error);
      set({ recognitionStatus: 'error' });
    }
  },

  loadHistory: async () => {
    // Pull saved recognition items from the local DB into state.
    try {
      set({ recognitionHistory: await dbManager.getHistory() });
    } catch (error) {
      console.error('Failed to load history:', error);
    }
  },

  setOverlayVisible: (visible) => set({ isOverlayVisible: visible }),

  handleLongPress: async () => {
    // Ignore long-presses while the bot is speaking.
    if (get().status === 'speaking') return;

    // Shared state for every failure path below.
    const failureState = {
      status: 'inactive' as const,
      isOverlayVisible: false,
      recognitionStatus: 'error' as const,
    };

    try {
      // Lazily request permissions on first use.
      if (!get().permissionsGranted) {
        await get().initPermissions();
      }

      if (!get().permissionsGranted) {
        console.error('未获得必要权限');
        set(failureState);
        return;
      }

      // Enter voice-input mode, cue the user, then start streaming.
      set({
        status: 'voiceinput',
        isOverlayVisible: true,
        recognitionStatus: 'connecting',
      });
      await soundPlayer.playLongPressOn();
      await get().startRecognition();
    } catch (error) {
      console.error('Long press error:', error);
      set(failureState);
    }
  },

  handlePressOut: async () => {
    // Release of the long-press: stop recording and process the transcript.
    if (get().status !== 'voiceinput') return;

    // 1. Leave voice-input mode immediately so the UI reacts to the release.
    set({
      status: 'active',
      recognitionStatus: 'processing',
    });

    try {
      // 2. Stop the recording half of the recognition pipeline.
      const { speechManager } = get();
      if (speechManager) {
        await speechManager.stopRecording();
      }

      // 3. Audible feedback for the release.
      await soundPlayer.playLongPressOff();

      // 4. Re-read the recognized text AFTER stopping: the final definite
      //    utterance may only arrive while the stream is closing, so the
      //    value captured before the await could miss the end of the
      //    sentence (previously read from a stale snapshot).
      const { currentRecognitionText } = get();
      if (currentRecognitionText) {
        await dbManager.saveRecognition({
          text: currentRecognitionText,
          timestamp: Date.now(),
        });
        await get().sendToBotAndPlay(currentRecognitionText);
        await get().loadHistory();
      }

      set({ recognitionStatus: 'idle' });
    } catch (error) {
      console.error('Error in handlePressOut:', error);
      set({ recognitionStatus: 'error' });
    }
  },

  handleOverlayDismiss: async () => {
    // Best-effort teardown of any audio that is still playing.
    const sound = get().currentAudio;
    if (sound) {
      try {
        await sound.stopAsync();
        await sound.unloadAsync();
      } catch (error) {
        console.error('Error stopping audio in dismiss:', error);
      }
    }

    // Reset every piece of overlay-related state in one shot.
    set({
      status: 'inactive',
      isOverlayVisible: false,
      currentRecognitionText: '',
      displayedText: '',
      recognitionStatus: 'idle',
      currentAudio: null,
    });
  },


  stopAudio: async () => {
    // Stop and release the currently playing sound, then return to 'active'.
    const { currentAudio } = get();
    if (!currentAudio) return;

    try {
      await currentAudio.stopAsync();
      // Give expo-av a moment to settle before unloading; unloading right
      // after stopAsync can race the native player.
      await new Promise(resolve => setTimeout(resolve, 500));
      await currentAudio.unloadAsync();
    } catch (error) {
      // `catch` variables are `unknown` under strict TS — narrow before
      // reading `.message` (previously a type error). "Seeking interrupted."
      // is a benign error raised when a stop cuts off an in-flight seek.
      const message = error instanceof Error ? error.message : String(error);
      if (message !== 'Seeking interrupted.') {
        console.error('Error stopping audio:', error);
      }
    } finally {
      set({ currentAudio: null, status: 'active' });
    }
  },


  // Shallow-merge a partial config into the existing bot configuration.
  setBotConfig: (config) =>
    set((state) => ({
      botConfig: { ...state.botConfig, ...config },
    })),

  sendToBotAndPlay: async (text) => {
    // Send the transcript to the Coze workflow, play the returned audio, and
    // reveal the returned text with a typewriter effect.
    const { botConfig, playAudio } = get();

    try {
      set({ status: 'thinking' });

      const response = await fetch(botConfig.apiUrl, {
        method: 'POST',
        headers: botConfig.headers,
        body: JSON.stringify({
          workflow_id: botConfig.workflowId,
          parameters: {
            user_input: text,
          },
        }),
      });

      // Surface HTTP-level failures instead of trying to parse an error body.
      if (!response.ok) {
        throw new Error(`Bot request failed with HTTP ${response.status}`);
      }

      const result = await response.json();

      if (result.code !== 0) {
        // Previously a non-zero code silently left the UI stuck in
        // 'thinking'; reset it and log the payload for diagnosis.
        console.error('Bot returned an error:', result);
        set({ status: 'inactive' });
        return;
      }

      // NOTE(review): result.data is parsed without schema validation;
      // assumes it contains { output: string, link: string } — confirm
      // against the Coze workflow response format.
      const botData = JSON.parse(result.data);
      set({
        botResponse: {
          text: botData.output,
          audioUrl: botData.link,
        },
        displayedText: '',
      });

      // Start playback first so audio and typewriter text run together.
      await playAudio({ uri: botData.link });

      // Typewriter effect: reveal one more character every 50 ms.
      const textToDisplay = botData.output;
      let currentIndex = 0;

      const typewriterInterval = setInterval(() => {
        if (currentIndex <= textToDisplay.length) {
          get().updateDisplayText(textToDisplay.slice(0, currentIndex));
          currentIndex++;
        } else {
          clearInterval(typewriterInterval);
        }
      }, 50);
    } catch (error) {
      console.error('发送消息到机器人失败:', error);
      set({ status: 'inactive' });
    }
  },

  // Replace the text currently shown by the typewriter effect.
  updateDisplayText: (text) => set({ displayedText: text }),

  // Shared audio-session setup: playback-only (no recording), audible in iOS
  // silent mode, foreground-only, exclusive of other apps' audio on iOS.
  configureAudioSession: async () => {
    await Audio.setAudioModeAsync({
      interruptionModeIOS: InterruptionModeIOS.DoNotMix,
      playsInSilentModeIOS: true,
      staysActiveInBackground: false,
      allowsRecordingIOS: false,
    });
  },

  playAudio: async (audioSource) => {
    // Load and play a sound (bundled asset id or remote { uri }), tracking it
    // in `currentAudio` and driving the 'speaking'/'active' status.
    const { currentAudio } = get();

    try {
      // Tear down any previous sound. A failure here (e.g. it was already
      // unloaded) must not abort playback of the NEW audio, so the cleanup
      // gets its own try/catch (previously it threw into the outer catch
      // and the new sound never played).
      if (currentAudio) {
        try {
          await currentAudio.stopAsync();
          await currentAudio.unloadAsync();
        } catch (cleanupError) {
          console.error('Error releasing previous audio:', cleanupError);
        }
      }

      await get().configureAudioSession();

      const { sound } = await Audio.Sound.createAsync(
        audioSource,
        {
          shouldPlay: false,
          volume: 1.0,
          progressUpdateIntervalMillis: 100,
        },
        (status) => {
          // Initial status callback (replaced further below).
          console.log('音频加载状态:', status);
        },
        true // downloadFirst: fetch remote audio before resolving
      );

      // Deferred load check: createAsync can resolve before the native
      // player reports a final state, so sample the status shortly after.
      setTimeout(() => {
        if (sound) {
          sound.getStatusAsync().then((status) => {
            if (!status.isLoaded) {
              console.log('音频加载失败', status);
              set({ status: 'inactive', currentAudio: null });
            } else {
              console.log('音频加载成功', status);
              set({ status: 'speaking' }); // only mark speaking once loaded
            }
          });
        }
      }, 100); // check 100 ms after creation

      set({ currentAudio: sound });

      // When playback finishes, return to 'active' and release the sound.
      sound.setOnPlaybackStatusUpdate((playbackStatus) => {
        if (playbackStatus.isLoaded && playbackStatus.didJustFinish) {
          set({ status: 'active', currentAudio: null });
          setTimeout(() => {
            sound.unloadAsync().catch(() => {});
          });
        }
      });

      await sound.playAsync();
    } catch (error) {
      console.error('Error playing audio:', error);
      set({ status: 'inactive', currentAudio: null });
    }
  },

  // Preload the UI sound effects via the shared sound player.
  initsound: async () => soundPlayer.initsound(),

  startListening: async () => {
    // Hands-free entry point: same pipeline as startRecognition, gated on
    // status and preceded by an audio cue.
    const { status } = get();
    if (status === 'speaking' || status === 'listening') return;

    await soundPlayer.playLongPressOn();
    // Delegate to startRecognition instead of keeping a duplicated copy of
    // its connect/record/error-handling sequence (the two bodies were
    // byte-for-byte copies and had started to drift risk).
    await get().startRecognition();
  },

  stopListening: async () => {
    // NOTE(review): this guard requires status === 'listening', but nothing
    // in this file ever sets that status — confirm a caller sets it,
    // otherwise this body is unreachable.
    if (get().status !== 'listening') return;

    await soundPlayer.playLongPressOff();
    // Delegate to stopRecognition instead of keeping a duplicated copy of
    // its save/send/reload sequence.
    await get().stopRecognition();
  },

})); 