import React, { useState, useRef, useCallback, useMemo } from 'react';
// 改为通过后端代理获取字幕时间戳

/** Props accepted by the {@link useTextToSpeech} hook. */
interface UseTextToSpeechProps {
  /** Invoked on every TTS lifecycle transition (synthesis, playback, errors). */
  onStatusChange: (status: 'idle' | 'synthesizing' | 'playing' | 'error') => void;
}

/**
 * One subtitle/timestamp entry returned by the backend subtitle proxy.
 * NOTE(review): the unit of `start`/`end` (ms vs s) is not established by this
 * file — it comes from the `/api/subtitles` backend; confirm against the proxy.
 */
interface TimestampData {
  /** The text fragment this timing entry covers. */
  text: string;
  /** Start time of the fragment within the audio. */
  start: number;
  /** End time of the fragment within the audio. */
  end: number;
  /** Optional phoneme annotation, present only when phoneme output is enabled. */
  phoneme?: string;
}

/**
 * Phoneme-level timing entry. Same shape as {@link TimestampData} but with a
 * mandatory `phoneme` field.
 */
interface PhonemeData {
  /** The text fragment this phoneme belongs to. */
  text: string;
  /** Start time of the phoneme within the audio. */
  start: number;
  /** End time of the phoneme within the audio. */
  end: number;
  /** The phoneme symbol. */
  phoneme: string;
}

/** Result of a synthesis call: playable audio plus its timing metadata. */
interface AudioData {
  /** Object URL (blob:) for the synthesized audio; must be revoked after use. */
  audioUrl: string;
  /** Sentence/word-level timestamps (may be empty if the subtitle proxy failed). */
  timestamps: TimestampData[];
  /** Phoneme-level timestamps (currently always empty — REST path does not request them). */
  phonemes: PhonemeData[];
}

/**
 * React hook wrapping Aliyun TTS: synthesizes text to MP3 via a REST proxy,
 * fetches subtitle timestamps from a backend proxy in parallel, and manages
 * playback of the resulting audio.
 *
 * @param onStatusChange - Notified on every lifecycle transition.
 * @returns `speakText` (synthesize + play), plus the lower-level
 *   `synthesizeText` / `playAudio` / `stopPlayback` primitives and the
 *   `isPlaying` / `isSynthesizing` / `error` state and the raw `audioRef`.
 */
const useTextToSpeech = ({ onStatusChange }: UseTextToSpeechProps) => {
  const [isPlaying, setIsPlaying] = useState(false);
  const [isSynthesizing, setIsSynthesizing] = useState(false);
  const [error, setError] = useState<string | null>(null);
  const audioRef = useRef<HTMLAudioElement | null>(null);

  // Aliyun TTS configuration.
  // SECURITY(review): accessKeyId / accessKeySecret / appKey are hardcoded in
  // client-side code and are shipped to every browser that loads this bundle.
  // Move credential handling to the backend proxy and ROTATE these keys — they
  // must be treated as compromised.
  const config = useMemo(() => ({
    accessKeyId: 'LTAI5t8B96Te1kfhJ3RfCmyh',
    accessKeySecret: 'zhtgh4GsdUnKzrSB2HfBIx5XYk0Xj3',
    appKey: 'O6UzF6Mne1k44weO',
    region: 'cn-shanghai',
    voice: 'xiaoyun', // female voice
    format: 'mp3',
    sampleRate: 16000,
    volume: 50,
    // Speech rate: negative is slower (Aliyun range is typically -500..500;
    // exact bounds vary by endpoint).
    speechRate: -60,
    pitchRate: 0,
    // The REST call no longer requests subtitle data; the backend WS proxy
    // provides it instead.
    enableSubtitle: false,
    enableTimestamp: false,
    subtitleType: 'sentence',
    enablePhoneme: false
  }), []);

  // Fetch an access token for the REST proxy.
  // SECURITY(review): this is a hardcoded, long-lived token embedded in client
  // code — same exposure as the keys above. Replace with a backend-issued,
  // short-lived token.
  const getToken = useCallback(async (): Promise<string> => {
    return '4ba7b3a2f69d46a793f5d2f851770f14';
  }, []);

  /**
   * Build the TTS request body. Per-call `opts` override the memoized defaults.
   * The access token is NOT included here; `requestOnce` attaches it.
   */
  const buildRequestBody = useCallback((text: string, opts?: Partial<typeof config>): Record<string, unknown> => {
    return {
      appkey: config.appKey,
      text,
      format: opts?.format ?? config.format,
      sample_rate: config.sampleRate,
      voice: config.voice,
      volume: config.volume,
      speech_rate: config.speechRate,
      pitch_rate: config.pitchRate,
      // Some Aliyun endpoints require 1/0 instead of true/false for flags.
      enable_subtitle: (opts?.enableSubtitle ?? config.enableSubtitle) ? 1 : 0,
      enable_timestamp: (opts?.enableTimestamp ?? config.enableTimestamp) ? 1 : 0,
      subtitle_type: opts?.subtitleType ?? config.subtitleType,
      enable_phoneme: (opts?.enablePhoneme ?? config.enablePhoneme) ? 1 : 0
    };
  }, [config]);

  /** Perform one TTS request (token attached here) and return the raw Response. */
  const requestOnce = useCallback(async (body: Record<string, unknown>): Promise<Response> => {
    const token = await getToken();
    // Spread instead of mutating the caller's object.
    return fetch('/ali-tts/stream/v1/tts', {
      method: 'POST',
      headers: { 'Content-Type': 'application/json' },
      body: JSON.stringify({ ...body, token })
    });
  }, [getToken]);

  /**
   * Synthesize `text` to audio. Runs two requests in parallel:
   *  1) backend proxy for subtitle timestamps (failures degrade to []),
   *  2) REST proxy for the MP3 audio itself.
   *
   * @throws Error when the audio request fails (subtitle failure is tolerated).
   */
  const synthesizeText = useCallback(async (text: string): Promise<AudioData> => {
    try {
      setIsSynthesizing(true);
      setError(null);
      onStatusChange('synthesizing');

      // TODO(review): hardcoded dev host — make the subtitle-proxy base URL
      // configurable instead of pointing at localhost:5174.
      const wsPromise = fetch('http://localhost:5174/api/subtitles', {
        method: 'POST',
        headers: { 'Content-Type': 'application/json' },
        body: JSON.stringify({ text, voice: 'siyue' })
      }).then(async (r) => {
        if (!r.ok) throw new Error(await r.text());
        const data = await r.json();
        return (data?.subtitles || []) as TimestampData[];
      }).catch(() => [] as TimestampData[]); // subtitles are best-effort

      const primaryBody = buildRequestBody(text, { format: 'mp3', enableSubtitle: false, enableTimestamp: false, enablePhoneme: false });
      const response = await requestOnce(primaryBody);

      if (!response.ok) {
        const errorText = await response.text();
        throw new Error(`TTS API请求失败: ${response.status} - ${errorText}`);
      }

      const buf = await response.arrayBuffer();
      // 'audio/mpeg' is the registered MIME type for MP3 ('audio/mp3' is a
      // common but non-standard alias).
      const audioBlob = new Blob([buf], { type: 'audio/mpeg' });
      const audioUrl = URL.createObjectURL(audioBlob);
      const timestamps = await wsPromise;
      return { audioUrl, timestamps, phonemes: [] };

    } catch (err) {
      console.error('TTS合成失败:', err);
      const errorMessage = err instanceof Error ? err.message : 'TTS合成失败';
      setError(errorMessage);
      onStatusChange('error');
      throw err;
    } finally {
      setIsSynthesizing(false);
    }
  }, [onStatusChange, buildRequestBody, requestOnce]);

  /** Revoke the blob URL held by an audio element, if any. Safe to call twice. */
  const revokeAudioSrc = useCallback((audio: HTMLAudioElement | null) => {
    if (audio && audio.src.startsWith('blob:')) {
      URL.revokeObjectURL(audio.src);
    }
  }, []);

  /**
   * Play a previously synthesized clip. Resolves when playback ends, rejects
   * on any playback error. The blob URL is revoked on end/error.
   */
  const playAudio = useCallback((audioData: AudioData): Promise<void> => {
    return new Promise((resolve, reject) => {
      try {
        // Stop and release any clip currently playing — `onended` never fires
        // for a replaced clip, so revoke its URL here to avoid leaking it.
        if (audioRef.current) {
          audioRef.current.pause();
          audioRef.current.currentTime = 0;
          revokeAudioSrc(audioRef.current);
        }

        const audio = new Audio(audioData.audioUrl);
        audioRef.current = audio;

        // Stash timing data on the element so consumers holding audioRef can
        // drive subtitle/phoneme highlighting.
        (audio as HTMLAudioElement & { timestamps?: TimestampData[]; phonemes?: PhonemeData[] }).timestamps = audioData.timestamps;
        (audio as HTMLAudioElement & { timestamps?: TimestampData[]; phonemes?: PhonemeData[] }).phonemes = audioData.phonemes;

        audio.onloadstart = () => {
          console.log('开始加载音频');
        };

        audio.oncanplay = () => {
          console.log('音频可以播放');
        };

        audio.onplay = () => {
          console.log('开始播放音频');
          setIsPlaying(true);
          onStatusChange('playing');
        };

        audio.onended = () => {
          console.log('音频播放结束');
          setIsPlaying(false);
          onStatusChange('idle');
          URL.revokeObjectURL(audioData.audioUrl);
          resolve();
        };

        audio.onerror = (e) => {
          console.error('音频播放错误:', e);
          setIsPlaying(false);
          onStatusChange('error');
          setError('音频播放失败');
          URL.revokeObjectURL(audioData.audioUrl);
          reject(new Error('音频播放失败'));
        };

        audio.play().catch((playError) => {
          console.error('播放失败:', playError);
          setIsPlaying(false);
          onStatusChange('error');
          setError('播放失败');
          URL.revokeObjectURL(audioData.audioUrl);
          reject(playError);
        });

      } catch (err) {
        console.error('创建音频对象失败:', err);
        setIsPlaying(false);
        onStatusChange('error');
        setError('创建音频对象失败');
        reject(err);
      }
    });
  }, [onStatusChange, revokeAudioSrc]);

  /** Stop the current clip (if any), release its blob URL, and go idle. */
  const stopPlayback = useCallback(() => {
    const audio = audioRef.current;
    if (audio) {
      audio.pause();
      audio.currentTime = 0;
      // `onended` does not fire on a manual stop, so the blob URL would leak
      // without an explicit revoke here.
      revokeAudioSrc(audio);
      setIsPlaying(false);
      onStatusChange('idle');
    }
  }, [onStatusChange, revokeAudioSrc]);

  /**
   * High-level entry point: stop current playback, synthesize `text`, then
   * play it. Errors are reported via state/`onStatusChange` and swallowed.
   */
  const speakText = useCallback(async (text: string): Promise<void> => {
    try {
      stopPlayback();
      const audioData = await synthesizeText(text);
      await playAudio(audioData);
    } catch (err) {
      console.error('文字转语音播放失败:', err);
      setIsPlaying(false);
      onStatusChange('error');
    }
  }, [synthesizeText, playAudio, stopPlayback, onStatusChange]);

  // Release the audio element (and its blob URL) on unmount.
  React.useEffect(() => {
    return () => {
      if (audioRef.current) {
        audioRef.current.pause();
        if (audioRef.current.src.startsWith('blob:')) {
          URL.revokeObjectURL(audioRef.current.src);
        }
        audioRef.current = null;
      }
    };
  }, []);

  return {
    speakText,
    synthesizeText,
    playAudio,
    stopPlayback,
    isPlaying,
    isSynthesizing,
    error,
    audioRef
  };
};

export default useTextToSpeech;
