import { ref, onUnmounted, computed, readonly } from 'vue'

/** Configuration accepted by {@link useSpeechRecognition}. */
export interface SpeechRecognitionOptions {
  /** Keep listening after each utterance instead of stopping (default true). */
  continuous?: boolean
  /** Emit partial (not-yet-final) results while the user speaks (default true). */
  interimResults?: boolean
  /** Maximum number of alternative transcriptions per result (default 1). */
  maxAlternatives?: number
  /** BCP-47 language tag for recognition (default 'zh-CN'). */
  language?: string
}

/**
 * A single recognition result.
 * NOTE(review): exported but not referenced in this file — presumably
 * consumed by callers of the composable; verify before removing.
 */
export interface VoiceRecognitionResult {
  /** Recognized text. */
  transcript: string
  /** Recognizer confidence in [0, 1]. */
  confidence: number
  /** True once the recognizer will no longer revise this result. */
  isFinal: boolean
}

/** Reactive state exposed (read-only) by {@link useSpeechRecognition}. */
export interface SpeechRecognitionState {
  /** True while the recognizer is actively listening. */
  isListening: boolean
  /** True when the browser provides a SpeechRecognition implementation. */
  isSupported: boolean
  /** Localized, human-readable error message, or null when no error. */
  error: string | null
  /** Finalized recognized text. */
  transcript: string
  /** In-progress (revisable) recognized text. */
  interimTranscript: string
}

// Ambient declarations: the Web Speech API constructors are not part of the
// standard DOM lib typings, so expose them (standard and webkit-prefixed)
// on Window for feature detection below.
declare global {
  interface Window {
    SpeechRecognition: any
    webkitSpeechRecognition: any
  }
}

/**
 * Minimal local typing of the browser SpeechRecognition object
 * (properties, control methods, and event-handler slots used below).
 */
interface ISpeechRecognition extends EventTarget {
  continuous: boolean
  interimResults: boolean
  lang: string
  maxAlternatives: number
  serviceURI: string
  grammars: any
  
  // Control methods: start/stop end gracefully; abort discards pending results.
  start(): void
  stop(): void
  abort(): void
  
  // Event-handler properties mirroring the DOM API surface.
  onstart: ((this: ISpeechRecognition, ev: Event) => any) | null
  onend: ((this: ISpeechRecognition, ev: Event) => any) | null
  onerror: ((this: ISpeechRecognition, ev: ISpeechRecognitionErrorEvent) => any) | null
  onresult: ((this: ISpeechRecognition, ev: ISpeechRecognitionEvent) => any) | null
  onnomatch: ((this: ISpeechRecognition, ev: ISpeechRecognitionEvent) => any) | null
  onsoundstart: ((this: ISpeechRecognition, ev: Event) => any) | null
  onsoundend: ((this: ISpeechRecognition, ev: Event) => any) | null
  onspeechstart: ((this: ISpeechRecognition, ev: Event) => any) | null
  onspeechend: ((this: ISpeechRecognition, ev: Event) => any) | null
  onaudiostart: ((this: ISpeechRecognition, ev: Event) => any) | null
  onaudioend: ((this: ISpeechRecognition, ev: Event) => any) | null
}

/** Error event: `error` is a machine code (e.g. 'no-speech'), `message` is free text. */
interface ISpeechRecognitionErrorEvent extends Event {
  error: string
  message: string
}

/** Result event: `resultIndex` is the first entry in `results` changed by this event. */
interface ISpeechRecognitionEvent extends Event {
  results: ISpeechRecognitionResultList
  resultIndex: number
}

/** Array-like collection of recognition results. */
interface ISpeechRecognitionResultList {
  length: number
  item(index: number): ISpeechRecognitionResult
  [index: number]: ISpeechRecognitionResult
}

/** One result: an array-like list of alternatives, best alternative at index 0. */
interface ISpeechRecognitionResult {
  length: number
  item(index: number): ISpeechRecognitionAlternative
  // Final results will no longer be revised by subsequent events.
  isFinal: boolean
  [index: number]: ISpeechRecognitionAlternative
}

/** One alternative transcription with its confidence score in [0, 1]. */
interface ISpeechRecognitionAlternative {
  transcript: string
  confidence: number
}

/**
 * Vue composable wrapping the browser Web Speech API
 * (`SpeechRecognition` / `webkitSpeechRecognition`) for voice-to-text input.
 *
 * @param options recognizer configuration; defaults: continuous=true,
 *   interimResults=true, maxAlternatives=1, language='zh-CN'
 * @returns read-only reactive `state`, control functions
 *   (`start`/`stop`/`abort`/`reset`/`getFullTranscript`) and convenience
 *   computed accessors over the individual state fields
 */
export function useSpeechRecognition(options: SpeechRecognitionOptions = {}) {
  const {
    continuous = true,
    interimResults = true,
    maxAlternatives = 1,
    language = 'zh-CN'
  } = options

  // Reactive recognition state (exposed read-only to consumers).
  const state = ref<SpeechRecognitionState>({
    isListening: false,
    isSupported: false,
    error: null,
    transcript: '',
    interimTranscript: ''
  })

  let recognition: ISpeechRecognition | null = null

  // Create and configure the recognizer. Returns false when the browser
  // exposes neither the standard nor the webkit-prefixed constructor.
  const initSpeechRecognition = (): boolean => {
    const SpeechRecognition = 
      window.SpeechRecognition || 
      window.webkitSpeechRecognition

    if (!SpeechRecognition) {
      state.value.isSupported = false
      state.value.error = '当前浏览器不支持语音识别功能'
      return false
    }

    state.value.isSupported = true

    recognition = new SpeechRecognition()
    
    if (recognition) {
      recognition.continuous = continuous
      recognition.interimResults = interimResults
      recognition.maxAlternatives = maxAlternatives
      recognition.lang = language

      recognition.onstart = () => {
        state.value.isListening = true
        state.value.error = null
      }

      recognition.onend = () => {
        state.value.isListening = false
      }

      recognition.onerror = (event: ISpeechRecognitionErrorEvent) => {
        state.value.isListening = false
        handleSpeechError(event.error)
      }

      recognition.onresult = (event: ISpeechRecognitionEvent) => {
        let interimTranscript = ''
        let finalTranscript = ''

        // resultIndex marks the first result changed by THIS event; entries
        // before it were already handled by earlier events.
        for (let i = event.resultIndex; i < event.results.length; i++) {
          const result = event.results[i]
          if (result && result[0]) {
            const transcript = result[0].transcript

            if (result.isFinal) {
              finalTranscript += transcript
            } else {
              interimTranscript += transcript
            }
          }
        }

        state.value.interimTranscript = interimTranscript
        if (finalTranscript) {
          // BUGFIX: append rather than overwrite. In continuous mode each
          // result event carries only the newly finalized segments, so plain
          // assignment discarded all previously finalized speech. Use
          // reset() to clear accumulated text between sessions.
          state.value.transcript += finalTranscript
        }
      }
    }

    return true
  }

  // Map Web Speech API error codes to localized, user-facing messages.
  const handleSpeechError = (error: string) => {
    const errorMessages: Record<string, string> = {
      'no-speech': '未检测到语音输入，请重试',
      'audio-capture': '无法访问麦克风，请检查权限设置',
      'not-allowed': '麦克风权限被拒绝，请允许访问麦克风',
      'network': '网络连接错误，请检查网络',
      'service-not-allowed': '语音识别服务不可用',
      'bad-grammar': '语法错误',
      'language-not-supported': '不支持当前语言'
    }

    state.value.error = errorMessages[error] || `语音识别错误: ${error}`
  }

  /**
   * Start listening. Lazily (re-)initializes the recognizer when the initial
   * setup failed or support was not yet detected.
   * @returns true when listening was started (or already active), false otherwise
   */
  const start = async () => {
    if (!state.value.isSupported) {
      if (!initSpeechRecognition()) {
        return false
      }
    }

    // Calling start() on an already-running recognizer would throw.
    if (state.value.isListening) {
      return true
    }

    try {
      // Clear stale error and interim text from the previous session.
      state.value.error = null
      state.value.interimTranscript = ''
      
      recognition?.start()
      return true
    } catch (error) {
      state.value.error = '启动语音识别失败'
      // Keep the original error visible for diagnosis instead of swallowing it.
      console.error('Speech recognition start error:', error)
      return false
    }
  }

  // Stop gracefully: pending results are still delivered via onresult/onend.
  const stop = () => {
    if (recognition && state.value.isListening) {
      recognition.stop()
    }
  }

  // Abort immediately: pending results are discarded.
  const abort = () => {
    if (recognition && state.value.isListening) {
      recognition.abort()
    }
  }

  // Clear accumulated transcripts and any error message.
  const reset = () => {
    state.value.transcript = ''
    state.value.interimTranscript = ''
    state.value.error = null
  }

  // Finalized text followed by the current in-progress text.
  const getFullTranscript = () => {
    return state.value.transcript + state.value.interimTranscript
  }

  // Abort any active session when the owning component unmounts so the
  // microphone is released and no handlers fire on dead state.
  onUnmounted(() => {
    if (recognition && state.value.isListening) {
      recognition.abort()
    }
  })

  // Eagerly detect support so `isSupported` is correct before first start().
  initSpeechRecognition()

  return {
    state: readonly(state),
    start,
    stop,
    abort,
    reset,
    getFullTranscript,
    
    // Convenience computed accessors over individual state fields.
    isListening: computed(() => state.value.isListening),
    isSupported: computed(() => state.value.isSupported),
    error: computed(() => state.value.error),
    transcript: computed(() => state.value.transcript),
    interimTranscript: computed(() => state.value.interimTranscript),
    fullTranscript: computed(() => getFullTranscript())
  }
}
