import CryptoJS from 'crypto-js'

/** Credentials for the iFlytek (Xunfei) open-platform speech API. */
export interface XunfeiConfig {
  /** Application ID from the iFlytek console. */
  appId: string
  /** API key placed in the authorization header. */
  apiKey: string
  /** API secret used to HMAC-SHA256-sign the WebSocket handshake. */
  apiSecret: string
}

/** One incremental transcription update delivered to the caller. */
export interface RecognitionResult {
  /** Recognized text contained in this update. */
  text: string
  /** True when the server marks the utterance finished (data.status === 2). */
  isFinal: boolean
}

/**
 * Streaming speech-recognition client for the iFlytek (Xunfei) IAT WebSocket API.
 *
 * Captures microphone audio with Web Audio, filters/boosts it, converts it to
 * 16 kHz mono PCM16 and streams it to `wss://iat-api.xfyun.cn/v2/iat`,
 * forwarding incremental transcription results to the caller.
 */
export class XunfeiVoiceRecognition {
  private config: XunfeiConfig
  private ws: WebSocket | null = null
  private audioContext: AudioContext | null = null
  // Kept so cleanup() can stop the tracks and actually release the microphone.
  // (The original dropped the stream reference, leaving the mic captured.)
  private mediaStream: MediaStream | null = null
  // Kept so cleanup() can detach the onaudioprocess callback and disconnect the node.
  private processor: ScriptProcessorNode | null = null
  private isRecording = false

  /**
   * @param config iFlytek credentials; all three fields are required.
   * @throws Error when any credential is missing.
   */
  constructor(config: XunfeiConfig) {
    // Validate configuration up front so misconfiguration fails at construction time.
    if (!config.appId || !config.apiKey || !config.apiSecret) {
      // FIX: the original message leaked a literal id ("ad822389") where the
      // field name "app_id" belongs.
      throw new Error('讯飞API配置不完整：app_id、api_key、api_secret都不能为空')
    }

    console.log('XunfeiVoiceRecognition 初始化配置:')
    console.log('- APP_ID:', config.appId)
    console.log('- API_KEY:', config.apiKey ? '已设置' : '未设置')
    console.log('- API_SECRET:', config.apiSecret ? '已设置' : '未设置')

    this.config = config
  }

  /** Whether the current environment supports microphone capture and WebSocket. */
  static isSupported(): boolean {
    return !!(
      typeof navigator !== 'undefined' &&
      navigator.mediaDevices &&
      typeof navigator.mediaDevices.getUserMedia === 'function' &&
      typeof WebSocket !== 'undefined'
    )
  }

  /**
   * Builds the authenticated WebSocket URL using the iFlytek HMAC-SHA256
   * scheme: the host, date and request line are signed with the api_secret
   * and the whole authorization string is base64-encoded into the query.
   */
  private getWebSocketUrl(): string {
    // The classic IAT endpoint.
    const host = 'iat-api.xfyun.cn'
    const path = '/v2/iat'
    const date = new Date().toUTCString()

    console.log('🔗 构建讯飞语音识别WebSocket URL...')
    console.log('- 使用的app_id:', this.config.appId)
    // SECURITY FIX: never log the raw api_key; only report whether it is set
    // (consistent with the constructor's masking).
    console.log('- 使用的api_key:', this.config.apiKey ? '已设置' : '未设置')
    console.log('- 时间戳:', date)

    // Signature source string in the exact format required by the iFlytek docs.
    const signatureOrigin = `host: ${host}\ndate: ${date}\nGET ${path} HTTP/1.1`
    const signature = CryptoJS.HmacSHA256(signatureOrigin, this.config.apiSecret).toString(CryptoJS.enc.Base64)

    const authorization = `api_key="${this.config.apiKey}", algorithm="hmac-sha256", headers="host date request-line", signature="${signature}"`
    const authorizationBase64 = btoa(authorization)

    const wsUrl = `wss://${host}${path}?authorization=${authorizationBase64}&date=${encodeURIComponent(date)}&host=${host}`

    // NOTE(review): this URL embeds a valid signed authorization token;
    // consider removing this log line in production builds.
    console.log('🔗 生成的WebSocket URL:', wsUrl)
    return wsUrl
  }

  /**
   * Requests the microphone, opens the recognition WebSocket and starts
   * streaming. Results and errors are reported through the callbacks.
   *
   * @param onResult receives incremental/final transcription results
   * @param onError receives human-readable error descriptions
   * @throws Error when a recognition session is already running
   */
  async startRecognition(
    onResult: (result: RecognitionResult) => void,
    onError: (error: string) => void
  ): Promise<void> {
    if (this.isRecording) {
      throw new Error('已在录音中')
    }

    try {
      console.log('开始语音识别...')

      if (!navigator.mediaDevices || !navigator.mediaDevices.getUserMedia) {
        throw new Error('浏览器不支持麦克风访问')
      }

      console.log('请求麦克风权限...')

      // Acquire the microphone stream; constraints target 16 kHz mono,
      // which is what the IAT endpoint expects.
      const stream = await navigator.mediaDevices.getUserMedia({
        audio: {
          sampleRate: 16000,
          channelCount: 1,
          echoCancellation: true,
          noiseSuppression: true,
          autoGainControl: true
        }
      })
      // Store the stream so cleanup() can stop its tracks (releases the mic).
      this.mediaStream = stream

      console.log('麦克风权限获取成功')
      console.log('音频流信息:', {
        tracks: stream.getAudioTracks().length,
        settings: stream.getAudioTracks()[0]?.getSettings()
      })

      const wsUrl = this.getWebSocketUrl()
      console.log('🔗 创建WebSocket连接:', wsUrl)
      this.ws = new WebSocket(wsUrl)

      this.ws.onopen = () => {
        console.log('WebSocket连接已建立')
        this.startAudioRecording(stream, onResult, onError)
      }

      this.ws.onmessage = (event) => {
        this.handleServerMessage(event, onResult, onError)
      }

      this.ws.onerror = (event) => {
        console.error('WebSocket连接错误:', event)
        onError('WebSocket连接错误')
      }

      this.ws.onclose = (event) => {
        console.log('WebSocket连接已关闭:', event.code, event.reason)
        // 1000 is a normal closure; anything else is reported as an error.
        if (event.code !== 1000) {
          onError(`WebSocket连接异常关闭: ${event.code} - ${event.reason}`)
        }
        this.cleanup()
      }

    } catch (error) {
      console.error('启动录音失败:', error)
      // FIX: release any resources already acquired (e.g. the microphone
      // stream when WebSocket setup fails) instead of leaking them.
      this.cleanup()
      onError(`启动录音失败: ${error}`)
    }
  }

  /**
   * Parses one server message and dispatches a result or an error.
   * Extracted from the onmessage closure for readability; behavior unchanged.
   */
  private handleServerMessage(
    event: MessageEvent,
    onResult: (result: RecognitionResult) => void,
    onError: (error: string) => void
  ): void {
    console.log('收到WebSocket消息:', event.data)

    try {
      // Guard against empty or non-text frames.
      if (!event.data || typeof event.data !== 'string') {
        console.warn('⚠️ 收到无效的WebSocket消息数据')
        return
      }

      const data = JSON.parse(event.data)
      console.log('解析后的数据结构:', JSON.stringify(data, null, 2))

      if (!data || typeof data !== 'object') {
        console.error('❌ 无效的响应数据结构')
        onError('服务器返回无效数据格式')
        return
      }

      if (data.code === 0) {
        console.log('服务器响应成功，code=0')
        const result = data.data?.result
        console.log('提取的result数据:', result)

        if (result && result.ws && Array.isArray(result.ws)) {
          // The IAT payload nests words as result.ws[].cw[].w; concatenate them.
          const text = result.ws.map((ws: any) => {
            if (ws.cw && Array.isArray(ws.cw)) {
              return ws.cw.map((cw: any) => {
                return (cw.w && typeof cw.w === 'string') ? cw.w : ''
              }).join('')
            }
            return ''
          }).join('') || ''

          console.log('识别结果:', { text, status: data.data?.status, hasText: !!text })

          if (text) {
            onResult({
              text,
              // status === 2 marks the final result of the utterance.
              isFinal: data.data?.status === 2
            })
          } else {
            console.log('识别结果为空，继续等待...')
          }
        } else {
          console.log('没有result数据或数据格式不正确，可能是连接确认消息')
        }
      } else {
        console.error('识别服务返回错误:', data)
        let errorMsg = data.message || data.desc || `API错误 (code: ${data.code})`

        // Map known error codes to friendlier messages.
        switch (data.code) {
          case 10004:
            errorMsg = '音频格式不支持，请检查麦克风设置'
            break
          case 10013:
            errorMsg = '参数错误，请检查API配置'
            break
          case 10163:
            errorMsg = '参数验证错误，请检查API密钥配置'
            console.warn('参数验证错误，但继续识别...')
            // Deliberately non-fatal: keep the session alive on this code.
            return
          case 11200:
            errorMsg = '认证失败，请检查API密钥'
            break
          default:
            errorMsg = data.message || data.desc || `API错误 (code: ${data.code})`
        }

        onError(errorMsg)
      }
    } catch (parseError) {
      console.error('解析WebSocket消息失败:', parseError, '原始数据:', event.data)
      onError('解析服务器响应失败，请检查网络连接')
    }
  }

  /**
   * Wires up the Web Audio processing chain (gain → band-pass filtering →
   * ScriptProcessor), sends the IAT start frame and begins streaming PCM16
   * audio frames over the open WebSocket.
   */
  private startAudioRecording(
    stream: MediaStream,
    _onResult: (result: RecognitionResult) => void,
    _onError: (error: string) => void
  ): void {
    console.log('开始音频录制和处理...')
    // 16 kHz matches the declared frame format 'audio/L16;rate=16000'.
    this.audioContext = new AudioContext({ sampleRate: 16000 })
    const source = this.audioContext.createMediaStreamSource(stream)

    // Gain boost to compensate for quiet microphones.
    const gainNode = this.audioContext.createGain()
    gainNode.gain.value = 2.0

    // High-pass filter: drop low-frequency rumble below 300 Hz.
    const highPassFilter = this.audioContext.createBiquadFilter()
    highPassFilter.type = 'highpass'
    highPassFilter.frequency.value = 300

    // Low-pass filter: keep the voice band below 3400 Hz.
    const lowPassFilter = this.audioContext.createBiquadFilter()
    lowPassFilter.type = 'lowpass'
    lowPassFilter.frequency.value = 3400

    // NOTE(review): ScriptProcessorNode is deprecated in favor of
    // AudioWorkletNode; kept here to preserve existing behavior.
    // Small buffer (2048 samples) for lower latency.
    const processor = this.audioContext.createScriptProcessor(2048, 1, 1)
    // Stored so cleanup() can disconnect it and drop the callback.
    this.processor = processor

    processor.onaudioprocess = (event) => {
      if (this.ws && this.ws.readyState === WebSocket.OPEN && this.isRecording) {
        const inputBuffer = event.inputBuffer.getChannelData(0)

        // Silence gate: skip frames whose RMS is below the threshold.
        const rms = Math.sqrt(inputBuffer.reduce((sum, sample) => sum + sample * sample, 0) / inputBuffer.length)
        const silenceThreshold = 0.01

        if (rms > silenceThreshold) {
          const pcmData = this.convertToPCM16(inputBuffer)

          console.log('处理音频数据，长度:', pcmData.length, '字节, RMS:', rms.toFixed(4))

          // Audio data frame (status 1) in the iFlytek wire format.
          const audioFrame = {
            data: {
              status: 1,
              format: 'audio/L16;rate=16000',
              audio: this.bytesToBase64(pcmData),
              encoding: 'raw'
            }
          }

          this.ws.send(JSON.stringify(audioFrame))
        } else {
          console.log('检测到静音，跳过发送')
        }
      }
    }

    // Audio graph: mic → gain → high-pass → low-pass → processor → output.
    source.connect(gainNode)
    gainNode.connect(highPassFilter)
    highPassFilter.connect(lowPassFilter)
    lowPassFilter.connect(processor)
    processor.connect(this.audioContext.destination)

    this.isRecording = true
    console.log('录音状态设置为true')

    // Start frame (status 0) declaring the session parameters.
    if (this.ws && this.ws.readyState === WebSocket.OPEN) {
      console.log('发送开始帧...')
      console.log('使用的app_id:', this.config.appId)

      const startFrame = {
        common: {
          app_id: this.config.appId
        },
        business: {
          language: 'zh_cn',        // Mandarin Chinese recognition
          domain: 'iat',            // general-purpose dictation domain
          accent: 'mandarin',       // Mandarin accent model
          vinfo: 1,                 // return sentence-boundary frame info
          vad_eos: 10000,           // end-of-speech silence window (ms); long to avoid premature cutoff
          ptt: 0,                   // punctuation off (0 = no punctuation)
          rlang: 'zh-cn',           // result language
          pd: 'iat',                // domain hint; avoids court-domain parameter errors
          nunum: 1,                 // normalize digits in results
          speex_size: 60,           // speex frame size (unused for raw PCM)
          nbest: 1,                 // number of candidate sentences
          wbest: 1                  // number of candidate words
        },
        data: {
          status: 0,                // 0 marks the start frame
          format: 'audio/L16;rate=16000',
          audio: '',
          encoding: 'raw'
        }
      }

      console.log('发送的开始帧数据:', JSON.stringify(startFrame, null, 2))
      this.ws.send(JSON.stringify(startFrame))
      console.log('开始帧发送成功')
    } else {
      console.error('WebSocket未就绪，无法发送开始帧')
    }
  }

  /**
   * Base64-encodes a byte array in chunks. The original spread the whole
   * array into String.fromCharCode, which overflows the call stack for
   * large buffers; chunking keeps the argument count bounded.
   */
  private bytesToBase64(bytes: Uint8Array): string {
    let binary = ''
    const chunkSize = 1024
    for (let i = 0; i < bytes.length; i += chunkSize) {
      binary += String.fromCharCode(...bytes.subarray(i, i + chunkSize))
    }
    return btoa(binary)
  }

  /**
   * Converts Float32 samples in [-1, 1] to little-endian signed 16-bit PCM.
   * Samples are clamped before scaling by 0x7FFF.
   */
  private convertToPCM16(input: Float32Array): Uint8Array {
    const output = new ArrayBuffer(input.length * 2)
    const view = new DataView(output)

    for (let i = 0; i < input.length; i++) {
      const sample = Math.max(-1, Math.min(1, input[i]))
      view.setInt16(i * 2, sample * 0x7FFF, true)
    }

    return new Uint8Array(output)
  }

  /**
   * Stops the session: sends the IAT end frame (status 2) and releases
   * all audio/network resources.
   *
   * NOTE(review): cleanup() closes the WebSocket immediately after the end
   * frame is queued, so the server's final (status 2) result may never be
   * received — confirm whether callers rely on the final result.
   */
  stopRecognition(): void {
    console.log('停止语音识别...')

    if (this.ws && this.ws.readyState === WebSocket.OPEN) {
      // End frame (status 2) in the iFlytek wire format.
      const endFrame = {
        data: {
          status: 2,
          format: 'audio/L16;rate=16000',
          audio: '',
          encoding: 'raw'
        }
      }

      console.log('发送结束帧:', JSON.stringify(endFrame))
      this.ws.send(JSON.stringify(endFrame))
    }

    this.isRecording = false
    this.cleanup()
  }

  /** Releases the processor, microphone, AudioContext and WebSocket. */
  private cleanup(): void {
    console.log('清理资源...')

    // Detach and disconnect the audio processor so no further callbacks fire.
    if (this.processor) {
      this.processor.onaudioprocess = null
      this.processor.disconnect()
      this.processor = null
    }

    // FIX: stop the microphone tracks; otherwise the mic stays captured
    // (browser keeps the recording indicator on) after the session ends.
    if (this.mediaStream) {
      this.mediaStream.getTracks().forEach(track => track.stop())
      this.mediaStream = null
    }

    if (this.audioContext) {
      // close() returns a Promise; swallow rejection (e.g. already closed).
      this.audioContext.close().catch(() => {})
      this.audioContext = null
    }

    if (this.ws) {
      this.ws.close()
      this.ws = null
    }

    this.isRecording = false
    console.log('资源清理完成')
  }
}

export default XunfeiVoiceRecognition