import Taro from '@tarojs/taro'

/**
 * Snapshot of everything we can learn about the runtime's audio environment.
 * Filled by VoiceDebugger.collectDebugInfo(); any probe that fails leaves its
 * field as null. The `any` types mirror the loosely-typed objects returned by
 * the Taro.* APIs — NOTE(review): could likely be tightened to Taro's own
 * result types; confirm against the Taro typings in use.
 */
export interface VoiceDebugInfo {
  /** Taro build target (process.env.TARO_ENV), or 'unknown'. */
  platform: string
  /** Result of Taro.getDeviceInfo(), or null if the call failed. */
  deviceInfo: any
  /** State snapshot of a freshly created InnerAudioContext, or { error } on failure. */
  audioContext: any
  /** Feature flags: hasAudioContext / hasAudioElement / hasWebAudio. */
  audioCapabilities: any
  /** Result of Taro.getNetworkType(), or null if the call failed. */
  networkStatus: any
  /** Result of Taro.getSystemInfo(), or null if the call failed. */
  systemInfo: any
}

/**
 * Diagnostic helper for voice/audio playback issues in a Taro mini-program.
 *
 * Collects environment info (device, system, network, audio capabilities),
 * runs an end-to-end playback test against a given URL, and renders a
 * human-readable report. Singleton — obtain via VoiceDebugger.getInstance().
 */
export class VoiceDebugger {
  private static instance: VoiceDebugger
  private debugInfo: VoiceDebugInfo | null = null

  /** Lazily creates and returns the process-wide singleton instance. */
  static getInstance(): VoiceDebugger {
    if (!VoiceDebugger.instance) {
      VoiceDebugger.instance = new VoiceDebugger()
    }
    return VoiceDebugger.instance
  }

  /**
   * Best-effort extraction of a human-readable message from an unknown
   * catch value. Taro errors carry `errMsg`; plain Errors carry `message`.
   */
  private static errorMessage(err: unknown, fallback: string): string {
    if (err instanceof Error && err.message) {
      return err.message
    }
    const e = err as { errMsg?: string; message?: string } | null | undefined
    return e?.errMsg || e?.message || fallback
  }

  /**
   * 收集调试信息 — gather platform, device, system, network and audio
   * diagnostics into a VoiceDebugInfo snapshot.
   *
   * Each probe is independent: a failing probe only logs a warning and
   * leaves its field null; it never aborts the remaining probes.
   *
   * @returns the collected snapshot (also cached for generateDebugReport()).
   */
  async collectDebugInfo(): Promise<VoiceDebugInfo> {
    console.log('🔍 开始收集语音播放调试信息...')

    const debugInfo: VoiceDebugInfo = {
      platform: process.env.TARO_ENV || 'unknown',
      deviceInfo: null,
      audioContext: null,
      audioCapabilities: null,
      networkStatus: null,
      systemInfo: null
    }

    try {
      // 获取设备信息
      try {
        debugInfo.deviceInfo = await Taro.getDeviceInfo()
        console.log('📱 设备信息:', debugInfo.deviceInfo)
      } catch (error) {
        console.warn('⚠️ 获取设备信息失败:', error)
      }

      // 获取系统信息
      try {
        debugInfo.systemInfo = await Taro.getSystemInfo()
        console.log('💻 系统信息:', debugInfo.systemInfo)
      } catch (error) {
        console.warn('⚠️ 获取系统信息失败:', error)
      }

      // 获取网络状态
      try {
        debugInfo.networkStatus = await Taro.getNetworkType()
        console.log('🌐 网络状态:', debugInfo.networkStatus)
      } catch (error) {
        console.warn('⚠️ 获取网络状态失败:', error)
      }

      // 测试音频上下文创建: probe a throwaway InnerAudioContext, snapshot
      // its initial state, then destroy it so no handle leaks.
      try {
        const audioContext = Taro.createInnerAudioContext()
        debugInfo.audioContext = {
          created: !!audioContext,
          obeyMuteSwitch: audioContext.obeyMuteSwitch,
          volume: audioContext.volume,
          src: audioContext.src,
          paused: audioContext.paused,
          duration: audioContext.duration,
          currentTime: audioContext.currentTime,
          buffered: audioContext.buffered,
          // readyState is not declared on Taro's InnerAudioContext typings
          // on all targets; read it defensively.
          readyState: (audioContext as any).readyState
        }
        console.log('🎵 音频上下文信息:', debugInfo.audioContext)

        // 销毁测试用的音频上下文
        audioContext.destroy()
      } catch (error) {
        console.error('❌ 创建音频上下文失败:', error)
        debugInfo.audioContext = { error: VoiceDebugger.errorMessage(error, '创建失败') }
      }

      // 检查音频能力
      // NOTE: `webkitAudioContext` is not a declared TS global — referencing
      // it bare is a compile error. Probe it via globalThis instead.
      debugInfo.audioCapabilities = {
        hasAudioContext: typeof Taro.createInnerAudioContext === 'function',
        hasAudioElement: typeof Audio !== 'undefined',
        hasWebAudio:
          typeof AudioContext !== 'undefined' ||
          typeof (globalThis as any).webkitAudioContext !== 'undefined'
      }
      console.log('🔊 音频能力:', debugInfo.audioCapabilities)

    } catch (error) {
      console.error('❌ 收集调试信息失败:', error)
    }

    this.debugInfo = debugInfo
    return debugInfo
  }

  /**
   * 测试语音播放 — plays `testUrl` through an InnerAudioContext and reports
   * the outcome.
   *
   * Runs a HEAD request first (result recorded in details.networkTest, never
   * fatal), then plays the audio, logging lifecycle events. Resolves when
   * playback ends, errors, or after a 10s timeout (timeout counts as failure).
   * The context's final state is captured BEFORE it is destroyed, and it is
   * destroyed exactly once regardless of which path finishes first.
   *
   * @param testUrl http(s) URL of the audio to play.
   * @returns success flag, optional error message, and diagnostic details
   *          (networkTest, audioContext, eventLog, finalState).
   */
  async testVoicePlayback(testUrl: string): Promise<{
    success: boolean
    error?: string
    details: any
  }> {
    console.log('🧪 开始测试语音播放:', testUrl)

    const result = {
      success: false,
      error: undefined as string | undefined,
      details: {} as any
    }

    try {
      // 先检查URL有效性
      if (!testUrl || !testUrl.startsWith('http')) {
        result.error = '无效的音频URL'
        return result
      }

      // 测试网络请求 (best-effort reachability check; failure is recorded,
      // not fatal — the playback attempt still proceeds)
      try {
        const networkTest = await Taro.request({
          url: testUrl,
          method: 'HEAD',
          timeout: 5000
        })
        result.details.networkTest = {
          success: true,
          statusCode: networkTest.statusCode,
          headers: networkTest.header
        }
        console.log('✅ 网络请求测试成功:', networkTest.statusCode)
      } catch (networkError) {
        result.details.networkTest = {
          success: false,
          error: VoiceDebugger.errorMessage(networkError, '网络请求失败')
        }
        console.warn('⚠️ 网络请求测试失败:', networkError)
      }

      // 创建音频上下文
      const audioContext = Taro.createInnerAudioContext()
      result.details.audioContext = {
        created: !!audioContext,
        obeyMuteSwitch: audioContext.obeyMuteSwitch,
        volume: audioContext.volume
      }

      // 配置音频上下文 (ignore the iOS mute switch, full volume)
      audioContext.obeyMuteSwitch = false
      audioContext.volume = 1.0
      audioContext.src = testUrl

      // 设置事件监听 — passive lifecycle logging
      const eventLog: any[] = []

      audioContext.onPlay(() => {
        eventLog.push({ event: 'onPlay', time: Date.now() })
        console.log('▶️ 播放事件触发')
      })

      audioContext.onCanplay(() => {
        eventLog.push({
          event: 'onCanplay',
          time: Date.now(),
          duration: audioContext.duration
        })
        console.log('🎵 可播放事件触发, 时长:', audioContext.duration)
      })

      audioContext.onTimeUpdate(() => {
        eventLog.push({
          event: 'onTimeUpdate',
          time: Date.now(),
          currentTime: audioContext.currentTime,
          duration: audioContext.duration
        })
      })

      // 等待播放结果. onEnded/onError are registered exactly once, and
      // finish() is idempotent so the context is destroyed exactly once —
      // the original code destroyed it in the timeout handler AND after the
      // await, and read final state from an already-destroyed context.
      await new Promise<void>((resolve) => {
        let settled = false

        const finish = () => {
          if (settled) return
          settled = true
          clearTimeout(timer)
          // Snapshot state BEFORE destroying the context.
          result.details.finalState = {
            paused: audioContext.paused,
            duration: audioContext.duration,
            currentTime: audioContext.currentTime,
            src: audioContext.src
          }
          audioContext.destroy()
          resolve()
        }

        const timer = setTimeout(() => {
          console.log('⏰ 播放测试超时')
          audioContext.stop()
          if (!result.success) {
            result.error = '播放测试超时'
          }
          finish()
        }, 10000) // 10秒超时

        audioContext.onEnded(() => {
          eventLog.push({ event: 'onEnded', time: Date.now() })
          console.log('🏁 播放结束事件触发')
          result.success = true
          finish()
        })

        audioContext.onError((err: any) => {
          eventLog.push({
            event: 'onError',
            time: Date.now(),
            error: err
          })
          console.error('❌ 播放错误事件触发:', err)
          result.error = err.errMsg || err.message || '播放失败'
          finish()
        })

        // 开始播放
        audioContext.play()
        console.log('▶️ 开始播放测试音频')
      })

      result.details.eventLog = eventLog

    } catch (error) {
      console.error('❌ 语音播放测试失败:', error)
      result.error = VoiceDebugger.errorMessage(error, '测试失败')
    }

    return result
  }

  /**
   * 生成调试报告 — render the cached VoiceDebugInfo as a readable text block.
   * Requires collectDebugInfo() to have run first; otherwise returns an
   * instructional error string.
   */
  generateDebugReport(): string {
    if (!this.debugInfo) {
      return '❌ 未收集到调试信息，请先调用 collectDebugInfo()'
    }

    const report = `
🔍 语音播放调试报告
====================

📱 平台信息:
- 平台: ${this.debugInfo.platform}
- 设备信息: ${JSON.stringify(this.debugInfo.deviceInfo, null, 2)}

💻 系统信息:
${JSON.stringify(this.debugInfo.systemInfo, null, 2)}

🌐 网络状态:
${JSON.stringify(this.debugInfo.networkStatus, null, 2)}

🎵 音频上下文:
${JSON.stringify(this.debugInfo.audioContext, null, 2)}

🔊 音频能力:
${JSON.stringify(this.debugInfo.audioCapabilities, null, 2)}

====================
报告生成时间: ${new Date().toLocaleString()}
    `

    return report
  }

  /** 获取调试信息 — last collected snapshot, or null if never collected. */
  getDebugInfo(): VoiceDebugInfo | null {
    return this.debugInfo
  }
}

// 导出单例实例 — shared convenience instance; identical to the object
// returned by VoiceDebugger.getInstance().
export const voiceDebugger = VoiceDebugger.getInstance()
