/**
 * 多模态AI服务
 * 支持语音识别、语音合成、图像识别等多模态交互
 */

'use client'

// Speech-recognition type declarations.
// Chrome/Edge expose the Web Speech API under the `webkit` prefix, and the
// standard `SpeechRecognition` constructor is not part of TypeScript's DOM
// lib, so both are declared here as `any` on Window.
declare global {
  interface Window {
    SpeechRecognition: any
    webkitSpeechRecognition: any
    speechSynthesis: SpeechSynthesis
  }
}

/** Per-call configuration for a speech-recognition session. */
export interface VoiceConfig {
  /** BCP-47 language tag, e.g. 'zh-CN'. */
  language: string
  /** Keep recognizing after the first result instead of stopping. */
  continuous: boolean
  /** Emit interim (non-final) results while the user is still speaking. */
  interimResults: boolean
  /** Maximum number of alternative transcripts per result. */
  maxAlternatives: number
}

/** One recognition result forwarded from the speech engine. */
export interface SpeechResult {
  transcript: string
  /** Engine-reported confidence in [0, 1]. */
  confidence: number
  /** True once the engine will not revise this transcript further. */
  isFinal: boolean
}

/** Combined output of OCR, object detection, and medical-content analysis. */
export interface ImageAnalysisResult {
  objects: Array<{
    name: string
    confidence: number
    /** Bounding box as [x, y, width, height] — presumably pixels; confirm against detector. */
    bbox: [number, number, number, number]
  }>
  /** Text fragments extracted by OCR. */
  text: string[]
  /** Scene label; currently always 'unknown' (see analyzeImage). */
  scene: string
  /** Present when the medical-analysis endpoint responded. */
  medicalContent?: {
    bodyParts: string[]
    symptoms: string[]
    medicalEquipment: string[]
  }
}

/** Per-call configuration for speech synthesis (text-to-speech). */
export interface VoiceSettings {
  voice?: SpeechSynthesisVoice
  /** Speaking rate; 1 is normal. */
  rate: number
  /** Voice pitch; 1 is normal. */
  pitch: number
  /** Output volume in [0, 1]. */
  volume: number
  language: string
}

/**
 * Multimodal AI service: speech recognition, speech synthesis, image
 * analysis (OCR / object detection / medical content) and simple
 * voice-command parsing. Safe to construct during SSR — all browser
 * API access is guarded.
 */
class MultimodalAI {
  private speechRecognition: SpeechRecognition | null = null
  private speechSynthesis: SpeechSynthesis | null = null
  private isListening = false
  // Callbacks for the currently pending startListening() promise;
  // cleared as soon as that promise settles.
  private onSpeechResult?: (result: SpeechResult) => void
  private onSpeechError?: (error: string) => void

  constructor() {
    // Both initializers no-op on the server or in unsupported browsers.
    this.initializeSpeechRecognition()
    this.initializeSpeechSynthesis()
  }

  /**
   * Initialize the Web Speech recognition engine (Chrome/Edge use the
   * `webkit` prefix). Leaves `speechRecognition` null when unsupported.
   */
  private initializeSpeechRecognition() {
    if (typeof window === 'undefined') return

    if ('webkitSpeechRecognition' in window || 'SpeechRecognition' in window) {
      const SpeechRecognition = window.webkitSpeechRecognition || window.SpeechRecognition
      this.speechRecognition = new SpeechRecognition()

      // Defaults; startListening() may override these per call.
      this.speechRecognition.continuous = false
      this.speechRecognition.interimResults = true
      this.speechRecognition.maxAlternatives = 1
      this.speechRecognition.lang = 'zh-CN'

      this.speechRecognition.onstart = () => {
        this.isListening = true
        console.log('语音识别开始')
      }

      this.speechRecognition.onresult = (event: SpeechRecognitionEvent) => {
        // Forward every (possibly interim) result to the current subscriber.
        for (let i = event.resultIndex; i < event.results.length; i++) {
          const result = event.results[i]
          const transcript = result[0].transcript
          const confidence = result[0].confidence

          if (this.onSpeechResult) {
            this.onSpeechResult({
              transcript,
              confidence,
              isFinal: result.isFinal
            })
          }
        }
      }

      this.speechRecognition.onerror = (event: SpeechRecognitionErrorEvent) => {
        this.isListening = false
        const errorMessage = this.getErrorMessage(event.error)
        if (this.onSpeechError) {
          this.onSpeechError(errorMessage)
        }
      }

      this.speechRecognition.onend = () => {
        this.isListening = false
        console.log('语音识别结束')
        // FIX: if the session ended without ever delivering a final result
        // (and without an error event), fail the pending startListening()
        // promise instead of leaving it pending forever. After a normal
        // resolve/reject the callbacks are already cleared, so this is a
        // no-op in that case.
        if (this.onSpeechError) {
          this.onSpeechError('未检测到语音，请重试')
        }
      }
    }
  }

  /**
   * Grab the browser speech-synthesis handle when available.
   */
  private initializeSpeechSynthesis() {
    if (typeof window !== 'undefined' && 'speechSynthesis' in window) {
      this.speechSynthesis = window.speechSynthesis
    }
  }

  /**
   * Run one speech-recognition session.
   *
   * @param config Optional per-call overrides for language/continuity/etc.
   * @returns Resolves with the first *final* recognition result.
   *          Rejects with an Error when unsupported, already listening,
   *          the engine reports an error, or no speech was captured.
   */
  startListening(config?: Partial<VoiceConfig>): Promise<SpeechResult> {
    return new Promise((resolve, reject) => {
      if (!this.speechRecognition) {
        // FIX: reject with Error instances (messages unchanged) rather
        // than bare strings, so callers get stack traces and `.message`.
        reject(new Error('语音识别不支持'))
        return
      }

      if (this.isListening) {
        reject(new Error('正在进行语音识别'))
        return
      }

      // Apply per-call configuration overrides.
      if (config) {
        if (config.language) this.speechRecognition.lang = config.language
        if (config.continuous !== undefined) this.speechRecognition.continuous = config.continuous
        if (config.interimResults !== undefined) this.speechRecognition.interimResults = config.interimResults
        if (config.maxAlternatives) this.speechRecognition.maxAlternatives = config.maxAlternatives
      }

      // FIX: clear the subscriber callbacks once the promise settles so a
      // later onend/onerror from the same session cannot fire into a stale
      // closure (the promise is settled; duplicate calls must be no-ops).
      const cleanup = () => {
        this.onSpeechResult = undefined
        this.onSpeechError = undefined
      }

      this.onSpeechResult = (result) => {
        if (result.isFinal) {
          cleanup()
          resolve(result)
        }
      }

      this.onSpeechError = (error) => {
        cleanup()
        reject(new Error(error))
      }

      try {
        this.speechRecognition.start()
      } catch (e: unknown) {
        // FIX: start() throws synchronously if a session is already active;
        // surface that instead of letting the promise hang.
        cleanup()
        reject(e instanceof Error ? e : new Error(String(e)))
      }
    })
  }

  /**
   * Stop the current speech-recognition session, if any.
   */
  stopListening() {
    if (this.speechRecognition && this.isListening) {
      this.speechRecognition.stop()
    }
  }

  /**
   * Speak `text` aloud via the browser's speech synthesis.
   *
   * @param text     Text to speak.
   * @param settings Optional overrides; any field omitted falls back to
   *                 the defaults (zh-CN, rate/pitch/volume = 1).
   * @returns Resolves when playback finishes; rejects on synthesis error
   *          or when synthesis is unsupported.
   */
  speak(text: string, settings?: Partial<VoiceSettings>): Promise<void> {
    return new Promise((resolve, reject) => {
      if (!this.speechSynthesis) {
        reject(new Error('语音合成不支持'))
        return
      }

      const utterance = new SpeechSynthesisUtterance(text)

      // FIX: merge per-call settings with defaults via `??`. Previously the
      // defaults were only applied when `settings` was entirely absent, so a
      // partial object (e.g. { rate: 1.2 }) lost the zh-CN default language;
      // truthiness checks also silently ignored valid zero values such as
      // volume 0 or pitch 0.
      utterance.lang = settings?.language ?? 'zh-CN'
      utterance.rate = settings?.rate ?? 1
      utterance.pitch = settings?.pitch ?? 1
      utterance.volume = settings?.volume ?? 1
      if (settings?.voice) utterance.voice = settings.voice

      utterance.onend = () => resolve()
      // FIX: wrap the engine's error code in an Error instead of rejecting
      // with a bare string.
      utterance.onerror = (event) => reject(new Error(String(event.error)))

      this.speechSynthesis.speak(utterance)
    })
  }

  /**
   * List the voices the browser currently offers.
   * NOTE(review): this may be empty until the browser fires
   * `voiceschanged` — callers should be prepared to retry.
   */
  getVoices(): SpeechSynthesisVoice[] {
    if (!this.speechSynthesis) return []
    return this.speechSynthesis.getVoices()
  }

  /**
   * Analyze an image by combining OCR, object detection, and medical
   * content analysis. Each sub-analysis degrades gracefully to an empty
   * result when its backing service is unavailable.
   */
  async analyzeImage(imageFile: File): Promise<ImageAnalysisResult> {
    try {
      // FIX: the three analyses are independent of each other — run them
      // in parallel instead of sequentially awaiting each one.
      const [textResults, objectResults, medicalResults] = await Promise.all([
        this.extractTextFromImage(imageFile),
        this.detectObjects(imageFile),
        this.analyzeMedicalContent(imageFile)
      ])

      return {
        objects: objectResults,
        text: textResults,
        scene: 'unknown',
        medicalContent: medicalResults
      }
    } catch (error) {
      console.error('图像分析失败:', error)
      throw new Error('图像分析失败')
    }
  }

  /**
   * Extract text from the image via the backend OCR endpoint.
   * Falls back to an empty list when the service is unavailable.
   */
  private async extractTextFromImage(imageFile: File): Promise<string[]> {
    try {
      const formData = new FormData()
      formData.append('image', imageFile)

      const response = await fetch('/api/ocr', {
        method: 'POST',
        body: formData
      })

      if (!response.ok) {
        throw new Error('OCR服务不可用')
      }

      const result = await response.json()
      return result.texts || []
    } catch (error) {
      // Deliberate best-effort: OCR failure must not abort the overall analysis.
      console.log('OCR服务不可用，使用fallback')
      return []
    }
  }

  /**
   * Detect objects in the image. Currently delegates to the client-side
   * stub; falls back to an empty list on failure.
   */
  private async detectObjects(imageFile: File): Promise<Array<{name: string; confidence: number; bbox: [number, number, number, number]}>> {
    try {
      // Could integrate TensorFlow.js or a cloud API here.
      return await this.clientSideObjectDetection(imageFile)
    } catch (error) {
      console.log('物体检测失败:', error)
      return []
    }
  }

  /**
   * Client-side object detection placeholder.
   * WARNING: returns hard-coded mock data after a simulated 1s delay —
   * a real TensorFlow.js model still needs to be wired in.
   */
  private async clientSideObjectDetection(imageFile: File): Promise<Array<{name: string; confidence: number; bbox: [number, number, number, number]}>> {
    return new Promise((resolve) => {
      setTimeout(() => {
        resolve([
          { name: 'person', confidence: 0.95, bbox: [100, 100, 200, 300] },
          { name: 'medical_equipment', confidence: 0.8, bbox: [300, 150, 400, 250] }
        ])
      }, 1000)
    })
  }

  /**
   * Analyze the image for medical content via the backend endpoint.
   * Falls back to empty arrays when the service is unavailable.
   */
  private async analyzeMedicalContent(imageFile: File): Promise<{bodyParts: string[]; symptoms: string[]; medicalEquipment: string[]}> {
    try {
      const formData = new FormData()
      formData.append('image', imageFile)

      const response = await fetch('/v1/ai/analyze-image', {
        method: 'POST',
        body: formData
      })

      if (!response.ok) {
        throw new Error('医疗图像分析服务不可用')
      }

      const result = await response.json()
      return {
        bodyParts: result.bodyParts || [],
        symptoms: result.symptoms || [],
        medicalEquipment: result.medicalEquipment || []
      }
    } catch (error) {
      // Deliberate best-effort: absence of the service yields empty results.
      console.log('医疗图像分析服务不可用')
      return {
        bodyParts: [],
        symptoms: [],
        medicalEquipment: []
      }
    }
  }

  /**
   * Parse a transcript into a coarse intent with keyword matching.
   * Checks navigation first, then query, then appointment; anything
   * unmatched maps to 'unknown' with low confidence.
   */
  parseVoiceCommand(transcript: string): {
    intent: string
    entities: Record<string, string>
    confidence: number
  } {
    const normalizedText = transcript.toLowerCase()

    // Navigation commands ("go to", "navigate", "location").
    if (normalizedText.includes('去') || normalizedText.includes('导航') || normalizedText.includes('位置')) {
      return {
        intent: 'navigation',
        entities: { location: this.extractLocation(normalizedText) },
        confidence: 0.9
      }
    }

    // Query commands ("look up", "view", "learn about").
    if (normalizedText.includes('查询') || normalizedText.includes('查看') || normalizedText.includes('了解')) {
      return {
        intent: 'query',
        entities: { target: this.extractQueryTarget(normalizedText) },
        confidence: 0.8
      }
    }

    // Appointment commands ("book", "register").
    if (normalizedText.includes('预约') || normalizedText.includes('挂号')) {
      return {
        intent: 'appointment',
        entities: { department: this.extractDepartment(normalizedText) },
        confidence: 0.95
      }
    }

    return {
      intent: 'unknown',
      entities: {},
      confidence: 0.1
    }
  }

  /**
   * Extract a known hospital location from the text ('' when none match).
   */
  private extractLocation(text: string): string {
    const locations = ['急诊科', '内科', '外科', '儿科', '药房', '卫生间', '收费处']
    for (const location of locations) {
      if (text.includes(location)) {
        return location
      }
    }
    return ''
  }

  /**
   * Extract a known query target from the text ('' when none match).
   */
  private extractQueryTarget(text: string): string {
    const targets = ['科室', '医生', '设备', '药品', '价格', '时间']
    for (const target of targets) {
      if (text.includes(target)) {
        return target
      }
    }
    return ''
  }

  /**
   * Extract a known department from the text ('' when none match).
   */
  private extractDepartment(text: string): string {
    const departments = ['内科', '外科', '儿科', '妇产科', '眼科', '耳鼻喉科']
    for (const dept of departments) {
      if (text.includes(dept)) {
        return dept
      }
    }
    return ''
  }

  /**
   * Map a SpeechRecognition error code to a user-facing message.
   */
  private getErrorMessage(error: string): string {
    switch (error) {
      case 'no-speech':
        return '未检测到语音，请重试'
      case 'audio-capture':
        return '音频捕获失败，请检查麦克风权限'
      case 'not-allowed':
        return '麦克风权限被拒绝'
      case 'network':
        return '网络错误，请检查网络连接'
      case 'language-not-supported':
        return '不支持的语言'
      default:
        return '语音识别失败，请重试'
    }
  }

  /**
   * Feature-detect the browser capabilities this service depends on.
   * Returns all-false during SSR.
   */
  static checkBrowserSupport(): {
    speechRecognition: boolean
    speechSynthesis: boolean
    mediaDevices: boolean
  } {
    if (typeof window === 'undefined') {
      return {
        speechRecognition: false,
        speechSynthesis: false,
        mediaDevices: false
      }
    }

    return {
      speechRecognition: 'webkitSpeechRecognition' in window || 'SpeechRecognition' in window,
      speechSynthesis: 'speechSynthesis' in window,
      mediaDevices: 'mediaDevices' in navigator && 'getUserMedia' in navigator.mediaDevices
    }
  }

  /**
   * Run an interactive voice conversation loop: listen, forward each
   * final transcript to `onMessage`, and stop when the user says
   * "退出" or "结束".
   *
   * @throws Error when the browser lacks speech support or a session fails.
   */
  async startVoiceConversation(onMessage: (message: string) => void): Promise<void> {
    const support = MultimodalAI.checkBrowserSupport()

    if (!support.speechRecognition || !support.speechSynthesis) {
      throw new Error('浏览器不支持语音功能')
    }

    try {
      await this.speak('您好，请说话')

      while (true) {
        const result = await this.startListening({ continuous: false, interimResults: false })

        if (result.transcript.toLowerCase().includes('退出') ||
            result.transcript.toLowerCase().includes('结束')) {
          await this.speak('再见！')
          break
        }

        onMessage(result.transcript)

        // Crude pacing: give the AI response time before listening again.
        await new Promise(resolve => setTimeout(resolve, 2000))
      }
    } catch (error) {
      console.error('语音对话失败:', error)
      throw error
    }
  }
}

// Export the shared singleton instance.
export const multimodalAI = new MultimodalAI()

// Convenience wrappers delegating to the singleton.
export const startVoiceRecognition = (config?: Partial<VoiceConfig>) => multimodalAI.startListening(config)
export const speakText = (text: string, settings?: Partial<VoiceSettings>) => multimodalAI.speak(text, settings)
export const analyzeImage = (imageFile: File) => multimodalAI.analyzeImage(imageFile)
export const parseVoiceCommand = (transcript: string) => multimodalAI.parseVoiceCommand(transcript)

export default multimodalAI