/**
 * AI服务路由
 * 提供AI聊天、预测分析、图像识别等API
 */

import express from 'express'
import fs from 'fs'
import multer from 'multer'
import path from 'path'

import { authenticateToken } from '../middleware/auth'

const router = express.Router()

// Directory that receives AI-related uploads (images and audio).
const UPLOAD_DIR = path.join(__dirname, '../../uploads/ai')

// Ensure the destination exists up front; multer does not create it and
// would otherwise fail with ENOENT on the first upload.
fs.mkdirSync(UPLOAD_DIR, { recursive: true })

// Disk storage with collision-resistant filenames that keep the original
// extension so downstream tools can sniff the format.
const storage = multer.diskStorage({
  destination: (req, file, cb) => {
    cb(null, UPLOAD_DIR)
  },
  filename: (req, file, cb) => {
    const uniqueSuffix = Date.now() + '-' + Math.round(Math.random() * 1E9)
    cb(null, `ai-${uniqueSuffix}${path.extname(file.originalname)}`)
  }
})

const upload = multer({
  storage,
  limits: { fileSize: 10 * 1024 * 1024 }, // 10MB cap per file
  fileFilter: (req, file, cb) => {
    // Bug fix: the filter previously accepted only image/*, which made the
    // /speech-to-text route (upload.single('audio')) reject every audio file.
    if (file.mimetype.startsWith('image/') || file.mimetype.startsWith('audio/')) {
      cb(null, true)
    } else {
      cb(new Error('只支持图像或音频文件'))
    }
  }
})

/**
 * POST /chat — text-only AI conversation.
 * Runs intent analysis, entity extraction and response generation, then
 * persists the exchange before replying.
 */
router.post('/chat', authenticateToken, async (req, res) => {
  try {
    const { message, serviceType, context, preferences } = req.body

    // Both the message and the service type are mandatory.
    if (!message || !serviceType) {
      return res.status(400).json({ success: false, message: '缺少必要参数' })
    }

    const intent = await analyzeIntent(message, serviceType)
    const entities = await extractEntities(message, serviceType, context)
    const aiReply = await generateAIResponse(message, intent, entities, serviceType, context)

    // Persist the exchange for history / analytics; anonymous fallback when
    // the auth middleware did not attach a user.
    await saveConversationHistory(req.user?.id || 'anonymous', {
      message,
      response: aiReply.content,
      serviceType,
      intent: aiReply.intent,
      timestamp: new Date()
    })

    res.json({
      success: true,
      data: {
        id: `ai_${Date.now()}`,
        content: aiReply.content,
        intent: aiReply.intent,
        confidence: aiReply.confidence,
        entities: aiReply.entities,
        suggestions: aiReply.suggestions,
        actions: aiReply.actions,
        metadata: {
          processingTime: aiReply.processingTime,
          model: 'hospital-ai-v1.0'
        }
      }
    })
  } catch (error) {
    console.error('AI聊天错误:', error)
    res.status(500).json({ success: false, message: 'AI服务暂时不可用' })
  }
})

/**
 * POST /analyze-image — multi-analyzer image pipeline.
 * Runs OCR, object detection and medical analysis in parallel on the
 * uploaded image and returns the combined result.
 *
 * Fix: the uploaded temp file is now removed after processing (success or
 * failure); previously it accumulated in uploads/ai forever, unlike the
 * sibling routes which clean up after themselves.
 */
router.post('/analyze-image', authenticateToken, upload.single('image'), async (req, res) => {
  try {
    if (!req.file) {
      return res.status(400).json({
        success: false,
        message: '请上传图像文件'
      })
    }

    const { analysisType = 'general' } = req.body
    const imagePath = req.file.path

    // The three analyzers are independent, so run them concurrently.
    const [ocrResult, objectDetection, medicalAnalysis] = await Promise.all([
      performOCR(imagePath),
      detectObjects(imagePath),
      analyzeMedicalImage(imagePath, analysisType)
    ])

    res.json({
      success: true,
      data: {
        ocr: ocrResult,
        objects: objectDetection,
        medical: medicalAnalysis,
        metadata: {
          filename: req.file.filename,
          size: req.file.size,
          uploadTime: new Date().toISOString()
        }
      }
    })

  } catch (error) {
    console.error('图像分析错误:', error)
    res.status(500).json({
      success: false,
      message: '图像分析失败'
    })
  } finally {
    // Best-effort removal of the temp upload; a failure here must not
    // affect the response already sent.
    if (req.file) {
      fs.unlink(req.file.path, err => {
        if (err) console.warn('清理临时文件失败:', err)
      })
    }
  }
})

/**
 * POST /analyze-image-llava — multimodal image analysis via Ollama Llava.
 * Converts the upload to a base64 data URL and forwards it to the Llava
 * backend.
 *
 * Fixes: fs is the top-level import instead of two inline require('fs')
 * calls; the blocking readFileSync is replaced by an async read; the
 * duplicated temp-file cleanup in the success and error branches is
 * collapsed into a single finally block.
 */
router.post('/analyze-image-llava', authenticateToken, upload.single('image'), async (req, res) => {
  try {
    if (!req.file) {
      return res.status(400).json({
        success: false,
        message: '请上传图像文件'
      })
    }

    const {
      prompt,
      analysisType = 'general',
      includeRecommendations = false
    } = req.body

    // Encode the upload as a data URL for the multimodal model.
    // Async read so the event loop is not blocked by large images.
    const imageBuffer = await fs.promises.readFile(req.file.path)
    const imageBase64 = `data:${req.file.mimetype};base64,${imageBuffer.toString('base64')}`

    const analysisResult = await analyzeWithOllamaLlava(imageBase64, prompt, analysisType, includeRecommendations)

    res.json({
      success: true,
      data: {
        analysis: analysisResult.content,
        provider: analysisResult.provider,
        model: analysisResult.model,
        usage: analysisResult.usage,
        metadata: {
          filename: req.file.filename,
          originalName: req.file.originalname,
          size: req.file.size,
          uploadTime: new Date().toISOString(),
          processingTime: analysisResult.metadata?.processingTime,
          analysisType,
          includeRecommendations
        }
      }
    })

  } catch (error) {
    console.error('Llava3图像分析错误:', error)

    const errorMessage = error instanceof Error ? error.message : 'Llava3图像分析失败'
    const errorStack = error instanceof Error ? error.stack : undefined

    res.status(500).json({
      success: false,
      message: errorMessage,
      // Stack traces are only exposed outside production builds.
      details: process.env.NODE_ENV === 'development' ? errorStack : undefined
    })
  } finally {
    // Single best-effort cleanup path for the temp upload.
    if (req.file) {
      fs.unlink(req.file.path, err => {
        if (err) console.warn('清理临时文件失败:', err)
      })
    }
  }
})

/**
 * POST /multimodal-chat — chat with an optional image attachment.
 * The image (when present) is inlined as a base64 data URL.
 *
 * Fixes: fs hoisted to the top-level import instead of inline require('fs');
 * blocking readFileSync replaced by an async read; duplicated temp-file
 * cleanup in the success and error branches collapsed into one finally block.
 */
router.post('/multimodal-chat', authenticateToken, upload.single('image'), async (req, res) => {
  try {
    const { message, serviceType = 'medical', sessionId } = req.body

    if (!message) {
      return res.status(400).json({
        success: false,
        message: '缺少消息内容'
      })
    }

    // Optional image: encode as a base64 data URL when present.
    let imageBase64: string | null = null
    if (req.file) {
      const imageBuffer = await fs.promises.readFile(req.file.path)
      imageBase64 = `data:${req.file.mimetype};base64,${imageBuffer.toString('base64')}`
    }

    const response = await multimodalChat(message, imageBase64, {
      serviceType,
      sessionId,
      userId: req.user?.id || 'anonymous'
    })

    res.json({
      success: true,
      data: {
        id: response.id,
        content: response.content,
        provider: response.provider,
        model: response.model,
        usage: response.usage,
        metadata: {
          hasImage: !!imageBase64,
          imageSize: imageBase64?.length,
          processingTime: response.metadata?.processingTime,
          serviceType,
          sessionId
        }
      }
    })

  } catch (error) {
    console.error('多模态对话错误:', error)

    const errorMessage = error instanceof Error ? error.message : '多模态对话失败'
    const errorStack = error instanceof Error ? error.stack : undefined

    res.status(500).json({
      success: false,
      message: errorMessage,
      // Stack traces are only exposed outside production builds.
      details: process.env.NODE_ENV === 'development' ? errorStack : undefined
    })
  } finally {
    // Single best-effort cleanup path for the temp upload.
    if (req.file) {
      fs.unlink(req.file.path, err => {
        if (err) console.warn('清理临时文件失败:', err)
      })
    }
  }
})

/**
 * GET /predictions — dispatch a prediction by ?type; unknown or missing
 * types fall back to the aggregate view.
 */
router.get('/predictions', authenticateToken, async (req, res) => {
  try {
    const { type, timeframe = '24h' } = req.query

    // Dispatch table keyed by prediction type. A Map avoids accidental
    // matches on inherited object keys.
    const dispatch = new Map<string, () => Promise<any>>([
      ['patient_flow', () => predictPatientFlow(timeframe as string)],
      ['equipment_failure', () => predictEquipmentFailure()],
      ['department_load', () => predictDepartmentLoad()],
      ['resource_optimization', () => generateResourceOptimization()]
    ])

    const run = typeof type === 'string' ? dispatch.get(type) : undefined
    const predictions = await (run ?? getAllPredictions)()

    res.json({ success: true, data: predictions })
  } catch (error) {
    console.error('预测分析错误:', error)
    res.status(500).json({ success: false, message: '预测分析失败' })
  }
})

/**
 * GET /alerts — returns the current high-priority alert list.
 */
router.get('/alerts', authenticateToken, async (req, res) => {
  try {
    const alertPayload = await getHighPriorityAlerts()
    res.json({ success: true, data: alertPayload })
  } catch (error) {
    console.error('获取预警失败:', error)
    res.status(500).json({ success: false, message: '获取预警失败' })
  }
})

/**
 * POST /speech-to-text — transcribe an uploaded audio file.
 *
 * Fix: the uploaded temp file is now deleted after processing; it used to
 * be left behind in uploads/ai.
 * NOTE(review): confidence is a hard-coded 0.95 placeholder, not a value
 * reported by the recognizer — confirm before clients rely on it.
 */
router.post('/speech-to-text', authenticateToken, upload.single('audio'), async (req, res) => {
  try {
    if (!req.file) {
      return res.status(400).json({
        success: false,
        message: '请上传音频文件'
      })
    }

    const { language = 'zh-CN' } = req.body
    const audioPath = req.file.path

    const transcript = await speechToText(audioPath, language)

    res.json({
      success: true,
      data: {
        transcript,
        confidence: 0.95,
        language
      }
    })

  } catch (error) {
    console.error('语音识别错误:', error)
    res.status(500).json({
      success: false,
      message: '语音识别失败'
    })
  } finally {
    // Best-effort removal of the temp upload regardless of outcome.
    if (req.file) {
      fs.unlink(req.file.path, err => {
        if (err) console.warn('清理临时文件失败:', err)
      })
    }
  }
})

/**
 * POST /text-to-speech — synthesize speech for the given text.
 * Body: text (required), voice, rate.
 */
router.post('/text-to-speech', authenticateToken, async (req, res) => {
  try {
    const { text, voice = 'zh-CN-XiaoxiaoNeural', rate = 1.0 } = req.body

    if (!text) {
      return res.status(400).json({ success: false, message: '请提供要转换的文本' })
    }

    // Synthesize, persist, and hand back a servable URL plus an estimate
    // of the clip length.
    const audioBuffer = await textToSpeech(text, voice, rate)
    const audioUrl = await saveAudioFile(audioBuffer)

    res.json({
      success: true,
      data: {
        audioUrl,
        text,
        voice,
        duration: estimateAudioDuration(text)
      }
    })
  } catch (error) {
    console.error('语音合成错误:', error)
    res.status(500).json({ success: false, message: '语音合成失败' })
  }
})

/**
 * POST /knowledge — search the knowledge base.
 * Body: query (required), category, limit.
 */
router.post('/knowledge', authenticateToken, async (req, res) => {
  try {
    const { query, category = 'general', limit = 5 } = req.body

    if (!query) {
      return res.status(400).json({ success: false, message: '请提供查询内容' })
    }

    const matches = await queryKnowledgeBase(query, category, limit)
    res.json({ success: true, data: matches })
  } catch (error) {
    console.error('知识库查询错误:', error)
    res.status(500).json({ success: false, message: '知识库查询失败' })
  }
})

/**
 * POST /train — launch a model-training job. Admin only.
 */
router.post('/train', authenticateToken, async (req, res) => {
  try {
    // Only administrators may launch training jobs (missing user or
    // missing role also falls through to 403).
    if (req.user?.role !== 'admin') {
      return res.status(403).json({ success: false, message: '权限不足' })
    }

    const { modelType, trainingData } = req.body
    const job = await startModelTraining(modelType, trainingData)

    res.json({
      success: true,
      data: {
        jobId: job.id,
        status: 'started',
        estimatedTime: job.estimatedTime
      }
    })
  } catch (error) {
    console.error('模型训练错误:', error)
    res.status(500).json({ success: false, message: '模型训练启动失败' })
  }
})

/**
 * GET /model-metrics — performance metrics for a model type (?modelType).
 */
router.get('/model-metrics', authenticateToken, async (req, res) => {
  try {
    const { modelType } = req.query
    const metrics = await getModelMetrics(modelType as string)
    res.json({ success: true, data: metrics })
  } catch (error) {
    console.error('获取模型指标错误:', error)
    res.status(500).json({ success: false, message: '获取模型指标失败' })
  }
})

// ===== 辅助函数 =====

/**
 * Keyword-based intent classification for a chat message.
 * Scans the intent patterns in declaration order and returns the first
 * intent whose keyword list matches; falls back to 'general_inquiry'.
 * (serviceType is accepted for interface stability but not used yet.)
 */
async function analyzeIntent(message: string, serviceType: string): Promise<string> {
  const text = message.toLowerCase()

  // Ordered list: earlier entries win when multiple intents match.
  const patterns: Array<[string, string[]]> = [
    ['navigation', ['在哪', '怎么走', '位置', '导航']],
    ['appointment', ['预约', '挂号', '就诊']],
    ['symptom_consultation', ['疼', '痛', '不舒服', '症状']],
    ['facility_query', ['卫生间', '厕所', '药房', '停车']],
    ['emergency', ['急诊', '紧急', '急救']],
    ['store_query', ['吃', '喝', '买', '店']],
    ['promotion', ['活动', '优惠', '折扣']]
  ]

  const hit = patterns.find(([, keywords]) => keywords.some(k => text.includes(k)))
  return hit ? hit[0] : 'general_inquiry'
}

/**
 * Dictionary-based entity extraction.
 * Each catalog (departments, facilities, symptoms) is scanned in order;
 * matched substrings become entities, and the result order mirrors the
 * catalog order. (serviceType/context accepted for interface stability.)
 */
async function extractEntities(message: string, serviceType: string, context: any): Promise<any[]> {
  const catalogs: Array<{ type: string; values: string[]; confidence: number }> = [
    { type: 'department', values: ['内科', '外科', '儿科', '妇产科', '急诊科', '眼科'], confidence: 0.9 },
    { type: 'facility', values: ['药房', '卫生间', '收费处', '检验科'], confidence: 0.9 },
    { type: 'symptom', values: ['头痛', '发烧', '咳嗽', '胸痛', '腹痛'], confidence: 0.8 }
  ]

  const found: any[] = []
  for (const { type, values, confidence } of catalogs) {
    for (const value of values) {
      if (message.includes(value)) {
        found.push({ type, value, confidence })
      }
    }
  }
  return found
}

/**
 * 生成AI响应
 */
async function generateAIResponse(message: string, intent: string, entities: any[], serviceType: string, context: any): Promise<any> {
  const startTime = Date.now()
  
  let content = ''
  let suggestions: string[] = []
  let actions: any[] = []

  // 根据服务类型和意图生成响应
  switch (serviceType) {
    case 'hospital':
      const hospitalResponse = await generateHospitalResponse(intent, entities, message)
      content = hospitalResponse.content
      suggestions = hospitalResponse.suggestions
      actions = hospitalResponse.actions
      break
    
    case 'attraction':
      const attractionResponse = await generateAttractionResponse(intent, entities, message)
      content = attractionResponse.content
      suggestions = attractionResponse.suggestions
      actions = attractionResponse.actions
      break
    
    default:
      content = '您好，我是AI助手。请问有什么可以帮您的吗？'
      suggestions = ['帮助', '功能介绍']
  }

  return {
    content,
    intent,
    confidence: 0.85,
    entities,
    suggestions,
    actions,
    processingTime: Date.now() - startTime
  }
}

/**
 * Build the hospital-service reply for a classified intent.
 *
 * Inspects the extracted entities (department / facility / symptom) and,
 * per intent, fills in reply text, follow-up suggestion chips and
 * client-side action descriptors (navigate / appointment / emergency / call).
 *
 * @param intent   - intent label produced by analyzeIntent
 * @param entities - entity list produced by extractEntities
 * @param message  - raw user message (currently unused in this builder)
 * @returns object with { content, suggestions, actions }
 */
async function generateHospitalResponse(intent: string, entities: any[], message: string): Promise<any> {
  let content = ''
  let suggestions: string[] = []
  let actions: any[] = []

  // First entity of each kind (if any) drives the branch-specific wording.
  const deptEntity = entities.find(e => e.type === 'department')
  const facilityEntity = entities.find(e => e.type === 'facility')
  const symptomEntity = entities.find(e => e.type === 'symptom')

  switch (intent) {
    case 'navigation':
      // NOTE(review): the floor text ("二楼") is hard-coded for every
      // department — presumably placeholder copy; confirm against map data.
      if (deptEntity) {
        content = `${deptEntity.value}位于门诊楼二楼，我来为您导航。`
        actions.push({
          type: 'navigate',
          label: '开始导航',
          data: { department: deptEntity.value }
        })
        suggestions = ['预约挂号', '查看专家', '了解科室']
      } else if (facilityEntity) {
        content = `${facilityEntity.value}的位置信息如下...`
        actions.push({
          type: 'navigate',
          label: '前往导航',
          data: { facility: facilityEntity.value }
        })
      } else {
        // No recognizable target — ask the user to name one.
        content = '请告诉我您要去的科室或设施，我来为您导航。'
        suggestions = ['急诊科', '内科', '外科', '药房']
      }
      break

    case 'appointment':
      // NOTE(review): availability text ("今日还有号源") is static copy,
      // not a live slot query — verify before shipping.
      if (deptEntity) {
        content = `正在为您查询${deptEntity.value}的预约信息...\n\n今日还有号源，是否需要预约？`
        actions.push({
          type: 'appointment',
          label: '立即预约',
          data: { department: deptEntity.value }
        })
        suggestions = ['查看专家', '预约时间', '取消预约']
      } else {
        content = '请选择您要预约的科室：'
        suggestions = ['内科预约', '外科预约', '儿科预约']
      }
      break

    case 'symptom_consultation':
      if (symptomEntity) {
        // Triage advice comes from the symptom lookup helper.
        content = await getSymptomAdvice(symptomEntity.value)
        actions.push({
          type: 'appointment',
          label: '立即挂号',
          data: { symptom: symptomEntity.value }
        })
        suggestions = ['了解更多', '预约专家', '急诊咨询']
      } else {
        content = '请描述您的症状，我来为您提供就医建议。'
        suggestions = ['头痛', '发烧', '胸痛', '腹痛']
      }
      break

    case 'emergency':
      // Emergency replies always push two actions: on-site navigation and
      // the national emergency call number.
      content = '🚨 紧急情况请立即前往急诊科！\n\n急诊科位置：门诊楼一楼\n急救电话：120'
      actions.push({
        type: 'emergency',
        label: '前往急诊科',
        data: { department: '急诊科' }
      }, {
        type: 'call',
        label: '拨打120',
        data: { phone: '120' }
      })
      break

    default:
      // Catch-all greeting listing the assistant's capabilities.
      content = '您好！我是医院智能助手，可以帮您导航、预约、咨询。请问需要什么帮助？'
      suggestions = ['科室导航', '预约挂号', '症状咨询', '设施查询']
  }

  return { content, suggestions, actions }
}

/**
 * Placeholder response builder for the attraction (scenic-area) service.
 * Currently returns a canned greeting regardless of intent/entities.
 */
async function generateAttractionResponse(intent: string, entities: any[], message: string): Promise<any> {
  const cannedReply = {
    content: '景区助手为您服务',
    suggestions: ['景区介绍', '美食推荐', '设施查询'],
    actions: [] as any[]
  }
  return cannedReply
}

/**
 * Map a recognized symptom to triage advice; generic fallback otherwise.
 *
 * Fix: the lookup now uses a Map instead of a plain object literal —
 * the old `symptomAdvice[symptom]` returned inherited properties (e.g.
 * a Function for 'toString'/'constructor'), violating Promise<string>.
 */
async function getSymptomAdvice(symptom: string): Promise<string> {
  const ADVICE = new Map<string, string>([
    ['头痛', '头痛可能由多种原因引起，建议挂神经内科或内科。如伴有发热、呕吐等症状，请及时就医。'],
    ['发烧', '体温超过37.5°C为发热，建议挂内科。如体温超过38.5°C或伴有其他严重症状，建议前往急诊科。'],
    ['咳嗽', '咳嗽持续超过一周建议就医，可挂呼吸内科或内科。如伴有胸痛、咳血，请立即就医。'],
    ['胸痛', '胸痛可能涉及心脏问题，建议立即前往急诊科或心内科就诊。'],
    ['腹痛', '腹痛位置和性质不同可能涉及不同科室，建议先挂内科或急诊科进行初步诊断。']
  ])

  return ADVICE.get(symptom) ?? '建议您详细描述症状，并及时就医获得专业诊断。'
}

/**
 * Run OCR through the project's ppOcrService, degrading to an empty
 * result when the service cannot be loaded or fails.
 */
async function performOCR(imagePath: string): Promise<any> {
  try {
    // Lazily required so a missing/broken OCR dependency does not crash startup.
    const ocrService = require('../services/ppOcrService')
    return await ocrService.processImage(imagePath)
  } catch (error) {
    console.log('OCR服务不可用:', error)
    const emptyResult = { texts: [] as any[], confidence: 0 }
    return emptyResult
  }
}

/**
 * Object detection stub — returns one fixed detection until a real
 * detector service is integrated. (imagePath is accepted but unused.)
 */
async function detectObjects(imagePath: string): Promise<any> {
  const stubDetections = [
    { name: 'person', confidence: 0.95, bbox: [100, 100, 200, 300] }
  ]
  return { objects: stubDetections }
}

/**
 * Medical image analysis stub — returns a fixed finding set regardless of
 * input until a real analyzer is wired up.
 */
async function analyzeMedicalImage(imagePath: string, analysisType: string): Promise<any> {
  const stubFindings = {
    bodyParts: ['chest', 'arm'],
    symptoms: [] as any[],
    medicalEquipment: ['stethoscope'],
    analysis: '检测到医疗相关内容'
  }
  return stubFindings
}

// 其他辅助函数的实现...
// Stub forecast of patient flow; echoes the requested timeframe back.
async function predictPatientFlow(timeframe: string): Promise<any> {
  const result = { type: 'patient_flow', data: [] as any[], timeframe }
  return result
}

// Stub equipment-failure forecast (no model wired up yet).
async function predictEquipmentFailure(): Promise<any> {
  const result = { type: 'equipment_failure', data: [] as any[] }
  return result
}

// Stub department-load forecast (no model wired up yet).
async function predictDepartmentLoad(): Promise<any> {
  const result = { type: 'department_load', data: [] as any[] }
  return result
}

// Stub resource-optimization suggestions (no optimizer wired up yet).
async function generateResourceOptimization(): Promise<any> {
  const result = { type: 'resource_optimization', data: [] as any[] }
  return result
}

// Stub aggregate view over every prediction category.
async function getAllPredictions(): Promise<any> {
  const result = { predictions: [] as any[] }
  return result
}

// Stub list of high-priority alerts (no alerting backend yet).
async function getHighPriorityAlerts(): Promise<any> {
  const result = { alerts: [] as any[] }
  return result
}

// Stub STT: returns a fixed transcript (no recognizer integrated yet).
async function speechToText(audioPath: string, language: string): Promise<string> {
  const placeholderTranscript = '语音识别结果'
  return placeholderTranscript
}

// Stub TTS: returns placeholder bytes instead of synthesized audio.
async function textToSpeech(text: string, voice: string, rate: number): Promise<Buffer> {
  const placeholderAudio = Buffer.from('audio data')
  return placeholderAudio
}

// Stub: pretends the audio was stored and returns its would-be URL.
async function saveAudioFile(audioBuffer: Buffer): Promise<string> {
  const servedUrl = '/api/audio/generated.mp3'
  return servedUrl
}

// Rough speech-duration estimate: ~0.1 s per character of input text.
function estimateAudioDuration(text: string): number {
  const SECONDS_PER_CHAR = 0.1
  return text.length * SECONDS_PER_CHAR
}

// Stub knowledge-base search; parameters are accepted but unused for now.
async function queryKnowledgeBase(query: string, category: string, limit: number): Promise<any> {
  const emptyResult = { results: [] as any[] }
  return emptyResult
}

// Stub trainer: hands back a timestamp-based job id and a fixed ETA (seconds).
async function startModelTraining(modelType: string, trainingData: any): Promise<any> {
  const jobId = Date.now()
  return { id: jobId, estimatedTime: 3600 }
}

// Stub metrics payload, identical for every model type.
async function getModelMetrics(modelType: string): Promise<any> {
  const fixedMetrics = { accuracy: 0.95, precision: 0.90, recall: 0.88 }
  return fixedMetrics
}

/**
 * Persist one chat exchange for the given user.
 * NOTE(review): currently a no-op placeholder — no storage backend is
 * wired up; confirm whether history is persisted elsewhere.
 */
async function saveConversationHistory(userId: string, conversation: any): Promise<void> {
  // Intentionally empty until a persistence layer exists.
}

/**
 * Delegate image analysis to the Ollama Llava multimodal model.
 * 'general' goes through the generic analyzer; any other analysisType is
 * treated as a specialized medical analysis.
 *
 * NOTE(review): this requires code from the h5 frontend tree
 * ('../../../h5/src/lib/realAIService') at runtime — fragile coupling;
 * verify the path resolves in the deployed backend build.
 */
async function analyzeWithOllamaLlava(
  imageBase64: string,
  prompt?: string,
  analysisType: string = 'general',
  includeRecommendations: boolean = false
): Promise<any> {
  try {
    const { realAIService } = require('../../../h5/src/lib/realAIService')

    if (analysisType === 'general') {
      return await realAIService.analyzeImage(imageBase64, prompt)
    }
    return await realAIService.analyzeMedicalImage(imageBase64, analysisType, {
      includeRecommendations
    })
  } catch (error) {
    console.error('Ollama Llava3分析失败:', error)
    throw error
  }
}

/**
 * Text (+ optional image) chat against the Ollama-backed AI service.
 * A plain string is sent for text-only chat; OpenAI-style content parts
 * are used when an image accompanies the message.
 *
 * NOTE(review): imports the h5 frontend's realAIService at runtime —
 * verify this path resolves in the deployed backend build.
 */
async function multimodalChat(
  message: string,
  imageBase64?: string | null,
  options: {
    serviceType?: string
    sessionId?: string
    userId?: string
  } = {}
): Promise<any> {
  try {
    const { realAIService } = require('../../../h5/src/lib/realAIService')

    let content: any = message
    if (imageBase64) {
      content = [
        { type: 'text', text: message },
        { type: 'image_url', image_url: { url: imageBase64, detail: 'high' } }
      ]
    }

    return await realAIService.chat([{ role: 'user', content }], {
      provider: 'ollama',
      sessionId: options.sessionId || 'default',
      temperature: 0.7,
      maxTokens: 4000
    })
  } catch (error) {
    console.error('多模态聊天失败:', error)
    throw error
  }
}

export default router