import { Router, Request, Response } from 'express'
import OpenAI from 'openai'
import { authenticateToken, rateLimiter } from '../middleware/auth'
import { aiFunctionRegistry } from '../services/aiFunctionRegistry'
import { taskContextProvider } from '../services/taskContextProvider'
import { taskExecutors } from '../services/taskFunctionExecutors'

const router = Router()

// Make every task executor available to AI function-calling flows
// by registering it with the shared registry at module load time.
for (const executor of taskExecutors) {
  aiFunctionRegistry.register(executor)
}

// Every chat route below requires an authenticated user.
router.use(authenticateToken)

// SSE 流式聊天：POST /api/chat/stream
// 请求体: { messages: Array<{role: 'system'|'user'|'assistant', content: string}>, sessionId?: string, context?: any }
router.post('/stream', rateLimiter(20, 1), async (req: Request, res: Response) => {
  try {
    // 设置 SSE 头
    res.setHeader('Content-Type', 'text/event-stream')
    res.setHeader('Cache-Control', 'no-cache, no-transform')
    res.setHeader('Connection', 'keep-alive')
    res.flushHeaders?.()

    const abortController = new AbortController()

    // 客户端断开时中止
    req.on('close', () => {
      abortController.abort()
      try { res.end() } catch { /* ignore */ }
    })

    const openai = new OpenAI({
      apiKey: process.env.OPENAI_API_KEY,
      baseURL: process.env.OPENAI_BASE_URL || 'https://api.openai.com/v1'
    })

    const { messages = [], context } = req.body || {}

    const systemPrefix = {
      role: 'system' as const,
      content: '你是AIDE的智能任务助手，擅长任务创建、修改、优先级管理、时间安排与建议。在回答中尽量简洁，并在需要时给出可执行的下一步。'
    }

    // 组合消息（如已包含system则不重复添加）
    const preparedMessages = Array.isArray(messages) && messages.length > 0 && messages[0]?.role === 'system'
      ? messages
      : [systemPrefix, ...messages]

    // 将上下文压缩到一段系统提示中（可选）
    if (context) {
      preparedMessages.unshift({
        role: 'system',
        content: `以下是用户上下文，回答时酌情参考：${JSON.stringify(context).slice(0, 2000)}`
      } as any)
    }

    const model = process.env.OPENAI_MODEL || 'gpt-3.5-turbo'

    const completion = await openai.chat.completions.create({
      model,
      messages: preparedMessages as any,
      temperature: 0.7,
      stream: true
    }, { signal: abortController.signal })

    for await (const chunk of completion) {
      const delta = chunk.choices?.[0]?.delta?.content || ''
      if (delta) {
        res.write(`data: ${JSON.stringify({ delta, final: false })}\n\n`)
      }

      const finish = chunk.choices?.[0]?.finish_reason
      if (finish) {
        res.write(`data: ${JSON.stringify({ final: true, reason: finish })}\n\n`)
        break
      }
    }

    // 结束标记
    res.write('data: [DONE]\n\n')
    res.end()
  } catch (error) {
    try {
      res.write(`data: ${JSON.stringify({ error: 'STREAM_ERROR', message: error instanceof Error ? error.message : String(error) })}\n\n`)
    } catch { /* ignore */ }
    try { res.end() } catch { /* ignore */ }
  }
})

// NOTE: /transcribe below is registered after this export statement; since the
// exported binding is the router object itself, routes added later during
// module evaluation are still included. Conventionally the export would sit
// at the end of the file.
export default router

// Speech transcription: POST /api/chat/transcribe
// Body (JSON): { audioBase64: string, mimeType?: string }
// Accepts raw base64 or a data URL; returns { success: true, text }.
router.post('/transcribe', rateLimiter(10, 5), async (req: Request, res: Response) => {
  const fs = await import('fs')
  // Track the temp path outside try so the finally block can always clean it up.
  let tmpFile: string | null = null
  try {
    const { audioBase64, mimeType } = req.body || {}
    if (!audioBase64 || typeof audioBase64 !== 'string') {
      return res.status(400).json({ success: false, error: 'MISSING_AUDIO', message: '缺少音频数据' })
    }

    // Strip a possible data-URL prefix ("data:audio/...;base64,").
    const base64Data = audioBase64.includes(',') ? audioBase64.split(',').pop() : audioBase64
    const buffer = Buffer.from(base64Data as string, 'base64')
    // Invalid/empty base64 decodes to an empty buffer — reject early instead
    // of sending a zero-byte file to the transcription API.
    if (buffer.length === 0) {
      return res.status(400).json({ success: false, error: 'MISSING_AUDIO', message: '缺少音频数据' })
    }

    // Persist the audio to a temp file so the OpenAI SDK can read it as a stream.
    const path = await import('path')
    const os = await import('os')
    const ext = (mimeType && mimeType.includes('wav')) ? '.wav'
      : (mimeType && mimeType.includes('m4a')) ? '.m4a'
      : (mimeType && mimeType.includes('ogg')) ? '.ogg'
      : '.mp3'
    tmpFile = path.join(os.tmpdir(), `aide-audio-${Date.now()}${ext}`)
    await fs.promises.writeFile(tmpFile, buffer)

    const openai = new OpenAI({
      apiKey: process.env.OPENAI_API_KEY,
      baseURL: process.env.OPENAI_BASE_URL || 'https://api.openai.com/v1'
    })

    // Whisper transcription (the typed client exposes audio.transcriptions
    // directly — no `as any` cast needed).
    const result = await openai.audio.transcriptions.create({
      file: fs.createReadStream(tmpFile),
      model: process.env.OPENAI_TRANSCRIBE_MODEL || 'whisper-1',
      response_format: 'json'
    })

    const anyResult = result as any
    const text = (anyResult && (anyResult.text || anyResult?.data?.text)) || ''
    return res.json({ success: true, text })
  } catch (error) {
    console.error('语音转写失败:', error)
    return res.status(500).json({ success: false, error: 'TRANSCRIBE_ERROR', message: error instanceof Error ? error.message : String(error) })
  } finally {
    // Best-effort cleanup on BOTH success and failure paths — the original
    // leaked the temp file whenever transcription threw.
    if (tmpFile) {
      fs.promises.unlink(tmpFile).catch(() => {})
    }
  }
})


