import express from 'express'
import OpenAI from 'openai'
import cors from 'cors' // 引入 cors 中间件
import { getModalConf } from './modalList.js'

const app = express()
app.use(express.json()) // parse JSON request bodies into req.body

// Enable CORS for all origins so the (separately hosted) frontend can call this API
app.use(cors())

// SSE 中间件
/**
 * Express middleware that attaches a `res.sse(data)` helper for writing one
 * Server-Sent-Events frame per call: `data: <json>\n\n`.
 *
 * The previous `.replace(/\n/g, '\\n')` step was dead code: JSON.stringify
 * already escapes newlines inside string values, so its output can never
 * contain a raw `\n` that would break SSE framing.
 *
 * @param {import('http').IncomingMessage} req - incoming request (unused)
 * @param {import('http').ServerResponse} res - response; gains a `.sse` method
 * @param {Function} next - passes control to the next handler
 */
function sseMiddleware(req, res, next) {
  res.sse = data => {
    res.write(`data: ${JSON.stringify(data)}\n\n`)
  }
  next()
}

// SSE 路由
/**
 * SSE chat endpoint: forwards the conversation to the selected model and
 * streams the reply back to the client as Server-Sent Events.
 *
 * Body: { message: string, model: string, messageList?: Array<{role, content}> }
 * Fixes vs. previous version:
 *  - `messageList` defaults to [] so a first-turn request without history no
 *    longer throws a TypeError on the spread below.
 *  - Errors raised after the SSE headers were flushed are reported in-band
 *    (previously res.status(500) triggered ERR_HTTP_HEADERS_SENT).
 */
app.post('/api/chat', sseMiddleware, async (req, res) => {
  console.log(req.body) // debug: inspect incoming payload

  const { message, model, messageList = [] } = req.body

  if (!message) {
    res.status(400).send({ error: 'Message query parameter is required' })
    return
  }

  // Switch the response into SSE mode; from here on, no HTTP status changes.
  res.setHeader('Content-Type', 'text/event-stream; charset=utf-8')
  res.setHeader('Cache-Control', 'no-cache')
  res.setHeader('Connection', 'keep-alive')

  const selectModel = getModalConf(model)
  const openaiInstance = new OpenAI({
    baseURL: selectModel.baseURL,
    apiKey: selectModel.apiKey
  })

  // System prompt: use the history as context but answer only the latest question.
  const systemPrompt = {
    role: 'system',
    content: '你需要根据历史问题和用户输入，给出回复，当有多个历史问题时，根据历史问答并只需要回复最后一次提问。'
  }

  try {
    const stream = await openaiInstance.chat.completions.create({
      messages: [systemPrompt, ...messageList, { role: 'user', content: message }],
      model: selectModel.model,
      stream: true // enable streaming response
    })

    for await (const chunk of stream) {
      // The final usage-only chunk carries no choices — log it and move on.
      if (!chunk.choices?.length) {
        console.log('\nUsage:')
        console.log(chunk.usage)
        continue
      }

      const delta = chunk.choices[0].delta

      if (delta.reasoning_content) {
        // reasoning/"thinking" tokens (models that expose a reasoning channel)
        res.sse({ reasoning_content: delta.reasoning_content })
      } else if (delta.content) {
        // regular answer tokens
        res.sse({ content: delta.content })
      }
    }

    res.sse({ done: true }) // tell the client the stream is finished
    res.end()
  } catch (error) {
    console.error('Error:', error)
    if (res.headersSent) {
      // SSE headers already flushed: a status code can no longer be sent,
      // so report the failure in-band and close the stream.
      res.sse({ error: 'Failed to process the request' })
      res.end()
    } else {
      res.status(500).send({ error: 'Failed to process the request' })
    }
  }
})

// Boot the HTTP server on the fixed service port.
const PORT = 3000
app.listen(PORT, () => {
  console.log(`Server is running on port ${PORT}`)
})
