import { NextRequest, NextResponse } from 'next/server'
import { OpenAI } from 'openai'

/**
 * @swagger
 * /api/chat:
 *   post:
 *     summary: AI 聊天接口
 *     description: 与 AI 助手进行对话，支持流式响应
 *     tags:
 *       - AI 聊天
 *     requestBody:
 *       required: true
 *       content:
 *         application/json:
 *           schema:
 *             type: object
 *             required:
 *               - messages
 *             properties:
 *               messages:
 *                 type: array
 *                 description: 消息数组
 *                 items:
 *                   type: object
 *                   properties:
 *                     role:
 *                       type: string
 *                       enum: [user, assistant, system]
 *                       description: 消息角色
 *                     content:
 *                       type: string
 *                       description: 消息内容
 *           example:
 *             messages:
 *               - role: user
 *                 content: "你好，请介绍一下仙山八骏"
 *     responses:
 *       200:
 *         description: 成功响应，返回流式数据
 *         content:
 *           text/event-stream:
 *             schema:
 *               type: string
 *               description: 服务器发送事件流
 *       400:
 *         description: 请求参数错误
 *         content:
 *           application/json:
 *             schema:
 *               type: object
 *               properties:
 *                 error:
 *                   type: string
 *                   example: "Messages array is required"
 *       500:
 *         description: 服务器内部错误
 *         content:
 *           application/json:
 *             schema:
 *               type: object
 *               properties:
 *                 error:
 *                   type: string
 *                   example: "Internal server error"
 */

// OpenAI-compatible client for the upstream chat-completion service.
// SECURITY(review): the API key was previously hardcoded in this file and
// committed to source control — treat that key as leaked and rotate it.
// Credentials must come from the environment, never from source.
// NOTE(review): the default base URL is plain HTTP; confirm the upstream
// supports TLS and switch to https if possible.
const openai = new OpenAI({
  apiKey: process.env.OPENAI_API_KEY ?? '',
  baseURL: process.env.OPENAI_BASE_URL ?? 'http://72.18.81.35:13000/v1',
})

/**
 * POST /api/chat — forwards the caller's message history to the upstream
 * chat-completion model and relays the reply as a Server-Sent-Events stream.
 *
 * Request body: `{ messages: Array<{ role, content }> }` (validated below).
 * Responses:
 *   200 — `text/event-stream` of `data: {"content": "..."}` chunks,
 *         terminated by `data: [DONE]`.
 *   400 — malformed JSON body or missing/empty `messages` array.
 *   500 — upstream or unexpected failure.
 */
export async function POST(req: NextRequest) {
  try {
    // Parse the body in its own try so malformed JSON is reported as a
    // client error (400) rather than falling through to the 500 handler.
    let body: unknown
    try {
      body = await req.json()
    } catch {
      return NextResponse.json(
        { error: 'Invalid JSON body' },
        { status: 400 }
      )
    }

    const messages = (body as { messages?: unknown } | null)?.messages
    // Reject non-arrays AND empty arrays — an empty history would only
    // fail later inside the upstream call with a confusing 500.
    if (!Array.isArray(messages) || messages.length === 0) {
      return NextResponse.json(
        { error: 'Messages array is required' },
        { status: 400 }
      )
    }

    const stream = await openai.chat.completions.create(
      {
        // NOTE(review): model id is upstream-specific; consider moving to
        // an env var alongside the API key.
        model: '7517830219147853880',
        messages,
        stream: true,
        temperature: 0.7,
        max_tokens: 1000,
      },
      // Abort the upstream request if the client disconnects, so we stop
      // paying for tokens nobody will read.
      { signal: req.signal }
    )

    // Re-encode the upstream delta chunks as SSE frames.
    const encoder = new TextEncoder()
    const readable = new ReadableStream({
      async start(controller) {
        try {
          for await (const chunk of stream) {
            const content = chunk.choices[0]?.delta?.content || ''
            if (content) {
              controller.enqueue(
                encoder.encode(`data: ${JSON.stringify({ content })}\n\n`)
              )
            }
          }
          // Explicit end-of-stream sentinel for the client.
          controller.enqueue(encoder.encode('data: [DONE]\n\n'))
          controller.close()
        } catch (error) {
          console.error('Stream error:', error)
          controller.error(error)
        }
      }
    })

    return new Response(readable, {
      headers: {
        'Content-Type': 'text/event-stream',
        'Cache-Control': 'no-cache',
        'Connection': 'keep-alive',
      }
    })
  } catch (error) {
    console.error('Chat API error:', error)
    return NextResponse.json(
      { error: 'Internal server error' },
      { status: 500 }
    )
  }
}
