import { NextRequest, NextResponse } from 'next/server'
import OpenAI from 'openai'

// Shared OpenAI client for this route.
// SECURITY: the API key must come from the environment, never from source.
// A key was previously hardcoded here and committed — it should be revoked
// and rotated, since anything committed to VCS must be treated as leaked.
const openai = new OpenAI({
  baseURL: 'https://api.chatanywhere.tech',
  apiKey: process.env.OPENAI_API_KEY,
})

 async function handle(request: NextRequest) {
  // const { message } = await request.json()

  const stream = new TransformStream()
  const writer = stream.writable.getWriter()

  ;(async () => {
    try {
      const completionStream = await openai.chat.completions.create({
        model: 'gpt-3.5-turbo',
        messages: [{ role: 'user', content: '你好啊' }],
        stream: true,
      })

      for await (const chunk of completionStream) {
        const content = chunk.choices[0]?.delta?.content || ''
        await writer.write(
          new TextEncoder().encode(`data: ${JSON.stringify({ content })}\n\n`)
        )
      }
    } catch (error) {
      await writer.write(
        new TextEncoder().encode(
          `data: ${JSON.stringify({ error: error.message })}\n\n`
        )
      )
    } finally {
      await writer.close()
    }
  })()

  return new Response(stream.readable, {
    headers: {
      'Content-Type': 'text/event-stream',
      'Cache-Control': 'no-cache',
      Connection: 'keep-alive',
    },
  })
}
// Next.js App Router convention: exporting `POST` registers `handle`
// as the handler for HTTP POST requests to this route.
export const POST = handle
