import OpenAI from 'openai'
// import {SocksProxyAgent} from 'socks-proxy-agent'
// SOCKS_PROXY = socks5://127.0.0.1:51837
// httpsAgent: new SocksProxyAgent(process.env.SOCKS_PROXY)

// OpenAI client configured from the environment.
const openai = new OpenAI({
  apiKey: process.env['OPENAI_API_KEY'], // This is the default and can be omitted
})
// SECURITY: never print the API key itself — log only whether it is set.
console.log('OPENAI_API_KEY set:', Boolean(process.env['OPENAI_API_KEY']))
/**
 * Send a single chat completion request and log the full response object.
 * Uses the module-level `openai` client.
 * @returns {Promise<void>}
 */
async function main() {
  const chatCompletion = await openai.chat.completions.create({
    messages: [{ role: 'user', content: 'Say this is a test' }],
    model: 'gpt-3.5-turbo',
  })
  console.log('chatCompletion', chatCompletion)
}

// Handle rejections instead of leaving the promise floating — an API/network
// failure would otherwise surface as an unhandled rejection.
main().catch((err) => {
  console.error('chat completion failed:', err)
  process.exitCode = 1
})
// async function main1() {
//   const embedding = await openai.embeddings.create({
//     model: 'text-embedding-ada-002',
//     input: 'The quick brown fox jumped over the lazy dog',
//     encoding_format: 'float',
//   })

//   console.log(embedding)
// }

// main1()

// // Prepare the request parameters
// const prompt =
//   'The following is a conversation with an AI assistant. The assistant is helpful, creative, clever, and very friendly. \n' +
//   'User: Hello, who are you?\n' +
//   'AI: Hi there! I am an AI assistant designed to help you with various tasks. How can I assist you today?\n'
// const model = 'text-davinci-002'
// const parameters = {
//   prompt: prompt,
//   temperature: 0.5,
//   max_tokens: 150,
//   top_p: 1,
//   frequency_penalty: 0,
//   presence_penalty: 0,
// }

// // Send the request
// openai2
//   .complete(parameters)
//   .then(response => {
//     console.log(response.data.choices[0].text)
//   })
//   .catch(error => {
//     console.log(error)
//   })

// const openai3 = new OpenAI();

// async function main3() {
//     const stream = await openai3.chat.completions.create({
//         model: "gpt-4",
//         messages: [{ role: "user", content: "Say this is a test" }],
//         stream: true,
//     });
//     for await (const chunk of stream) {
//         process.stdout.write(chunk.choices[0]?.delta?.content || "");
//     }
// }

// main3();
