from langchain_community.adapters import openai as lc_openai
from openaiConfigurations import openai_api_key, openai_api_base
import openai

messages = [{"role": "user", "content": "hi"}]

# Direct call through the OpenAI SDK.
# Bug fix: in the openai>=1.0 SDK (whose `chat.completions` / `.model_dump()`
# style this script already uses), `create()` does not accept
# `openai_api_key` / `openai_api_base` keyword arguments — passing them raises
# a TypeError. Credentials belong on the client, so configure an explicit
# client with the key and base URL instead.
client = openai.OpenAI(api_key=openai_api_key, base_url=openai_api_base)
result = client.chat.completions.create(
    model="gpt-3.5-turbo",
    messages=messages,
    temperature=0,
)
result.choices[0].message.model_dump()  # response message as a plain dict

# The adapter mimics the original OpenAI calling convention, so code written
# against the OpenAI SDK can be pointed at LangChain with minimal changes.
adapter_params = {
    "model": "gpt-3.5-turbo",
    "messages": messages,
    "temperature": 0,
    "openai_api_key": openai_api_key,
    "openai_api_base": openai_api_base,
}
lc_result = lc_openai.chat.completions.create(**adapter_params)

lc_result.choices[0].message  # attribute-style access
# lc_result["choices"][0]["message"]  # index-style access also works

# Switch model provider: the same adapter entry point routes to Anthropic by
# naming the LangChain chat-model class via `provider`.
# Bug fix: the OpenAI-specific kwargs (`openai_api_key`, `openai_api_base`)
# were being forwarded to ChatAnthropic, which does not accept them.
# Anthropic credentials are read from the ANTHROPIC_API_KEY environment
# variable (or pass `anthropic_api_key=...` explicitly).
lc_result = lc_openai.chat.completions.create(
    messages=messages, model="claude-2", temperature=0, provider="ChatAnthropic",
)
lc_result.choices[0].message

# NOTE(review): the adapter mainly serves users who know the big-model
# (OpenAI-style) API but not the LangChain API — a migration aid of
# limited use beyond that.