import openai
import os
import openai
from dotenv import load_dotenv, find_dotenv
from openai import OpenAI
from openai import AzureOpenAI
from langchain_openai import AzureChatOpenAI
def get_api_version():
    """Return the Azure OpenAI API version read from the environment.

    Loads variables from the nearest ``.env`` file first, then reads
    ``API_VERSION``; raises ``KeyError`` if the variable is missing.
    """
    load_dotenv(find_dotenv())
    return os.environ["API_VERSION"]

def get_azure_endpoint():
    """Return the Azure OpenAI endpoint URL read from the environment.

    Loads variables from the nearest ``.env`` file first, then reads
    ``AZURE_ENDPOINT``; raises ``KeyError`` if the variable is missing.
    """
    load_dotenv(find_dotenv())
    return os.environ["AZURE_ENDPOINT"]

def get_api_key():
    """Return the Azure OpenAI API key read from the environment.

    Loads variables from the nearest ``.env`` file first, then reads
    ``API_KEY``; raises ``KeyError`` if the variable is missing.
    """
    load_dotenv(find_dotenv())
    return os.environ["API_KEY"]


# Module-level Azure OpenAI client, configured entirely from environment
# variables (loaded from .env by the getter helpers above).
client = AzureOpenAI(
    api_version=get_api_version(),
    azure_endpoint=get_azure_endpoint(),  # This is the default and can be omitted
    api_key=get_api_key()
)

# Module-level LangChain chat client pointed at the same Azure deployment.
langchain_client = AzureChatOpenAI(
        azure_endpoint=get_azure_endpoint().rstrip('/'),  # strip trailing slash; keep only the base URL
        azure_deployment="gpt-4o",  # deployment name (parameter renamed to azure_deployment in langchain_openai)
        model_name="gpt-4o",
        openai_api_version=get_api_version(),  # parameter name unchanged
        openai_api_key=get_api_key(),
        openai_api_type="azure",
    )
# 一个封装 OpenAI 接口的函数，参数为 Prompt，返回对应结果
def get_completion(prompt, model="gpt-4o"):
    '''
    prompt: 对应的提示词
    model: 调用的模型，默认为 gpt-4o
    '''
    response = client.chat.completions.create(
        messages=[
            {
                "role": "user",
                "content": prompt,
            }
        ],
        model=model,
        temperature=0.7,
    )
    # 调用 OpenAI 的 ChatCompletion 接口
    return response.choices[0].message.content

# # part1 专用
# def get_completion_from_messages(messages, model="gpt-4o", temperature=0):
#     response = client.chat.completions.create(
#             model=model,
#             messages=messages,
#             temperature=temperature, # 控制模型输出的随机程度
#             )
#         # print(str(response.choices[0].message))
#     return response.choices[0].message.content

def get_completion_from_messages(messages, model="gpt-4o", temperature=0, max_tokens=500):
    """Send a full chat-message list and return the reply text.

    Parameters
    ----------
    messages : list[dict]
        Chat messages in OpenAI ``{"role": ..., "content": ...}`` form.
    model : str
        Model/deployment name; defaults to ``gpt-4o``.
    temperature : float
        Controls the randomness of the model output (0 = deterministic-ish).
    max_tokens : int
        Upper bound on generated tokens.

    Returns
    -------
    str
        The assistant message content of the first choice.
    """
    completion = client.chat.completions.create(
        model=model,
        messages=messages,
        temperature=temperature,
        max_tokens=max_tokens,
    )
    return completion.choices[0].message.content

def get_completion_and_token_count(messages,
        model="gpt-4o",
        temperature=0,
        max_tokens=500):
    """Send a chat-message list; return the reply text plus token usage.

    Parameters
    ----------
    messages : list[dict]
        Chat messages in OpenAI ``{"role": ..., "content": ...}`` form.
    model : str
        Model/deployment name; defaults to ``gpt-4o``.
    temperature : float
        Controls the randomness of the model output.
    max_tokens : int
        Upper bound on generated tokens.

    Returns
    -------
    tuple[str, dict]
        ``(content, token_dict)`` where ``token_dict`` carries the
        prompt/completion/total token counts reported by the API.
    """
    completion = client.chat.completions.create(
        model=model,
        messages=messages,
        temperature=temperature,
        max_tokens=max_tokens,
    )
    usage = completion.usage
    token_dict = {
        'prompt_tokens': usage.prompt_tokens,
        'completion_tokens': usage.completion_tokens,
        'total_tokens': usage.total_tokens,
    }
    return completion.choices[0].message.content, token_dict


def get_moderation(input):
    """Run the OpenAI moderation model over *input* and return the raw response.

    Bug fix: the original version ignored the ``input`` argument and always
    sent the placeholder string ``"...text to classify goes here..."``; the
    caller's text is now actually classified.

    Parameters
    ----------
    input : str | list[str]
        Text (or list of texts) to classify. (Note: the name shadows the
        ``input`` builtin; kept unchanged to preserve keyword-call
        compatibility for existing callers.)

    Returns
    -------
    The full moderation response object from the API.
    """
    response = client.moderations.create(
        model="omni-moderation-latest",
        input=input,  # pass the caller's text through instead of a placeholder
    )
    return response