import os
import copy
import tiktoken
import logging

from dotenv import load_dotenv
from openai import OpenAI
from prompt import INFERENCE_PROMPT, SUMMARIZE_PROMPT

# Default model used for all chat-completion requests.
MODEL = "gpt-3.5-turbo-1106"

# Models this module has been validated against.
available_models = {
    "gpt-3.5-turbo-1106",
    "gpt-4-1106-preview"
}

# Maximum context window (in tokens) for each supported model.
model_context_window = {
    "gpt-3.5-turbo-1106": 16385,
    "gpt-4-1106-preview": 128000
}

if MODEL not in available_models:
    # logging.WARN is a deprecated alias; logging.warning() is the idiomatic call.
    logging.warning("模型不在预设的可用范围内,功能可能出现异常.")

# Base conversation (system prompt only) for each supported task name.
TASK_MESSAGE = {
    task_name: [{"role": "system", "content": prompt}]
    for task_name, prompt in (
        ("summarize", SUMMARIZE_PROMPT),
        ("inference", INFERENCE_PROMPT),
    )
}


# 修改自OpenAI CodeBook
# https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb
def num_tokens_from_messages(messages, model):
    """Return the number of tokens used by a list of messages."""
    encoding = tiktoken.encoding_for_model(model)
    tokens_per_message = 3
    tokens_per_name = 1
    num_tokens = 0
    for message in messages:
        num_tokens += tokens_per_message
        for key, value in message.items():
            num_tokens += len(encoding.encode(value))
            if key == "name":
                num_tokens += tokens_per_name
    num_tokens += 3  # every reply is primed with <|start|>assistant<|message|>
    return num_tokens


class ChatCompletionModel:
    """Thin wrapper around the OpenAI chat-completions API for preset tasks."""

    def chat(self, query: str, task: str, n: int = 3, temperature: float = 0.1) -> list:
        """Run a chat completion for one of the preset tasks.

        Args:
            query: user content appended after the task's system prompt.
            task: key into TASK_MESSAGE ("summarize" or "inference").
            n: number of completions to request (callers may ensemble them).
            temperature: sampling temperature.

        Returns:
            A list of n completion strings; a list of n empty strings when the
            prompt is too close to the model's context limit.

        Raises:
            ValueError: if `task` is not a known task name.
        """
        client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
        if task not in TASK_MESSAGE:
            available_tasks = ",".join(TASK_MESSAGE.keys())
            # ValueError (an Exception subclass, so existing handlers still
            # catch it) is the idiomatic error for a bad argument.
            raise ValueError(f"任务 {task} 不存在,可用任务有:{available_tasks}.")

        # Deep-copy so appending the user message never mutates the shared template.
        messages = copy.deepcopy(TASK_MESSAGE[task])
        messages.append({
            "role": "user",
            "content": query,
        })
        num_tokens = num_tokens_from_messages(messages, MODEL)
        # Refuse requests using more than 90% of the context window: the model
        # would have too little room left to generate a useful reply.
        if num_tokens > model_context_window[MODEL] * 0.9:
            logging.error(
                f"当前请求长度为 {num_tokens} 个TOKEN,超出了该模型的最大TOKEN数 {model_context_window[MODEL]} 的90%,模型可能无法正常生成内容.")
            return [""] * n
        logging.info(f"使用了 {num_tokens} 个TOKEN.")
        chat_completion = client.chat.completions.create(
            messages=messages,
            model=MODEL,
            n=n,  # request several candidates so callers can ensemble them
            temperature=temperature,
        )
        if task == "inference":
            # Debug leftover: route through logging instead of raw print(stdout).
            logging.debug(messages)
        return [choice.message.content for choice in chat_completion.choices]


if __name__ == '__main__':
    # Load OPENAI_API_KEY from .env, enable verbose logging, and run a smoke test.
    load_dotenv()
    logging.basicConfig(level=logging.DEBUG)
    chat_model = ChatCompletionModel()
    summary = chat_model.chat(query="Please repeat content above", task="summarize")
    print(summary)
