import random

import openai
from dalchemy.llms import DalchemyLLM
from langchain.llms import AzureOpenAI, OpenAIChat, OpenAI
from typing import List, Dict
from retry import retry
import logging
import json
from queue import Queue, PriorityQueue
import tiktoken


# from dalchemy.llms import register_llm
# @register_llm("azure")

class OpenAILLM(DalchemyLLM):
    """LLM backend for the public OpenAI API with API-key pool rotation.

    API keys are kept in a priority queue: each request pops the key with
    the smallest priority value, and re-queues it afterwards with a larger
    value (errors are penalized more than successes), so healthy keys are
    preferred on subsequent calls.
    """

    # key params
    engine: str = "text-davinci-003"
    api_type: str = "open_ai"
    api_base: str = "https://api.openai.com/v1"
    api_version: str = None
    api_key: str = None

    api_keys: List[str] = None        # candidate key pool; required when keys_queue is unset
    keys_queue: PriorityQueue = None  # queue of [priority, key]; built lazily in set_config()

    # generation params
    temperature: float = 0.7
    """What sampling temperature to use."""
    max_tokens: int = 3000
    """The maximum number of tokens to generate in the completion.
    -1 returns as many tokens as possible given the prompt and
    the models maximal context size."""
    top_p: float = 0.95
    """Total probability mass of tokens to consider at each step."""
    frequency_penalty: float = 0
    """Penalizes repeated tokens according to frequency."""
    presence_penalty: float = 0
    """Penalizes repeated tokens."""

    def set_config(self):
        """Install the best available API key on the `openai` module.

        Returns:
            (priority, api_key): the key's current priority and the key
            itself, so the caller can re-queue it with an updated priority.
        """
        # 1. Lazily build the priority queue from the de-duplicated key pool.
        if self.keys_queue is None:
            self.keys_queue = PriorityQueue()
            for key in set(self.api_keys):
                self.keys_queue.put([1, key])
        # 2. Pop the head (smallest priority value) and configure openai.
        priority, api_key = self.keys_queue.get()
        openai.api_key = api_key
        return priority, api_key

    def __call__(self, prompt: str, is_chat: bool = False) -> str:
        """Text in, text out.  `is_chat` interprets `prompt` as ChatML history."""
        return self._openai_completion_helper(prompt, is_chat)

    @retry(tries=3, delay=10)
    def _openai_completion_helper(
            self,
            prompt: str,
            is_chat: bool = False,
    ):
        """Wrap openai.ChatCompletion.create with key rotation and error handling.

        On API errors the error text is returned as a JSON payload instead of
        being raised (except for an unrecoverable context-length error), and
        the key that failed is re-queued with a higher penalty.
        """
        if is_chat:
            messages = self.prompt_to_chatml(prompt)  # multi-turn conversation
        else:
            messages = [{"role": "user", "content": prompt}]
        priority, api_key = self.set_config()  # configure right before create()
        try:
            response = openai.ChatCompletion.create(
                model=self.engine,
                messages=messages,
                temperature=self.temperature,
                max_tokens=self.max_tokens,
                top_p=self.top_p,
                frequency_penalty=self.frequency_penalty,
                presence_penalty=self.presence_penalty
            )
            content = response['choices'][0]['message']['content']

            # Normal completion: demote this key by one level to spread usage.
            priority += 1
        except openai.error.OpenAIError as e:
            # Penalize the failing key more heavily so other keys get tried first.
            priority += 2
            logging.warning(f"OpenAIError: {e}.")
            content = json.dumps({"msg": f"[ERROR]: {str(e)[:50]}"})
            if "Please reduce your prompt" in str(e):
                max_tokens = int(self.max_tokens * 0.8)
                logging.warning(f"Reducing target length to {max_tokens}, Retrying...")
                if max_tokens == 0:
                    logging.exception("Prompt is already longer than max context length. Error:")
                    raise e
                # BUGFIX: the reduced budget was computed but never stored, so
                # subsequent requests kept sending the original max_tokens.
                self.max_tokens = max_tokens
            elif "rate limit" in str(e).lower():
                logging.warning("Hit request rate limit; retrying...")
            elif "Request timed out: HTTPSConnectionPool" in str(e):
                priority += 5  # timeouts suggest a throttled/unhealthy key
            elif "The response was filtered" in str(e):
                # https://go.microsoft.com/fwlink/?linkid=2198766
                logging.warning("openai.error.InvalidRequestError: The response was filtered")
            else:
                logging.warning(f"Unknown error {e}. \n It's likely a rate limit so we are retrying...")

        # Return the key to the pool with its updated priority.
        self.keys_queue.put([priority, api_key])
        return content

    def num_tokens_from_messages(self, messages, model="gpt-3.5-turbo-0301"):
        """Returns the number of tokens used by a list of messages."""
        try:
            encoding = tiktoken.encoding_for_model(model)
        except KeyError:
            print("Warning: model not found. Using cl100k_base encoding.")
            encoding = tiktoken.get_encoding("cl100k_base")
        if model in ("gpt-3.5-turbo", "gpt-35-turbo", "gpt-35-turbo-16k", "GPT_TURBO"):
            print(
                "Warning: gpt-3.5-turbo may change over time. Returning num tokens assuming gpt-3.5-turbo-0301."
            )
            return self.num_tokens_from_messages(messages, model="gpt-3.5-turbo-0301")
        elif model in ("gpt-4", "gpt-4-32k"):
            print(
                "Warning: gpt-4 may change over time. Returning num tokens assuming gpt-4-0314."
            )
            return self.num_tokens_from_messages(messages, model="gpt-4-0314")
        elif model == "gpt-3.5-turbo-0301":
            tokens_per_message = 4  # every message follows <|start|>{role/name}\n{content}<|end|>\n
            tokens_per_name = -1  # if there's a name, the role is omitted
        elif model == "gpt-4-0314":
            tokens_per_message = 3
            tokens_per_name = 1
        else:
            raise NotImplementedError(
                f"""num_tokens_from_messages() is not implemented for model {model}. See https://github.com/openai/openai-python/blob/main/chatml.md for information on how messages are converted to tokens."""
            )
        num_tokens = 0
        for message in messages:
            num_tokens += tokens_per_message
            for key, value in message.items():
                num_tokens += len(encoding.encode(value))
                if key == "name":
                    num_tokens += tokens_per_name
        num_tokens += 3  # every reply is primed with <|start|>assistant<|message|>
        return num_tokens

    def calc_price(self, query, response, prompt_price=0.0015, response_price=0.002, model=None):
        """Estimate the USD cost of one request (prices are per 1k tokens).

        Azure pricing reference:
        https://azure.microsoft.com/zh-tw/pricing/details/cognitive-services/openai-service/
        The explicit prompt_price/response_price arguments are only used when
        the model is not recognized below.
        """
        model = self.engine if model is None else model
        if model in ("gpt-3.5-turbo", "gpt-35-turbo", "GPT_TURBO"):
            prompt_price = 0.0015
            response_price = 0.002
        elif model == "gpt-35-turbo-16k":
            prompt_price = 0.003
            response_price = 0.004
        elif model == "gpt-4-8k":
            prompt_price = 0.03
            response_price = 0.06
        elif model == "gpt-4-32k":
            # BUGFIX: this branch was a duplicate "gpt-4-8k" and therefore
            # unreachable; 0.06/0.12 are the 32k-context prices.
            prompt_price = 0.06
            response_price = 0.12

        query_messages = [{"user": query}]
        response_messages = [{"sys": response}]
        query_tokens_num = self.num_tokens_from_messages(query_messages, model=model)
        response_tokens_num = self.num_tokens_from_messages(response_messages, model=model)
        price = prompt_price * query_tokens_num / 1000 + response_price * response_tokens_num / 1000
        print(f"Input chars: {len(query)} | Output chars: {len(response)}")
        print(f"Input tokens: {query_tokens_num} | Output tokens: {response_tokens_num}")
        print(f"Prompt price: {prompt_price}$ | Response price: {response_price}$ | Total Price: {round(price, 4)}$")
        return price


class AzureLLM(OpenAILLM):
    """Backend for Azure OpenAI deployments.

    Azure endpoints handle concurrency server-side, so there is no key pool:
    a single api_key/api_base/api_version triple is installed before each call.
    """
    api_type: str = "azure"

    def set_config(self):
        """Install the Azure OpenAI connection settings on the `openai` module."""
        openai.api_type = self.api_type
        openai.api_base = self.api_base
        openai.api_version = self.api_version
        openai.api_key = self.api_key

    @retry(tries=3, delay=10)
    def _openai_completion_helper(
            self,
            prompt: str,
            is_chat: bool = False,
    ):
        """Wrap openai.ChatCompletion.create (Azure `engine=` form) with error handling.

        On API errors the error text is returned as a JSON payload instead of
        being raised (except for an unrecoverable context-length error).
        """
        if is_chat:
            messages = self.prompt_to_chatml(prompt)  # multi-turn conversation
        else:
            messages = [{"role": "user", "content": prompt}]

        self.set_config()  # configure right before create()
        try:
            response = openai.ChatCompletion.create(
                engine=self.engine,
                messages=messages,
                temperature=self.temperature,
                max_tokens=self.max_tokens,
                top_p=self.top_p,
                frequency_penalty=self.frequency_penalty,
                presence_penalty=self.presence_penalty)
            content = response['choices'][0]['message']['content']

        except openai.error.OpenAIError as e:
            # Re-apply the (single, fixed) config; there is no key pool to rotate.
            self.set_config()

            logging.warning(f"OpenAIError: {e}.")
            content = json.dumps({"msg": f"[ERROR]: {str(e)[:50]}"})
            if "Please reduce your prompt" in str(e):
                max_tokens = int(self.max_tokens * 0.8)
                logging.warning(f"Reducing target length to {max_tokens}, Retrying...")
                if max_tokens == 0:
                    logging.exception("Prompt is already longer than max context length. Error:")
                    raise e
                # BUGFIX: the reduced budget was computed but never stored, so
                # subsequent requests kept sending the original max_tokens.
                self.max_tokens = max_tokens
            elif "rate limit" in str(e).lower():
                logging.warning("Hit request rate limit; retrying...")
            elif "The response was filtered" in str(e):
                # https://go.microsoft.com/fwlink/?linkid=2198766
                logging.warning("openai.error.InvalidRequestError: The response was filtered")
            else:
                logging.warning(f"Unknown error {e}. \n It's likely a rate limit so we are retrying...")
        return content


class ZhipuLLM(DalchemyLLM):
    """Backend for Zhipu AI's chatglm_* models (synchronous or async invocation)."""

    engine = "chatglm_pro"

    api_key: str = None
    async_call: bool = False  # use async_invoke + result polling instead of invoke
    # generation params
    temperature: float = 0.7
    # max_tokens: int = 3000
    top_p: float = 0.95

    def set_config(self):
        """Install the API key on the `zhipuai` module (imported lazily)."""
        import zhipuai
        zhipuai.api_key = self.api_key

    def __call__(self, prompt: str, is_chat: bool = False) -> str:
        """Text in, text out.  `is_chat` interprets `prompt` as ChatML history."""
        return self._zhipu_completion_helper(prompt, is_chat)

    @retry(tries=3, delay=10)
    def _zhipu_completion_helper(
            self,
            prompt: str,
            is_chat: bool = False,
    ):
        """Wrap the zhipuai model_api call and return the generated text.

        Errors are returned as a JSON error payload rather than raised.
        Synchronous calls also accumulate the RMB cost on self.cost_rmb
        (attribute presumably defined by DalchemyLLM — confirm).
        """
        import zhipuai
        if is_chat:
            messages = self.prompt_to_chatml(prompt)  # multi-turn conversation
        else:
            messages = [{"role": "user", "content": prompt}]

        self.set_config()  # configure right before the API call
        try:
            if self.async_call:
                response = zhipuai.model_api.async_invoke(
                    model=self.engine,
                    prompt=messages,
                    top_p=self.top_p,
                    temperature=self.temperature,
                )
                task_id = response["data"]["task_id"]
                # BUGFIX: get_async_response() already returns the content
                # *string* (or the bare task id when the result is not ready);
                # the old code indexed ["data"] into that string, which always
                # raised and made the async path return the task-id marker
                # unconditionally.
                content = self.get_async_response(task_id)
                if content == task_id:
                    # Result not ready yet: keep the recoverable marker format.
                    content = f"task_id: {task_id}"
            else:
                response = zhipuai.model_api.invoke(
                    model=self.engine,
                    prompt=messages,
                    top_p=self.top_p,
                    temperature=self.temperature,
                )
                content = response["data"]["choices"][0]["content"]
                # Accumulate cost; prices are RMB per 1k tokens.
                total_tokens = response["data"]["usage"]["total_tokens"]
                price_table = {"chatglm_pro": 0.01, "chatglm_lite": 0.002, "chatglm_std": 0.005}
                # BUGFIX: .get() so an engine missing from the table no longer
                # raises KeyError and discards an otherwise successful reply.
                price = total_tokens * price_table.get(self.engine, 0.0) / 1000
                self.cost_rmb += price

        except Exception as e:
            self.set_config()

            logging.warning(f"ZhipuError: {e}.")
            content = json.dumps({"msg": f"[ERROR]: {str(e)[:50]}"})
        return content

    def get_async_response(self, task_id):
        """Poll the result of an async invocation.

        Accepts either a bare task id or the "task_id: ..." marker produced by
        _zhipu_completion_helper.  Returns the generated content, or the input
        marker unchanged when the result is not ready / the query fails.
        """
        import zhipuai
        raw_task_id = task_id
        task_id = task_id.replace("task_id:", "").strip()
        try:
            response = zhipuai.model_api.query_async_invoke_result(task_id)
            content = response["data"]["choices"][0]["content"]
        except Exception as e:
            # BUGFIX: was a bare `except:` in the caller's polling path, which
            # also swallowed SystemExit/KeyboardInterrupt.
            print(f"get async zhipu api error: {e}")
            content = raw_task_id
        return content