import asyncio
import time
import aiohttp
import openai
import itertools

from agent.llm_agent_parent import LLMAgentParent
from bot.enterprise_wechat_gpt.util.msg_buffer_util import MsgBufferUtil
from bot.enterprise_wechat_gpt.util.thread_local_util import *
from bot.enterprise_wechat_gpt.util.api_key_util import *
from bot.enterprise_wechat_gpt.util.request_api_limit_util import get_token
from bot.enterprise_wechat_gpt.util.token_count_util import num_tokens_from_messages
from bot.enterprise_wechat_gpt.util.id_distributor import distribute


# url = 'https://api.xiaojuan.ml/v1/chat/completions'
from util_tool import utils

# Base URLs of the OpenAI-compatible chat-completions proxies used as
# third-party / fallback backends by LLMAgent below.
url_xiaojuan = 'https://api.xiaojuan.tech/v1/chat/completions'
# url = 'https://api.proxy.chatlingxi.com/v1/chat/completions'
url_bak = 'https://api.chatanywhere.cn/v1/chat/completions'
lingxi_url = 'https://api.chatlingxi.com/v1/chat/completions'
app_4_gpt_url = "https://api.app4gpt.com/v1/chat/completions"

# Shared per-user message buffer, used to detect when the message a worker
# thread is handling has been superseded by a newer one.
msg_buffer_util = MsgBufferUtil()
# lock = threading.Lock()

# (endpoint, deployment name, api key) triples for the Azure OpenAI GPT-4
# deployments across regions, consumed round-robin by achat_with_azure_gpt4.
# NOTE(review): API keys are hardcoded in source control — they should be
# moved to a secret store / environment configuration and rotated.
azure_keys = [
    ("https://lx-gpt-australia-east.openai.azure.com/", "lx-gpt-4-ae", "f31f66c8601d488eac461ade81c98da1"),
    ("https://lx-gpt-canada-east.openai.azure.com/", "lx-gpt-4-ce", "6349021ec7fa47a7b526e92bf638ccf3"),
    ("https://lx-gpt-east-us2.openai.azure.com/", "lx-gpt-4-eu2", "aefe46a980ba4a5bb24845f5618b05ce"),
    ("https://lx-gpt-france-central.openai.azure.com/", "lx-gpt-4-fc", "21b9bf1adb5b48c2a6d177a02e0c0816"),
    ("https://lx-gpt-norway-east.openai.azure.com/", "lx-gpt-4-ne", "ebbb7c0d824f437394f3ccdf56bdaf64"),
    ("https://lx-gpt-south-india.openai.azure.com/", "lx-gpt-4-sl", "42a1c23922394c559a335e9786e5b358"),
    ("https://lx-gpt-sweden-central.openai.azure.com/", "lx-gpt-4-sc", "7b175824ea6c42e8a07c8456704bb791"),
    ("https://lx-gpt-uk-south.openai.azure.com/", "lx-gpt-4-us", "b896d019f5964300a93b821351ac9017"),
    ("https://lx-gpt-west-u.openai.azure.com/", "lx-gpt-4-wu", "340e512ddc7848bf814570dccd8ec149"),
]
# Shuffle once so each process starts the rotation at a random offset, then
# cycle through the deployments forever.
random.shuffle(azure_keys)
azure_data_cycle = itertools.cycle(azure_keys)

# Ensure a module-level event loop exists; it is used by the
# loop.run_in_executor(...) calls in LLMAgent to persist training data
# without blocking the request coroutines.
try:
    loop = asyncio.get_event_loop()
except RuntimeError:
    # No loop is attached to this thread yet (e.g. a non-main thread):
    # create and install one, then fetch it.
    asyncio.set_event_loop(asyncio.new_event_loop())
    loop = asyncio.get_event_loop()


async def fetch(headers, payload, url, timeout=60):
    """POST *payload* as JSON to *url* and return the raw response body.

    No status check is performed on purpose: callers inspect error bodies
    (e.g. "Bad Gateway" text) themselves. Returns the body as bytes.
    """
    client_timeout = aiohttp.ClientTimeout(total=timeout)
    async with aiohttp.ClientSession() as session:
        async with session.post(url=url, headers=headers, json=payload,
                                timeout=client_timeout) as response:
            body = await response.content.read()
    return body


class LLMAgent(LLMAgentParent):
    """Chat agent that fans requests out across several LLM backends.

    Fallback chains observed in the code: third-party OpenAI-compatible
    proxies (``chat_with_gpt``) -> last-resort lingxi proxy
    (``achat_with_lingxi``); self-hosted FastChat model (``achat_llm`` /
    ``achat_llm_stream``) -> Azure GPT-4 (``achat_with_azure_gpt4``) ->
    third-party proxies.

    NOTE(review): API keys are hardcoded in this class and at module level;
    they belong in a secret store / configuration, not in source control.
    """

    def __init__(self, prompt, origin_prompt=None):
        """Build per-backend prompt variants from a single system prompt.

        prompt        -- system prompt text for the conversation.
        origin_prompt -- untouched copy of the original prompt, stored as-is.
        """
        super().__init__(prompt)
        self.origin_prompt = origin_prompt
        # OpenAI chat-completions style message list (system message only).
        self.gpt_prompt = [
            {"role": "system", "content": prompt}
        ]
        # Vicuna-style single-string prompt for the self-hosted model.
        self.model_prompt = "A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions. USER: " + self.prompt + "ASSISTANT:"
        # Two-bucket split on user_id: sales-persona prompt vs. generic
        # assistant prompt. Presumably distribute() buckets user ids — TODO
        # confirm its semantics against id_distributor.
        if distribute(local_data.user_id, 2):
            self.model_prompt_self = """请不要忘记，你是专业的、非常懂营销的信贷领域的销售员，你的名字叫京小鑫，你现在在通过微信和用户进行沟通。
    #你主要的任务是在回应并解决用户的问题，然后完成金条和信用卡的销售转化。\n###""" + prompt + "\n###京东金融福利推荐官:"
        else:
            self.model_prompt_self = "你是一个专业的助手，你需要基于用户的问题，给出准确的答案。USER：" + prompt + "ASSISTANT:"

    async def chat_with_gpt(self, retry_count=2, temperature=0.9, agent_name='agentXX', model='gpt-4-1106-preview',
                            save_data=True, timeout=60):
        """Call a third-party OpenAI-compatible proxy and return the reply.

        Returns the assistant message text, the sentinel string 'abandon'
        when this thread's message has been superseded in the buffer, or the
        lingxi fallback's reply once retries are exhausted.

        NOTE(review): save_data is not forwarded on the retry calls below,
        so every retry runs with the default save_data=True.
        """
        user_id = local_data.user_id
        # Give up early if another thread has taken over this user's message,
        # or if the buffered message no longer matches the one we hold.
        buffer = msg_buffer_util.get_buffer_top(user_id)
        if buffer:
            if buffer.get('thread_id') != threading.current_thread().ident:
                logging.info(f"userId:[{user_id}] {agent_name} 处理一轮消息的线程已经变更为:{buffer.get('thread_id')}, 该线程放弃处理该消息")
                return 'abandon'
            if buffer.get('msg') != local_data.msg:
                logging.info(f"userId:[{user_id}] {agent_name} 当前处理的消息和缓冲区的消息不一致, 该线程放弃处理该消息")
                return 'abandon'
        # Pick an API key + base url for the requested model from the pool.
        key_url = get_key_url(model=model)
        key = key_url['key']
        url = key_url['url']
        # NOTE(review): this rate-limit gate only logs when get_token() is
        # truthy; the request proceeds either way — likely intended to block
        # or skip when over the limit. Confirm against request_api_limit_util.
        if get_token(url):
            logging.info(f"userId:[{user_id}] {agent_name} api_key:[{key}], url:{url} 没有超过请求速率限制, 允许请求api")
        tokens = num_tokens_from_messages(self.gpt_prompt)
        logging.info(f"userId:[{user_id}] {agent_name} api_key:[{key}], url:{url}, model:{model} tokens:{tokens} prompt is: {self.gpt_prompt}")
        headers = {
            "Content-Type": "application/json",
            "Authorization": "Bearer " + key
        }
        payload = {
            "model": model,
            "messages": self.gpt_prompt,
            "temperature": temperature,
            "max_tokens": 350
        }
        start_time = time.time()
        content = ''
        try:
            content = await fetch(headers, payload, url + '/v1/chat/completions', timeout)

            result = json.loads(content.decode())["choices"][0]['message']['content']
            if save_data:
                # Persist the prompt/response pair off the event loop.
                loop.run_in_executor(None, self.save_train_data, self.prompt, result,
                                     self.__class__.__name__, "enterprise_wechat_llm", model, user_id)
            return result
        except (asyncio.exceptions.TimeoutError, aiohttp.ClientOSError) as e:
            # Request timed out / connection dropped: retry, then fall back.
            if retry_count > 0:
                logging.warning(f"userId:[{user_id}] {agent_name}, api_key:[{key}], url:{url}, model:{model}, 请求gpt代理超时, 重试, e: {e}")
                return await self.chat_with_gpt(retry_count=retry_count - 1, temperature=temperature,
                                                agent_name=agent_name, model=model, timeout=timeout)
            else:
                logging.warning(f"userId:[{user_id}] {agent_name}, api_key:[{key}], url:{url}, model:{model}, 请求超过重试次数, 直接请求保底 e: {e}")
                return await self.achat_with_lingxi(agent_name=agent_name)
        except Exception as e:
            # Any other failure (bad JSON, error payload from the proxy, ...).
            if content == '':
                logging.error(f"userId:[{user_id}] {agent_name}, api_key:[{key}], url:{url}, model:{model}, error: {e} content: {content}")
                # No body was received, so treat this as some other
                # connection-level failure and retry.
                # NOTE(review): retry_count is NOT checked on this path, so a
                # persistently failing connection recurses until RecursionError.
                return await self.chat_with_gpt(retry_count=retry_count - 1, temperature=temperature,
                                                agent_name=agent_name, model=model, timeout=timeout)
            decode = content.decode("utf-8")
            logging.warning(f"userId:[{user_id}] {agent_name}, api_key:[{key}], url:{url}, model:{model}, gpt代理接口返回异常, 信息: {e} content: {decode}")
            if "Connection timed out" in decode or "Bad Gateway" in decode:
                # Gateway-level error body: reset the retry budget and retry.
                retry_count = 2
                # (previously: went straight to the fallback proxy when the
                # body said 'Connection timed out')
                # return await self.achat_with_lingxi(agent_name=agent_name)
            if "balance is not enough" in decode or "insufficient balance" in decode or "该令牌额度已用尽" in decode:
                # The key's quota is exhausted: drop it from the pool, reset
                # the retry budget, and try again with a fresh key.
                logging.warning(f"userId:[{user_id}] {agent_name}, api_key:[{key}], url:{url}, model:{model}, 余额不足, 移除该key, 重试")
                remove_key(key, model)
                if 'gpt-4' in decode:
                    # logging.info(f"userId:[{user_id}] gpt-4 api_key余额不足, 放进gpt-3的key池")
                    # add_key_url(key_url, model='gpt-3-0613')
                    pass
                retry_count = 2
            if retry_count > 0:
                # Choose a back-off delay from the error body, marking the
                # key as rate-limited where applicable.
                if "Rate limit reached for 10KTPM-200RPM in organization" in decode:
                    logging.error(f"userId:[{user_id}] api_key:[{key}], url:{url} 1分钟内token超限")
                    sleep_time = 3
                    set_rate_limit(key)
                elif "rate_limit_exceeded" in decode:
                    logging.error(f"userId:[{user_id}] api_key:[{key}], url:{url} 超过请求速率")
                    sleep_time = 2
                    set_rate_limit(key)
                elif "OpenAI response error" in decode:
                    sleep_time = 2
                else:
                    # TODO: was bumped to 3s once; tune per key later based on
                    # each key's actual request rate.
                    sleep_time = 2
                logging.info(f"userId:[{user_id}] wait {sleep_time}s retry {model} {agent_name}")
                await asyncio.sleep(sleep_time)
                return await self.chat_with_gpt(retry_count=retry_count - 1, temperature=temperature,
                                                agent_name=agent_name, model=model, timeout=timeout)
            logging.warning(f"userId:[{user_id}] {agent_name}, api_key:[{key}], url:{url}, model:{model}, 请求超过重试次数, 直接请求保底")
            return await self.achat_with_lingxi(agent_name=agent_name)
        finally:
            logging.info(f"userId:[{user_id}] {agent_name} 请求大模型{model}耗时: {time.time() - start_time} s")

    async def achat_with_azure(self, retry_count=1, agent_name='agentXX', save_data=True):
        """Call the fixed Azure gpt-35 deployment; return '' when retries fail.

        NOTE(review): the retry call drops agent_name/save_data, so a retry
        runs with their defaults; endpoint and api key are hardcoded below.
        """
        user_id = local_data.user_id
        logging.info(f"userId:[{user_id}] gpt3.5_azure prompt is: {self.gpt_prompt}")
        start_time = time.time()
        try:
            response = await openai.ChatCompletion.acreate(
                api_type="azure",
                api_version="2023-03-15-preview",
                api_base="https://lingxi-openai.openai.azure.com",
                api_key="45a5ee249f364e208dd950f87ab5aba7",
                engine="gpt-35",
                messages=self.gpt_prompt,
                temperature=0.9,
                max_tokens=300,
                request_timeout=30,
            )
            result = response["choices"][0]['message']['content']
        except Exception as ee:
            # The inner try exists so the except clauses below can catch
            # exceptions raised by the recursive retry call.
            try:
                logging.error(f"userId:[{user_id}] gpt3.5 error: {ee}")
                if retry_count > 0:
                    return await self.achat_with_azure(retry_count - 1)
                logging.error(f"userId:[{user_id}] 请求azure超过重试次数, 直接返回''")
                result = ""
                if save_data:
                    loop.run_in_executor(None, self.save_train_data, self.prompt, result,
                                         self.__class__.__name__, 'enterprise_wechat_llm', 'gpt3_5_azure', user_id)
            except openai.error.InvalidRequestError:
                raise openai.error.InvalidRequestError("token limit", None)
            except openai.error.Timeout:
                raise TimeoutError
            except Exception:
                logging.error(f"userId:[{user_id}] gpt3.5 stream retry error: {ee}")
                return ""
        finally:
            logging.info(f"userId:[{user_id}] {agent_name} 请求大模型gpt3.5_azure耗时: {time.time() - start_time} s")
        return result

    async def achat_with_azure_gpt4(self, retry_count=0, temperature=0.9, agent_name='agentXX', save_data=True):
        """Call the next Azure GPT-4 deployment from the round-robin cycle.

        NOTE(review): unlike the other methods here, retry_count counts UP
        (a retry happens while retry_count < 1); after one retry the call
        falls through to chat_with_gpt.
        """
        user_id = local_data.user_id
        logging.info(f"userId:[{user_id}] {agent_name} retry_count:{retry_count} azure-gpt-4 prompt is: {self.gpt_prompt}")
        result = ""
        # (endpoint, deployment name, api key), rotated at module level.
        key_tuple = next(azure_data_cycle)
        url = key_tuple[0]
        model = key_tuple[1]
        key = key_tuple[2]
        content = ""
        try:
            headers = {
                "Content-Type": "application/json",
                "api-key": key
            }
            payload = {
                "messages": self.gpt_prompt,
                "temperature": temperature,
                "max_tokens": 1024,
                "model": "gpt-4-1106-preview",
                # "request_timeout": 30
                # "stream": True,
            }
            async with aiohttp.ClientSession() as session:
                response = await session.post(url + "openai/deployments/" + model + "/chat/completions?api-version=2023-05-15",
                                              headers=headers, json=payload,
                                              timeout=aiohttp.ClientTimeout(total=100))
                content = await response.content.read()
            result = json.loads(content.decode())["choices"][0]['message']['content']
            if save_data:
                # Persist the prompt/response pair off the event loop.
                loop.run_in_executor(None, self.save_train_data, self.prompt, result,
                                     self.__class__.__name__, "enterprise_wechat_llm", "azure-gpt-4", user_id)
        except Exception as ee:
            if content:
                logging.error(f"userId:[{user_id}] azure-gpt-4 gpt代理接口返回异常 api_key:{key} receive message is:{content.decode()} e:{ee}")
            if retry_count < 1:
                logging.info(f"userId:[{user_id}] {agent_name} 重试 azure-gpt-4")
                return await self.achat_with_azure_gpt4(retry_count=retry_count + 1, agent_name=agent_name)
            else:
                logging.warning(f"userId:[{user_id}] {agent_name}, api_key:[{key}], 请求azure-gpt-4超过重试次数, 直接请求第三方代理")
                return await self.chat_with_gpt(agent_name='话术生成', timeout=50)
        return result

    async def achat_llm(self, agent_name='话术风格', model_version='wechat_llama-2_13b_0928',
                               max_new_tokens=1024, save_data=True):   # wechat_llama-2_13b_new
        """Query the self-hosted model, keeping only the stream's final chunk.

        Returns 'abandon' when this thread's message was superseded; falls
        back to Azure GPT-4 when the stream yields nothing or an empty text.
        """
        user_id = local_data.user_id
        # Same superseded-message guard as chat_with_gpt.
        buffer = msg_buffer_util.get_buffer_top(user_id)
        if buffer:
            if buffer.get('thread_id') != threading.current_thread().ident:
                logging.info(f"userId:[{user_id}] {agent_name}Agent 处理一轮消息的线程已经变更, 该线程放弃处理该消息")
                return 'abandon'
            if buffer.get('msg') != local_data.msg:
                logging.info(f"userId:[{user_id}] {agent_name}Agent 当前处理的消息和缓冲区的消息不一致, 该线程放弃处理该消息")
                return 'abandon'
        start_time = time.time()
        # Drain the stream; after the loop, x holds the LAST yielded chunk.
        async for x in self.achat_llm_stream(agent_name=agent_name, model_version=model_version, max_new_tokens=max_new_tokens):
            pass
            # logger.info("the llm response is :{}".format(x["text"]))
        logging.info(f"userId:[{user_id}] {agent_name}Agent 请求自有大模型耗时: {time.time() - start_time} s")
        try:
            result = x.get("text", "")
        except:
            # x is unbound (NameError) when the stream produced no chunks,
            # e.g. on a timeout inside achat_llm_stream.
            logging.warning(f"userId:[{user_id}] {agent_name}Agent 请求自有大模型x变量异常, 直接请求第三方代理")
            result = await self.achat_with_azure_gpt4(agent_name=agent_name)
        if result == "":
            logging.warning(f"userId:[{user_id}] {agent_name}Agent 请求自有大模型返回空, 直接请求第三方代理 x:{x}")
            result = await self.achat_with_azure_gpt4(agent_name=agent_name)
        if save_data:
            loop.run_in_executor(None, self.save_train_data, self.prompt, result,
                                 self.__class__.__name__, 'enterprise_wechat_llm', model_version, user_id)
        return result

    async def achat_llm_stream(self, agent_name, model_version, max_new_tokens):
        """Yield JSON chunks from the FastChat worker's NUL-delimited stream.

        On timeout (or any error) the generator logs and simply stops, so the
        caller may receive no chunks at all.
        """
        user_id = local_data.user_id
        logging.info(f"userId:[{user_id}] {agent_name}Agent prompt is :{self.model_prompt_self}")
        payload = {
            "model": model_version,
            "prompt": self.model_prompt_self,
            "temperature": 0.9,
            "max_new_tokens": max_new_tokens,
            'repetition_penalty': 1.0,
            'top_p': 1.0,
            "stop": '###',
            "stop_token_ids": None,
            "echo": False,
        }
        try:
            async with aiohttp.ClientSession() as session:
                # NOTE(review): both branches are 15 — the env check is a
                # no-op; confirm whether prod was meant to differ.
                timeout = 15 if env_str == 'prod' else 15
                async with session.post(
                        "http://" + profile_['external']['lingxi_llm_host'] + ":21001" + "/worker_generate_stream",
                        headers={"User-Agent": "FastChat Client"}, json=payload,
                        timeout=aiohttp.ClientTimeout(total=timeout)) as response:
                    # An empty delimiter is not supported by aiohttp, so we'll read chunk by chunk
                    # and split on the NUL bytes that delimit JSON messages.
                    buffer = b''
                    async for line in response.content.iter_any():
                        buffer += line
                        while b'\0' in buffer:
                            chunk, buffer = buffer.split(b'\0', 1)
                            if chunk:
                                data = json.loads(chunk.decode())
                                yield data
        except:
            logging.error(f"userId:[{user_id}] {agent_name}Agent 请求自有大模型超时")

    async def achat_with_lingxi(self, retry_count=1, temperature=0.9, agent_name='agentXX', model='gpt-4-0314',
                                save_data=True):
        """Last-resort proxy call; returns '' once retries are exhausted.

        NOTE(review): the api key is hardcoded, and the retry call drops
        temperature/agent_name/model/save_data, so a retry runs with the
        parameter defaults.
        """
        user_id = local_data.user_id
        key = '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'
        logging.info(f"userId:[{user_id}] {agent_name} lingxi-gpt, api_key:[{key}], model:{model} prompt is: {self.gpt_prompt}")
        headers = {
            "Content-Type": "application/json",
            "Authorization": "Bearer " + key
        }
        payload = {
            "model": model,
            "messages": self.gpt_prompt,
            "temperature": temperature,
            "max_tokens": 300
        }
        start_time = time.time()
        try:
            content = await fetch(headers, payload, lingxi_url)
            return json.loads(content.decode())["choices"][0]['message']['content']
        except Exception as e:
            logging.error(f"userId:[{user_id}] 请求自有代理失败:{e}")
            if retry_count > 0:
                return await self.achat_with_lingxi(retry_count - 1)
            logging.error(f"userId:[{user_id}] lingxi-gpt 请求超过重试次数, 直接返回''")
            result = ""
            if save_data:
                loop.run_in_executor(None, self.save_train_data, self.prompt, result,
                                     self.__class__.__name__, 'enterprise_wechat_llm', model, user_id)
            return result
        finally:
            logging.info(f"userId:[{user_id}] {agent_name} lingxi-gpt 请求自有代理{model}耗时: {time.time() - start_time} s")
