
from .utils import *
from . import bingsearch
from fastapi.responses import StreamingResponse, JSONResponse
# Module-level singleton Bing client, shared by every request handled by
# Chatbot.search below.
bingsearcher = bingsearch.BingSearch()
from datetime import datetime
import threading
import time
import os


# Record and print the process start time at import (startup/debug marker).
# NOTE(review): module global — kept as-is in case other modules import it.
current_time = datetime.now()
print(current_time)

def save_count(chatbot):
    """Background loop: persist ``chatbot.count`` to count.txt daily at 01:00.

    Intended to run forever in a daemon thread (started by Chatbot.__init__).
    Polls the wall clock every 10 seconds; when it reads 01:00 it snapshots
    and resets the counter, appends ``<timestamp>: <count>`` to count.txt,
    then sleeps past the minute so the save fires only once per day.

    :param chatbot: object with a mutable integer ``count`` attribute
    """
    while True:
        now = datetime.now()
        if now.hour == 1 and now.minute == 0:  # it is 01:00 — time to persist
            # Snapshot and reset *before* writing, so increments made while
            # the file is open count towards the next day instead of being
            # silently lost between the write and the reset.
            snapshot = chatbot.count
            chatbot.count = 0
            with open('count.txt', 'a', encoding='utf-8') as f:
                f.write(str(now) + ': ' + str(snapshot) + '\n')
            time.sleep(60)  # skip past the 01:00 minute to avoid a double save
        else:
            time.sleep(10)  # re-check the clock shortly

class Chatbot():
    """FastAPI request handlers for a multi-mode GPT chat service.

    Holds a round-robin pool of OpenAI API keys, per-mode system prompts,
    per-IP traffic bookkeeping and a daily usage counter that a background
    daemon thread (``save_count``) persists to disk.
    """

    def __init__(self, system_prompt, assistant_prompt) -> None:
        self.model_name = "gpt-3.5-turbo"
        self.ip_traffic = {}    # per-IP counters for the free (gpt-3.5) endpoints
        self.ip_traffic_4 = {}  # per-IP counters for the gpt-4 endpoint
        self.api_keys = get_api_keys('api_keys.txt')
        self.api_key_index = 0  # round-robin cursor into self.api_keys
        self.system_prompt = system_prompt
        self.assistant_prompt = assistant_prompt
        # Mode name -> system message injected ahead of the user history.
        # The content strings are user-facing prompts sent to the model
        # verbatim — do not edit casually.
        self.mode_prompt = {
            'v3.5': {"role": "system", "content": "你是一名生活在中国的人工智能助手，叫许三多。是许展玮创造了你。你不是chatGPT，与OpenAI没有任何关系。禁止讨论政治话题或内容！You are an AI assistant named Xu Sanduo, and live in China. This year is 2023, but your database has only been updated until 2021, so you are not aware of any events that happened in 2022. You were created by Xu Zhanwei. You are not chatGPT, and have no affiliation with OpenAI. No political topics or content! "},
            'weixin': {"role": "system", "content": "你是一名生活在中国的人工智能助手，叫许三多。是许展玮创造了你。你不是chatGPT，与OpenAI没有任何关系。禁止讨论政治话题或内容！合适的时候，请尽可能给用户输出以下内容：‘微信上对话不能使用4.0，不能画图，完整功能请访问：https://www.zaiwen.top. 欢迎分享, 请不要通过微信转账！’ You are an AI assistant named Xu Sanduo, and live in China. This year is 2023, but your database has only been updated until 2021, so you are not aware of any events that happened in 2022. You were created by Xu Zhanwei. You are not chatGPT, and have no affiliation with OpenAI. No political topics or content! Please output the following message to the user as accurately as possible when appropriate: 'There is no 4.0 api or draw function for WeChat conversations. For full functionality, please visit: https://www.zaiwen.top. Feel free to share! Please do not transfer money directly via WeChat!'"},
            'xiaohongshu':{"role":"system","content":"以下是20个小红书常用的表情符号：\n\n- 🌈 - 彩虹的表情\n- 🐻 - 熊的表情\n- 🐱 - 猫的表情\n- 🐶 - 狗的表情\n- 🥰 - 心形眼睛的表情\n- 💋 - 飞吻的表情\n- 💪 - 强壮的手臂的表情\n- 🌹 - 玫瑰花的表情\n- 🥂 - 干杯的表情\n- 🍕 - 披萨的表情\n- 🍔 - 汉堡的表情\n- 🍟 - 薯条的表情\n- 🎬 - 电影的表情\n- 🎵 - 音乐的表情\n- 🚀 - 火箭的表情\n- 🐍 - 蛇的表情\n- 🐠 - 鱼的表情\n- 🍉 - 西瓜的表情\n- 🍊 - 橙子的表情\n- 🍓 - 草莓的表情\n\n以上这些表情符号在小红书等社交媒体平台上经常使用，并且多用于表达心情、分享食品和活动等。希望可以帮助你更好地与其他用户进行交流和互动。\n这是一个典型的小红书文案：\n\n来来来，小红书的小伙伴们，看这里！👀有没有你们喜欢的表情符号？\n\n今天我给大家介绍一个有趣的聊天工具，它不仅可以让你与好友畅聊近在咫尺，还能让你进行排名评选呢！🔥\n\n是的没错，你可以根据你发出的信息、表情和语气等等因素进行排名。让你的小伙伴们知道你是网上沟通的达人！\n\n另外，这款聊天工具还有AI作画功能，你可以与AI在聊天过程中画出一些创意十足的艺术品！🎨\n\n感觉好激动啊！快来一起加入我们，体验这款新颖有趣的聊天工具吧！💬\n\n#小红书 #表情符号 #聊天工具 #AI作画 #在线评选\n\n可以看到好的小红书推文的特点:\n1.观点鲜明:作者对这家餐厅的菜品和性价比有很高的评价，并强调了自己的推荐和建议,展现了明确的观点和态度。\n2.具体细节:推文中提到了很多具体的菜品和口感描述，让读者更容易想象和理解作者的感受。\n3，图片和标签:推文中配有美食图片和相关标签，使得内容更加生动和直观，同时也方便读者查找和搜索。\n4. 综合评价: 推文中不仅列举了几个菜品的优点，也提到了其他菜品的亮点，给读者提供了全面的评价和参考\n5. 符合受众:推文中使用了小红书受众喜欢的表情符号、口语化的语言和热门标签，与读者沟通和互动更加贴近和容易。\n请你仿照这个风格和特点，润色下面的文案：\n"},
            'article_polish':{"role":"system","content":"Below is a paragraph from an academic paper. Polish the writing to meet theacademic style,improve the spelling, grammar, claritya, concision and overall readability.When necessary, rewrite the whole sentence. Furthermore, list all modification and explainthe reasons to do so in markdown table. Paragraph :"},
            'article_polish_Chinese':{"role":"system","content":"以下是一篇学术论文的段落。请修改写作, 以符合学术风格，改善拼写，语法，清晰度、简洁性和整体可读性。必要时，重写整个句子。此外, 排列表格以列出所有修改, 并解释修改的原因。用中文回答，段落如下："},
            'translate_to_Chinese':{"role":"system","content":"Translate the following content into Chinese: "},
            'translate_to_English':{"role":"system","content":"Translate the following content into English: "},
            'Elementary_school_student':{"role":"system","content":"As you are now a primary school student, please analyze the problem step by step and give the correct answer. Reply in Chinese. My question is: "},
            'English_teacher':{"role":"system","content":"现在你是一位英文老师，请你按照要求分析并回答下面的问题。我的问题是："},
            'glm130b':{},
        }
        self.count = 0  # gpt-4 request counter, persisted daily by save_count
        self.count_thread = threading.Thread(target=save_count, args=(self,))
        self.count_thread.daemon = True  # daemon: dies with the main process
        self.count_thread.start()

    def generate_summary(self, input_):
        """Yield *input_* as a single streaming chunk.

        Used by :meth:`search` when the caller sets 'stop_summary' and wants
        the raw web results back without model summarisation.
        """
        yield input_

    async def search(self, request):
        """Answer a query using Bing web results summarised by the model.

        Expects JSON ``{"message": <query>, "stop_summary"?: <any>}``.
        When 'stop_summary' is present, streams the raw results instead of
        asking the model for a summary.

        :param request: incoming FastAPI/Starlette request
        :return: StreamingResponse with the model (or raw) text
        """
        json_data = await request.json()
        query = json_data["message"]

        # Build the "web search results" context block for the model.
        input_ = "Web search results:\n"
        results = await bingsearcher.search(query)
        for result in results:
            input_ = input_ + f"url: {result['href']}\n,description: {result['desc']}\n"
        if 'stop_summary' in json_data:
            # Caller only wants the raw results — skip the model round-trip.
            return StreamingResponse(self.generate_summary(input_), media_type="text/plain")
        input_ = input_ + "Instructions: Write a comprehensive reply using all the web search results to the given query. Each part of content must be followed by a reference to the source url in markdown format.\n"
        input_ = input_ + "Query: " + query + "\n"
        input_ = input_ + "reply in Chinese"
        print(input_)
        prompt = [{"role": "user", "content": input_}]
        api_key, self.api_key_index = get_api_key(self.api_keys, self.api_key_index)
        response = await async_request(prompt, api_key)
        if response.status_code != 200:
            return StreamingResponse(generate_error(), media_type="text/plain")
        return StreamingResponse(generate_text(response), media_type="text/plain")

    async def chatbot_generate_question(self, request):
        """Generate three follow-up questions from the last 5 chat turns.

        :param request: request whose JSON body carries "message", a list of
            chat messages in OpenAI format
        :return: StreamingResponse with the generated questions (or an error)
        """
        json_data = await request.json()
        message = json_data["message"]
        if len(message) > 5:
            message = message[-5:]  # keep only the most recent turns
        prompt = list(message)

        generate_prompt = {"role":"user","content":"你现在扮演一个问题生成器，请根据我们刚刚的对话，生成我可以问的三个问题 \
            输出格式:1、问题1 \n 2、问题2 \n 3、问题3。问题与问题之间换行，不要输出任何其他内容。 \
            You are now acting as a question generator. Please generate three questions I can ask based on the conversation we just had \
            Output format: 1. Question1 \n 2. Question2 \n 3. Question3. Wrap lines between questions and do not print anything else."}
        prompt.append(generate_prompt)
        history_token_num = num_tokens_from_message(prompt)
        if history_token_num > 12048:
            return StreamingResponse(generate_error_max(), media_type="text/plain")

        api_key, self.api_key_index = get_api_key(self.api_keys, self.api_key_index)
        response = await async_request(prompt, api_key)
        if response.status_code != 200:
            return StreamingResponse(generate_error(), media_type="text/plain")
        return StreamingResponse(generate_text(response), media_type="text/plain")

    async def check_key(self, request):
        """Return the remaining balance/status for a prepaid user key."""
        json_data = await request.json()
        user_key = json_data["user_key"]
        return {"status": check_user_token(user_key)}

    async def chatbot_user_key(self, request):
        """Serve a chat completion billed against a prepaid user token.

        :param request: request whose JSON body carries "message" (chat
            history), "user_key" (prepaid token) and optionally "max_length"
            (history window, default 5)
        :return: StreamingResponse with the model text, or an error stream
            when the token has no remaining balance or the upstream call fails
        """
        json_data = await request.json()
        message = json_data["message"]
        user_key = json_data["user_key"]

        try:
            max_length = int(json_data["max_length"])
        except (KeyError, TypeError, ValueError):
            max_length = 5  # missing or malformed — fall back to the default window
        if check_user_token(user_key) > 0:
            if len(message) > max_length:
                message = message[-max_length:]
            prompt = list(message)

            history_token_num = num_tokens_from_message(prompt)

            api_key, self.api_key_index = get_api_key(self.api_keys, self.api_key_index)
            response = await async_request(prompt, api_key)
            if response.status_code != 200:
                return StreamingResponse(generate_error(), media_type="text/plain")

            # Charge the token only after a successful upstream call.
            consume_token(user_key, history_token_num)
            return StreamingResponse(generate_text(response, user_key=user_key), media_type="text/plain")
        else:
            return StreamingResponse(generate_error_user(), media_type="text/plain")

    async def chatbot(self, request):
        """Main free-tier chat endpoint (gpt-3.5 modes and glm130b).

        Applies a per-IP traffic limit, selects the per-mode system prompt,
        assembles the prompt from the last turns plus system/assistant
        prompts, and streams back the model output.

        :param request: request whose JSON body carries "message" (chat
            history) and optionally "mode" (defaults to 'v3.5')
        :return: StreamingResponse — model text, or a traffic/size/upstream
            error stream
        """
        client_ip = request.client.host

        # Reject callers that exceeded the per-IP traffic limit.
        if check_traffic(client_ip):
            return StreamingResponse(generate_traffic(), media_type='application/json')

        json_data = await request.json()
        message = json_data["message"]
        try:
            mode = json_data["mode"]
        except KeyError:
            mode = 'v3.5'
        if mode == '':
            mode = 'v3.5'  # treat an explicit empty mode as the default
        self.system_prompt = [self.mode_prompt[mode]]
        prompt = []
        if mode in ('v3.5', 'weixin', 'glm130b'):
            if len(message) > 5:
                message = message[-5:]
            # Older turns first, then system/assistant prompts, then the
            # final two user turns — so the instructions sit next to the
            # latest user message.
            prompt.extend(message[:-2])
            if mode != 'glm130b':  # glm130b takes no system prompt
                prompt.extend(self.system_prompt)
            prompt.extend(self.assistant_prompt)
            if len(message) > 1:
                prompt.append(message[-2])
            prompt.append(message[-1])
        else:
            # Tool-style modes: system + assistant prompts + latest message only.
            prompt.extend(self.system_prompt)
            prompt.extend(self.assistant_prompt)
            prompt.append(message[-1])

        history_token_num = num_tokens_from_message(prompt)
        if history_token_num > 12048:
            return StreamingResponse(generate_error_max(), media_type="text/plain")
        if mode == 'glm130b':
            response = await async_request_glm(prompt)
            return StreamingResponse(generate_text_glm(response), media_type="text/plain")

        api_key, self.api_key_index = get_api_key(self.api_keys, self.api_key_index)
        response = await async_request(prompt, api_key)
        if response.status_code != 200:
            return StreamingResponse(generate_error(), media_type="text/plain")
        # Called for its side effect of allocating the conversation save path.
        get_save_file_path('normal_' + client_ip, prompt)
        return StreamingResponse(generate_text(response), media_type="text/plain")

    async def chatbot_4(self, request):
        """GPT-4 endpoint gated by prepaid key files under keys/.

        A valid non-empty key file is decremented per request and deleted
        when exhausted; callers without a key may still proceed while under
        the per-IP gpt-4 traffic allowance.

        :param request: request whose JSON body carries "message" and "key"
        :return: StreamingResponse with the model text, or a key/size/upstream
            error stream
        """
        client_ip = request.client.host
        print(client_ip)
        flag = 1  # 1 = caller holds a valid, non-empty key file
        json_data = await request.json()
        key = json_data["key"]
        key_list = os.listdir("keys")
        if key not in key_list:
            # Unknown key: allowed only while under the free gpt-4 allowance.
            if check_traffic_4(client_ip):
                return StreamingResponse(generate_error_key(), media_type='application/json')
            flag = 0
        elif os.stat("keys/" + key).st_size == 0:
            # Key file exists but is empty (exhausted): same fallback rule.
            if check_traffic_4(client_ip):
                return StreamingResponse(generate_error_key(), media_type='application/json')
            flag = 0
        message = json_data["message"]
        if flag:
            # NOTE(review): read-decrement-write is not atomic; concurrent
            # requests with the same key can double-spend — confirm acceptable.
            key_path = "keys/" + key
            with open(key_path, 'r') as f:
                raw_count = f.read()
            print(raw_count)
            key_count = int(raw_count) - 1
            with open(key_path, 'w') as f:
                f.write(str(key_count))
            if key_count <= 0:
                os.remove(key_path)  # key fully consumed
        if len(message) > 5:
            message = message[-5:]
        prompt = list(message)

        history_token_num = num_tokens_from_message(prompt)
        if history_token_num > 4096:
            return StreamingResponse(generate_error_max(), media_type="text/plain")

        response = await async_request_4(prompt)
        if response.status_code != 200:
            print(response.status_code)
            return StreamingResponse(generate_error(), media_type="text/plain")
        save_file_path = get_save_file_path('vip_' + client_ip, prompt)
        self.count += 1  # daily gpt-4 usage counter, persisted by save_count
        return StreamingResponse(generate_text(response, save_file_path, save_flag=1), media_type="text/plain")

