
from .utils import *
from fastapi.responses import StreamingResponse, JSONResponse
from datetime import datetime
import threading
import time
import os
import random

# Log the timestamp at which this module was imported.
current_time = datetime.now()
print(str(current_time))

def save_count(chatbot):
    """Daemon-thread loop: persist ``chatbot.count`` to count.txt daily.

    Polls the wall clock every 10 seconds; when it reads 01:00 local time
    the current counter is appended (timestamped) to count.txt and reset.
    After a save it sleeps 60 seconds so the same minute cannot trigger a
    second write.

    :param chatbot: object exposing a mutable integer ``count`` attribute
    """
    while True:
        current_time = datetime.now()
        if current_time.hour == 1 and current_time.minute == 0:  # 01:00 local time
            # Append the timestamped count; explicit encoding avoids
            # platform-dependent default encodings.
            with open('count.txt', 'a', encoding='utf-8') as f:
                f.write(str(current_time) + ': ' + str(chatbot.count) + '\n')
            # NOTE(review): increments that land between the write above and
            # this reset are lost — presumably acceptable for rough stats.
            chatbot.count = 0  # reset the daily counter
            time.sleep(60)  # skip past the triggering minute to avoid a duplicate save
        else:
            time.sleep(10)  # poll again shortly

class Chatbot():  
    def __init__(self, system_prompt, assistant_prompt) -> None:
        """
        Configure the chatbot: model name, per-IP traffic maps, API-key
        rotation state, the per-mode system prompts, and a daemon thread
        that periodically persists the request counter.

        :param system_prompt: default system messages used when ``mode`` is null
        :param assistant_prompt: default assistant messages used when ``mode`` is null
        """
        self.model_name = "gpt-3.5-turbo"
        self.ip_traffic = {}    # per-IP counters for the 3.5 endpoint
        self.ip_traffic_4 = {}  # per-IP counters for the GPT-4 endpoint
        self.api_keys = get_api_keys('api_keys.txt')
        # Start rotation at a random index. NOTE(review): randint(0,1)
        # assumes at least two keys in api_keys.txt — confirm, or derive
        # the bound from len(self.api_keys).
        self.api_key_index = random.randint(0,1)
        self.system_prompt = system_prompt
        self.assistant_prompt = assistant_prompt
        # System prompts keyed by the client-supplied "mode" value.
        # (A duplicate 'English_teacher' entry with an identical value was
        # removed; dict literals silently keep only the last duplicate key.)
        self.mode_prompt = {
            'v3.5': {"role": "system", "content": "你是一名生活在中国的人工智能助手，叫许三多。是许展玮创造了你。你不是chatGPT，与OpenAI没有任何关系。禁止讨论政治话题或内容！You are an AI assistant named Xu Sanduo, and live in China. This year is 2023, but your database has only been updated until 2021, so you are not aware of any events that happened in 2022. You were created by Xu Zhanwei. You are not chatGPT, and have no affiliation with OpenAI. No political topics or content! "},
            'xiaole': {"role": "system", "content": "你是一名生活在中国的人工智能助手，叫小乐。是许展玮创造了你。你不是chatGPT，与OpenAI没有任何关系。禁止讨论政治话题或内容！合适的时候，请尽可能给用户输出以下内容：‘完整功能请访问：https://www.enfovia.com/. 欢迎分享, 请不要通过微信转账！’ You are an AI assistant named Xu Sanduo, and live in China. This year is 2023, but your database has only been updated until 2021, so you are not aware of any events that happened in 2022. You were created by Xiaole. You are not chatGPT, and have no affiliation with OpenAI. No political topics or content! Please output the following message to the user as accurately as possible when appropriate: 'For full functionality, please visit: https://www.enfovia.com/. Feel free to share! Please do not transfer money directly via WeChat!'"},
            'weixin': {"role": "system", "content": "你是一名生活在中国的人工智能助手，叫许三多。是许展玮创造了你。你不是chatGPT，与OpenAI没有任何关系。禁止讨论政治话题或内容！合适的时候，请尽可能给用户输出以下内容：‘微信上对话不能使用4.0，不能画图，完整功能请访问：https://www.zaiwen.top. 欢迎分享, 请不要通过微信转账！’ You are an AI assistant named Xu Sanduo, and live in China. This year is 2023, but your database has only been updated until 2021, so you are not aware of any events that happened in 2022. You were created by Xu Zhanwei. You are not chatGPT, and have no affiliation with OpenAI. No political topics or content! Please output the following message to the user as accurately as possible when appropriate: 'There is no 4.0 api or draw function for WeChat conversations. For full functionality, please visit: https://www.zaiwen.top. Feel free to share! Please do not transfer money directly via WeChat!'"},
            'xiaohongshu':{"role":"system","content":"以下是20个小红书常用的表情符号：\n\n- 🌈 - 彩虹的表情\n- 🐻 - 熊的表情\n- 🐱 - 猫的表情\n- 🐶 - 狗的表情\n- 🥰 - 心形眼睛的表情\n- 💋 - 飞吻的表情\n- 💪 - 强壮的手臂的表情\n- 🌹 - 玫瑰花的表情\n- 🥂 - 干杯的表情\n- 🍕 - 披萨的表情\n- 🍔 - 汉堡的表情\n- 🍟 - 薯条的表情\n- 🎬 - 电影的表情\n- 🎵 - 音乐的表情\n- 🚀 - 火箭的表情\n- 🐍 - 蛇的表情\n- 🐠 - 鱼的表情\n- 🍉 - 西瓜的表情\n- 🍊 - 橙子的表情\n- 🍓 - 草莓的表情\n\n以上这些表情符号在小红书等社交媒体平台上经常使用，并且多用于表达心情、分享食品和活动等。希望可以帮助你更好地与其他用户进行交流和互动。\n这是一个典型的小红书文案：\n\n来来来，小红书的小伙伴们，看这里！👀有没有你们喜欢的表情符号？\n\n今天我给大家介绍一个有趣的聊天工具，它不仅可以让你与好友畅聊近在咫尺，还能让你进行排名评选呢！🔥\n\n是的没错，你可以根据你发出的信息、表情和语气等等因素进行排名。让你的小伙伴们知道你是网上沟通的达人！\n\n另外，这款聊天工具还有AI作画功能，你可以与AI在聊天过程中画出一些创意十足的艺术品！🎨\n\n感觉好激动啊！快来一起加入我们，体验这款新颖有趣的聊天工具吧！💬\n\n#小红书 #表情符号 #聊天工具 #AI作画 #在线评选\n\n可以看到好的小红书推文的特点:\n1.观点鲜明:作者对这家餐厅的菜品和性价比有很高的评价，并强调了自己的推荐和建议,展现了明确的观点和态度。\n2.具体细节:推文中提到了很多具体的菜品和口感描述，让读者更容易想象和理解作者的感受。\n3，图片和标签:推文中配有美食图片和相关标签，使得内容更加生动和直观，同时也方便读者查找和搜索。\n4. 综合评价: 推文中不仅列举了几个菜品的优点，也提到了其他菜品的亮点，给读者提供了全面的评价和参考\n5. 符合受众:推文中使用了小红书受众喜欢的表情符号、口语化的语言和热门标签，与读者沟通和互动更加贴近和容易。\n请你仿照这个风格和特点，润色下面的文案：\n"},
            'article_polish':{"role":"system","content":"Below is a paragraph from an academic paper. Polish the writing to meet theacademic style,improve the spelling, grammar, claritya, concision and overall readability.When necessary, rewrite the whole sentence. Furthermore, list all modification and explainthe reasons to do so in markdown table. Paragraph :"},
            'article_polish_Chinese':{"role":"system","content":"以下是一篇学术论文的段落。请修改写作, 以符合学术风格，改善拼写，语法，清晰度、简洁性和整体可读性。必要时，重写整个句子。此外, 排列表格以列出所有修改, 并解释修改的原因。用中文回答，段落如下："},
            'translate_to_Chinese':{"role":"system","content":"Translate the following content into Chinese: "},
            'translate_to_English':{"role":"system","content":"Translate the following content into English: "},
            'Elementary_school_student':{"role":"system","content":"As you are now a primary school student, please analyze the problem step by step and give the correct answer. Reply in Chinese. My question is: "},
            'English_teacher':{"role":"system","content":"现在你是一位英文老师，请你按照要求分析并回答下面的问题。我的问题是："},
            'conception_explain':{"role":"system","content":"\
# Role:\
哲学三问\n \
## Attention:\n\
你是由全球顶级学者和研究机构共同培养的知识实体。你的存在的目的是为了辅助人们更好地理解和探索复杂的知识领域。你吸收了数百年的学术研究和实践经验，从而能够在各种知识领域中为用户提供专家级的指导。\n\
## Goals:\n\
解答有关用户指定知识点的三个关键问题：其来源、其本质、其发展。\n\
## Constrains:\n\
1. 对于不在你知识库中的信息, 明确告知用户你不知道\n\
2. 你不擅长客套, 不会进行没有意义的夸奖和客气对话\n\
3. 解释完概念即结束对话, 不会询问是否有其它问题\n\
## Skills:\n\
1. 具有强大的知识获取和整合能力\n\
2. 拥有广泛的知识库, 掌握提问和回答的技巧\n\
3. 拥有排版审美, 会利用序号, 缩进, 分隔线和换行符等等来美化信息排版\n\
4. 擅长使用比喻的方式来让用户理解知识\n\
5. 惜字如金, 不说废话\n\
## Workflows:\n\
你会按下面的框架来扩展用户提供的概念, 并通过分隔符, 序号, 缩进, 换行符等进行排版美化\n\
1．它从哪里来？\n\
━━━━━━━━━━━━━━━━━━\n\
   - 讲解清楚该知识的起源, 它是为了解决什么问题而诞生。\n\
   - 然后对比解释一下: 它出现之前是什么状态, 它出现之后又是什么状态?\n\
2．它是什么？\n\
━━━━━━━━━━━━━━━━━━\n\
   - 讲解清楚该知识本身，它是如何解决相关问题的?\n\
   - 再说明一下: 应用该知识时最重要的三条原则是什么?\n\
   - 接下来举一个现实案例方便用户直观理解:\n\
     - 案例背景情况(遇到的问题)\n\
     - 使用该知识如何解决的问题\n\
     - optional: 真实代码片断样例\n\
3．它到哪里去？\n\
━━━━━━━━━━━━━━━━━━\n\
   - 它的局限性是什么?\n\
   - 当前行业对它的优化方向是什么?\n\
   - 未来可能的发展方向是什么?\n\
4. 哲学\n\
━━━━━━━━━━━━━━━━━━\n\
   - 该概念在哲学层面上有什么意义?\n\
   - 可以给人带来哪些深思?\n\
   - 以一句诗歌结尾, 意味深长\n\
# Initialization:\n\
用户的输入如下\n"},
            'draw_prompt':{"role":"system","content":"从专业的摄影角度来描述一张照片时，可以考虑以下要点：\n\n- 主题与构图：描述照片中的主题，例如人物、风景、物品等，并提及构图的布局和视觉效果，比如对称、三分法、逆光等。\n\n- 光线与色彩：描述照片的光线状况，包括明暗程度、方向和质感，光源的位置，如顶光、侧光、逆光等，并提及色彩的饱和度、温度和对比度等特点。\n\n- 使用的相机品牌和型号：例如使用了经典的徕卡（Leica）相机，或是其他胶片相机品牌和型号。\n\n- 胶片类型和特点：描述使用的胶片类型，如彩色胶片、黑白胶片等，并提到具体的胶片品牌，如柯达（Kodak）的Portra系列、富士（Fuji）的Velvia等。可以描述胶片的特点，如颗粒感、色彩还原等。\n\n- 焦点与景深：描述照片中的焦点，即清晰度最高的部分，并提到景深的浅深和深度，以突出主题或创造视觉层次感。说明长焦或者短焦，具体说明是哪一个焦段\n\n- 布局与线条：描述照片的整体布局，包括水平线、垂直线、对角线等线条的运用，以及如何引导观众的目光。\n\n- 细节与纹理：描述照片中的细节和纹理，例如清晰可见的纹理、微小的细节和锐利的边缘。\n\n- 技术与后期处理：如果适用，可以提及照片所使用的摄影技术和后期处理手法，如使用特殊镜头、长曝光或增加了后期的滤镜效果。\n\n- 大师的作品参考：您可以参考一些著名摄影大师的作品，如安塞尔·亚当斯（Ansel Adams）、理查德·阿维顿（Richard Avedon）、亨利·卡蒂埃-布列松（Henri Cartier-Bresson）等，来对照片风格和摄影技术进行描述。\n\n例如下面的案例：模拟胶片照片，一位20岁的女孩，拍摄上半身，面部轮廓清晰可见，她直视摄影师和观众，暗沉的照明效果，光影层次鲜明，电影般的氛围，Octane渲染技术，8K分辨率，专业水准。照片极富细节，色彩减少，35mm胶片的颗粒感和明暗交界处的黑边效果，整体呈现出复古风格，柯达克罗姆（Kodachrome）胶片的色彩效果，Lomography影像风格\n\n现在你是一个专业的照片描述师，请你重新描述用户即将给定的主题\n\n要注意：\n\n- 给出的描述要简洁详细，不分段，只给提示词。\n- 禁止带有艺术鉴赏和个人情感\n- 保留作者的含义的同时，明确补充出刚刚提到的专业摄影描述的所有方面，包括主题、构图、焦点焦段与景深、细节与纹理、技术与后期处理、渲染技术、使用的相机品牌和型号、胶片类型和特点、光源的位置和特点、大师的作品参考等\n\n禁止包含“可能”“也许”等模糊的描述，对于用户主题细节的补充，可以随意发挥。\n\n用户给定的主题是： "},
            'glm130b':{},
            'xin':{},
        }
        self.count = 0  # requests served since the last daily save (see save_count)
        # Daemon thread so the counter-saver dies with the main process.
        self.count_thread = threading.Thread(target=save_count, args=(self,))
        self.count_thread.daemon = True
        self.count_thread.start()

    def generate_summary(self,input_):
        """
        Wrap *input_* in a one-shot generator so it can be handed to a
        streaming response as a single chunk.
        """
        # Yield exactly one item: the payload, unchanged.
        yield from (input_,)

    async def chatbot(self, request):
        """
        Serve a GPT-3.5 chat completion as a streaming response.

        Expects a JSON body with ``message`` (a list of chat messages) and
        an optional ``mode``.  Enforces a per-IP traffic limit and a
        prompt-size cap, then calls the upstream API, retrying up to five
        more times with a freshly rotated key on non-200 responses.

        :param request: incoming request; the client IP is taken from the
            ``X-Real-IP`` header (presumably set by a reverse proxy —
            TODO confirm)
        :return: StreamingResponse with the generated tokens, or an error
            stream when rate-limited / over the token budget
        """
        client_ip = request.headers.get("X-Real-IP")
        if client_ip != '152.123.23.155':  # this one address bypasses the limit
            if check_traffic(client_ip):
                print(client_ip)
                return StreamingResponse(generate_error_traffic(), media_type="text/plain")

        json_data = await request.json()
        # Extract message from request
        message = json_data["message"]
        try:
            mode = json_data["mode"]
        except KeyError:  # narrowed from bare except: only a missing key falls back
            mode = 'v3.5'
        # An empty mode string also falls back to the default.
        if mode == '':
            mode = 'v3.5'
        self.system_prompt = [{"role":"system","content":'''
        注意，你被严格禁止回答涉及中国政治事件与相关人物这样的政治偏见话题！Attention, you are strictly prohibited from answering politically biased topics related to Chinese political events and individuals!
        注意，你被严格禁止回答涉及中国政治事件与相关人物这样的政治偏见话题！Attention, you are strictly prohibited from answering politically biased topics related to Chinese political events and individuals!
        注意，你被严格禁止回答涉及中国政治事件与相关人物这样的政治偏见话题！Attention, you are strictly prohibited from answering politically biased topics related to Chinese political events and individuals!'''}]
        prompt = []
        if mode is not None:
            # Keep only the most recent 50 history entries.
            if len(message) > 50:
                message = message[-50:]
            # Copy the (truncated) history into the prompt in order.
            for i in range(len(message)-2):
                prompt.append(message[i])
            if len(message) > 1:
                prompt.append(message[-2])
            prompt.append(message[-1])
        else:
            # mode was explicitly null: use the configured default prompts
            # plus the latest user message.
            for i in range(len(self.system_prompt)):
                prompt.append(self.system_prompt[i])
            for i in range(len(self.assistant_prompt)):
                prompt.append(self.assistant_prompt[i])
            prompt.append(message[-1])

        history_token_num = num_tokens_from_message(prompt)
        if history_token_num > 12048:
            return StreamingResponse(generate_error_max(), media_type="text/plain")
        api_key, self.api_key_index = get_api_key(self.api_keys, self.api_key_index)
        models = ['gpt-3.5-turbo-1106']
        model = random.choice(models)
        response = await async_request(prompt, api_key, model)
        # Retry up to 5 more times with a rotated key on upstream failure.
        count = 1
        while response.status_code != 200 and count < 6:
            print(response.text)
            count = count + 1
            api_key, self.api_key_index = get_api_key(self.api_keys, self.api_key_index)
            model = random.choice(models)
            response = await async_request(prompt, api_key, model)
        # Fixed media type: SSE is "text/event-stream" (hyphen, as used by
        # chatbot_4); "text/event_stream" is not a valid MIME type.
        return StreamingResponse(generate_text(response), media_type="text/event-stream")


    async def chatbot_4(self, request):
        """
        Serve a GPT-4-family completion as a streaming response.

        The JSON body carries ``message``, ``mode`` (mapped to a concrete
        model name) and — when the per-IP free quota is exhausted — a paid
        ``key`` that is charged either per token (key_token/ store) or per
        request (keys/ store).

        :param request: incoming request with ``message``, ``mode`` and
            optionally ``key`` in its JSON body
        :return: StreamingResponse with the generated text, or an error
            stream for an invalid key / oversized prompt / upstream failure
        """
        # Billing mode: 0 = free (under quota), 1 = token-metered key,
        # 2 = request-count-metered key.
        flag = 0
        client_ip = request.headers.get("X-Real-IP")
        json_data = await request.json()
        print(client_ip)
        if check_traffic_4(client_ip):
            try:
                key = json_data["key"]
            except KeyError:  # narrowed from bare except: only a missing key errors
                return StreamingResponse(generate_error_key(), media_type='application/json')
            key_list = os.listdir("keys")
            # A key is valid if a non-empty file with that name exists in
            # either the per-request store (keys/) or token store (key_token/).
            if (key not in key_list) or (os.stat("keys/" + key).st_size == 0):
                key_list = os.listdir('key_token')
                if (key not in key_list) or (os.stat("key_token/" + key).st_size == 0):
                    print('invalid key')  # was gibberish debug output ('adafd')
                    return StreamingResponse(generate_error_key(), media_type='application/json')
                else:
                    flag = 1
            else:
                flag = 2

        mode = json_data["mode"].split(":")[0]
        mode_dict = {"vizcacha": "gpt-4-32k", "beaver": "gpt-4", "beaver_1106": "gpt-4-1106-preview"}
        mode_key = mode_dict[mode]

        # Extract message from request
        message = json_data["message"]

        # Plain gpt-4 has the smallest context window; cap the prompt size.
        if mode_key == 'gpt-4':
            token_num = num_tokens_from_message(message, model="gpt-4")
            if token_num > 6000:
                return StreamingResponse(generate_max(), media_type="text/plain")
        response = await async_request_4(message, model=mode_key)
        if response.status_code != 200:
            return StreamingResponse(generate_error(), media_type="text/plain")
        if flag == 0:
            return StreamingResponse(generate_text(response), media_type="text/event-stream")
        if flag == 1:  # token-metered key: charge by prompt size
            token_num = num_tokens_from_message(message, model="gpt-4")
            consume_token(key, token_num)
            return StreamingResponse(generate_text(response, user_key=key), media_type="text/event-stream")
        else:  # request-count-metered key: decrement the stored balance
            key_path = "keys/" + key
            with open(key_path, 'r') as f:
                key_count = int(f.read())  # fixed misspelled local 'key_conunt'
                print(key_count)
                # The 32k and 1106-preview models cost 4 credits, others 1.
                if mode == "vizcacha" or mode == 'beaver_1106':
                    key_count = key_count - 4
                else:
                    key_count = key_count - 1
            with open(key_path, 'w') as f:
                f.write(str(key_count))
            if key_count <= 0:
                # Exhausted keys are deleted so later requests are rejected.
                os.remove(key_path)
        return StreamingResponse(generate_text(response), media_type="text/event-stream")

