import json

import requests
from lxml import etree
import tiktoken
import os
import inspect
import random
import string

from openai import OpenAI


def googleSearch(query,numberResult=10,siteUrl=None):
    """
    Query the Google Custom Search JSON API and return simplified results.

    :param query: search query string
    :param numberResult: number of results to request (the API caps this at 10)
    :param siteUrl: optional site to restrict the search to (siteSearch param)
    :return: list of dicts with 'title', 'link' and 'snippet' keys
    :raises requests.HTTPError: if the API responds with an error status
    """
    # NOTE(review): hard-coded credentials — move to env vars / config instead
    # of committing them to source control.
    apiKey = "AIzaSyBDIbsFJBUMt2_agRtD_ZBjdYfj0OoIvU8"
    cesId = "b223cd48fb42543d4"
    url = "https://www.googleapis.com/customsearch/v1"

    # Build the common params once; only add siteSearch when a site is given
    # (the original duplicated the whole dict in both branches).
    params = {
        "q": query,
        "key": apiKey,
        "cx": cesId,
        "num": numberResult,
    }
    if siteUrl is not None:
        params["siteSearch"] = siteUrl

    response = requests.get(url, params=params)
    response.raise_for_status()

    # Extract result items; 'items' is absent when there are no hits.
    searchResult = response.json().get('items', [])

    # 'snippet' is occasionally missing from an item, so use .get() with a
    # default instead of risking a KeyError.
    results = [{
        'title': item.get('title', ''),
        'link': item.get('link', ''),
        'snippet': item.get('snippet', '')
    } for item in searchResult]
    return results

#results = googleSearch("什么是glm4",numberResult=5,siteUrl="https://www.zhihu.com")
#print(results)

def get_search_answer(cookie,q,url):
    """
    Scrape a Zhihu page (Q&A question, column article, or specific answer),
    save its text content as a JSON file under a folder named after the query,
    and return the sanitized title.

    :param cookie: Zhihu session cookie string, sent with every request
    :param q: the search keyword; used as the output sub-folder name
    :param url: the Zhihu page URL to scrape
    :return: sanitized title string, or None when no title could be extracted
    """
    user_agent = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36"

    # BUG FIX: initialize all extraction results up front. The original set
    # code_ = False (iterating it raised TypeError for question/answer URLs)
    # and left title/text_d unbound (NameError) when no URL pattern matched.
    title = None
    text_d = []
    code_ = []
    headers = {
        'authority': 'www.zhihu.com',
        'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
        'accept-language': 'zh-CN,zh;q=0.9,en;q=0.8',
        'cache-control': 'max-age=0',
        'cookie': cookie,
        'upgrade-insecure-requests': '1',
        'user-agent': user_agent,
    }
    print(url)
    # Regular question page
    if 'zhihu.com/question' in url:
        res = requests.get(url, headers=headers).text
        res_xpath = etree.HTML(res)
        # Guard the [0] index: xpath returns an empty list when the layout
        # changed or the request was blocked.
        title_nodes = res_xpath.xpath('//div/div[1]/div/h1/text()')
        title = title_nodes[0] if title_nodes else None
        text_d = res_xpath.xpath(
            '//*[@id="root"]/div/main/div/div/div[3]/div[1]/div/div[2]/div/div/div/div[2]/span[1]/div/div/span/p/text()')

    # Column (zhuanlan) article page
    elif 'zhuanlan' in url:
        # BUG FIX: the authority header was misspelled 'zhaunlan.zhihu.com'.
        headers['authority'] = 'zhuanlan.zhihu.com'
        res = requests.get(url, headers=headers).text
        res_xpath = etree.HTML(res)
        title_nodes = res_xpath.xpath('//div[1]/div/main/div/article/header/h1/text()')
        title = title_nodes[0] if title_nodes else None
        text_d = res_xpath.xpath('//div/main/div/article/div[1]/div/div/div/p/text()')
        code_ = res_xpath.xpath('//div/main/div/article/div[1]/div/div/div//pre/code/text()')

    # URL pointing at one specific answer
    elif 'answer' in url:
        res = requests.get(url, headers=headers).text
        res_xpath = etree.HTML(res)
        title_nodes = res_xpath.xpath('//div/div[1]/div/h1/text()')
        title = title_nodes[0] if title_nodes else None
        text_d = res_xpath.xpath('//div[1]/div/div[3]/div/div/div/div[2]/span[1]/div/div/span/p/text()')

    if title is None:
        return None

    title = windows_create_name(title)

    # Build the answer body text (newlines flattened to spaces).
    text = ""
    for t in text_d:
        text += str(t).replace('\n', ' ')

    # Append any code snippets after the body text.
    for c in code_:
        text += str(c).replace("\n", " ")

    encoding = tiktoken.encoding_for_model("gpt-3.5-turbo")

    json_data = [
        {
            "link": url,
            "title": title,
            "content": text,
            "tokens": len(encoding.encode(text))
        }
    ]

    json_dir = r"F:\ai\03大模型开发实战\07智能在线搜索平台案例实践\%s" % q
    os.makedirs(json_dir, exist_ok=True)

    with open(json_dir + r"\%s.json" % title, "w", encoding="utf-8") as f:
        json.dump(json_data, f)

    return title

# Zhihu session cookie used for authenticated page requests in get_search_answer().
# NOTE(review): hard-coded session credentials — load from env/config instead of
# committing them to source control; this cookie will also expire over time.
cookie = '_xsrf=KUGUnRXehTTl5B4wphGCPAvzRVRllwl6; _zap=1dc3f095-3d66-4428-b584-623f2ec5aa67; Hm_lvt_98beee57fd2ef70ccdd5ca52b9740c49=1744510118; d_c0=PJKTHKVNSxqPTvB_-WlA1aCE4JtgpUxOt9U=|1744510117; __snaker__id=fgPGpxeMLd9v0duF; l_cap_id="Mjg5YzNkMTVmMThmNGQ2NTg5Y2VkZDk4MDcwNTIyMGM=|1744546752|5d97cd71101dfe9286eda5cc3c04f7cd80e958f2"; r_cap_id="N2FkNWU3MzQ1ZDAwNDk5MGJlNDRhMGYzYWZkMjQ1YWI=|1744546752|aceef79ba067425eff7f3a822d39bc3c23075549"; cap_id="YmQ2OGEyOWU5ZjJlNDAwNTk4ZjQ1YWIyMjkyOTJjMjQ=|1744546752|1fb1a7f3d478ad2a0a47f9362be6620993619b18"; gdxidpyhxdE=S%2BHe8HUKT1Rebc4RwlC4vAdHRJRK%2Fp60BNPRt%5CJNfxJadNzkefgOR8xGe6PhvuSHRc6hxO1pZS3iVEo%2BNNX%2FjdsaQLwRyhseCKs5sXmXiSE9G%5C%2FO%5CczNJTK35GtGuEihX%2F52RIblHSy4Uj%2Bn3Z%5C8xf82DrIE9j2fggxkrh1lOCajh3yu%3A1744558487225; __zse_ck=004_IYkZ/mdVQwnObMv6x3BMF/eBCTFZaTmEhyUIT1zjxDT4Vx4ZUJKXlGOrAEFpwsJme=v1gOLOL4GbBh3zebwbn6NgaoJ3Z9odReNEg8SLQsGk/fJZqxFKe0vd/kcAGhBX-fZIAALgkbGCxEJonenh0f91CtJm4Fs1yu4JsGn5noBnp0o4QbPFrxEvQWKNVutowVa/SvM5aXOhlvk26/y+ZUaoULh2px1HAl4MR7IE2EF1aVHvbXHt5ShMq4sFmCfIG; BEC=8ce9e721fafad59a55ed220f1ad7f253; captcha_session_v2=2|1:0|10:1744617557|18:captcha_session_v2|88:RTZoWVNUQWpwQUpjVDh0YVduUDVQRkhBNzk5ckZFdTJTV3lXTExTY0NhbjlBb3hCY3J2eXVSanNFVmV2c082Qw==|d8ccd08d12d8df3f7b606c3c761158a1a6f2fb0e96b5f86c5243699111dcad6c'
# title=get_search_answer(cookie,q="什么是glm4",url="https://zhuanlan.zhihu.com/p/704719982")
# print(title)

# with open(r"F:\ai\03大模型开发实战\07智能在线搜索平台案例实践\什么是glm4\GLM4大模型微调入门实战-命名实体识别（NER）任务.json","r") as f:
#     jd = json.load(f)
# print(jd)


def auto_functions(functions_list):
    """
    Build the `tools` parameter for the Chat Completions API from a list of
    Python functions, by asking the Chat model to generate a JSON Schema
    descriptor from each function's docstring.

    :param functions_list: list containing one or more function objects
    :return: list of tool descriptors in the OpenAI tools format
    """
    def _describe_all(fn_list):
        """Generate one tool descriptor per function via the Chat model."""
        tool_specs = []
        for fn in fn_list:
            # The docstring and name feed the prompts below.
            function_description = inspect.getdoc(fn)
            function_name = fn.__name__

            system_prompt = '以下是某的函数说明：%s,输出结果必须是一个JSON格式的字典，只输出这个字典即可，前后不需要任何前后修饰或说明的语句' % function_description
            user_prompt = '根据这个函数的函数说明，请帮我创建一个JSON格式的字典，这个字典有如下5点要求：\
                           1.字典总共有三个键值对；\
                           2.第一个键值对的Key是字符串name，value是该函数的名字：%s，也是字符串；\
                           3.第二个键值对的Key是字符串description，value是该函数的函数的功能说明，也是字符串；\
                           4.第三个键值对的Key是字符串parameters，value是一个JSON Schema对象，用于说明该函数的参数输入规范。\
                           5.输出结果必须是一个JSON格式的字典，只输出这个字典即可，前后不需要任何前后修饰或说明的语句' % function_name

            response = client.chat.completions.create(
                              model="gpt-3.5-turbo",
                              messages=[
                                {"role": "system", "content": system_prompt},
                                {"role": "user", "content": user_prompt}
                              ]
                            )
            # Strip any markdown code fences the model may wrap around the JSON.
            raw = response.choices[0].message.content
            schema = json.loads(raw.replace("```json", "").replace("```", ""))
            tool_specs.append({"type": "function", "function": schema})
        return tool_specs

    # Model output is not guaranteed to be valid JSON, so retry up to 4 times.
    max_attempts = 4
    for attempt in range(1, max_attempts + 1):
        try:
            return _describe_all(functions_list)
        except Exception as e:
            print("发生错误：", e)
            if attempt == max_attempts:
                print("已达到最大尝试次数，程序终止。")
                raise  # re-raise the last exception
            print("正在重新运行...")

def get_answer(q,cookie=cookie):
    """
        智能助手函数，当你无法回答某个问题时，调用该函数，能够获得答案
        :param q: 必选参数，询问的问题，字符串类型对象
        :param cookie: 可选参数，当前网站的缓存cookie，字符串类型对象
        :return：某问题的答案，以字符串形式呈现
        """
    # NOTE: the docstring above is consumed at runtime by auto_functions()
    # via inspect.getdoc() to build the tool schema, so it is left unchanged.

    # Convert the user question into a keyword better suited to Zhihu search.
    q = convert_keyword(q)

    # Search (5 results instead of the default 10 to save quota), restricted
    # to zhihu.com.
    results = googleSearch(q, numberResult=5, siteUrl="https://zhihu.com")

    # Create the per-question sub-folder.
    folderPath = r"F:\ai\03大模型开发实战\07智能在线搜索平台案例实践\%s" % q
    os.makedirs(folderPath, exist_ok=True)

    # Accumulate scraped page contents until the token budget is exhausted.
    numToken = 0
    content = ""
    for item in results:
        url = item["link"]
        title = get_search_answer(cookie, q, url)
        if title is None:
            # Page could not be parsed; skip it.
            continue

        title_file = r"F:\ai\03大模型开发实战\07智能在线搜索平台案例实践\%s\%s.json" % (q, title)
        # BUG FIX: the original called os.makedirs() on the JSON *file* path
        # when it was missing and then tried to open the directory for
        # reading; skip entries whose JSON file was never written instead.
        if not os.path.exists(title_file):
            continue

        with open(title_file, "r", encoding="utf-8") as f:
            jd = json.load(f)
        numToken += jd[0]["tokens"]
        if numToken <= 12000:
            content += jd[0]["content"]
        else:
            break

    # BUG FIX: the original `return` sat inside the loop body, so only the
    # first search result was ever used.
    return content

# qa = getAnswer(q="什么是gpt-4o",cookie=cookie)
# print(qa)

def windows_create_name(s,max_length=255):
    """
    Sanitize a string into a valid Windows file/folder name.

    Parameters:
    - s (str): input string.
    - max_length (int): maximum length of the output, default 255.

    Returns:
    - str: a string safe to use as a Windows file/folder name.
    """

    # Characters Windows forbids in file/folder names.
    forbidden_chars = ['<', '>', ':', '"', '/', '\\', '|', '?', '*']
    for char in forbidden_chars:
        s = s.replace(char, "_")

    # Strip trailing spaces AND dots — Windows silently drops both.
    # BUG FIX: the original only stripped dots despite its comment saying
    # "spaces or dots".
    s = s.rstrip(". ")

    # Reserved device names cannot be used as file names; append an
    # underscore to disambiguate.
    reserved_names = ["CON", "PRN", "AUX", "NUL", "COM1", "COM2", "COM3", "COM4", "COM5", "COM6", "COM7", "COM8",
                      "COM9",
                      "LPT1", "LPT2", "LPT3", "LPT4", "LPT5", "LPT6", "LPT7", "LPT8", "LPT9"]
    if s.upper() in reserved_names:
        s += "_"

    # Truncate if the name is too long.
    if len(s) > max_length:
        s = s[:max_length]

    return s

# Quick manual smoke test of the filename sanitizer.
res = windows_create_name("一切.con")
print(res)

# NOTE(review): hard-coded API key — load from an environment variable
# (e.g. OPENAI_API_KEY) instead of committing the secret to source control.
api_key = "sk-6S0PtpNia71gjcfwSsDPsJ9mGqsVPr2XRQzAx1dHbJS7RW4t"
# OpenAI-compatible relay endpoint used for all chat completions below.
api_base="https://chatapi.littlewheat.com/v1"

client = OpenAI(api_key=api_key,base_url=api_base)

def run_conversation(messages, functions_list=None, model="gpt-3.5-turbo"):
    """
    Run a chat completion that can automatically execute external tool calls.

    :param messages: required, list of message dicts passed to the Chat model
    :param functions_list: optional, list of callables to expose as tools
                           (default None = plain completion, no tools)
    :param model: Chat model name, default "gpt-3.5-turbo"
    :return: the model's final text reply
    """
    if functions_list is None:
        # No external functions: a single plain completion.
        response = client.chat.completions.create(
            model=model,
            messages=messages
        )
        response_message = response.choices[0].message
        final_response = response_message.content
    # With an external function library, let the model pick tools to call.
    else:
        # Build the tools schema and a name -> callable lookup table.
        tools = auto_functions(functions_list)
        available_functions = {func.__name__: func for func in functions_list}

        # First call: the model decides whether to invoke a tool.
        response = client.chat.completions.create(model=model,
                                                  messages=messages,
                                                  tools=tools,
                                                  tool_choice="auto")
        response_message = response.choices[0].message

        tool_calls = response_message.tool_calls
        if tool_calls:
            # The assistant message carrying tool_calls must precede the
            # tool-result messages in the conversation.
            messages.append(response_message.model_dump())
            for tool_call in tool_calls:
                function_name = tool_call.function.name
                function_to_call = available_functions[function_name]
                function_args = json.loads(tool_call.function.arguments)

                # Actually execute the external function here.
                function_response = function_to_call(**function_args)
                messages.append({
                    "role": "tool",
                    "content": function_response,
                    "tool_call_id": tool_call.id
                })

            # Second call: the model composes the answer from tool output.
            second_response = client.chat.completions.create(model=model,
                                                             messages=messages,
                                                             tools=tools)
            print(second_response.choices[0].message)
            final_response = second_response.choices[0].message.content
        else:
            final_response = response_message.content

    # BUG FIX: the original `return` was indented inside the else-branch, so
    # calls without functions_list always returned None.
    return final_response


# response=run_conversation(messages=[
#     {"role": "system", "content": "根据用户输入的问题进行回答，如果知道问题的答案，请回答问题答案，如果不知道问题答案，调用智能助手函数回答’"},
#     {"role": "user", "content": '介绍一下什么是glm5'}
#     ],
#     functions_list=[get_answer],
#     model="gpt-3.5-turbo")
# print(response)

# response = client.chat.completions.create(model="gpt-3.5-turbo",
#                                           messages=[
#                                               {"role": "user", "content": "你知道什么是知乎么？"}
#                                           ]
#                                           )
# print(response)

# response = client.chat.completions.create(model="gpt-3.5-turbo",
#                                           messages=[
#                                               {"role": "user", "content": "我如果想在知乎上搜索一些问题的答案，在设计搜索关键词方面，有什么技巧么？"}
#                                           ]
#                                           )
# print(response)

def generate_random_key(length=30):
    """
    Generate a random alphanumeric sentinel string.

    :param length: number of characters to generate, default 30
    :return: random string of ASCII letters and digits

    NOTE(review): uses `random`, which is acceptable for the don't-know
    sentinel in identify_model(); switch to `secrets` if this is ever used
    for real credentials.
    """
    # BUG FIX: removed a leftover debug print() of a single random character.
    alphabet = string.ascii_letters + string.digits
    return ''.join(random.choice(alphabet) for _ in range(length))

def convert_keyword(q):
    """
    Turn a user question into a keyword suited to searching on Zhihu.

    Uses a one-shot example so the model returns a single search keyword.

    :param q: the user's question, string
    :return: the model-generated search keyword, string
    """
    prompt_messages = [
        {"role": "system",
         "content": "你专门负责将用户的问题转化为知乎网站搜索关键词，只返回一个你认为最合适的搜索关键词即可"},
        {"role": "user", "content": "请问，GPT-3.5微调总共分为几步？"},
        {"role": "assistant", "content": "GPT-3.5微调流程"},
        {"role": "user", "content": q},
    ]
    completion = client.chat.completions.create(model="gpt-3.5-turbo",
                                                messages=prompt_messages)
    return completion.choices[0].message.content

# res = convert_keyword("GLM5是用来做什么的？")
# print(res)

def identify_model(q):
    """
    Probe whether the chat model already knows the answer to `q`.

    A random sentinel key is injected into the system prompt; the model is
    instructed to echo the sentinel when it does not know the answer, and a
    one-shot example demonstrates that behavior.

    :param q: the user's question, string
    :return: True when the model signals it does not know (an online search
             is needed), otherwise the model's answer text
    """
    # Create the random sentinel key.
    sk = generate_random_key()

    # Ask the model to either answer or echo the sentinel.
    response = client.chat.completions.create(model="gpt-3.5-turbo",
                                              messages=[
                                                  {"role": "system", "content": "你是一个用户问题判断器，专门用于判别你是否知道用户当前问题的答案。\
                                                              如果不知道，请回答“%s”，若知道，请正常回答" % sk},
                                                  {"role": "user", "content": "请问，GPT-3.5微调总共分为几步？"},
                                                  {"role": "assistant", "content": "%s" % sk},
                                                  {"role": "user", "content": q}
                                                ]
                                              )
    res = response.choices[0].message.content
    # Treat a sentinel echo or common refusal phrases as "doesn't know".
    if sk in res or '对不起' in res or '抱歉' in res or '超出知识库' in res:
        return (True)
    else:
        return (res)

def auto_search_answer(q):
    """
    Answer `q`, falling back to an online search when the model doesn't know.

    :param q: the user's question, string
    :return: answer text — either the model's direct reply or the result of a
             tool-assisted search via get_answer()
    """
    # First ask the judge model; it returns True when a search is needed.
    res = identify_model(q)
    if res is True:
        messages = [{"role": "user", "content": q}]
        # BUG FIX: the original also passed function_call={...}, a keyword
        # that run_conversation() does not accept, raising TypeError on
        # every search fallback.
        res = run_conversation(messages=messages,
                               functions_list=[get_answer],
                               model="glm-4")

    return res
