import json
import os
import pandas as pd
from bs4 import BeautifulSoup
import requests
# LLM
import ollama

class Extractor:
    """Scrape Bilibili search results and classify them with a local LLM.

    All methods are stateless ``@staticmethod`` helpers; the class is used
    as a namespace by the driver script at the bottom of this file.
    """

    def __init__(self):
        pass

    @staticmethod
    def __load_file__(path: str) -> dict:
        """Load and return the parsed JSON content of the file at *path*.

        Args:
            path: Path to a JSON file, e.g. "resources/ncee.json".

        Returns:
            The parsed JSON document as a dict.

        Raises:
            OSError: If the file cannot be opened.
            json.JSONDecodeError: If the file is not valid JSON.
        """
        # Explicit encoding so the (Chinese) content decodes identically
        # on every platform instead of depending on the locale default.
        with open(path, encoding="utf-8") as json_file:
            content: dict = json.load(json_file)

        return content

    @staticmethod
    def __load_url__(url: str) -> dict:
        """Fetch *url* and return the parsed JSON body, or None on failure.

        A browser-like User-Agent and a session Cookie are sent because the
        Bilibili search API rejects anonymous / non-browser requests.

        Args:
            url: Fully-formatted search API URL.

        Returns:
            Parsed JSON response dict on HTTP 200; None on any error
            (callers already check for None).
        """
        # Request headers (browser impersonation + session cookie).
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36',
            'Cookie': r'''buvid3=D6635B93-5318-5797-21DE-E3971D1EC71500724infoc; b_nut=1716791300; _uuid=B1DB8DAA-7D2B-76A10-4ED10-5F7D1D43888504691infoc; buvid_fp=b992cdf1cb8d057a83cb5e7965e602aa; buvid4=74F5BA21-0A0B-4D09-5A14-B070A7EFEE3C02056-024052706-%2Bb0i9d9hqltqxg6As6Gf5w%3D%3D; enable_web_push=DISABLE; header_theme_version=CLOSE; CURRENT_FNVAL=4048; rpdid=|(u|Jkm|ukkl0J'u~uYm~m)uJ; bsource=search_baidu; home_feed_column=5; browser_resolution=1691-919; bili_ticket=eyJhbGciOiJIUzI1NiIsImtpZCI6InMwMyIsInR5cCI6IkpXVCJ9.eyJleHAiOjE3MjI1NjA5NzcsImlhdCI6MTcyMjMwMTcxNywicGx0IjotMX0.bLSHn0FUVtLvblUthsFoi6NSSYxre-VQU_J7uBMNcmo; bili_ticket_expires=1722560917; SESSDATA=02035e63%2C1737855843%2C23397%2A71CjChZt4cnYuNWHZIlXu9aKcmCsWnM5x9w3xD--oG-UGzO-_J8mQPNu2PhpNUf5I7FoUSVkhVLWFYZktub0hOdWFQdXA1Y2VHODhpWVh0QV9meF9qRE0tMGxfeGtZMlBPamZmRUprWHJZZEstWmc1eXl1MkFyMHh1OU9RQXpRYkZQaGlxeURjbjJBIIEC; bili_jct=29b6d190b545ed2769f2852b4748a36b; DedeUserID=430527725; DedeUserID__ckMd5=131f1b3932468faa; bp_t_offset_430527725=959777654234415104; b_lsid=713EFC109_191031C422F; sid=76fa112i''',
            'Accept-Encoding': 'gzip, deflate, br, zstd',
            'Accept': 'application/json, text/plain, */*'
        }

        try:
            # Timeout so a stalled connection cannot hang the scraper forever.
            response = requests.get(url=url, headers=headers, timeout=30)
        except requests.RequestException as exc:
            # Network failure: report and fall through to the None return,
            # which callers already treat as "no data for this page".
            print("请求失败: {}".format(exc))
            return None

        # Only a successful response carries a JSON body worth parsing.
        if response.status_code == 200:
            return json.loads(response.text)
        return None

    @staticmethod
    def __analyse_title__(keyword_title: str) -> str:
        """Strip the search-highlight markup from a result title.

        Bilibili wraps the matched keyword in an ``<em>`` tag, e.g.
        ``<em class="...">高考</em>剩余标题``.  Returns ``关键词：剩余标题``
        when a highlight is present, otherwise the title unchanged
        (splitting on '</em>' is a no-op in that case).
        """
        soup = BeautifulSoup(keyword_title, 'html.parser')
        em_tag = soup.em

        # No highlight tag: nothing to strip.
        if em_tag is None:
            return keyword_title.split('</em>')[-1]

        # get_text() is safe when the tag has nested children, where
        # .string would be None and crash the concatenation below.
        return em_tag.get_text() + "：" + keyword_title.split('</em>')[-1]

    @staticmethod
    def format_response(response: dict):
        """Extract the assistant's message text from an ollama chat response.

        Args:
            response: Response dict from ``ollama.chat``.

        Returns:
            The message content string, or an empty list (the original
            sentinel value) when the response lacks the expected shape.
        """
        try:
            # The dict access is the only thing that can fail here, so it
            # belongs inside the try (the original guarded a plain
            # assignment with a bare except instead).
            return response["message"]["content"]
        except (KeyError, TypeError):
            print("解析Json返回异常: {}".format(response))
            return []

    @staticmethod
    def __llm_service__(prompt_key_word: str, category: str):
        """Ask the local qwen2 model to classify *prompt_key_word*.

        Args:
            prompt_key_word: Concatenated title/description/tag text.
            category: Label-set key, either "ncee" or "employment".

        Returns:
            The model's reply text (expected to be one category label),
            or the ``format_response`` failure sentinel.
        """
        # Category label -> short description, per topic. Only the labels
        # (keys) are shown to the model; descriptions document intent.
        categories = {"ncee": {"高考备考与策略": "提供高考备考技巧、复习方法及志愿填报建议。",
                               "高考经历与反思": "分享个人或他人关于高考的经历、感受及反思。",
                               "高考心理与激励": "提供心理调适建议、励志故事以及正面激励的内容。",
                               "高考政策与社会影响": "探讨高考制度对社会结构和个人命运的影响。",
                               "高考挑战与应对": "讨论高考面临的挑战及应对措施。",
                               "高考资源与辅助": "提供高考相关的学习资源和支持。"
                               },
                      "employment": {"职业规划与指导": "提供大学生如何进行职业规划的建议。",
                                     "就业现状与分析": "分析当前大学生就业市场的情况。",
                                     "就业难题探讨": "探讨大学生就业难的原因及其背后的社会经济因素。",
                                     "就业出路与建议": "提供关于大学生如何寻找出路和职业发展的建议。",
                                     "个人经历分享": "分享个人关于就业的经验和教训。",
                                     "就业市场趋势": "分析就业市场的趋势和发展方向。",
                                     }
                      }
        # join() yields "甲、乙、丙" — the original str(dict.keys()) embedded
        # the Python literal "dict_keys([...])" into the prompt.
        system_prompt = r'''你是一名文章归纳师，你的角色是基于制定的内容进行归类。类别范围在：{categories}'''.format(
            categories="、".join(categories[category]))
        prompt = r'''把下述内容归纳，从中进行分类：{content}
                            要求：
                            1. 以文本形式返回最匹配的一个类别，不需要包含其他信息
                            '''.format(content=prompt_key_word)

        response = ollama.chat(model='qwen2',
                               stream=False,
                               messages=[
                                   {'role': 'system', 'content': system_prompt},
                                   {'role': 'user', 'content': prompt}],
                               )
        result_content = Extractor.format_response(response)
        return result_content


if __name__ == '__main__':
    # Search API URL templates; {page} is filled in per request.
    ncee_url = "https://api.bilibili.com/x/web-interface/wbi/search/all/v2?__refresh__=true&_extra=&context=&page={page}&page_size=42&order=&duration=&from_source=&from_spmid=333.337&platform=pc&highlight=1&single_column=0&keyword=高考"
    employment_url = "https://api.bilibili.com/x/web-interface/wbi/search/all/v2?__refresh__=true&_extra=&context=&page={page}&page_size=42&order=&duration=&from_source=&from_spmid=333.337&platform=pc&highlight=1&single_column=0&keyword=大学生就业"

    data_category_dict = {
        "ncee": ncee_url,
        "employment": employment_url
    }

    columns = ["id", "author", "arcurl", "title", "description", "play", "favorites", "video_review", "review", "tag", "category", "content"]
    for data_category, url_template in data_category_dict.items():
        data_list = []
        # Pages 1..19 of search results for this keyword.
        for page_id in range(1, 20):
            content = Extractor.__load_url__(url_template.format(page=page_id))
            # None (request failed) and empty dict both mean: skip this page.
            if not content:
                continue
            for video_data in content["data"]["result"]:
                # Search results mix media types; only the "video" result
                # blocks carry the fields collected below.
                if video_data.get("result_type") != "video":
                    continue
                for data in video_data["data"]:
                    # Parse the highlighted title once instead of twice per row.
                    title = Extractor.__analyse_title__(data["title"])
                    tmp_detail = title + "," + data["description"] + "," + data["tag"]
                    data_list.append([data["id"], data["author"], data["arcurl"],
                                      title, data["description"],
                                      data["play"], data["favorites"], data["video_review"],
                                      data["review"], data["tag"],
                                      Extractor.__llm_service__(tmp_detail, data_category),
                                      tmp_detail])

            # Rewrite the sheet after every page so partial progress
            # survives a crash mid-crawl (accumulated rows so far).
            df = pd.DataFrame(data=data_list, columns=columns)
            df.to_excel("./resources/" + data_category + ".xlsx", index=False)

    # ncee_path = "resources/ncee.json"
    # employment_path = "resources/employment.json"
    # data_category_dict = {
    #     "ncee": ncee_path,
    #     "employment": employment_path
    # }

    # for data_category in data_category_dict:
    #     path = data_category_dict[data_category]
    #     columns = ["id", "author", "arcurl", "title", "description", "tag"]
    #     data_list = []
    #     content = Extractor.__load_file__(path)
    #     if content is not None and len(content) > 0:
    #         results = content["data"]["result"]
    #         video_data: dict
    #         for video_data in results:
    #             if video_data.__contains__("result_type") and video_data["result_type"] == "video":
    #                 datas: list = video_data["data"]
    #                 for data in datas:
    #                     data_list.append([data["id"], data["author"], data["arcurl"],
    #                                       Extractor.__analyse_title__(data["title"]), data["description"], data["tag"]])
    #
    #     df = pd.DataFrame(data=data_list, columns=columns)
    #     df.to_excel("./resources/" + data_category + ".xlsx", index=False)



