"""
粉笔爬虫
爬取内容：面试题目
"""
import requests
from dataModels import Category, Interview
import json


class FbSpider:
    """Spider for fenbi.com interview questions.

    Pulls the category catalog, then every interview question in each
    category, persisting rows through the peewee models ``Category`` and
    ``Interview``. The site rotates an ``acw_tc`` anti-crawler cookie that
    must be appended to an otherwise static session cookie on each request.
    """

    def __init__(self):
        # Landing page of the question bank (reference only, never fetched).
        self.homepage = 'https://www.fenbi.com/spa/tiku/guide/catalog/gwyms?prefix=gwyms'
        self.user_agent = "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/124.0.0.0 Safari/537.36"
        # Static part of the cookie: session/login tokens captured from a browser.
        self.cookies_body = "sensorsdata2015jssdkcross=%7B%22distinct_id%22%3A%2218ffb24f5b2255-0ad5207a289969-13462c6f-2073600-18ffb24f5b3951%22%2C%22first_id%22%3A%22%22%2C%22props%22%3A%7B%22%24latest_traffic_source_type%22%3A%22%E8%87%AA%E7%84%B6%E6%90%9C%E7%B4%A2%E6%B5%81%E9%87%8F%22%2C%22%24latest_search_keyword%22%3A%22%E6%9C%AA%E5%8F%96%E5%88%B0%E5%80%BC%22%2C%22%24latest_referrer%22%3A%22https%3A%2F%2Fwww.baidu.com%2Flink%22%7D%2C%22%24device_id%22%3A%2218ffb24f5b2255-0ad5207a289969-13462c6f-2073600-18ffb24f5b3951%22%7D; userid=120574930; sess=9/pnmDsn0pRE/dc2gdPbl7HZma4eQUlJASHAHwMe+dbbHTkPoIHztfawixZ1QRIgfq9FyT2ewTgbBG9cKQ4/VlJ7Pos7qPGBjGgr2lLcInA=; sid=3678081; persistent=5eWbG85PQ5bNKAkRdUMxBa+guQR4bGPr9O4seVKlulb6DGXBdL4BnrTYBpxd+Z7oyaxWyQDji4uSX7VgriWzfA==; "
        # Full cookie actually sent; rebuilt by get_cookies() per request.
        self.cookies = ""

    def get_cookies(self, cookies_acw_tc):
        """Assemble the full Cookie header: static body + rotating acw_tc part."""
        self.cookies = self.cookies_body + cookies_acw_tc

    def get_html(self, url, cookies_acw_tc):
        """GET *url* with the assembled cookie and return the response.

        Raises Exception when the status is neither 200 nor 304, which in
        practice means the cookie is stale or rejected.
        """
        self.get_cookies(cookies_acw_tc)
        headers = {
            "User-Agent": self.user_agent,
            "Cookie": self.cookies,
        }
        # timeout keeps the spider from hanging forever on a dead connection
        response = requests.get(url, headers=headers, timeout=30)
        if response.status_code not in (200, 304):
            raise Exception(f"Cookie错误，代码为：{response.status_code}")
        return response

    def get_question(self):
        """Fetch the category catalog, upsert each category, then crawl it."""
        cate_url = "https://tiku.fenbi.com/api/gwyms/categories?&filter=giant&app=web&kav=100&av=100&hav=100&version=3.0.0.0"
        cookies_acw_tc = "acw_tc=0bdd34ea17179154112698283ee994daa2c2f940ff6422eddee601ecea84de"
        cate_str = self.get_html(cate_url, cookies_acw_tc).text
        cate_dict = json.loads(cate_str)
        for cat in cate_dict:
            category = Category()
            category.id = cat['id']
            category.name = cat['name']
            category.count = cat['count']
            # peewee: plain save() issues an UPDATE (cannot create a row);
            # force_insert=True issues an INSERT for a first-seen category.
            if Category.select().where(Category.id == category.id).exists():
                category.save()
            else:
                category.save(force_insert=True)

            self.extract_interview(cat['id'], cat['name'])

    def extract_interview(self, keypointid, catename):
        """Crawl every interview question under one category and persist it.

        :param keypointid: category id used as the keypointId query parameter
        :param catename: category display name (stored on each Interview row)
        """
        print(f"正在拉取大类：{catename}")
        interv_id_url = f"https://tiku.fenbi.com/api/gwyms/giants?keypointId={keypointid}&kav=100&av=100&hav=100&app=web"
        interv_resp = self.get_html(interv_id_url, "")
        # SECURITY: the body is a JSON array of question ids; parse it with
        # json.loads — never eval() — so remote content cannot execute code.
        interv_id_list = json.loads(interv_resp.text)
        # The server rotates the acw_tc cookie on every response; reuse the
        # freshest value for the per-question requests below.
        interv_resp_cookie = interv_resp.headers["Set-Cookie"].split(';')[0]
        for interv_id in interv_id_list:
            print(f"------正在拉取题目：{interv_id}")
            interv_url = f"https://tiku.fenbi.com/api/gwyms/universal/auth/solutions?type=9&questionIds={interv_id}&kav=100&av=100&hav=100&app=web"
            interv_str = self.get_html(interv_url, interv_resp_cookie).text
            interv_dict = json.loads(interv_str)
            solution = interv_dict["solutions"][0]
            interview = Interview()
            interview.id = interv_id
            interview.category_id = keypointid
            interview.category_name = catename
            interview.answer = ""
            interview.analysis = ""
            interview.difficulty = "0"
            interview.source = ""
            interview.question = solution["content"]
            if solution["difficulty"]:
                interview.difficulty = solution["difficulty"]
            # Accessory labels: 'stzd' = analysis text, 'sfdt' = model answer.
            for lab in solution["solutionAccessories"]:
                if lab["label"] == 'stzd':
                    interview.analysis = lab["content"]
                if lab["label"] == 'sfdt':
                    interview.answer = lab["content"]
            if solution["source"]:
                interview.source = solution["source"]

            if Interview.select().where(Interview.id == interview.id).exists():
                # Already stored — skip instead of re-writing the row.
                print(f"------题目已存在：{interv_id}")
                continue
            interview.save(force_insert=True)  # INSERT the new question
            print(f"------题目拉取完成：{interv_id}")
        return


if __name__ == '__main__':
    # Entry point: build the spider and crawl the full question catalog.
    spider = FbSpider()
    spider.get_question()
