#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    : 2022/10/16 22:54
# @Author  : when
# @File    : boss_crawler.py
# @Description : boss网页爬虫

from loguru import logger
from selenium.webdriver.common.by import By
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions

from crawler.orm_model.page import Page
from crawler.orm_model.page import PageTypeChoice
from crawler.base_module.chrome_driver import ChromeDriver
from components.proxy_utils.zm_proxy.zm_proxy import ZMProxy
from components.mysql_client.sqlalchemy_connector import SqlConnector


def init_page():
    """
    Seed the database with the Boss job-listing page URLs to be crawled.

    Builds search-result URLs (query=python) for every city code, pages 1-10
    each, wraps them in ``Page`` ORM objects typed ``BOSS_JOB_INFO`` and
    bulk-inserts them via a fresh ``SqlConnector`` session.

    :return: None
    """
    db_session = SqlConnector()
    # Boss numeric city code -> city name (name kept for readability only)
    boss_city_map = [[100010000, "全国"], [101010100, "北京"], [101020100, "上海"], [101280100, "广州"], [101280600, "深圳"],
                     [101210100, "杭州"], [101030100, "天津"], [101110100, "西安"], [101190400, "苏州"], [101200100, "武汉"],
                     [101230200, "厦门"], [101250100, "长沙"], [101270100, "成都"], [101180100, "郑州"], [101040100, "重庆"]]
    # Anonymous (not-logged-in) users can view at most 10 result pages.
    page_url = "https://www.zhipin.com/web/geek/job?query=python&city={}&page={}"
    page_list = [
        Page(url=page_url.format(city_num, page_no), type=PageTypeChoice.BOSS_JOB_INFO)
        for city_num, _ in boss_city_map
        for page_no in range(1, 11)
    ]
    db_session.insert(page_list)
    logger.info(f"成功入库{len(page_list)}条待爬取页面数据")


class BossCrawler(ChromeDriver):
    """Selenium crawler for zhipin.com (Boss) pages, one instance per Page object."""

    def __init__(self, page_obj):
        super(BossCrawler, self).__init__()
        # Page ORM object carrying .url, .type and .id of the page to crawl.
        self.page_obj = page_obj
        self.init_driver()

    def __del__(self):
        # getattr guard: __init__/init_driver may fail before the base class
        # sets self.driver, and __del__ must never raise during finalization.
        driver = getattr(self, "driver", None)
        if not driver:
            return
        driver.quit()

    def init_driver(self):
        """(Re)create the headless Chrome driver behind a fresh proxy.

        :raises RuntimeError: if no working proxy could be obtained.
        """
        if self.driver:
            self.driver.quit()
        # Obtain a proxy address for this driver session.
        proxy_format = ZMProxy().get_proxy()
        # In headless mode a user-agent must be supplied explicitly,
        # otherwise the page HTML cannot be retrieved.
        user_agent = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) " \
                     "Chrome/106.0.0.0 Safari/537.36"
        self.init_options(["--headless", f"--user-agent={user_agent}", "--lang=zh_CN.utf-8", "--disable-infobars",
                           f"--proxy-server={proxy_format}", "--disable-gpu", "--ignore-certificate-errors",
                           "--ignore-urlfetcher-cert-requests"])
        super(BossCrawler, self).init_driver()
        # Verify the proxy actually works before using this driver.
        res = self.check_proxy(retry_num=3)
        if not res:
            # raise instead of assert: asserts are stripped under `python -O`
            raise RuntimeError("获取代理失败，结束执行")

    def get_page_html(self, wait_time=0, retry_num=0):
        """
        Fetch ``self.page_obj.url`` and return its HTML.

        :param wait_time: seconds to wait for the page's marker element; the
            wait doubles after each failed attempt as a crude back-off.
        :param retry_num: number of extra attempts after the first failure.
        :return: ``(True, page_source)`` on success,
                 ``(False, error_text)`` when every attempt failed.
        """
        flag = False
        page_source = ""  # defensive default so the return below is always bound
        for i in range(retry_num + 1):
            try:
                # Remember this attempt's wait so the except-branch can double it.
                wait_time_tmp = wait_time
                self.driver.get(self.page_obj.url)
                # Pick the element that marks a fully-loaded page for this
                # page type; may raise selenium TimeoutException below.
                flag_element = None
                if self.page_obj.type == PageTypeChoice.BOSS_JOB_INFO:
                    flag_element = expected_conditions.presence_of_element_located((By.CLASS_NAME, "job-card-wrapper"))
                elif self.page_obj.type == PageTypeChoice.BOSS_JOB_DETAILS:
                    flag_element = expected_conditions.presence_of_element_located((By.CLASS_NAME, "job-boss-info"))

                if flag_element:
                    html_flag = WebDriverWait(self.driver, wait_time).until(flag_element)
                    assert html_flag, "爬取出错，未获取到指定标志元素"
                else:
                    # Unknown page type: no marker element, just wait passively.
                    self.driver.implicitly_wait(wait_time)
                page_source = self.driver.page_source
            except Exception as e:
                # Back off (wait doubles) and report the failure on the next retry.
                wait_time += wait_time_tmp
                page_source = str(e)
                if i != retry_num:
                    error_str = str(e).split('\n')[0]
                    logger.error(f"Page({self.page_obj.id}):爬取失败，进行第{i + 1}次重试... 失败原因：{type(e)} {error_str}")
                # Rebuild the driver (new proxy) before the next attempt.
                self.init_driver()
                continue
            flag = True
            break
        if not flag:
            return False, page_source
        return True, page_source
