#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    : 2022/10/19 0:56
# @Author  : when
# @File    : page_producer.py
# @Description : 需要爬取页面的生产者
import time
import traceback

from loguru import logger
from pymysql.err import OperationalError
from datetime import datetime

from crawler.config.settings import PAGE_NUM
from crawler.orm_model.page import Page
from crawler.orm_model.page import PageStatusChoice
from crawler.website.boss.boss_crawler import BossCrawler
from components.mysql_client.sqlalchemy_connector import SqlConnector


def crawler_page_html(page_objs: list):
    """Crawl the HTML for each given Page ORM object, mutating it in place.

    Each object gets a fresh update_time and a status of FINISH (optimistic
    default), downgraded to FAIL when the crawler reports failure. The crawled
    HTML (possibly empty on failure) is stored on ``page_obj.html``.
    """
    for page in page_objs:
        page: Page
        prefix = f"Page({page.id}): "
        # Optimistically mark as finished up front; flipped to FAIL below.
        page.update_time = datetime.now()
        page.status = PageStatusChoice.FINISH
        logger.info(f"{prefix} 开始爬取")
        crawler = BossCrawler(page)
        ok, html = crawler.get_page_html(wait_time=15, retry_num=3)
        if not ok:
            logger.info(f"{prefix} 爬取失败")
            page.status = PageStatusChoice.FAIL
        page.html = html
        logger.info(f"{prefix} 结束爬取")


def run():
    """Producer main loop: fetch batches of INIT pages, crawl them, persist results.

    Runs forever. On a MySQL ``OperationalError`` (e.g. "server has gone away")
    the connector is rebuilt and the loop continues; on any other exception the
    error is logged with its traceback and the loop backs off briefly so a
    persistent failure cannot spin at 100% CPU.
    """
    db = SqlConnector()
    while True:
        try:
            # Fetch a fixed-size batch of pages still waiting to be crawled.
            page_list = db.query(Page).filter_by(status=PageStatusChoice.INIT).limit(PAGE_NUM).all()
            db.commit()
            if not page_list:
                logger.info("未从数据库中获取到待爬取的记录")
                time.sleep(20)
                continue
            page_ids = [obj.id for obj in page_list]
            logger.info(f"获取待爬取记录，开始爬取... page_ids={page_ids}")
            crawler_page_html(page_list)
            # The Page objects came out of this session's query, so they are
            # session-attached: committing flushes the attribute changes made
            # in crawler_page_html() back to MySQL as UPDATEs.
            db.commit()
            logger.info(f"爬取结果写入数据库完成, page_ids={page_ids}")
        except OperationalError:
            # Connection dropped — log it (so reconnect churn is visible) and rebuild.
            logger.warning("数据库连接异常，重新建立连接")
            db = SqlConnector()
        except Exception as e:
            logger.error(f"主程序错误：{e}\n堆栈信息：{traceback.format_exc()}")
            # Back off so a persistent error cannot hot-spin the loop.
            time.sleep(5)


def test():
    """Ad-hoc smoke test: crawl one known Page row (id=2467) and print the result."""
    db = SqlConnector()
    target = db.query(Page).filter_by(id=2467).first()
    ok, html = BossCrawler(target).get_page_html(wait_time=10, retry_num=3)
    print(ok, html)


# Entry point: start the endless producer loop. The commented-out calls are
# kept for manual one-off runs (init_page presumably seeds the Page table —
# it is not defined in this file; test() smoke-tests a single crawl).
if __name__ == '__main__':
    # init_page()
    run()
    # test()
