# -*- coding: utf-8 -*-
# @Author   : SongLiangCheng
# @QQ       : 2192383945
# @Time     : 2022/11/29 8:48
# @File     : http.py 
# @Project  : scraping
# @Desc     :  Generic crawling helper for paginated scraping
import logging
import re
import sys
import threading
import time

from concurrent.futures import Future, ThreadPoolExecutor, wait
from concurrent.futures._base import Future
from concurrent.futures.thread import ThreadPoolExecutor

# Module-level logger; handlers/levels are configured by the application.
logger = logging.getLogger(__name__)

# Lazy %-style args: the message is only formatted if INFO is actually enabled.
logger.info('%s start', __name__)
def get_page(url: str, get_doc, parse_content, parse_next_url, save_contents, works: int):
    """Crawl pages starting from *url*, parsing each page's content concurrently.

    For each page, ``get_doc(url)`` must return ``(doc, is_parser)``; a falsy
    ``is_parser`` means the fetch/parse failed and the same URL is retried.
    ``parse_content(doc)`` is submitted to a thread pool while the crawl moves
    on; ``parse_next_url(doc)`` returns the next page's URL and is expected to
    raise when there is no next page, which terminates the crawl.  Once every
    submitted task has finished, ``save_contents`` is called with the list of
    completed ``Future`` objects.

    :param url: URL of the first page to crawl.
    :param get_doc: callable ``url -> (doc, is_parser)``.
    :param parse_content: callable ``doc -> result``; runs in the pool.
    :param parse_next_url: callable ``doc -> next_url``; raising ends the crawl.
    :param save_contents: callable invoked with the list of done Futures.
    :param works: number of worker threads in the pool.
    """
    log = logging.getLogger(__name__)
    tasks: list[Future] = []
    # Context manager guarantees the pool is shut down even if a
    # user-supplied callback raises unexpectedly.
    with ThreadPoolExecutor(max_workers=works) as pool:
        while True:
            doc, is_parser = get_doc(url)
            if not is_parser:
                # Fetch/parse failed: retry the same URL.
                continue
            # Parse and store this page's content off the crawl thread.
            tasks.append(pool.submit(parse_content, doc))
            try:
                url = parse_next_url(doc)
                log.info(url)
            except Exception as e:
                # Expected terminator: no "next page" link could be derived.
                log.info(e)
                break
        # Block until every parse task completes.  This replaces the original
        # poll-every-3-seconds loop: no busy waiting, no fixed 3 s latency.
        wait(tasks)
        log.info(threading.enumerate())
        save_contents(tasks)

logger.info(f'{__name__} end')
