#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    : 2024/2/21 15:57
# @Author  : 王凯
# @File    : timing_run.py
# @Project : scrapy_spider

import os
import subprocess
from pathlib import Path

from loguru import logger
import concurrent.futures
from tqdm import tqdm

# Directory containing the scrapy spider modules (tax_policy/spiders),
# resolved relative to this file so the script works regardless of cwd.
spider_path = Path(__file__).parent / 'tax_policy' / 'spiders'


def get_all_spiders():
    """Discover all spider module names under the spiders directory.

    A spider module is any file matching ``*policy.py`` found recursively
    below ``spider_path``.  The returned names are the file stems (module
    names without the ``.py`` suffix), as expected by ``scrapy crawl``.

    Returns:
        list[str]: de-duplicated spider names.
    """
    # Path.rglob replaces the manual os.walk loop, and Path.stem strips
    # exactly the file extension.  The original `file.split('.py')[0]`
    # truncated at the FIRST occurrence of '.py' anywhere in the name
    # (e.g. 'a.pyx_policy.py' -> 'a'), which was a latent bug.
    ret = {path.stem for path in spider_path.rglob('*policy.py') if path.is_file()}
    logger.info(f"找到爬虫文件 {len(ret)} 个")
    return list(ret)


def run_one(spider_name):
    """Run a single scrapy spider in a subprocess (best-effort).

    Failures are logged and swallowed so that one broken spider does not
    abort the whole batch driven by run().

    Args:
        spider_name: the spider's name as accepted by ``scrapy crawl``.
    """
    try:
        # List argv (shell=False) avoids shell-injection issues;
        # check=True raises CalledProcessError on a non-zero exit code.
        subprocess.run(f"scrapy crawl {spider_name}".split(), check=True)
    except Exception:
        # Keep best-effort semantics, but record the failure instead of
        # silently dropping it (the original bare `pass` hid all errors
        # and made the exception check in run() dead code).
        logger.exception(f"爬虫 {spider_name} 运行失败")


def run():
    """Launch every discovered spider concurrently and wait for completion.

    Sets SCRAPY_PROJECT so scrapy picks the batch-run settings profile,
    then fans the spiders out over a 10-worker thread pool (threads are
    appropriate here: each task just blocks on a subprocess).

    Raises:
        Exception: re-raised from the first failed task, if any.
    """
    # NOTE: use "linux_runtime_filter" instead for the incremental
    # (scheduled) crawl profile; "batch_run" is the full batch profile.
    os.environ["SCRAPY_PROJECT"] = "batch_run"
    spider_names = get_all_spiders()
    with concurrent.futures.ThreadPoolExecutor(max_workers=10) as executor:
        futures = [executor.submit(run_one, name) for name in spider_names]
        completed = tqdm(concurrent.futures.as_completed(futures),
                         total=len(futures),
                         desc=f"爬虫任务 pid:{os.getpid()}")
        for future in completed:
            # Future.result() re-raises the task's stored exception (if
            # any), replacing the original's double call to .exception().
            future.result()


if __name__ == '__main__':
    # Script entry point: run the full batch crawl.
    run()
