from scrapy.utils.project import get_project_settings
from scrapy.crawler import CrawlerRunner
import time
from twisted.internet import reactor
import logging

# Configure root logging: append to spider_log.txt with timestamped entries.
logging.basicConfig(filename='spider_log.txt', level=logging.INFO, encoding="utf-8",
                    format='%(asctime)s - %(message)s', datefmt='%Y-%m-%d %H:%M:%S')

# Load the Scrapy project settings (requires running inside a Scrapy project,
# or SCRAPY_SETTINGS_MODULE being set).
settings = get_project_settings()
# CrawlerRunner (unlike CrawlerProcess) does not manage the reactor itself,
# which lets us schedule repeated crawls with reactor.callLater below.
runner = CrawlerRunner(settings)

def run_spider():
    """Start one run of the 'weibo_top' spider and reschedule itself.

    Fires the crawl via the module-level CrawlerRunner; when the crawl
    deferred completes (success or failure), logs the outcome and schedules
    the next run 7200 seconds (2 hours) later on the Twisted reactor.
    """
    st = time.time()
    logging.info(f"weibo_top crawl started at {time.ctime(st)}")
    d = runner.crawl('weibo_top')

    def on_crawl_finished(result):
        # addBoth delivers None on success and a twisted Failure on error;
        # previously failures were silently ignored here.
        et = time.time()
        duration = (et - st) / 60
        if result is not None:
            logging.error("weibo_top crawl failed: %s", result)
        logging.info(f'weibo_top crawl duration {duration:.2f} min')
        # Schedule the next crawl regardless of outcome, so a transient
        # failure does not stop the periodic loop.
        reactor.callLater(7200, run_spider)

    # addBoth (rather than addCallback) ensures rescheduling happens even
    # when the crawl errs out.
    d.addBoth(on_crawl_finished)

if __name__ == '__main__':
    # Kick off the first crawl, then hand control to the Twisted event loop.
    run_spider()
    try:
        # reactor.run() blocks until reactor.stop() is called. Twisted
        # installs its own SIGINT handler, so KeyboardInterrupt normally
        # will not propagate here; the except is a defensive fallback.
        reactor.run()
    except KeyboardInterrupt:
        # Guard against ReactorNotRunning: stop() raises if the reactor
        # has already finished shutting down.
        if reactor.running:
            reactor.stop()