#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    : 2024/1/30 16:57
# @Author  : 王凯
# @File    : log_collector_custom.py
# @Project : scrapy_spider
import datetime
import pprint

import dateutil.tz
from scrapy.statscollectors import StatsCollector
from utils.logs import logger


class LogStatsCollector(StatsCollector):
    """Stats collector that, when the spider closes, derives human-readable
    summary counters (requests, responses, retries, errors, items, timing)
    from the raw Scrapy stats and dumps them with Chinese labels.
    """

    def __init__(self, crawler):
        super().__init__(crawler)
        logger.info("LogStatsCollector初始化")

    def close_spider(self, spider, reason):
        """Summarize collected stats, then dump and persist them.

        Replaces ``StatsCollector.close_spider`` so the dumped output shows
        derived, labelled counters alongside the remaining raw Scrapy keys.

        :param spider: the spider being closed.
        :param reason: close reason string (unused; kept for interface parity).
        """
        all_requests = self.get_value("downloader/request_count", default=0)
        exception_requests = self.get_value("downloader/exception_count", default=0)
        ignore_requests = self.get_value("downloader/exception_type_count/scrapy.exceptions.IgnoreRequest", default=0)
        filtered_requests = self.get_value("bloomfilter/filtered", default=0)

        all_responses = self.get_value("downloader/response_count", default=0)
        _200_responses = self.get_value("downloader/response_status_count/200", default=0)
        _404_responses = self.get_value("downloader/response_status_count/404", default=0)

        retry_requests = self.get_value("retry/count", default=0)
        log_count_error = self.get_value("log_count/ERROR", default=0)
        # Exceptions minus deliberate IgnoreRequest = genuinely failed requests.
        error_responses = exception_requests - ignore_requests
        finish_time = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        # BUGFIX: "start_time" can be absent if the crawl aborted before the
        # engine recorded it; the old code crashed with AttributeError on
        # None.astimezone(). Guard and fall back to None.
        raw_start = self.get_value("start_time")
        if raw_start is not None:
            start_time = raw_start.astimezone(dateutil.tz.gettz("Asia/Shanghai")).strftime("%Y-%m-%d %H:%M:%S")
        else:
            start_time = None
        item_count = self.get_value("item_scraped_count", default=0)

        self.set_value("all_requests", all_requests)  # total requests issued
        # When no IgnoreRequest was counted, fall back to the bloom-filter counter.
        self.set_value("ignore_requests", ignore_requests or filtered_requests)
        self.set_value("all_responses", all_responses)  # total responses received
        self.set_value("retry_requests", retry_requests)
        self.set_value("_200_responses", _200_responses)  # successful responses
        self.set_value("_404_responses", _404_responses)
        self.set_value("error_responses", error_responses)  # failed responses
        self.set_value("log_count_error", log_count_error)  # ERROR-level log lines

        # Note: "start_time" is overwritten with its formatted string form.
        self.set_value("start_time", start_time)
        self.set_value("finish_time", finish_time)
        self.set_value("item", item_count)

        # Summary-stat key -> Chinese display label (runtime strings, unchanged).
        stats_print_dict = {
            "start_time": "爬虫开始的时间",
            "finish_time": "爬虫结束的时间",
            "log_count_error": "错误日志个数",
            "all_requests": "所有请求的个数",
            "ignore_requests": "请求被忽略的个数",
            "retry_requests": "重试请求的个数",
            "all_responses": "所有响应的个数",
            "_200_responses": "成功响应的个数",
            "_404_responses": "响应404的个数",
            "error_responses": "错误响应的个数",
            "item": "item总数",
        }

        # Labelled summary values, plus every raw stat not covered by a label.
        info_dict_zh = {v: self._stats.get(k) for k, v in stats_print_dict.items()}
        info_dict = {k: v for k, v in self._stats.items() if k not in stats_print_dict}
        if self._dump:
            logger.info(
                "Dumping Scrapy stats:\n" + pprint.pformat({**info_dict, **info_dict_zh}),
                extra={"spider": spider},
            )
        self._persist_stats(self._stats, spider)
