#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    : 2024/10/10 14:34
# @Author  : 王凯
# @File    : duperfilter.py
# @Project : scrapy_spider

from scrapy.crawler import Crawler
from scrapy.dupefilters import RFPDupeFilter as BaseDupeFilter
from typing_extensions import Self


class RFPDupeFilter(BaseDupeFilter):
    """Duplicate filter extending Scrapy's default RFPDupeFilter with an
    optional spider-provided bloom filter.

    If the spider exposes a ``bf`` attribute (an object supporting
    ``exists(fingerprint)`` — presumably a redis-backed bloom filter, TODO
    confirm), requests whose callback name is listed in the
    ``BATCH_RUN_PARSER_NAME_LIST`` setting are additionally checked against
    it before the normal in-memory fingerprint set.
    """

    @classmethod
    def from_crawler(cls, crawler: Crawler) -> Self:
        # Build the instance first, then attach per-crawler state to the
        # *instance*. The previous version stored these on the class
        # (cls.spider / cls.bf), which leaks state between crawlers
        # running in the same process.
        instance = super().from_crawler(crawler)
        instance.spider = crawler.spider
        bf = getattr(crawler.spider, "bf", None)
        # Identity check: an empty bloom filter object may be falsy,
        # which a plain truthiness test would silently drop.
        if bf is not None:
            instance.bf = bf
        return instance

    def request_seen(self, request) -> bool:
        """Return ``True`` if *request* was already seen, else record it.

        Order of checks:
        1. The spider's bloom filter (only for requests whose callback
           name is in ``BATCH_RUN_PARSER_NAME_LIST``).
        2. The standard in-memory fingerprint set inherited from the base
           class (persisted to ``self.file`` when job dir is enabled).
        """
        fp = self.request_fingerprint(request)
        bf = getattr(self, "bf", None)
        if bf is not None:
            batch_run_parser_name_list = self.spider.settings.getlist(
                "BATCH_RUN_PARSER_NAME_LIST", ["parse_content", "parse_detail"]
            )
            # request.callback is None when the spider's default ``parse``
            # callback is used; accessing ``__name__`` directly would raise
            # AttributeError for every such request.
            callback_name = getattr(request.callback, "__name__", None)
            if callback_name in batch_run_parser_name_list:
                if bf.exists(fp):
                    self.spider.logger.info(f"bloom filter request url exists, skip: {request}")
                    return True
        if fp in self.fingerprints:
            return True
        self.fingerprints.add(fp)
        if self.file:
            self.file.write(fp + "\n")
        return False
