# -*- coding: utf-8 -*-
import scrapy
from urllib.parse import urljoin
from ..items import DailispiderItem
from DaiLiSpider.bloom_filter import Bloom
"""
1、布隆过滤，过滤掉已经爬过的ip地址
2、settings中的缓存？

"""


class DailiSpider(scrapy.Spider):
    """Scrape free proxy listings from 89ip.cn, page by page.

    Each table row becomes a ``DailispiderItem``; items then flow through
    ``CheckIpPipeline`` (liveness check) and ``SqlitePipeline`` (storage).
    A persistent Bloom filter drops proxies already seen in earlier runs.
    """

    name = 'daili_89'
    allowed_domains = ['89ip.cn']
    start_urls = ['http://www.89ip.cn/']

    custom_settings = {
        'ROBOTSTXT_OBEY': False,
        'CONCURRENT_REQUESTS': 4,
        'COOKIES_ENABLED': False,
        'DOWNLOAD_DELAY': .3,
        'DOWNLOADER_MIDDLEWARES': {
            # Random-proxy middleware, currently disabled.
            # 'DaiLiSpider.downloadermiddlewares.ProxyMiddleware': 300,

        },
        'ITEM_PIPELINES': {
            'DaiLiSpider.pipelines.CheckIpPipeline': 299,
            'DaiLiSpider.sqlitepipeline.SqlitePipeline': 300,
        },
        # 'DEFAULT_REQUEST_HEADERS': {
        #     'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
        #     'Accept-Language': 'en',
        #     'Accept-Encoding': 'gzip, deflate, br',
        #     'User-Agent': '',
        #     'Cookie': '',
        #     'Referer': '',
        # },
        'DB_NAME': 'daili',
    }

    def parse(self, response):
        """Initialize the Bloom filter and kick off pagination at page 1.

        ``parse`` runs once per start URL, so the filter is created here
        and shared with ``parse_all_page`` via ``self.bloom``.
        """
        # NOTE(review): filename says 'daili_98' while this spider is
        # 'daili_89' — looks like a typo, but renaming it would orphan the
        # existing persisted filter file; confirm before changing.
        self.bloom = Bloom(bloom_filename='daili_98.bm')

        yield scrapy.Request(
            url=response.url,
            callback=self.parse_all_page,
            dont_filter=True,
            meta={
                'page': 1,
            },
        )

    def parse_all_page(self, response):
        """Extract proxy rows from one listing page and follow the next.

        Yields a ``DailispiderItem`` for every row the Bloom filter has
        not seen before; pagination stops when a page has no table rows.
        """
        rows = response.xpath("//table[@class='layui-table']/tbody/tr")
        for row in rows:
            fields = [cell.strip() for cell in row.xpath("td/text()").extract()]
            if len(fields) < 5:
                # Malformed/partial row — skip instead of raising IndexError.
                continue

            item = DailispiderItem()
            item['ip'] = fields[0]
            item['ip_port'] = fields[1]
            item['ip_address'] = fields[2]
            item['ip_operator'] = fields[3]
            item['last_check_time'] = fields[4]
            self.logger.debug('scraped proxy %s:%s (%s, %s, %s)',
                              fields[0], fields[1], fields[2],
                              fields[3], fields[4])

            # Only emit proxies the Bloom filter has not recorded yet.
            if not self.bloom.fliter_data(item):
                yield item

        # Follow the next page only while the current one yielded rows.
        if rows:
            next_page = response.meta['page'] + 1
            yield scrapy.Request(
                url=urljoin(response.url, f'index_{next_page}.html'),
                callback=self.parse_all_page,
                dont_filter=True,
                meta={
                    'page': next_page,
                },
            )
