import logging

import scrapy

from Graze.bloomfilter import BloomFilterHelper
from Graze.items import UrlItem


class GrazeSpider(scrapy.Spider):
    """Crawler that harvests unique absolute URLs starting from douban.

    A Bloom filter (``BloomFilterHelper``) deduplicates URLs; crawling
    stops emitting new requests once the filter's configured maximum
    URL count is reached.
    """

    name = 'graze'
    allowed_domains = ['movie.douban.com']
    start_urls = ['https://movie.douban.com']
    # Running count of unique URLs found; first `+= 1` creates a
    # per-instance attribute shadowing this class-level default.
    url_count = 0

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.bl = BloomFilterHelper()
        # Pre-register the seed URL in the filter so it is treated as
        # already seen if encountered again while crawling.
        self.bl.is_url_new(self.start_urls[0])

    def parse(self, response):
        """Yield a UrlItem for every unseen absolute link on the page,
        then follow each new link recursively.

        :param response: scrapy Response for the fetched page.
        """
        # Hoist the loop-invariant cap lookup out of the per-link loop.
        # NOTE(review): assumes the max is constant for the crawl's
        # lifetime — confirm against BloomFilterHelper.
        max_urls = self.bl.get_max_url_num()
        for anchor in response.xpath('//a[@href]'):
            if self.url_count >= max_urls:
                logging.info('reach max url number, program terminated!')
                return

            url = anchor.xpath('./@href').get()
            # Skip missing/relative links and non-HTTP schemes
            # (javascript:, mailto:, ftp:, ...). The explicit scheme
            # tuple avoids false matches like 'httpfoo://'.
            if not url or not url.startswith(('http://', 'https://')):
                continue

            if self.bl.is_url_new(url):
                # Only allocate the item for URLs we actually emit.
                item = UrlItem()
                item['url'] = url
                self.url_count += 1
                # Lazy %-args: message is built only if the record is emitted.
                logging.info('url find[%d]: %s', self.url_count, url)
                yield item
                yield scrapy.Request(url, callback=self.parse)
            else:
                logging.info('url duplicated: %s', url)
