from scrapy.spiders import Rule
from scrapy.linkextractors import LinkExtractor
from kuaidaili_redis.items import KuaidailiRedisItem

from scrapy_redis.spiders import RedisCrawlSpider


class MyCrawlRedisSpider(RedisCrawlSpider):
    """Spider that reads start urls from the redis queue
    (``mycrawlredisspider:start_urls``) and crawls kuaidaili proxy listings,
    following pagination links and yielding one item per ip/port row.
    """
    name = 'mycrawlredisspider'
    # Redis list key that the scrapy-redis scheduler pops start urls from.
    redis_key = 'mycrawlredisspider:start_urls'

    allowed_domains = ['www.kuaidaili.com']

    # Follow every pagination link inside the list navigation bar.
    links = LinkExtractor(restrict_xpaths=r'//div[@id="listnav"]//a')
    rules = (
        Rule(links, callback="parse_test", follow=True),
    )

    def parse_test(self, response):
        """Extract proxy ip/port pairs from the listing table.

        Reads the first two columns of each ``<tbody>`` row and yields a
        ``KuaidailiRedisItem`` per row. ``zip`` pairs the two column lists
        and stops at the shorter one, so a malformed page with unequal
        column counts cannot raise an IndexError.
        """
        print('开始爬取...')
        ip_list = response.xpath('//tbody/tr/td[1]/text()').extract()
        port_list = response.xpath('//tbody/tr/td[2]/text()').extract()
        for ip, port in zip(ip_list, port_list):
            item = KuaidailiRedisItem()
            item['ip'] = ip
            item['port'] = port
            yield item
