# -*- coding: utf-8 -*-
import scrapy
from scrapy.spiders import CrawlSpider,Rule
from scrapy.linkextractors import LinkExtractor
from snowball_master.items import SnowballMasterItem
# from scrapy_redis.spiders import RedisCrawlSpider

class MasterSpider(CrawlSpider):
    """Master spider that walks the 5i5j (Beijing) second-hand listing pages.

    ``parse`` only follows pagination: it prefers the explicit "next page"
    anchor and falls back to a ``wscckey``-tokenised pagination URL scraped
    from the raw page body (presumably an anti-scrape token — TODO confirm).
    ``parse_item`` records the responding page's URL in an item.

    NOTE(review): overriding ``parse`` on a CrawlSpider normally breaks the
    Rule machinery, but the ``rules`` here are commented out, so this class
    effectively behaves as a plain Spider.
    """
    name = 'master'
    allowed_domains = ['bj.5i5j.com']
    # start_urls = ['http://xueqiu.com/']
    start_urls = ['https://bj.5i5j.com/ershoufang/n1518/']

    # Fallback pagination URL carrying the "wscckey" token. Dots are escaped
    # so '.' matches literally (the original pattern let '.' match anything).
    _WSCCKEY_RE = r"https://bj\.5i5j\.com/ershoufang/n[0-9]+/\?wscckey=[0-9a-z_]+"

    def parse(self, response):
        """Follow pagination without emitting items.

        Prefer the next-page anchor in the pager div; when it is absent,
        fall back to the first wscckey URL found in the page source. If
        neither exists, pagination simply stops (the original code raised
        IndexError on an empty regex result here).
        """
        next_href = response.xpath(
            "//div[@class='pageSty rf']/a[1]/@href").extract_first()
        if next_href:
            yield scrapy.Request(url=response.urljoin(next_href),
                                 callback=self.parse, dont_filter=True)
            return

        fallback = response.selector.re(self._WSCCKEY_RE)
        if fallback:
            self.logger.debug("wscckey fallback URLs: %s", fallback)
            yield scrapy.Request(url=fallback[0],
                                 callback=self.parse, dont_filter=True)
        else:
            # Dead end: no next link and no wscckey URL on this page.
            self.logger.info("no further pagination found at %s", response.url)

    # rules = (
    #     Rule(LinkExtractor(allow=[r"/ershoufang/n[0-9]+/\?wscckey=[0-9a-z_]+",r'/ershoufang/n[0-9]+/$']), callback='parse_item',
    #          follow=True),
    # )

    def parse_item(self, response):
        """Return a fresh item holding the responding page's URL.

        A new ``SnowballMasterItem`` is built per response: the original
        code reused one class-level item object, so every returned item
        aliased the same dict and later pages overwrote earlier results.
        """
        item = SnowballMasterItem()
        item['url'] = response.url
        self.logger.debug("collected url: %s", item['url'])
        return item
