# -*- coding: utf-8 -*-

from fangtianxia_scrapy.items import  FangtianxiaScrapyItem
from scrapy_redis.spiders import RedisSpider
import scrapy

class FangtianxiaSpider(RedisSpider):
    """Redis-driven spider for new-house listings on cd.newhouse.fang.com.

    Start URLs are popped from the Redis list named by ``redis_key``.
    Each listing page yields one ``FangtianxiaScrapyItem`` per result card
    and follows the "next page" link at most once.
    """

    name = 'fangtianxia'
    # BUG FIX: allowed_domains must hold bare domain names, not URLs.
    # The old value 'https://cd.fang.com/' made OffsiteMiddleware drop the
    # follow-up requests to cd.newhouse.fang.com; 'fang.com' covers all
    # subdomains used by this spider.
    allowed_domains = ['fang.com']
    # start_urls = ['https://cd.newhouse.fang.com/house/s/']
    redis_key = 'fang:start_urls'
    base_domain = 'https://cd.newhouse.fang.com'
    # Page counter shared across parse() calls: pagination is followed only
    # while counter == 1, so the crawl is limited to one "next page" hop.
    # NOTE: `self.counter += 1` below shadows this class attribute with an
    # instance attribute — fine here since Scrapy runs one spider instance.
    counter = 1

    def parse(self, response):
        """Parse one listing page: yield items, then (maybe) the next page.

        :param response: Scrapy ``Response`` for a listing-results page.
        :yields: ``FangtianxiaScrapyItem`` per result card, then optionally a
            ``scrapy.Request`` for the next results page.
        """
        print('*' * 50)
        print('开始接受response')
        print('开始解析...')

        for div in response.xpath('//div[@class="clearfix"]'):
            # The second <img> in the card holds the real (lazy-loaded)
            # thumbnail URLs; a card may carry several, hence getall().
            images = div.xpath('.//div[1]//a//img[2]//@src').getall()
            # Estate name lives in the nlcd_name block of the second column.
            categorys = div.xpath('.//div[2]//div[@class="nlcd_name"]//a/text()').get()
            print('这是spider_url:', images)
            print('这是spider_category:', categorys)
            print('解析完成，开始item...')
            yield FangtianxiaScrapyItem(image_urls=images, category=categorys)

        # The last <li> of the pager holds the relative "next page" href(s).
        next_url = response.xpath('//ul[@class="clearfix"]//li[last()]//a//@href').getall()
        for urls in next_url:
            if self.counter == 1:
                print('开始请求下一页...', self.base_domain + urls)
                # dont_filter=True: pager URLs repeat across pages and would
                # otherwise be discarded by the duplicate filter.
                yield scrapy.Request(self.base_domain + urls,
                                     callback=self.parse,
                                     dont_filter=True)
            else:
                # One pagination hop already taken — stop following links.
                return

        self.counter += 1