import scrapy
from ..items import WawjItem
from scrapy import Request


class ZufangSpider(scrapy.Spider):
    """Spider that crawls rental-house listings from hz.5i5j.com.

    Starts at the first listing page and follows numbered pagination
    (``/zufang/n<page>/``) up to ``max_page``, yielding one ``WawjItem``
    per listing.
    """

    name = 'zufang'
    allowed_domains = ['hz.5i5j.com']
    # Initial crawl URL (first listing page).
    start_urls = ['https://hz.5i5j.com/zufang/']
    # Template for paginated listing pages; %d is the page number.
    url = 'https://hz.5i5j.com/zufang/n%d/'
    # Next page to request; page 1 is covered by start_urls.
    page = 2
    # Last page to crawl (inclusive). Raise to crawl deeper.
    max_page = 10

    def parse(self, response):
        """Extract one item per listing and schedule the next page.

        :param response: the listing-page response to parse
        :yields: ``WawjItem`` instances, then a ``Request`` for the
            next page while ``page`` has not passed ``max_page``.
        """
        li_list = response.xpath("//ul[@class='pList']/li")
        for li in li_list:
            # Create a fresh item per listing. Reusing a single item
            # across iterations would yield the same (mutated) object
            # repeatedly, corrupting pipeline/export output.
            item = WawjItem()
            item["house_name"] = li.xpath("./div[2]/h3/a/text()").extract_first()
            item["house_style"] = li.xpath("./div[2]/div[1]/p[1]/text()").extract_first()
            item["house_address"] = li.xpath("./div[2]/div[1]/p[2]/text()").extract_first()
            item["house_detail_address"] = li.xpath("./div[2]/div[1]/p[2]/a/text()").extract_first()
            item["house_condition"] = li.xpath("./div[2]/div[1]/p[3]/text()").extract_first()
            item["house_price"] = li.xpath("./div[2]/div[1]/div[1]/p[1]/strong/text()").extract_first()
            item["house_way"] = li.xpath("./div[2]/div[1]/div[1]/p[2]/text()").extract_first()
            yield item

        if self.page <= self.max_page:
            # Build the next paginated URL from the template.
            next_url = self.url % self.page
            self.page += 1
            self.logger.debug('requesting next page: %s', next_url)
            # Re-enter this parse() via callback; dont_filter=True keeps
            # the dupefilter from dropping pages we deliberately revisit.
            yield scrapy.Request(url=next_url, callback=self.parse, dont_filter=True)
