import scrapy


class QfangSpider(scrapy.Spider):
    """Spider that crawls second-hand housing listings from qfang.com (Shanghai).

    Starts at page 1 of the sale listings and follows pages sequentially up
    to ``MAX_PAGES``, yielding one dict per listing.
    """

    name = 'qfang'
    allowed_domains = ['qfang.com']
    start_urls = ['https://shanghai.qfang.com/sale/f1']
    # Page counter shared across parse() calls; incremented after each page.
    page_num = 1
    # Upper bound on the number of listing pages to crawl.
    MAX_PAGES = 10

    def parse(self, response):
        """Parse one listing page.

        Yields a dict per listing with name, layout, area, floor, year,
        location, total price and unit price, then schedules a Request for
        the next page while ``page_num`` <= ``MAX_PAGES``.

        :param response: the downloaded listing page.
        """
        li_list = response.xpath('/html/body/div[4]/div/div[1]/div[4]/ul/li')
        for li_temp in li_list:
            # normalize-space() collapses the whitespace padding present in
            # the site's markup.
            f_name = li_temp.xpath('normalize-space(./div[2]/div[1]/a/text())').extract_first()
            f_style = li_temp.xpath('normalize-space(./div[2]/div[2]/p[1]/text())').extract_first()
            f_square_meter = li_temp.xpath('normalize-space(./div[2]/div[2]/p[2]/text())').extract_first()
            f_level = li_temp.xpath('normalize-space(./div[2]/div[2]/p[4]/text())').extract_first()
            f_year = li_temp.xpath('normalize-space(./div[2]/div[2]/p[6]/text())').extract_first()
            f_price = li_temp.xpath('normalize-space(./div[3]/p[1]/span[1]/text())').extract_first()
            f_meter_price = li_temp.xpath('normalize-space(./div[3]/p[2]/text())').extract_first()
            # Join every location segment (district, area, estate) with '-'.
            # str.join avoids the trailing '-' and the literal 'None' that the
            # previous manual counter-based concatenation could produce.
            location = '-'.join(
                seg.strip()
                for seg in li_temp.xpath('./div[2]/div[3]/div/a/text()').extract()
            )
            self.logger.debug(
                '%s %s %s %s %s %s %s %s',
                f_name, f_style, f_square_meter, f_level, f_year,
                location, f_price, f_meter_price,
            )
            yield {
                "f_name": f_name,
                "f_style": f_style,
                "f_square_meter": f_square_meter,
                "f_level": f_level,
                "f_year": f_year,
                "location": location,
                "f_price": f_price,
                "f_meter_price": f_meter_price
            }
        self.page_num += 1
        if self.page_num <= self.MAX_PAGES:
            next_page_url = f"https://shanghai.qfang.com/sale/f{self.page_num}"
            # Generate a Request to crawl the next page of listings.
            yield scrapy.Request(url=next_page_url, callback=self.parse)