import scrapy
import re
from scrapy import cmdline
from demo_59.items import ZuFangItem


class Demo59Spider(scrapy.Spider):
    """Spider that scrapes rental-housing ("chuzu") listings from cs.58.com.

    Starts at the housing category page, follows the rental-listings link,
    yields one ZuFangItem per listing, and paginates through pages 2-10.
    """
    name = 'demo59'
    allowed_domains = ['cs.58.com']
    start_urls = ['https://cs.58.com/house.shtml']

    def parse(self, response):
        """Extract the rental / second-hand category links and follow the rental one."""
        # First two category-head links: rental housing and second-hand housing.
        links = response.xpath('//div[@class="ui-category-head"]/a/@href')[0:2].extract()
        print("租房和二手房链接:", links)
        for link in links:
            # Only follow the rental ('chuzu') listing page; the second-hand
            # ('ershoufang') link is intentionally ignored.
            if 'chuzu' in link:
                yield scrapy.Request(url=link, callback=self.get_zufang_data)

    def get_zufang_data(self, response):
        """Parse one rental-listing page, yield ZuFangItem items, then paginate."""
        # Listing titles
        titles = response.xpath('//div[@class="des"]/h2/a/text()').extract()
        # Price nodes; their full text is gathered below with string(.)
        price_list = response.xpath('//div[@class="money"]')
        # Detail-page links
        hrefs = response.xpath('//div[@class="des"]/h2/a/@href').extract()
        # Address nodes
        address_list = response.xpath('//p[@class="infor"]')
        for title, price, href, address in zip(titles, price_list, hrefs, address_list):
            # string(.) concatenates all descendant text of the node.
            # Guard against extract_first() returning None (empty node set).
            a = address.xpath('string(.)').extract_first() or ''
            # The address text contains NBSP and newline noise; collapse it.
            # BUG FIX: re.sub's 4th positional argument is `count`, not `flags`;
            # the original passed re.S (== 16) there, silently capping the number
            # of replacements at 16. The pattern has no '.' so re.S was useless —
            # drop it entirely.
            zufang_addre = re.sub(r'\xa0|\n', '-', a).replace('  ', '').strip()
            zf_item = ZuFangItem()
            zf_item['fang_title'] = title.strip()
            zf_item['fang_price'] = price.xpath('string(.)').extract_first().strip()
            zf_item['fang_href'] = href
            zf_item['fang_address'] = zufang_addre
            print("租房数据：", zf_item)
            yield zf_item

        # Follow pagination: pages 2 through 10 of the rental listings.
        for page in range(2, 11):
            zufang_next_url = "https://cs.58.com/chuzu/pn{}".format(page)
            yield scrapy.Request(url=zufang_next_url, callback=self.get_zufang_data)

if __name__ == '__main__':
    # Launch this spider directly via the scrapy CLI when run as a script.
    cmdline.execute('scrapy runspider demo59.py'.split())