import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from scrapy_redis.spiders import RedisCrawlSpider
from tc58.items import Tc58Item


class A58tcSpider(RedisCrawlSpider):
    """Distributed crawl spider for 58.com second-hand housing listings.

    Start URLs are consumed from the Redis key ``58_mation`` (scrapy-redis),
    pagination links are followed via ``rules``, and each listing card on a
    result page is yielded as a ``Tc58Item``.
    """
    name = '58tc'

    # allowed_domains = ['58.com']
    # start_urls = ['https://yt.58.com/ershoufang/p1/']
    redis_key = '58_mation'

    # Pagination link container (section[4] holds the page-number <ul>).
    # NOTE(review): index 1 appears to be rentals, 2 second-hand houses — confirm.
    link1 = LinkExtractor(restrict_xpaths='//*[@id="__layout"]/div/section/section[3]/section[1]/section[4]/div/ul')

    rules = (
        Rule(link1, callback='parse_item', follow=True),
    )

    def parse_item(self, response):
        """Parse one listing page and yield a ``Tc58Item`` per listing card.

        :param response: the downloaded listing page
        :yields: Tc58Item with ``title``, ``info_house``, ``info_addrs``,
            ``info_rmb`` fields
        """
        div_list = response.xpath('//*[@id="__layout"]/div/section/section[3]/section[1]/section[2]/div')
        for div in div_list:
            # Join the text fragments directly instead of slicing the list's
            # repr (str(list)[1:-2]), which leaked quotes and ", " separators
            # into the scraped values; also strip all internal spaces, which
            # the original forgot for info_house (it used .replace("", "")).
            title = div.xpath('./a/div[2]/div[1]/div[1]/h3/text()').extract_first()
            info_house = ''.join(div.xpath('./a/div[2]/div[1]/section/div[1]/p[1]//text()').getall()).replace(' ', '')
            info_addrs = ''.join(div.xpath('./a/div[2]/div[1]/section/div[2]//text()').getall()).replace(' ', '')
            info_rmb = ''.join(div.xpath('./a/div[2]/div[2]//text()').getall()).replace(' ', '')
            self.logger.debug('%s | %s | %s | %s', title, info_house, info_addrs, info_rmb)
            # Create a fresh item per card: mutating and re-yielding a single
            # shared instance can hand pipelines the same object with
            # last-iteration values.
            item = Tc58Item()
            item['title'] = title
            item['info_house'] = info_house
            item['info_addrs'] = info_addrs
            item['info_rmb'] = info_rmb
            yield item

