# -*- coding: utf-8 -*-
from scrapy import Spider, Request
from scrapy.selector import Selector
import requests
import re
from lxml import etree
import json
import time
from  szhouse.items import  SzhouseItem


class licaiSpider(Spider):
    """Spider for Lianjia Suzhou rental listings (https://su.lianjia.com/zufang/).

    Parses each listing page and yields one ``SzhouseItem`` per listing with:
    name, detail-page link, address, layout (``fangxing``), area (``mianji``),
    publish date, tag list (``detail``) and monthly rent (``money``).
    """

    name = 'lianjiahouse'  # listing pages look like https://su.lianjia.com/zufang/pg3/#contentList
    start_urls = ['https://su.lianjia.com/zufang/']

    # Exclusive upper bound for numbered pages: range(1, max_pages).
    # Default 2 preserves the original behavior (only pg1 is re-requested);
    # raise it to crawl deeper. Scrapy's dupefilter drops repeated URLs.
    max_pages = 2

    custom_settings = {
        'ITEM_PIPELINES': {
            'szhouse.pipelines.SzhousePipeline': 300,
        },
        'DOWNLOAD_DELAY': 0.1,
        'DOWNLOAD_TIMEOUT': 20,
    }

    @staticmethod
    def _clean(text):
        """Strip spaces/newlines from an extracted fragment.

        Returns '' when *text* is None so callers never crash on listings
        that are missing a node (the original code raised AttributeError,
        or stored the literal string "None", in that case).
        """
        if text is None:
            return ''
        return text.replace(' ', '').replace('\n', '')

    def parse(self, response):
        """Parse one listing page: yield an item per listing, then paginate."""
        rows = response.xpath(
            '//div[@class="content w1150"]'
            '/div[@class="content__article"]'
            '/div[@class="content__list"]'
            '/div[@class="content__list--item"]'
        )
        main = './div[@class="content__list--item--main"]'
        des = main + '/p[@class="content__list--item--des"]'

        for row in rows:
            # A fresh item per row: reusing one mutable item across
            # iterations yields aliased objects to async pipelines.
            item = SzhouseItem()

            item['house_name'] = self._clean(row.xpath(
                main + '/p[@class="content__list--item--title twoline"]/a/text()'
            ).extract_first())
            item['link'] = 'https://su.lianjia.com' + (
                row.xpath('./a/@href').extract_first() or ''
            )

            # Listings tied to an estate carry an <a> with the estate name;
            # the positional text() nodes in the des <p> then shift by one.
            address_a = row.xpath(des + '/a[1]/text()').extract_first()
            if address_a is None:
                item['address'] = row.xpath(des + '/span/text()').extract_first()
                item['fangxing'] = self._clean(row.xpath(des + '/text()[5]').extract_first())
                item['mianji'] = self._clean(row.xpath(des + '/text()[3]').extract_first())
            else:
                item['address'] = address_a
                item['fangxing'] = self._clean(row.xpath(des + '/text()[6]').extract_first())
                item['mianji'] = self._clean(row.xpath(des + '/text()[4]').extract_first())

            item['fabuDate'] = row.xpath(
                main + '/p[@class="content__list--item--time oneline"]/text()'
            ).extract_first()

            # Tag list such as "精装,近地铁," — the trailing comma is kept
            # for compatibility with the original output format; extract()
            # naturally skips empty <i> nodes that used to crash the loop.
            tags = row.xpath(
                main + '/p[@class="content__list--item--bottom oneline"]/i/text()'
            ).extract()
            item['detail'] = ''.join(tag + ',' for tag in tags)

            item['money'] = row.xpath(main + '/span/em/text()').extract_first()

            yield item

        # Follow numbered pages; already-seen URLs are deduplicated by Scrapy.
        for page in range(1, self.max_pages):
            yield Request(
                'https://su.lianjia.com/zufang/pg{}/'.format(page),
                callback=self.parse,
            )




