# -*- coding: utf-8 -*-
import re

import scrapy
from ftx.fang.items import FangItem

class FtxSpider(scrapy.Spider):
    """Crawl fang.com: list all cities per province, then scrape each
    city's new-house listing pages into ``FangItem``s."""

    name = 'ftx'
    start_urls = ['https://www.fang.com/SoufunFamily.htm']

    def parse(self, response):
        """Walk the province/city table and schedule one listing request
        per city.

        Each ``<tr>`` either opens a new province (non-blank ``<strong>``
        cell) or continues the previous one, so the current province is
        carried across loop iterations.
        """
        province = None
        tr_list = response.xpath('//div[@class="outCont" and @id="c02"]//tr')
        for tr in tr_list:
            province_text = tr.xpath('.//strong/text()').get()
            # A non-blank <strong> cell starts a new province block; a
            # blank/missing one means this row belongs to the previous
            # province.  (.strip() also covers cells holding only spaces,
            # not just a single ' '.)
            if province_text is not None and province_text.strip():
                province = province_text.strip()
                # These groups' links do not follow the
                # <city>.fang.com pattern rewritten below, so skip them.
                if province in ('其它', '香港'):
                    continue
            for a in tr.xpath('.//td[3]/a'):
                city_name = a.xpath('./text()').get()
                city_url = a.xpath('./@href').get()
                if not city_url:
                    # Malformed cell: nothing to request.  (The original
                    # caught the resulting AttributeError but still fell
                    # through to yield a stale/unbound URL.)
                    continue
                # http://<city>.fang.com/ -> http://<city>.newhouse.fang.com/house/s/
                city_newhouse_url = city_url.split('.')[0] + '.newhouse.fang.com/house/s/'
                yield scrapy.Request(
                    url=city_newhouse_url,
                    callback=self.parse_detail,
                    meta={'info': (province, city_name)},
                )
                # NOTE(review): caps the crawl at one city per row —
                # looks like a debug limit; confirm before removing.
                break
            # NOTE(review): caps the crawl at the first table row —
            # looks like a debug limit; confirm before removing.
            break

    def parse_detail(self, response):
        """Parse one new-house listing page into ``FangItem``s and follow
        the "next page" link when present."""
        province, city_name = response.meta['info']
        li_list = response.xpath('//div[contains(@class,"nl_con")]/ul/li')
        for li in li_list:
            try:
                nlc_details = li.xpath('.//div[@class="nlc_details"]')
                house_name = str(nlc_details.xpath('.//div[@class="nlcd_name"]/a/text()').get()).strip()
                rooms = nlc_details.xpath('.//div[contains(@class,"house_type")]/a/text()').getall()
                rooms = [r for r in rooms if r.endswith('居')]
                area = ''.join(nlc_details.xpath('.//div[contains(@class,"house_type")]/text()').getall())
                area = re.sub(r'\s|/|－', '', area)
                # BUG FIX: the original used absolute '//div[...]' for the
                # next two XPaths, which searched the whole document
                # instead of this <li>'s details block.
                address = nlc_details.xpath('.//div[@class="address"]/a/@title').get()
                district_text = ''.join(nlc_details.xpath('.//div[@class="address"]/a//text()').getall())
                # BUG FIX: dropped the stray trailing ']' from the pattern
                # and guard against a missing match instead of letting
                # .group(1) raise AttributeError and discard the item.
                district_match = re.search(r'.*\[(.+)\].*', district_text, re.S)
                district = district_match.group(1) if district_match else None
                is_sale = nlc_details.xpath('.//div[contains(@class,"fangyuan")]/span/text()').get()
                price = ''.join(nlc_details.xpath('.//div[@class="nhouse_price"]//text()').getall())
                price = re.sub(r'\s|价格待定|广告', '', price)
                origen_url = 'https:' + nlc_details.xpath('.//div[@class="nlcd_name"]/a/@href').get()
                yield FangItem(
                    house_name=house_name,
                    rooms=rooms,
                    area=area,
                    address=address,
                    district=district,
                    is_sale=is_sale,
                    price=price,
                    origen_url=origen_url,
                )
            except Exception as e:
                # Best-effort per-item scraping: a malformed <li> is
                # reported and skipped rather than aborting the page.
                print(e)
        # BUG FIX: .get() returns None on the last page; the original
        # crashed with TypeError on the string concatenation.
        next_href = response.xpath('//div[@class="page"]//a[@class="next"]/@href').get()
        if next_href:
            yield scrapy.Request(
                url='https://newhouse.fang.com' + next_href,
                callback=self.parse_detail,
                meta={'info': (province, city_name)},
            )





