# -*- coding: utf-8 -*-
import scrapy
import re
from copy import deepcopy

class Fang1Spider(scrapy.Spider):
    """Spider for fang.com real-estate listings.

    Starts from the national city index page, then for every city schedules
    two listing crawls: new houses (``parse_new``) and second-hand houses
    (``parse_old``), each following its own pagination.
    """
    name = 'fang1'
    allowed_domains = ['fang.com']
    start_urls = ['https://www.fang.com/SoufunFamily.htm']

    def parse(self, response):
        """Parse the city index table and yield per-city listing requests."""
        # The last two <tr> rows of the table are footer rows, not city data.
        trs = response.xpath('//div[@class="outCont"]/table/tr')[:-2]
        sf = None  # province or municipality, carried down across grouped rows

        for tr in trs:
            # td[2] carries the province name only on the first row of each
            # group; blank cells mean "same province as the row above".
            # `or ''` guards against a cell with no text node at all.
            sf_ = re.sub(r'\s', '', tr.xpath('./td[2]//text()').get() or '')
            if sf_:
                sf = sf_
            for c in tr.xpath('./td[3]/a'):
                city_name = c.xpath('./text()').get()
                city_url = c.xpath('./@href').get()
                url_l = city_url.split('.')
                if 'bj.' in city_url:  # Beijing uses fixed, non-derivable URLs
                    new_house_url = 'https://newhouse.fang.com/house/s/b91/'
                    esf_url = 'https://esf.fang.com/?ctm=1.bj.xf_search.head.104'
                else:
                    # e.g. https://cq.fang.com -> https://cq.newhouse.fang.com/house/s/b91
                    new_house_url = url_l[0] + '.newhouse.fang.com/house/s/b91'
                    esf_url = url_l[0] + '.esf.fang.com/house/i31'
                # Lazy %s logging instead of print() debug noise.
                self.logger.debug('city=%s new=%s esf=%s',
                                  city_name, new_house_url, esf_url)

                # city_name is an immutable str, so no deepcopy is needed.
                yield scrapy.Request(
                    new_house_url,  # new houses
                    callback=self.parse_new,
                    meta={'info': city_name},
                )
                yield scrapy.Request(
                    esf_url,  # second-hand houses
                    callback=self.parse_old,
                    meta={'info': city_name},
                )

    def parse_new(self, response):
        """Parse one page of new-house listings, then follow pagination."""
        city_name = response.meta['info']
        li_list = response.xpath('//div[@class="nl_con clearfix"]/ul/li')
        for li in li_list:
            item = {}
            item['新房信息'] = city_name
            house_name = li.xpath('.//div[@class="nlcd_name"]/a/text()').get()
            # `or ''`: some <li> entries (ads) lack a name node; re.sub(None)
            # would raise TypeError.
            item['标题'] = re.sub(r'\s', '', house_name or '')
            price = li.xpath('.//div[@class="nhouse_price"]//text()').getall()
            # Strip whitespace and the literal "广告" (ad marker) fragment.
            item['价格'] = re.sub(r'\s|\t|广告', '', ''.join(price))
            item['电话'] = li.xpath('.//div[@class="tel"]/p/text()').get()
            info = li.xpath('.//div[@class="house_type clearfix"]//text()').getall()
            item['面积'] = re.sub(r'\t|/|－|\s', '', ''.join(info))
            item['地址'] = li.xpath('.//div[@class="address"]/a/@title').get()
            content = li.xpath('.//div[@class="fangyuan"]//text()').getall()
            item['概要'] = re.sub(r'\t|/|－|\s', '', ''.join(content))
            yield item

        next_url = response.xpath('.//a[text()="下一页"]/@href').get()
        if next_url:
            # BUGFIX: propagate meta — the original dropped it, so
            # response.meta['info'] raised KeyError on page 2 and beyond.
            yield response.follow(
                next_url,
                callback=self.parse_new,
                meta={'info': city_name},
            )

    def parse_old(self, response):
        """Parse one page of second-hand listings, then follow pagination."""
        city_name = response.meta['info']
        dl_list = response.xpath('//div[@class="shop_list shop_list_4"]/dl')
        for dl in dl_list:
            item = {}
            item['旧房信息'] = city_name
            item['标题'] = dl.xpath('.//h4/a/@title').get()
            price = dl.xpath('./dd[2]//text()').getall()
            item['价格'] = re.sub(r'\r|\n|\s', '', ''.join(price))
            item['地址'] = dl.xpath('./dd/p[2]/a/@title').get()
            mj = dl.xpath('./dd/p[1]/text()').getall()
            item['面积'] = re.sub(r'\r|\n|\s', '', ''.join(mj))
            yield item

        # BUGFIX: the original forgot both `.get()` (so a SelectorList was
        # tested/passed) and the `yield` (so the follow-up request was built
        # but never scheduled) — pagination never ran here at all.
        next_url = response.xpath('//a[text()="下一页"]/@href').get()
        if next_url:
            yield response.follow(
                next_url,
                callback=self.parse_old,
                meta={'info': city_name},  # keep city name across pages
            )
