# -*- coding: utf-8 -*-
import scrapy
import re
from scrapy import Request
from ganji_zf.items import GanjiZfItem


class GjzfSpider(scrapy.Spider):
    """Scrape rental listings ("fang1") from wh.ganji.com.

    Yields one GanjiZfItem per listing with title, rent type, layout,
    area, orientation, floor, decoration and monthly rent, then follows
    the "next page" link. Pages lost to flaky free proxies are retried.
    """

    name = 'gjzf'
    allowed_domains = ['wh.ganji.com']
    start_urls = ['http://wh.ganji.com/fang1']

    def parse(self, response):
        """Parse one listing page.

        Yields:
            GanjiZfItem: one per listing on the page.
            Request: the next page, or a retry of the current page when
                the response looks incomplete.
        """
        dl_list = response.xpath("//div[@class='f-list-item ershoufang-list']/dl")
        for dl in dl_list:
            # Fresh item per listing. The original reused a single item
            # instance, so field values from one listing leaked into the
            # next and the 'NAN' defaults below never applied correctly.
            item = GanjiZfItem()

            # Title — guard against a missing node before calling .strip().
            house_title = dl.xpath("./dd[@class='dd-item title']/a/text()").extract_first()
            item['house_title'] = house_title.strip() if house_title else 'NAN'

            # Rent type, layout, area, orientation, floor, decoration —
            # all live in sibling <span>s; the first is the rent type.
            house_info = dl.xpath("./dd[@class='dd-item size']/span/text()").extract()
            item['rent_type'] = house_info[0] if house_info else 'NAN'
            for info in house_info[1:]:
                if re.match(r".+卫|.+室|.+厅", info):
                    item['house_type'] = info
                elif re.match(r'(\d+?)㎡', info):
                    item['house_area'] = int(re.match(r'(\d+?)㎡', info).group(1))
                elif re.match(r'\w+向', info):
                    item['house_dire'] = info
                elif re.match(r'[高中低]层', info):
                    # NOTE: the '|' characters in the original class
                    # [高|中|低] were literals, not alternation.
                    item['house_floor'] = info.strip()
                elif re.match(r'(\w+装修)|毛坯', info):
                    # BUGFIX: the original pattern r'[(\w+装修)|毛坯]' was a
                    # character class matching almost any single character,
                    # turning this branch into a catch-all.
                    item['house_deco'] = info

            # Fill defaults once per listing, after all spans were seen
            # (the original did this inside the span loop).
            for field in ('house_type', 'house_area', 'house_dire',
                          'house_floor', 'house_deco'):
                if not item.get(field):
                    item[field] = 'NAN'

            # Monthly rent; keep the raw string if it is not an integer.
            house_rent = dl.xpath("./dd[@class='dd-item info']/div[@class='price']/span[1]/text()").extract_first()
            if house_rent is not None:
                try:
                    item['house_rent'] = int(house_rent)
                except ValueError:
                    item['house_rent'] = house_rent
            else:
                item['house_rent'] = 'NAN'

            yield item

        # --- pagination / retry, once per page ---------------------------
        # BUGFIX: this section sat inside the listing loop, yielding the
        # same follow-up request once per listing on the page.
        next_u = response.xpath("//div[@class='pageBox']//ul/li//a/span[text()='下一页 >']/../@href").extract_first()
        # BUGFIX: the original concatenated unconditionally and raised
        # TypeError on the last page, where next_u is None.
        next_url = "http://wh.ganji.com" + next_u if next_u else None

        # Page number embedded in URLs like .../fang1/o12/.
        # BUGFIX: re.match anchors at the start of the string, and the URL
        # starts with "http", so .group(1) raised AttributeError on every
        # page; use re.search and tolerate URLs without a page marker.
        page_match = re.search(r"o(\d{1,3})", response.url)
        page_num = int(page_match.group(1)) if page_match else 0

        if next_url:
            next_request = Request(url=next_url, callback=self.parse)
            next_request.headers['Referer'] = response.url
            yield next_request
        # Free proxies are unstable and drop data; retry suspicious pages.
        elif (not dl_list) and response.status == 200:
            yield Request(url=response.url, callback=self.parse, dont_filter=True)
        elif ('partial' in response.flags) or (page_num <= 104):
            yield Request(url=response.url, callback=self.parse, dont_filter=True)
        else:
            # Unexpected terminal state: dump the body for inspection.
            with open('./check.html', 'w') as f:
                f.write(response.body.decode())
