import ast

import scrapy

from ..items import BeikespiderItem
from ..util import hash_sha1



class BeikeSpider(scrapy.Spider):
    """Crawl second-hand housing listings from ke.com (Beike).

    Crawl pipeline: city index -> districts ("regions") per city ->
    paginated listing pages per district -> per-listing detail page.
    Each stage yields a dict that wraps a ``BeikespiderItem`` under a
    stage-specific key (``city_data`` / ``region_data`` / ``room_data``)
    so downstream pipelines can tell the record types apart.
    """

    name = 'beike'
    allowed_domains = ['ke.com']
    start_urls = ['https://www.ke.com/city/']

    def parse(self, response: scrapy.http.Response):
        """Parse the city index and schedule one request per city."""
        li_list = response.xpath('//div[@class="city-item VIEWDATA"]/div[2]/ul/li/div[2]/div/ul/li')
        for li in li_list:
            item = BeikespiderItem()

            city_name = li.xpath('./a/text()').extract_first()
            city_url = li.xpath('./a/@href').extract_first()
            item['city_name'] = city_name if city_name else ''  # city name
            # hrefs are protocol-relative ("//bj.ke.com"); point straight
            # at the city's second-hand-housing section.
            item['city_url'] = "https:" + city_url + "/ershoufang/" if city_url else ''  # city URL
            item['city_id'] = hash_sha1(item['city_name'] + item['city_url'])  # city id

            yield {'city_data': item}
            # Guard: scrapy.Request('') raises ValueError (missing scheme).
            if item['city_url']:
                yield scrapy.Request(item['city_url'], callback=self.region_crawl,
                                     meta={'city_url': item['city_url'], 'city_id': item['city_id']})

    def region_crawl(self, response: scrapy.http.Response):
        """Parse a city's district filter bar and schedule one request per district."""
        city_url = response.meta['city_url']
        city_id = response.meta['city_id']
        a_list = response.xpath('//div[@data-role="ershoufang"]/div/a')
        for a in a_list:
            item = BeikespiderItem()

            region_name = a.xpath('./text()').extract_first()
            region_url = a.xpath('./@href').extract_first()
            item['region_name'] = region_name if region_name else ''                        # district name
            # href looks like "/ershoufang/dongcheng/" — keep only the
            # district slug and append it to the city's listing URL.
            item['region_url'] = city_url + region_url.split('/')[-2] if region_url else ''  # district URL
            item['region_id'] = hash_sha1(item['region_name'] + item['region_url'])          # district id
            item['city_id'] = city_id                                                        # parent city id

            yield {'region_data': item}
            # Guard: scrapy.Request('') raises ValueError (missing scheme).
            if item['region_url']:
                yield scrapy.Request(item['region_url'], callback=self.page_crawl,
                                     meta={'region_id': item['region_id']})

    def page_crawl(self, response: scrapy.http.Response):
        """Parse one listing page: schedule detail requests, then paginate."""
        region_id = response.meta['region_id']
        li_list = response.xpath('//li[@class="clear"]')
        for li in li_list:
            item = BeikespiderItem()

            title = li.xpath('./div/div[1]/a/text()').extract_first()
            room_url = li.xpath('./div/div[1]/a/@href').extract_first()
            follows_update = li.xpath('./div/div[2]/div[3]/text()').extract()
            # The second text node is "N people following / posted M days ago".
            follows_update = follows_update[1].split('/') if len(follows_update) == 2 else ''
            total_price = li.xpath('./div/div[2]/div[5]/div[1]/span/text()').extract_first()
            total_price_unit = li.xpath('./div/div[2]/div[5]/div[1]/i[2]/text()').extract_first()
            unit_price = li.xpath('./div/div[2]/div[5]/div[2]/span/text()').extract_first()

            item['title'] = title if title else ''                                          # listing title
            item['room_url'] = room_url if room_url else ''                                 # detail-page URL
            item['follows'] = follows_update[0].strip() if len(follows_update) == 2 else ''  # follower count
            item['up_date'] = follows_update[1].strip() if len(follows_update) == 2 else ''  # listing date
            # BUG FIX: the unit node can be absent even when the price is
            # present; "str + None" used to raise TypeError.
            item['total_price'] = total_price + (total_price_unit or '') if total_price else ''  # total price
            item['unit_price'] = unit_price if unit_price else ''                           # price per m^2
            item['room_id'] = hash_sha1(item['title'] + item['room_url'])                   # listing id
            item['region_id'] = region_id                                                   # parent district id

            # Guard: scrapy.Request('') raises ValueError (missing scheme).
            if item['room_url']:
                yield scrapy.Request(url=item['room_url'], callback=self.room_crawl,
                                     meta={'item': item})

        # Pagination metadata lives in a literal attribute such as
        # {"totalPage":42,"curPage":1}.
        page_xp = response.xpath('//div[@class="page-box house-lst-page-box"]/@page-data').extract_first()
        if not page_xp:
            return
        # BUG FIX: eval() on scraped markup would execute arbitrary code
        # from an untrusted page; ast.literal_eval parses the same dict
        # literal safely.
        page_number = ast.literal_eval(page_xp)
        all_page = page_number['totalPage']     # total page count
        current_page = page_number['curPage']   # page currently being parsed
        if current_page == 1:
            # Ensure a trailing slash so ".../dongcheng" + "pg2/" does not
            # collapse into the invalid ".../dongchengpg2/".
            base_url = response.url if response.url.endswith('/') else response.url + '/'
            # BUG FIX: start at page 2 — page 1 is this very response, and
            # re-queuing "pg1/" duplicated every listing on it.
            for page in range(2, all_page + 1):
                self.logger.debug('queueing page %d of %d', page, all_page)
                yield scrapy.Request(url=base_url + f'pg{page}/', callback=self.page_crawl,
                                     meta={'region_id': region_id})

    def room_crawl(self, response: scrapy.http.Response):
        """Parse a listing's detail page and emit the completed item."""
        item = response.meta['item']

        around_xp = response.xpath('//div[@class="aroundInfo"]')
        cell_name = around_xp.xpath('./div[1]/a/text()').extract_first()
        areas = around_xp.xpath('./div[2]/span[2]/a/text()').extract()
        # District + sub-district, e.g. "Dongcheng" + "Andingmen".
        area = areas[0] + areas[1] if len(areas) == 2 else ''

        # The basic-info list is label/value pairs; pull each value out of
        # the raw markup by its Chinese label.
        message_xp = response.xpath('//div[@class="introContent"]/div[1]/div[2]/ul')
        house_type = message_xp.re(r'房屋户型</span>(.*)</li>')
        floor = message_xp.re(r'所在楼层</span>(.*)</li>')
        floor_area = message_xp.re(r'建筑面积</span>(.*)</li>')
        house_structure = message_xp.re(r'户型结构</span>(.*)</li>')
        internal_area = message_xp.re(r'套内面积</span>(.*)</li>')
        building_type = message_xp.re(r'建筑类型</span>(.*)</li>')
        orientation = message_xp.re(r'房屋朝向</span>(.*)</li>')
        building_structure = message_xp.re(r'建筑结构</span>(.*)</li>')
        decoration = message_xp.re(r'装修情况</span>(.*)</li>')
        elevator_family = message_xp.re(r'梯户比例</span>(.*)</li>')
        elevator = message_xp.re(r'配备电梯</span>(.*)</li>')

        item['cell_name'] = cell_name if cell_name else ''                                  # residential-complex name
        item['area'] = area if area else ''                                                 # location (district + area)
        item['house_type'] = house_type[0] if house_type else ''                            # room layout
        item['floor'] = floor[0] if floor else ''                                           # floor
        item['floor_area'] = floor_area[0] if floor_area else ''                            # built area
        item['house_structure'] = house_structure[0] if house_structure else ''             # layout structure
        item['internal_area'] = internal_area[0] if internal_area else ''                   # inner area
        item['building_type'] = building_type[0] if building_type else ''                   # building type
        item['orientation'] = orientation[0] if orientation else ''                         # orientation
        item['building_structure'] = building_structure[0] if building_structure else ''    # building structure
        item['decoration'] = decoration[0] if decoration else ''                            # decoration state
        item['elevator_family'] = elevator_family[0] if elevator_family else ''             # elevator/unit ratio
        item['elevator'] = elevator[0] if elevator else ''                                  # has elevator

        yield {'room_data': item}
