import scrapy
from urllib.parse import urlparse
from hsmarket.items import HsmarketItem


class BeikeSpider(scrapy.Spider):
    """Scrape second-hand housing listings from gz.ke.com (Beike), Pazhou area."""

    name = 'beike_spider'
    start_urls = ['https://gz.ke.com/ershoufang/pazhou']
    # Mobile-site API equivalents, kept for reference:
    # https://m.ke.com/liverpool/api/ershoufang/getList?cityId=440100&condition=%252Fpazhou%252Fpg2&curPage=1
    # https://m.ke.com/gz/ershoufang/pazhou/

    def parse(self, response):
        """Parse one listing page and yield an HsmarketItem per house entry.

        Missing nodes previously crashed the whole page parse: `.get()` could
        return None and was fed straight into `.replace`/`urlparse`, and fixed
        indices (`info[1]`, `fwinfo[1]`, path segment `[2]`) raised IndexError
        on malformed rows. Each extraction is now guarded so a bad listing
        yields a partial item instead of aborting the page.
        """
        for house in response.xpath('//li[@class="clear"]'):
            item = HsmarketItem()
            item['site'] = 'beike'
            # extract_first() and get() are the same API; use get() throughout.
            item['title'] = house.xpath(
                    './/div[@class="title"]/a/@title').get()
            item['pos'] = house.xpath(
                    './/div[@class="positionInfo"]/a/text()').get()
            item['price'] = house.xpath(
                    './/div[@class="totalPrice totalPrice2"]/span/text()'
                    ).get()

            # houseInfo text looks like "floor|build|layout|...|size|direction"
            # — assumed from the positional access below; TODO confirm on live
            # markup. Whitespace is stripped before splitting on '|'.
            info_text = house.xpath(
                    './/div[@class="houseInfo"]/text()[2]').get() or ''
            info = info_text.replace('\n', '').replace(' ', '').split('|')
            if len(info) >= 5:
                item['floor'] = info[0].strip()
                item['build'] = info[1].strip()
                item['layout'] = info[2].strip()
                item['size'] = info[-2].strip()
                item['direction'] = info[-1].strip()

            # followInfo text looks like "<followers> / <published>".
            fwinfo_text = house.xpath(
                    './/div[@class="followInfo"]/text()[2]').get() or ''
            fwinfo = fwinfo_text.replace('\n', '').replace(' ', '').split('/')
            item['follows'] = fwinfo[0]
            item['published'] = fwinfo[1] if len(fwinfo) > 1 else ''

            img_url = house.xpath(
                    './/a/img[@class="lj-lazy"]/@data-original').get()
            item['image_url'] = img_url
            # Derive a local file name from the image URL path, dropping the
            # "!<size-spec>" suffix Beike appends after the real file name.
            file_name = None
            if img_url:
                segments = urlparse(img_url).path.split('/')
                if len(segments) > 2:
                    file_name = segments[2].split('!')[0]
            item['file_name'] = file_name
            yield item
