#!/usr/bin/env python
# -*- coding: utf-8 -*-

import scrapy
from scrapy.http import Request

from house.items import HouseItem


class AjkSpider(scrapy.Spider):
    """Scrape second-hand house listings from Anjuke (Wuhan, Donghu
    high-tech district).

    ``home_list`` parses each listing page into :class:`HouseItem` objects
    and follows the "next page" link; ``parse`` handles a single house
    detail page.
    """
    name = "home"
    allowed_domains = ["wuhan.anjuke.com"]
    start_urls = [
        "https://wuhan.anjuke.com/sale/donghugaoxinwuhan/?from=SearchBar"
    ]

    def start_requests(self):
        """Issue the initial request, routing it to ``home_list`` instead
        of the default ``parse`` callback."""
        # Reuse start_urls so the entry URL is defined in exactly one place.
        return [Request(self.start_urls[0], callback=self.home_list)]

    def home_list(self, response):
        """Parse a listing page.

        Yields one ``HouseItem`` per house on the page, then a ``Request``
        for the next page when a pagination link exists.

        :param response: Scrapy response for a listing page.
        """
        for house in response.xpath('//ul[@id="houselist-mod-new"]/li[@class="list-item"]'):
            # extract_first() returns None for a missing node; pass a ''
            # default so the .strip() calls below can never raise
            # AttributeError on layout/ad <li> entries.
            title = house.xpath(
                'div[@class="house-details"]/div[@class="house-title"]/a/text()'
            ).extract_first(default='')
            total_price = house.xpath(
                'div/span[@class="price-det"]/strong/text()'
            ).extract_first(default='')
            unit_price = house.xpath(
                'div/span[@class="unit-price"]/text()'
            ).extract_first(default='')
            # Join the per-span detail fragments in one pass instead of a
            # quadratic += loop; extract() never yields None entries.
            details = ''.join(
                text.strip()
                for text in house.xpath(
                    'div[@class="house-details"]/div[@class="details-item"]/span/text()'
                ).extract()
            )

            if not title.strip():
                # Skip list entries that carry no listing data.
                continue

            item = HouseItem()
            item['title'] = title.strip()
            item['details'] = details
            item['total_price'] = total_price.strip()
            item['unit_price'] = unit_price.strip()
            yield item

        # Pagination.  The original compared a SelectorList to None (always
        # true) and blindly generated pages p1..p5 from every page.  Follow
        # the real "next page" href instead; Scrapy's duplicate filter
        # prevents re-crawling already-seen pages.
        next_page = response.xpath(
            '//div[@class="multi-page"]/a[@class="aNxt"]/@href'
        ).extract_first()
        if next_page:
            yield Request(response.urljoin(next_page), callback=self.home_list)

    def parse(self, response):
        """Parse a single house detail page into a ``HouseItem``.

        Only the title is currently extracted; the item is yielded even
        when the title node is absent (matching the original contract).
        """
        item = HouseItem()
        title = response.xpath(
            '//div[@class="clearfix title-guarantee"]/h3/text()'
        ).extract_first()
        # Only assign when the node exists so the item never stores None.
        if title is not None:
            item['title'] = title
        yield item
