# -*- coding: utf-8 -*-
import scrapy
import re
from snowball_slave.items import SnowballSlaveItem
from scrapy_redis.spiders import RedisSpider

class SlaveSpider(RedisSpider):
    """Distributed slave spider (scrapy-redis) for bj.5i5j.com house listings.

    Start URLs are consumed from the Redis list named by ``redis_key``;
    ``parse`` walks a listing index page and ``parse_item`` extracts one
    :class:`SnowballSlaveItem` per detail page.
    """
    name = 'slave'
    # BUG FIX: was misspelled `allowd_domains`, which Scrapy silently
    # ignores — the offsite filter was never configured.
    allowed_domains = ["bj.5i5j.com"]
    redis_key = 'fangspider:start_urls'

    def parse(self, response):
        """Parse a listing index page and schedule one request per detail page.

        :param response: index-page response popped from the Redis queue.
        :yields: ``scrapy.Request`` for each house-detail URL found.
        """
        detail_hrefs = response.xpath(
            "//ul[@class='pList']/li/div[@class='listImg']/a/@href").extract()
        for href in detail_hrefs:
            # hrefs are site-relative; urljoin resolves them against the page URL.
            yield scrapy.Request(url=response.urljoin(href),
                                 callback=self.parse_item)

    def parse_item(self, response):
        """Extract a single house item from a detail page.

        If the page is an anti-bot JavaScript redirect stub
        (``window.location.href='...'``) instead of real content, follow the
        redirect target and retry.

        :param response: detail-page response.
        :yields: a populated ``SnowballSlaveItem``, or a retry ``scrapy.Request``.
        """
        if response.status != 200:
            # NOTE(review): with default settings Scrapy only delivers 2xx
            # responses here, so this guard is defensive.
            self.logger.warning("页面访问错误{}".format(response.status))
            return
        try:
            item = SnowballSlaveItem()
            item['village'] = response.xpath("//div[@class='zushous']/ul/li[1]/a/text()").extract_first()
            item['building_type'] = response.xpath("//div[@class='zushous']/ul/li[2]/text()").extract_first()
            item['region'] = response.xpath("//div[@class='zushous']/ul/li[3]/a/text()").extract_first()
            item['subway'] = response.xpath("//div[@class='zushous']/ul/li[5]/a/text()").extract_first()
            item['Apartment_layout'] = response.xpath("//div[@class='infocon fl']/ul/li[1]/span/text()").extract_first()
            item['floorage'] = response.xpath("//div[@class='infocon fl']/ul/li[2]/span/text()").extract_first()
            item['year_built'] = response.xpath("//div[@class='infocon fl']/ul/li[4]/span/text()").extract_first()
            item['layout_of_house'] = response.xpath("//div[@class='infocon fl']/ul/li[5]/span/text()").extract_first()
            item['property_right'] = response.xpath("//div[@class='infocon fl']/ul/li[6]/span/text()").extract_first()
            item['property_right_years'] = response.xpath("//div[@class='infocon fl']/ul/li[7]/span/text()").extract_first()
            item['planning_purpose'] = response.xpath("//div[@class='infocon fl']/ul/li[8]/span/text()").extract_first()
            item['building_structure'] = response.xpath("//div[@class='infocon fl']/ul/li[9]/span/text()").extract_first()
            item['purchase_period'] = response.xpath("//div[@class='infocon fl']/ul/li[10]/span/text()").extract_first()
            item['unit_price'] = response.xpath("//div[@class='danjia']/span/text()").extract_first()
            item['total_price'] = response.xpath("//div[@class='de-price fl']/span/text()").extract_first()

            if item["village"] is None:
                # Page had no real content — look for the JS redirect target.
                redirect_urls = response.selector.re(r"window.location.href='(.*?)'")
                if redirect_urls:
                    # BUG FIX: the original used `return scrapy.Request(...)`
                    # inside this generator, which makes the request the
                    # StopIteration value — Scrapy never receives it and the
                    # retry was silently dropped. Yield it instead.
                    yield scrapy.Request(url=redirect_urls[0],
                                         callback=self.parse_item)
                # No redirect found either: nothing usable on this page.
                return

            yield item
        except Exception as e:
            # Best-effort: log and drop this one page rather than kill the
            # whole distributed crawl.
            self.logger.error("parse_item访问错误:{}".format(e))
