# -*- coding: utf-8 -*-
import redis
import os
import scrapy
import time
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from scrapy_redis.spiders import RedisCrawlSpider
from h58Pro.items import H58ProItem
from h58Pro import settings

# from selenium.webdriver import Chrome
# from selenium.webdriver import ChromeOptions

BASE_DIR = os.path.dirname(os.path.abspath(__file__))


# class ZufangSpider(CrawlSpider):


# def __init__(self):
#     super(ZufangSpider, self).__init__()
#     #
#     option = ChromeOptions()
#     option.add_experimental_option('excludeSwitches', ['enable-automation'])
#     file_path = os.path.join(BASE_DIR, 'chromedriver.exe')
#     self.bro = Chrome(executable_path=file_path, options=option)

class ZufangSpider(RedisCrawlSpider):
    """Distributed crawler for Shanghai rental listings on sh.zu.fang.com.

    Start URLs are consumed from the Redis key ``redis_key`` (scrapy-redis),
    and a shared Redis set named ``'urls'`` is used to de-duplicate detail
    pages across all spider instances.
    """
    name = 'zufang'
    # allowed_domains = ['www.xxx.com']
    # start_urls = ['https://sh.zu.fang.com/']
    redis_key = 'sh_zufang'
    # Listing-index pages look like /house/i<digits>/ ; detail pages are
    # extracted manually inside parse_item.
    links = LinkExtractor(allow=r'house/i\d+/')
    rules = (
        Rule(links, callback='parse_item', follow=False),
    )
    # Shared connection used only for cross-process URL de-duplication.
    redis_conn = redis.Redis(
        host=settings.REDIS_HOST,
        port=settings.REDIS_PORT,
        encoding=settings.REDIS_ENCODING,
        password=settings.REDIS_PARAMS.get('password', '')
    )

    def parse_item(self, response):
        """Extract detail-page URLs from a listing page and request only the
        ones not yet seen (``sadd`` returns 1 only for a new member)."""
        dl_list = response.xpath('//*[@id="listBox"]/div[2]/dl')
        for dl in dl_list:
            uri = dl.xpath('./dt/a/@href').extract_first()
            print(uri)
            if uri:
                model_url = 'https://sh.zu.fang.com' + uri
                # sadd == 1 -> URL is new; 0 -> already claimed by some node.
                ex = self.redis_conn.sadd('urls', model_url)
                if ex == 1:
                    # NOTE(review): time.sleep blocks the Twisted reactor;
                    # prefer the DOWNLOAD_DELAY setting for throttling.
                    time.sleep(1)
                    yield scrapy.Request(model_url, callback=self.parse_detail)

        print('正在查询新数据')

    def parse_detail(self, response):
        """Parse a single rental detail page into an H58ProItem.

        ``address`` concatenates the cleaned text of every info row, each
        row terminated by ``'|'``; ``price`` keeps the text of the last
        matching price node (as in the original page there is one).
        """
        div_list = response.xpath('//div[@class="trl-item2 clearfix"]')
        sections = []
        for div in div_list:
            address_list = div.xpath('./div[@class="rcont"]//text()').extract()
            # Collapse whitespace fragments of one row into a single string.
            sections.append(
                ''.join(part.strip().replace('\n', ' ') for part in address_list)
            )
        address = ''.join(sec + '|' for sec in sections)

        price = ''
        for node in response.xpath('//div[@class="trl-item sty1"]'):
            price = ' '.join(node.xpath('.//text()').extract()).strip()

        title = response.xpath('//div[@class="title"]/text()').extract_first()
        item = H58ProItem()
        # extract_first() may return None when the node is absent; guard
        # against AttributeError on .strip().
        item['title'] = title.strip() if title else ''
        item['price'] = price
        item['address'] = address
        # NOTE(review): blocking sleep — see parse_item.
        time.sleep(1)
        yield item
