# -*- coding: utf-8 -*-
import scrapy
import redis
import time
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from scrapy_redis.spiders import RedisCrawlSpider
from h58Pro.items import HaizuItem
from h58Pro import settings


class HaizuSpider(RedisCrawlSpider):
    """Distributed crawler for hizhu.com rental listings.

    Seed URLs are pushed to the Redis list named by ``redis_key``;
    incremental crawling is implemented by manually recording each
    listing URL in a Redis set (``urls``) and skipping already-seen ones.
    """
    name = 'haizu'
    redis_key = 'haizu'
    # Follow pagination links of the form "?p=<n>" and parse every page.
    rules = (
        Rule(LinkExtractor(allow=r'\?p=\d+$'), callback='parse_item', follow=True),
    )
    # Shared connection used only for the manual URL de-duplication set.
    redis_conn = redis.Redis(
        host=settings.REDIS_HOST,
        port=settings.REDIS_PORT,
        encoding=settings.REDIS_ENCODING,
        password=settings.REDIS_PARAMS.get('password', '')
    )

    @staticmethod
    def _clean(text):
        """Strip whitespace and non-breaking spaces; tolerate None from xpath."""
        return (text or '').strip().replace(u'\xa0', '')

    def parse_item(self, response):
        """Extract every not-yet-seen listing on a results page.

        :param response: a listing-index page matched by ``rules``.
        :yields: one ``HaizuItem`` per listing URL newly added to Redis.
        """
        # NOTE: the original called time.sleep(5) here, which blocks the
        # Twisted reactor and stalls the whole crawler.  Use the
        # DOWNLOAD_DELAY setting for throttling instead.
        li_list = response.xpath('/html/body/div[1]/div/div[4]/div[2]/ul/li')
        for li in li_list:
            url = li.xpath('./div/a/@href').extract_first()
            # sadd returns 1 only when the URL was not in the set before,
            # i.e. this listing has not been scraped by any worker yet.
            if self.redis_conn.sadd('urls', url) != 1:
                continue
            # Fresh item per listing (the original reused one instance).
            item = HaizuItem()
            item['title'] = self._clean(
                li.xpath('./div/div[1]/h3/a/text()').extract_first())
            item['price'] = self._clean(
                ' '.join(li.xpath('./div/div[2]/p//text()').extract()))
            item['info'] = self._clean(
                li.xpath('./div/div[1]/p[1]/text()').extract_first())
            item['address'] = self._clean(
                li.xpath('./div/div[1]/p[2]/text()').extract_first())
            # BUG FIX: the original used ``return item`` inside this loop,
            # which emitted only the FIRST new listing per page and dropped
            # the rest; ``yield`` emits every new listing.
            yield item

