# -*- coding: utf-8 -*-
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
from scrapy.http import Request as sreq
from scrapy.http import FormRequest as f
from girl.items.hotel import *
from scrapy_redis.spiders import RedisCrawlSpider
import datetime
import redis
import platform

class HotelSpider(RedisCrawlSpider):
    """Crawl lovehotel-navi.jp: area top pages -> search results ->
    hotel detail pages -> room detail pages.

    Seed URLs are pushed into the scrapy-redis ``hotel:start_urls`` set
    at class-definition time, so running the module registers the crawl.
    """
    name = "hotel"
    start_urls = [
        "http://lovehotel-navi.jp/h/top/",
        "http://lovehotel-navi.jp/th/top/",
        "http://lovehotel-navi.jp/n/top/",
        "http://lovehotel-navi.jp/hr/top/",
        "http://lovehotel-navi.jp/tt/top/",
        "http://lovehotel-navi.jp/so/top/",
        "http://lovehotel-navi.jp/t/top/",
        "http://lovehotel-navi.jp/k/top/",
        "http://lovehotel-navi.jp/cg/top/",
        "http://lovehotel-navi.jp/s/top/",
        "http://lovehotel-navi.jp/q/top/",
        "http://lovehotel-navi.jp/o/top/",
    ]

    # BUG FIX: custom_settings used to be assigned twice; the second
    # assignment silently dropped the ITEM_PIPELINES entry, so items were
    # never piped to NewDataPipeline. Merged into a single dict.
    custom_settings = {
        'ITEM_PIPELINES': {'girl.newdatapipelines.NewDataPipeline': 300},
        # 'RETRY_ENABLED' : False,
        # "DOWNLOAD_TIMEOUT": 380,
        # "DOWNLOAD_DELAY ": 0.25,
        "CONCURRENT_REQUESTS": 60,
    }

    # Side effect kept from the original: seed the scrapy-redis start-URL
    # set when this module is imported (assumes a local Redis on 6379).
    pool = redis.ConnectionPool(host='localhost', port=6379)
    conn = redis.Redis(connection_pool=pool)
    conn.sadd("hotel:start_urls", *start_urls)

    # Write a timestamped log file everywhere except the dev Mac.
    if platform.system() != 'Darwin':
        time = datetime.datetime.now().strftime('%m-%d-%H-%M')
        custom_settings["LOG_FILE"] = "/mnt/scrapy/crawler/hotel-%s.log" % time

    def parse(self, response):
        """Submit the search form with an empty keyword to list every hotel."""
        yield f.from_response(response, formcss="#search form", callback=self.parse0,
                              formdata={"kwd": ""})

    def parse0(self, response):
        """Walk a search-result page: queue each hotel detail page (carrying
        its review level and price text in meta) and follow pagination."""
        x = response.xpath
        for box in x("//div[@class='searchshopbox_in']/div"):
            y = box.xpath
            url = y(".//div[@class='searchimg']/a/@href").extract_first()
            # extract_first() may return None for malformed boxes; Request(None)
            # would raise, so skip those entries.
            if not url:
                continue
            level = y(".//div[@class='kuchikomi']/span/text()").extract_first()
            price = y(".//p[@class='system']//text()").extract()
            yield sreq(url, callback=self.parse1, meta={"level": level, "price": price})
        for page_url in response.css('.paging a::attr(href)').extract():
            yield sreq(page_url, callback=self.parse0)

    def parse1(self, response):
        """Build a hotelItem from a hotel detail page, then follow the first
        room-list link (if any) to parse2."""
        item = hotelItem()
        x = response.xpath
        item["area"] = x("//p[@class='area']/text()").extract_first()
        item["name"] = x("//p[@class='name']/text()").extract_first()
        item["tel"] = x("//p[@class='tel']/text()").extract_first()
        item["address"] = x("//p[@class='address']/text()").extract()
        item["desc"] = x("//div[@class='hotel_property']//text()").extract()
        item["level"] = response.meta.get("level")
        item["price"] = response.meta.get("price")
        item["url"] = response.url
        item["date"] = str(datetime.date.today())
        item["cover"] = x("//div[@class='shopimg']/img/@src").extract_first()
        yield item
        # BUG FIX: was a bare ``except: pass`` around an IndexError-prone
        # [0] index; extract_first() + truthiness guard keeps the same
        # best-effort behavior without swallowing unrelated exceptions.
        room_url = response.css(".room_link a::attr(href)").extract_first()
        if room_url:
            yield sreq(room_url, callback=self.parse2)

    def parse2(self, response):
        """Yield one roomItem per room box on a room-list page."""
        for box in response.css(".roomlist .shoproombox"):
            # BUG FIX: the original created ONE roomItem outside the loop and
            # mutated/yielded the same object each iteration, so downstream
            # consumers could see every yielded item overwritten by the last
            # room's data. Construct a fresh item per room instead.
            item = roomItem()
            y = box.xpath
            item["cover"] = y(".//img/@src").extract_first()
            item["name"] = y(".//p[@class='roomno']/text()").extract_first()
            item["workTime"] = box.re(">(.*~.*)<")
            item["desc"] = y(".//div[@class='roomtext']/div/text()").extract_first()
            item["cost"] = box.css(".roomtext .roomsystem::text").extract()
            item["url"] = response.url
            item["date"] = str(datetime.date.today())
            item["hotelUrl"] = response.request.url
            yield item
