# -*- coding: utf-8 -*-
import sys
# Python 2-only hack: reload(sys) re-exposes setdefaultencoding (which
# site.py deletes at startup) so implicit str<->unicode conversions use
# UTF-8 instead of ASCII.  This will raise NameError on Python 3.
reload(sys)
sys.setdefaultencoding('utf-8')
# NOTE(review): star imports pull in CrawlSpider (scrapy.spiders) and
# areaItem (girl.items.hotel) used by the spider below.
from scrapy.spiders import *
from scrapy.http import Request as sreq
from girl.items.hotel import *
import datetime
import platform
import scrapy

class HotelareaSpider(CrawlSpider):
    """Spider for lovehotel-navi.jp.

    ``parse`` yields one ``areaItem`` (tid=1) per top-level area link on
    the start page and follows each link; ``parse0`` yields a single
    ``areaItem`` (tid=0) pairing each prefecture name with the list of
    sub-city names scraped from that area page.
    """
    name = "hotelarea"
    start_urls = [
        "http://lovehotel-navi.jp/h/top/",
    ]
    # Build the settings dict ONCE.  The previous code assigned
    # custom_settings = {}, conditionally added LOG_FILE, and then
    # reassigned custom_settings to a brand-new dict, silently discarding
    # the LOG_FILE entry -- so file logging never took effect.
    custom_settings = {
        "MYEXT_ENABLED": False,
        "SCHEDULER": "scrapy.core.scheduler.Scheduler",
    }
    # Write a timestamped log file only on the deployment host
    # (anything that is not macOS/Darwin, i.e. the dev machine).
    if platform.system() != 'Darwin':
        time = datetime.datetime.now().strftime('%m-%d-%H-%M')
        custom_settings["LOG_FILE"] = "/mnt/scrapy/crawler/hotelarea-%s.log" % time

    def parse(self, response):
        """Yield an areaItem for each area link, then follow it into parse0.

        The site root URL appears among the area links and is skipped.
        """
        for link in response.css(".area_link a"):
            # Extract the href once instead of re-querying it three times.
            href = link.css("::attr(href)").extract_first()
            if href == "http://lovehotel-navi.jp/":
                continue
            item = areaItem()
            item["url"] = href
            item["name"] = link.css("::text").extract_first()
            item["tid"] = 1
            yield item
            yield sreq(href, callback=self.parse0)

    def parse0(self, response):
        """Yield one areaItem mapping prefectures to their sub-city names."""
        item = areaItem()
        city = response.css(".condbox > a::text").extract()
        soncity = []
        for box in response.css(".condbox:nth-child(2) > div"):
            soncity.append(box.css("a::text").extract())
        # list() is a no-op on Python 2 (zip already returns a list) but
        # keeps the stored value a concrete list if this ever runs on Py3.
        item["name"] = list(zip(city, soncity))
        item["url"] = response.request.url
        item["tid"] = 0
        yield item
