# -*- coding: utf-8 -*-
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import Rule
from scrapy.http import Request as sreq
from girl.items.host2 import *
import redis
from scrapy_redis.spiders import RedisCrawlSpider
from datetime import datetime
import platform

class Host2Spider(RedisCrawlSpider):
    """scrapy-redis crawler for host2.jp.

    Seeds its own redis start-URL queue at import time, follows shop index
    pages (``parse1``), then fans out to the shop's price page (``parse2``)
    and to each staff profile page (``girl``).
    """
    name = "host2"
    allowed_domains = ["host2.jp"]
    start_urls = ["http://www.host2.jp/shop/index.html"]
    domain = "http://www.host2.jp"
    rules = (
        # Raw string so '\S' is a real regex escape, not a deprecated
        # Python string escape.
        Rule(LinkExtractor(allow=(r'/shop/\S+/index.html')), callback="parse1"),
    )
    # BUG FIX: custom_settings used to be assigned twice; the second
    # assignment silently threw away RETRY_ENABLED, CONCURRENT_REQUESTS and
    # HTTPCACHE_EXPIRATION_SECS. Everything now lives in one dict.
    custom_settings = {
        'RETRY_ENABLED': False,
        "CONCURRENT_REQUESTS": 30,
        "HTTPCACHE_EXPIRATION_SECS": 60 * 60 * 24 * 300,
        'ITEM_PIPELINES': {'girl.newdatapipelines.NewDataPipeline': 300},
    }
    # Import-time side effect kept from the original design: push the start
    # URLs into the scrapy-redis queue this spider reads from.
    pool = redis.ConnectionPool(host='localhost', port=6379)
    conn = redis.Redis(connection_pool=pool)
    conn.sadd("host2:start_urls", *start_urls)
    redis_key = "host2:start_urls"
    # Write a timestamped log file only on the (non-macOS) server box.
    if platform.system() != 'Darwin':
        time = datetime.now().strftime('%m-%d-%H-%M')
        custom_settings["LOG_FILE"] = "/mnt/scrapy/crawler/host2-%s.log" % time

    @staticmethod
    def _nth(values, index):
        """Return values[index] or None, mirroring extract_first() semantics
        instead of raising IndexError on short result lists."""
        return values[index] if len(values) > index else None

    def parse1(self, response):
        """Scrape a shop index page, then request the price page and every
        staff profile linked from it."""
        item = {}
        item["address"] = response.xpath("//*[@class='adrs']/text()").extract_first()
        # BUG FIX: extract_first() returns None when the TEL node is missing;
        # calling .replace() on None raised AttributeError.
        phone = response.xpath("//*[@class='tel']/text()").extract_first()
        item["phone"] = phone.replace("TEL:", "") if phone else phone
        item["name"] = response.xpath("//*[@class='tit']/text()").extract_first()
        item["area"] = response.xpath("//*[@class='area']/text()").extract_first()
        item["logo"] = response.xpath("//div[@class='bd']/div[@class='img']/img/@src").extract_first()
        item["cover"] = response.xpath("//*[@class='shop-pr']/img/@src").extract_first()
        item["info"] = response.xpath("//*[@class='info']/text()").extract_first()
        item["url"] = response.url
        item["img"] = response.css(".shop-pr img::attr(src)").extract_first()
        # Price table lives on a sibling page; carry the partial item along.
        yield sreq(response.urljoin("system.html"), callback=self.parse2, meta={"item": item})

        # One request per staff profile; remember which shop it belongs to.
        for href in response.xpath("//div[@class='cts']//li[@class='cell link']/@data-href").extract():
            yield sreq(response.urljoin(href), callback=self.girl, meta={"url": response.url})

    def parse2(self, response):
        """Finish the store item started in parse1 with the price table."""
        item = storesItem()
        item.update(response.meta["item"])
        item["cost"] = response.css(".shop-system-tbl ::text").extract()
        yield item

    def girl(self, response):
        """Scrape one staff profile page into a girlItem."""
        item = girlItem()
        cmts = response.xpath("//*[@class='cmt']/text()").extract()
        item["name"] = response.xpath("//*[@class='staff-name']/text()").extract_first()
        # The 'cmt' nodes are positional; some profiles omit trailing ones,
        # so use the None-safe accessor instead of bare indexing.
        item["birthday"] = self._nth(cmts, 0)
        item["height"] = self._nth(cmts, 1)
        item["blood"] = self._nth(cmts, 2)
        item["constellation"] = self._nth(cmts, 3)
        item["point"] = response.xpath("//*[@class='f_type']/li/text()").extract_first()
        item["questions"] = response.xpath("//tr[@valign='top']/td//text()").extract()
        item["imgList"] = response.xpath("//div[@class='cts']//img/@src").extract()
        item["url"] = response.url
        item["storeUrl"] = response.meta["url"]
        # Classify free-form contact lines. BUG FIX: the old `find(...) > 0`
        # tests misclassified values where the needle sat at index 0 (e.g. a
        # line starting with "Instagram") into the lineId fallback.
        for text in response.xpath("//dl/dd//text()").extract():
            if "Instagram" in text:
                item["instagram"] = text
            elif "rss" in text:
                item["blog"] = text
            elif "@" in text:
                item["mail"] = text
            elif "-" in text:
                item["phone"] = text.replace("TEL:", "")
            else:
                item["lineId"] = text
        item["movieList"] = response.xpath("//*[@class='btn_play']/@link").extract()
        tmp = response.xpath("//*[@class='cap']")
        item["comment"] = tmp.xpath("string(.)").extract_first()
        yield item
