# -*- coding: utf-8 -*-
from datetime import *
import re
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import Rule
from scrapy_redis.spiders import RedisCrawlSpider
from girl.items import dmm
import redis
import platform
from datetime import *
def process_value(value):
    """Normalize a link extracted by the LinkExtractor.

    Returns only the canonical ``.../detail/=/cid=<id>/`` portion of
    *value* (query strings and trailing path segments are dropped), or
    None when *value* is not a product detail URL — a None return tells
    the LinkExtractor to discard the link entirely.
    """
    match = re.search(r".*/detail/=/cid=.+/", value)
    return match.group(0) if match else None


class NewdmmSpider(RedisCrawlSpider):
    """Redis-backed crawl spider for dmm.co.jp digital-video detail pages.

    At class-definition (import) time it resets the ``newdmm:start_urls``
    redis set and seeds it with the sale-ranking list pages below; the
    crawl rule then follows product detail links from those pages and
    parses each detail page with :meth:`girl`.
    """

    name = 'newdmm'
    # Follow only product-detail links in the four digital categories;
    # module-level process_value trims each matched URL down to the
    # canonical .../detail/=/cid=<id>/ form before it is requested.
    rules = (
        Rule(LinkExtractor(
            allow=('.*digital/(videoa|anime|videoc|nikkatsu)/-/detail/=/cid=.+/'),
            process_value=process_value),
            callback='girl'),
    )
    # Pages 1-3 of the sale ranking for each of: videoa, videoc, anime, nikkatsu.
    start_urls = [
            'http://www.dmm.co.jp/digital/videoa/-/list/=/sort=saleranking_asc/page=1/',
            'http://www.dmm.co.jp/digital/videoa/-/list/=/sort=saleranking_asc/page=2/',
            'http://www.dmm.co.jp/digital/videoa/-/list/=/sort=saleranking_asc/page=3/',
            'http://www.dmm.co.jp/digital/videoc/-/list/=/sort=saleranking_asc/page=1/',
            'http://www.dmm.co.jp/digital/videoc/-/list/=/sort=saleranking_asc/page=2/',
            'http://www.dmm.co.jp/digital/videoc/-/list/=/sort=saleranking_asc/page=3/',
            'http://www.dmm.co.jp/digital/anime/-/list/=/sort=saleranking_asc/page=1/',
            'http://www.dmm.co.jp/digital/anime/-/list/=/sort=saleranking_asc/page=2/',
            'http://www.dmm.co.jp/digital/anime/-/list/=/sort=saleranking_asc/page=3/',
            'http://www.dmm.co.jp/digital/nikkatsu/-/list/=/sort=saleranking_asc/page=1/',
            'http://www.dmm.co.jp/digital/nikkatsu/-/list/=/sort=saleranking_asc/page=2/',
            'http://www.dmm.co.jp/digital/nikkatsu/-/list/=/sort=saleranking_asc/page=3/',
            ]
    # NOTE(review): DUPEFILTER_CLASS is the empty string — presumably meant
    # to disable duplicate filtering so every run re-crawls the rankings,
    # but Scrapy normally expects an importable class path here; confirm
    # this does not raise at startup. Cache and scheduler persistence are
    # also off, consistent with a fresh full crawl each run.
    custom_settings = {
        "DUPEFILTER_CLASS":"",
        "HTTPCACHE_ENABLED":False,
        "SCHEDULER_PERSIST":False
    }

    # Side effect at import time: wipe and re-seed the redis start-URL set
    # that RedisCrawlSpider reads from (key "<name>:start_urls").
    pool = redis.ConnectionPool(host='localhost', port=6379)
    conn = redis.Redis(connection_pool=pool)
    conn.delete("newdmm:start_urls")
    conn.sadd("newdmm:start_urls", *start_urls)

    # On non-macOS hosts (i.e. the production crawler), log to a
    # timestamped file instead of the console.
    if platform.system() != 'Darwin':
        time = datetime.now().strftime('%m-%d-%H-%M')
        custom_settings["LOG_FILE"] = "/mnt/scrapy/crawler/dmm-%s.log" % time

    def girl(self, response):
        """Parse one product detail page into a ``dmm.girlItem``.

        The spec table (``table.mg-b20``) is flattened to a list of text
        nodes in which a label cell (e.g. release date, running time) is
        followed by its value cell; the loop below pairs them up via
        ``key``.
        """
        item = dmm.girlItem()
        x = response.xpath
        # Flat list of label/value text nodes from the spec table.
        tmp = x("//table[@class='mg-b20']//tr//text()").extract()
        key = ""
        item["title"] = x("//h1[@id='title']/text()").extract_first()
        # Defaults for fields that only exist on actress pages.
        item["name"] = ""
        item["bwh"] = ""
        # Skip whitespace-only text nodes between table cells.
        reg = re.compile(r"^\s+$")
        for da in tmp:
            if reg.search(da):
                continue
            # ``key`` was set by the label seen on the previous iteration;
            # the current node is its value.
            if key:
                item[key] = da
                key = ""
            # NOTE(review): there is no ``continue`` after storing a value,
            # so a value whose text happens to contain one of the label
            # substrings below would also (re)set ``key`` — confirm this
            # assign-then-relabel order is intended.
            if da.find("対応デバイス") >= 0:          # supported devices
                key = "apply"
                # elif da == "配信開始日":
                # key = "title"
            elif da.find("配信開始日") >= 0:          # release date
                key = "release"
            elif da.find("収録時間") >= 0:            # running time
                key = "time"
            elif da.find("監督") >= 0:               # director
                key = "director"
            elif da.find("名前") >= 0:               # performer name
                key = "name"
            elif da.find("サイズ") >= 0:             # measurements (B/W/H)
                key = "bwh"
            elif da.find("品番") >= 0:               # product code
                key = "designation"
            elif da.find("レーベル") >= 0:           # label
                key = "tag"
            elif da.find("メーカー") >= 0:           # manufacturer
                key = "manufacturer"
            elif da.find("シリーズ") >= 0:           # series
                key = "series"
        # Scrape date, media assets and remaining page-level fields.
        item["date"] = str(date.today())
        item["bigImg"] = x("//*[@id='sample-video']/a/@href").extract_first()
        item["play"] = x("//*[@id='performer']/a/text()").extract()
        item["cate"] = x("//table[@class='mg-b20']//tr//a[contains(@href,'keyword')]/text()").extract()
        item["imgList"] = x("//div[@id='sample-image-block']//img/@src").extract()
        item["url"] = response.url
        item["thumb"] = x("//div[@id='sample-video']//img/@src").extract_first()
        # Sample-video URL is embedded in the download button's onclick
        # handler as sampleplay('<url>'); pull out the quoted argument.
        tmp = x("//a[@class='d-btn']//@onclick").extract()
        if tmp:
            item["video"] = re.search(r"\('(.*)'\)", tmp[0]).group(1)
        yield item

    # Scrapy spider-closed hook (Scrapy's base ``close`` is defined without
    # ``self``, so this signature matches the framework's convention).
    def close(spider, reason):
        # import ipdb;ipdb.set_trace()
        pass
