# -*- coding: utf-8 -*-
from scrapy.spiders import CrawlSpider, Rule
from scrapy.linkextractors import LinkExtractor
# from girl.items import city
from ..items import city
import re
from datetime import *
class NewgirlcitySpider(CrawlSpider):
    """Crawl cityheaven.net's daily "new girl" listing and scrape each
    profile page into a ``city.girlItem``.

    NOTE(review): this file is Python 2 code (it relies on the ``unicode``
    builtin); the fixes below stay Py2-compatible while using syntax that
    is also valid on Py3 where possible.
    """
    name = "newgirlcity"
    allowed_domains = ["cityheaven.net"]
    db_shell_path = "girl.dbShell.city.newgirl.newgirl"
    # Today's date is baked into the start URL once, at import time.
    timestr = date.today().strftime('%Y%m%d')
    start_urls = [
        'https://www.cityheaven.net/hokkaido/girl-list/typ601/date%s' % timestr,
    ]
    rules = (
        # Area-selection links: follow and accumulate the advertised counts.
        Rule(LinkExtractor(deny=('.*/---',),
             restrict_xpaths=("//ul[@class='areaSelectionList clearfix']//a")),
             follow=True, callback="sourceNum"),
        # Shop navigation links: follow only (callback is a no-op).
        Rule(LinkExtractor(deny=('.*/---',),
             restrict_xpaths=("//div[@class='shop_nav_list']//a")),
             follow=True, callback="sourceNum1"),
        # Individual profile pages (URL contains 'girlid-').
        Rule(LinkExtractor(deny=('.*/---',), allow=('girlid-',),
             restrict_xpaths=("//div[@id='content']//a")),
             callback="parse0"),
    )
    # Kept as a class attribute for backward compatibility with any
    # external reference; it only feeds custom_settings below.
    tmp = {'girl.newdatapipelines.NewDataPipeline': 300}
    custom_settings = {
        'ITEM_PIPELINES': tmp,
    }

    def sourceNum(self, response):
        """Add this area page's total girl count to the 'sdata' crawler stat.

        Guards against a missing ``#all-count_left`` node: ``extract_first()``
        returns None in that case and ``int(None)`` would raise TypeError.
        """
        count_text = response.css('#all-count_left::text').extract_first()
        if not count_text:
            return  # layout changed or count absent; skip rather than crash
        sdata = int(self.crawler.stats.get_value('sdata', 0)) + int(count_text)
        self.crawler.stats.set_value('sdata', sdata)

    def sourceNum1(self, response):
        """Intentionally empty: the rule only needs follow=True."""
        pass

    def parse0(self, response):
        """Parse one girl profile page and yield a populated girlItem.

        Returns without yielding when the page has no usable name or the
        "name" is actually one of the site's banner/advert headings.
        """
        item = city.girlItem()
        try:
            name = response.xpath('//tr/td[@itemprop="name"]/text()').extract()[0]
        except Exception:
            # Primary layout missing (IndexError on empty extract()):
            # fall back to the alternate profile layout.
            name = response.xpath('//div[@id="profiles"]/h4/text()').extract_first()
        if not name:
            return
        if isinstance(name, unicode):  # Py2: normalise to utf-8 bytes
            name = name.encode("utf-8")
        # Headings that appear where a name would be but are adverts, not
        # profiles — skip the page if any of them occurs in `name`.
        banners = ["意見箱にお答えします", "新人日給", "体験入店予定", "LINEで簡単お問い合わせ"]
        if any(name.find(banner) >= 0 for banner in banners):
            return
        # Profile attribute cells are free text; classify each by marker
        # characters (T.. = measurements, 歳 = age, 座 = zodiac, 型 = blood).
        bwh = ""
        constellation = ""
        blood = ""
        age = ""
        other = response.xpath("//*[@id='p_data']//td/text()").extract()
        for cell in other:
            if cell.find("T") >= 0:
                bwh = cell
            elif cell.find("歳") > 0:
                age = cell
            elif cell.find("座") > 0:
                constellation = cell
            elif cell.find("型") > 0:
                blood = cell

        imgList = response.xpath("//ul[@id='slider']//img/@src").extract()
        if not imgList:
            # Alternate layout stores photos under img[@name='girlsPhoto'].
            imgList = response.xpath("//img[@name='girlsPhoto']/@src").extract()
        if imgList:
            imgList = self.mapImg(imgList)
        character = []
        storesUrl = ""
        point = response.xpath("//*[@class='salespoint_list']/li/text()").extract()
        item["name"] = name
        item["age"] = age
        item["bwh"] = bwh
        item["blood"] = blood
        item["constellation"] = constellation
        item["storesUrl"] = storesUrl
        item["url"] = response.url
        item["questions"] = ''
        item["comment"] = ''
        item["storeComment"] = ''
        item["other"] = other
        item["character"] = character
        item["point"] = point
        item["image_urls"] = imgList
        item["date"] = str(date.today())
        yield item

    def mapImg(self, image_urls):
        """Rewrite scraped image URLs to absolute https:// form.

        Empty lists (or a falsy first element) are returned untouched;
        otherwise each URL has its leading '...img.' host prefix replaced
        and protocol-relative d-markets URLs get an explicit scheme.
        Entries shorter than 3 characters are dropped as rewrite residue.
        """
        if image_urls and image_urls[0]:
            # Raw strings: the originals relied on '\.' surviving in a
            # non-raw literal, which is fragile and warns on Py3.
            image_urls = [re.sub(r"(.*)img\.", "https://", u) for u in image_urls]
            image_urls = [re.sub(r"//d-markets", "https://d-markets", u) for u in image_urls]
            image_urls = [u for u in image_urls if len(u) > 2]
        return image_urls

    def closed(self, reason):
        """Spider-closed hook; nothing to clean up."""
        pass
