# -*- coding: utf-8 -*-
# from girl.items import city
from datetime import *
import platform
from scrapy.http import Request as sreq
from scrapy_redis.spiders import RedisCrawlSpider
import redis
from girl.items.city import *
import pymongo
import re

class NewstorecitySpider(RedisCrawlSpider):
    """Redis-fed spider refreshing shop attendance data from cityheaven.net.

    Shop URLs are pulled from the MongoDB ``city.store`` collection; for each
    shop the ``<shop-url>attend`` page is fetched and parsed into an
    ``attendanceItem`` describing which girls are listed and at what times.
    """
    name = "agenda"
    start_urls = [
        'https://www.cityheaven.net/hokkaido/shop-list/',
    ]
    custom_settings = {
        "CONCURRENT_REQUESTS": 100,
        # Re-use HTTP-cached pages for at most one day.
        "HTTPCACHE_EXPIRATION_SECS": 60 * 60 * 24 * 1,
    }
    # Seed the scrapy-redis start-urls set so the spider has work on startup.
    pool = redis.ConnectionPool(host='localhost', port=6379)
    conn = redis.Redis(connection_pool=pool)
    conn.sadd("agenda:start_urls", *start_urls)
    if platform.system() != 'Darwin':
        # Non-macOS is assumed to be the production host: log to a timestamped file.
        time = datetime.now().strftime('%Y-%m-%d-%H-%M')
        custom_settings["LOG_FILE"] = "/mnt/scrapy/crawler/%s-%s.log" % (name, time)
    # SECURITY NOTE(review): database host and credentials are hard-coded in
    # source control; move them into Scrapy settings or environment variables.
    client = pymongo.MongoClient("47.75.39.50", socketKeepAlive=True, maxPoolSize=400)
    client.admin.authenticate("admin321", "dsf::6666,,<<", mechanism='SCRAM-SHA-1')
    today = str(date.today())
    # Full cursor over every stored shop; no_cursor_timeout because a long
    # crawl can outlive the server's default cursor timeout.
    tool = client.city.store.find({}, {"date": 1, 'url': 1, '_id': 0},
                                  no_cursor_timeout=True).batch_size(100)

    def parse(self, response):
        """Schedule one attendance-page request per shop URL from MongoDB."""
        for doc in self.tool:
            url = doc.get("url")
            if not url:
                continue
            # dont_filter: attendance must be re-crawled on every run.
            yield sreq(url + "attend", callback=self.parse3, dont_filter=True)

    def parse1(self, response):
        """Extract the shop's small cover image path (without the ?cache query)."""
        # BUG FIX: the instantiation below had been commented out, so every
        # call raised NameError on the first item[...] assignment.
        item = storesItem()
        item["url"] = response.url
        if response.css(".logo-nodeco::attr(data-echo)"):
            item["smallcover"] = response.css(".logo-nodeco::attr(data-echo)").re_first(r"(img.*)\?cache")
        elif response.css(".shopinfo img::attr(src)"):
            item["smallcover"] = response.css(".shopinfo img::attr(src)").re_first(r"(img.*)\?cache")
        elif response.css(".shoptopimage img::attr(data-echo)"):
            item["smallcover"] = response.css(".shoptopimage img::attr(data-echo)").re_first(r"(img.*)\?cache")
        else:
            item["smallcover"] = "no"
        yield item

    def parse3(self, response):
        """Parse an attendance page, trying three page layouts in turn.

        Yields an ``attendanceItem`` whose ``attendance`` field is either a
        list of (girl-url, time) pairs, a ``{girl_url: [7 cell strings]}``
        dict (weekly-table layout), or the string ``"no"`` when nothing
        could be extracted.
        """
        self.logger.debug("parse3 %s", response.url)
        item = attendanceItem()
        item["url"] = response.url.replace("attend", "")
        item["date"] = str(date.today())

        # Layout 1: mobile "shukkin-list" containers.
        attendance = []
        for block in response.xpath("//*[@class='shukkin-list-container']/div"):
            links = block.xpath("a/@href").extract() or block.xpath(".//a/@href").extract()
            times = block.css(".time_font_size.shadow.shukkin_detail_time ::text").extract()
            # Drop whitespace-only text nodes, then trim the rest.
            times = [t.strip() for t in times if not re.match(r"\s+$", t)]
            attendance.append(list(zip(links, times)))

        # Layout 2: "panel-shukkin" panels.
        if not attendance:
            for panel in response.css(".panel.panel-shukkin"):
                links = panel.xpath("li/a/@href").extract()
                times = panel.css(".text.text-small *::text").extract()
                # Keep every other text node (counted from the end), restored
                # to page order.
                times = times[::-2]
                times.reverse()
                attendance.append(list(zip(links, [t.strip() for t in times])))

        # Layout 3: weekly "#shukkin_list" table -> {girl_url: [7 day cells]}.
        if not attendance:
            attendance = {}
            # NOTE(review): tr:nth-child(2) selects only the second table row;
            # confirm the whole week really lives in that single row.
            for row in response.css("#shukkin_list tr:nth-child(2)"):
                girl_url = row.css("td:nth-child(1) a::attr(href)").extract_first()
                # Columns 2..8 hold the seven daily cells; collapse whitespace.
                attendance[girl_url] = [
                    re.sub(r"\s+", "", "".join(row.css("td:nth-child(%d) ::text" % col).extract()))
                    for col in range(2, 9)
                ]

        item["attendance"] = attendance if attendance else "no"
        yield item