# -*- coding: utf-8 -*-
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
import scrapy
# from scrapy.spiders import spider
import re
from scrapy.http import Request as sreq
from scrapy.http import FormRequest as fr
from dmmimg.items.dmm import *
from scrapy.shell import inspect_response
import time
import pymongo
import platform
import os
import random
import redis
from fdfs_client.client import *
import MySQLdb
import MySQLdb.cursors
from datetime import *
import time
class DmmimgSpider(scrapy.Spider):
    """Spider that re-downloads DMM movie media (thumbnails, large images,
    sample videos, image lists) referenced by MongoDB documents and uploads
    each payload to FastDFS, using a Redis set ("dmm:exist") to skip sources
    that were already fetched.

    NOTE(review): every class-level statement below executes at *import*
    time and opens live network connections (MongoDB, Redis) and scans an
    entire collection into Redis. Consider moving this setup into
    ``__init__``/``from_crawler`` so importing the module has no side
    effects.
    """
    name = "dmmimg"
    # MongoDB client used to build the `tool` and `tmp` cursors below.
    # NOTE(review): credentials are hard-coded in source — move them to
    # Scrapy settings or environment variables.
    client = pymongo.MongoClient("localhost",socketKeepAlive=True,maxPoolSize=400)
    client.admin.authenticate("admin321", "dsf::6666,,<<", mechanism='SCRAM-SHA-1')
    # Cursor over every movie document, projecting only the media fields.
    # no_cursor_timeout keeps the server-side cursor alive for the whole
    # (long-running) crawl; batch_size(50) bounds per-round-trip memory.
    # NOTE(review): a no-timeout cursor must eventually be closed manually
    # or it leaks on the server — there is no close() call in this file.
    tool = client.dmm.movie.find({},{'url':1,"bigImg":1,'imgList':1,'video':1,'thumb':1,'_id':0},no_cursor_timeout=True).batch_size(50)
    # tool = client.dmm.movie.find({},{'url':1,'imgList':1,'thumb':1,'_id':0}).batch_size(10000)
    # Redis connection used as a dedupe set of already-stored media sources.
    pool = redis.ConnectionPool(host='localhost', port=6379)
    conn = redis.Redis(connection_pool=pool)
    # girlImg = list(client.dmm.avgirl.find({},{'thumb':1,"url":1,'_id':0}))
    # cityMovie = list(client.city3.movie.find({},{"movie":1,'thumb':1,"url":1,'_id':0}))
    # cityGirl = list(client.city3.girl.find({},{'image_urls':1,"girlUrl":1,'_id':0}))
    # cityDiary = list(client.city3.diary.find({},{'image_urls':1,"url":1,'_id':0}))
    # import ipdb;ipdb.set_trace()
    # if not conn.exists("dmm:exist"):

    # Seed the Redis dedupe set with every source already saved in dmm2.img.
    # NOTE(review): the list comprehension materializes the whole collection
    # in memory before the single sadd call — fine for modest sizes, but a
    # chunked sadd would be safer for large collections.
    tmp = client.dmm2.img.find({},{'source':1,'_id':0})
    # conn.expire("dmm:exist",timedelta(days=1))
    conn.sadd("dmm:exist",*[x.get("source") for x in tmp])
    start_urls = ["http://www.dmm.co.jp/"]
    # tmp = client.dmm2.img.distinct("url")
    # for x in tmp:
    #     print x.get("url")
    # #     conn.sadd("dmm:existUrl",x.get("url"))
    # for x in self.tool:
    #     if x.get("imgList"):
    #         for y in x["imgList"]:
    #             y = y.replace("-","jp-")
    #             if not self.conn.sismember("dmm:exist",y):
    #                 conn.sadd("dmmimg:request",y)
    #             else:
    #                 pass

    # NOTE(review): this REBINDS `client` — from here on the class attribute
    # is the FastDFS client, not the Mongo client. The Mongo cursors created
    # above keep the Mongo connection alive, so it still works, but the name
    # reuse is confusing; prefer a distinct name such as `fdfs_client`.
    client = Fdfs_client('/etc/fdfs/client.conf')
    custom_settings = {
        'RETRY_ENABLED' : False,
        'REFERER_ENABLED':False,
        "HTTPCACHE_ENABLED":False,
        "DOWNLOAD_TIMEOUT": 500,
        "CONCURRENT_REQUESTS":10,
        "MYEXT_ENABLED":False,
        "DEPTH_PRIORITY":1,
        "DOWNLOADER_MIDDLEWARES" :{
        'random_useragent.RandomUserAgentMiddleware': 400
        }
    }
    # On non-macOS hosts (i.e. production Linux boxes) write the log to a
    # timestamped file instead of the console.
    if platform.system() != 'Darwin':
        # NOTE(review): this class attribute `time` shadows the imported
        # `time` module within the class namespace — rename (e.g. `started`).
        time = datetime.now().strftime('%m-%d-%H-%M')
        custom_settings["LOG_FILE"] = "/mnt/scrapy/crawler/%s-%s.log" % (name,time)

    def parse(self,response):
        """Walk the `tool` cursor and yield one download request per media
        asset (thumb / bigImg / video / imgList entry) whose source is not
        yet in the Redis "dmm:exist" set.

        The start_urls response itself is ignored; it only serves to enter
        this callback.
        """
        for x in self.tool:
                url = x.get("url").replace("http://www.dmm.co.jp","")
                # if self.conn.sismember("dmm:existUrl",url):
                #     continue
                # NOTE(review): BUG — the yield below is OUTSIDE the
                # `if x["thumb"]:` guard. When x["thumb"] is falsy the
                # request is still built (with a falsy URL) and `meta`
                # is either stale from a previous iteration or unbound on
                # the first one (NameError). The yield should be indented
                # one level deeper. Also `x["thumb"]` raises KeyError when
                # the projected field is absent — `x.get("thumb")` (as used
                # for bigImg/video/imgList) would be consistent and safer.
                if not self.conn.sismember("dmm:exist",x["thumb"]):
                    if x["thumb"]:
                        meta = {"url":url,"type":"thumb","source":x["thumb"]}
                    yield sreq(x["thumb"],meta=meta,callback=self.parse3,dont_filter=True)
                if x.get("bigImg"):
                    if not self.conn.sismember("dmm:exist",x["bigImg"]):
                        meta = {"url":url,"type":"bigImg","source":x["bigImg"]}
                        yield sreq(x["bigImg"],meta=meta,callback=self.parse3,dont_filter=True)
                if x.get("video"):
                    if not self.conn.sismember("dmm:exist",x["video"]):
                        meta = {"url":url,"type":"video","source":x["video"]}
                        # Videos go through parse1 first: the stored value is a
                        # site-relative player-page path, not a direct media URL.
                        yield sreq("http://www.dmm.co.jp" + x["video"],meta=meta,callback=self.parse1,dont_filter=True)
                if x.get("imgList"):
                    for y in x["imgList"]:
                        # Rewrite the sample-image name to its large ("jp-")
                        # variant before dedupe check and download.
                        y = y.replace("-","jp-")
                        meta = {"url":url,"type":"bigimgList","source":y}
                        if not self.conn.sismember("dmm:exist",y):
                            yield sreq(y,meta=meta,callback=self.parse3,dont_filter=True)
                        else:
                            pass
        # import ipdb;ipdb.set_trace()
        # for x in self.girlImg:
        #     if not x.get("thumb"):
        #         continue
        #     if x.get("thumb") in self.exist:
        #         print "continue"
        #         continue
        #     meta = {"url":x["url"],"type":"avgirl","source":x["thumb"]}
        #     yield sreq(x["thumb"],meta=meta,callback=self.parse3)
        # for x in self.cityMovie:
        #     if x.get("thumb") and x.get("thumb") not in self.exist:
        #         ext = re.search("\.(\w+)\?.*",x["thumb"]).groups()[0]
        #         meta = {"url":x["url"],"type":"citymoviethumb","source":x["thumb"],"ext":ext}
        #         url = x["thumb"].replace("//img.cityheaven.net","https://img.cityheaven.net")
        #         url = re.sub(r"(.+)\?.+","\g<1>",url)
        #         yield sreq(url,meta=meta,callback=self.parse3)
        #     if x.get("movie") and x.get("movie") not in self.exist:
        #         meta = {"url":x["url"],"type":"citymovie","source":x["movie"]}
        #         url = x["movie"].replace("//img.cityheaven.net","https://img.cityheaven.net")
        #         url = re.sub(r"(.+)\?.+","\g<1>",url)
        #         yield sreq(url,meta=meta,callback=self.parse3)
        # for x in self.cityGirl:
        #     if not x.get("image_urls"):
        #         continue
        #     for y in x.get("image_urls"):
        #         if y in self.exist:
        #             print "continue"
        #             continue
        #         ext = re.search("\.(\w+)\?.*",y).groups()[0]
        #         meta = {"url":x["girlUrl"],"type":"citygirl","source":y,"ext":ext}
        #         y = re.sub(r"(.+)\?.+","\g<1>",y)
        #         y = y.replace("https://cityheaven.net","https://img.cityheaven.net")
        #         yield sreq(y,meta=meta,callback=self.parse3)
        # for x in self.cityDiary:
        #     if not x.get("image_urls"):
        #         continue
        #     if x.get("image_urls") in self.exist:
        #         print "continue"
        #         continue
        #     ext = re.search("\.(\w+)\?.*",x.get("image_urls")).groups()[0]
        #     meta = {"url":x["url"],"type":"citydiary","source":x.get("image_urls"),"ext":ext}
        #     url = re.sub(r"(.+)\?.+","\g<1>",x.get("image_urls"))
        #     url = url.replace("https://cityheaven.net","https://img.cityheaven.net")
        #     yield sreq(url,meta=meta,callback=self.parse3)


    def parse1(self,response):
        """Step 1 of the video chain: the player page embeds the real player
        in an iframe — follow its src, carrying the original meta along.

        NOTE(review): extract_first() returns None when no iframe matches,
        which would make sreq raise — unguarded.
        """
        url = response.xpath("//iframe/@src").extract_first()
        yield sreq(url,meta=response.meta,callback=self.parse2)

    def parse3(self,response):
        """Terminal callback: upload the downloaded body to FastDFS and yield
        a dmmimgItem recording where it was stored.
        """
        item = dmmimgItem()
        # File extension: taken from meta when a caller supplied one,
        # otherwise derived from the last dot-segment of the URL.
        if not response.meta.get("ext"):
            ext = response.url.split(".")[-1]
        else:
            ext = response.meta["ext"]
        # import ipdb;ipdb.set_trace()
        # "now_printing" is DMM's placeholder image — don't store it.
        if response.url.find("now_printing") < 0:
            ret = self.client.upload_by_buffer(response.body,ext)
            # ret = self.client.upload_appender_by_buffer(response.body,ext)
            item["path"] = ret["Remote file_id"]
        else:
            item["path"] = "now_printing"
        item["url"] = response.meta["url"]
        item["type"] = response.meta["type"]
        item["source"] = response.meta["source"]
        # Stamped with tomorrow's date — presumably so downstream consumers
        # pick the batch up the next day; TODO confirm the intent.
        item["date"] = str(date.today() + timedelta(days=1))
        # NOTE(review): _set_body is a private Response API — presumably used
        # here to release the (possibly large) body from memory after upload.
        response._set_body("")
        item  # NOTE(review): no-op expression statement — safe to delete.
        yield item

    def parse2(self,response):
        """Step 2 of the video chain: scrape the JSON-escaped `src":"..."`
        value out of the player page body, unescape it, and fetch the actual
        media file via parse3.

        NOTE(review): re.search returns None when the pattern is absent,
        which would raise AttributeError here — unguarded.
        """
        url = re.search(r"src\":\"(\S*?)\"",response.body).groups(0)[0]
        # The embedded URL is backslash-escaped and protocol-relative.
        url = "http:" + url.replace("\\","")
        yield sreq(url,meta=response.meta,callback=self.parse3)


    # def closed(self, reason):
    #     self.conn.delete(["dmm:existUrl","dmm:exist"])


