# encoding: utf-8
__author__ = 'wwj'
import pymongo
import re
import MySQLdb
import MySQLdb.cursors
from datetime import *
import paramiko
import sys
sys.path.append("/Users/wwj/Downloads/shangtianjun/girl/girl/dbShell/")
sys.path.append( "/var/crawler/spider/crawler/girl/dbShell/")
from dbshell import dbshell
import sh
# from ..dbshell import dbshell
# from scrapy import log
import redis
from pprint import pprint as pp
from zlib import crc32
import copy
from collections import defaultdict
class main(dbshell):
    """Clean crawled girl profiles out of mongo and stage them for mysql.

    Inherits the connection handles (mongo, redis, mysql) and the overall
    extract/clean/load driver from dbshell; this subclass only supplies the
    table names, the output column order, and the per-record clean() step.
    """
    # Output column order expected by the mysql `girl` table
    # (column name, default value).
    orderItem = [
            ['age', ''],
            ['name', ''],
            ['height', ''],
            ['bust', ''],
            ['waist', ''],
            ['hipline', ''],
            ['cup', ''],
            ['blood', ''],
            ['imgs', ''],
            ['thumb', ''],
            ['url', ''],
            ['sid', ''],
    ]
    mongoTable = "girl"   # source mongo collection
    mysqlTable = "girl"   # destination mysql table
    # exesize = 300

    def __init__(self, spidername=''):
        """Open the source mongo cursor and warm the redis `stores` hash.

        :param spidername: spider name forwarded to dbshell.__init__
                           (selects connections / the `self.where` filter).
        """
        super(main, self).__init__(spidername)
        # no_cursor_timeout: a full collection scan can outlive mongo's
        # default 10-minute idle cursor timeout; newest documents first.
        self.mongofrom = self.mongo.find(self.where, no_cursor_timeout=True).batch_size(1000).sort("_id", -1)
        if not self.redisconn.exists("stores"):
            # Cache store url -> id once, so clean() can resolve the store
            # id (sid) from redis without a mysql round-trip per record.
            for row in self.getdb("select distinct(url),id from store"):
                self.redisconn.hset("stores", row["url"], row["id"])

    def clean(self, y):
        """Normalize one raw mongo document *y* into self.data and append a
        deep copy to self.datas for the batch insert.

        Calls self.myException("no sid") when the store url cannot be
        resolved via the redis `stores` hash.
        """
        # NOTE: self.data is a shared scratch dict reused across records,
        # so every field must be (re)assigned on every call.
        data = self.data
        # Missing keys read as "" so the regex/replace calls below are safe.
        y = defaultdict(lambda: "", y)
        # Resolve the store path: strip the host and any trailing
        # "girlid.../" segment.
        url = y["storesUrl"].replace("https://www.cityheaven.net", "")
        url = re.sub(r"girlid.*/", "", url)
        if not url:
            # Fall back to deriving the store url from the girl's own url.
            url = re.sub(r"girlid.*/", "", y["url"])

        sid = self.redisconn.hget("stores", url)
        if sid:
            data['sid'] = str(sid)
        else:
            self.myException("no sid")
        age = re.search(r"\d+", y["age"])
        data["age"] = age.group() if age else ""
        data["name"] = y["name"]
        # bwh is e.g. "T160 B85(C) W58 H86" -> height/bust/waist/hipline,
        # with the cup size in parentheses.
        bwh = re.findall(r"\d+", y["bwh"])
        if len(bwh) > 3:
            data["height"] = bwh[0] or ""
            data["bust"] = bwh[1] or ""
            data["waist"] = bwh[2] or ""
            data["hipline"] = bwh[3] or ""
        else:
            data["height"] = data["bust"] = data["waist"] = data["hipline"] = ''
        cup = re.search(r"\((.+)\)", y["bwh"])
        # Always assign cup: since `data` is reused across records, skipping
        # the assignment would leak the previous record's cup into this one.
        data["cup"] = cup.groups()[0] if cup else ""
        data["blood"] = y["blood"]
        # .get() bypasses the defaultdict factory and may return None, which
        # would crash the loop below -- guard with `or []`.  A list (not a
        # lazy map) is required because imgs[0] is indexed afterwards.
        imgs = [re.sub(r"\?cache.*", "", u) for u in (y.get("image_urls") or [])]
        data["imgs"] = ",".join(imgs)
        data["constellation"] = y["constellation"]
        data["thumb"] = imgs[0] if imgs else ""
        data["url"] = y["url"].replace("https://www.cityheaven.net", "")
        # Mysql-friendly url hash: decimal crc32 with any '-' sign stripped.
        data["urlHash"] = str(crc32(data["url"])).replace("-", "")
        # Deep-copy so later reuse of self.data does not mutate queued rows.
        self.datas.append(copy.deepcopy(data))

if __name__ == '__main__':
    # Script entry point: build the cleaner for the "city" spider and run it.
    main("city")()
