# -*- coding: utf-8 -*-
#@Time    :2020/5/12 9:11
#@Author  :Dapan
#@Email : wali666@qq.com
import json
import urllib.parse
from bson import ObjectId
from pymongo import MongoClient
from datetime import datetime, timedelta
from bson.binary import Binary
import pickle, zlib




MONGODB_HOST = 'localhost'
MONGODB_PORT = 27017


class MongoCache:
    """MongoDB-backed key/value cache mapping crawled URLs to pickled results.

    Entries live in the ``park4night.responseData`` collection and are
    purged automatically by a MongoDB TTL index on ``timestamp``.
    """

    def __init__(self, expires=timedelta(days=30)):
        """
        Connect to MongoDB and ensure the TTL index exists.

        :param expires: lifetime of a cached entry before MongoDB deletes it
        """
        self.client = MongoClient(MONGODB_HOST, MONGODB_PORT)
        self.db = self.client.park4night  # database is created lazily if absent
        self.collection = self.db.responseData  # collection likewise
        # TTL index: MongoDB removes a document once its `timestamp` field is
        # older than `expires`. This only works when `timestamp` is stored as
        # a BSON date (a Python datetime), not a string.
        self.collection.create_index('timestamp', expireAfterSeconds=expires.total_seconds())

    def __setitem__(self, key, value):
        """
        Serialize and upsert a crawl result under its URL key.

        :param key: the crawled URL, used as the document ``_id``
        :param value: the crawl result to cache for that URL
        """
        # BUG FIX: the timestamp was previously stored as str(datetime.now()),
        # which the TTL index silently ignores (it only expires real BSON
        # dates), so cached entries never expired. Store a datetime instead.
        record = {'result': pickle.dumps(value), 'timestamp': datetime.utcnow()}
        # $set with upsert=True: insert when `_id` is missing, otherwise
        # overwrite the result/timestamp fields of the existing document.
        self.collection.update_one({"_id": key}, {'$set': record}, upsert=True)

    def __getitem__(self, item):
        """
        Fetch and deserialize the cached result for a URL.

        :param item: the URL (``_id``) to look up
        :return: the unpickled crawl result
        :raises KeyError: when no document exists for ``item``
        """
        record = self.collection.find_one({"_id": item})
        if record:
            # NOTE: pickle.loads is only safe here because this process wrote
            # the data itself; never store untrusted pickles in this cache.
            return pickle.loads(record['result'])
        # f-string instead of `item + '...'`: also works for non-str keys.
        raise KeyError(f'{item} does not exist')

    def __contains__(self, item):
        """
        Return True when a cached entry exists for ``item`` (EAFP lookup).

        :param item: the URL (``_id``) to test for
        :return: True if cached, False otherwise
        """
        try:
            self[item]
        except KeyError:
            return False
        return True

    def clear(self):
        """Drop the whole cache collection, removing every cached entry."""
        self.collection.drop()



def checkHtml():
    """Scan cached detail pages and delete entries fetched with a dead cookie.

    A cached page containing the PRO-version upsell banner was fetched while
    the crawl cookie had expired; such documents are removed from the cache
    so the crawler will fetch them again.
    """
    mongo_cache = MongoCache()
    # `_id` holds the crawled key, so collecting `_id` yields every cached key.
    all_exist_ids = [doc["_id"] for doc in mongo_cache.collection.find()]
    print(all_exist_ids)
    bad_count = 0
    for doc_id in all_exist_ids:  # `doc_id` avoids shadowing builtin `id`
        # Skip list pages ('--' keys) and ajax responses: only detail-page
        # documents carry an 'htmlStr' payload.
        if '--' in doc_id or 'ajax' in doc_id:
            continue
        res = mongo_cache[doc_id]  # idiomatic indexing instead of __getitem__
        htmlStr = res['item']['htmlStr']
        if 'This feature requires the PRO version park4night. Learn more?' in htmlStr:
            bad_count += 1
            print(doc_id, 'cookie失效', bad_count)
            mongo_cache.collection.delete_one({"_id": doc_id})



if __name__ == '__main__':
    # Entry point: sweep the cache for pages crawled with an expired cookie.
    checkHtml()
