# -*- coding: utf-8 -*-

import time
from MongoConn import MongoConn
from scrapy.log import logger
from config import *

class WorkerPipeline(object):
    """Scrapy item pipeline that stores scraped image items into MongoDB.

    Each item is routed to a per-clue collection named ``clue_id_<clue_id>``.
    An item is inserted only while fewer than ``MAX_COUNT`` documents with the
    same ``img_url`` exist in that collection; a ``count`` field on those
    documents tracks how many copies have been stored.
    """

    def __init__(self):
        # MongoConn is a project-local wrapper around the MongoDB client.
        # The database handle itself is bound lazily in open_spider.
        self.conn = MongoConn()

    def open_spider(self, spider):
        self.db = self.conn.db
        # Cache the set of existing collections ONCE at startup so that
        # process_item does not issue a list-collections server round trip
        # for every single item (the original did exactly that).
        # NOTE: collection_names() is deprecated in pymongo >= 3.7 in favor
        # of list_collection_names() — kept for compatibility with the
        # pymongo version this project pins.
        self._known_collections = set(self.db.collection_names())

    def process_item(self, item, spider):
        """Store *item* unless its img_url already reached MAX_COUNT copies.

        Returns the item unchanged so downstream pipelines still see it.
        """
        collection = 'clue_id_' + str(item['clue_id'])
        if collection not in self._known_collections:
            # First item seen for this clue: create the lookup indexes once.
            for field in ('img_url', 'pic_size', 'url', 'status'):
                self.db[collection].create_index(field)
            self._known_collections.add(collection)

        start_time = time.time()
        img_url = item.get("img_url")

        # How many copies of this image are already stored?
        # NOTE: Cursor.count() is deprecated in pymongo >= 3.7; the modern
        # equivalent is collection.count_documents({'img_url': img_url}).
        find_count_time = time.time()
        count = self.db[collection].find({'img_url': img_url}).count()
        find_count_time_end = time.time()
        logger.debug('查询图片次数时间:{}, 图片链接: {}'.format(find_count_time_end - find_count_time, item['img_url']))

        if count < MAX_COUNT:
            insert_time = time.time()
            # NOTE: insert() is deprecated in pymongo >= 3.0 (insert_one);
            # kept as-is to match the pinned pymongo version.
            self.db[collection].insert(dict(item))
            # Keep the per-URL duplicate counter in sync on every copy.
            self.db[collection].update_many({'img_url': img_url}, {"$set": {'count': count + 1}})
            insert_time_end = time.time()
            logger.debug('插入图片时间:{},图片链接:{}'.format(insert_time_end - insert_time, item['img_url']))

            end_time = time.time()
            logger.debug('存储时间: {}, 存储链接:{}'.format(end_time - start_time, item['img_url']))

        return item

    def close_spider(self, spider):
        # Release the MongoDB connection held since __init__.
        self.conn.close()
