# -*- coding: utf-8 -*-

# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html

import os
import redis
import scrapy
import logging
import datetime
from copy import copy, deepcopy
from SFPM_spider.settings import IMAGES_STORE
from scrapy.pipelines.images import ImagesPipeline
from SFPM_spider.utils.mongoclient import MongodbClient
from SFPM_spider.utils.case_id_dups import get_dup_case_id_li
from SFPM_spider.utils.redis_pool import POOL, PROXY_REDIS_KEY

logger = logging.getLogger(__name__)


class FdcSpiderPipeline(object):
    """Stamp creation/update/fetch times onto every scraped item.

    Fix: the original called ``datetime.datetime.now()`` three times, so the
    three timestamps could differ when a second boundary was crossed between
    calls. A single reading guarantees they always agree.
    """

    def process_item(self, item, spider):
        # One clock reading shared by all three fields.
        now = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        item['createTime'] = now
        item['updateTime'] = now
        item['fetchTime'] = now
        return item


class RedisConnPipeline(object):
    """Manage a Redis connection bound to the spider's lifetime.

    On open: attach a ``StrictRedis`` connection (backed by the shared
    ``POOL``) to the spider and clear any stale proxy list left under
    ``PROXY_REDIS_KEY``. On close: release the connection.
    """

    def open_spider(self, spider):
        conn = redis.StrictRedis(connection_pool=POOL)
        conn.delete(PROXY_REDIS_KEY)
        spider.redis_conn = conn

    def close_spider(self, spider):
        spider.redis_conn.close()


class MongoClientPipeline(object):
    """Persist items from the 'sfpm' spider into MongoDB (db 'sfpm', col 'case').

    Items from any other spider pass through untouched.
    """

    def open_spider(self, spider):
        self.clo = MongodbClient()

    def close_spider(self, spider):
        self.clo.close()

    def process_item(self, item, spider):
        # Guard clause: only the 'sfpm' spider writes to Mongo.
        if spider.name != 'sfpm':
            return item
        self.clo.set_db('sfpm')
        self.clo.set_col('case')
        self.clo.insert_one(dict(item))
        return item


# Judicial auction spider, Alibaba platform (司法拍卖-阿里)
class SfpmCasePipeline(object):
    """Pipeline for the judicial-auction ('sfpm') spider.

    On open: optionally run case-id deduplication, controlled by the
    ``SFPM_CASE_REDIS`` setting (defaults to True when unset).
    On each item: refresh ``updateTime`` / ``fetchTime``.

    Fixes vs. original: ``is not None`` instead of ``!= None``; the
    side-effect-only conditional expression for logging replaced by a real
    ``if/else``; a single ``now()`` reading so both timestamps agree.
    """

    def open_spider(self, spider):
        if spider.name != 'sfpm':
            return
        # Deduplication runs by default; an explicit falsy setting disables it.
        flag = spider.settings['SFPM_CASE_REDIS']
        if flag is None:
            flag = True
        if flag:
            logger.info('>>>>开始去重<<<<')
            ret = get_dup_case_id_li()
            if ret:
                logger.info('>>>>去重完成<<<<')
            else:
                logger.error('>>>>去重失败<<<<')

    def process_item(self, item, spider):
        # One clock reading shared by both fields.
        now = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        item['updateTime'] = now
        item['fetchTime'] = now
        return item


class SfpmCaseImagesPipeline(ImagesPipeline):
    """Download case images into a per-case directory tree:
    ``<IMAGES_STORE>/<province>/<city>/<district>/<category>/<caseId>/``.

    Fixes vs. original: ``os.makedirs(..., exist_ok=True)`` removes the
    check-then-create race when several images of the same case finish
    concurrently; ``file_path`` accepts the ``item`` keyword that newer
    Scrapy versions pass (default ``None`` keeps older versions working).
    """

    def get_media_requests(self, item, info):
        # Send the case page as Referer so the image host accepts the request.
        headers = dict(referer=item['caseUrl'])
        for img_url in item['imageUrls']:
            yield scrapy.Request(
                img_url,
                headers=headers,
                # deepcopy so later mutation of the item can't leak into
                # requests that are still in flight.
                meta=dict(item=deepcopy(item)),
            )

    # Customize file naming and directory layout.
    def file_path(self, request, response=None, info=None, item=None):
        case = copy(request.meta['item'])
        # Last URL path segment as the file name; presumably there is no
        # query string on these image URLs — TODO confirm.
        image_guid = request.url.split('/')[-1]
        rel_dir = '{}/{}/{}/{}/{}'.format(case['province'], case['city'],
                                          case['districtName'],
                                          case['categoryName'], case['caseId'])
        # exist_ok avoids a TOCTOU race between concurrent downloads.
        os.makedirs(os.path.join(IMAGES_STORE, rel_dir), exist_ok=True)
        return os.path.join(rel_dir, image_guid)
