# -*- coding: utf-8 -*-

# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html

import os
import scrapy
from scrapy.conf import settings
from scrapy.exceptions import DropItem
from scrapy.pipelines.images import ImagesPipeline

from sina_crawl.spiders.sina_weibo import SinaWeiboSpider
from sina_crawl.utils.mongodb import MongoDbClient
from sina_crawl.utils.logger_utils import Logger

# Shared logger used by every pipeline in this module.
log = Logger()

# MongoDB connection parameters are read from the Scrapy settings.
# NOTE(review): `scrapy.conf.settings` is a deprecated access path in newer
# Scrapy releases (use `crawler.settings` / `from_crawler`) — verify the
# pinned Scrapy version before upgrading.
host = settings.get('MONGODB_HOST')
port = settings.get('MONGODB_PORT')
dbname = settings.get('MONGODB_DBNAME')
collection_name = settings.get('MONGODB_COLLECTION')
# Single module-level client/collection handle shared by all pipelines below.
db = MongoDbClient(host=host, port=port, db_name=dbname, collection=collection_name)


class DuplicateUserInfoPipeline(object):
    """Drop items whose ``user_id`` has already been crawled.

    When the ``COMPARE_WITH_DB`` setting is truthy (the default), the set of
    already-crawled user ids is pre-loaded from MongoDB at startup so that
    duplicates are filtered across runs, not just within one run.
    """

    def __init__(self):
        # Always define the dedup set. The original only created it inside
        # the COMPARE_WITH_DB branch, so a truthy read of the same setting
        # later in process_item could hit an AttributeError.
        self.users = set()
        if settings.get('COMPARE_WITH_DB', True):
            # Seed the set with user ids already stored in the collection.
            crawled_users = db.find({}, {'user_id': 1, '_id': 0})
            self.users = {user.get('user_id') for user in crawled_users if user}

    def process_item(self, item, spider):
        """Drop the item if its user was seen before; otherwise remember it.

        :param item: crawled item carrying a ``user_id`` field
        :param spider: the spider that produced the item (unused)
        :raises DropItem: when the user id was already processed
        :return: the item, when it is not a duplicate
        """
        if not settings.get('COMPARE_WITH_DB', True):
            # Dedup disabled — pass everything through.
            return item
        user_id = item.get('user_id')
        if user_id in self.users:
            raise DropItem("%s 已被处理" % item['user_id'])
        self.users.add(user_id)
        return item


class SaveUserInfoPipeline(object):
    """Persist crawled items into MongoDB via upsert."""

    def __init__(self):
        log.info('要保存的 Collenction：%s' % collection_name)

    def process_item(self, item, spider):
        """Upsert the item, keyed by the spider-specific primary field.

        :param item: crawled item
        :param spider: the spider that produced the item; decides the key
        :return: the item, unchanged, for downstream pipelines
        """
        data = dict(item)
        log.debug("最终入库数据：%s" % item)
        # Each spider application records a different primary key.
        key = 'weibo_id' if isinstance(spider, SinaWeiboSpider) else 'user_id'
        # upsert=True: insert when the record is absent, update otherwise.
        db.update_one({key: data.get(key)}, {"$set": data}, True)
        return item


class WeiboImagesPipeline(ImagesPipeline):
    """Download weibo images, storing them in one directory per user."""

    def get_media_requests(self, item, info):
        """Yield one download request per image URL on the item.

        :param item: crawled item; may carry a ``weibo_images`` URL list
        :param info: spider info
        """
        for url in item.get('weibo_images') or []:
            # Attach the item so file_path can read its user_id below.
            yield scrapy.Request(url=url, meta={'item': item})

    def item_completed(self, results, item, info):
        """Validate download results after all image requests finish.

        :param results: list of ``(success, detail)`` tuples from Scrapy
        :param item: crawled item
        :param info: spider info
        :raises DropItem: when no image downloaded successfully
        :return: the item, for downstream pipelines
        """
        # Only the SinaWeiboSpider application downloads weibo images.
        if isinstance(info.spider, SinaWeiboSpider):
            image_paths = [x['path'] for ok, x in results if ok]
            # Nothing was downloaded — discard the item.
            if not image_paths:
                raise DropItem('不是图片')
        return item

    def file_path(self, request, response=None, info=None):
        """Compute the storage path: ``<IMAGES_STORE>/<user_id>/<name>``.

        :param request: image download request (``meta['item']`` set above)
        :param response: download response
        :param info: spider info
        :return: path the image is written to
        """
        # Let the parent class compute its default "full/<hash>.<ext>" name.
        path = super(WeiboImagesPipeline, self).file_path(request, response, info)
        # BUG FIX: the original passed a single pre-concatenated string to
        # os.path.join; join the two components properly instead.
        file_dir = os.path.join(settings.get('IMAGES_STORE'),
                                request.meta['item'].get('user_id'))
        # BUG FIX: os.mkdir fails when intermediate directories are missing;
        # os.makedirs creates the whole chain.
        if not os.path.exists(file_dir):
            os.makedirs(file_dir)
        # Strip the parent's "full/" prefix to get the bare file name.
        image_name = path.replace("full/", "")
        image_path = os.path.join(file_dir, image_name)
        log.debug("文件保存路径：%s" % image_path)
        return image_path
