# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html


# useful for handling different item types with a single interface
import pymongo
from itemadapter import ItemAdapter
from .settings import mongo_host, mongo_port, mongo_db, mongo_db_table
from scrapy.pipelines.images import ImagesPipeline


class CnblogPipeline:
    """Store scraped items in MongoDB.

    Connection parameters come from the project settings module
    (mongo_host / mongo_port / mongo_db / mongo_db_table).
    """

    def __init__(self):
        # MongoClient connects lazily, so no explicit auth/ping is needed here.
        self.conn = pymongo.MongoClient(host=mongo_host, port=mongo_port)
        self.db = self.conn[mongo_db]

    def process_item(self, item, spider):
        """Insert one item into the configured collection and pass it on.

        Note: ``Collection.insert`` was removed in PyMongo 4.x; use
        ``insert_one`` instead.
        """
        self.db[mongo_db_table].insert_one(ItemAdapter(item).asdict())
        return item

    def close_spider(self, spider):
        """Called by Scrapy when the spider closes; release the connection."""
        self.conn.close()


class CnblogImagePipeline(ImagesPipeline):
    """Subclass of ImagesPipeline that overrides ``item_completed``
    to record the local download path of the image on the item.
    """

    def item_completed(self, results, item, info):
        """Copy the stored path of the last *successful* download into
        ``item["img_url"]`` and return the item.

        ``results`` is a list of ``(ok, value)`` tuples; when ``ok`` is
        False, ``value`` is a Twisted Failure (not a dict), so indexing
        it with ``value["path"]`` would raise — only successful results
        are considered here.
        """
        image_path = ''
        for ok, value in results:
            if ok:  # skip failed downloads: their value has no "path" key
                image_path = value["path"]
        item["img_url"] = image_path
        return item
