# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html


# useful for handling different item types with a single interface
from itemadapter import ItemAdapter

import os
import pymongo

# Pipeline module: persists scraped items (images to disk, metadata to MongoDB)
class FmPipeline:
    """Persist scraped items: image items to ./download/, info items to MongoDB.

    Requires the pipeline to be activated in ITEM_PIPELINES; Scrapy then
    calls open_spider/close_spider around the crawl and process_item per item.
    """

    def open_spider(self, spider):
        """Open one MongoDB connection for the whole spider run.

        Creating a client per item (the previous approach) leaks
        connections; Scrapy invokes this hook exactly once at startup.
        """
        self.mongo_client = pymongo.MongoClient()
        self.collection = self.mongo_client['py_spider']['qingtingFM']

    def close_spider(self, spider):
        """Release the MongoDB connection when the spider finishes."""
        self.mongo_client.close()

    def process_item(self, item, spider):
        """
        :param item: dict/Item produced by the spider's parse method;
            expected keys: 'type', and per type either
            'image_name'/'image_content' or info fields such as 'title'
        :param spider: the spider that yielded the item
        :return: the item itself, so downstream pipelines keep processing it
        """
        type_ = item.get('type')

        if type_ == 'image':
            download_path = os.path.join(os.getcwd(), 'download')
            # exist_ok=True avoids the check-then-create race of
            # os.path.exists + os.mkdir
            os.makedirs(download_path, exist_ok=True)

            image_name = item.get('image_name')
            image_content = item.get('image_content')
            # Guard against partial items: writing None would raise TypeError.
            if image_name and image_content is not None:
                with open(os.path.join(download_path, image_name), 'wb') as file:
                    file.write(image_content)
                    print('图片保存成功:', image_name)
        elif type_ == 'info':
            # pymongo only accepts plain mappings; ItemAdapter converts both
            # dicts and scrapy.Item instances uniformly.
            self.collection.insert_one(ItemAdapter(item).asdict())
            print('数据插入成功:', item.get('title'))
        else:
            print('数据类型不符合规定...')

        # Scrapy contract: process_item must return the item (or raise
        # DropItem); returning None starves every later pipeline.
        return item
