# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html


# useful for handling different item types with a single interface
"""
当Item在Spider中被收集后，它将会被传递到Item Pipeline
Item Pipeline主要有以下典型应用：
1.清理HTML数据
2.验证爬取的数据合法性，检查Item是否包含某些字段
3.查重并丢弃
4.将爬取结果保存到文件或者数据库中

可以把items模块想象成水流，pipelines就是水管，水管可以对水施加过滤条件
"""
import scrapy
from itemadapter import ItemAdapter
import json
from scrapy.exceptions import DropItem
from scrapy.pipelines.images import ImagesPipeline


# 定制普通的 Item Pipeline 很简单，需要把每个Item Pipeline作为一个单独的python类，然后让它必须实现process_item方法，这个方法必须返回一个Item对象
# 或者抛出DropItem异常
# A plain Item Pipeline is just a Python class implementing process_item(),
# which must return an Item object or raise DropItem.
class CnBlogSpiderPipeline:
    """Serialize scraped items to a JSON-lines file, one item per line.

    Items without a truthy "title" field are discarded via DropItem.
    """

    def __init__(self):
        # "w" truncates output from any previous run; utf-8 keeps the
        # Chinese filename/content round-trippable.
        self.file = open(r".\爬取作者博客.json", "w", encoding="utf-8")

    def process_item(self, item, spider):
        """Write *item* as one JSON line.

        Returns the item unchanged on success; raises DropItem when the
        "title" field is missing or empty.
        """
        if not item["title"]:
            raise DropItem(f"Missing title in {item}!!!")
        # ensure_ascii=False keeps non-ASCII text human-readable in the file.
        line = json.dumps(dict(item), ensure_ascii=False) + "\n"
        self.file.write(line)
        return item

    def close_spider(self, spider):
        """Scrapy hook: release the file handle when the spider finishes.

        Without this the file was never closed, so buffered output could be
        lost when the process exits.
        """
        self.file.close()


class MyImagesPipeline(ImagesPipeline):
    """Image pipeline: downloads every URL listed in item['image_urls']."""

    def get_media_requests(self, item, info):
        """Produce one download Request per image URL on the item."""
        return (scrapy.Request(url) for url in item['image_urls'])

    def item_completed(self, results, item, info):
        """Called once all image requests for an item have finished.

        *results* is a list of (success, info_or_failure) pairs; collect the
        stored paths of successful downloads, drop the item when none
        succeeded, otherwise record the paths on the item.
        """
        image_paths = []
        for ok, result in results:
            if ok:
                image_paths.append(result["path"])
        print("image_paths are", image_paths)
        if not image_paths:
            raise DropItem("Item contains no images")
        item["image_paths"] = image_paths
        return item
