# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html


# useful for handling different item types with a single interface
from itemadapter import ItemAdapter
from scrapy.http import Request

class ArticlespiderPipeline:
    """Default no-op pipeline: forwards every item unchanged."""

    def process_item(self, item, spider):
        # Nothing to transform here; hand the item to the next pipeline stage.
        return item

# ImagePipeline
from scrapy.pipelines.images import ImagesPipeline
class ArticleImagePipeline(ImagesPipeline):
    """Download cover images and record their local file paths on the item.

    Expects items to carry a ``front_image_url`` field (a URL string or a
    list of URL strings) and writes the downloaded paths back into
    ``front_image_path``.
    """

    def get_media_requests(self, item, info):
        # Filter out invalid image URLs before scheduling any downloads.
        urls = item.get("front_image_url", [])
        if isinstance(urls, str):
            # Normalize a single URL string to a one-element list.
            urls = [urls]
        valid_urls = [u for u in urls if u and u.startswith("http")]
        if not valid_urls:
            info.spider.logger.warning(f"No valid image URLs for item: {item}")
        return [Request(u) for u in valid_urls]

    def item_completed(self, results, item, info):
        try:
            # Keep only the paths of successfully downloaded images.
            image_file_paths = [value["path"] for ok, value in results if ok]
            if image_file_paths:
                item["front_image_path"] = image_file_paths  # list of saved paths
            else:
                item["front_image_path"] = None  # nothing downloaded successfully
        except Exception as e:
            # BUGFIX: use .get() here — the original item['front_image_url']
            # could raise KeyError inside the handler and mask the real error.
            info.spider.logger.error(
                f"Error downloading image: {item.get('front_image_url')}, error: {str(e)}"
            )
            item["front_image_path"] = None  # mark the item as having no image
        return item

import json
class JsonWithEncodingPipeline(object):
    """Export items to a local JSON-lines file (one JSON object per line)."""

    def __init__(self):
        # Append mode so repeated crawls accumulate rather than overwrite.
        self.file = open('article.json', 'a', encoding='utf-8')

    def process_item(self, item, spider):
        # ensure_ascii=False keeps non-ASCII (e.g. Chinese) text readable
        # in the output file instead of \uXXXX escapes.
        lines = json.dumps(dict(item), ensure_ascii=False) + "\n"
        self.file.write(lines)
        return item

    def close_spider(self, spider):
        # BUGFIX: Scrapy invokes close_spider(), not spider_closed(); the
        # original method name was never called, leaking the file handle.
        self.file.close()

    # Backward-compatible alias for any code that called the old name.
    spider_closed = close_spider