# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html


# useful for handling different item types with a single interface
from itemadapter import ItemAdapter
from scrapy.exporters import JsonLinesItemExporter
from scrapy.pipelines.images import ImagesPipeline
from scrapy.utils.project import get_project_settings
import scrapy
import os

class DouyuPipeline:
    """Export every scraped item to ``douyu.json``, one JSON object per line."""

    def open_spider(self, spider):
        # Open the output file when the spider actually starts (not in
        # __init__), so no file is created unless a crawl runs.
        self.file = open('douyu.json', 'wb')
        self.exporter = JsonLinesItemExporter(
            self.file, ensure_ascii=False, encoding='utf-8')
        # Exporter lifecycle contract: start before the first export_item().
        self.exporter.start_exporting()

    def process_item(self, item, spider):
        """Write one item as a JSON line and pass it on unchanged."""
        self.exporter.export_item(item)
        return item

    def close_spider(self, spider):
        """Finish exporting and close the output file when the spider stops."""
        self.exporter.finish_exporting()
        self.file.close()
        print('数据保存完毕！')

class ImagesPipeline(ImagesPipeline):
    """Download each item's image(s) and save them named after the item.

    NOTE(review): this class deliberately keeps the name ``ImagesPipeline``
    because ITEM_PIPELINES presumably references it by this name, but it
    shadows the imported ``scrapy.pipelines.images.ImagesPipeline`` — a
    distinct name (e.g. ``DouyuImagesPipeline``) would be clearer.
    """

    # Root directory for downloaded images, read from the project settings.
    IMAGES_STORE = get_project_settings().get("IMAGES_STORE")

    def get_media_requests(self, item, info):
        """Yield a download request for each URL in ``item['image_urls']``.

        Accepts either a single URL string or an iterable of URL strings,
        so it works both with this project's single-url field and with the
        conventional Scrapy ``image_urls`` list.
        """
        print(item['image_urls'])
        urls = item['image_urls']
        if isinstance(urls, str):
            urls = [urls]
        for url in urls:
            yield scrapy.Request(url)

    def item_completed(self, results, item, info):
        """Log download results, then pass the item to the next pipeline.

        ``results`` is a list of ``(success, info_dict)`` tuples; the paths
        of the successfully downloaded images are printed for debugging.
        """
        print('=' * 50)
        # Collect the storage paths of the downloads that succeeded.
        image_path = [x["path"] for ok, x in results if ok]
        print(image_path)
        print('=' * 50)
        # Full results also carry the url and checksum of each download.
        print(results)
        return item

    def file_path(self, request, response=None, info=None, *, item=None):
        """Return the storage path (relative to IMAGES_STORE) for one image.

        Signature matches the parent ``ImagesPipeline.file_path`` contract:
        Scrapy (>= 2.4) passes ``item`` as a keyword argument, so declaring
        it as the second positional parameter only worked by accident.
        """
        print(request.url)
        # assumes item['name'] is a filesystem-safe string — TODO confirm
        return item['name'] + '.png'
