# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html


# useful for handling different item types with a single interface
from itemadapter import ItemAdapter
import json
import codecs
import scrapy
class Scrapy01Pipeline:
    """Basic pass-through item pipeline.

    Earlier revisions wrote each item to ``test1.csv`` and ``test1.json``;
    that code has been retired.  The pipeline now only traces that it was
    reached and forwards the item unchanged to the next pipeline stage.
    """

    def __init__(self):
        # No files or other resources are acquired anymore.
        pass

    def process_item(self, item, spider):
        """Forward *item* to the next pipeline stage unchanged.

        The original version fell through and returned ``None`` (its
        ``return item`` sat inside a dead triple-quoted string), which
        would have fed ``None`` into every later pipeline.  Scrapy's
        contract is that ``process_item`` returns the item (or raises
        ``DropItem``), so the item is returned here.
        """
        # Debug trace left by the original author (runtime string kept as-is).
        print("进入失败")
        return item

    def open_spider(self, spider):
        # Nothing to open.
        pass

    def close_spider(self, spider):
        # Nothing to close; the retired file handles are gone.
        pass
## Pipeline class used to export (download) the item images
from scrapy.pipelines.images import ImagesPipeline
class bilibili_dancePipeline(ImagesPipeline):
    """Image pipeline that downloads each item's cover picture.

    Overrides the ImagesPipeline hooks so the downloaded file is saved
    under the item's ``bvid`` instead of the default SHA1-hash name.
    Assumes each item carries ``pic`` (image URL) and ``bvid`` keys —
    confirm against the spider that yields these items.
    """

    def get_media_requests(self, item, info):
        # Request the image URL; carry bvid through request.meta so
        # file_path() can use it as the stored file name.
        yield scrapy.Request(item['pic'], meta={'bvid': item['bvid']})

    def file_path(self, request, response=None, info=None, *, item=None):
        # Name the stored file after the video's bvid rather than the
        # default hash-based name.
        return request.meta['bvid'] + ".jpg"

    def item_completed(self, results, item, info):
        """Log stored image paths and pass the item on.

        Fixes two defects in the original: it indexed
        ``results[0][1]['path']`` unconditionally (IndexError/KeyError on
        an empty or failed download) and returned ``None``, which drops
        the item for any later pipeline stage.
        """
        for ok, result in results:
            if ok:
                # result is the info dict only for successful downloads.
                print(result['path'])
        return item