# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html


# useful for handling different item types with a single interface
import scrapy


class TupianPipeline:
    """Minimal pass-through pipeline: logs each item and forwards it unchanged."""

    def process_item(self, item, spider):
        # Print for debugging/visibility, then hand the item to the next pipeline.
        print(item)
        return item


# class TupianPipelineMongoDB(object):
#     # Connect to the database
#     # Create a cursor / collection handle
#     # Insert the data
#     # Commit the transaction
#     # Close the connection
#     def open_spider(self, spider):
#         # Open the connection
#         self.conn = pymongo.MongoClient(host="127.0.0.1", port=27017)
#         self.db = self.conn["tupian"]
#         self.collection = self.db["tupian"]
#         self.collection.create_index("date")
#         self.collection.create_index("img_url")
#
#     def close_spider(self, spider):
#         self.conn.close()
#
#     def process_item(self, item, spider):
#         # Write the data
#         self.collection.insert_one(dict(item))
#         print(item)
#         return item

from scrapy.pipelines.images import ImagesPipeline

class MyTuPipeline(ImagesPipeline):
    """Image-download pipeline: fetches the URL in item["img_path"] and saves it.

    Subclasses Scrapy's ImagesPipeline; requires IMAGES_STORE to be configured
    in settings for the downloaded files to be written anywhere.
    """

    def get_media_requests(self, item, info):
        # Issue the download request for the image (GET is the default method).
        img_path = item["img_path"]
        print("==========>>>>>>>>>", img_path)
        # The Referer header is required by the target site to avoid hotlink
        # blocking; the URL is carried in meta so file_path() can derive a name.
        yield scrapy.Request(img_path, meta={"sss": img_path},
                             headers={"Referer": "https://desk.zol.com.cn/meinv/"})

    def file_path(self, request, response=None, info=None, *, item=None):
        """Return the storage path (relative to IMAGES_STORE) for one image."""
        # NOTE(review): "/sss/sss/" looks like a placeholder directory — confirm
        # the intended subfolder before deploying.
        file_path = "/sss/sss/"
        img_name = request.meta["sss"].split("/")[-1]
        rasl_path = file_path + img_name
        return rasl_path

    def item_completed(self, results, item, info):
        """Called when all image requests for an item have finished.

        Must return the item, otherwise downstream pipelines receive None and
        the item is effectively dropped (this was the bug: no return).
        """
        print("item_completed", results)
        return item
