# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
import json
import os
import requests
from urllib import request
# useful for handling different item types with a single interface
from itemadapter import ItemAdapter

class ImagePipeline:
    """Scrapy item pipeline that downloads each item's image to an ``images`` dir.

    Expects items carrying ``image_url`` (the image URL) and ``name_id``
    (used as the output file name).
    """

    def open_spider(self, spider):
        # Create an "images" folder next to the project package (one level
        # above this file), not relative to the current working directory.
        self.path = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'images')
        # makedirs(exist_ok=True) is atomic w.r.t. the exists/mkdir race.
        os.makedirs(self.path, exist_ok=True)

    def process_item(self, item, spider):
        url = item['image_url']
        name_id = item['name_id']

        # Fetch the image bytes; a browser-like User-Agent avoids naive bot blocks.
        r = requests.get(url, headers={'user-agent' : 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36'})
        # Don't save an error page as a .jpg — fail loudly on HTTP errors.
        r.raise_for_status()

        # Write into the directory computed in open_spider (the original wrote
        # to cwd-relative 'images/', which breaks when the spider is launched
        # from another directory). 'wb' instead of 'ab': appending on re-runs
        # would corrupt the image. The context manager closes the handle,
        # fixing the leak of the never-closed self.fp.
        with open(os.path.join(self.path, name_id + '.jpg'), 'wb') as fp:
            fp.write(r.content)

        # Return the item so later pipelines (e.g. XiaozhuPipeline) receive it.
        return item

class XiaozhuPipeline:
    """Scrapy item pipeline that appends each item as one JSON line to content.json."""

    def open_spider(self, spider):
        # Announce startup, then open the output file for the spider's lifetime.
        print('打开爬虫')
        self.fb = open('content.json', 'w+', encoding='utf-8')

    def process_item(self, item, spider):
        # Echo the item for debugging, serialize it (keeping non-ASCII text
        # readable), and emit it as a single JSON line.
        print(item)
        self.fb.write(json.dumps(dict(item), ensure_ascii=False) + '\n')
        return item

    def close_spider(self, spider):
        # Release the file handle once the spider finishes.
        print('关闭爬虫')
        self.fb.close()
