# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html


# useful for handling different item types with a single interface
import datetime
import os
import urllib
import urllib.request

from itemadapter import ItemAdapter


class SportsPunchPhotosPipeline:
    """Scrapy pipeline that downloads each scraped photo into a dated local folder.

    Expects items with:
      - ``item["URL"]``: absolute URL of the photo to download.
      - ``item["number"]``: 0-based index used for progress output and the filename.
    """

    def process_item(self, item, spider):
        # Create the destination folder (named by today's date).
        # exist_ok=True avoids the check-then-create race of the original
        # os.path.exists() / os.makedirs() pair.
        folder_name = datetime.datetime.now().strftime("%Y-%m-%d") + " 智慧体育摄像头采集照片"
        os.makedirs(folder_name, exist_ok=True)

        # Progress report before the download starts.
        print("[INFO] 正在下载第 %d 个文件...\n[URL] %s" % ((item["number"] + 1), item["URL"]))

        # The site requires an authenticated session cookie, refreshed
        # externally into cookie.txt (only the first line is used).
        with open("cookie.txt", "r", encoding="UTF-8") as cookief:
            cookie = cookief.readline()
        header = {
            "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/15.3 Safari/605.1.15",
            "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
            "Cookie": cookie,
        }
        request = urllib.request.Request(url=item["URL"], data=None, headers=header)
        # Close the HTTP response deterministically (the original leaked the socket).
        with urllib.request.urlopen(request) as response:
            photo_bytes = response.read()
        with open(os.path.join(folder_name, str(item["number"]) + ".jpg"), "wb") as f:
            f.write(photo_bytes)

        # Scrapy pipeline contract: return the item so any later pipeline
        # in ITEM_PIPELINES still receives it (the original returned None,
        # which makes Scrapy treat the item as dropped downstream).
        return item