# -*- coding: utf-8 -*-

# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
from urllib.request import urlretrieve
import os
import  requests
class XiaohuaSpiderPipeline(object):
    """Scrapy item pipeline that saves downloaded images for the 'xiaohua' spider.

    For each item it builds the path
    ``<this file's directory>/img/<folder_name>/<img_name>``, fetches
    ``item['img_url']`` with requests, and writes the response bytes to disk.
    Items produced by other spiders are passed through untouched.
    """

    def process_item(self, item, spider):
        """Download the image described by *item*, then return the item.

        Args:
            item: mapping providing 'folder_name', 'img_url' and 'img_name'.
            spider: the spider that yielded the item; only the spider named
                'xiaohua' is handled here.

        Returns:
            The (unmodified) item, so later pipeline stages still receive it.
        """
        if spider.name != 'xiaohua':
            # BUG FIX: the original returned None for other spiders, which
            # silently dropped their items from the pipeline chain.
            return item

        print(item['folder_name'], item['img_url'], item['img_name'])

        # BUG FIX: the original used os.path.join(dirname + 'img'), which
        # concatenated 'img' onto the directory name (e.g. '.../pipelinesimg')
        # instead of creating an 'img' subdirectory.
        base_dir = os.path.join(os.path.dirname(__file__), 'img')
        img_dir = os.path.join(base_dir, item['folder_name'])
        # exist_ok avoids the racy exists()-then-makedirs() check.
        os.makedirs(img_dir, exist_ok=True)
        img_path = os.path.join(img_dir, item['img_name'])

        img_url = item['img_url']
        resp = requests.get(img_url, timeout=30)
        if resp.status_code != 200:
            # BUG FIX: on a failed request the original fell through to the
            # file write with img_bytes undefined (NameError) and still
            # created an empty file; skip the write instead.
            print(f'{img_url}下载失败')
            return item

        with open(img_path, mode='wb') as f:
            f.write(resp.content)
        print(f'{img_url}下载成功')
        return item

