# -*- coding: utf-8 -*-

# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html

from scrapy.contrib.pipeline.images import ImagesPipeline
from scrapy.exceptions import DropItem
from scrapy.http import Request
import requests
import os

class JavPipeline(object):
    """Scrapy pipeline that downloads each item's image to ./images/<title>/.

    Expects items carrying 'title', 'image_name', and 'image_urls' (a single
    URL string — note the name, despite being singular in content).  Images
    already present on disk are skipped; downloads smaller than
    MIN_IMAGE_FILE_SIZE bytes are assumed to be failed/error responses and
    are deleted.
    """

    # Downloads below this size (bytes) are treated as failures and removed.
    MIN_IMAGE_FILE_SIZE = 30000

    # Browser-like request headers so the image host serves the file.
    # Built once at class level instead of per item.
    HEADERS = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11',
        'Accept': 'text/html;q=0.9,*/*;q=0.8',
        'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.3',
        'Accept-Encoding': 'gzip',
        'Connection': 'close',
    }

    def process_item(self, item, spider):
        """Download item['image_urls'] into ./images/<title>/<image_name>.

        Returns the item unchanged so downstream pipelines still receive it.
        Network or HTTP errors propagate after any partial file is removed.
        """
        path = os.path.join("./images", item['title'])
        jpgpath = os.path.join(path, item['image_name'])
        print(jpgpath)
        # Create the target directory if missing (exist_ok avoids the
        # check-then-create race of the exists()/makedirs() pair).
        os.makedirs(path, exist_ok=True)
        if os.path.exists(jpgpath):
            print('%s already exists' % jpgpath)
            return item
        # Stream the download so large images are not held in memory;
        # the 'with' block guarantees the connection is released.
        with requests.get(item['image_urls'], stream=True, timeout=10,
                          headers=self.HEADERS) as r:
            # Fail before opening the output file so an HTTP error page
            # is never written to disk as a fake .jpg.
            r.raise_for_status()
            try:
                with open(jpgpath, "wb") as downloadfile:
                    for chunk in r.iter_content(chunk_size=512):
                        if chunk:
                            downloadfile.write(chunk)
            except Exception:
                # Don't leave a truncated file behind on a mid-stream failure.
                if os.path.exists(jpgpath):
                    os.remove(jpgpath)
                raise
        # A too-small file indicates the download failed (e.g. a stub
        # or error body) — remove it so a retry can re-download it.
        if os.path.getsize(jpgpath) < self.MIN_IMAGE_FILE_SIZE:
            print(jpgpath + " deleted: file too small!")
            os.remove(jpgpath)
        return item

    


        












