# -*- coding: utf-8 -*-

# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
from scrapy.pipelines.images import ImagesPipeline
from scrapy.pipelines.files import FilesPipeline
from scrapy.exceptions import DropItem
from scrapy_test.settings import IMAGES_STORE
import requests,time,scrapy
import os
# Default request headers for image downloads; 'Referer' and 'Host' are
# overwritten per item by the pipelines below before each request goes out.
headers2 = {
    'Host': 'ii.hywly.com',
    'Connection': 'keep-alive',
    'Accept': 'image/webp,image/apng,image/*,*/*;q=0.8',
    'X-Requested-With': 'XMLHttpRequest',
    'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2272.89 Safari/537.36',
    'DNT': '1',
    'Accept-Encoding': 'gzip, deflate',
    'Accept-Language': 'zh-CN,zh;q=0.8,ja;q=0.6',
    'Referer': 'https://www.meituri.com/a/23183/',
    'Name': '',
}


class ScrapyTestPipeline(object):
    """Write a one-line ``url===title`` summary of every item to items.txt
    and copy the review URL into the item's ``images`` field.

    Items missing ``review_url``/``review_title`` are dropped.
    """

    def open_spider(self, spider):
        # Text mode ('w', not the original 'wb'): process_item writes str,
        # which would raise TypeError on a binary-mode file under Python 3.
        self.file = open('items.txt', 'w')

    def close_spider(self, spider):
        self.file.close()

    ## return a dict with data, return an Item (or any descendant class) object,
    ## return a Twisted Deferred or raise DropItem exception.
    ## Dropped items are no longer processed by further pipeline components.
    def process_item(self, item, spider):
        try:
            info = '%s===%s' % (item['review_url'], item['review_title'])
            item['images'] = item['review_url']
            self.file.write(info.strip() + '\n')
        except Exception as err:  # e.g. KeyError on a malformed item
            print(err)
            raise DropItem('Drop')
        return item


class ImageDownloadPipeline(ImagesPipeline):
    """Download item images with per-item Referer/Host headers and store
    each image under the per-item folder given in ``item['folder']``.

    (If a custom on-disk name is needed instead, override
    ``file_downloaded`` — see the FilesPipeline docs.)
    """

    def get_media_requests(self, item, info):
        # Work on a per-request copy so the shared module-level header
        # template is not mutated while several items are in flight.
        req_headers = dict(headers2)
        req_headers['Referer'] = item['referer']
        # 'Host' (capitalized) — the original set a second, lowercase
        # 'host' key alongside the template's 'Host' entry.
        req_headers['Host'] = item['host']
        for image_url in item['image_urls']:
            yield scrapy.Request(image_url, headers=req_headers,
                                 meta={"folder": item['folder']})

    def item_completed(self, results, item, info):
        print(results)
        # results is a list of (success, info_dict) tuples; keep only the
        # paths of successful downloads.
        image_paths = [x['path'] for ok, x in results if ok]
        if not image_paths:
            raise DropItem("Item contains no images")
        item['image_paths'] = image_paths
        return item

    def file_path(self, request, response=None, info=None):
        # Called when the image is about to be stored; returns the relative
        # storage path ("<folder>/<hash>.jpg").
        path = super(ImageDownloadPipeline, self).file_path(request, response, info)
        category_path = request.meta['folder']
        if not os.path.exists(category_path):
            os.makedirs(category_path)
        image_name = path.replace("full/", "")
        # os.path.join instead of a hard-coded "\\" so the path is correct
        # on non-Windows platforms too.
        return os.path.join(category_path, image_name)



class ImageTestPipeline(ImagesPipeline):
    """Fetch ``item['link']`` directly with requests (bypassing the Scrapy
    downloader) and save it under IMAGES_STORE with a timestamp filename.
    """

    # Overrides ImagesPipeline.item_completed to grab the download address.
    def item_completed(self, results, item, info):
        image_paths = item['link']
        item['image_urls'] = image_paths
        item['images'] = image_paths
        item['image_paths'] = image_paths
        if not image_paths:
            raise DropItem("Item contains no images")
        self.save_image(image_paths, item['file_urls'])
        # Return the item so later pipeline components still receive it;
        # the original fell through and returned None, dropping it silently.
        return item

    def save_image(self, url, referer_url):
        headers2['Referer'] = referer_url
        try:
            # timeout so a stalled server cannot hang the pipeline forever
            rsq = requests.get(url, headers=headers2, timeout=30)
            # 'with' guarantees the handle is closed even if the write fails
            with open(IMAGES_STORE + str(time.time()) + ".jpg", 'wb') as f:
                f.write(rsq.content)
        except Exception as err:
            # str(err), not err.message: the .message attribute no longer
            # exists on modern exception objects.
            print('%s>>>drop>>>%s' % (url, str(err)))
            raise DropItem('Drop')


class FileTestPipeline(FilesPipeline):
    """Pass-through files pipeline: keeps the default download behaviour
    and forwards every completed item unchanged."""

    # Overrides FilesPipeline.item_completed; no filtering is done here,
    # downstream pipeline components receive the item as-is.
    def item_completed(self, results, item, info):
        return item
