# -*- coding: utf-8 -*-

# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html

import os
import pycurl
import pymongo
import datetime

from scrapy.pipelines.images import ImagesPipeline,FilesPipeline
from scrapy import Spider, Request,FormRequest
from scrapy.exceptions import DropItem
from MultipleSpider.items import BiliItem
from MultipleSpider.items import ProxyItem

class MultipleSpiderPipeline(object):
    """Default no-op pipeline: passes every item through unchanged."""

    def process_item(self, item, spider):
        # Nothing to do here; hand the item to the next pipeline stage.
        return item


#=========================================================
class BiliImagesPipeline(ImagesPipeline):
    """Downloads cover images for BiliItem records via Scrapy's ImagesPipeline.

    Non-BiliItem items pass through untouched so other pipelines can run.
    """

    # Base headers for image requests. Do NOT mutate this dict per request
    # (it is shared class state); get_media_requests builds a copy instead.
    default_headers = {
        #'accept': 'image/webp,image/*,*/*;q=0.8',
        #'accept-encoding': 'gzip, deflate, sdch, br',
        #'accept-language': 'zh-CN,zh;q=0.8,en;q=0.6',
        #'cookie': 'bid=yQdC/AzTaCw',
        #'referer': 'https://www.douban.com/photos/photo/2370443040/',
        #'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.116 Safari/537.36',
    }

    def process_item(self, item, spider):
        """Run image downloading only for BiliItem; pass other items through.

        Returning the parent's Deferred (instead of discarding it and
        returning the item immediately) makes Scrapy actually wait for the
        downloads and run item_completed before the item moves on.
        """
        if not isinstance(item, BiliItem):
            return item
        return ImagesPipeline.process_item(self, item, spider)

    def file_path(self, request, response=None, info=None):
        """Store each image as <subClass>/<pk>.<ext>.

        The extension is taken from the URL actually being downloaded
        (request.url) rather than always from image_urls[0].
        """
        item = request.meta['key']
        ext = request.url.split('.')[-1]
        return "%s/%s.%s" % (item['subClass'], item['pk'], ext)

    def get_media_requests(self, item, info):
        """Yield one download Request per image URL.

        Raises DropItem when the item carries no image_urls field. A fresh
        headers dict is built per request so concurrent downloads don't
        race on the shared default_headers.
        """
        if not 'image_urls' in item:
            raise DropItem("File not image_urls in %s" % item['pk'])
        for image_url in item['image_urls']:
            headers = dict(self.default_headers)
            headers['referer'] = image_url
            yield Request(image_url, headers=headers, meta={'key': item})

    def item_completed(self, results, item, info):
        """Record the stored paths on the item; drop it if nothing downloaded."""
        image_path = [x['path'] for ok, x in results if ok]
        if not image_path:
            raise DropItem('Item contains no images')
        item['image_paths'] = image_path
        return item

class BiliVideoPipeline(object):
    """Downloads each BiliItem's video with pycurl into VIDEO_STORE.

    Skips the download when a complete local copy already exists (same
    byte size as the server reports); drops the item when nothing was
    downloaded.
    """

    @classmethod
    def from_crawler(cls, crawler):
        # VIDEO_STORE: root directory for downloaded video files.
        return cls(
            video_store=crawler.settings.get('VIDEO_STORE'),
        )

    def __init__(self, video_store):
        self.video_store = video_store

    def _remote_size(self, video_url):
        """Return the server-reported content length for video_url.

        Uses a body-less request (NOBODY). May be -1 when the server does
        not report a length. The curl handle is always released, even on
        error -- the original code leaked it.
        """
        curl = pycurl.Curl()
        try:
            curl.setopt(pycurl.URL, video_url)
            curl.setopt(curl.NOBODY, True)
            curl.perform()
            return curl.getinfo(curl.CONTENT_LENGTH_DOWNLOAD)
        finally:
            curl.close()

    def process_item(self, item, spider):
        """Download item['video_url'] to <VIDEO_STORE>/<subClass>/<pk>.<exType>.

        Raises DropItem when the file already exists with the same size as
        on the server, or when the download produced zero bytes. Sets
        item['video_path'] (relative path) on success.
        """
        if not isinstance(item, BiliItem):
            return item

        file_dir = '%s/%s' % (self.video_store, item['subClass'])
        if not os.path.exists(file_dir):  # ensure the target directory exists
            os.makedirs(file_dir)
        file_path = '%s/%s.%s' % (file_dir, item['pk'], item['exType'])

        video_url = item['video_url']
        headers = ['Referer:%s' % (item['web_url']), 'Accept:*/*', 'User-Agent:Mozilla/5.0 (Windows NT 6.1; WOW64; rv:32.0) Gecko/20100101 Firefox/32.0']

        # Skip re-downloading a file that is already complete locally.
        if os.path.exists(file_path):
            server_file_size = self._remote_size(video_url)
            local_file_size = os.path.getsize(file_path)
            if server_file_size == local_file_size:
                raise DropItem("File exists in %s" % file_path)

        # Real download. try/finally + `with` guarantee the curl handle and
        # the output file are closed even if perform() raises (the original
        # leaked both on error).
        curl_obj = pycurl.Curl()
        try:
            curl_obj.setopt(pycurl.URL, video_url)
            curl_obj.setopt(pycurl.HTTPHEADER, headers)
            curl_obj.setopt(pycurl.CONNECTTIMEOUT, 60 * 1)
            curl_obj.setopt(pycurl.NOPROGRESS, 0)
            #curl_obj.setopt(pycurl.FOLLOWLOCATION, 1)
            #curl_obj.setopt(pycurl.MAXREDIRS, 5)
            curl_obj.setopt(pycurl.TIMEOUT, 60 * 20)
            with open(file_path, "wb") as fp:
                curl_obj.setopt(pycurl.WRITEDATA, fp)
                curl_obj.perform()
            download_size = curl_obj.getinfo(pycurl.SIZE_DOWNLOAD)
        finally:
            curl_obj.close()

        if download_size == 0:
            # Clean up the empty/partial file before dropping the item.
            if os.path.exists(file_path):
                os.remove(file_path)
            raise DropItem("Down fail in %s" % video_url)

        item['video_path'] = '%s/%s.%s' % (item['subClass'], item['pk'], item['exType'])
        return item

class BiliMongoPipeline(object):
    """Upserts BiliItem records into the 'bili' MongoDB collection, keyed by pk."""

    collection_name = 'bili'

    @classmethod
    def from_crawler(cls, crawler):
        # MONGO_URI / MONGO_DATABASE come from the project settings.
        return cls(
            mongo_uri=crawler.settings.get('MONGO_URI'),
            mongo_db=crawler.settings.get('MONGO_DATABASE')
        )

    def __init__(self, mongo_uri, mongo_db):
        self.mongo_uri = mongo_uri
        self.mongo_db = mongo_db

    def open_spider(self, spider):
        # One client per spider run; closed in close_spider.
        self.client = pymongo.MongoClient(self.mongo_uri)
        self.db = self.client[self.mongo_db]

    def close_spider(self, spider):
        self.client.close()

    def process_item(self, item, spider):
        """Stamp the item with its insert time and upsert it by 'pk'.

        Non-BiliItem items pass through untouched.
        """
        if not isinstance(item, BiliItem):
            return item
        item['input_time'] = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        # replace_one(..., upsert=True) is the supported equivalent of the
        # deprecated Collection.update(filter, doc, True) full-document form
        # (update() was removed in pymongo 4).
        self.db[self.collection_name].replace_one({'pk': item['pk']}, dict(item), upsert=True)
        return item


#=========================================================
class ProxyMongoPipeline(object):
    """Upserts ProxyItem records into the 'proxy' MongoDB collection, keyed by ip."""

    collection_name = 'proxy'

    @classmethod
    def from_crawler(cls, crawler):
        # MONGO_URI / MONGO_DATABASE come from the project settings.
        return cls(
            mongo_uri=crawler.settings.get('MONGO_URI'),
            mongo_db=crawler.settings.get('MONGO_DATABASE')
        )

    def __init__(self, mongo_uri, mongo_db):
        self.mongo_uri = mongo_uri
        self.mongo_db = mongo_db

    def open_spider(self, spider):
        # One client per spider run; closed in close_spider.
        self.client = pymongo.MongoClient(self.mongo_uri)
        self.db = self.client[self.mongo_db]

    def close_spider(self, spider):
        self.client.close()

    def process_item(self, item, spider):
        """Stamp the item with its insert time and upsert it by 'ip'.

        Non-ProxyItem items pass through untouched.
        """
        if not isinstance(item, ProxyItem):
            return item
        item['input_time'] = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        # replace_one(..., upsert=True) is the supported equivalent of the
        # deprecated Collection.update(filter, doc, True) full-document form
        # (update() was removed in pymongo 4).
        self.db[self.collection_name].replace_one({'ip': item['ip']}, dict(item), upsert=True)
        return item

