# -*- coding: utf-8 -*-

# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
import os
from gridfs import GridFS
import scrapy
from pymongo.mongo_client import MongoClient
from scrapy.pipelines.images import ImagesPipeline
from BBSSpider.items import LectureItem, ArticleItem, ReplyItem
from BBSSpider.settings import MONGO_DATABASE, MONGO_URI, MONGO_COLLECTION, IMAGES_STORE
try:
    from cStringIO import StringIO as BytesIO
except ImportError:
    from io import BytesIO
from PIL import Image


class ImgPipeline(ImagesPipeline):
    """Download item images via Scrapy and archive each file into MongoDB GridFS.

    After the images pipeline stores files under ``IMAGES_STORE``, each file is
    re-encoded as JPEG and written to GridFS; the GridFS ids and file names are
    recorded on the item (``image_ids`` / ``image_guid``).
    """

    # Shared, class-level Mongo connection: one client for the whole crawl.
    collections_name = MONGO_COLLECTION['image']
    client = MongoClient(MONGO_URI)
    db = client[MONGO_DATABASE]
    fs = GridFS(db, collections_name)

    def get_media_requests(self, item, info):
        # Schedule one download request per image URL collected by the spider.
        for image_url in item['image_urls']:
            yield scrapy.Request(image_url)

    def item_completed(self, results, item, info):
        """Store each successfully downloaded image in GridFS and tag the item."""
        # Keep only the downloads that succeeded (ok is True).
        file_paths = [x['path'] for ok, x in results if ok]

        item['image_ids'] = []
        item['image_guid'] = []

        for file_path in file_paths:
            # Scrapy image paths always use '/' separators; take the file name.
            image_guid = os.path.basename(file_path)
            with Image.open(os.path.join(IMAGES_STORE, file_path)) as image:
                # JPEG cannot encode alpha/palette modes; convert first so
                # RGBA/P images do not crash the encoder.
                if image.mode != 'RGB':
                    image = image.convert('RGB')
                buf = BytesIO()
                image.save(buf, 'JPEG')
                # BUG FIX: previously the buffer was passed without rewinding,
                # so GridFS read from EOF and stored zero-byte files.  Passing
                # the raw bytes avoids the positioning issue entirely.
                file_id = self.fs.put(buf.getvalue(), filename=image_guid)

                item['image_ids'].append(file_id)
                item['image_guid'].append(image_guid)

        # BUG FIX: do NOT close the shared class-level client here — closing it
        # after the first item broke GridFS writes for every later item.
        return item

import logging
import logging.handlers

# Rotating log for item-processing events.  NOTE: despite the historical
# "lecture_" prefix on these names, this single logger records ALL item
# types handled by SavePipeline (lectures, articles and replies).
lecture_log_file = 'log/article_item.log'
lecture_log_fmt = '%(asctime)s : %(name)s - %(message)s'

# BUG FIX: RotatingFileHandler opens the file eagerly, so importing this
# module crashed with FileNotFoundError when the 'log' directory was
# missing.  Create it up front.
os.makedirs(os.path.dirname(lecture_log_file), exist_ok=True)

logger = logging.getLogger('item_log')
lecture_handler = logging.handlers.RotatingFileHandler(lecture_log_file,
                                                       maxBytes=1024 * 1024,
                                                       encoding='utf-8',
                                                       backupCount=5)
lecture_fmt = logging.Formatter(lecture_log_fmt)
lecture_handler.setFormatter(lecture_fmt)

# Guard against stacking duplicate handlers if this module is imported or
# reloaded more than once (would duplicate every log line).
if not logger.handlers:
    logger.addHandler(lecture_handler)
logger.setLevel(logging.INFO)


class SavePipeline(object):
    """Persist scraped items into their per-type MongoDB collections.

    Lecture, article and reply items are routed to separate collections
    (names come from the MONGO_COLLECTION setting).  The Mongo connection
    is opened/closed with the spider lifecycle.
    """

    lecture_collections_name = MONGO_COLLECTION['lecture']
    article_collections_name = MONGO_COLLECTION['article']
    # NOTE: 'replay' (sic) is the key used in settings; kept for compatibility.
    replay_collections_name = MONGO_COLLECTION['replay']

    def __init__(self, mongo_uri, mongo_db):
        # Only store connection parameters here; the actual client is
        # created lazily in open_spider.
        self.mongo_uri = mongo_uri
        self.mongo_db = mongo_db

    @classmethod
    def from_crawler(cls, crawler):
        """Scrapy factory hook: build the pipeline from crawler settings."""
        return cls(
            mongo_uri=crawler.settings.get('MONGO_URI'),
            mongo_db=crawler.settings.get('MONGO_DATABASE'),
        )

    def open_spider(self, spider):
        # One client per spider run; released in close_spider.
        self.client = MongoClient(self.mongo_uri)
        self.db = self.client[self.mongo_db]

    def close_spider(self, spider):
        self.client.close()

    def process_item(self, item, spider):
        """Log and insert the item into its collection; return it unchanged."""
        # BUG FIX: Collection.save() was deprecated in PyMongo 3.x and
        # removed in 4.0.  dict(item) never carries an _id here, so save()
        # always behaved as a plain insert — insert_one is the equivalent.
        if isinstance(item, LectureItem):
            logger.info('LectureItem, sequence %s ', item['sequence'])
            self.db[self.lecture_collections_name].insert_one(dict(item))
        elif isinstance(item, ArticleItem):
            logger.info('ArticleItem, category:%s , sub_category:%s , sequence %s ', item['header']['category'], item['header']['sub_category'], item['sequence'])
            self.db[self.article_collections_name].insert_one(dict(item))
        elif isinstance(item, ReplyItem):
            # Replies are stored silently (no log line) to keep the log small.
            self.db[self.replay_collections_name].insert_one(dict(item))

        return item
