# -*- coding: utf-8 -*-

# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
import pymongo
import pymysql
import scrapy
from scrapy.pipelines.images import ImagesPipeline
from scrapy.exceptions import DropItem


class U17MysqlPipeline(object):
    """Persist scraped comic / chapter items into MySQL.

    Connection parameters come from the crawler settings:
    MYSQL_IP, MYSQL_PORT, MYSQL_USERNAME, MYSQL_PASSWORD, MYSQL_DATABASE.
    """

    def __init__(self, ip, port, username, password, database):
        self.ip = ip
        self.port = port
        self.username = username
        self.password = password
        self.database = database

    @classmethod
    def from_crawler(cls, crawler):
        """Alternate constructor called once by Scrapy.

        Reads the MySQL configuration from the crawler settings and
        passes it to __init__.
        """
        return cls(ip=crawler.settings.get('MYSQL_IP'),
                   username=crawler.settings.get('MYSQL_USERNAME'),
                   password=crawler.settings.get('MYSQL_PASSWORD'),
                   database=crawler.settings.get('MYSQL_DATABASE'),
                   port=crawler.settings.get('MYSQL_PORT'))

    def open_spider(self, spider):
        """Open the database connection when the spider starts.

        NOTE: PyMySQL 1.0 made connect() parameters keyword-only, so the
        original positional call crashes on modern pymysql — pass every
        parameter by keyword.
        """
        self.db = pymysql.connect(host=self.ip,
                                  user=self.username,
                                  password=self.password,
                                  database=self.database,
                                  port=self.port,
                                  charset='utf8')
        self.cursor = self.db.cursor()

    def close_spider(self, spider):
        """Release the cursor and the connection when the spider closes."""
        try:
            self.cursor.close()
        finally:
            self.db.close()

    def process_item(self, item, spider):
        """Insert the item into the table matching its collection name."""
        if item.collection == 'comic':
            sql = 'insert into comic (comic_id, name, cover, line1, line2) values (%s,%s,%s,%s,%s)'
            params = (item['comic_id'], item['name'], item['cover'], item['line1'], item['line2'])
        else:
            sql = 'insert into comic_chapter (comic_id, name, link) values (%s,%s,%s)'
            params = (item['comic_id'], item['name'], item['link'])
        try:
            self.cursor.execute(sql, params)
            self.db.commit()
        except Exception:
            # Roll back the failed statement so the connection is not left
            # in an aborted transaction that would block later inserts.
            self.db.rollback()
            raise
        return item


class U17PyMongoPipeline(object):
    """Write every scraped item into a MongoDB collection named by the item.

    Connection parameters come from the crawler settings:
    MONGO_IP, MONGO_PORT, MONGO_DB.
    """

    def __init__(self, ip, port, database):
        self.ip = ip
        self.port = port
        self.database = database

    @classmethod
    def from_crawler(cls, crawler):
        """Alternate constructor called once by Scrapy.

        Reads the MongoDB configuration from the crawler settings and
        passes it to __init__.
        """
        settings = crawler.settings
        return cls(ip=settings.get('MONGO_IP'),
                   port=settings.get('MONGO_PORT'),
                   database=settings.get('MONGO_DB'))

    def open_spider(self, spider):
        """Connect to MongoDB when the spider starts."""
        uri = f'mongodb://{self.ip}:{self.port}'
        self.client = pymongo.MongoClient(uri)
        self.db = self.client[self.database]

    def close_spider(self, spider):
        """Drop the MongoDB connection when the spider closes."""
        self.client.close()

    def process_item(self, item, spider):
        """Store the item in the collection selected by item.collection."""
        collection = self.db[item.collection]
        collection.insert_one(dict(item))
        return item


class U17ImagePipeline(ImagesPipeline):
    """Download comic cover images; drop items whose cover download failed."""

    def get_media_requests(self, item, info):
        """Yield a download request for the item's cover image, if present."""
        if 'cover' in item:
            # Carry the comic name in request.meta so file_path can use it
            # as the stored file name.
            yield scrapy.Request(url=item['cover'], meta={'comic_name': item['name']})

    def file_path(self, request, response=None, info=None, *, item=None):
        """Return the storage path for a downloaded cover image.

        Named after the comic rather than the URL tail. Accepts the
        keyword-only ``item`` argument that Scrapy >= 2.4 passes to
        file_path — the old three-argument override is deprecated there.
        """
        file_name = request.meta['comic_name'] + '.jpg'
        return file_name

    def item_completed(self, results, item, info):
        """Drop the item when its cover image could not be downloaded."""
        image_paths = [x['path'] for ok, x in results if ok]
        if not image_paths:
            raise DropItem('Image Downloaded Failed')
        return item
