# -*- coding: utf-8 -*-

# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
import scrapy

import logging
import os

from pymysql import cursors

from scrapy.pipelines.images import ImagesPipeline
from twisted.enterprise import adbapi

# Module-level logger named after this module (standard Scrapy/py logging practice).
logger = logging.getLogger(__name__)


class MyImagesPipeline(ImagesPipeline):
    """Download an item's images and store them under deterministic paths.

    Each image is saved as ``<trans_key>/<trans_key>_<id>_<index>.png``;
    after the downloads finish, ``item['image_urls']`` is replaced by the
    comma-joined list of stored paths.
    """

    def get_media_requests(self, item, info):
        """Yield one download request per image URL, tagging it with the item
        and the URL's position in the list."""
        # TODO: handle items whose image url list is empty.
        # Use enumerate, not list.index(): index() returns the FIRST matching
        # position, so duplicate URLs would all share one index (overwriting
        # each other on disk) — and it is an O(n) scan per URL.
        for index, image_url in enumerate(item['image_urls']):
            yield scrapy.Request(url=image_url, meta={'item': item, 'index': index})

    def file_path(self, request, response=None, info=None):
        """Build the storage path (relative to IMAGES_STORE) for one image."""
        item = request.meta['item']  # item passed along via meta above
        trans_key = item['trans_key']
        index = request.meta['index']
        # Name format: <trans_key>_<id>_<index>, e.g. 牛仔裤_sn_10717788608_0
        image_name = "%s_%s_%s" % (trans_key, item['id'], index)

        # TODO: for testing, files are saved with a .png extension.
        image_path = '%s/%s.png' % (trans_key, image_name)
        return image_path

    def item_completed(self, results, item, info):
        """Record the stored paths of successfully downloaded images."""
        image_paths = [result['path'] for ok, result in results if ok]
        # TODO: if the url list was empty the path list is empty too, but the
        # item must still be saved — so do NOT drop it here.
        # if not image_paths:
        #     raise DropItem("Item contains no images")
        item['image_urls'] = ','.join(image_paths)
        return item


class MySQLPipeline(object):
    """Persist scraped items into MySQL via a Twisted adbapi connection pool
    (non-blocking inside the Scrapy/Twisted reactor)."""

    def __init__(self, db_pool):
        # adbapi.ConnectionPool created in from_settings()
        self.db_pool = db_pool

    @classmethod
    def from_settings(cls, settings):
        """Build the pipeline from the project settings.

        Reads the connection parameters (HOST/USERNAME/PASSWORD/PORT/DB_NAME/
        CHARSET) and creates a pymysql-backed connection pool.
        """
        db_params = dict(
            host=settings['HOST'],
            user=settings['USERNAME'],
            password=settings['PASSWORD'],
            port=settings['PORT'],
            database=settings['DB_NAME'],
            charset=settings['CHARSET'],
            use_unicode=True,
            # Return rows as dicts rather than tuples.
            cursorclass=cursors.DictCursor
        )
        # Create the connection pool; 'pymysql' is the DB-API module name.
        db_pool = adbapi.ConnectionPool('pymysql', **db_params)

        # Return a pipeline instance wired to the pool.
        return cls(db_pool)

    def process_item(self, item, spider):
        """Queue the INSERT on the pool; errors are routed to _handle_error."""
        query = self.db_pool.runInteraction(self._insert, item)
        query.addErrback(self._handle_error, item, spider)
        return item

    @staticmethod
    def _insert(cursor, item):
        """Insert one item into common_sku.

        Uses a parameterized query: the scraped values are untrusted, and
        %-interpolating them into the SQL string would break on quotes and
        open the door to SQL injection. The driver escapes each %s value.
        """
        # TODO: confirm whether price belongs in this table.
        # common_sku(id, website, keyword, trans_key, name, price, label, image_urls)
        sql = (
            "INSERT INTO common_sku"
            "(`id`, `website`, `keyword`, `trans_key`, `name`, `price`, `label`, `image_urls`) "
            "VALUES (%s, %s, %s, %s, %s, %s, NULL, %s)"
        )
        cursor.execute(sql, (
            item['id'], item['website'], item['keyword'], item['trans_key'],
            item['name'], item['price'], item['image_urls'],
        ))

    @staticmethod
    def _handle_error(failure, item, spider):
        """Log a failed insert without crashing the spider."""
        logger.warning(failure)


