# -*- coding: utf-8 -*-

# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html

import codecs
import json
import logging
import os

import scrapy
from pymysql import cursors

from scrapy.pipelines.images import ImagesPipeline
from twisted.enterprise import adbapi

logger = logging.getLogger(__name__)


class JsonPipeline(object):
    """Stream scraped items into a JSON array file on disk.

    The opening bracket is written when the spider starts and the closing
    bracket when it finishes; items are serialized one at a time in between.
    """

    def __init__(self):
        # NOTE(review): hard-coded Windows path — consider moving to settings.
        self.file = codecs.open(filename='D:/sku.json', mode='w+', encoding='utf-8')
        self.file.write('[')
        # Whether the next item is the first one written.  Emitting the
        # ',\n' separator *before* each subsequent item keeps the file a
        # valid JSON array at close time, replacing the old seek/truncate
        # hack (which corrupted the file when the spider yielded no items).
        self.first_item = True

    def process_item(self, item, spider):
        data = {
            "name": item['name'],
            "url": item['url'],
            "images": item['images'],
        }
        if not self.first_item:
            self.file.write(',\n')
        self.first_item = False
        self.file.write(json.dumps(data, ensure_ascii=False))
        # Bug fix: the item must be returned so pipelines ordered after
        # this one still receive it (the original returned None).
        return item

    def close_spider(self, spider):
        # No trailing separator to strip — just terminate the array.
        self.file.write(']')
        self.file.close()


class MyImagesPipeline(ImagesPipeline):
    """Download an item's images and store them under deterministic names."""

    def get_media_requests(self, item, info):
        # TODO: skip items whose image URL list is empty.
        # Bug fix: enumerate() gives every request its own position.  The
        # old list.index(image_url) lookup returned the index of the *first*
        # occurrence, so duplicate URLs shared one index (and one filename),
        # silently overwriting each other's downloads.
        for index, image_url in enumerate(item['images']):
            yield scrapy.Request(url=image_url, meta={'item': item, 'index': index})

    def file_path(self, request, response=None, info=None):
        item = request.meta['item']  # the item forwarded via meta above
        trans_key = item['trans_key']
        index = request.meta['index']
        # Naming pattern, e.g. jeans_sn_10717788608_0.jpg
        image_name = "%s_%s_%s" % (trans_key, item['id'], index)

        # TODO: saved as .png for testing.
        image_path = '%s/%s.png' % (trans_key, image_name)
        return image_path

    def item_completed(self, results, item, info):
        # Keep only the stored paths of successful downloads.
        image_paths = [x['path'] for ok, x in results if ok]
        # TODO: if the URL list was empty the paths are empty too, but the
        # item must still be saved — so do not DropItem here.
        # if not image_paths:
        #     raise DropItem("Item contains no images")
        item['images'] = ','.join(image_paths)
        return item


class MySQLPipeline(object):
    """Persist scraped items into MySQL through a Twisted adbapi pool."""

    def __init__(self, db_pool):
        self.count = 0  # number of items seen (kept for crawl statistics)
        self.db_pool = db_pool

    @classmethod
    def from_settings(cls, settings):
        """Build the pipeline from the Scrapy settings object."""
        db_params = dict(
            host=settings['HOST'],
            user=settings['USERNAME'],
            password=settings['PASSWORD'],
            port=settings['PORT'],
            database=settings['DATABASE'],
            charset=settings['CHARSET'],
            use_unicode=True,
            # Return rows as dicts rather than tuples.
            cursorclass=cursors.DictCursor
        )
        db_pool = adbapi.ConnectionPool('pymysql', **db_params)
        return cls(db_pool)

    def process_item(self, item, spider):
        # Run the insert on a pool thread; failures are routed to the errback.
        query = self.db_pool.runInteraction(self._insert, item)
        query.addErrback(self._handle_error, item, spider)
        return item

    @staticmethod
    def _insert(cursor, item):
        """Insert the item into the `sku` and `price` tables.

        Security fix: values are bound as query parameters instead of being
        %-interpolated into the SQL text.  Scraped data is untrusted, so the
        old string formatting was both an SQL-injection vector and broken
        for any value containing a quote.
        """
        # sku(id, source, keyword, trans_key, name, label, image_urls)
        sql1 = ("INSERT INTO sku(`id`, `source`, `keyword`, `trans_key`, `name`, `label`, `image_urls`) "
                "VALUES (%s, %s, %s, %s, %s, null, %s)")
        cursor.execute(sql1, (item['id'], item['source'], item['keyword'],
                              item['trans_key'], item['name'], item['images']))

        # price(id, time, price) — `time` defaults to the current timestamp.
        sql2 = "INSERT INTO price(`id`, `price`) VALUES (%s, %s)"
        cursor.execute(sql2, (item['id'], item['price']))

    @staticmethod
    def _handle_error(failure, item, spider):
        # scrapy.log is deprecated, so report through the module-level
        # stdlib logger instead.
        logger.warning(failure)
