# -*- coding: utf-8 -*-
import pymysql
import pymysql.cursors
import pymongo
import logging
import scrapy
try:
    # Python 3.x
    from urllib.parse import quote_plus
except ImportError:
    # Python 2.x
    from urllib import quote_plus

from scrapy.pipelines.images import ImagesPipeline
from scrapy.exporters import JsonItemExporter, CsvItemExporter, JsonLinesItemExporter
from scrapy import signals
from twisted.enterprise import adbapi


logger = logging.getLogger(__name__)


# class SpiderPipeline(object):
#     def process_item(self, item, spider):
#         return item


class InsertMongodbPipeline(object):
    """
    Persist scraped items into MongoDB, one collection per spider name.

    Connection parameters are read from the crawler settings:
    MONGODB_USER / MONGODB_PASSWORD / MONGODB_HOST / MONGODB_PORT / MONGODB_DB.
    """

    @classmethod
    def from_crawler(cls, crawler):
        # Build the pipeline from project settings so credentials are not
        # hard-coded here.
        return cls(
            mongodb_user=crawler.settings.get('MONGODB_USER'),
            mongodb_password=crawler.settings.get('MONGODB_PASSWORD'),
            mongodb_host=crawler.settings.get('MONGODB_HOST'),
            mongodb_port=crawler.settings.get('MONGODB_PORT'),
            mongodb_db=crawler.settings.get('MONGODB_DB'),
        )

    def __init__(self, mongodb_user, mongodb_password, mongodb_host, mongodb_port, mongodb_db):
        self.mongodb_user = mongodb_user
        self.mongodb_password = mongodb_password
        self.mongodb_host = mongodb_host
        self.mongodb_port = mongodb_port
        self.mongodb_db = mongodb_db

    def open_spider(self, spider):
        # One client per spider run; user/password are URL-escaped so special
        # characters in the credentials cannot break the connection URI.
        logger.info("InsertMongodbPipeline: open_spider")
        self.client = pymongo.MongoClient(
            "mongodb://%s:%s@%s:%s" % (
                quote_plus(self.mongodb_user),
                quote_plus(self.mongodb_password),
                self.mongodb_host,
                self.mongodb_port,
            )
        )
        self.db = self.client.get_database(self.mongodb_db)

    def close_spider(self, spider):
        logger.info("InsertMongodbPipeline: close_spider")
        self.client.close()

    def process_item(self, item, spider):
        """Insert the item into the collection named after the spider.

        The item is always passed on to later pipelines; insert failures are
        logged instead of aborting the crawl.
        """
        try:
            result = self.db.get_collection(spider.name).insert_one(dict(item))
            if result.inserted_id:
                logger.info('InsertMongodbPipeline-添加第【{0}】页下第{1}条成功{2}'.format(item["page"], item["index"], result.inserted_id))
        except Exception as e:
            # Log failures at ERROR level (the original used INFO, which hides
            # them), but keep the crawl running.
            logger.error('InsertMongodbPipeline-添加第【{0}】页下第{1}条失败{2}'.format(item["page"], item["index"], e))
        # BUG FIX: `return item` previously sat in a `finally` block, which
        # would also swallow any exception raised inside the except handler.
        return item


class CustomImagesPipeline(ImagesPipeline):
    """
    Override ImagesPipeline.item_completed to record the local storage path of
    each downloaded image in item["image_files"].

    ``results`` is a list of ``(ok, value)`` tuples: ``ok`` is the download
    status; on success ``value`` is a dict whose ``"path"`` key holds the
    storage path, on failure ``value`` is a Failure object.
    """

    def item_completed(self, results, item, info):
        if "image_urls" in item:
            for ok, value in results:
                if ok:
                    item["image_files"].append(value["path"])
                else:
                    # BUG FIX: the old code indexed `value` unconditionally;
                    # on a failed download `value` is a Failure, not a dict,
                    # so `value["path"]` raised and was only papered over by a
                    # broad except. Record a placeholder instead.
                    item["image_files"].append("full/default.png")
        return item


class AsyInsterMysqlPipeline(object):
    """
    Insert items into MySQL asynchronously through a twisted adbapi
    connection pool, so DB writes do not block the crawl.
    """

    def __init__(self, dbpoll):
        # adbapi.ConnectionPool shared by every insert this pipeline performs.
        self.dbpoll = dbpoll

    @classmethod
    def from_settings(cls, settings):
        """Build the pipeline (and its connection pool) from project settings."""
        dbparms = dict(
            host=settings["MYSQL_HOST"],
            db=settings["MYSQL_DBNAME"],
            user=settings["MYSQL_USER"],
            passwd=settings["MYSQL_PASSWORD"],
            charset=settings["MYSQL_CHARSET"],
            cursorclass=pymysql.cursors.DictCursor,
            use_unicode=True,
        )
        dbpoll = adbapi.ConnectionPool("pymysql", **dbparms)
        return cls(dbpoll)

    def process_item(self, item, spider):
        """Schedule an async insert and pass the item on to later pipelines."""
        query = self.dbpoll.runInteraction(self.do_insert, item)
        query.addErrback(self.handle_error, item, spider)
        # BUG FIX: the original returned None here, so every pipeline running
        # after this one received None instead of the item.
        return item

    def handle_error(self, failure, item, spider):
        # Log the actual failure at ERROR level; the original logged only a
        # bare message at INFO and discarded the failure details.
        logger.error('异步插入item到mysql失败: %s', failure)

    def do_insert(self, cursor, item):
        # Runs in a pool worker; expects item.get_insert_sql_mysql() to return
        # (sql, params) for a parameterized execute.
        sql, params = item.get_insert_sql_mysql()
        try:
            cursor.execute(sql, params)
        except Exception as e:
            logger.error('插入item:{0}失败:{1}'.format(item["title"], e))


# class JsonExporterPipleline(object):
#     """
#     通过scrapy中JsonItemExporter类实现item数据以json形式保存到本地
#     [{"name": "Color TV", "price": "1200"},{"name": "DVD player", "price": "200"}]
#     JSON序列化格式非常简单,灵活,但是它不能很好地伸缩以来大量数据增量解析JSON解析器中并没有很好的支持,他们中的大多数只是解析整个对象在内存中
#     """
#
#     def __init__(self):
#         self.files = {}
#         self.save_path = os.path.normpath(os.path.join(os.path.dirname(
#             os.path.dirname(__file__)),"media\jsonfiles\taobao.json"))
#
#     @classmethod
#     def from_crawler(cls, crawler):
#         pipeline = cls()
#         crawler.signals.connect(pipeline.spider_opened, signals.spider_opened)
#         crawler.signals.connect(pipeline.spider_closed, signals.spider_closed)
#         return pipeline
#
#     def spider_opened(self, spider):
#         logger.info("JsonExporterPipleline:spider_opened")
#         file = open(self.save_path, "wb")
#         self.files[spider] = file
#         self.exporter = JsonItemExporter(file, encoding="utf-8", ensure_ascii=False)
#         self.exporter.start_exporting()
#
#     def spider_closed(self, spider):
#         logger.info("JsonExporterPipleline:spider_closed")
#         self.exporter.finish_exporting()
#         file = self.files.pop(spider)
#         file.close()
#
#     def process_item(self, item, spider):
#         logger.info("JsonExporterPipleline:process_item")
#         self.exporter.export_item(item)
#         return item


# class JsonLinesExporterPipleline(object):
#     """
#     通过scrapy中JsonLinesItemExporter类实现item数据以json形式保存到本地
#     {"name": "Color TV", "price": "1200"}
#     {"name": "DVD player", "price": "200"}
#     """
#
#     def __init__(self):
#         self.files = {}
#         self.save_path = os.path.normpath(os.path.join(os.path.dirname(
#             os.path.dirname(__file__)),"media\jsonfiles\taobao_lines.json"))
#
#     @classmethod
#     def from_crawler(cls, crawler):
#         pipeline = cls()
#         crawler.signals.connect(pipeline.spider_opened, signals.spider_opened)
#         crawler.signals.connect(pipeline.spider_closed, signals.spider_closed)
#         return pipeline
#
#     def spider_opened(self, spider):
#         logger.info("JsonLinesExporterPipleline:spider_opened")
#         file = open(self.save_path, "wb")
#         self.files[spider] = file
#         self.exporter = JsonLinesItemExporter(file, encoding="utf-8", ensure_ascii=False)
#         self.exporter.start_exporting()
#
#     def spider_closed(self, spider):
#         logger.info("JsonLinesExporterPipleline:spider_closed")
#         self.exporter.finish_exporting()
#         file = self.files.pop(spider)
#         file.close()
#
#     def process_item(self, item, spider):
#         logger.info("JsonLinesExporterPipleline:process_item")
#         self.exporter.export_item(item)
#         return item


# class CSVExporterPipleline(object):
#     """
#     通过scrapy中CsvItemExporter类实现item数据以csv形式保存到本地
#     """
#     def __init__(self):
#         self.save_path = os.path.normpath(os.path.join(os.path.dirname(
#             os.path.dirname(__file__)), "media\csvfiles\taobao.csv"))
#
#     @classmethod
#     def from_crawler(cls, crawler):
#         pipeline = cls()
#         crawler.signals.connect(pipeline.spider_opened, signals.spider_opened)
#         crawler.signals.connect(pipeline.spider_closed, signals.spider_closed)
#         return pipeline
#
#     def spider_opened(self, spider):
#         logger.info("CSVExporterPipleline:spider_opened")
#         self.file = open(self.save_path, 'w+')#a+
#         self.file.write("标题,商品链接,图片下载地址,市场价,淘宝价,爬取时间")
#         # self.exporter = CsvItemExporter(file)
#         # self.exporter.fields_to_export = ["title", "url", "image_urls", "market_price", "taobao_price", "push_date"]
#         # self.exporter.encoding = "utf-8"
#         # self.exporter.start_exporting()
#
#     def spider_closed(self, spider):
#         logger.info("CSVExporterPipleline:spider_closed")
#         # self.exporter.finish_exporting()
#         self.file.close()
#
#     def process_item(self, item, spider):
#         logger.info("CSVExporterPipleline:process_item")
#         # self.exporter.export_item(item)
#         if item["image_urls"]:
#             item["image_urls"] = item["image_urls"][0]
#         if item["push_date"]:
#             item["push_date"] = item["push_date"].now().strftime("%Y-%m-%d %H:%M:%S")
#         list = [item["title"], item["url"], item["image_urls"], item["market_price"], item["taobao_price"], item["push_date"]]
#         line = "\n"+",".join(list)
#         self.file.write(line)
#         return item


# class Excel2003ExporterPipleline(object):
#     """
#     item数据以excel2003形式保存到本地
#     """
#     def __init__(self):
#         self.save_path = os.path.normpath(os.path.join(os.path.dirname(
#             os.path.dirname(__file__)), "media\csvfiles\taobao.xls"))
#         self.head_data = ["标题", "商品链接", "图片下载地址", "市场价格", "淘宝价格", "爬取时间"]
#
#     @classmethod
#     def from_crawler(cls, crawler):
#         pipeline = cls()
#         crawler.signals.connect(pipeline.spider_opened, signals.spider_opened)
#         crawler.signals.connect(pipeline.spider_closed, signals.spider_closed)
#         return pipeline
#
#     def spider_opened(self, spider):
#         logger.info("Excel2003ExporterPipleline:spider_opened")
#         self.workbook = xlwt.Workbook(encoding="utf-8")
#         self.worksheet = self.workbook.add_sheet("淘宝家具")
#         for colnum in range(0, len(self.head_data)):
#             self.worksheet.write(0, colnum, self.head_data[colnum], xlwt.easyxf("font:bold on"))
#
#     def spider_closed(self, spider):
#         logger.info("Excel2003ExporterPipleline:spider_closed")
#         self.workbook = None
#
#     def process_item(self, item, spider):
#         logger.info("Excel2003ExporterPipleline:process_item")
#         if item["image_urls"]:
#             item["image_urls"] = item["image_urls"][0]
#         if item["push_date"]:
#             item["push_date"] = item["push_date"].now().strftime("%Y-%m-%d %H:%M:%S")
#         list = [item["title"], item["url"], item["image_urls"], item["market_price"], item["taobao_price"],item["push_date"]]
#         for colnum in range(0, len(self.head_data)):
#             self.worksheet.write(item["index"], colnum, list[colnum])
#         self.workbook.save(self.save_path)
#         return item




