# -*- coding: utf-8 -*-

# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
from scrapy.pipelines.images import ImagesPipeline
from time import time
import scrapy
import codecs
import json
import MySQLdb
from MySQLdb import cursors
from scrapy.exporters import JsonItemExporter
from twisted.enterprise import adbapi


class MyscrapyPipeline(object):
    """Default no-op pipeline from Scrapy's project template.

    Forwards every item, unmodified, to the next enabled pipeline stage.
    """

    def process_item(self, item, spider):
        # Nothing to do here — just hand the item along.
        return item



# Hand-rolled JSON export: one JSON object per line (JSON Lines format).
class MyJsonPipeline(object):
    """Appends each scraped item to "myjson.json" as one JSON line."""

    def __init__(self):
        # utf-8 with ensure_ascii=False below keeps non-ASCII text readable.
        self.file = codecs.open("myjson.json", "w", encoding="utf-8")

    def process_item(self, item, spider):
        """Serialize the item to a JSON line, write it, and pass the item on."""
        lines = json.dumps(dict(item), ensure_ascii=False) + "\n"
        self.file.write(lines)
        # BUG FIX: the item must be returned so later pipelines receive it;
        # the original returned None, silently dropping items downstream.
        return item

    def close_spider(self, spider):
        # BUG FIX: Scrapy calls close_spider() on pipelines when the spider
        # finishes; the original's spider_closed() was never invoked, so the
        # file was never flushed/closed.
        self.file.close()

    # Backward-compatible alias for any code wired to the old method name.
    def spider_closed(self, spider):
        self.close_spider(spider)


# JSON export via Scrapy's built-in JsonItemExporter (single JSON array).
class MyJsonPipeline2(object):
    """Exports all items into "myjson.json" as one JSON array."""

    def __init__(self):
        # The exporter writes encoded bytes, so the file must be binary.
        self.file = open("myjson.json", 'wb')
        self.exporter = JsonItemExporter(self.file, encoding="utf-8", ensure_ascii=False)
        self.exporter.start_exporting()

    def process_item(self, item, spider):
        """Feed the item to the exporter and pass it on unchanged."""
        self.exporter.export_item(item)
        return item

    def close_spider(self, spider):
        # BUG FIX: Scrapy calls close_spider() on pipelines; the original's
        # spider_closed() was never invoked, so finish_exporting() never ran
        # and the output JSON array was left unterminated.
        self.exporter.finish_exporting()
        self.file.close()

    # Backward-compatible alias for any code wired to the old method name.
    def spider_closed(self, spider):
        self.close_spider(spider)


# Plain synchronous MySQL pipeline (blocks the crawler on every insert).
class MySQLPipeline(object):
    """Inserts each item as a row of the `test` table over a blocking connection."""

    def __init__(self):
        # NOTE(review): credentials are hard-coded; they belong in settings.py
        # (see MySQLAsynPipeline.from_settings below for the pattern).
        self.conn = MySQLdb.connect('127.0.0.1', 'root', '198608', 'scrapy_test', charset='utf8', use_unicode=True)
        self.cursor = self.conn.cursor()

    def process_item(self, item, spider):
        """Insert one row and hand the item to the next pipeline stage."""
        insert_sql = """
            insert into test(title,image_type,cover_image_url)values(%s,%s,%s)
        """
        # Parameterized query: the driver escapes the values safely.
        self.cursor.execute(insert_sql, (item['title'], item['image_type'], item['cover_image_url'][0]))
        self.conn.commit()
        # BUG FIX: return the item so later pipelines receive it; the
        # original returned None, silently dropping items downstream.
        return item

    def close_spider(self, spider):
        # BUG FIX: Scrapy calls close_spider(); release the cursor and
        # connection instead of leaking them when the spider stops.
        self.cursor.close()
        self.conn.close()

    # Backward-compatible alias for any code wired to the old method name.
    def spider_closed(self, spider):
        self.close_spider(spider)


# Asynchronous MySQL pipeline backed by Twisted's adbapi connection pool.
class MySQLAsynPipeline(object):
    """Inserts items into MySQL without blocking the crawler's reactor."""

    def __init__(self, dbpool):
        # adbapi.ConnectionPool shared by all inserts.
        self.dbpool = dbpool

    @classmethod
    def from_settings(cls, settings):
        """Scrapy hook: build the pipeline from the project settings."""
        dbparams = dict(
            host=settings["MYSQL_HOST"],
            db=settings["MYSQL_DBNAME"],
            user=settings["MYSQL_USER"],
            passwd=settings["MYSQL_PASSWORD"],
            charset="utf8",
            cursorclass=MySQLdb.cursors.DictCursor,
            use_unicode=True,
        )
        # Create the thread-backed connection pool.
        dbpool = adbapi.ConnectionPool("MySQLdb", **dbparams)
        return cls(dbpool)

    def process_item(self, item, spider):
        """Schedule the insert on the pool; report failures with context."""
        query = self.dbpool.runInteraction(self.do_insert, item)
        # Pass the item/spider so the errback can say WHICH item failed.
        query.addErrback(self.handle_error, item, spider)
        # BUG FIX: return the item so later pipelines receive it; the
        # original returned None, silently dropping items downstream.
        return item

    def handle_error(self, failure, item=None, spider=None):
        # Log the failure (and the offending item when known). Extra args
        # default to None so old call sites keep working.
        if item is not None:
            print(failure, item)
        else:
            print(failure)

    def do_insert(self, cursor, item):
        """Run inside the pool's transaction; adbapi commits on success."""
        insert_sql = """
                    insert into test(title,image_type,cover_image_url)values(%s,%s,%s)
                """
        cursor.execute(insert_sql, (item['title'], item['image_type'], item['cover_image_url'][0]))

# Custom image pipeline: files images under per-type folders and records
# the stored path back onto the item.
class MyImagePipeline(ImagesPipeline):
    """Downloads cover images and stores the saved file path on the item."""

    def get_media_requests(self, item, info):
        # One download request per cover URL; the item rides along in meta
        # so file_path() can read its image_type for the folder name.
        for image_url in item['cover_image_url']:
            yield scrapy.Request(url=image_url, meta={'item': item})

    def file_path(self, request, response=None, info=None):
        """Store each image as "<image_type>/<basename-of-url>"."""
        image_guid = request.url.split('/')[-1]
        path = request.meta["item"]["image_type"]
        return '%s/%s' % (path, image_guid)

    def item_completed(self, results, item, info):
        """Copy the last successful download path onto the item.

        BUG FIX: the original raised NameError when `results` was empty or
        every download failed (failed entries carry a Failure, not a dict
        with "path"). Skip failures and default to "" instead of crashing.
        """
        image_file_path = ""
        for ok, value in results:
            if ok:
                # Keep the LAST successful path, matching the old behavior.
                image_file_path = value["path"]
        item["image_file_path"] = image_file_path
        return item
