# -*- coding: utf-8 -*-

import codecs
import json

import pymysql
import scrapy
from scrapy.exporters import JsonItemExporter
from scrapy.pipelines.images import ImagesPipeline
from twisted.enterprise import adbapi


class ArticlespiderPipeline(object):
    """Default no-op pipeline: passes every item through unchanged."""

    def process_item(self, item, spider):
        # Nothing to do here — hand the item to the next pipeline stage.
        return item


class MysqlPipeline(object):
    """Synchronous MySQL pipeline: one blocking INSERT per item."""

    def __init__(self):
        # Keyword arguments: positional connect() args are deprecated in PyMySQL.
        self.conn = pymysql.connect(host='localhost', user='root', password='0000',
                                    database='crawed', charset='utf8', use_unicode=True)
        self.cursor = self.conn.cursor()

    def process_item(self, item, spider):
        """Insert one article row and commit; item must carry title/url/date/fav_nums."""
        insert_sql = """insert into article(title,url,create_date,fav_nums) values (%s,%s,%s,%s)"""
        self.cursor.execute(insert_sql, (item['title'], item['url'], item['date'], item['fav_nums']))
        self.conn.commit()
        # Return the item so later pipelines still receive it (the original
        # returned None, silently dropping items from the pipeline chain).
        return item

    def close_spider(self, spider):
        # Release the cursor and connection when the spider finishes;
        # the original leaked the connection for the process lifetime.
        self.cursor.close()
        self.conn.close()


class MysqlTwistePipeline(object):
    """Asynchronous MySQL pipeline using Twisted's adbapi connection pool.

    Each item class supplies its own SQL via ``item.get_insert_sql()``, so one
    pipeline serves every item type.
    """

    def __init__(self, dbpool):
        self.dbpool = dbpool

    @classmethod
    def from_settings(cls, settings):
        """Build the pipeline from Scrapy settings (MYSQL_HOST/DB/USER/PASSWORD)."""
        dbparms = dict(
            host=settings['MYSQL_HOST'],
            db=settings['MYSQL_DB'],
            user=settings['MYSQL_USER'],
            password=settings['MYSQL_PASSWORD'],
            charset='utf8',
            cursorclass=pymysql.cursors.DictCursor,
            use_unicode=True,
        )
        dbpool = adbapi.ConnectionPool('pymysql', **dbparms)
        return cls(dbpool)

    def process_item(self, item, spider):
        # Run the insert asynchronously on Twisted's thread pool so the
        # reactor (and the crawl) is not blocked by MySQL latency.
        query = self.dbpool.runInteraction(self.do_insert, item)
        query.addErrback(self.handle_error)  # log insert failures
        # Return the item so downstream pipelines keep receiving it
        # (the original returned None, dropping items from the chain).
        return item

    def handle_error(self, failure):
        # Log the failure of an asynchronous insert.
        print(failure)

    def do_insert(self, cursor, item):
        """Execute the item's own INSERT inside an adbapi interaction."""
        insert_sql, params = item.get_insert_sql()
        try:
            cursor.execute(insert_sql, params)
            print('插入成功')
        except Exception as e:
            # Include the exception so failures are diagnosable; the original
            # printed a generic message and discarded `e` entirely.
            print('插入失败', e)


class ArticleImagesPipeline(ImagesPipeline):
    """ImagesPipeline subclass that records each image's stored path on the item."""

    def item_completed(self, results, item, info):
        """Copy the stored file path of a successful download onto item['img_path'].

        ``results`` is a list of ``(success, value)`` tuples; ``value`` is a
        dict with ``'path'`` only on success (on failure it is a Failure), so
        guard on the flag — the original indexed ``value['path']`` blindly and
        crashed on any failed download.
        """
        if "img_url" in item:
            for ok, value in results:
                if ok:
                    item['img_path'] = value['path']
        return item

    def get_media_requests(self, item, info):
        """Schedule one image download per URL in item['img_url']."""
        if "img_url" in item:
            # enumerate gives the true position; the original used
            # list.index(url), which returns the first match and is wrong
            # when the same URL appears more than once.
            for index, img_url in enumerate(item['img_url']):
                # meta carries the item and position for file_path() renaming
                yield scrapy.Request(img_url, meta={'item': item, 'index': index})

    def file_path(self, request, response=None, info=None):
        """Name the stored file after the item title plus the URL's extension."""
        item = request.meta['item']  # item passed through via request meta
        if "img_url" in item:
            index = request.meta['index']  # position of this URL in item['img_url']
            # Extension taken from the last path segment of the URL (jpg, png, ...).
            # NOTE(review): item['title'] may contain characters illegal in
            # filenames — consider sanitizing; verify against spiders.
            image_guid = item['title'] + '.' + request.url.split('/')[-1].split('.')[-1]
            filename = u'full/{0}'.format(image_guid)
            return filename


class JsonExporterPipeline(object):
    """Export items to articleexpoter.json via Scrapy's JsonItemExporter.

    Composes (rather than inherits) JsonItemExporter: the original subclassed
    it but never called its ``__init__``, so the inheritance bought nothing
    and left the instance half-initialized as an exporter.
    """

    def __init__(self):
        self.file = open('articleexpoter.json', 'wb')
        self.exporter = JsonItemExporter(self.file, encoding='utf-8', ensure_ascii=False)
        self.exporter.start_exporting()  # write the opening bracket

    def close_spider(self, spider=None):
        # Scrapy calls close_spider(spider) at shutdown; the original one-arg
        # signature would have raised TypeError when invoked by the framework.
        self.exporter.finish_exporting()  # write the closing bracket
        self.file.close()

    def process_item(self, item, spider):
        self.exporter.export_item(item)
        return item


class JsonWithEncodingPipeline(object):
    """Hand-rolled JSON-lines export: one JSON object per line, UTF-8 encoded."""

    def __init__(self):
        self.file = codecs.open('article.json', 'w', encoding='utf-8')

    def process_item(self, item, spider):
        """Serialize the item as one JSON line; ensure_ascii=False keeps
        non-ASCII text (e.g. Chinese) human-readable."""
        line = json.dumps(dict(item), ensure_ascii=False) + '\n'
        self.file.write(line)
        return item

    def close_spider(self, spider):
        # Scrapy invokes close_spider(spider) on pipelines at shutdown. The
        # original only defined spider_closed(), which the framework never
        # calls on a pipeline, so the file handle leaked.
        self.file.close()

    def spider_closed(self, spider=None):
        # Kept for backward compatibility with any manual callers.
        self.close_spider(spider)


class ElasticSearchPipeline(object):
    """Persist items to Elasticsearch via the item's own save_to_es() hook."""

    def process_item(self, item, spider):
        # Each item type knows how to serialize and index itself into ES,
        # so the pipeline just delegates and passes the item along.
        item.save_to_es()
        return item
