# -*- coding: utf-8 -*-

# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
import codecs
import json
import os
import random
import time
from NewsSpider.tools.kafka_export import Kafka_
from NewsSpider.tools.solr_ import Solr_Data
import pandas as pd
import pymongo
import pymysql
from scrapy import Request
from scrapy.exceptions import DropItem
from scrapy.exporters import JsonItemExporter
from scrapy.pipelines.images import ImagesPipeline
from twisted.enterprise import adbapi
from NewsSpider.tools.redis_db import Redis_DB


class MyScrapyPipeline(object):
    """Default no-op pipeline: passes every item through unchanged."""

    def process_item(self, item, spider):
        # Nothing to do for this project-template pipeline.
        return item


# MongoDB writer pipeline
class MongoPipeline(object):
    """Persist every item into MongoDB.

    Each item is expected to expose a ``collection`` attribute naming the
    target collection. Connection settings come from the MONGO_URI and
    MONGO_DB crawler settings.
    """

    def __init__(self, mongo_uri, mongo_db):
        self.mongo_uri = mongo_uri
        self.mongo_db = mongo_db

    @classmethod
    def from_crawler(cls, crawler):
        # Standard Scrapy factory: pull connection config from settings.
        return cls(
            mongo_uri=crawler.settings.get('MONGO_URI'),
            mongo_db=crawler.settings.get('MONGO_DB')
        )

    def open_spider(self, spider):
        self.client = pymongo.MongoClient(self.mongo_uri)
        self.db = self.client[self.mongo_db]

    def process_item(self, item, spider):
        # BUG FIX: Collection.insert() is deprecated since pymongo 3.0 and
        # removed in pymongo 4.0 — insert_one() is the supported API for a
        # single document.
        self.db[item.collection].insert_one(dict(item))
        return item

    def close_spider(self, spider):
        self.client.close()


class MysqlPipeline(object):
    """Synchronous MySQL writer.

    Each item supplies its own INSERT statement via ``item.get_insert_sql()``
    (returning the SQL string and its parameter tuple).
    """

    def __init__(self, host, port, database, user, passwd, use_unicode, charset='utf-8'):
        # NOTE(review): the charset is deliberately pinned to utf8mb4 (full
        # Unicode incl. emoji); the ``charset`` parameter is kept only for
        # signature compatibility with existing callers.
        self.config = {
            'host': host,
            'port': port,  # MySQL port
            'user': user,
            'password': passwd,
            'db': database,  # database name
            'charset': 'utf8mb4',
            'use_unicode': use_unicode,
            # DictCursor makes fetches return dicts instead of tuples.
            'cursorclass': pymysql.cursors.DictCursor,
        }

    @classmethod
    def from_settings(cls, settings):
        return cls(
            host=settings['MYSQL_HOST'],
            port=settings['MYSQL_PORT'],
            database=settings['MYSQL_DATABASE'],
            user=settings['MYSQL_USER'],
            passwd=settings['MYSQL_PASSWORD'],
            use_unicode=settings['MYSQL_USE_UNICODE'],
        )

    def process_item(self, item, spider):
        insert_sql, params = item.get_insert_sql()
        try:
            self.cursor.execute(insert_sql, params)
            self.db.commit()
        except Exception as e:
            # BUG FIX: roll back the failed transaction so the connection
            # stays usable for subsequent items, and log through the spider
            # logger instead of a bare print. Always return the item so the
            # rest of the pipeline chain still receives it (the old code
            # returned None on failure).
            self.db.rollback()
            spider.logger.error('MysqlPipeline insert failed: %s', e)
        return item

    def open_spider(self, spider):
        self.db = pymysql.connect(**self.config)
        self.cursor = self.db.cursor()

    def close_spider(self, spider):
        # Close the cursor as well as the connection (was previously leaked).
        self.cursor.close()
        self.db.close()


# Asynchronous MySQL insert via Twisted
class TwistedMysqlPipelines(object):
    """Insert items into MySQL asynchronously on Twisted's adbapi thread pool."""

    def __init__(self, dbpool):
        self.dbpool = dbpool

    @classmethod
    def from_settings(cls, settings):
        dbparms = dict(
            host=settings['MYSQL_HOST'],
            db=settings['MYSQL_DATABASE'],
            user=settings['MYSQL_USER'],
            passwd=settings['MYSQL_PASSWORD'],
            charset=settings['MYSQL_CHARSET'],
            cursorclass=pymysql.cursors.DictCursor,
            use_unicode=settings['MYSQL_USE_UNICODE']
        )
        dbpool = adbapi.ConnectionPool('pymysql', **dbparms)
        return cls(dbpool)

    def process_item(self, item, spider):
        # Schedule the insert on the connection pool; failures are routed
        # to handle_error via the errback (failure must be the first arg).
        query = self.dbpool.runInteraction(self.do_insert, item)
        query.addErrback(self.handle_error, item, spider)
        # BUG FIX: a pipeline must return the item (or a Deferred), otherwise
        # every later pipeline stage receives None.
        return item

    def handle_error(self, failure, item, spider):
        # Log asynchronous insert failures through the spider logger.
        spider.logger.error('TwistedMysqlPipelines出现错误%s' % failure)

    def do_insert(self, cursor, item):
        # Runs on a pool thread: each item builds its own INSERT statement.
        insert_sql, params = item.get_insert_sql()
        cursor.execute(insert_sql, params)


# Image-download pipeline: instead of iterating the IMAGES_URLS_FIELD list
# (the ImagesPipeline default), it requests item['image_url'] directly.
class ImagePipeline(ImagesPipeline):

    def file_path(self, request, response=None, info=None):
        # Store each image under full/<last path segment of the URL>.
        return 'full/%s' % request.url.rsplit('/', 1)[-1]

    def get_media_requests(self, item, info):
        # Hand the single image URL straight to the scheduler for download.
        yield Request(item['image_url'])

    def item_completed(self, results, item, info):
        """Record the stored path of the first successful download.

        ``results`` is a list of ``(ok, value)`` tuples produced by
        ImagesPipeline, where ``value`` is a dict with 'checksum', 'path'
        and 'url' on success, or a Failure otherwise. The item is dropped
        when nothing was downloaded.
        """
        stored = next((value['path'] for ok, value in results if ok), None)
        if stored is None:
            raise DropItem("Image下载失败!")
        item['image_path'] = stored
        return item


class JsonExporterPipleline(object):
    """Export all items to articleexport.json via scrapy's JsonItemExporter."""

    def __init__(self):
        # Exporter writes bytes, so the file is opened in binary mode.
        self.file = open('articleexport.json', 'wb')
        self.exporter = JsonItemExporter(self.file, encoding='utf-8', ensure_ascii=False)
        self.exporter.start_exporting()

    def process_item(self, item, spider):
        self.exporter.export_item(item)
        return item

    def close_spider(self, spider):
        # Let the exporter write its closing bracket before releasing the file.
        self.exporter.finish_exporting()
        spider.logger.info("[%s]写入文件完成!" % spider.name)
        self.file.close()


class JsonWithEncodingPipeline(object):
    """Hand-rolled JSON-lines export: one JSON object per line in acticle.json."""

    def __init__(self):
        self.file = codecs.open('acticle.json', 'w', encoding='utf-8')

    def process_item(self, item, spider):
        # ensure_ascii=False keeps non-ASCII (e.g. Chinese) text readable
        # instead of \uXXXX escapes.
        line = json.dumps(dict(item), ensure_ascii=False) + '\n'
        self.file.write(line)
        return item

    def close_spider(self, spider):
        # BUG FIX: Scrapy invokes close_spider(), not spider_closed(); the
        # old method name was never called, leaking the file handle and
        # risking unflushed output.
        self.file.close()

    # Backward-compatible alias in case anything called the old name.
    spider_closed = close_spider


class ToExeclPipeline(object):
    """Collect item rows and write them to a single Excel workbook.

    PERF FIX: the original rewrote the entire workbook on every item
    (O(n^2) total work); rows are now buffered and written once when the
    spider closes. Column names are taken from the first item seen.
    """

    def __init__(self):
        self.table = []
        self.columns = None
        # TODO(review): hard-coded output path — make this a setting.
        self.file_path = 'G://dangdang1.xlsx'

    def process_item(self, item, spider):
        if self.columns is None:
            self.columns = list(item.keys())
        self.table.append(list(item.values()))
        return item

    def close_spider(self, spider):
        if self.table:
            df = pd.DataFrame(self.table, columns=self.columns)
            df.to_excel(self.file_path, index=False)
        spider.logger.info("[%s]写入excel完毕!" % spider.name)


class KafkaPipeline(object):
    """Route crawled items to Kafka topics keyed by item['formats'].

    'weibo' -> weibotopic, 'tieba' -> tiebatopic, everything else ->
    metaserchtopic. Items with an empty/missing title, content or pubdate
    are passed through without being sent.
    """

    def process_item(self, item, spider):
        item = dict(item)
        if not item:
            print("item为空继续请求下一个url...")
            return item
        # BUG FIX: the old guard `(a and b and c) == ""` only triggered when
        # the first falsy field happened to be the empty string; skip sending
        # whenever ANY of the three required fields is empty or missing.
        # .get() also avoids a KeyError on partially-filled items.
        if not (item.get('title') and item.get('content') and item.get('pubdate')):
            print("title and pubdate and content为空...标题%s,发布时间%s,正文%s,链接%s" % (
                item.get('title'), item.get('pubdate'), item.get('content'), item.get('url')))
            return item
        try:
            K = Kafka_()
            # Serialize the whole item dict as a JSON payload.
            data = json.dumps(item, ensure_ascii=False)
            fmt = item.get('formats')
            if fmt == "weibo":
                K.send('weibotopic', data.encode(), key=b'weibo',
                       partition=random.choice([0, 1, 2]))
                print('weibotopic发送数据============id:{}, url:{} ,title:{} ,pubdate:{}'.format(
                    item['id'], item['url'], item['title'], item['pubdate']))
            elif fmt == "tieba":
                K.send('tiebatopic', data.encode(), key=b'tieba',
                       partition=random.choice([0, 1, 2]))
                print('tiebatopic发送数据============', data)
            else:
                K.send('metaserchtopic', data.encode(), key=b'metasearch',
                       partition=random.choice([0, 1, 2]))
                print('metaserchtopic发送数据============', "title:[%s],id:[%s],url:[%s],pubdate:[%s]" % (
                    item['title'], item['id'], item['url'], item['pubdate']))
            # Light throttle so the producer is not flooded.
            time.sleep(0.1)
        except Exception as e:
            print('Kafka管道文件发生错误:', e, item)

        return item


class Wenshu_Twisted_MysqlPipelines(object):
    """Asynchronous MySQL insert for court-document (wenshu) items."""

    def __init__(self, dbpool):
        self.dbpool = dbpool

    @classmethod
    def from_settings(cls, settings):
        dbparms = dict(
            host=settings['MYSQL_HOST'],
            db=settings['MYSQL_DATABASE'],
            user=settings['MYSQL_USER'],
            passwd=settings['MYSQL_PASSWORD'],
            charset=settings['MYSQL_CHARSET'],
            cursorclass=pymysql.cursors.DictCursor,
            use_unicode=settings['MYSQL_USE_UNICODE']
        )
        dbpool = adbapi.ConnectionPool('pymysql', **dbparms)
        return cls(dbpool)

    def process_item(self, item, spider):
        # Schedule the insert on the connection pool; failures are routed
        # to handle_error via the errback (failure must be the first arg).
        query = self.dbpool.runInteraction(self.do_insert, item)
        query.addErrback(self.handle_error, item, spider)
        # BUG FIX: a pipeline must return the item (or a Deferred), otherwise
        # every later pipeline stage receives None.
        return item

    def handle_error(self, failure, item, spider):
        # Log asynchronous insert failures through the spider logger.
        spider.logger.error('TwistedMysqlPipelines出现错误%s' % failure)

    def do_insert(self, cursor, item):
        # Runs on a pool thread: each item builds its own INSERT statement.
        insert_sql, params = item.get_insert_sql()
        cursor.execute(insert_sql, params)
        print("插入数据成功：", item['case_number'])


class WriteFilePipeline(object):
    """Persist each item's raw HTML to disk at item['html_path']."""

    def process_item(self, item, spider):
        html = item['html']
        path = item['path']
        html_path = item['html_path']
        # BUG FIX: exists()-then-makedirs() is racy when multiple writers
        # target the same directory; exist_ok makes the call idempotent.
        os.makedirs(path, exist_ok=True)
        try:
            with open(html_path, 'w', encoding='utf-8') as f:
                f.write(html)
            print("写入html成功：", item['case_number'])
        except Exception as e:
            # Best-effort: a failed write is reported but does not drop the item.
            print('WriteFilePipeline Exception:', e)
        return item

    def close_spider(self, spider):
        spider.logger.debug("[%s]写入html文件完毕!" % spider.name)


class SolrSendPipeline(object):
    """Push each item into Solr, de-duplicating wenshu docs for spider bjcourt2."""

    def process_item(self, item, spider):
        item = dict(item)
        if spider.name == 'bjcourt2':
            # NOTE(review): check_exist(...) == 0 is treated here as "already
            # seen" — confirm the Redis helper's return convention.
            if Redis_DB.check_exist('wenshuquchong', item['id']) == 0:
                print("该文书id:%s已存在..." % item['id'])
                return item
        Solr_Data().add_docs(item)
        print('添加数据id为%s的数据..' % item['id'])
        # BUG FIX: the return was commented out, so every later pipeline
        # stage received None instead of the item.
        return item
