# -*- coding: utf-8 -*-
"""
分管道处理不同的爬虫任务
"""
import logging
import pymongo
from scrapy.utils.project import get_project_settings
# Load the project's default Scrapy settings
settings = get_project_settings()
# Set the root logger's output level
logging.getLogger().setLevel(logging.INFO)

# Wrapper around the MongoDB connection / database handle
class Mongo:
    """Small helper that opens the MongoDB connection from project settings."""

    @classmethod
    def getDoc(cls):
        """Connect and authenticate to MongoDB.

        Returns the database named by ``MONGO_DB`` on success, or ``None``
        when the connection or authentication fails.
        """
        db_name = settings['MONGO_DB']
        try:
            # Pass credentials to the client constructor: the old
            # `client.admin.authenticate(...)` API was deprecated in
            # PyMongo 3.x and removed entirely in PyMongo 4.
            cls.client = pymongo.MongoClient(
                settings['MONGO_HOST'], settings['MONGO_PORT'],
                username=settings['MONGO_USER'],
                password=settings['MONGO_PASSWORD'])
            # MongoClient connects lazily; force a round trip so bad
            # hosts/credentials fail here rather than on the first insert.
            cls.client.admin.command('ping')
            logging.info('MongoDB密码验证成功！')
            logging.info('MongoDB连接成功！')
            return cls.client[db_name]
        except Exception as e:
            # Log the failure at error level (it previously went to info)
            # and return None explicitly so callers can detect it.
            logging.error('MongoDB密码验证失败:%s', e)
            return None

# Base pipeline class shared by all crawl tasks
class BasePipeline(object):
    """Shared MongoDB setup/teardown for every spider pipeline.

    Opens the database connection when the spider starts and selects a
    collection named after the spider; subclasses implement process_item.
    """

    def open_spider(self, spider):
        """Connect to MongoDB and bind ``self.collection`` for this spider."""
        logging.info('==================当前爬虫任务:%s' % spider.name)
        self.doc = Mongo.getDoc()
        if self.doc is None:
            # Fail fast with a clear message instead of the opaque
            # TypeError that subscripting None would raise below.
            raise RuntimeError(
                'MongoDB connection failed; cannot open spider %s' % spider.name)
        # One collection per spider, keyed by the spider's name.
        self.collection = self.doc[spider.name]
        logging.info('%s文档已创建，准备写入！' % spider.name)

    def close_spider(self, spider):
        """Log the end of the crawl; the Mongo client is left open (shared)."""
        logging.info('=======爬虫任务:%s结束！' % spider.name)

# Dead-company database task
class DeathCompanyPipeline(BasePipeline):
    """Stores dead-company records, de-duplicated by ``com_id``."""

    def process_item(self, item, spider):
        """Insert each company dict from ``item['company_info']`` unless a
        document with the same ``com_id`` already exists. Returns the item
        unchanged so later pipelines can still see it.
        """
        info_list = item['company_info']
        for info in info_list:
            # Debug trace of the company being handled (was a bare print).
            logging.debug('%s', info['com_name'])

            # Existence check by company ID: find_one avoids materializing
            # every matching document the way len(list(find(...))) did.
            com_id = info['com_id']
            if self.collection.find_one({'com_id': com_id}) is not None:
                logging.info('数据已存在，无需插入！')
                continue
            # Not a duplicate — insert it.
            try:
                self.collection.insert_one(info)
                logging.info('已写入%s！' % spider.name)
            except Exception as e:
                logging.error('写入出错：%s' % (e))

        return item


# Investment-institution task
class InvestmentPipeline(BasePipeline):
    """Stores investment-institution records, de-duplicated by ``id``."""

    def process_item(self, item, spider):
        """Insert each dict from ``item['investment_info']`` unless a
        document with the same ``id`` already exists. Returns the item
        unchanged so later pipelines can still see it.
        """
        info_list = item['investment_info']
        for info in info_list:

            # Existence check by ID (typo `invetment_id` fixed): find_one
            # avoids materializing every match as len(list(find(...))) did.
            investment_id = info['id']
            if self.collection.find_one({'id': investment_id}) is not None:
                logging.info('数据已存在，无需插入！')
                continue
            # Not a duplicate — insert it.
            try:
                self.collection.insert_one(info)
                logging.info('已写入%s！' % spider.name)
            except Exception as e:
                logging.error('写入出错：%s' % (e))

        return item


# LP (limited partner) task
class LpPipeline(BasePipeline):
    """Stores LP (limited partner) records, de-duplicated by ``id``."""

    def process_item(self, item, spider):
        """Insert each dict from ``item['lp_info']`` unless a document with
        the same ``id`` already exists. Returns the item unchanged so later
        pipelines can still see it.
        """
        info_list = item['lp_info']
        for info in info_list:

            # Existence check by ID: find_one avoids materializing every
            # matching document the way len(list(find(...))) did.
            lp_id = info['id']
            if self.collection.find_one({'id': lp_id}) is not None:
                logging.info('数据已存在，无需插入！')
                continue
            # Not a duplicate — insert it.
            try:
                self.collection.insert_one(info)
                logging.info('已写入%s！' % spider.name)
            except Exception as e:
                logging.error('写入出错：%s' % (e))

        return item


# GP (general partner) task
class GpPipeline(BasePipeline):
    """Stores GP (general partner) records, de-duplicated by ``id``."""

    def process_item(self, item, spider):
        """Insert each dict from ``item['gp_info']`` unless a document with
        the same ``id`` already exists. Returns the item unchanged so later
        pipelines can still see it.
        """
        info_list = item['gp_info']
        for info in info_list:

            # Existence check by ID: find_one avoids materializing every
            # matching document the way len(list(find(...))) did.
            gp_id = info['id']
            if self.collection.find_one({'id': gp_id}) is not None:
                logging.info('数据已存在，无需插入！')
                continue
            # Not a duplicate — insert it.
            try:
                self.collection.insert_one(info)
                logging.info('已写入%s！' % spider.name)
            except Exception as e:
                logging.error('写入出错：%s' % (e))

        return item

# Fund-institution task
class FundPipeline(BasePipeline):
    """Stores fund-institution records, de-duplicated by ``fund_id``."""

    def process_item(self, item, spider):
        """Insert each dict from ``item['fund_info']`` unless a document
        with the same ``fund_id`` already exists. Returns the item
        unchanged so later pipelines can still see it.
        """
        info_list = item['fund_info']
        for info in info_list:

            # Existence check by fund ID: find_one avoids materializing
            # every matching document the way len(list(find(...))) did.
            fund_id = info['fund_id']
            if self.collection.find_one({'fund_id': fund_id}) is not None:
                logging.info('数据已存在，无需插入！')
                continue
            # Not a duplicate — insert it.
            try:
                self.collection.insert_one(info)
                logging.info('已写入%s！' % spider.name)
            except Exception as e:
                logging.error('写入出错：%s' % (e))

        return item