# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html


# useful for handling different item types with a single interface
from itemadapter import ItemAdapter
import os
import csv
import platform
from pymongo import MongoClient
from os.path import join, basename, dirname
from urllib.parse import urlparse
import datetime
from scrapy.utils.project import get_project_settings

settings = get_project_settings()
class TutorialPipeline:
    """Default no-op pipeline generated by Scrapy's project template.

    Items are forwarded to the next pipeline stage unchanged.
    """

    def process_item(self, item, spider):
        # Nothing to transform here; hand the item straight through.
        return item


class Pipeline_ToCSV(object):
    """Write selected item fields to a CSV file in the spiders/ directory.

    The file is opened on pipeline construction, one row is appended per
    item in :meth:`process_item`, and the file is closed when the spider
    finishes.
    """

    def __init__(self):
        # Target CSV path; the file need not exist beforehand, but the
        # spiders/ directory must.
        store_file = os.path.dirname(__file__) + '/spiders/新能源汽车_20221204.csv'
        # newline='' per the csv module docs; explicit UTF-8 so the Chinese
        # text round-trips on any platform (previously the locale default
        # encoding was used, which mangles Chinese on e.g. Windows/gbk).
        self.file = open(store_file, 'w', newline='', encoding='utf-8')

        self.writer = csv.writer(self.file)
        # Header row.
        self.writer.writerow(['target', 'category', 'releaseAt', 'title', 'source'])

    def process_item(self, item, spider):
        """Append one CSV row for *item* and pass the item on unchanged.

        :param item: item with 'target', 'category', 'releaseAt', 'title',
            'source' fields
        :param spider: the spider that produced the item (unused)
        :return: the item, for the next pipeline stage
        """
        self.writer.writerow([item['target'], item['category'],
                              item['releaseAt'], item['title'], item['source']])
        return item

    def _process_item(self, item, spider):
        """Alternative writer (currently unused by Scrapy, which calls
        ``process_item``): stamps bookkeeping fields on the item and only
        writes rows whose 'target' field is truthy.

        NOTE(review): writes 'company_name' / 'search_engine' columns that do
        not match the header written in __init__ — confirm intent before use.
        """
        item["spider_name"] = spider.name
        item["store_at"] = datetime.datetime.now()
        item['item_name'] = spider.item_name.upper()

        if item['target']:
            self.writer.writerow(
                [item['target'], item['company_name'], item['search_engine'], item['title'], item['source']])
        return item

    def close_spider(self, spider):
        """Close (and thereby flush) the CSV file when the spider stops."""
        self.file.close()


from scrapy import Item
import pymongo


# class MongoDBPipeline(object):
#     """ MongoDB 存储 Pipeline
#     """
#
#     def __init__(self):
#         # 链接数据库
#         self.client = pymongo.MongoClient("mongodb://42.192.142.215:27017/",username = 'myuser',password = '123',authsource='cpt')
#         self.db = self.client[settings['MONGODB_DATABASE']]  # 获得数据库的句柄
#         self.col = self.db[settings['MONGODB_COL']] # 获得collection的句柄
#
#
#
#     def process_item(self, item, spider):
#         """ MongoDB 存储操作
#         Todo 请修改你存入 MongoDB 中的主键
#         :param item: 传入的 item 数据
#         :param spider: spider 相关信息
#         :return item: item 对象
#         """
#         item["spider_name"] = spider.name
#         item["store_at"] = datetime.datetime.now()
#         item['item_name'] = spider.item_name.upper()
#         if platform.system() != 'Linux':
#             self.col.update_one({"target": item["target"]}, {"$set": dict(item)}, True)
#         else:
#             self.col.update_one({"target": item["target"]}, {"$setOnInsert": dict(item)}, True)
#         return item
class MongoDBPipeline(object):
    """Store scraped items in MongoDB, upserting on the item's 'target' field."""

    def __init__(self, mongodb_uri, mongodb_db, mongodb_col):
        """
        :param mongodb_uri: MongoDB connection URI
        :param mongodb_db: database name
        :param mongodb_col: collection name
        """
        self.mongodb_uri = mongodb_uri
        self.mongodb_db = mongodb_db
        self.mongodb_col = mongodb_col

    @classmethod
    def from_crawler(cls, crawler):
        """Build the pipeline from crawler settings, with built-in fallbacks.

        :param crawler: the Scrapy crawler providing the settings
        :return: a configured pipeline instance
        """
        return cls(
            mongodb_uri=crawler.settings.get("MONGODB_URI", "mongodb://42.192.14.215:27017/"),
            mongodb_db=crawler.settings.get("MONGODB_DATABASE", "items"),
            mongodb_col=crawler.settings.get("MONGODB_COL", "col"),
        )

    def open_spider(self, spider):
        """Open the MongoDB connection when the spider starts.

        :param spider: the spider being opened (unused)
        """
        # SECURITY NOTE(review): credentials are hard-coded here instead of
        # coming from settings/MONGODB_URI — move them into configuration.
        self.client = MongoClient(host=self.mongodb_uri, username='myuser',
                                  password='123', port=27017, authsource='cpt')
        self.db = self.client[self.mongodb_db]

    def close_spider(self, spider):
        """Close the MongoDB connection when the spider finishes.

        :param spider: the spider being closed (unused)
        """
        self.client.close()

    def process_item(self, item, spider):
        """Stamp bookkeeping fields on *item* and upsert it keyed on 'target'.

        On non-Linux (development) hosts the stored document is always
        overwritten ($set); on Linux (presumably production) existing
        documents are preserved and only new ones are inserted ($setOnInsert).

        Todo: adjust the primary key used for the MongoDB upsert if needed.

        :param item: the scraped item
        :param spider: the producing spider (its name / item_name are recorded)
        :return: the item, for the next pipeline stage
        """
        item["spider_name"] = spider.name
        item["store_at"] = datetime.datetime.now()
        item['item_name'] = spider.item_name.upper()
        collection = self.db[self.mongodb_col]
        if platform.system() != 'Linux':
            collection.update_one({"target": item["target"]},
                                  {"$set": dict(item)}, upsert=True)
        else:
            collection.update_one({"target": item["target"]},
                                  {"$setOnInsert": dict(item)}, upsert=True)
        return item
