# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html


# useful for handling different item types with a single interface
from itemadapter import ItemAdapter

from pymongo import MongoClient
import pandas as pd
import os


class FundspiderPipeline:
    """Default no-op pipeline: forwards every item unchanged."""

    def process_item(self, item, spider):
        # Nothing to transform here; hand the item to the next pipeline stage.
        return item


class FilePipeline(object):
    """Persist each scraped fund's holdings to a per-fund CSV file.

    One CSV is written per item, named after the fund, containing the
    parallel code / stock / ratio lists as columns.
    """

    def __init__(self, output_dir="/data/python/jupyter/公募基金"):
        # Output directory is a parameter (default preserves the original
        # hard-coded path) so it can be overridden, e.g. in tests.
        self.output_dir = output_dir

    def process_item(self, item, spider):
        """Write item's holdings table to <output_dir>/<fund>.csv.

        Returns the item unchanged so later pipeline stages still receive it.
        Assumes item['code'] / item['stock'] / item['ratio'] are equal-length
        lists — TODO confirm against the spider.
        """
        result_df = pd.DataFrame({
            'code': item['code'],
            'name': item['stock'],
            'percentage': item['ratio'],
        })
        # Strip '/' from the fund name so it is not treated as a path separator.
        file_name = item['fund'].replace('/', '') + ".csv"
        # Create the output directory if missing instead of failing in to_csv.
        os.makedirs(self.output_dir, exist_ok=True)
        # index=False (not index=None) is the documented way to omit the index;
        # utf_8_sig adds a BOM so Excel renders the Chinese text correctly.
        result_df.to_csv(os.path.join(self.output_dir, file_name),
                         index=False, encoding="utf_8_sig")
        return item


class MongoPipeline(object):
    """Store each scraped fund item as one MongoDB document.

    The parallel code/stock/ratio lists are flattened into top10..top1
    sub-documents, largest holding first.
    """

    def __init__(self, databaseIp='127.0.0.1', databasePort=27017, user="admin", password="123456",
                 mongodbName='fund_db', collectName="fund_data"):
        # Pass credentials to MongoClient directly: Database.authenticate()
        # was removed in pymongo 4.x. authSource=mongodbName preserves the
        # old behavior of authenticating against the target database.
        self.client = MongoClient(databaseIp, databasePort,
                                  username=user, password=password,
                                  authSource=mongodbName)
        self.db = self.client[mongodbName]
        self.collection = self.db[collectName]

    def process_item(self, item, spider):
        """Insert one document per item and return the item unchanged."""
        result = {}
        # Lists appear largest holding first; label them top10 down to top1.
        for rank, (code, stock, ratio) in enumerate(
                zip(item['code'], item['stock'], item['ratio'])):
            result[f'top{10 - rank}'] = {"code": code, "stock": stock, "ratio": ratio}
        result['crawl_date'] = item['crawl_date']
        result['fund'] = item['fund']
        result['scale'] = item['scale']
        # Collection.insert() was removed from pymongo; insert_one replaces it.
        self.collection.insert_one(result)
        return item  # returned so the item is still visible to later stages/logging

    def close_spider(self, spider):
        """Release the MongoDB connection when the spider finishes."""
        self.client.close()
