# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html


# useful for handling different item types with a single interface
from scrapy.exporters import JsonLinesItemExporter
from elasticsearch import Elasticsearch
from elasticsearch import helpers
from scrapy.utils.project import get_project_settings


class WeiboSpyderPipeline:
    """Export scraped Weibo items to a JSON-lines file.

    Bug fix: the original constructor passed ``self.fp`` to
    ``JsonLinesItemExporter`` before the attribute was ever assigned, so
    instantiating the pipeline raised ``AttributeError`` (and
    ``close_spider`` closed the same missing attribute).  The file handle
    and exporter are now created in ``open_spider`` and released in
    ``close_spider``.
    """

    def __init__(self):
        # Created lazily in open_spider; keeping the constructor
        # side-effect free lets Scrapy build the pipeline without
        # touching the filesystem.
        self.fp = None
        self.exporter = None

    def open_spider(self, spider):
        # NOTE(review): output path was not visible in the original
        # (self.fp was never assigned) — 'weibo.json' is a best guess,
        # confirm against project conventions. The exporter needs a
        # binary-mode file object.
        self.fp = open('weibo.json', 'wb')
        self.exporter = JsonLinesItemExporter(self.fp, ensure_ascii=False, encoding='utf-8')
        print('开始爬取微博数据...')

    def process_item(self, item, spider):
        # Append one JSON object per line; return the item so any
        # downstream pipelines still receive it.
        self.exporter.export_item(item)
        return item

    def close_spider(self, spider):
        # Guard in case the spider never opened (e.g. startup failure).
        if self.fp is not None:
            self.fp.close()
        print('爬虫结束...')

class HotToESPipeline(object):
    """Buffer hot-search items and bulk-index them into Elasticsearch.

    Fixes over the original:

    * ``process_item`` now always returns the item — the original
      returned ``None``, starving any downstream pipeline.
    * The item arriving when the buffer was full is no longer silently
      dropped: items are appended first, then the buffer is flushed once
      it reaches ``BATCH_SIZE``.
    * The duplicated bulk-write code in ``process_item`` and
      ``close_spider`` is factored into ``_flush``.
    """

    # Number of buffered items per bulk request.
    BATCH_SIZE = 50

    def __init__(self):
        print("Enter ElasticSearch HotToESPipeline...")
        self.item_list = []
        # Read the ES connection details from the Scrapy project settings.
        settings = get_project_settings()
        eshost = settings.get('ELASTICSEARCH_SERVER')
        esport = settings.get('ELASTICSEARCH_PORT')
        # username = settings.get('ELASTICSEARCH_USERNAME')
        # password = settings.get('ELASTICSEARCH_PASSWORD')
        self.es = Elasticsearch([{'host': eshost, 'port': esport}])
        # self.es = Elasticsearch([eshost], port=esport, http_auth=(username, password))

    def getHashCode(self, text):
        """Java-style base-31 string hash, used as a stable document id.

        Horner form of the original ``sum(ord(c) * 31**(n-i-1))`` —
        identical results, without shadowing the builtin ``str``.
        """
        h = 0
        for c in text:
            h = h * 31 + ord(c)
        return h

    def _flush(self):
        """Bulk-index every buffered item, then clear the buffer."""
        if not self.item_list:
            return
        actions = [{
            "_index": "hot",
            "_type": "_doc",
            # Hashing the title de-duplicates repeated hot-search entries.
            "_id": self.getHashCode(item['title']),
            "_source": {
                "title": item['title'],
                "hot": item["hot"],
                "created_time": item["created_time"]
            }
        } for item in self.item_list]
        helpers.bulk(self.es, actions)
        self.item_list = []

    def process_item(self, item, spider):
        """Buffer the item; bulk-write once BATCH_SIZE items accumulate."""
        if item['hot'] != 0:  # skip entries without a hot score
            self.item_list.append(item)
            if len(self.item_list) >= self.BATCH_SIZE:
                self._flush()
        return item

    def close_spider(self, spider):
        # Write out whatever is still buffered before shutting down.
        self._flush()
        print("hot 爬虫结束")

class MBlogToESPipeline(object):
    """Buffer microblog items and bulk-index them into Elasticsearch.

    Fixes over the original:

    * ``process_item`` now always returns the item — the original
      returned ``None``, starving any downstream pipeline.
    * The item arriving when the buffer was full is no longer silently
      dropped: items are appended first, then the buffer is flushed once
      it reaches ``BATCH_SIZE``.
    * The duplicated bulk-write code in ``process_item`` and
      ``close_spider`` is factored into ``_flush``.
    """

    # Number of buffered items per bulk request.
    BATCH_SIZE = 2000

    def __init__(self):
        print("Enter ElasticSearch MBlogToESPipeline...")
        self.item_list = []
        # Read the ES connection details from the Scrapy project settings.
        settings = get_project_settings()
        eshost = settings.get('ELASTICSEARCH_SERVER')
        esport = settings.get('ELASTICSEARCH_PORT')
        # username = settings.get('ELASTICSEARCH_USERNAME')
        # password = settings.get('ELASTICSEARCH_PASSWORD')
        self.es = Elasticsearch([{'host': eshost, 'port': esport}])
        # self.es = Elasticsearch([eshost], port=esport, http_auth=(username, password))

    def _flush(self):
        """Bulk-index every buffered item, then clear the buffer."""
        if not self.item_list:
            return
        actions = [{
            "_index": "mblog",
            "_type": "_doc",
            # The weibo mid is already a unique, stable document id.
            "_id": item['mid'],
            "_source": {
                "topic": item["topic"],
                "text": item["text"],
                "created_time": item["created_time"],
                "created_time_text": item["created_time_text"],
                "author": item["author"],
                "comments_count": item["comments_count"],
                "reposts_count": item["reposts_count"],
                "attitudes_count": item["attitudes_count"],
                "source": item["source"],
                "location": item["location"],
                "province": item["province"],
                "link": item["link"],
                "sentiment": item["sentiment"]
            }
        } for item in self.item_list]
        helpers.bulk(self.es, actions)
        self.item_list = []

    def process_item(self, item, spider):
        """Buffer the item; bulk-write once BATCH_SIZE items accumulate."""
        self.item_list.append(item)
        if len(self.item_list) >= self.BATCH_SIZE:
            self._flush()
        return item

    def close_spider(self, spider):
        # Write out whatever is still buffered before shutting down.
        self._flush()
        print("mblog 爬虫结束")