# -*- coding: utf-8 -*-

# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
import platform
import json
from elasticsearch import Elasticsearch
python_version = platform.python_version()
if python_version.startswith('2'):
    from pybloom import ScalableBloomFilter
elif python_version.startswith('3'):
    from pybloom_live import ScalableBloomFilter
from weibo_scrapy.items import MblogItem, UserItem
from weibo_scrapy.settings import ES_URL,ES_INDEX
import time

class WeiboPipeline(object):
    """Index scraped Weibo items into Elasticsearch.

    Posts (MblogItem) are always indexed; users (UserItem) are
    de-duplicated with a scalable bloom filter so each uid is
    written at most once per spider run (modulo bloom false
    positives, which merely skip a re-index).
    """

    def open_spider(self, spider):
        # One ES client per crawl; generous timeout for slow indexing.
        self.es = Elasticsearch(ES_URL, timeout=1000)
        # Tracks user ids already sent to ES during this run.
        self.bf = ScalableBloomFilter()

    def close_spider(self, spider):
        # Nothing to release explicitly; the ES client needs no teardown here.
        pass

    def process_item(self, item, spider):
        if isinstance(item, MblogItem):
            self.es.index(index=ES_INDEX, doc_type='post',
                          id=item['mid'], body=dict(item))
        elif isinstance(item, UserItem):
            user_id = item['uid']
            if user_id in self.bf:
                # Already indexed this user (or a bloom false positive); skip.
                return item
            self.es.index(index=ES_INDEX, doc_type='user',
                          id=user_id, body=dict(item))
            self.bf.add(user_id)
        return item

class WeiboPipeline2(object):
    """Append every scraped item as one JSON line to a timestamped file."""

    def open_spider(self, spider):
        # Fresh output file per run, named by the epoch second it started.
        self.file = open('user{}.json'.format(int(time.time())),
                         'a', encoding='utf-8')

    def close_spider(self, spider):
        self.file.close()

    def process_item(self, item, spider):
        line = json.dumps(dict(item), ensure_ascii=False)
        self.file.write(line + '\n')
        # Flush per item so the file survives an interrupted crawl.
        self.file.flush()
        return item
