# -*- coding: utf-8 -*-

# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
from scrapy.exporters import JsonLinesItemExporter
from redis import StrictRedis
from . import conf
import pika
import json


class BlogPipeline(object):
	"""Export scraped blog items to a local ``blog.json`` file, one JSON object per line."""

	def __init__(self):
		# Scrapy exporters require a binary file object; the exporter handles encoding.
		self.f = open('blog.json', 'wb')
		self.exporter = JsonLinesItemExporter(self.f, ensure_ascii=False, encoding='utf-8')
		# Exporter contract: signal the start of the export session before exporting items.
		self.exporter.start_exporting()

	def process_item(self, item, spider):
		"""Write *item* as one JSON line, then forward it to later pipelines unchanged."""
		self.exporter.export_item(item)
		return item

	def close_spider(self, spider):
		"""Finish the export session and release the file handle when the spider closes."""
		# finish_exporting() must be called before closing the underlying file.
		self.exporter.finish_exporting()
		self.f.close()


class BlogRedisPipeline(object):
	"""Persist each blog item into a Redis hash, keyed by title."""

	def __init__(self):
		# Open the Redis connection using host/port from the project config module.
		self.redis_obj = StrictRedis(host=conf.REDIS_HOST, port=conf.REDIS_PORT)

	def process_item(self, item, spider):
		"""Store the item's content under its title in the configured hash; pass the item on."""
		title = item['title']
		content = item['content']
		self.redis_obj.hset(conf.REDIS_BLOG_DETAIL, title, content)
		return item


class BlogMQPipeline(object):
	"""Publish each scraped blog item to a RabbitMQ queue as a JSON message."""

	def __init__(self):
		"""Open the MQ connection and declare the destination queue."""
		parameters = pika.ConnectionParameters(conf.MQ_HOST)
		self.connection = pika.BlockingConnection(parameters)
		self.channel = self.connection.channel()
		# Declaring the queue is idempotent; guarantees it exists before publishing.
		self.channel.queue_declare(queue=conf.MQ_QUEUE)

	def process_item(self, item, spider):
		"""Serialize the item to JSON, publish it to the queue, and forward the item unchanged."""
		# ensure_ascii=False keeps non-ASCII (e.g. Chinese) text as-is in the payload,
		# consistent with BlogPipeline's JsonLinesItemExporter settings.
		item_json = json.dumps(
			{'title': item['title'], 'content': item['content']},
			ensure_ascii=False,
		)
		# Empty exchange = default exchange, which routes directly to the queue
		# named by routing_key.
		self.channel.basic_publish(exchange='', routing_key=conf.MQ_QUEUE, body=item_json)
		return item

	def close_spider(self, spider):
		"""Close the MQ connection when the spider shuts down."""
		self.connection.close()