# -*- coding: utf-8 -*-

# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html

import codecs
import json
from w3lib.html import remove_tags

# Import mysqlclient (MySQLdb)
import MySQLdb
import MySQLdb.cursors

# Twisted's adbapi lets blocking database operations run asynchronously
from twisted.enterprise import adbapi

from scrapy.pipelines.images import ImagesPipeline
from scrapy.exporters import JsonItemExporter

from ArticleSpider.models.es_types import ArticleType


class ArticlespiderPipeline(object):
	"""Default no-op pipeline.

	Scrapy requires process_item to return the item (or raise DropItem);
	returning None — as the original `pass` effectively did — silently
	drops every item for all later pipelines.
	"""

	def process_item(self, item, spider):
		# Pass the item through unchanged so downstream pipelines receive it.
		return item


# Custom JSON export (one JSON object per line)
class JsonPipeline(object):
	"""Write each item as one JSON object per line to article.json."""

	def __init__(self):
		# Built-in open() with an explicit encoding is the modern
		# replacement for codecs.open; closed in close_spider.
		self.file = open("article.json", "w", encoding="utf-8")

	def process_item(self, item, spider):
		# ensure_ascii=False keeps non-ASCII (e.g. Chinese) text readable.
		line = json.dumps(dict(item), ensure_ascii=False) + "\n"
		self.file.write(line)
		return item

	def close_spider(self, spider):
		# Called once when the spider finishes; release the file handle.
		self.file.close()


# Export using Scrapy's built-in JsonItemExporter
class JsonExporterPipeline(object):
	"""Export all scraped items into article_export.json as a JSON array."""

	def __init__(self):
		output = open("article_export.json", "wb")
		exporter = JsonItemExporter(output, encoding="utf-8", ensure_ascii=False)
		exporter.start_exporting()
		self.file = output
		self.exporter = exporter

	def process_item(self, item, spider):
		# Hand the item to the exporter, then pass it along unchanged.
		self.exporter.export_item(item)
		return item

	def close_spider(self, spider):
		# Terminate the JSON array and release the file handle.
		self.exporter.finish_exporting()
		self.file.close()


class ArticleImagePipeline(ImagesPipeline):
	"""Store the downloaded cover image's local path back onto the item."""

	def item_completed(self, results, item, info):
		# Every item passes through this hook, but not every item has an
		# image field (or the same field name), so only touch items that
		# declare cover_image_path.
		if "cover_image_path" in item:
			# `results` is a list of (success, info_or_failure) tuples.
			# Only successful downloads carry a "path"; skipping failures
			# also avoids the NameError the original raised when `results`
			# was empty (cover_image_path was referenced unbound).
			for ok, value in results:
				if ok:
					item["cover_image_path"] = value["path"]
		return item


class MySQLPipeline(object):
	"""Synchronous MySQL storage pipeline using mysqlclient (MySQLdb)."""

	def __init__(self):
		# If connecting via "localhost" fails, use the IP address instead.
		# NOTE(review): credentials are hard-coded here — consider reading
		# them from Scrapy settings, as MySQLTwistedPipeline does.
		self.conn = MySQLdb.connect("127.0.0.1", "root", "ixuea888", "article_spider", charset="utf8", use_unicode=False)
		self.cursor = self.conn.cursor()

	def process_item(self, item, spider):
		insert_sql = """
            insert into jobbole_article(url_object_id,title,url) values (%s,%s,%s);
        """

		# Parameterized query guards against SQL injection.  The original
		# passed the literal "aa" as url_object_id (a debug leftover), so
		# every row shared the same id; use the item's real value.
		self.cursor.execute(insert_sql, (item["url_object_id"], item["title"], item["url"]))
		self.conn.commit()

		# Scrapy pipelines must return the item for later pipelines.
		return item

	def close_spider(self, spider):
		# Release database resources when the spider finishes.
		self.cursor.close()
		self.conn.close()


# Asynchronous MySQL storage
class MySQLTwistedPipeline(object):
	"""Asynchronous MySQL storage via Twisted's adbapi connection pool."""

	def __init__(self, database_pool):
		self.database_pool = database_pool

	# Scrapy calls this classmethod so the pipeline can read the project
	# settings (connection parameters) before being instantiated.
	@classmethod
	def from_settings(cls, settings):
		database_params = dict(
			host=settings["MYSQL_HOST"],
			db=settings["MYSQL_DATABASE"],
			user=settings["MYSQL_USERNAME"],
			password=settings["MYSQL_PASSWORD"],
			charset="utf8",
			cursorclass=MySQLdb.cursors.DictCursor,
			use_unicode=True,
		)

		# adbapi runs the blocking MySQLdb calls on a thread pool, so the
		# reactor (and the crawl) is never blocked by the database.
		database_pool = adbapi.ConnectionPool("MySQLdb", **database_params)

		return cls(database_pool)

	def process_item(self, item, spider):
		# Schedule the insert asynchronously; route failures to
		# handle_error so they are logged instead of lost.
		query = self.database_pool.runInteraction(self.do_insert, item, spider)
		query.addErrback(self.handle_error, item, spider)
		# Return the item so later pipelines still receive it (the
		# original returned None, silently dropping every item).
		return item

	def do_insert(self, cursor, item, spider):
		# Each item class supplies its own SQL and params, so one pipeline
		# handles every item type.  No explicit commit is needed —
		# runInteraction commits automatically on success.
		insert_sql, params = item.get_insert_sql()
		cursor.execute(insert_sql, params)

	def handle_error(self, failure, item, spider):
		# Report insert failures; swallowing them keeps the crawl running.
		print("handle_error:", failure, item.__class__.__name__)

class ElasticsearchPipeline(object):
	"""Persist items into Elasticsearch."""

	def process_item(self, item, spider):
		# Each item type knows how to convert and save itself to ES.
		item.save_to_es()
		return item