# -*- coding: utf-8 -*-

# Define here the models for your scraped items
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/items.html

import datetime
import re
from w3lib.html import remove_tags

import scrapy
from scrapy.loader import ItemLoader

# MapCompose相当于是一个容器，他可以传递任意多个函数
# 他会依次调用每一个函数
# 有点类似于流水线
# 大家在设计框架，也可以借鉴这种模式
from scrapy.loader.processors import MapCompose, TakeFirst, Join

from ArticleSpider.util.common import extract_number
# from util.common import extract_number

from ArticleSpider.settings import SQL_DATETIME_FORMAT, SQL_DATE_FORMAT

from elasticsearch_dsl import Document, Date, Integer, Keyword, Text, connections,Completion,analyzer
from ArticleSpider.models.es_types import ArticleType

# Module-level Elasticsearch connection (used by gen_suggests below).
# es=connections.create_connection(ArticleType._doc_type.using)
es=connections.create_connection(hosts=['localhost'])

class ArticlespiderItem(scrapy.Item):
	"""Placeholder item generated by the Scrapy project template.

	Define fields here like::

		name = scrapy.Field()
	"""


def add_jobbole(value):
	"""Demo input processor: append the literal suffix " add" to *value*."""
	return "".join((value, " add"))


def string_to_datetime(value):
	"""Parse a "YYYY/MM/DD" string into a ``datetime.date``.

	Falls back to today's date when *value* cannot be parsed.
	(BUGFIX: the original called the non-existent
	``datetime.datetime.struct_time``, which always raised, so every
	input silently fell through to the fallback date.)
	"""
	try:
		created_at = datetime.datetime.strptime(value, "%Y/%m/%d").date()
	except (TypeError, ValueError):
		# Malformed or non-string input: default to the current date.
		created_at = datetime.datetime.now().date()
	return created_at


def string_to_number(value):
	"""Extract the first run of digits in *value* as an int; 0 if none.

	Used as an input processor for praise/comment/favourite counters
	such as "3 评论".
	"""
	# Raw string for the regex: the original "\d" inside a plain string
	# is an invalid escape sequence (SyntaxWarning on modern Python).
	value_re = re.match(r".*?(\d+).*", value)
	if value_re:
		return int(value_re.group(1))
	return 0


def remove_comment_tag(value):
	"""Drop tag values that are really comment counts (contain "评论")."""
	return "" if "评论" in value else value


def return_value(value):
	"""Identity processor: pass *value* through unchanged.

	Used to override a loader's default output processor so list values
	(e.g. image URLs) are not unwrapped.
	"""
	return value

def gen_suggests(index, info_tuple):
	"""Build a completion-suggester payload from analyzed text fields.

	Parameters
	----------
	index : str
		ES index whose analyzer tokenizes the text.
	info_tuple : iterable of (text, weight)
		Strings to analyze and the suggest weight for their tokens.

	Returns a list of ``{"input": [...tokens...], "weight": w}`` dicts.
	Each token appears at most once, at the weight of the first
	(highest-priority) field that produced it.
	"""
	used_words = set()
	suggests = []

	for text, weight in info_tuple:
		if text:
			# Tokenize via the index analyzer (ES6-style request body).
			# es5 variant:
			# words=es.indices.analyze(index=index,analyze="ik_max_word",params={"filter":["lowercase"]},body=text)
			words = es.indices.analyze(index=index, body={'text': text, 'analyzer': "ik_max_word", 'filter': ["lowercase"]})

			# Keep tokens longer than one character.
			analyzed_words = set(r["token"] for r in words["tokens"] if len(r["token"]) > 1)
			new_words = analyzed_words - used_words
			# BUGFIX: remember the tokens already emitted so later
			# (lower-weight) fields do not suggest the same word again.
			# The original never updated used_words, making the
			# subtraction above a no-op.
			used_words.update(new_words)
		else:
			new_words = set()

		if new_words:
			suggests.append({"input": list(new_words), "weight": weight})

	return suggests


class JobBoleArticleItem(scrapy.Item):
	"""An article scraped from jobbole.com.

	Fields map to the ``jobbole_article`` MySQL table and to the
	``ArticleType`` Elasticsearch document.
	"""

	title = scrapy.Field(
		# An input_processor acts like a setter hook: it can transform
		# each extracted value before it is stored on the field.
		# input_processor=MapCompose(add_jobbole)

		# A lambda works too:
		# input_processor=MapCompose(lambda x:x+" x")

		# Several callables may be chained; MapCompose runs them in order
		# like a pipeline:
		# input_processor=MapCompose(lambda x:x+" x",add_jobbole)
	)
	created_at = scrapy.Field(
		input_processor=MapCompose(string_to_datetime),

		# Take only the first extracted value.
		# Not needed here: the custom ArticleItemLoader already sets
		# TakeFirst as the default output processor.
		# output_processor=TakeFirst(),
	)
	url = scrapy.Field()
	url_object_id = scrapy.Field()
	cover_image = scrapy.Field(
		# The images pipeline expects a list, but the custom loader's
		# default output processor (TakeFirst) would unwrap it — so
		# override it with an identity processor to keep the list.
		output_processor=MapCompose(return_value)
	)
	cover_image_path = scrapy.Field()
	praise_nums = scrapy.Field(
		input_processor=MapCompose(string_to_number)
	)
	comment_nums = scrapy.Field(
		input_processor=MapCompose(string_to_number)
	)
	fav_nums = scrapy.Field(
		input_processor=MapCompose(string_to_number)
	)
	tags = scrapy.Field(
		# Drop comment-count pseudo-tags, then join the rest with commas.
		input_processor=MapCompose(remove_comment_tag),
		output_processor=Join(",")
	)
	content = scrapy.Field()

	# Every item class implements this method (duck-typed polymorphism,
	# similar to an interface in Java): the MySQL pipeline calls
	# item.get_insert_sql() without knowing the concrete item class.
	def get_insert_sql(self):
		"""Return an (insert_sql, params) pair for the MySQL pipeline."""
		insert_sql = """
          insert into jobbole_article(url_object_id,title,url) values (%s,%s,%s);
        """
		params = (self["url_object_id"], self["title"], self["url"])

		return insert_sql, params


	def save_to_es(self):
		"""Copy this item's fields into an ArticleType document and index it."""
		article = ArticleType()
		article.title = self["title"]
		article.created_at = self["created_at"]
		article.url = self["url"]
		article.url_object_id = self["url_object_id"]
		article.cover_image = self["cover_image"]
		if "cover_image_path" in self:
			article.cover_image_path = self["cover_image_path"]
		article.praise_nums = self["praise_nums"]
		article.comment_nums = self["comment_nums"]
		article.fav_nums = self["fav_nums"]
		article.tags = self["tags"]
		article.content = remove_tags(self["content"].strip())

		# Completion-suggester data: title tokens weighted 10, tag tokens 7.
		# article.suggest=[{"input":[],"weight":2}]
		article.suggest=gen_suggests(ArticleType.Index.name,((article.title,10),(article.tags,7)))

		article.save()


# Custom ItemLoader whose purpose is to set a default output processor.
class ArticleItemLoader(ItemLoader):
	"""ItemLoader that takes only the first extracted value per field."""
	default_output_processor = TakeFirst()


class RubyChinaQuestionItem(scrapy.Item):
	"""A question scraped from ruby-china.org.

	Field names mirror the columns of the ``rc_questions`` table.
	"""
	id = scrapy.Field()
	title = scrapy.Field()
	content = scrapy.Field()
	url = scrapy.Field()
	created_at = scrapy.Field()
	updated_at = scrapy.Field()
	answer_counts = scrapy.Field()
	comment_counts = scrapy.Field()
	view_counts = scrapy.Field()
	crawl_at = scrapy.Field()
	crawl_updated_at = scrapy.Field()

	def get_insert_sql(self):
		"""Return an (insert_sql, params) pair for the MySQL pipeline.

		On a duplicate primary key the UPDATE clause refreshes the
		mutable columns (content, view_counts) instead of failing.
		"""
		insert_sql = """
          insert into rc_questions(id,url,title,content,view_counts,crawl_at) values (%s,%s,%s,%s,%s,%s)
          ON DUPLICATE KEY UPDATE content=(content),view_counts=(view_counts);
        """

		crawl_at = datetime.datetime.now().strftime(SQL_DATETIME_FORMAT)

		# Field values arrive as lists from the loader; take the single
		# element. "".join(self["id"]) would also work for one-element
		# string lists. (Renamed from ``id`` to avoid shadowing the
		# builtin; the leftover "found item" debug print was removed.)
		question_id = self["id"][0]

		url = "".join(self["url"]).strip()
		title = "".join(self["title"]).strip()
		content = "".join(self["content"]).strip()
		view_counts = int(extract_number(self["view_counts"][-1]))

		params = (question_id, url, title, content, view_counts, crawl_at)

		return insert_sql, params


class RubyChinaAnswerItem(scrapy.Item):
	"""An answer scraped from ruby-china.org.

	Field names mirror the columns of the ``rc_answers`` table.
	"""
	id = scrapy.Field()
	url = scrapy.Field()
	content = scrapy.Field()
	question_id = scrapy.Field()
	author_id = scrapy.Field()
	created_at = scrapy.Field()
	updated_at = scrapy.Field()
	crawl_at = scrapy.Field()
	crawl_updated_at = scrapy.Field()

	def get_insert_sql(self):
		"""Return an (insert_sql, params) pair for the MySQL pipeline.

		On a duplicate primary key only the content column is refreshed.
		"""
		insert_sql = """
          insert into rc_answers(id,content,question_id,author_id,crawl_at) values (%s,%s,%s,%s,%s)
          ON DUPLICATE KEY UPDATE content=(content);
        """

		# Renamed from ``id`` to avoid shadowing the builtin.
		answer_id = self["id"]
		content = self["content"]
		question_id = self["question_id"]
		author_id = self["author_id"]

		crawl_at = datetime.datetime.now().strftime(SQL_DATETIME_FORMAT)

		params = (answer_id, content, question_id, author_id, crawl_at)

		return insert_sql, params


def remove_splash(value):
	"""Remove every "/" from *value* (e.g. trailing slash in city names)."""
	return "".join(value.split("/"))


def handler_address(value):
	"""Normalize a Lagou job-address blob into one compact string.

	Splits on newlines, strips whitespace from each line, drops the
	literal "查看地图" ("view map") link text, and joins what remains.
	"""
	stripped = (line.strip() for line in value.split("\n"))
	kept = [line for line in stripped if line != "查看地图"]
	return "".join(kept)


class LagouJobItem(scrapy.Item):
	"""A job posting scraped from lagou.com; maps to table ``lagou_jobs``."""
	url = scrapy.Field()
	url_object_id = scrapy.Field()
	title = scrapy.Field()
	salary = scrapy.Field()
	city = scrapy.Field(
		input_processor=MapCompose(remove_splash),
	)
	work_years = scrapy.Field(
		input_processor=MapCompose(remove_splash),
	)
	degree_need = scrapy.Field(
		input_processor=MapCompose(remove_splash),
	)
	type = scrapy.Field()
	publish_at = scrapy.Field()
	tags = scrapy.Field(
		input_processor=Join(","),
	)
	advantage = scrapy.Field()
	desc = scrapy.Field()
	address = scrapy.Field(
		# Strip HTML tags first, then normalize the address text.
		input_processor=MapCompose(remove_tags, handler_address),
	)
	company_url = scrapy.Field()
	company_name = scrapy.Field()
	crawl_at = scrapy.Field()
	crawl_update_time = scrapy.Field()

	def get_insert_sql(self):
		"""Return an (insert_sql, params) pair for the MySQL pipeline.

		The SQL lists five columns per line; adjust the layout to taste.
		Backticks escape ``type`` and ``desc``, which are reserved words
		in MySQL — table and column names should be escaped this way in
		production code. On duplicate key only salary and advantage are
		refreshed.
		"""
		insert_sql = """
          insert into lagou_jobs(url,url_object_id,title,salary,city,work_years,degree_need,`type`,publish_at,tags,advantage,`desc`,address,company_url,company_name,crawl_at) 
          values (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)
          ON DUPLICATE KEY UPDATE 
            salary=(salary),advantage=(advantage);
        """

		params = (
			self["url"], self["url_object_id"], self["title"], self["salary"], self["city"],
			self["work_years"], self["degree_need"], self["type"], self["publish_at"], self["tags"],
			self["advantage"], self["desc"], self["address"], self["company_url"], self["company_name"],
			self["crawl_at"].strftime(SQL_DATETIME_FORMAT)
		)

		return insert_sql, params


class LagouItemLoader(ItemLoader):
	"""ItemLoader that takes only the first extracted value per field."""
	default_output_processor = TakeFirst()
