# -*- coding: utf-8 -*-

# Define here the models for your scraped items
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/items.html

import scrapy
import datetime
import re
from scrapy.loader import ItemLoader
from scrapy.loader.processors import MapCompose, TakeFirst, Join

from models.es_types import ArticleType
from w3lib.html import remove_tags

from elasticsearch_dsl.connections import connections
# Module-level elasticsearch connection shared by gen_suggests() and
# ArticleItem.save_to_es().
# NOTE(review): create_connection's first positional parameter is the
# connection alias; passing the ArticleType class here is unusual —
# confirm this resolves to the intended cluster (hosts are normally
# passed explicitly, e.g. create_connection(hosts=[...])).
es = connections.create_connection(ArticleType)


class ArticlespiderItem(scrapy.Item):
    """Default item scaffold generated by ``scrapy startproject``.

    Declares no fields; kept for compatibility with the generated
    project layout. Real items are defined below.
    """


def add_sifou(value):
    """Tag *value* with the literal 'sifou' suffix (title input processor)."""
    return ''.join([value, 'sifou'])


def date_convert(value):
    """Parse a scraped "yy/mm/dd" date string into a ``datetime.date``.

    Falls back to today's date when *value* is missing or malformed,
    so a bad scrape never aborts the pipeline.
    """
    try:
        # NOTE(review): "%y" expects a two-digit year (e.g. "18/05/01");
        # confirm the site never emits four-digit years ("%Y").
        return datetime.datetime.strptime(value, "%y/%m/%d").date()
    except (ValueError, TypeError):
        # Narrowed from a bare `except Exception`: only parse failures
        # (bad format / non-string) should trigger the fallback.
        return datetime.datetime.now().date()


def get_nums(value):
    """Extract the first run of digits in *value* and return it as an int.

    Returns 0 when *value* contains no digits. The original returned a
    str on match but an int 0 on miss; returning int in both cases gives
    downstream consumers (SQL insert, es document) a consistent type.
    """
    # Raw string avoids the invalid-escape warning "\d" triggers on
    # modern Python; the original's inner alternation group was dead.
    match_re = re.search(r"(\d+)", value)
    return int(match_re.group(1)) if match_re else 0


def return_value(value):
    """Identity passthrough.

    Used as an output processor to keep a field's raw value list,
    overriding the loader's default TakeFirst.
    """
    return value


class ArticlItemLoader(ItemLoader):
    """Custom ItemLoader for articles.

    Collapses every field's extracted value list down to its first
    element unless a field overrides the output processor.
    """

    default_output_processor = TakeFirst()


def grn_suggest(index, info_tuple):
    """Unused stub — apparent typo of ``gen_suggests`` below; kept as a no-op."""
    return None


def gen_suggests(index, info_tuple):
    """Build an elasticsearch completion-suggest payload from weighted strings.

    Args:
        index: name of the es index whose analyzer is used.
        info_tuple: iterable of (text, weight) pairs, highest weight first.

    Returns:
        list of ``{"input": [...], "weight": w}`` dicts, one per pair
        that contributed at least one new token.
    """
    used_words = set()
    suggests = []
    for text, weight in info_tuple:
        if text:
            # Analyze the string server-side with the ik_max_word analyzer.
            words = es.indices.analyze(index=index, analyzer="ik_max_word", params={'filter': ["lowercase"]}, body=text)
            # Fix: the analyze response is a dict — iterate its "tokens"
            # list, and test each token's own length. The original tested
            # len(["token"]) > 1, which is always False and discarded
            # every token.
            analyzed_words = set(r["token"] for r in words["tokens"] if len(r["token"]) > 1)
            new_words = analyzed_words - used_words
            # Fix: record emitted tokens so lower-weight entries do not
            # repeat them (used_words was never updated before).
            used_words.update(new_words)
        else:
            new_words = set()

        if new_words:
            suggests.append({"input": list(new_words), "weight": weight})

    return suggests


class ArticleItem(scrapy.Item):
    """A scraped article with per-field cleaning processors.

    Persisted two ways: get_insert_sql() for a MySQL pipeline and
    save_to_es() for elasticsearch.
    """

    # Title; tagged with the 'sifou' suffix on input.
    title = scrapy.Field(
        input_processor=MapCompose(add_sifou)
    )
    # Publication date, parsed from "yy/mm/dd" (falls back to today).
    create_date = scrapy.Field(
        input_processor=MapCompose(date_convert)
    )
    url = scrapy.Field()
    # Output processor overridden so the full URL list survives
    # (the loader's default TakeFirst would reduce it to one value).
    front_image_url = scrapy.Field(
        output_processor=MapCompose(return_value)
    )
    front_image_path = scrapy.Field()
    # Read count, reduced to the embedded number by get_nums.
    read_volume = scrapy.Field(
        input_processor=MapCompose(get_nums)
    )
    url_object_id = scrapy.Field()
    article_text = scrapy.Field()
    fav_nums = scrapy.Field()

    def get_insert_sql(self):
        """Return an (insert_sql, params) pair for the MySQL `article` table.

        Raises KeyError if any of the listed fields was never populated.
        """
        insert_sql = """
                   insert into article(url_object_id,title,create_date,url,front_image_url,
                   fav_nums,article_text,read_volume)
                   values (%s,%s,%s,%s,%s,%s,%s,%s)
               """
        params = (
            self["url_object_id"], self["title"], self["create_date"], self["url"], self["front_image_url"],
            self["fav_nums"], self["article_text"], self["read_volume"]
        )
        return insert_sql, params

    # Write the item into elasticsearch.
    def save_to_es(self):
        """Copy this item into an ArticleType document and save it to es.

        Strips HTML tags from the article body and attaches a completion
        suggester built from the title. Raises KeyError on missing fields.
        """
        article = ArticleType()
        article.title = self['title']
        article.create_date = self['create_date']
        # Drop HTML markup before indexing the body text.
        article.article_text = remove_tags(self['article_text'])
        article.front_image_url = self['front_image_url']
        # if "front_image_url" in self:
        #     article.front_image_path = self['front_image_path']
        article.read_volume = self['read_volume']
        article.fav_nums = self['fav_nums']
        article.url = self['url']
        article.url_object_id = self['url_object_id']

        # article.suggest = [{"input": {}, "weight": 2}]
        # NOTE(review): create_date is a date object, not analyzable text —
        # gen_suggests feeds it to the es analyze API; confirm this is
        # intended (a tags/keywords string is the more typical second entry).
        article.suggest = gen_suggests(ArticleType._doc_type.index, ((article.title, 10), (article.create_date, 7)))

        # All keyword arguments are elasticsearch-dsl defaults, spelled out.
        article.save(using=None, index=None, validate=True, skip_empty=True)


def remove_splash(value):
    """Strip every "/" from *value* (cleans the scraped job-city string)."""
    return "".join(value.split("/"))


def handle_jobaddr(value):
    """Clean a scraped job address.

    Drops the "查看地图" (view-map) link text, strips each remaining
    line, and joins everything into one string.
    """
    kept = []
    for line in value.split("\n"):
        line = line.strip()
        if line != "查看地图":
            kept.append(line)
    return "".join(kept)


def remove_publishstr(value):
    """Extract every "HH:MM" time fragment from the publish-time string.

    Returns a list of matches (MapCompose flattens it for the field),
    discarding the surrounding label text.
    """
    # Raw string avoids the invalid-escape warning that "\d" in a plain
    # string literal triggers on modern Python.
    return re.findall(r'\d{2}:\d{2}', value)


class LagouJobItemLoader(ItemLoader):
    """ItemLoader for LagouJobItem.

    Fix: the original set ``default_input_processor = TakeFirst()``,
    which discards all but the first extracted value BEFORE field-level
    input processors (e.g. the tags Join) ever see them. The sibling
    ArticlItemLoader puts TakeFirst on output, which is the conventional
    placement; this loader now matches it.
    """

    default_output_processor = TakeFirst()


class LagouJobItem(scrapy.Item):
    """A job posting scraped from lagou.com."""

    title = scrapy.Field()
    url = scrapy.Field()
    # NOTE(review): looks like a typo of url_object_id — the SQL below
    # inserts this value into the url_object_id column; confirm the
    # spider actually populates the key "usr_object_id".
    usr_object_id = scrapy.Field()
    salary = scrapy.Field()
    # City string with the surrounding "/" separators removed.
    job_city = scrapy.Field(
        input_processor=MapCompose(remove_splash),
    )
    work_years = scrapy.Field(
        input_processor=MapCompose(remove_splash),
    )
    degree_need = scrapy.Field(
        input_processor=MapCompose(remove_splash),
    )
    job_type = scrapy.Field()
    # Reduced to the "HH:MM" fragments by remove_publishstr.
    publish_time = scrapy.Field(
        input_processor=MapCompose(remove_publishstr),
    )
    # Tag list collapsed into a single comma-separated string.
    tags = scrapy.Field(
        input_processor=Join(",")
    )
    job_advantage = scrapy.Field()
    job_desc = scrapy.Field()
    # HTML stripped first, then the map-link text removed.
    job_addr = scrapy.Field(
        input_processor=MapCompose(remove_tags, handle_jobaddr)
    )
    company_url = scrapy.Field()
    company_name = scrapy.Field()
    crawl_time = scrapy.Field()

    def get_insert_sql(self):
        """Return an (insert_sql, params) pair for the MySQL `lagou_job` table.

        Raises KeyError if any of the listed fields was never populated.
        """
        insert_sql = """
            insert into lagou_job(title,url,url_object_id,salary,job_city,work_years,degree_need,
            job_type,publish_time,tags,job_advantage,job_desc,job_addr,company_url,company_name,crawl_time)
            values (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)
        """
        params = (
            self["title"], self["url"], self["usr_object_id"], self["salary"], self["job_city"], self["work_years"],
            self["degree_need"], self["job_type"], self["publish_time"], self["tags"], self["job_advantage"],
            self["job_desc"],
            self["job_addr"], self["company_url"], self["company_name"], self["crawl_time"]
        )
        return insert_sql, params
