# -*- coding: utf-8 -*-

# Define here the models for your scraped items
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/items.html
from datetime import datetime
import time
import re
import random
import scrapy
import redis
from scrapy.loader import ItemLoader
# Fix: the original line also imported TakeAll and Take_advantage_All, which
# do not exist in scrapy.loader.processors and made this module fail to import.
from scrapy.loader.processors import MapCompose, TakeFirst, Join, Identity, SelectJmes
from urllib import parse
from w3lib.html import remove_tags
from DataSpider.settings import SQL_DATETIME_FORMAT

from DataSpider.models.es_types import WuYictoIndex, TodayJobIndex, GuokrIndex
from elasticsearch_dsl.connections import connections

# Backward-compatible aliases: both names were used in this file as
# "keep the whole extracted list" processors, which is exactly what
# scrapy's Identity processor does.
TakeAll = Identity
Take_advantage_All = Identity

# Shared elasticsearch connection used by the save_to_es() methods below.
es = connections.create_connection(WuYictoIndex._doc_type.using)

# Redis client used only to count how many items each spider has crawled.
redis_cli = redis.StrictRedis()


class FirtscrapyItem(scrapy.Item):
    """Scaffold item generated by `scrapy startproject`; currently unused."""
    # define the fields for your item here like:
    # name = scrapy.Field()
    pass


def datetime_convert(value):
    """Parse a "%Y-%m-%d %H:%M" timestamp string into a datetime.

    Falls back to the current time when the value does not match the
    expected format (or is not a string at all), so callers always
    receive a datetime instance.
    """
    try:
        # The original used time.strptime (returns struct_time) on success but
        # a datetime on failure; datetime.strptime makes both paths consistent.
        return datetime.strptime(value, "%Y-%m-%d %H:%M")
    except (ValueError, TypeError):
        # The original called datetime.datetime.now(), which raises
        # AttributeError under `from datetime import datetime` — fixed.
        return datetime.now()


class ArticalItemLoader(ItemLoader):
    """ItemLoader for 51CTO articles: every field keeps only the first
    extracted value by default instead of the list scrapy collects."""
    default_output_processor = TakeFirst()


def get_num(value):
    """Coerce a scraped numeric string (e.g. a praise count) into an int."""
    number = int(value)
    return number


def get_author(value):
    """Extract the author name from a "作者：xxx" style string.

    Returns the text after the last full-width colon; when the pattern
    does not match, returns the input unchanged instead of raising
    UnboundLocalError as the original did.
    """
    match_re = re.match(".*[：](.*)", value)
    if match_re:
        return match_re.group(1)
    return value


def return_value(value):
    # Identity pass-through used as a field processor so the raw extracted
    # value survives the loader's default TakeFirst output processing.
    return value


def gen_suggests(index, info_tuple):
    """Build an ES completion-suggester payload from (text, weight) pairs.

    Each text is tokenized through the target index's ik_max_word analyzer
    (with a lowercase filter); tokens longer than one character become
    suggest inputs at the given weight.  A token already emitted by an
    earlier (higher-priority) pair is skipped so it keeps its first weight.
    """
    used_words = set()
    suggests = []
    for text, weight in info_tuple:
        if text:
            # Ask ES to analyze the raw string with the Chinese ik analyzer.
            words = es.indices.analyze(index=index, analyzer="ik_max_word",
                                       params={'filter': ["lowercase"]}, body=text)
            analyzed_words = set(r["token"] for r in words["tokens"] if len(r["token"]) > 1)
            new_words = analyzed_words - used_words
            # Bug fix: used_words was never updated, so the subtraction above
            # was a no-op and the same token could be suggested with several
            # different weights.
            used_words.update(new_words)
        else:
            new_words = set()

        if new_words:
            suggests.append({"input": list(new_words), "weight": weight})

    return suggests


class WuYiCtoItem(scrapy.Item):
    """An article scraped from 51CTO.

    Persisted two ways: into MySQL via get_insert_sql() (consumed by a
    twisted adbapi pipeline) and into elasticsearch via save_to_es().
    """
    url = scrapy.Field()
    url_object_id = scrapy.Field()  # hash of url, doubles as the ES doc id
    front_image_url = scrapy.Field(
        # keep the raw list value: the image pipeline expects a list of urls
        output_processor=MapCompose(return_value)
    )
    front_image_path = scrapy.Field(
        output_processor=MapCompose(return_value)
    )
    title = scrapy.Field()
    create_time = scrapy.Field(
        input_processor=MapCompose(datetime_convert)
    )
    author = scrapy.Field(
        input_processor=MapCompose(get_author)
    )
    praise_num = scrapy.Field(
        input_processor=MapCompose(get_num)
    )
    content = scrapy.Field(
        # Fix: TakeAll is not a scrapy processor; Identity provides the
        # intended "keep every extracted fragment" behaviour.
        output_processor=Identity()
    )
    origin = scrapy.Field()
    origin_url = scrapy.Field()
    tags = scrapy.Field()

    def get_insert_sql(self):
        """Return an (insert_sql, params) pair for the 51cto_article table."""
        insert_sql = """
            insert into 51cto_article (url_object_id, title, url, create_time, front_image_url, front_image_path, author, origin, origin_url,
            praise_num,  content, tags) 
            values ( %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)
        """
        params = (
            self["url_object_id"], self["title"], self["url"], self["create_time"], self["front_image_url"],
            self["front_image_path"], self["author"], self["origin"], self["origin_url"], self["praise_num"],
            self["content"], self["tags"])

        return insert_sql, params

    def save_to_es(self):
        """Index this article into elasticsearch and bump the redis counter."""
        article = WuYictoIndex()
        article.url = self['url']
        article.meta.id = self["url_object_id"]
        article.front_image_url = self["front_image_url"]
        # front_image_path is only present once the image pipeline has run
        if "front_image_path" in self:
            article.front_image_path = self["front_image_path"]
        article.title = self["title"]
        article.create_time = self["create_time"]
        article.author = self["author"]
        article.praise_num = self["praise_num"]
        article.content = self["content"]
        article.origin = self["origin"]
        article.origin_url = self["origin_url"]
        article.tags = self["tags"]

        # Title tokens outrank tag tokens in the completion suggester.
        article.suggest = gen_suggests(WuYictoIndex._doc_type.index, ((article.title, 10), (article.tags, 7)))

        article.save()
        # count crawled articles in redis
        redis_cli.incr("51cto_count")
        return


class ShixisengItemLoader(ItemLoader):
    """ItemLoader for shixiseng.com job postings: fields keep only the
    first extracted value by default."""
    default_output_processor = TakeFirst()


def get_salary_min(value):
    """Lower bound of a "min-max元/..." salary string as an int.

    Returns 0 for 面议 ("negotiable").  The original compared against one
    exact whitespace-padded literal; stripping first handles any amount of
    surrounding whitespace while still matching the original input.
    Raises ValueError if the leading segment is not numeric.
    """
    if value.strip() == "面议":
        return 0
    return int(value.split('-')[0])


def get_salary_max(value):
    """Upper bound of a "min-max元/..." salary string as an int.

    Returns 0 for 面议 ("negotiable").  The original compared against one
    exact whitespace-padded literal; stripping first handles any amount of
    surrounding whitespace while still matching the original input.
    Raises IndexError/ValueError when the string lacks a "min-max元" shape.
    """
    if value.strip() == "面议":
        return 0
    return int((value.split('-')[1]).split('元')[0])


def get_str_format(value):
    """Decrypt shixiseng's obfuscated digits, then URL-unquote the result."""
    return parse.unquote(get_decrypt_str(value))


# def get_company_url(value):
#     return "https://www.shixiseng.com" + value

def get_decrypt_str(value):
    """Map shixiseng's private-use-area font glyphs back to ASCII digits.

    The site renders salary digits with a custom font whose code points sit
    in the Unicode private use area; the i-th glyph stands for digit i.
    """
    cipher_digits = "\ue5e0\ue3f9\ue1c0\ueaa9\ue3f1\ued6f\ueaf7\uf7ad\uf606\ue964"
    decrypt_table = {ord(glyph): str(i) for i, glyph in enumerate(cipher_digits)}
    return value.translate(decrypt_table)


def get_compane_name(value):
    """Remove embedded newlines from a scraped company name."""
    return value.replace("\n", "")


def get_title(value):
    """First line of the scraped title, stripped of surrounding whitespace.

    Bug fix: the original split on the letter 'n' (value.split('n')), which
    truncated any title containing an "n" (e.g. "Python..." -> "Pytho");
    the intent was clearly to split on a newline.
    """
    return value.split('\n')[0].strip()


def get_city(value):
    """Text after the first full-width colon in "城市：北京" style strings.

    Returns the input unchanged when no '：' is present instead of raising
    IndexError as the original did.  Multi-colon inputs keep the original
    behaviour of returning the second segment only.
    """
    parts = value.split('：')
    return parts[1] if len(parts) > 1 else value


class ShixisengItem(scrapy.Item):
    """An internship job posting scraped from shixiseng.com.

    (The original comment said "boss直聘职位信息" — copied from another
    spider; this item is for shixiseng.)  Persisted to MySQL via
    get_insert_sql() and to elasticsearch via save_to_es().
    """
    title = scrapy.Field(
        input_processor=MapCompose(get_title),
    )
    url = scrapy.Field()
    url_object_id = scrapy.Field()  # hash of url, doubles as the ES doc id
    salary_min = scrapy.Field(
        input_processor=MapCompose(get_salary_min),
    )
    salary_max = scrapy.Field(
        input_processor=MapCompose(get_salary_max)
    )
    job_city = scrapy.Field(
        input_processor=MapCompose(get_city)
    )
    work_week_time = scrapy.Field()
    degree_need = scrapy.Field()
    work_how_long = scrapy.Field()
    publish_time = scrapy.Field()
    job_advantage = scrapy.Field()
    job_desc = scrapy.Field(
        # Fix: TakeAll is not a scrapy processor; Identity keeps the full
        # extracted list untouched, which is what was intended.
        input_processor=Identity()
    )
    job_addr = scrapy.Field(
        input_processor=MapCompose(get_city)
    )
    company_url = scrapy.Field()
    company_name = scrapy.Field()
    crawl_time = scrapy.Field()

    def get_insert_sql(self):
        """Return an (insert_sql, params) pair upserting into shixiseng_job.

        ON DUPLICATE KEY refreshes the salary range and description when the
        same url_object_id is crawled again.
        """
        insert_sql = """
                insert into shixiseng_job(title, url_object_id, url, salary_min, salary_max, job_city, work_week_time, degree_need, work_how_long , publish_time, job_advantage,
                job_desc, job_addr, company_url, company_name, crawl_time) values (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)
                ON DUPLICATE KEY update salary_min=values(salary_min), salary_max=values(salary_max), job_desc=values (job_desc)
        """
        # (stray debug print of the title removed)
        params = (
            self["title"], self["url_object_id"], self["url"], self["salary_min"], self["salary_max"], self["job_city"]
            , self["work_week_time"], self["degree_need"], self["work_how_long"], self["publish_time"],
            self["job_advantage"], self["job_desc"], self["job_addr"]
            , self["company_url"], self["company_name"], self["crawl_time"].strftime(SQL_DATETIME_FORMAT))
        return insert_sql, params

    def save_to_es(self):
        """Index this job into elasticsearch and bump the redis counter."""
        job = TodayJobIndex()
        job.url = self['url']
        job.meta.id = self["url_object_id"]
        job.title = self["title"]
        job.publish_time = self["publish_time"]
        job.job_addr = self["job_addr"]
        job.salary_min = self["salary_min"]
        job.salary_max = self["salary_max"]
        job.job_desc = self["job_desc"]
        job.company_name = self["company_name"]
        job.company_url = self["company_url"]

        # Fix: suggestions were generated against the 51cto article index
        # (WuYictoIndex); analyze against this document's own index instead.
        job.suggest = gen_suggests(TodayJobIndex._doc_type.index, ((job.title, 10), (job.company_name, 10)))

        job.save()
        # count crawled jobs in redis
        redis_cli.incr("today_job_count")
        return

class GuokrItemLoader(ItemLoader):
    """ItemLoader for guokr.com Q&A pages: fields keep only the first
    extracted value by default."""
    default_output_processor = TakeFirst()

def get_create_time(value):
    """Pass a scraped creation time through, defaulting missing values to now."""
    return datetime.now() if value is None else value

def get_random_origin(value):
    """Pick a fake "source site" name at random; the incoming value is ignored."""
    sites = ['知乎', '虎扑', '今日头条', '果壳问答', '悟空文答', '天涯论坛']
    idx = random.randint(0, 5)
    return sites[idx]


class GuokrItem(scrapy.Item):
    """A question-and-answer entry scraped from guokr.com.

    (The original comment said "boss直聘职位信息" — copied from another
    spider; this item is for guokr.)  Persisted to elasticsearch via
    save_to_es().  A commented-out get_insert_sql copied verbatim from the
    shixiseng item (wrong table and columns) was removed as dead code.
    """
    title = scrapy.Field(
        input_processor=MapCompose(get_title),
    )
    url = scrapy.Field()
    url_object_id = scrapy.Field()  # hash of url, doubles as the ES doc id
    content = scrapy.Field(
        # Fix: Take_advantage_All is not a scrapy processor; Identity keeps
        # the full extracted list, which is the intended behaviour.
        input_processor=Identity()
    )
    tags = scrapy.Field(
        input_processor=Identity()
    )
    origin = scrapy.Field(
        # fabricate a random "source" site name for each question
        input_processor=MapCompose(get_random_origin)
    )
    origin_url = scrapy.Field()
    create_time = scrapy.Field(
        input_processor=MapCompose(get_create_time)
    )
    answer_content = scrapy.Field(
        input_processor=Identity()
    )
    crawl_time = scrapy.Field()

    def save_to_es(self):
        """Index this question into elasticsearch and bump the redis counter."""
        question = GuokrIndex()
        question.url = self['url']
        question.meta.id = self["url_object_id"]
        question.title = self["title"]
        question.content = self["content"]
        question.tags = self["tags"]
        question.origin = self["origin"]
        question.origin_url = self["origin_url"]
        question.create_time = self["create_time"]
        question.answer_content = self["answer_content"]

        # Title and tag tokens feed the completion suggester at equal weight.
        question.suggest = gen_suggests(GuokrIndex._doc_type.index, ((question.title, 10), (question.tags, 10)))

        question.save()
        # count crawled questions in redis
        redis_cli.incr("Guokr_question_count")
        return

