#!/usr/bin/env python3
# -*- encoding: utf-8 -*-
'''
@File    :   modelProfile.py
@Time    :   2020/09/07 13:40:32
@Author  :   陈培杞
@Version :   1.0
@Doc     :   tfidf + textrank 构造文本画像，输出主题词
'''

import os
import jieba
import jieba.analyse
import jieba.posseg as pseg
import codecs

from pyspark.sql import functions as F
from pyspark.sql.functions import UserDefinedFunction,udf
from pyspark.sql.types import ArrayType, StringType,IntegerType,DoubleType, FloatType,StructType
from pyspark.ml.feature import IDF,IDFModel,CountVectorizer,CountVectorizerModel,StopWordsRemover

from util import splitToken, sentenceClear
from myinit import spark, sc
from logger import worklog
from config import config
from db  import into_MySQL

class TFIDF(object):
    """Wraps the CountVectorizer + IDF pipeline used to build TF-IDF features."""

    def __init__(self):
        # Fitted models and the transformed result; populated by fit() or
        # future_loadModel().
        self.cv_model = None
        self.idf_model = None
        self.tfidf_result = None


    def future_loadModel(self):
        """Restore previously persisted CountVectorizer and IDF models."""
        self.cv_model = CountVectorizerModel.load(config.CVModel)
        self.idf_model = IDFModel.load(config.IDFModel)


    def future_saveModel(self):
        """Persist both fitted models, overwriting any earlier versions."""
        self.cv_model.write().overwrite().save(config.CVModel)
        self.idf_model.write().overwrite().save(config.IDFModel)


    def fit(self, data):
        """Fit CountVectorizer + IDF on `data` and return the TF-IDF DataFrame.

        `data` must provide `rcpt` and `sentence` columns; the result carries
        `rcpt`, `words`, `countFeatures` and `idfFeatures`.
        """
        tokenized = data.withColumn("words", splitToken(F.array("sentence"))).select("rcpt", "words")

        # vocabSize caps the total vocabulary; minDF is the minimum number of
        # documents a term must appear in.
        vectorizer = CountVectorizer(inputCol="words", outputCol="countFeatures",
                                     vocabSize=10000, minDF=1.0)
        self.cv_model = vectorizer.fit(tokenized)

        # Raw term-frequency vectors per document.
        counted = self.cv_model.transform(tokenized)

        # Inverse document frequency on top of the counts.
        self.idf_model = IDF(inputCol="countFeatures", outputCol="idfFeatures").fit(counted)

        self.tfidf_result = self.idf_model.transform(counted)
        return self.tfidf_result


def get_rcpt_topk_keyword_by_tfidf(tfidf_result):
    """Return an RDD of (rcpt, word_index, tfidf) for each user's top-K terms."""
    def top_keywords(partition):
        """Pick the K highest-weighted terms per row; yields vocabulary indices only."""
        limit = config.TopK
        for row in partition:
            # Pair each sparse-vector index with its weight, strongest first.
            ranked = sorted(zip(row.idfFeatures.indices, row.idfFeatures.values),
                            key=lambda pair: pair[1], reverse=True)
            for word_index, weight in ranked[:limit]:
                yield row.rcpt, int(word_index), round(float(weight), 4)

    return tfidf_result.rdd.mapPartitions(top_keywords)

def get_idf_with_keywords(cv_model, idf_model):
    """Return an RDD of [keyword, idf, index] triples, one per vocabulary word.

    `index` is the word's position in the CountVectorizer vocabulary, which is
    also the index used inside the TF-IDF sparse vectors, so downstream joins
    can map vector indices back to words.
    """
    # Zip each vocabulary word with its IDF weight and vocabulary position in a
    # single pass, instead of building tuples and mutating them in place.
    rows = [[word, float(weight), index]
            for index, (word, weight) in enumerate(zip(cv_model.vocabulary,
                                                       idf_model.idf.toArray()))]
    return sc.parallelize(rows)

def tfidfModel(data):
    """Compute per-rcpt TF-IDF keyword weights and persist them to Hive.

    Registers the temp views `idf_keywords` and `tfidf_keywords_values` that
    the downstream profile computation joins against, and writes the final
    keyword table to `profile.keywords_result`.
    """
    model = TFIDF()
    tfidf_result = model.fit(data)
    tfidf_result.cache()

    topk_df = get_rcpt_topk_keyword_by_tfidf(tfidf_result).toDF(["rcpt", "index", "weights"])
    idf_keywords = get_idf_with_keywords(model.cv_model, model.idf_model).toDF(["keyword", "idf", "index"])
    idf_keywords.createOrReplaceTempView("idf_keywords")
    idf_keywords.cache()

    # Translate vocabulary indices back into the actual keyword strings.
    index_to_word = idf_keywords.select("keyword", "index")
    keywords_result = topk_df.join(index_to_word, index_to_word.index == topk_df.index) \
                             .select(["rcpt", "keyword", "weights"])
    keywords_result.createOrReplaceTempView("tfidf_keywords_values")
    keywords_result.cache()
    keywords_result.write.saveAsTable('profile.keywords_result', mode='overwrite')




def get_textrank_keywords_values(sentence_df):
    """Run TextRank keyword extraction per row; returns an RDD of (rcpt, keyword, weight).

    Everything below is kept inside this function as a closure so that
    mapPartitions can serialize it to the executors — do not split it out.
    """
    class TextRank(jieba.analyse.TextRank):
        def __init__(self, window=20, word_min_len=2):
            super(TextRank, self).__init__()
            self.span = window  # co-occurrence window size
            self.word_min_len = word_min_len  # minimum word length to keep
            # POS tags to retain, per jieba on GitHub; see https://github.com/baidu/lac
            self.pos_filt = frozenset(
                ('n', 'x', 'eng', 'f', 's', 't', 'nr', 'ns', 'nt', "nw", "nz", "PER", "LOC", "ORG"))
        
        def pairfilter(self, wp):
            """Filter predicate: True to keep the (word, flag) pair.

            NOTE(review): when neither branch matches, this falls through and
            returns None, which jieba treats as falsy — same effect as False.
            """
            # English tokens shorter than 3 characters are dropped outright.
            if wp.flag == "eng":
                if len(wp.word) <= 2:
                    return False

            if wp.flag in self.pos_filt and len(wp.word.strip()) >= self.word_min_len:
                return True

    def textrank(partition):
        # Load the user dictionary into jieba once per executor process.
        if not jieba.dt.initialized:
            jieba.load_userdict(config.JieBa)

        def get_stopwords_list():
            """Return the stopwords list.

            NOTE(review): `stopwords_path` is not defined in any enclosing
            scope, so calling this would raise NameError. Currently dead code
            — the only call site below is commented out.
            """
            stopwords_list = [i.strip() for i in codecs.open(stopwords_path).readlines()]
            return stopwords_list

        # Full stopwords list (currently unused)
        #stopwords_list = get_stopwords_list()

        # TextRank with configured window size and minimum word length.
        textrank_model = TextRank(window=config.TRWindow, word_min_len=config.TRWordMinLen)
        allowPOS = ('n', "x", 'eng', 'nr', 'ns', 'nt', "nw", "nz", "c")

        for row in partition:
            # topK weighted keywords per sentence; withFlag=False drops POS tags.
            tags = textrank_model.textrank(row.sentence, topK=config.TopK, withWeight=True, allowPOS=allowPOS, withFlag=False)
            for tag in tags:
                yield row.rcpt, tag[0], tag[1]

    textrank_keywords_values = sentence_df.rdd.mapPartitions(textrank)
    return textrank_keywords_values


def textrankModel(data):
    """Extract TextRank keywords per rcpt and persist them to Hive.

    Registers the temp view `textrank_keywords_values` and writes the table
    `profile.textrank_keywords_values`.
    """
    cleaned = data.select("rcpt", "sentence").withColumn('sentence', sentenceClear(F.col("sentence")))
    result = get_textrank_keywords_values(cleaned).toDF(["rcpt", "keyword", "textrank"])
    result.createOrReplaceTempView('textrank_keywords_values')
    result.cache()
    result.write.saveAsTable('profile.textrank_keywords_values', mode='overwrite')



def calProfile():
    """Build the per-rcpt text profile and persist it to `profile.eml_profile`.

    Keyword weight = TextRank weight x IDF weight (joined on the keyword);
    topic words are the terms that appear in BOTH the TF-IDF and TextRank
    results. Relies on the temp views registered by tfidfModel() and
    textrankModel().
    """
    # Join TextRank weights with per-word IDF values on the keyword itself.
    keywords_res = spark.sql("""
                    select rcpt, textrank_keywords_values.keyword, textrank,idf 
                    from textrank_keywords_values 
                         left join idf_keywords 
                         on  textrank_keywords_values.keyword=idf_keywords.keyword
                    """)
    # Final keyword weight is the product of the two scores.
    keywords_weights = keywords_res.withColumn('weights', keywords_res.textrank * keywords_res.idf).select(["rcpt", "keyword", "weights"])

    # Collapse each rcpt into a single row, collecting keywords and weights
    # into parallel lists with collect_list().
    keywords_weights.createOrReplaceTempView('keywords_weights')
    keywords_weights = spark.sql("select rcpt, collect_list(keyword) as  keywords, collect_list(weights) weights from keywords_weights group by rcpt")
    keywords_weights.cache()

    # Fold the parallel lists into one keyword -> weight mapping per rcpt so
    # the profile column is a single map.
    def pair_up(row):
        return row.rcpt, dict(zip(row.keywords, row.weights))
    eml_keywords = keywords_weights.rdd.map(pair_up).toDF(['rcpt', 'keywords'])
    #eml_keywords.show(3,truncate=False)

    # Topic words = keywords present in BOTH the TF-IDF and TextRank results,
    # de-duplicated with collect_set().
    eml_topics = spark.sql("""
                    select tf.rcpt as rcpt_, collect_set(tf.keyword) as topics 
                    from tfidf_keywords_values as tf
                         inner join 
                         textrank_keywords_values as tr
                         on tf.keyword=tr.keyword
                    group by tf.rcpt
                    """)
    #eml_topics.show(3,truncate=False)
    eml_keywords.cache()
    eml_topics.cache()

    # Merge keywords and topics into the final profile and persist it.
    eml_profile = eml_keywords.join(eml_topics, eml_keywords.rcpt == eml_topics.rcpt_).select(["rcpt", "keywords", "topics"])
    eml_profile.cache()
    eml_profile.createOrReplaceTempView("eml_profile")

    print('eml profile into hive')
    #into_MySQL(eml_profile, 'eml_profile')
    eml_profile.write.saveAsTable('profile.eml_profile', mode='overwrite')
    


def userClusterProfile():
    """Cluster-based user topic profile — placeholder, not implemented yet."""
    pass


def modelProfile():
    """Entry point: build per-rcpt text profiles from the `data` temp view.

    Explodes multi-recipient rows (`rcpt` split on ';'), merges all sentences
    per recipient into one document, then runs the TF-IDF stage. The later
    stages (TextRank, combined profile, clustering) are currently disabled.
    """
    # Explode semicolon-separated recipients and concatenate each recipient's
    # distinct sentences into a single document; empty rcpt values are dropped.
    data = spark.sql("""
                    select rcpt, concat_ws(' ',collect_set(sentence)) as sentence
                    from (select explode(split(rcpt,';')) as rcpt, sentence from data) 
                    where rcpt <>''
                    group by rcpt
                    """)
    data.cache()
    worklog.debug("开始进行 文本画像")
    worklog.debug("\t 正在计算 tfidf")
    tfidfModel(data)
    # NOTE(review): the remaining pipeline steps are disabled by wrapping them
    # in a bare string literal (evaluated and discarded at runtime) — consider
    # using real comments or a feature flag instead.
    """
    worklog.debug("\t 正在计算 textrank")
    textrankModel(data)

    worklog.debug("\t 正在结合tfidf和textrank计算文本画像")
    calProfile()

    worklog.debug("\t 正在计算用户主题词聚类画像")
    userClusterProfile()
    """