#!/usr/bin/env python
"""
todo @desc : RFE活跃度模型标签开发
基于用户行为日志数据，使用KMeans+RFE方法计算用户活跃度标签
RFE = Recency(最近), Frequency(频次), Engagement(参与度)
"""
import os

import numpy as np
from pyspark import SparkContext
from pyspark.ml.clustering import KMeans, KMeansModel
from pyspark.ml.feature import VectorAssembler

from pyspark.sql import DataFrame, SparkSession, Column
from pyspark.sql import functions as F
from pyspark.sql.functions import udf
from pyspark.sql.types import StringType
from pyspark.sql.types import StringType

from BaseModel import BaseModel

# Local Windows paths, kept for reference when developing off the server:
# SPARK_HOME = 'D:\\ProgramCj\\spark-2.4.8-bin-hadoop2.7'
# PYSPARK_PYTHON = 'D:\\ProgramCJ\\Python\\python'
# 2 - server paths
SPARK_HOME = '/export/server/spark'
PYSPARK_PYTHON = '/root/anaconda3/envs/pyspark_env/bin/python'
# Export so Spark finds its home and the interpreter used to spawn Python workers.
os.environ['SPARK_HOME'] = SPARK_HOME
os.environ["PYSPARK_PYTHON"] = PYSPARK_PYTHON

# TODO 0. Prepare the Spark session (duplicated across models; consider moving into BaseModel).
spark = SparkSession \
    .builder \
    .appName("TfecProfile") \
    .master("local[*]") \
    .getOrCreate()
sc: SparkContext = spark.sparkContext

# BUGFIX: the original re-exported the Windows-only SPARK_HOME/PYSPARK_PYTHON
# here, clobbering the server paths set above. PYSPARK_PYTHON is read lazily
# when executors launch Python workers, so that override would break every job
# run on the server. The Windows values are kept only as comments above.


class RFEmodel(BaseModel):
    """User-activity (RFE) tag model.

    RFE = Recency (days since last visit), Frequency (total visits),
    Engagement (distinct pages visited).  Each raw feature is bucketed onto a
    1-5 score, users are clustered with KMeans (k=4), and clusters are mapped
    to the five-level tags by descending cluster-center sum.
    """

    def getTagId(self):
        """Return the parent tag id of the activity tag (45).

        NOTE(review): ``__main__`` constructs ``RFEmodel(46)`` while this
        returns 45 — confirm which id the tag metadata actually uses.
        """
        return 45

    # Source metadata: inType=Elasticsearch##esNodes=up01:9200##esIndex=tfec_tbl_logs##esType=_doc##selectFields=global_user_id,loc_url,log_time
    def compute(self, esdf: DataFrame, fivedf: DataFrame):
        """Compute a per-user activity tag.

        :param esdf:   raw log rows (global_user_id, loc_url, log_time)
        :param fivedf: five-level tag rows; assumed ordered so its first row is
                       the most-active tag, because it is zipped against the
                       clusters sorted by descending center sum — TODO confirm
                       ordering against the caller
        :return: DataFrame(userId, tagsId)
        """
        esdf.show(truncate=False)
        fivedf.show()

        # 0. Column-name constants, avoiding typos below.
        recencyStr = "recency"
        frequencyStr = "frequency"
        engagementsStr = "engagements"
        featureStr = "feature"
        scaleFeatureStr = "scaleFeature"  # reserved; currently unused
        predictStr = "predict"

        # Recency: days between now and the user's last recorded visit.
        recencyAggColumn: Column = F.datediff(F.current_timestamp(), F.max("log_time")).alias(
            recencyStr)
        # Frequency: total number of visits.
        frequencyAggColumn: Column = F.count("loc_url").alias(frequencyStr)
        # Engagement: number of distinct pages visited.
        engagementsAggColumn: Column = F.countDistinct("loc_url").alias(engagementsStr)
        # Aggregate the three features per user.
        tempDF: DataFrame = esdf.groupBy("global_user_id") \
            .agg(recencyAggColumn, frequencyAggColumn, engagementsAggColumn)
        tempDF.show(10, truncate=False)

        # Bucket each raw feature onto a 1-5 score so KMeans compares like
        # scales.  Values matching no branch become null and are filtered below
        # (e.g. a negative recency from a future log_time).
        recencyScore: Column = F.when(F.col(recencyStr).between(0, 15), 5) \
            .when(F.col(recencyStr).between(16, 30), 4) \
            .when(F.col(recencyStr).between(31, 45), 3) \
            .when(F.col(recencyStr).between(46, 60), 2) \
            .when(F.col(recencyStr) > 60, 1) \
            .alias(recencyStr)

        # NOTE(review): these bands overlap (e.g. 561-760 hits both the >560
        # and the 560-760 branch; 400-499 hits both the 3 and 2 branches).
        # `when` evaluates in order, so the effective mapping is
        # >560:5, 560:4, 400-559:3, 250-399:2, <250:1 — confirm the intended
        # thresholds before "fixing" the ranges, as changing them reshuffles
        # cluster scores.
        frequencyScore: Column = F.when(F.col(frequencyStr) > 560, 5) \
            .when(F.col(frequencyStr).between(560, 760), 4) \
            .when(F.col(frequencyStr).between(400, 559), 3) \
            .when(F.col(frequencyStr).between(250, 499), 2) \
            .when(F.col(frequencyStr) < 250, 1) \
            .alias(frequencyStr)

        # BUGFIX: the last branch was `< 49`, so a user with exactly 49
        # distinct pages matched no branch, scored null and was silently
        # dropped by the filter below; `< 50` closes the gap.
        engagementsScore: Column = F.when(F.col(engagementsStr) > 300, 5) \
            .when(F.col(engagementsStr).between(250, 300), 4) \
            .when(F.col(engagementsStr).between(200, 250), 3) \
            .when(F.col(engagementsStr).between(50, 200), 2) \
            .when(F.col(engagementsStr) < 50, 1) \
            .alias(engagementsStr)

        # Score dataset, dropping rows with a null id or any unmatched score.
        FREScoreDF: DataFrame = tempDF \
            .select(tempDF["global_user_id"].alias("userId"), recencyScore, frequencyScore, engagementsScore)\
            .where(f"userId is not null and {recencyStr} is  not null and {frequencyStr} is  not null and {engagementsStr} is  not null")
        FREScoreDF.show(10, truncate=False)

        # Assemble the three scores into a single feature vector.
        vector = VectorAssembler().setInputCols([recencyStr, frequencyStr, engagementsStr]).setOutputCol(featureStr)
        vectorDF = vector.transform(FREScoreDF)
        # KMeans: 4 clusters, fixed seed for reproducibility, at most 10 iterations.
        kMeans: KMeans = KMeans() \
            .setK(4) \
            .setSeed(10) \
            .setMaxIter(10) \
            .setFeaturesCol(featureStr) \
            .setPredictionCol(predictStr)
        # Fit, then attach each user's cluster id as the `predict` column.
        model: KMeansModel = kMeans.fit(vectorDF)
        resultDF: DataFrame = model.transform(vectorDF)
        resultDF.show()

        # Rank clusters by the sum of their center coordinates: a larger sum
        # means higher combined R+F+E scores, i.e. a more active cluster.
        center = model.clusterCenters()
        list1 = [float(np.sum(x)) for x in center]
        print("sum cluer:", list1)

        # cluster id -> center sum
        dict1 = dict(enumerate(list1))
        print("from dict", dict1)

        list2 = [[k, v] for k, v in dict1.items()]
        print(list2)
        centerdf: DataFrame = spark.createDataFrame(list2, ['predict', 'center'])
        centerdf.show()
        # Sort clusters by center sum, descending (most active first).
        centersortrdd = centerdf.rdd.repartition(1).sortBy(lambda x: x[1], ascending=False)
        print("sort partition")
        centersortrdd.foreach(lambda x: print(x))

        # Zip sorted clusters with the five-level tag rows.  Both sides are
        # forced into one partition because RDD.zip requires identical
        # partition and element counts.  (Removed the original's no-op
        # `union(sc.parallelize([]))` and identity `map`.)
        unionrdd = centersortrdd.repartition(1).zip(fivedf.rdd.repartition(1))
        unionrdd.foreach(lambda x: print(x))
        # cluster id -> tag id
        fivedict = unionrdd.map(lambda row: (row[0][0], row[1][0])).collectAsMap()
        print(fivedict)

        # Translate each user's cluster id into its tag id via a small UDF.
        newDF: DataFrame = resultDF.select(resultDF['userId'],
                                           udf(lambda x: fivedict[x], returnType=StringType())(resultDF['predict']).alias('tagsId'))

        newDF.show()
        return newDF

if __name__ == '__main__':
    # Entry point: build the RFE activity model and run the full pipeline.
    # NOTE(review): the constructor arg is 46 but getTagId() returns 45 —
    # confirm which id is intended.
    model = RFEmodel(46)
    model.execute()