# @desc : Mined tag: user-activity model computed with RFE scoring + KMeans clustering
import numpy as np
from pyspark.sql import DataFrame, Column
from pyspark.sql.functions import udf
from pyspark.sql.types import StringType

from cn.tag.base.BaseModelES import BaseModelAbstract, spark, sc
from pyspark.sql import functions as F
from pyspark.ml.clustering import KMeans, KMeansModel
from pyspark.ml.feature import VectorAssembler

class RFEKmeansTest(BaseModelAbstract):
    """RFE user-activity tag model.

    Scores every user on Recency / Frequency / Engagements (RFE), clusters the
    1-5 scores with KMeans (k=4), ranks the cluster centers by the sum of their
    coordinates (higher sum = more active), and maps each cluster to one of the
    four child tag ids supplied in ``fiveDF``.
    """

    def getTagId(self):
        # Parent tag id of this model; fiveDF carries its four child tags.
        return 46

    def compute(self, esDF: DataFrame, fiveDF: DataFrame):
        """Build a (userId, tagsId) DataFrame from the raw behaviour log.

        :param esDF:   behaviour log with columns global_user_id, loc_url, log_time
        :param fiveDF: five-level tag rules with columns id, rule
        :return: DataFrame with columns userId, tagsId
        """
        fiveDF.show()
        # +---+----+
        # | id|rule|
        # +---+----+
        # | 47|   1|
        # | 48|   2|
        # | 49|   3|
        # | 50|   4|
        # +---+----+
        esDF.show()
        # +--------------+--------------------+-------------------+
        # |global_user_id|             loc_url|           log_time|
        # +--------------+--------------------+-------------------+
        # |           703|http://www.eshop....| 2019-07-20 05:06:47|

        # 0. Column-name constants, to avoid typos further down.
        recencyStr = "recency"
        frequencyStr = "frequency"
        engagementsStr = "engagements"
        featureStr = "feature"
        predictStr = "predict"

        # recency: days between the user's last visit and a reference date
        # 1080 days before "now" (the demo data set is old — TODO confirm the
        # offset against the data's actual time range).
        recencyAggColumn: Column = F.datediff(
            F.date_sub(F.current_timestamp(), 1080), F.max("log_time")).alias(recencyStr)
        # frequency: total number of page visits in the period.
        frequencyAggColumn: Column = F.count("loc_url").alias(frequencyStr)
        # engagements: number of distinct pages visited in the period (could
        # equally be page views, downloads, video plays, ...).
        engagementsAggColumn: Column = F.countDistinct("loc_url").alias(engagementsStr)

        # 1. Aggregate the three RFE measures per user.
        tempDF: DataFrame = esDF.groupby("global_user_id") \
            .agg(recencyAggColumn, frequencyAggColumn, engagementsAggColumn)
        tempDF.show(10, truncate=False)
        # +--------------+-------+---------+-----------+
        # |global_user_id|recency|frequency|engagements|
        # +--------------+-------+---------+-----------+
        # |107           |981    |424      |280        |

        # 2. Bucket each raw measure into a 1-5 score. Values outside every
        # bucket (e.g. a negative recency) yield null and are filtered below.
        recencyScore: Column = F.when(F.col(recencyStr).between(0, 15), 5) \
            .when(F.col(recencyStr).between(16, 30), 4) \
            .when(F.col(recencyStr).between(31, 45), 3) \
            .when(F.col(recencyStr).between(46, 60), 2) \
            .when(F.col(recencyStr) > 60, 1) \
            .alias(recencyStr)

        frequencyScore: Column = F.when(F.col(frequencyStr) > 760, 5) \
            .when(F.col(frequencyStr).between(560, 760), 4) \
            .when(F.col(frequencyStr).between(400, 559), 3) \
            .when(F.col(frequencyStr).between(250, 399), 2) \
            .when(F.col(frequencyStr) < 250, 1) \
            .alias(frequencyStr)

        engagementsScore: Column = F.when(F.col(engagementsStr) > 300, 5) \
            .when(F.col(engagementsStr).between(250, 300), 4) \
            .when(F.col(engagementsStr).between(200, 249), 3) \
            .when(F.col(engagementsStr).between(50, 199), 2) \
            .when(F.col(engagementsStr) < 50, 1) \
            .alias(engagementsStr)

        FREScoreDF: DataFrame = tempDF \
            .select(tempDF["global_user_id"].alias("userId"), recencyScore, frequencyScore, engagementsScore) \
            .where(f"userId is not null and {recencyStr} is  not null and {frequencyStr} is  not null and {engagementsStr} is  not null")
        FREScoreDF.show(10, truncate=False)

        # 3. Assemble the three score columns into a single vector column.
        vector = VectorAssembler().setInputCols([recencyStr, frequencyStr, engagementsStr]).setOutputCol(featureStr)
        vectorDF = vector.transform(FREScoreDF)

        # 4. Train KMeans on the score vectors; fixed seed keeps runs reproducible.
        kMeans: KMeans = KMeans() \
            .setK(4) \
            .setSeed(10) \
            .setMaxIter(10) \
            .setFeaturesCol(featureStr) \
            .setPredictionCol(predictStr) \
            .setDistanceMeasure("cosine")
        model: KMeansModel = kMeans.fit(vectorDF)

        # Predict a cluster id for every user.
        resultDF: DataFrame = model.transform(vectorDF)
        print("------------预测的结果--------------")
        resultDF.show()
        # +------+-------+---------+-----------+-------------+-------+
        # |userId|recency|frequency|engagements|      feature|predict|
        # +------+-------+---------+-----------+-------------+-------+
        # |   107|      1|        3|          4|[1.0,3.0,4.0]|      1|

        # 5. Rank the cluster centers: sum each center's coordinates — a
        # larger sum means a more active cluster.
        center = model.clusterCenters()
        print("center", center)

        list1 = [float(np.sum(x)) for x in center]
        print("sum cluer:", list1)

        # Index each sum by its cluster id (the predict value).
        dict1 = dict(enumerate(list1))
        print("from dict", dict1)

        # Turn the mapping into a (predict, center) DataFrame.
        list2 = [[k, v] for k, v in dict1.items()]
        print(list2)

        centerdf: DataFrame = spark.createDataFrame(list2, ['predict', 'center'])
        centerdf.show()

        # Sort cluster ids by center sum, descending, in a single partition so
        # the zip below is deterministic.
        centersortrdd = centerdf.rdd.repartition(1).sortBy(lambda x: x[1], ascending=False)
        print("sort partition")
        centersortrdd.foreach(lambda x: print(x))

        # 6. Pair the ranked clusters with the tag rows positionally.
        # NOTE(review): zip requires equal element counts per partition
        # (k == number of tag rows) and relies on fiveDF already being ordered
        # by rule ("1".."4") — confirm against the tag loader.
        unionrdd = centersortrdd.repartition(1).zip(fiveDF.rdd.repartition(1))
        unionrdd.foreach(lambda x: print(x))
        # (Row(predict=3, center=...), Row(id=47, rule='1'))

        # cluster id -> tag id lookup, e.g. {3: 47, 0: 48, 1: 49, 2: 50}
        fivedict = unionrdd.map(lambda row: (row[0][0], row[1][0])).collectAsMap()
        print(fivedict)

        # 7. Replace the predict column by the matching tag id.
        newDF: DataFrame = resultDF.select(
            resultDF['userId'],
            udf(lambda x: fivedict[x], returnType=StringType())(resultDF[predictStr]).alias('tagsId'))
        newDF.show()
        # +------+------+
        # |userId|tagsId|
        # +------+------+
        # |   107|    49|
        return newDF

# Script entry point: run the full tag-computation pipeline (read ES data,
# compute the model, write the tags back) via the base class's execute().
if __name__ == '__main__':
    RFEKmeansTest().execute()