from pyspark.sql import functions as F
from base.BaseModel import BaseModel
from pyspark.ml.feature import VectorAssembler
from pyspark.ml.clustering import KMeans, KMeansModel
from pyspark.sql.functions import udf, current_timestamp, from_unixtime
from pyspark.sql.types import StringType
import numpy as np
from pyspark.sql import DataFrame, functions, SparkSession, Column

from pyspark.sql import DataFrame, functions, SparkSession, Column
from pyspark.ml.clustering import KMeans, KMeansModel
from pyspark.ml.feature import VectorAssembler
from pyspark.sql import DataFrame, functions, SparkSession, Column
from pyspark.sql.functions import *
from pyspark.sql.types import IntegerType

class RFMKmeansModel(BaseModel):
    """RFM (Recency / Frequency / Monetary) customer-value model.

    Each member's R/F/M is scored on a 0-5 scale, the score vectors are
    clustered with KMeans (k=3), the clusters are ranked by centroid
    magnitude (bigger centroid sum == more valuable customers), and each
    cluster is mapped to one of the 5-level tag ids supplied by ``five_df``.
    """

    def compute(self, es_df, five_df):
        """Compute a ``userId -> tagsId`` DataFrame.

        :param es_df: order data; must expose ``memberid``, ``ordersn``,
            ``orderamount`` and ``finishtime`` (unix seconds).
        :param five_df: tag-rule DataFrame whose first column is the tag id,
            assumed ordered from highest to lowest customer value
            (NOTE(review): ordering assumption — confirm against the caller).
        :return: DataFrame with columns ``userId`` and ``tagsId``.
        """
        # Rows without a member id cannot be tagged - drop them up front.
        es_df = es_df.where(es_df.memberid.isNotNull())
        es_df.show()
        five_df.show()

        # Column-name constants to avoid typos below.
        recencyStr = "recency"
        frequencyStr = "frequency"
        monetaryStr = "monetary"
        featureStr = "feature"
        predictStr = "predict"

        # 1. Aggregate per member to obtain the raw RFM values:
        #    Recency   - days since the member's last finished order
        #    Frequency - total number of orders
        #    Monetary  - total order amount
        # The 1205-day offset shifts "now" back so it lines up with the
        # historical dataset's timeframe - TODO confirm the offset is still valid.
        recencyAggColumn = F.datediff(
            F.date_sub(F.current_timestamp(), 1205),
            F.from_unixtime(F.max("finishtime"))).alias(recencyStr)
        frequencyAggColumn = F.count("ordersn").alias(frequencyStr)
        monetaryAggColumn = F.sum("orderamount").alias(monetaryStr)

        RFMResult = es_df.groupBy("memberid").agg(
            recencyAggColumn, frequencyAggColumn, monetaryAggColumn)
        RFMResult.show()
        RFMResult.printSchema()
        # Schema at this point:
        #   memberid: long, recency: integer, frequency: long, monetary: double

        # 2. Score each RFM component on a 1-5 scale (0 = out of range / null).
        #    R: 1-3 days=5, 4-6=4, 7-9=3, 10-15=2, >=16=1
        #    F: >=200=5, 150-199=4, 100-149=3, 50-99=2, 1-49=1
        #    M: >=200k=5, 100-199k=4, 50-99k=3, 10-49k=2, <10k=1
        # The chains test thresholds from high to low with >= / < so that
        # fractional `monetary` values (it is a double) cannot fall into the
        # gaps that integer-boundary `between(...)` ranges would leave
        # (e.g. 199999.5 must score 4, not 0).
        recencyScore: Column = F.when(RFMResult[recencyStr] >= 16, 1) \
            .when(RFMResult[recencyStr] >= 10, 2) \
            .when(RFMResult[recencyStr] >= 7, 3) \
            .when(RFMResult[recencyStr] >= 4, 4) \
            .when(RFMResult[recencyStr] >= 1, 5) \
            .otherwise(0) \
            .alias(recencyStr)

        frequencyScore: Column = F.when(RFMResult[frequencyStr] >= 200, 5) \
            .when(RFMResult[frequencyStr] >= 150, 4) \
            .when(RFMResult[frequencyStr] >= 100, 3) \
            .when(RFMResult[frequencyStr] >= 50, 2) \
            .when(RFMResult[frequencyStr] >= 1, 1) \
            .otherwise(0) \
            .alias(frequencyStr)

        monetaryScore: Column = F.when(RFMResult[monetaryStr] >= 200000, 5) \
            .when(RFMResult[monetaryStr] >= 100000, 4) \
            .when(RFMResult[monetaryStr] >= 50000, 3) \
            .when(RFMResult[monetaryStr] >= 10000, 2) \
            .when(RFMResult[monetaryStr] < 10000, 1) \
            .otherwise(0) \
            .alias(monetaryStr)

        RFMScoreResult: DataFrame = RFMResult.select(
            "memberid", recencyScore, frequencyScore, monetaryScore)
        RFMScoreResult.show(10)

        # 3. Assemble the three score columns into a single vector column -
        # KMeans consumes one features column, not separate scalar columns.
        vectorDF: DataFrame = VectorAssembler() \
            .setInputCols([recencyStr, frequencyStr, monetaryStr]) \
            .setOutputCol(featureStr) \
            .transform(RFMScoreResult)
        vectorDF.show(10)

        kMeans: KMeans = KMeans() \
            .setK(3) \
            .setSeed(10) \
            .setMaxIter(200) \
            .setDistanceMeasure("cosine") \
            .setFeaturesCol(featureStr) \
            .setPredictionCol(predictStr)

        # 4. Train the model.
        model: KMeansModel = kMeans.fit(vectorDF)

        # 5. Predict a cluster id for every member.
        result: DataFrame = model.transform(vectorDF)
        result.show(truncate=False)

        print(model.clusterCenters())
        print([np.sum(c) for c in model.clusterCenters()])

        # 6. Cluster ids are arbitrary, but mapping clusters to tag rules
        # needs an ordering. Rank clusters by the sum of their centroid
        # coordinates: a larger RFM-score centroid means a more valuable
        # customer segment.
        centerSums = [float(np.sum(c)) for c in model.clusterCenters()]
        center = dict(enumerate(centerSums))
        print(center)  # e.g. {0: 8.03, 1: 6.97, 2: 2.45}

        # 7. Sort (cluster id, centroid sum) pairs descending by centroid sum,
        # then zip them positionally with the tag rules (best cluster pairs
        # with the highest-value tag).
        indexAndRFM = self.spark.createDataFrame(
            list(center.items()), ['predict', 'rfm_center'])
        sortedIndexAndRFM = indexAndRFM.rdd.sortBy(lambda x: x[1], ascending=False)
        print(sortedIndexAndRFM.collect())

        # NOTE(review): zip() requires identical partitioning; repartition(1)
        # forces that, and the code relies on row order surviving the
        # repartition - confirm this holds for the Spark version in use.
        tempRDD = sortedIndexAndRFM.repartition(1).zip(five_df.rdd.repartition(1))
        print(tempRDD.collect())
        ruleDF: DataFrame = tempRDD.map(
            lambda t: (t[0][0], t[1][0])).toDF(["predict", "tagIds"])
        ruleDF.show()

        # 8. Collect the cluster->tag mapping to the driver and apply it with
        # a UDF, e.g. predict=1 -> tag 103.
        ruleMap = ruleDF.rdd.collectAsMap()
        predict2Tag = udf(lambda predict: ruleMap[predict], IntegerType())
        result.printSchema()
        newDF: DataFrame = result.select(
            result["memberid"].alias("userId"),
            predict2Tag("predict").alias("tagsId"))
        newDF.show()
        return newDF


if __name__ == '__main__':
    # 38 is presumably the model's tag id passed to BaseModel - confirm there.
    RFMKmeansModel(38).execute()