#!/usr/bin/env python
# @desc : Tag mining:
#       1. Use KMeans on each user's RFM values to build a customer-value model
#       2. Same as 1, plus saving and re-loading the trained model
#       3. Same as 2, plus selecting the number of clusters K

from pyspark.ml.clustering import KMeans, KMeansModel
from pyspark.ml.feature import VectorAssembler
from pyspark.sql import DataFrame, functions, SparkSession, Column
from pyspark.sql.functions import *
from pyspark.sql.types import IntegerType
import os
from cn.itcast.tag.base.BaseModel import BaseModel

# Environment configuration: point Spark at the local installation and at the
# conda interpreter that PySpark workers should use.
SPARK_HOME = '/export/server/spark'
PYSPARK_PYTHON = '/root/anaconda3/envs/pyspark_env/bin/python3'
os.environ['SPARK_HOME'] = SPARK_HOME
os.environ["PYSPARK_PYTHON"] = PYSPARK_PYTHON

# Module-wide local Spark session shared by the model below.
spark = (
    SparkSession.builder
        .master("local[*]")
        .appName("UserProfileModel")
        .getOrCreate()
)


class RFMKmeansModel(BaseModel):
    """Customer-value model: clusters users by their RFM scores with KMeans.

    RFM stands for:
        R (recency)   - days since the user's last finished order
        F (frequency) - total number of orders
        M (monetary)  - total amount spent

    Each user's raw R/F/M metrics are mapped onto 1-5 scores, the score
    vectors are clustered with KMeans (k=4, cosine distance), the clusters
    are ranked by the RFM total of their centers, and each rank is paired
    with one of the 5-level rule tags supplied by ``fiveDS``.
    """

    def getTagId(self):
        """Return the 4-level tag id this model produces tags under.

        Returns:
            int: tag id 38.
        """
        return 38

    def compute(self, esDF: DataFrame, fiveDS: DataFrame) -> DataFrame:
        """Compute RFM scores, cluster users, and assign 5-level tag ids.

        Args:
            esDF: order data; must expose the columns ``memberid``,
                ``ordersn``, ``orderamount`` and ``finishtime``
                (``finishtime`` is a unix timestamp).
            fiveDS: 5-level tag rules; the first column is the tag id and
                row order is assumed to match cluster rank (best first).

        Returns:
            DataFrame: two columns, ``userId`` and ``tagsId``.
        """
        print("Execute the compute method of the subclass!")
        # Rows without a user id cannot be tagged - drop them up front.
        esDF = esDF.where(esDF.memberid.isNotNull())
        esDF.show()

        # Column-name constants, to avoid typos further down.
        recencyStr = "recency"      # days since last purchase
        frequencyStr = "frequency"  # number of orders
        monetaryStr = "monetary"    # total amount spent
        featureStr = "feature"      # assembled feature-vector column
        predictStr = "predict"      # KMeans prediction column

        # 1. Aggregate the raw RFM metrics per user.
        # Recency: days between the (shifted) current date and the last order.
        # NOTE(review): the 1205-day shift appears to compensate for the age
        # of the demo dataset - confirm before running on live data.
        recencyAggColumn = functions.datediff(
            date_sub(current_timestamp(), 1205),
            from_unixtime(max("finishtime"))).alias(recencyStr)
        # Frequency: total order count.
        frequencyAggColumn = functions.count("ordersn").alias(frequencyStr)
        # Monetary: total order amount.
        monetaryAggColumn = functions.sum("orderamount").alias(monetaryStr)

        RFMResult = esDF.groupBy("memberid") \
            .agg(recencyAggColumn, frequencyAggColumn, monetaryAggColumn)
        RFMResult.show()
        RFMResult.printSchema()

        # 2. Convert each raw metric into a 1-5 score (5 = most valuable).
        # R scoring: 1-3 days=5, 4-6=4, 7-9=3, 10-15=2, >=16=1.
        recencyScore: Column = functions.when(
            (RFMResult[recencyStr] >= 1) & (RFMResult[recencyStr] <= 3), 5) \
            .when((RFMResult[recencyStr] >= 4) & (RFMResult[recencyStr] <= 6), 4) \
            .when((RFMResult[recencyStr] >= 7) & (RFMResult[recencyStr] <= 9), 3) \
            .when((RFMResult[recencyStr] >= 10) & (RFMResult[recencyStr] <= 15), 2) \
            .when(RFMResult[recencyStr] >= 16, 1) \
            .otherwise(0) \
            .alias(recencyStr)

        # F scoring: >=200=5, 150-199=4, 100-149=3, 50-99=2, 1-49=1.
        frequencyScore: Column = functions.when(RFMResult[frequencyStr] >= 200, 5) \
            .when((RFMResult[frequencyStr] >= 150) & (RFMResult[frequencyStr] <= 199), 4) \
            .when((RFMResult[frequencyStr] >= 100) & (RFMResult[frequencyStr] <= 149), 3) \
            .when((RFMResult[frequencyStr] >= 50) & (RFMResult[frequencyStr] <= 99), 2) \
            .when((RFMResult[frequencyStr] >= 1) & (RFMResult[frequencyStr] <= 49), 1) \
            .otherwise(0) \
            .alias(frequencyStr)

        # M scoring: >=200k=5, 100k-199k=4, 50k-99k=3, 10k-49k=2, <=9999=1.
        monetaryScore: Column = functions.when(RFMResult[monetaryStr] >= 200000, 5) \
            .when(RFMResult[monetaryStr].between(100000, 199999), 4) \
            .when(RFMResult[monetaryStr].between(50000, 99999), 3) \
            .when(RFMResult[monetaryStr].between(10000, 49999), 2) \
            .when(RFMResult[monetaryStr] <= 9999, 1) \
            .otherwise(0) \
            .alias(monetaryStr)

        print("rfmscore")
        RFMScoreResult: DataFrame = RFMResult.select(
            "memberid", recencyScore, frequencyScore, monetaryScore)
        RFMScoreResult.show(10)

        # 3. Assemble the three scores into a single feature vector.
        vectorDF: DataFrame = VectorAssembler() \
            .setInputCols([recencyStr, frequencyStr, monetaryStr]) \
            .setOutputCol(featureStr) \
            .transform(RFMScoreResult)
        vectorDF.show(10)

        # KMeans configuration: 4 clusters, fixed seed for reproducibility,
        # cosine distance on the score vectors.
        kMeans: KMeans = KMeans() \
            .setK(4) \
            .setSeed(10) \
            .setMaxIter(200) \
            .setDistanceMeasure("cosine") \
            .setFeaturesCol(featureStr) \
            .setPredictionCol(predictStr)

        # 4. Train the model.
        model: KMeansModel = kMeans.fit(vectorDF)

        # 5. Assign a cluster index to every user.
        result: DataFrame = model.transform(vectorDF)
        result.show(truncate=False)

        # 6. Sanity check: min/max of the summed R+F+M score per cluster.
        # (max/min here are the pyspark.sql.functions aggregates brought in
        # by the star import, not the builtins.)
        ds = result \
            .groupBy(predictStr) \
            .agg(max(result[recencyStr] + result[frequencyStr] + result[monetaryStr]),
                 min(result[recencyStr] + result[frequencyStr] + result[monetaryStr])) \
            .sort(result[predictStr], ascending=True)
        ds.show()

        # Cluster centers and the RFM total of each center.
        import numpy as np
        centers = model.clusterCenters()
        print(centers)
        print([np.sum(c) for c in centers])

        # Map cluster index -> summed center coordinates; the sum acts as
        # the cluster's "value" so clusters can be ranked.
        # (Fixed: the old loop rebuilt the full list of sums once per
        # cluster - O(k^2) np.sum calls for identical results.)
        center = {i: float(np.sum(c)) for i, c in enumerate(centers)}
        print(center)

        # 7. Rank clusters by their center's RFM total, descending.
        dictlist = [(k, v) for k, v in center.items()]
        indexAndRFM = spark.createDataFrame(dictlist, ['predict', 'rfm_center'])
        sortedIndexAndRFM = indexAndRFM.rdd.sortBy(lambda x: x[1], ascending=False)

        # 8. Zip the ranked cluster ids with the 5-level rules so the most
        # valuable cluster receives the first rule's tag.  Both sides are
        # forced into a single partition because RDD.zip requires the same
        # partitioning and element counts.
        # NOTE(review): repartition() does not guarantee row order; this
        # pairing relies on Spark preserving it within one partition -
        # fragile, verify on Spark upgrades.
        # (Removed a no-op union with an empty RDD and a no-op identity map.)
        tempRDD = sortedIndexAndRFM.repartition(1).zip(fiveDS.rdd.repartition(1))
        print(tempRDD.count())

        # Rule mapping: (cluster index, tag id) pairs.
        ruleDF: DataFrame = tempRDD.map(lambda t: (t[0][0], t[1][0])).toDF(["predict", "tagIds"])
        ruleDF.show()

        # Collect the mapping to the driver for use inside the UDF.
        ruleMap = ruleDF.rdd.collectAsMap()

        # UDF translating a KMeans cluster index into a tag id.
        # NOTE(review): raises KeyError for an unmapped cluster index.
        predict2Tag = udf(lambda predict: ruleMap[predict], IntegerType())

        # 9. Final result: one row per user with the assigned tag id.
        newDF: DataFrame = result.select(
            result["memberid"].alias("userId"),
            predict2Tag("predict").alias("tagsId")
        )
        newDF.show()
        return newDF


if __name__ == '__main__':
    # Run the full tag pipeline via the BaseModel entry point.
    RFMKmeansModel().execute()
