from pyspark.ml.feature import StandardScaler, VectorAssembler
from pyspark.sql import SparkSession
import os
from pyspark.ml.clustering import KMeans, KMeansModel
from pyspark.sql.functions import *
import pyspark.sql.functions as F
from pyspark.sql.window import Window
from pyspark.sql import DataFrame, functions, SparkSession, Column
from cn.itcast.tag.base.BaseModel import BaseModel


# 0. Configure the runtime environment variables PySpark needs to locate
#    the JDK, the Spark installation, and the Python interpreter.
_ENV_SETTINGS = {
    'JAVA_HOME': '/export/server/jdk1.8.0_241/',
    'SPARK_HOME': '/export/server/spark',
    'PYSPARK_PYTHON': '/root/anaconda3/envs/pyspark_env/bin/python',
    'PYSPARK_DRIVER_PYTHON': '/root/anaconda3/envs/pyspark_env/bin/python',
}
os.environ.update(_ENV_SETTINGS)

class CustomerValue(BaseModel):
    """Customer-value tag model.

    Scores each member on Recency/Frequency/Monetary (RFM), clusters the
    scores with K-Means, and joins the cluster id against the tag rule
    table to produce (userId, tagsId) pairs.
    """

    def compute(self, es_df, five_df):
        """
        Compute customer-value tags via RFM scoring and K-Means clustering.

        :param es_df: raw order data read from ES
                      (memberid, ordersn, orderamount, finishtime)
        :param five_df: tag rule data (contains ``rule`` and ``id`` columns)
        :return: tagged DataFrame with columns (userId, tagsId)
        """
        # 1. Preprocess: cast fields to their expected numeric types,
        #    fill nulls with 0, and drop rows that have no finish time.
        es_df = es_df.select(
            F.col("memberid").cast("long"),
            F.col("ordersn"),
            F.col("orderamount").cast("double"),
            F.col("finishtime").cast("long"),
        ).na.fill(0) \
            .filter(F.col("finishtime") > 0)
        es_df.show(5)

        # Column-name constants, to avoid typos in later references.
        recencyStr = "recency"
        frequencyStr = "frequency"
        monetaryStr = "monetary"
        featureStr = "feature"
        predictStr = "predict"

        # RFM aggregations. All functions are referenced explicitly via F.*
        # rather than through the module's star import, whose `max`/`sum`
        # shadow the builtins (builtin max("finishtime") would be a bug).
        # NOTE(review): the 1205-day offset rebases "today" onto the
        # historical data window -- confirm it still matches the dataset.
        recencyAggColumn = F.datediff(
            F.date_sub(F.current_timestamp(), 1205),
            F.from_unixtime(F.max("finishtime"))).alias(recencyStr)
        frequencyAggColumn = F.count("ordersn").alias(frequencyStr)
        monetaryAggColumn = F.sum("orderamount").alias(monetaryStr)

        RFMResult = es_df.groupBy("memberid").agg(
            recencyAggColumn, frequencyAggColumn, monetaryAggColumn)
        RFMResult.show()
        RFMResult.printSchema()

        # 2. Score R/F/M on a 1-5 scale (0 = out of range / missing):
        # R: 1-3 days=5, 4-6=4, 7-9=3, 10-15=2, >=16 days=1
        # F: >=200 orders=5, 150-199=4, 100-149=3, 50-99=2, 1-49=1
        # M: >=200k=5, 100k-199k=4, 50k-99k=3, 10k-49k=2, <10k=1
        recencyScore = F.when(
            (RFMResult[recencyStr] >= 1) & (RFMResult[recencyStr] <= 3), 5) \
            .when((RFMResult[recencyStr] >= 4) & (RFMResult[recencyStr] <= 6), 4) \
            .when((RFMResult[recencyStr] >= 7) & (RFMResult[recencyStr] <= 9), 3) \
            .when((RFMResult[recencyStr] >= 10) & (RFMResult[recencyStr] <= 15), 2) \
            .when(RFMResult[recencyStr] >= 16, 1) \
            .otherwise(0) \
            .alias(recencyStr)

        frequencyScore: Column = F.when(RFMResult[frequencyStr] >= 200, 5) \
            .when((RFMResult[frequencyStr] >= 150) & (RFMResult[frequencyStr] <= 199), 4) \
            .when((RFMResult[frequencyStr] >= 100) & (RFMResult[frequencyStr] <= 149), 3) \
            .when((RFMResult[frequencyStr] >= 50) & (RFMResult[frequencyStr] <= 99), 2) \
            .when((RFMResult[frequencyStr] >= 1) & (RFMResult[frequencyStr] <= 49), 1) \
            .otherwise(0) \
            .alias(frequencyStr)

        monetaryScore: Column = F.when(RFMResult[monetaryStr] >= 200000, 5) \
            .when(RFMResult[monetaryStr].between(100000, 199999), 4) \
            .when(RFMResult[monetaryStr].between(50000, 99999), 3) \
            .when(RFMResult[monetaryStr].between(10000, 49999), 2) \
            .when(RFMResult[monetaryStr] <= 9999, 1) \
            .otherwise(0) \
            .alias(monetaryStr)
        print("rfmscore")
        RFMScoreResult: DataFrame = RFMResult.select(
            "memberid", recencyScore, frequencyScore, monetaryScore)
        RFMScoreResult.show(10)

        # 3. Clustering.
        # VectorAssembler is a transformer that packs the three score
        # columns into a single vector column for the ML pipeline.
        vectorDF: DataFrame = VectorAssembler() \
            .setInputCols([recencyStr, frequencyStr, monetaryStr]) \
            .setOutputCol(featureStr) \
            .transform(RFMScoreResult)
        vectorDF.show(10)

        # Standardize features (zero mean, unit variance) so no single
        # RFM dimension dominates the Euclidean distance.
        # (StandardScaler is already imported at module level.)
        scaler = StandardScaler(
            inputCol=featureStr,
            outputCol="scaled_features",
            withStd=True,
            withMean=True
        )
        scaled_df = scaler.fit(vectorDF).transform(vectorDF)

        kMeans = KMeans() \
            .setK(7) \
            .setSeed(10) \
            .setMaxIter(200) \
            .setDistanceMeasure("euclidean") \
            .setFeaturesCol("scaled_features") \
            .setPredictionCol(predictStr)

        # Train the model and predict on the same standardized DataFrame.
        model = kMeans.fit(scaled_df)
        result = model.transform(scaled_df)

        # Shift cluster ids from 0-based to 1-based (0->1, ..., 6->7) so
        # they can be matched against the rule values in five_df.
        result = result.withColumn(
            "category",
            F.col(predictStr) + 1
        )

        result.show(truncate=False)

        # 4. Tagging: join against the rule table to map each cluster to
        # a tag id.
        # NOTE(review): `rule` may be a string column while `category` is
        # numeric -- relies on Spark's implicit cast; confirm rule values
        # are plain integers ("1".."7").
        new_df = result.join(
            five_df,
            F.col("category") == F.col("rule"),
            how="left"
        ).select(
            F.col("memberid").alias("userId"),
            F.col("id").alias("tagsId")
        ).dropDuplicates(["userId", "tagsId"])  # keep one tag per user

        return new_df


if __name__ == '__main__':
    # Tag id 38 identifies the customer-value tag in the rule table.
    # (Renamed from `ageModel`: this is the customer-value model, not an
    # age model -- the old name was copy-paste residue.)
    customer_value_model = CustomerValue(38)
    customer_value_model.execute()

