from pyspark.ml.feature import StandardScaler, VectorAssembler
from pyspark.sql import SparkSession
import os
from pyspark.ml.clustering import KMeans
import pyspark.sql.functions as F
from pyspark.sql.window import Window

from cn.itcast.tag.base.BaseModel import BaseModel

# 0. Runtime environment for Spark: JDK location, Spark home, and the conda
#    interpreter used by both the driver and the worker processes.
os.environ.update({
    'JAVA_HOME': '/export/server/jdk1.8.0_241/',
    'SPARK_HOME': '/export/server/spark',
    'PYSPARK_PYTHON': '/root/anaconda3/envs/pyspark_env/bin/python',
    'PYSPARK_DRIVER_PYTHON': '/root/anaconda3/envs/pyspark_env/bin/python',
})

class PriceSensitivity(BaseModel):
    """Price-sensitivity tag model.

    Clusters users with K-Means (K=5) on coupon/discount behaviour and maps
    each cluster to a sensitivity level 1-5, which is then matched against
    the tag-rule table to produce (userId, tagsId) pairs.
    """

    def compute(self, es_df, five_df):
        """
        Compute price-sensitivity tags via K-Means clustering.

        :param es_df: raw order data read from ES
                      (memberid, ordersn, orderamount, couponcodevalue)
        :param five_df: tag-rule data (columns ``rule`` and ``id``)
        :return: tagged DataFrame with columns (userId, tagsId)
        """
        # 1. Pre-processing: cast numeric fields and fill missing values.
        es_df = es_df.select(
            F.col("memberid").cast("long"),
            F.col("ordersn"),
            F.col("orderamount").cast("double"),
            F.col("couponcodevalue").cast("double")
        ).na.fill(0)

        # 2. Per-user price-sensitivity features.
        user_features = es_df.groupBy("memberid").agg(
            # coupon usage rate = orders that used a coupon / total orders
            (F.sum(F.when(F.col("couponcodevalue") > 0, 1).otherwise(0)) / F.count("*"))
            .alias("coupon_usage_rate"),
            # average discount rate = total coupon value / total order amount
            (F.sum("couponcodevalue") / (F.sum("orderamount") + 1e-6))
            .alias("avg_discount_rate"),
            # average order amount
            F.avg("orderamount").alias("avg_order_amount"),
            # coefficient of variation = stddev / mean
            (F.stddev("orderamount") / (F.avg("orderamount") + 1e-6))
            .alias("amount_volatility")
        ).na.fill(0)
        # FIX: stddev() returns NULL for users with a single order, which made
        # amount_volatility NULL; VectorAssembler (handleInvalid="error" by
        # default) would then fail at transform time. Fill feature NULLs with 0.

        # 3. Assemble the feature columns into a single Vector column
        #    (required by Spark MLlib estimators).
        assembler = VectorAssembler(
            inputCols=["coupon_usage_rate", "avg_discount_rate", "avg_order_amount", "amount_volatility"],
            outputCol="features"
        )
        feature_df = assembler.transform(user_features)

        # 4. Standardize features (zero mean, unit variance) so no single
        #    feature dominates the Euclidean distance used by K-Means.
        scaler = StandardScaler(
            inputCol="features",
            outputCol="scaled_features",
            withMean=True,
            withStd=True
        )
        scaler_model = scaler.fit(feature_df)
        scaled_df = scaler_model.transform(feature_df)

        # 5. K-Means clustering with K=5 (one cluster per sensitivity level);
        #    fixed seed keeps cluster assignments reproducible.
        kmeans = KMeans(
            featuresCol="scaled_features",
            predictionCol="cluster_id",
            k=5,
            seed=42
        )
        kmeans_model = kmeans.fit(scaled_df)
        cluster_df = kmeans_model.transform(scaled_df)

        # 6. Per-cluster averages used to rank clusters by sensitivity.
        cluster_sensitivity = cluster_df.groupBy("cluster_id").agg(
            F.avg("coupon_usage_rate").alias("avg_coupon_usage"),
            F.avg("avg_discount_rate").alias("avg_discount"),
            F.avg("avg_order_amount").alias("avg_amount")
        )

        # 7. Map cluster ids to sensitivity levels: rank clusters from most
        #    sensitive (high coupon usage / high discount) to least sensitive,
        #    then invert the rank so 5 = most sensitive and 1 = least.
        cluster_rank = cluster_sensitivity.withColumn(
            "rank",
            F.row_number().over(Window.orderBy(F.col("avg_coupon_usage").desc(), F.col("avg_discount").desc()))
        ).select("cluster_id", "rank")

        cluster_rank = cluster_rank.withColumn(
            "sensitivity_level",
            6 - F.col("rank")  # rank 1 -> level 5, ..., rank 5 -> level 1
        )

        # 8. Attach each user's sensitivity level to their cluster assignment.
        user_sensitivity = cluster_df.join(
            cluster_rank,
            on="cluster_id",
            how="left"
        ).select("memberid", "sensitivity_level")

        # 9. Tagging: match sensitivity levels against the rule table.
        #    FIX: the original re-joined the per-user levels back onto the
        #    order-level es_df (multiplying rows only to deduplicate them
        #    again) and left a debug es_df.show() action in place; joining the
        #    one-row-per-user frame directly yields the same (userId, tagsId)
        #    pairs without the redundant shuffle or driver output.
        #    NOTE(review): ``rule`` is presumably a string column — Spark
        #    coerces types in the equality below; verify against five_df's
        #    schema.
        new_df = user_sensitivity.join(
            five_df,
            F.col("sensitivity_level") == F.col("rule"),
            how="left"
        ).select(
            F.col("memberid").alias("userId"),
            F.col("id").alias("tagsId")
        ).dropDuplicates(["userId", "tagsId"])  # one tag row per user

        return new_df


if __name__ == '__main__':
    # Entry point: build the price-sensitivity model for tag id 51 and run it.
    model = PriceSensitivity(51)
    model.execute()

