from pyspark.sql import SparkSession
from pyspark.sql import functions as F
from pyspark.sql.window import Window
from pyspark.ml.feature import QuantileDiscretizer

# Initialize the SparkSession used by every function below (module-level singleton).
spark = SparkSession.builder \
    .appName("DamoFeatureAnalysis") \
    .config("spark.sql.shuffle.partitions", "200") \
    .getOrCreate()


# 定义数据加载函数
def load_data(user_path, order_path, behavior_path, geo_path):
    """Load the four parquet sources as DataFrames.

    Parameters are parquet paths for, in order: user profile, orders,
    behavior events, and geo lookup data.

    Returns:
        Tuple ``(user_df, order_df, behavior_df, geo_df)`` matching the
        argument order.
    """
    source_paths = (user_path, order_path, behavior_path, geo_path)
    return tuple(spark.read.parquet(path) for path in source_paths)


# 人口属性指标计算
def calculate_demographic_features(user_df):
    """Derive demographic feature columns on the user frame.

    Adds three string columns:
      - ``age_bin``:      coarse age bucket ("0-17" ... "60+")
      - ``life_stage``:   life-stage label from age plus the ``has_child`` flag
      - ``tao_qi_level``: membership tier derived from ``tao_qi_score``

    Args:
        user_df: user frame exposing ``age``, ``has_child``, ``tao_qi_score``.

    Returns:
        ``user_df`` with the three derived columns appended.
    """
    age = F.col("age")
    score = F.col("tao_qi_score")

    # Coarse age bucket.
    age_bin_expr = (
        F.when(age < 18, "0-17")
        .when((age >= 18) & (age <= 24), "18-24")
        .when((age >= 25) & (age <= 39), "25-39")
        .when((age >= 40) & (age <= 59), "40-59")
        .otherwise("60+")
    )

    # Life stage: the 25-35 band splits on whether the user has a child.
    life_stage_expr = (
        F.when(age < 25, "学生")
        .when((age >= 25) & (age <= 35) & (F.col("has_child") == True), "育儿期")
        .when((age >= 25) & (age <= 35) & (F.col("has_child") == False), "职场新人")
        .when((age >= 36) & (age <= 50), "职场中坚")
        .otherwise("退休")
    )

    # Membership tier from the Taoqi loyalty score.
    tao_qi_level_expr = (
        F.when(score < 400, "普通会员")
        .when((score >= 400) & (score < 1000), "超级会员")
        .when((score >= 1000) & (score < 2500), "APASS会员")
        .otherwise("APASS黑卡")
    )

    return (
        user_df
        .withColumn("age_bin", age_bin_expr)
        .withColumn("life_stage", life_stage_expr)
        .withColumn("tao_qi_level", tao_qi_level_expr)
    )


# 消费能力与行为指标计算
def calculate_consumption_features(order_df, user_df):
    """Compute spend-power features over the trailing year and join to users.

    Adds per user:
      - ``avg_monthly_payment``: mean of monthly payment sums (last 365 days)
      - ``avg_monthly_orders``:  mean of monthly order counts (last 365 days)
      - ``consumption_rank``:    global percent_rank of avg_monthly_payment
      - ``consumption_level``:   quintile label "L1" (lowest) .. "L5" (highest)

    Args:
        order_df: orders exposing ``user_id``, ``order_id``, ``order_date``,
            ``payment_amount``.
        user_df:  user frame to left-join the features onto.

    Returns:
        ``user_df`` left-joined with the consumption features on ``user_id``.
    """
    one_year_ago = F.date_sub(F.current_date(), 365)

    # Single pass over last year's orders: per-user/per-month spend and order
    # count, then averaged per user. (The original code made two identical
    # filter+groupBy passes and joined them; one aggregation gives the same
    # rows with one shuffle fewer.)
    monthly_stats = order_df.filter(F.col("order_date") >= one_year_ago) \
        .groupBy("user_id", F.trunc("order_date", "month").alias("order_month")) \
        .agg(
            F.sum("payment_amount").alias("monthly_payment"),
            F.count("order_id").alias("monthly_orders")
        ) \
        .groupBy("user_id") \
        .agg(
            F.avg("monthly_payment").alias("avg_monthly_payment"),
            F.avg("monthly_orders").alias("avg_monthly_orders")
        )

    # BUG FIX: rank ascending so higher spenders get a higher percent_rank and
    # land in the higher levels — L5 is the top 20% of spenders. The previous
    # descending order put top spenders in "L1" while downstream code (main's
    # high-value filter) treats L4/L5 as the high-consumption tiers.
    # NOTE: an un-partitioned window funnels every row through one partition;
    # acceptable at user-table scale, otherwise switch to approxQuantile cuts.
    window = Window.orderBy(F.col("avg_monthly_payment").asc())
    consumption_features = monthly_stats.withColumn(
        "consumption_rank",
        F.percent_rank().over(window)
    ).withColumn(
        "consumption_level",
        F.when(F.col("consumption_rank") < 0.2, "L1")
        .when((F.col("consumption_rank") >= 0.2) & (F.col("consumption_rank") < 0.4), "L2")
        .when((F.col("consumption_rank") >= 0.4) & (F.col("consumption_rank") < 0.6), "L3")
        .when((F.col("consumption_rank") >= 0.6) & (F.col("consumption_rank") < 0.8), "L4")
        .otherwise("L5")
    )

    # Left join keeps users with no orders in the past year (null features).
    return user_df.join(consumption_features, on="user_id", how="left")


# 地理位置指标计算
def calculate_geo_features(user_df, geo_df):
    """Attach geo attributes and a ``city_level`` tier to the user frame.

    Args:
        user_df: user frame exposing ``location_id`` and (after the geo join)
            a ``city`` column.
        geo_df:  geo lookup frame keyed by ``location_id``.

    Returns:
        ``user_df`` joined with ``geo_df`` plus a ``city_level`` string column.
    """
    # Left join so users with an unknown location_id are kept (null geo cols).
    user_with_geo = user_df.join(geo_df, on="location_id", how="left")

    # City -> tier lookup table.
    city_level_map = {
        "北京": "一线城市", "上海": "一线城市", "广州": "一线城市", "深圳": "一线城市",
        "杭州": "新一线城市", "南京": "新一线城市", "成都": "新一线城市",
        # Add more city mappings here...
    }

    # Build a native MapType literal instead of a Python UDF: the lookup then
    # runs inside the JVM with no Python serialization round-trip. A miss (or
    # a null city) yields null from the map access, which coalesce replaces
    # with the same default the old UDF returned.
    city_level_expr = F.create_map(
        *[F.lit(item) for pair in city_level_map.items() for item in pair]
    )
    user_with_city_level = user_with_geo.withColumn(
        "city_level",
        F.coalesce(city_level_expr[F.col("city")], F.lit("其他城市"))
    )

    return user_with_city_level


# 行为偏好指标计算
# Behavior-preference feature calculation
def calculate_behavior_features(behavior_df, user_df):
    """Compute per-user category and price preference features.

    Builds the top-3 viewed categories per user over the last 30 days, joins
    in per-category purchase price stats, and left-joins the result onto the
    user frame.

    NOTE(review): the final join is on ``user_id`` only while the selected
    columns include ``category_id`` — each user row is multiplied by up to
    three preference rows. Confirm downstream consumers expect a long
    (user x category) frame rather than one row per user.
    """
    # Per-category view counts over the last 30 days.
    thirty_days_ago = F.date_sub(F.current_date(), 30)
    category_views = behavior_df.filter(
        (F.col("behavior_type") == "view") &
        (F.col("event_time") >= thirty_days_ago)
    ).groupBy("user_id", "category_id") \
        .agg(F.count("behavior_id").alias("view_count"))

    # Rank categories per user by view count and keep the top 3.
    window = Window.partitionBy("user_id").orderBy(F.col("view_count").desc())
    category_preference = category_views.withColumn(
        "category_rank",
        F.row_number().over(window)
    ).filter(F.col("category_rank") <= 3)  # keep the top-3 preferred categories

    # Price stats per user/category from purchases in the same 30-day window.
    price_preference = behavior_df.filter(
        (F.col("behavior_type") == "purchase") &
        (F.col("event_time") >= thirty_days_ago)
    ).groupBy("user_id", "category_id") \
        .agg(
        F.avg("price").alias("avg_price"),
        F.expr("percentile(price, 0.25)").alias("price_q1"),  # SQL percentile (no pandas needed)
        F.expr("percentile(price, 0.75)").alias("price_q3")  # SQL percentile (no pandas needed)
    )

    # Merge view preference with price stats; left join keeps viewed-but-not-
    # purchased categories (price columns become null).
    behavior_features = category_preference.join(
        price_preference,
        on=["user_id", "category_id"],
        how="left"
    )

    # Join onto the user frame. NOTE(review): price_q1/price_q3 are computed
    # above but dropped by this select — either select them here or remove
    # them from the aggregation.
    user_with_behavior = user_df.join(
        behavior_features.select("user_id", "category_id", "view_count", "avg_price"),
        on="user_id",
        how="left"
    )

    return user_with_behavior


# 生命周期指标计算
# Lifecycle feature calculation
def calculate_lifecycle_features(order_df, behavior_df, user_df):
    """Classify users by lifecycle stage and activity level.

    Adds ``user_type`` (潜客/新客/老客/沉睡用户), ``activity_level``
    (高/中/低活跃), plus the supporting date and count columns.
    """
    # 1. First and last order date per user.
    order_dates = order_df.groupBy("user_id") \
        .agg(
        F.min("order_date").alias("first_order_date"),  # first-ever order
        F.max("order_date").alias("last_order_date")
    )

    # 2. Latest intent signal (add-to-cart or collect) per user.
    max_behavior_date = behavior_df.filter(
        F.col("behavior_type").isin(["add_cart", "collect"])
    ).groupBy("user_id") \
        .agg(F.max("event_time").alias("last_behavior_date"))

    # 3. Merge order and behavior dates onto the user frame (left joins keep
    #    users with neither orders nor behavior).
    user_lifecycle = user_df.join(order_dates, on="user_id", how="left") \
        .join(max_behavior_date, on="user_id", how="left")

    # 4. Time thresholds.
    six_months_ago = F.date_sub(F.current_date(), 180)  # last 180 days
    one_year_ago = F.date_sub(F.current_date(), 365)  # last 1 year

    # 5. Lifecycle classification. Branch order matters: a recent first-time
    #    buyer is "新客" even though they also satisfy the "老客" condition.
    user_lifecycle = user_lifecycle.withColumn(
        "user_type",
        F.when(
            # Prospect: never ordered, but has cart/collect activity.
            F.col("last_order_date").isNull() & F.col("last_behavior_date").isNotNull(),
            "潜客"
        ).when(
            # New customer: first order within the last 180 days.
            F.col("first_order_date").isNotNull() & (F.col("first_order_date") >= six_months_ago),
            "新客"
        ).when(
            # Existing customer: ordered within the last year.
            F.col("last_order_date") >= one_year_ago,
            "老客"
        ).otherwise(
            # Dormant: none of the above (includes users with no orders and
            # no cart/collect behavior).
            "沉睡用户"
        )
    )

    # 6. Activity level from behavior counts.
    #    NOTE(review): no time filter here — active_days/total_behaviors span
    #    the user's full behavior history, so the >=20-day "高活跃" cut is over
    #    all time, not a recent window. Confirm this is intended.
    user_activity = behavior_df.groupBy("user_id") \
        .agg(
        F.countDistinct(F.date_trunc("day", F.col("event_time"))).alias("active_days"),
        F.count("behavior_id").alias("total_behaviors")
    ) \
        .withColumn(
        "activity_level",
        F.when(F.col("active_days") >= 20, "高活跃")
        .when((F.col("active_days") >= 10) & (F.col("active_days") < 20), "中活跃")
        .otherwise("低活跃")
    )

    # 7. Merge activity onto the lifecycle frame.
    user_with_lifecycle = user_lifecycle.join(user_activity, on="user_id", how="left")

    return user_with_lifecycle


# 主函数
def main():
    """Build the full user feature table and show a high-value-user sample."""
    # Load the four source tables.
    users, orders, behaviors, geo = load_data(
        "/data/damo_demo_data/user_data.parquet",
        "/data/damo_demo_data/order_data.parquet",
        "/data/damo_demo_data/behavior_data.parquet",
        "/data/damo_demo_data/geo_data.parquet"
    )

    # Feature stages are chained: each stage enriches the frame produced by
    # the previous one.
    features = calculate_demographic_features(users)
    features = calculate_consumption_features(orders, features)
    features = calculate_geo_features(features, geo)
    features = calculate_behavior_features(behaviors, features)
    features = calculate_lifecycle_features(orders, behaviors, features)

    # Persist the final feature table (disabled in this demo).
    # features.write.parquet("path/to/final_features.parquet", mode="overwrite")

    # Example: high-value users = L4/L5 consumption power + high activity.
    high_value_users = features.filter(
        F.col("consumption_level").isin(["L4", "L5"]) &
        (F.col("activity_level") == "高活跃")
    )

    # Print a sample of the result.
    high_value_users.show()


if __name__ == "__main__":
    main()