from pyspark.sql import SparkSession
from pyspark.sql.functions import col, sum, count, avg

# Create (or reuse) the SparkSession entry point for this feature-engineering job.
spark = SparkSession.builder.appName("DamoPanFeatures").getOrCreate()

# Load the mock CSV extracts (no header row, types inferred) and restore the
# original table column names in a single step per table.
def _load_mock_table(path, columns):
    """Read a headerless CSV with inferred schema and assign column names."""
    return spark.read.csv(path, header=False, inferSchema=True).toDF(*columns)

user_profile_df = _load_mock_table(
    "../mock_data/user_profile.csv",
    ["user_id", "gender", "age", "occupation", "education", "marital_status",
     "has_children", "income_level", "province", "city", "city_level"],
)
user_behavior_df = _load_mock_table(
    "../mock_data/user_behavior.csv",
    ["user_id", "item_id", "category_id", "behavior_type", "behavior_time",
     "duration", "page_depth", "is_purchase", "order_id"],
)
order_transaction_df = _load_mock_table(
    "../mock_data/order_transaction.csv",
    ["order_id", "user_id", "order_time", "payment_time", "delivery_time",
     "receive_time", "order_amount", "discount_amount", "actual_payment",
     "payment_method", "order_status"],
)
order_item_df = _load_mock_table(
    "../mock_data/order_item.csv",
    ["order_id", "item_id", "category_id", "brand_id", "item_name",
     "item_price", "quantity", "item_amount", "is_return", "return_time"],
)
marketing_response_df = _load_mock_table(
    "../mock_data/marketing_response.csv",
    ["campaign_id", "user_id", "campaign_type", "exposure_time", "click_time",
     "conversion_time", "conversion_type", "conversion_value"],
)

# Demographic attributes taken directly from the user profile table.
_basic_cols = ["user_id", "gender", "age", "income_level", "province", "city_level"]
basic_features = user_profile_df.select(*_basic_cols)

# Per-user behavioral aggregates from the clickstream log.
_behavior_aggs = [
    count("*").alias("total_behavior_count"),
    sum("duration").alias("total_browse_duration"),
    avg("page_depth").alias("average_page_depth"),
    # is_purchase is a flag; cast to int so summing yields the purchase count.
    sum(col("is_purchase").cast("int")).alias("purchase_count"),
]
behavior_features = user_behavior_df.groupBy("user_id").agg(*_behavior_aggs)

# Per-user spending aggregates from the order transaction table.
_consumption_aggs = [
    sum("order_amount").alias("total_order_amount"),
    sum("discount_amount").alias("total_discount_amount"),
    sum("actual_payment").alias("total_actual_payment"),
    count("*").alias("order_count"),
]
consumption_features = order_transaction_df.groupBy("user_id").agg(*_consumption_aggs)

# Per-user marketing-funnel aggregates: exposures, clicks, conversions, value.
_marketing_aggs = [
    count("*").alias("campaign_exposure_count"),
    # A non-null click/conversion timestamp marks the event as having happened.
    sum(col("click_time").isNotNull().cast("int")).alias("campaign_click_count"),
    sum(col("conversion_time").isNotNull().cast("int")).alias("campaign_conversion_count"),
    sum("conversion_value").alias("total_conversion_value"),
]
marketing_features = marketing_response_df.groupBy("user_id").agg(*_marketing_aggs)

# Left-join each feature family onto the demographic backbone so that users
# with no activity in a given table are kept (their feature columns are null).
all_features = basic_features
for _frame in (behavior_features, consumption_features, marketing_features):
    all_features = all_features.join(_frame, on="user_id", how="left")

# Preview the joined feature table.
all_features.show()

# Persist the feature set. Spark writes a directory of part files at this
# path; header=True keeps the column names assigned above so the export is
# self-describing (without it the CSV would be headerless and ambiguous).
output_path = "feature/features.csv"
try:
    all_features.write.csv(output_path, mode="overwrite", header=True)
    print(f"特征数据已保存至: {output_path}")
except Exception as e:
    # Best-effort export at the script boundary: report the failure and
    # continue so the session still shuts down.
    print(f"保存特征数据失败: {e}")
finally:
    # Always release Spark resources, even if the write raised.
    spark.stop()