from pyspark.sql import SparkSession
from pyspark.sql.functions import col, count, avg, stddev, min, max, sum, collect_list
from pyspark.ml.feature import StringIndexer, OneHotEncoder, VectorAssembler
from pyspark.ml import Pipeline
import pyspark.sql.functions as F


def initialize_spark():
    """Create (or reuse) a SparkSession configured for this analysis job.

    Returns:
        A SparkSession with driver/executor memory and shuffle-partition
        settings tuned for the Damo feature workload.
    """
    builder = (
        SparkSession.builder
        .appName("DamoDiskFeatureAnalysis")
        .config("spark.driver.memory", "8g")
        .config("spark.executor.memory", "16g")
        .config("spark.sql.shuffle.partitions", "100")
    )
    return builder.getOrCreate()


def load_data(spark, data_path):
    """Load the Damo user-feature dataset from a Parquet source.

    Args:
        spark: active SparkSession.
        data_path: path (e.g. HDFS URI) of the Parquet data.

    Returns:
        The loaded DataFrame, or None when the read (or the row count
        triggered for the log line) fails.
    """
    try:
        frame = spark.read.parquet(data_path)
        print(f"数据加载成功，共{frame.count()}条记录，{len(frame.columns)}个字段")
        return frame
    except Exception as e:
        print(f"数据加载失败: {e}")
        return None


def preprocess_data(df):
    """Preprocess the dataset: report missing values, impute, drop outliers.

    Numeric columns are imputed with an approximate median, string columns
    with the literal "unknown". Rows with an implausible ``age`` (outside
    0-100) are dropped as an outlier-handling example.

    Args:
        df: input DataFrame, or None (passed through unchanged).

    Returns:
        The cleaned DataFrame, or None if ``df`` was None.
    """
    if df is None:
        return None

    # Report per-column null counts before imputation.
    missing_counts = df.select([count(F.when(col(c).isNull(), c)).alias(c) for c in df.columns])
    missing_counts.show()

    numeric_cols = [name for name, dtype in df.dtypes if dtype in ['int', 'bigint', 'double', 'float']]
    string_cols = [name for name, dtype in df.dtypes if dtype == 'string']

    # Impute numeric columns with the approximate median. Collect the fill
    # values into one dict and apply a single fillna, instead of creating a
    # new DataFrame per column, which keeps the query plan (lineage) short.
    fill_values = {}
    for col_name in numeric_cols:
        quantiles = df.approxQuantile(col_name, [0.5], 0.01)
        # approxQuantile returns an empty list when the column has no
        # non-null values; skip such columns instead of raising IndexError.
        if quantiles:
            fill_values[col_name] = quantiles[0]
    if fill_values:
        df = df.fillna(fill_values)

    # String columns default to "unknown".
    if string_cols:
        df = df.fillna("unknown", subset=string_cols)

    # Outlier handling example: drop records with age outside [0, 100].
    if "age" in numeric_cols:
        df = df.filter((col("age") >= 0) & (col("age") <= 100))

    return df


def feature_engineering(df):
    """Build the base Damo feature set on top of the cleaned DataFrame.

    Derives an activity score, a preferred-category array, RFM scores and a
    one-hot-encoded gender column. Each derived feature is only computed
    when its source columns are present, so the function degrades
    gracefully on partial schemas instead of raising AnalysisException.

    Other feature groups available directly on the profile (no derivation
    needed here):
      - demographic: gender, age, occupation, education, marital_status,
        has_children, income_level
      - consumption: avg_order_value, total_spend, purchase_frequency,
        discount_sensitivity, brand_loyalty
      - device: device_type, os_type, browser, screen_resolution
      - geo: province, city, district, city_level, is_first_tier_city,
        is_new_first_tier_city
      - interest: interests, hobbies, lifestyle, social_class
      - marketing response: campaign_response_rate, conversion_rate,
        avg_cart_value, abandoned_cart_rate, coupon_usage_rate

    Args:
        df: preprocessed DataFrame, or None (passed through unchanged).

    Returns:
        DataFrame with the derived feature columns appended, or None.
    """
    if df is None:
        return None

    def _tiered_score(column, cutoffs, ascending):
        """Map *column* onto a 1-5 score via four ordered cutoffs.

        ``cutoffs`` are the boundaries for scores 5, 4, 3 and 2; values past
        every cutoff get 1. ``ascending=True`` compares with ``<=`` (lower is
        better, e.g. days since purchase), otherwise ``>=`` (higher is better).
        """
        expr = None
        for score, cutoff in zip((5, 4, 3, 2), cutoffs):
            cond = column <= cutoff if ascending else column >= cutoff
            expr = F.when(cond, score) if expr is None else expr.when(cond, score)
        return expr.otherwise(1)

    # Behavioral feature: weighted activity score from visit frequency,
    # browse depth and recency, scaled by 100.
    if all(c in df.columns for c in ("visit_frequency", "browse_depth", "recency")):
        df = df.withColumn("activity_score",
                           (col("visit_frequency") * 0.4 +
                            col("browse_depth") * 0.3 +
                            col("recency") * 0.3) * 100)

    # Preference feature: comma-separated category string -> array column.
    if "category_preferences" in df.columns:
        df = df.withColumn("preferred_categories",
                           F.split(col("category_preferences"), ","))

    # RFM features (Recency, Frequency, Monetary), each bucketed to 1-5.
    if "last_purchase_days" in df.columns:
        df = df.withColumn("recency_score",
                           _tiered_score(col("last_purchase_days"),
                                         (7, 15, 30, 60), ascending=True))

    if "purchase_frequency" in df.columns:
        df = df.withColumn("frequency_score",
                           _tiered_score(col("purchase_frequency"),
                                         (10, 5, 3, 1), ascending=False))

    if "total_spend" in df.columns:
        df = df.withColumn("monetary_score",
                           _tiered_score(col("total_spend"),
                                         (10000, 5000, 2000, 500), ascending=False))

    # Composite RFM score: weighted blend of the three sub-scores.
    if all(c in df.columns for c in ("recency_score", "frequency_score", "monetary_score")):
        df = df.withColumn("rfm_score",
                           col("recency_score") * 0.35 +
                           col("frequency_score") * 0.35 +
                           col("monetary_score") * 0.30)

    # Categorical encoding example: index + one-hot encode gender.
    if "gender" in df.columns:
        indexer = StringIndexer(inputCol="gender", outputCol="gender_index")
        encoder = OneHotEncoder(inputCols=["gender_index"], outputCols=["gender_encoded"])

        pipeline = Pipeline(stages=[indexer, encoder])
        df = pipeline.fit(df).transform(df)

    return df


def analyze_features(df):
    """Analyze the engineered features: summary statistics, pairwise
    correlations, and (when a label column exists) random-forest feature
    importances.

    Args:
        df: feature DataFrame, or None (passed through unchanged).

    Returns:
        The (possibly augmented, with a ``features`` vector column)
        DataFrame, or None if ``df`` was None.
    """
    if df is None:
        return None

    from itertools import combinations

    numeric_cols = [name for name, dtype in df.dtypes if dtype in ['int', 'bigint', 'double', 'float']]

    # Summary statistics (count/mean/stddev/min/max); guard against an
    # empty selection, which would raise.
    if numeric_cols:
        df.select(numeric_cols).describe().show()

    # Pairwise Pearson correlation. combinations() visits each unordered
    # pair exactly once — the original nested loop computed corr(a, b) and
    # corr(b, a) separately, doubling the number of Spark jobs.
    print("特征相关性分析:")
    for col1, col2 in combinations(numeric_cols, 2):
        corr = df.stat.corr(col1, col2)
        if abs(corr) > 0.5:
            print(f"{col1} 和 {col2} 的相关性: {corr:.4f}")

    # Feature-importance example via random forest; requires a label column.
    # (Illustrative only — real use needs a proper train/evaluate split.)
    from pyspark.ml.classification import RandomForestClassifier

    if "conversion_label" in df.columns and numeric_cols:
        assembler = VectorAssembler(inputCols=numeric_cols, outputCol="features")
        df = assembler.transform(df)

        rf = RandomForestClassifier(labelCol="conversion_label", featuresCol="features")
        model = rf.fit(df)

        print("特征重要性:")
        # Loop variable renamed from `col`, which shadowed the
        # pyspark.sql.functions.col imported at module level.
        for i, feature_name in enumerate(numeric_cols):
            print(f"{feature_name}: {model.featureImportances[i]:.4f}")

    return df


def save_features(df, output_path):
    """Persist the feature DataFrame as Parquet, overwriting any prior run.

    Args:
        df: DataFrame to save; None prints a notice and returns.
        output_path: destination path (e.g. HDFS URI).
    """
    if df is None:
        print("没有数据可保存")
        return

    try:
        df.write.parquet(output_path, mode="overwrite")
    except Exception as e:
        print(f"保存特征数据失败: {e}")
    else:
        print(f"特征数据已保存至: {output_path}")


def main():
    """Run the end-to-end feature-engineering pipeline.

    Load -> preprocess -> engineer -> analyze -> save; each stage passes
    None through, so a failed load short-circuits cleanly to the save step,
    which reports that there is nothing to persist.
    """
    spark = initialize_spark()

    # Replace with real data locations before running.
    input_path = "hdfs://path/to/your/damo_disk_data.parquet"
    output_path = "hdfs://path/to/save/features.parquet"

    features = analyze_features(
        feature_engineering(
            preprocess_data(
                load_data(spark, input_path))))
    save_features(features, output_path)

    spark.stop()


# Script entry point: run the full pipeline only when executed directly,
# not when imported as a module.
if __name__ == "__main__":
    main()