from pyspark.sql import SparkSession
from pyspark.sql import functions as F
from pyspark.sql.functions import col, sum, count, avg, when, split, lag
from pyspark.sql.window import Window
from pyspark.ml import Pipeline
from pyspark.ml.feature import StringIndexer, OneHotEncoder


def initialize_spark():
    """Create (or reuse) the SparkSession for this pipeline.

    Returns:
        A SparkSession named "DamoPanFeatureEngineering" configured with
        20 shuffle partitions (small, local-friendly default).
    """
    builder = SparkSession.builder
    builder = builder.appName("DamoPanFeatureEngineering")
    builder = builder.config("spark.sql.shuffle.partitions", "20")
    spark = builder.getOrCreate()
    print("SparkSession初始化完成")
    return spark


def load_data(spark, data_path):
    """Load the DamoPan user-feature dataset from CSV or Parquet.

    Args:
        spark: active SparkSession.
        data_path: source path; a ``.csv`` suffix selects the CSV reader
            (no header row, schema inferred), anything else is read as
            Parquet.

    Returns:
        The loaded DataFrame, or ``None`` when reading fails.
    """
    try:
        if not data_path.endswith('.csv'):
            df = spark.read.parquet(data_path)
            print(f"数据加载成功，共{df.count()}条记录，{len(df.columns)}个字段")
            return df

        # CSV path: read without header auto-detection, then assign the
        # expected column names manually (adjust to the real file layout).
        df = spark.read.csv(data_path, header=False, inferSchema=True)
        column_names = ["campaign_id", "user_id", "campaign_type", "exposure_time",
                        "click_time", "conversion_time", "conversion_type",
                        "conversion_value"]
        df = df.toDF(*column_names)

        print(f"数据加载成功，共{df.count()}条记录，{len(df.columns)}个字段")
        print(f"数据列名: {df.columns}")
        df.show(5)  # peek at the first rows for debugging
        return df
    except Exception as e:
        print(f"数据加载失败: {e}")
        return None


def preprocess_data(df):
    """Clean the raw DataFrame: report nulls, cast timestamps, fill defaults.

    Args:
        df: raw event-level DataFrame from ``load_data`` (may be ``None``).

    Returns:
        The cleaned DataFrame, or ``None`` when ``df`` is ``None``.
    """
    if df is None:
        return None

    print("开始数据预处理...")

    # Per-column null counts (column names are assumed to have been
    # normalized already in load_data).
    null_summary = df.select(
        [count(F.when(col(name).isNull(), name)).alias(name) for name in df.columns]
    )
    print("缺失值统计:")
    null_summary.show()

    # Cast each event-time column (if present) to a proper timestamp type.
    for ts_name in ("exposure_time", "click_time", "conversion_time"):
        if ts_name in df.columns:
            df = df.withColumn(ts_name, F.to_timestamp(ts_name))

    # Sensible defaults for rows that never converted.
    df = df.fillna({"conversion_type": "None", "conversion_value": 0.0})

    print("数据预处理完成")
    return df


def feature_engineering(df):
    """Build per-user marketing-response features.

    Derives event-level indicator/delay columns, then aggregates them by
    ``user_id`` into exposure/click/conversion counts, average delays,
    total conversion value, and response rates.

    Args:
        df: preprocessed event-level DataFrame (may be ``None``).

    Returns:
        A one-row-per-user feature DataFrame, or ``None`` when ``df`` is
        ``None``.
    """
    if df is None:
        return None

    print("开始特征工程...")

    available_columns = df.columns
    print(f"可用列: {available_columns}")

    # 1. Event-level response indicators and delays.
    #    BUG FIX: the aggregation below references these columns
    #    unconditionally, but they were previously only created when both
    #    exposure_time AND click_time existed, crashing on other schemas.
    #    Each derived column is now guarded by exactly the source columns
    #    it needs, with a neutral default otherwise. Also, conversion_delay
    #    was guarded on click_time although it reads conversion_time.
    if "click_time" in available_columns:
        df = df.withColumn("is_clicked", F.when(col("click_time").isNotNull(), 1).otherwise(0))
    else:
        df = df.withColumn("is_clicked", F.lit(0))

    if "conversion_time" in available_columns:
        df = df.withColumn("is_converted", F.when(col("conversion_time").isNotNull(), 1).otherwise(0))
    else:
        df = df.withColumn("is_converted", F.lit(0))

    if "exposure_time" in available_columns and "click_time" in available_columns:
        # Hours from exposure to click (null when either timestamp is null).
        df = df.withColumn(
            "click_delay",
            (F.unix_timestamp("click_time") - F.unix_timestamp("exposure_time")) / 3600)
    else:
        df = df.withColumn("click_delay", F.lit(None).cast("double"))

    if "exposure_time" in available_columns and "conversion_time" in available_columns:
        # Hours from exposure to conversion.
        df = df.withColumn(
            "conversion_delay",
            (F.unix_timestamp("conversion_time") - F.unix_timestamp("exposure_time")) / 3600)
    else:
        df = df.withColumn("conversion_delay", F.lit(None).cast("double"))

    # 2. Aggregate to one row per user.
    user_features = df.groupBy("user_id").agg(
        count("*").alias("campaign_exposure_count"),
        sum("is_clicked").alias("click_count"),
        sum("is_converted").alias("conversion_count"),
        avg("click_delay").alias("avg_click_delay"),
        avg("conversion_delay").alias("avg_conversion_delay"),
        sum("conversion_value").alias("total_conversion_value"),
        F.countDistinct("campaign_id").alias("distinct_campaign_count"),
    )

    # 3. Response rates (Spark SQL division by zero yields null, which is
    #    the desired "undefined rate" semantics here).
    user_features = user_features.withColumn(
        "click_through_rate", col("click_count") / col("campaign_exposure_count"))
    user_features = user_features.withColumn(
        "conversion_rate", col("conversion_count") / col("click_count"))

    # NOTE(review): the original code fitted a StringIndexer/OneHotEncoder
    # pipeline on campaign_type here, but the transformed frame was
    # discarded — the function returns the user-level aggregate, so that
    # work had no effect on the output and has been removed. If encoded
    # campaign types are needed, they must be joined into user_features
    # explicitly.

    print("特征工程完成")
    return user_features


def analyze_features(df):
    """Print summary statistics and strongly-correlated numeric feature pairs.

    Args:
        df: feature DataFrame (may be ``None``).

    Returns:
        ``df`` unchanged (for pipeline chaining), or ``None`` when ``df``
        is ``None``.
    """
    if df is None:
        return None

    print("开始特征分析...")

    # Basic descriptive statistics over the numeric columns only.
    numeric_cols = [name for name, dtype in df.dtypes
                    if dtype in ['int', 'bigint', 'double', 'float']]
    df.select(numeric_cols).describe().show()

    # Pairwise Pearson correlations; report only the strong ones.
    # FIX: each df.stat.corr call launches a full Spark job, and the
    # original double loop computed every pair twice (corr(a, b) and
    # corr(b, a)) — iterate each unordered pair exactly once instead.
    print("特征相关性分析:")
    for i, col1 in enumerate(numeric_cols):
        for col2 in numeric_cols[i + 1:]:
            corr = df.stat.corr(col1, col2)
            if abs(corr) > 0.5:
                print(f"{col1} 和 {col2} 的相关性: {corr:.4f}")

    print("特征分析完成")
    return df


def save_features(df, output_path):
    """Persist the feature DataFrame to ``output_path`` as headered CSV.

    Overwrites any existing output. Failures are logged rather than
    raised, matching the best-effort style of the rest of the pipeline.

    Args:
        df: feature DataFrame (may be ``None``, in which case nothing is
            written).
        output_path: destination directory for the CSV output.
    """
    if df is None:
        print("没有数据可保存")
        return

    try:
        df.write.csv(output_path, header=True, mode="overwrite")
    except Exception as e:
        print(f"保存特征数据失败: {e}")
    else:
        print(f"特征数据已保存至: {output_path}")


def main():
    """Run the end-to-end flow: load → preprocess → engineer → analyze → save."""
    spark = initialize_spark()

    # Replace with the real input/output locations before running.
    data_path = "D:/Practical/python/work/two/mock_data/marketing_response.csv"
    output_path = "D:/Practical/python/work/two/feature/marketing_features.csv"

    # Each stage tolerates a None from the previous one, so the chain is
    # safe even when loading fails.
    raw = load_data(spark, data_path)
    cleaned = preprocess_data(raw)
    features = feature_engineering(cleaned)
    analyzed = analyze_features(features)
    save_features(analyzed, output_path)

    spark.stop()


if __name__ == "__main__":
    main()