from pyspark.sql import SparkSession
from pyspark.sql.functions import col, count, sum, avg, max, min, when, datediff, current_date
from pyspark.ml import Pipeline
from pyspark.ml.feature import StringIndexer, VectorAssembler, StandardScaler
from pyspark.ml.classification import RandomForestClassifier
from pyspark.ml.evaluation import BinaryClassificationEvaluator
from pyspark.ml.tuning import CrossValidator, ParamGridBuilder
import random
from datetime import datetime, timedelta
from pyspark.sql.functions import udf
from pyspark.sql.types import DoubleType
import numpy as np

# Initialize the SparkSession shared by the whole job.
spark = (
    SparkSession.builder
    .appName("CustomerChurnPrediction")
    .config("spark.driver.memory", "8g")
    .config("spark.executor.memory", "8g")
    .getOrCreate()
)


# Synthetic data generation
def generate_customer_data(num_customers=1000, seed=None):
    """Generate synthetic customer records for churn modeling.

    Each record is a 15-field tuple matching the DataFrame schema used
    below: (customer_id, customer_name, gender, age, member_level, region,
    registration_date, last_purchase_days, purchase_frequency,
    avg_order_amount, total_spend, is_active, email_response_rate,
    promotion_usage, is_churned).

    Args:
        num_customers: Number of customer rows to generate.
        seed: Optional seed for the ``random`` module, making runs
            reproducible. ``None`` (the default) keeps the previous
            behavior of using the module-level RNG state.

    Returns:
        list[tuple]: The generated customer rows.
    """
    if seed is not None:
        random.seed(seed)
    customer_data = []
    for i in range(1, num_customers + 1):
        customer_id = i
        customer_name = f"客户{i}"
        gender = random.choice(["男", "女"])
        age = random.randint(18, 70)
        member_level = random.choice(["普通会员", "银卡会员", "金卡会员", "钻石会员"])
        region = random.choice(["华东", "华北", "华南", "西南", "西北", "东北", "中部"])
        registration_date = (datetime.now() - timedelta(days=random.randint(30, 730))).strftime("%Y-%m-%d")

        # Behavioral features
        last_purchase_days = random.randint(0, 120)  # days since last purchase
        purchase_frequency = random.uniform(0.1, 5.0)  # purchases per month
        avg_order_amount = round(random.uniform(50, 2000), 2)  # average order value
        # Total spend assumes a fixed 730-day window minus the inactive tail;
        # NOTE(review): this window is independent of registration_date — confirm intended.
        total_spend = round(purchase_frequency * avg_order_amount * (730 - last_purchase_days) / 30, 2)
        is_active = 1 if last_purchase_days <= 30 else 0  # active = bought within 30 days

        # Marketing-response features
        email_response_rate = round(random.uniform(0, 1), 2)  # email response rate
        promotion_usage = round(random.uniform(0, 1), 2)  # promotion usage rate

        # Churn label: additive risk score used as a Bernoulli probability.
        # The factors can sum to 1.1, i.e. such customers churn with certainty
        # (random.uniform(0, 1) always returns a value below 1.0).
        churn_probability = 0.0
        if last_purchase_days > 90:
            churn_probability += 0.5
        if purchase_frequency < 0.5:
            churn_probability += 0.3
        if avg_order_amount < 100:
            churn_probability += 0.2
        if member_level == "普通会员":
            churn_probability += 0.1

        is_churned = 1 if random.uniform(0, 1) < churn_probability else 0

        customer_data.append((
            customer_id,
            customer_name,
            gender,
            age,
            member_level,
            region,
            registration_date,
            last_purchase_days,
            purchase_frequency,
            avg_order_amount,
            total_spend,
            is_active,
            email_response_rate,
            promotion_usage,
            is_churned
        ))

    return customer_data


# Generate the raw customer rows.
customer_data = generate_customer_data(10000)

# Column names matching the 15-field tuples produced above.
customer_columns = [
    "customer_id", "customer_name", "gender", "age", "member_level",
    "region", "registration_date", "last_purchase_days",
    "purchase_frequency", "avg_order_amount", "total_spend",
    "is_active", "email_response_rate", "promotion_usage", "is_churned",
]

# Materialize the rows as a Spark DataFrame.
df = spark.createDataFrame(customer_data, customer_columns)

# --- Preprocessing ---------------------------------------------------
# Encode categorical columns as numeric indices; handleInvalid="keep"
# routes unseen labels to an extra bucket instead of failing at transform.
categorical_cols = ["gender", "member_level", "region"]
indexers = [
    StringIndexer(inputCol=c, outputCol=f"{c}_index").setHandleInvalid("keep")
    for c in categorical_cols
]

# Numeric features fed directly into the feature vector.
numeric_cols = [
    "age", "last_purchase_days", "purchase_frequency",
    "avg_order_amount", "total_spend", "is_active",
    "email_response_rate", "promotion_usage"
]
indexed_cols = [f"{c}_index" for c in categorical_cols]

# Merge indexed categoricals + numerics into a single vector column.
assembler = VectorAssembler(
    inputCols=indexed_cols + numeric_cols,
    outputCol="features"
)

# Scale to unit standard deviation (StandardScaler defaults: withStd=True, withMean=False).
scaler = StandardScaler(inputCol="features", outputCol="scaled_features")

# Random-forest classifier over the scaled feature vector.
rf = RandomForestClassifier(
    labelCol="is_churned",
    featuresCol="scaled_features",
    numTrees=100,
    maxDepth=5,
    seed=42
)

# Full pipeline: string indexers -> assembler -> scaler -> classifier.
pipeline = Pipeline(stages=[*indexers, assembler, scaler, rf])

# 70/30 train/test split with a fixed seed, then fit and score.
train_data, test_data = df.randomSplit([0.7, 0.3], seed=42)
model = pipeline.fit(train_data)
predictions = model.transform(test_data)

# Evaluate with area under the ROC curve on the held-out set.
evaluator = BinaryClassificationEvaluator(
    labelCol="is_churned",
    rawPredictionCol="rawPrediction",
    metricName="areaUnderROC"
)
auc = evaluator.evaluate(predictions)
print(f"模型评估 - AUC: {auc:.4f}")

# Inspect which inputs drive the model.
rf_model = model.stages[-1]  # last pipeline stage is the fitted RandomForestClassificationModel
feature_importances = rf_model.featureImportances
# Name order must mirror VectorAssembler's inputCols (indexed + numeric).
feature_names = indexed_cols + numeric_cols

print("\n特征重要性:")
# featureImportances is a pyspark.ml.linalg.Vector (may be sparse);
# toArray() densifies it so names and values can be zipped directly,
# replacing the index-based range(len(...)) loop.
for name, importance in zip(feature_names, feature_importances.toArray()):
    print(f"{name}: {importance:.4f}")

# Score every customer (train + test combined) to get fleet-wide churn probabilities.
all_predictions = model.transform(df)

# Extract prediction results per customer.
# Helper for a UDF: pull the churn probability (index 1) from the probability vector.
def get_churn_probability(prob_vector):
    """Return the positive-class (churn) probability as a float, or None for a null vector."""
    if prob_vector is None:
        return None
    return float(prob_vector[1])

get_churn_probability_udf = udf(get_churn_probability, DoubleType())

# Project the scored DataFrame down to the reporting columns.
churn_predictions = all_predictions.select(
    "customer_id",
    "customer_name",
    "is_churned",
    col("prediction").alias("churn_prediction"),
    get_churn_probability_udf(col("probability")).alias("churn_probability")
)

# Customers whose churn probability exceeds 0.7, highest risk first.
high_risk_customers = (
    churn_predictions
    .filter(col("churn_probability") > 0.7)
    .orderBy(col("churn_probability").desc())
)

# Print the top high-risk customers.
print("\n高流失风险客户列表:")
high_risk_customers.show(20, truncate=False)


# Persist prediction outputs as CSV (left disabled; enable once an output
# path / filesystem is available).
# try:
#     print("\n正在保存预测结果...")
#     churn_predictions.write.csv("churn_predictions.csv", header=True, mode="overwrite")
#     high_risk_customers.write.csv("high_risk_customers.csv", header=True, mode="overwrite")
#     print("保存成功！")
# except Exception as e:
#     print(f"保存失败: {e}")

# Release cluster resources.
spark.stop()