from pyspark.sql import SparkSession
import pyspark.sql.functions as F
from pyspark.sql.types import StringType, IntegerType
from pyspark.ml import Pipeline
from pyspark.ml.feature import (
    StringIndexer,
    VectorAssembler,
    Tokenizer,
    StopWordsRemover,
    HashingTF,
    IDF
)
from pyspark.ml.classification import RandomForestClassifier
from pyspark.ml.evaluation import MulticlassClassificationEvaluator
import pandas as pd
import matplotlib

matplotlib.use('Agg')  # headless backend: render plots straight to files (no display server)
import matplotlib.pyplot as plt
import seaborn as sns

# Initialize the Spark session (Python 3.7 compatible); 2g executor/driver
# memory is a modest default suited to a single-machine run.
spark = SparkSession.builder \
    .appName("EcommerceTicketClassification") \
    .config("spark.executor.memory", "2g") \
    .config("spark.driver.memory", "2g") \
    .getOrCreate()


# 1. 数据准备 - 适配Python 3.7的数据生成
def generate_ticket_data(spark, num_records=1000):
    """Generate a deterministic synthetic e-commerce support-ticket dataset.

    All fields are derived from the row index with small moduli, so the
    same `num_records` always yields the same data (Python 3.7 compatible).

    Args:
        spark: active SparkSession used to materialize the DataFrame.
        num_records: number of rows to synthesize (default 1000).

    Returns:
        DataFrame: Spark DataFrame of mock ticket records.
    """
    from pyspark.sql.types import StructType, StructField

    # Column layout of one ticket record.
    schema = StructType([
        StructField("ticket_id", IntegerType()),
        StructField("customer_id", IntegerType()),
        StructField("store_id", IntegerType()),
        StructField("product_category", StringType()),
        StructField("customer_level", StringType()),
        StructField("priority", IntegerType()),
        StructField("response_time_hours", IntegerType()),
        StructField("customer_satisfaction", IntegerType()),
        StructField("repeat_issue_count", IntegerType()),
        StructField("actual_ticket_type", StringType()),
        StructField("description", StringType())
    ])

    # Categorical value pools.
    ticket_types = ["退货", "换货", "质量投诉", "物流问题", "产品咨询", "发票问题", "退款", "安装问题"]
    product_categories = ["手机", "电脑", "家电", "服装", "食品", "美妆", "图书", "运动"]
    customer_levels = ["普通", "银卡", "金卡", "钻石"]

    # One (format template, modulus) pair per ticket type; the template is
    # filled with a small rotating integer so descriptions stay deterministic.
    desc_specs = {
        "退货": ("退货申请 原因{}", 5),
        "换货": ("换货申请 原因{}", 4),
        "质量投诉": ("质量问题 描述{}", 3),
        "物流问题": ("物流问题 单号{}", 1000),
        "产品咨询": ("咨询问题 {}", 100),
        "发票问题": ("发票问题 {}", 50),
        "退款": ("退款申请 {}", 20),
    }
    default_spec = ("安装问题 {}", 10)  # fallback, matches the original else-branch

    rows = []
    for idx in range(num_records):
        kind = ticket_types[idx % len(ticket_types)]
        template, mod = desc_specs.get(kind, default_spec)
        rows.append((
            5000 + idx,                                   # ticket_id
            1000 + idx % 500,                             # customer_id: 500 distinct customers
            1 + idx % 20,                                 # store_id: 20 distinct stores
            product_categories[idx % len(product_categories)],
            customer_levels[idx % len(customer_levels)],
            1 + idx % 5,                                  # priority 1-5
            1 + idx % 48,                                 # response time 1-48 hours
            30 + idx % 70,                                # satisfaction 30-99
            idx % 5,                                      # repeat issue count 0-4
            kind,
            template.format(idx % mod),
        ))

    return spark.createDataFrame(rows, schema)


# Generate the mock dataset and preview the first few rows
ticket_data = generate_ticket_data(spark, 1000)
print("工单数据示例:")
ticket_data.show(5, truncate=False)


def feature_engineering(df):
    """Derive the preprocessing columns the ML pipeline consumes.

    Adds two columns:
        clean_description: `description` with every character that is not an
            ASCII letter, digit, or CJK ideograph replaced by a space.
        customer_level_num: ordinal encoding of `customer_level`
            (普通=1, 银卡=2, 金卡=3, anything else=4).

    Args:
        df: raw ticket DataFrame.

    Returns:
        DataFrame: input DataFrame with the two derived columns appended.
    """
    # Strip punctuation/symbols so the tokenizer only sees words and digits.
    cleaned = df.withColumn(
        "clean_description",
        F.regexp_replace(F.col("description"), "[^a-zA-Z0-9\u4e00-\u9fa5]", " "),
    )

    # Ordinal-encode the loyalty tier; any unrecognized tier falls through to 4.
    level = F.col("customer_level")
    tier_expr = (
        F.when(level == "普通", 1)
        .when(level == "银卡", 2)
        .when(level == "金卡", 3)
        .otherwise(4)
    )
    return cleaned.withColumn("customer_level_num", tier_expr)


# 应用特征工程
processed_data = feature_engineering(ticket_data)


def build_pipeline():
    """Assemble the end-to-end ticket-classification pipeline.

    Stage order (relied upon by code that indexes `model.stages`):
    tokenize the cleaned description, drop Chinese stop words, hash terms
    into a 100-dim TF vector, re-weight with IDF, index the product
    category, index the target label, concatenate every feature into one
    vector, then train a random forest.

    Returns:
        Pipeline: unfitted Spark ML pipeline.
    """
    stages = [
        # --- text branch: clean_description -> TF-IDF vector ---
        Tokenizer(inputCol="clean_description", outputCol="words"),
        StopWordsRemover(
            inputCol="words",
            outputCol="filtered_words",
            stopWords=["的", "了", "和", "是", "就", "都", "而", "及", "与", "这", "那"],
        ),
        HashingTF(inputCol="filtered_words", outputCol="raw_features", numFeatures=100),
        IDF(inputCol="raw_features", outputCol="text_features"),
        # --- categorical indexing (features, then target label) ---
        StringIndexer(inputCol="product_category", outputCol="product_index"),
        StringIndexer(inputCol="actual_ticket_type", outputCol="label"),
        # --- merge numeric, categorical, and text features ---
        VectorAssembler(
            inputCols=[
                "product_index",
                "customer_level_num",
                "priority",
                "response_time_hours",
                "customer_satisfaction",
                "repeat_issue_count",
                "text_features",
            ],
            outputCol="features",
        ),
        # --- classifier: fixed seed for reproducible forests ---
        RandomForestClassifier(
            labelCol="label",
            featuresCol="features",
            numTrees=50,
            maxDepth=5,
            seed=42,
        ),
    ]
    return Pipeline(stages=stages)


def train_and_evaluate(df, model_path="ticket_classification_model",
                       train_ratio=0.8, seed=42):
    """Train the ticket classifier and report held-out accuracy.

    Splits *df* into train/test sets, fits the full pipeline on the
    training split, evaluates multiclass accuracy on the test split, and
    persists the fitted PipelineModel to disk.

    Args:
        df: preprocessed DataFrame (output of feature_engineering).
        model_path: directory to save the fitted model to; defaults to the
            original hard-coded "ticket_classification_model".
        train_ratio: fraction of rows used for training (default 0.8).
        seed: random seed for the split, fixed for reproducibility.

    Returns:
        tuple: (fitted PipelineModel, predictions DataFrame for the test split).
    """
    # Reproducible train/test split.
    train_data, test_data = df.randomSplit([train_ratio, 1.0 - train_ratio], seed=seed)

    # Fit the whole pipeline (text processing + indexing + forest) at once.
    model = build_pipeline().fit(train_data)

    # Score the held-out split.
    predictions = model.transform(test_data)

    # Multiclass accuracy on the test set.
    evaluator = MulticlassClassificationEvaluator(
        labelCol="label",
        predictionCol="prediction",
        metricName="accuracy"
    )
    accuracy = evaluator.evaluate(predictions)
    print("模型准确率: {:.4f}".format(accuracy))

    # Persist the fitted model, replacing any previous run at the same path.
    model.write().overwrite().save(model_path)
    print("模型已保存")

    return model, predictions


# Fit the pipeline on the processed data and keep the test-set predictions
model, predictions = train_and_evaluate(processed_data)


def feature_importance(model):
    """Rank and plot feature importances from the fitted random forest.

    The assembled feature vector is 6 scalar columns followed by the 100
    hashed TF-IDF text dimensions, so importances beyond index 5 belong to
    individual text-hash buckets — not to a single "text_features" column.

    Args:
        model: fitted PipelineModel whose final stage is the random forest.

    Returns:
        pandas.DataFrame: columns [feature, importance], sorted descending.
    """
    # The random forest is the last pipeline stage.
    rf_model = model.stages[-1]
    importances = rf_model.featureImportances

    # Names of the scalar inputs, in VectorAssembler order.
    scalar_names = [
        "product_index",
        "customer_level_num",
        "priority",
        "response_time_hours",
        "customer_satisfaction",
        "repeat_issue_count",
    ]

    importance_data = []
    for i in range(len(importances)):
        if i < len(scalar_names):
            name = scalar_names[i]
        else:
            # Bug fix: indices >= 6 are the hashed text dimensions. The old
            # code labeled only index 6 as "text_features" and the rest as
            # generic "feature_N", which misrepresented the vector layout.
            name = "text_feature_{}".format(i - len(scalar_names))
        importance_data.append({
            "feature": name,
            "importance": float(importances[i])
        })

    importance_df = pd.DataFrame(importance_data)
    importance_df = importance_df.sort_values("importance", ascending=False)

    # Plot the top-10 features; the Agg backend writes straight to disk.
    plt.figure(figsize=(12, 6))
    sns.barplot(x="importance", y="feature",
                data=importance_df.head(10))
    plt.title("特征重要性分析")
    plt.tight_layout()
    plt.savefig('feature_importance.png')
    plt.close()

    print("特征重要性图表已保存")
    return importance_df


# Rank features by importance and print the top entries
feature_importance_df = feature_importance(model)
print("特征重要性排序:")
print(feature_importance_df.head(10))


def predict_new_tickets(model, new_data):
    """Classify new tickets and map predicted indices back to type names.

    Args:
        model: fitted PipelineModel produced by train_and_evaluate.
        new_data: raw ticket DataFrame (same schema as the training input).

    Returns:
        DataFrame: ticket_id, description, actual_ticket_type, and
            predicted_ticket_type columns.

    Raises:
        ValueError: if the model contains no fitted label StringIndexer.
    """
    # Apply the same derived columns used at training time.
    new_data = feature_engineering(new_data)

    # Run the full fitted pipeline.
    predictions = model.transform(new_data)

    # Bug fix: locate the fitted label indexer by its output column instead
    # of a hard-coded `model.stages[5]`, which silently broke whenever the
    # pipeline gained or lost a stage.
    labels = None
    for stage in model.stages:
        if getattr(stage, "labels", None) is not None and stage.getOutputCol() == "label":
            labels = stage.labels
            break
    if labels is None:
        raise ValueError("no fitted StringIndexer producing 'label' found in model")

    # UDF: map the numeric prediction index back to the original type string.
    def index_to_label(index):
        try:
            return labels[int(index)]
        except IndexError:
            return "未知"

    index_to_label_udf = F.udf(index_to_label, StringType())

    # Attach the readable prediction and keep only the reporting columns.
    result = predictions.withColumn(
        "predicted_ticket_type",
        index_to_label_udf(F.col("prediction"))
    ).select(
        "ticket_id",
        "description",
        "actual_ticket_type",
        "predicted_ticket_type"
    )

    return result


# Score a small batch of freshly generated tickets with the trained model
new_tickets = generate_ticket_data(spark, 5)
predictions = predict_new_tickets(model, new_tickets)
print("\n新工单预测结果:")
predictions.show(truncate=False)

# Shut down the Spark session
spark.stop()
