#!/usr/bin/env python3
# -*- coding: utf-8 -*-

"""
工单系统客户-主题-店铺关系分析（修正版）
修复日期类型转换问题
"""

from pyspark.sql import SparkSession
import pyspark.sql.functions as F
from pyspark.sql.types import *
from datetime import datetime
import matplotlib.pyplot as plt
import seaborn as sns
import os


# 初始化Spark会话
def init_spark():
    """Create (or reuse) the SparkSession for the ticket-analysis job.

    Returns:
        SparkSession: session named "TicketAnalysis" with 2g of executor
        and driver memory.
    """
    builder = SparkSession.builder.appName("TicketAnalysis")
    builder = builder.config("spark.executor.memory", "2g")
    builder = builder.config("spark.driver.memory", "2g")
    return builder.getOrCreate()


# 数据准备（修正日期处理）
def prepare_data(spark):
    """Build the mock store / customer / ticket DataFrames.

    Dates and timestamps are parsed into proper Python ``date`` /
    ``datetime`` objects so they match the declared Spark schema types.

    Args:
        spark: active SparkSession used to create the DataFrames.

    Returns:
        tuple: (store_df, customer_df, ticket_df)
    """

    def _schema(fields):
        # Build a StructType from (name, type[, nullable]) tuples;
        # nullable defaults to True when omitted.
        return StructType([StructField(*f) for f in fields])

    def _day(text):
        # ISO "YYYY-MM-DD" string -> datetime.date
        return datetime.fromisoformat(text).date()

    def _when(text):
        # ISO "YYYY-MM-DD HH:MM:SS" string -> datetime.datetime
        return datetime.fromisoformat(text)

    # Store table structure
    store_schema = _schema([
        ("store_id", IntegerType(), False),
        ("store_name", StringType()),
        ("store_type", StringType()),
        ("location", StringType()),
    ])

    # Customer table structure
    customer_schema = _schema([
        ("customer_id", IntegerType(), False),
        ("name", StringType()),
        ("level", StringType()),
        ("register_date", DateType()),
    ])

    # Ticket table structure
    ticket_schema = _schema([
        ("ticket_id", IntegerType(), False),
        ("customer_id", IntegerType()),
        ("store_id", IntegerType()),
        ("topic", StringType()),
        ("create_time", TimestampType()),
        ("status", StringType()),
        ("description", StringType()),
    ])

    # Mock store rows
    stores = [
        (1, "北京旗舰店", "flagship", "北京市"),
        (2, "上海体验店", "experience", "上海市"),
        (3, "广州专卖店", "franchise", "广州市"),
    ]

    # Mock customer rows (registration dates as real date objects)
    customers = [
        (1001, "张伟", "gold", _day("2022-01-15")),
        (1002, "李娜", "silver", _day("2022-03-20")),
        (1003, "王芳", "platinum", _day("2021-11-05")),
    ]

    # Mock ticket rows (creation times as real datetime objects)
    tickets = [
        (5001, 1001, 1, "退货问题", _when("2023-05-10 14:30:00"), "resolved",
         "收到的商品与描述不符"),
        (5002, 1002, 2, "物流问题", _when("2023-05-11 10:15:00"), "processing",
         "快递三天未更新"),
        (5003, 1001, 1, "质量问题", _when("2023-05-12 09:45:00"), "open",
         "商品有瑕疵"),
        (5004, 1003, 3, "发票问题", _when("2023-05-12 16:20:00"), "open",
         "需要补开发票"),
    ]

    store_df = spark.createDataFrame(stores, store_schema)
    customer_df = spark.createDataFrame(customers, customer_schema)
    ticket_df = spark.createDataFrame(tickets, ticket_schema)

    return store_df, customer_df, ticket_df


# 客户维度分析
def analyze_customers(ticket_df, customer_df):
    """Analyze ticket behavior per customer.

    Produces a per-customer summary (ticket count, resolved count, and
    average days elapsed since ticket creation — used here as a rough
    resolution-time proxy) plus an RFM-style scoring where resolved
    ticket count stands in for monetary value.

    Args:
        ticket_df: ticket DataFrame.
        customer_df: customer DataFrame.

    Returns:
        tuple: (stats, rfm) DataFrames.
    """
    print("\n客户工单统计:")

    # Reusable column expressions.
    resolved_flag = F.when(F.col("status") == "resolved", 1).otherwise(0)
    days_since_create = F.datediff(F.current_date(), F.to_date("create_time"))

    stats = (
        ticket_df.groupBy("customer_id")
        .agg(
            F.count("ticket_id").alias("ticket_count"),
            F.sum(resolved_flag).alias("resolved_count"),
            F.avg(days_since_create).alias("avg_resolve_days"),
        )
        .join(customer_df, "customer_id", "left")
        .orderBy(F.desc("ticket_count"))
    )
    stats.show()

    # RFM model: Recency / Frequency / Monetary, each scored 1-3.
    print("\n客户价值分析(RFM模型):")
    recency_score = (
        F.when(F.col("recency") <= 30, 3)
        .when(F.col("recency") <= 90, 2)
        .otherwise(1)
    )
    frequency_score = (
        F.when(F.col("frequency") >= 5, 3)
        .when(F.col("frequency") >= 2, 2)
        .otherwise(1)
    )
    monetary_score = (
        F.when(F.col("monetary") >= 3, 3)
        .when(F.col("monetary") >= 1, 2)
        .otherwise(1)
    )

    rfm = (
        ticket_df.join(customer_df, "customer_id")
        .groupBy("customer_id", "name", "level")
        .agg(
            # Days since the customer's most recent ticket.
            F.datediff(F.current_date(), F.max("create_time")).alias("recency"),
            F.count("ticket_id").alias("frequency"),
            # Resolved ticket count used as the monetary stand-in.
            F.sum(resolved_flag).alias("monetary"),
        )
        .withColumn("rfm_score", recency_score + frequency_score + monetary_score)
        .orderBy(F.desc("rfm_score"))
    )
    rfm.show()
    return stats, rfm


# 主题维度分析
def analyze_topics(ticket_df):
    """Summarize ticket distribution and handling time by topic.

    Args:
        ticket_df: ticket DataFrame.

    Returns:
        DataFrame: per-topic ticket count and average hours elapsed since
        creation for tickets that are no longer open, sorted by count.
    """
    print("\n工单主题分布:")
    # BUG FIX: the previous version averaged unix_timestamp(create_time)/3600,
    # i.e. absolute hours since the Unix epoch (~470,000), which is not a
    # resolution time at all. There is no resolve_time column in the schema,
    # so we approximate "hours to resolve" as the elapsed time between
    # creation and now for non-open tickets (consistent with the day-based
    # proxy used elsewhere in this file).
    hours_since_create = (
        F.unix_timestamp(F.current_timestamp()) - F.unix_timestamp("create_time")
    ) / 3600
    topic_dist = ticket_df.groupBy("topic") \
        .agg(
            F.count("ticket_id").alias("count"),  # tickets per topic
            F.avg(
                F.when(F.col("status") != "open", hours_since_create)
                .otherwise(None)
            ).alias("avg_hours_to_resolve")
        ) \
        .orderBy(F.desc("count"))

    topic_dist.show()
    return topic_dist


# 店铺维度分析
def analyze_stores(ticket_df, store_df):
    """Analyze ticket-handling quality per store.

    Args:
        ticket_df: ticket DataFrame.
        store_df: store DataFrame.

    Returns:
        DataFrame: per-store totals, resolve rate, and average days elapsed
        since ticket creation, sorted by resolve rate descending.
    """
    print("\n店铺工单处理质量:")

    is_resolved = F.when(F.col("status") == "resolved", 1).otherwise(0)
    days_open = F.datediff(F.current_date(), F.to_date("create_time"))

    performance = (
        ticket_df.join(store_df, "store_id")
        .groupBy("store_id", "store_name", "store_type")
        .agg(
            F.count("ticket_id").alias("total_tickets"),
            F.avg(is_resolved).alias("resolve_rate"),
            F.avg(days_open).alias("avg_resolve_days"),
        )
        .orderBy(F.desc("resolve_rate"))
    )

    performance.show()
    return performance


# 三维关联分析
def analyze_relations(ticket_df, customer_df, store_df):
    """Cross-analyze tickets along customer level, topic, and store type.

    Args:
        ticket_df: ticket DataFrame.
        customer_df: customer DataFrame.
        store_df: store DataFrame.

    Returns:
        DataFrame: ticket count and resolve rate per
        (level, topic, store_type) combination, sorted by count descending.
    """
    print("\n三维关联分析:")

    resolve_rate = F.avg(
        F.when(F.col("status") == "resolved", 1).otherwise(0)
    ).alias("resolve_rate")

    joined = ticket_df.join(customer_df, "customer_id").join(store_df, "store_id")
    analysis = (
        joined.groupBy("level", "topic", "store_type")
        .agg(
            F.count("ticket_id").alias("ticket_count"),
            resolve_rate,
        )
        .orderBy(F.desc("ticket_count"))
    )

    analysis.show()
    return analysis


# 可视化分析结果
def visualize_results(analysis_df, output_dir="output"):
    """Render the relation-analysis results as chart files on disk.

    Args:
        analysis_df: Spark DataFrame from analyze_relations; must contain
            "level", "topic", and "ticket_count" columns.
        output_dir: directory where PNG files are written (created if absent).
    """
    # exist_ok avoids the TOCTOU race between the existence check and
    # creation that the previous exists()/makedirs() pair had.
    os.makedirs(output_dir, exist_ok=True)

    # Collect to the driver as pandas for plotting; fine for small results.
    pd_df = analysis_df.toPandas()

    # Heatmap of ticket counts per customer level x topic.
    plt.figure(figsize=(12, 6))
    heatmap_data = pd_df.pivot_table(
        index="level",
        columns="topic",
        values="ticket_count",
        aggfunc="sum",
        fill_value=0
    )
    sns.heatmap(heatmap_data, annot=True, fmt="d", cmap="YlGnBu")
    # NOTE(review): matplotlib needs a CJK-capable font configured for the
    # Chinese title to render correctly — confirm on the target environment.
    plt.title("客户等级-主题分布热力图")
    plt.tight_layout()
    plt.savefig(os.path.join(output_dir, "customer_topic_heatmap.png"))
    plt.close()

    print(f"可视化图表已保存到 {output_dir} 目录")

def main():
    """主函数，执行分析流程"""
    # 初始化Spark
    spark = init_spark()

    try:
        # 准备数据（使用修正后的方法）
        store_df, customer_df, ticket_df = prepare_data(spark)

        # 各维度分析
        customer_stats, rfm = analyze_customers(ticket_df, customer_df)
        topic_dist = analyze_topics(ticket_df)
        store_perf = analyze_stores(ticket_df, store_df)
        full_analysis = analyze_relations(ticket_df, customer_df, store_df)

        # 可视化
        visualize_results(full_analysis)

        print("\n分析完成！")

    finally:
        # 关闭Spark会话
        spark.stop()


if __name__ == "__main__":
    main()
