import json
import sys

import matplotlib.pyplot as plt
import seaborn as sns
from pyspark import SparkContext
from pyspark.sql import SparkSession
from pyspark.sql.functions import from_json, col, expr, explode, size, udf
from pyspark.sql.types import StructType, StructField, StringType, ArrayType, IntegerType
from tqdm import tqdm


# Schema for the purchase_history JSON column: a list of purchased item ids
# plus order-level attributes (all stored as strings in the raw data).
_PURCHASE_FIELDS = [
    StructField("items", ArrayType(StructType([StructField("id", IntegerType())]))),
    StructField("payment_method", StringType()),
    StructField("payment_status", StringType()),
    StructField("purchase_date", StringType()),
    StructField("avg_price", StringType()),
    StructField("categories", StringType()),
]
purchase_schema = StructType(_PURCHASE_FIELDS)

# Load the product catalog into a plain dict (later broadcast to executors).
with open('data/30G_data_new/product_catalog.json', encoding='utf-8') as catalog_file:
    product_catalog = json.load(catalog_file)
print("开始初始化会话")


# 初始化Spark会话
spark = SparkSession.builder \
    .appName("MarketBasketAnalysis") \
    .config("spark.sql.parquet.enableVectorizedReader", "false") \
    .config("spark.executor.memory", "64g") \
    .config("spark.driver.memory", "64g") \
    .config("spark.memory.overhead", "8g") \
    .config("spark.sql.shuffle.partitions", "200") \
    .config("spark.executor.heartbeatInterval", "60s") \
    .config("spark.network.timeout", "120s") \
    .config("spark.python.worker.reuse", "true") \
    .getOrCreate()

print("[INFO] Spark会话初始化完成")
# 初始化带进度监控的Spark会话
 # 总阶段数：数据加载+4个任务+结果保存



# Broadcast the raw catalog dict to the executors.
# NOTE(review): product_bc does not appear to be referenced anywhere in this
# file — confirm it is needed before removing.
_sc = spark.sparkContext
product_bc = _sc.broadcast(product_catalog)
print("[INFO] 商品目录广播完成")
# Load the product catalog as a DataFrame of (id, category, price):
# the file holds one object with a "products" array, so explode it into rows.
_catalog_reader = spark.read.option("multiline", "true")
product_df = (
    _catalog_reader.json('data/30G_data_new/product_catalog.json')
    .select(explode(col("products")).alias("product"))
    .select("product.id", "product.category", "product.price")
)
print("[INFO] 商品目录加载完成")

# Read the raw user purchase records.
df = spark.read.parquet("data/30G_data_new/*.parquet")
print("[INFO] Parquet数据读取完成")
# Parse the purchase_history JSON column, explode the item list, join each
# item to the product catalog to recover its category, then collapse back to
# one row per order carrying the set of categories purchased.
parsed_df = df.withColumn("purchase_data", from_json(col("purchase_history"), purchase_schema)) \
    .select(
        col("id"),
        col("user_name"),
        col("purchase_data.payment_method"),
        col("purchase_data.payment_status"),
        col("purchase_data.purchase_date"),
        explode(col("purchase_data.items")).alias("item")
    ) \
    .join(
        product_df.select(
            col("id").alias("product_id"),
            "category",
            "price"
        ),
        expr("item.id = product_id"),
        "left"
    ) \
    .groupBy("id", "payment_method", "payment_status", "purchase_date", "user_name") \
    .agg(
        expr("collect_set(category)").alias("categories")
    )
print("[INFO] JSON字段解析完成")

# BUG FIX: cache BEFORE the debug actions. The original called count()/show()
# first and cache() last, so the debug actions recomputed the whole uncached
# plan and the cache was only populated by the later analysis run. Caching
# first lets count() materialize the cache that show() and the tasks reuse.
parsed_df.cache()
print("[DEBUG] parsed_df数据量: ", parsed_df.count())
parsed_df.select("categories").show(5, truncate=False)

# Task 1: product association analysis.
# Main-category taxonomy: each fine-grained subcategory belongs to one
# top-level category.
MAIN_CATEGORIES = [
    {'name':'电子产品', 'subcategories':{"平板电脑", "智能手机", "智能手表", "摄像机", "游戏机", "相机", "笔记本电脑", "耳机", "音响"}},
    {'name':'服装', 'subcategories':{"上衣", "内衣", "帽子", "外套", "围巾", "裙子", "裤子", "鞋子", "手套"}},
    {'name':'食品', 'subcategories':{"米面", "蛋奶", "水果", "蔬菜", "肉类", "水产", "零食", "饮料", "调味品"}},
    {'name':'家居', 'subcategories':{"家具", "床上用品", "厨具", "卫浴用品"}},
    {'name':'办公', 'subcategories':{"办公用品", "文具"}},
    {'name':'运动户外', 'subcategories':{"健身器材", "户外装备"}},
    {'name':'玩具', 'subcategories':{"玩具", "益智玩具", "模型"}},
    {'name':'母婴', 'subcategories':{"婴儿用品", "儿童课外读物"}},
    {'name':'汽车用品', 'subcategories':{"汽车装饰", "车载电子"}}
]

# Flat lookup table built once: subcategory -> main category name.
# O(1) per row instead of the original linear scan over MAIN_CATEGORIES.
_SUBCATEGORY_TO_MAIN = {
    sub: main_cat['name']
    for main_cat in MAIN_CATEGORIES
    for sub in main_cat['subcategories']
}


def _map_category(category):
    """Return the main category for *category*, or '其他' when unknown."""
    return _SUBCATEGORY_TO_MAIN.get(category, '其他')


# Keep the original public name: a Spark UDF for DataFrame expressions.
# (The original defined a plain function named category_mapping_udf and then
# shadowed it by rebinding the same name to the UDF; the helper now has a
# distinct private name.)
category_mapping_udf = udf(_map_category, StringType())


def task3_analysis():
    """Task 3: temporal purchase-pattern analysis.

    Aggregates purchases per main category by quarter and by month, derives
    per-user category transitions (previous purchase -> current purchase),
    writes all three results as JSON under output/, and saves quarterly and
    monthly trend line charts as PNG.

    Relies on the module-level ``parsed_df`` (cached) and
    ``category_mapping_udf``.
    """
    from pyspark.sql.functions import to_date, month, quarter, explode, lag
    from pyspark.sql.window import Window

    # Derive month/quarter columns from the textual purchase date.
    order_time_df = parsed_df.withColumn("purchase_date", to_date("purchase_date")) \
        .withColumn("purchase_month", month("purchase_date")) \
        .withColumn("purchase_quarter", quarter("purchase_date"))

    # One row per (order, category), tagged with its main category.
    exploded_df = order_time_df.withColumn("category", explode("categories")) \
        .withColumn("main_category", category_mapping_udf(col("category")))

    # Quarterly counts per main category (long form + pivoted wide form).
    quarterly = exploded_df.groupBy("purchase_quarter", "main_category").count()
    quarterly_pivot = quarterly.groupBy("main_category").pivot("purchase_quarter").sum("count")

    # Monthly counts per main category.
    monthly = exploded_df.groupBy("purchase_month", "main_category").count()
    monthly_pivot = monthly.groupBy("main_category").pivot("purchase_month").sum("count")

    # BUG FIX: the original selected "user_name" twice here, producing a
    # duplicate column that makes later references to user_name ambiguous.
    category_per_row_df = exploded_df.select("user_name", "purchase_date", "main_category")

    # Sequential pattern: pair each purchase's main category with the user's
    # previous purchase's main category, ordered by date.
    window_spec = Window.partitionBy("user_name").orderBy("purchase_date")
    sequential_df = category_per_row_df \
        .withColumn("prev_main_category", lag("main_category", 1).over(window_spec)) \
        .filter(col("prev_main_category").isNotNull())
    sequential_pattern = sequential_df.groupBy("prev_main_category", "main_category").count()

    # Persist all three results.
    sequential_pattern.write.mode("overwrite").json("output/task3_sequential")
    quarterly_pivot.write.mode("overwrite").json("output/task3_quarterly")
    monthly_pivot.write.mode("overwrite").json("output/task3_monthly")

    # Configure a CJK-capable font so the Chinese chart labels render
    # (WenQuanYi Zen Hei preferred, then Noto, then SimHei).
    plt.rcParams['font.sans-serif'] = ['WenQuanYi Zen Hei', 'Noto Sans CJK SC', 'SimHei']
    plt.rcParams['axes.unicode_minus'] = False  # render minus signs correctly

    # Quarterly trend chart.
    pd_df = quarterly.toPandas()
    plt.figure(figsize=(12, 6))
    sns.lineplot(x='purchase_quarter', y='count', hue='main_category', data=pd_df)
    plt.title('季度购买趋势')
    plt.savefig('output/quarterly_trend.png', bbox_inches='tight', dpi=300)
    plt.close()  # release the figure so figures don't accumulate in memory

    # Monthly trend chart.
    pd_df = monthly.toPandas()
    plt.figure(figsize=(12, 6))
    sns.lineplot(x='purchase_month', y='count', hue='main_category', data=pd_df)
    plt.title('月度购买趋势')
    plt.savefig('output/monthly_trend.png', bbox_inches='tight', dpi=300)
    plt.close()



if __name__ == "__main__":
    # Run the analysis; always release Spark resources exactly once.
    # BUG FIX: the original called spark.stop() in the except branch AND in
    # finally (a double stop), and used the bare exit() builtin.
    exit_code = 0
    try:
        task3_analysis()
    except Exception as e:
        print(f"[ERROR] 执行失败: {str(e)}")
        exit_code = 1
    finally:
        print("\n[INFO] 正在释放Spark资源...")
        spark.stop()
    if exit_code:
        sys.exit(exit_code)



