import json
import sys

import matplotlib.pyplot as plt
import seaborn as sns
from pyspark import SparkContext
from pyspark.sql import SparkSession
from pyspark.sql.functions import col, explode, expr, from_json, size, udf
from pyspark.sql.types import (
    ArrayType,
    IntegerType,
    StringType,
    StructField,
    StructType,
)
from tqdm import tqdm


# 定义purchase_history的schema
# Schema for the JSON payload stored in the `purchase_history` column.
# `items` is an array of objects carrying only the product id; every other
# field is kept as a raw string.
_PURCHASE_STRING_FIELDS = (
    "payment_method",
    "payment_status",
    "purchase_date",
    "avg_price",
    "categories",
)
purchase_schema = StructType(
    [StructField("items", ArrayType(StructType([StructField("id", IntegerType())])))]
    + [StructField(field_name, StringType()) for field_name in _PURCHASE_STRING_FIELDS]
)

# 加载商品目录
# Load the raw product catalog once on the driver (used for the broadcast below).
with open('data/30G_data_new/product_catalog.json', encoding='utf-8') as f:
    product_catalog = json.load(f)
print("开始初始化会话")
# Initialize the Spark session.
# FIX: the original set "spark.memory.overhead", which is not a valid Spark
# configuration key and was silently ignored; the correct key for extra
# off-heap executor memory is "spark.executor.memoryOverhead".
spark = SparkSession.builder \
    .appName("MarketBasketAnalysis") \
    .config("spark.sql.parquet.enableVectorizedReader", "false") \
    .config("spark.executor.memory", "64g") \
    .config("spark.driver.memory", "64g") \
    .config("spark.executor.memoryOverhead", "8g") \
    .config("spark.sql.shuffle.partitions", "200") \
    .config("spark.executor.heartbeatInterval", "60s") \
    .config("spark.network.timeout", "120s") \
    .config("spark.python.worker.reuse", "true") \
    .getOrCreate()

print("[INFO] Spark会话初始化完成")

# Broadcast the catalog mapping to the executors.
# NOTE(review): product_bc is never referenced anywhere else in this file —
# the catalog is re-read as a DataFrame below instead. Kept so any external
# code importing this module still finds the name, but it is a candidate
# for removal.
product_bc = spark.sparkContext.broadcast(product_catalog)
print("[INFO] 商品目录广播完成")
# 加载商品目录为DataFrame
# Load the product catalog as a DataFrame: one row per product (id, category, price).
product_df = spark.read.option("multiline", "true")\
    .json('data/30G_data_new/product_catalog.json')\
    .select(explode(col("products")).alias("product"))\
    .select("product.id", "product.category", "product.price")
print("[INFO] 商品目录加载完成")
# Read the purchase records from the Parquet files.
df = spark.read.parquet("data/30G_data_new/*.parquet")
print("[INFO] Parquet数据读取完成")
# Parse the purchase_history JSON column, explode the purchased items,
# join each item id against the catalog to resolve its category, and
# collect the distinct category set per user id. Note the join is an
# inner join: items whose id is missing from the catalog are dropped.
parsed_df = df.withColumn("purchase_data", 
    from_json(col("purchase_history"), purchase_schema)) \
    .select(
        col("id"),
        expr("purchase_data.items").alias("items"),
        col("purchase_data.payment_method").alias("payment_method"),
        col("purchase_data.payment_status").alias("payment_status"),
        col("purchase_data.purchase_date").alias("purchase_date")
    ) \
    .withColumn("item", expr("explode(items)")) \
    .join(product_df.select(col("id").alias("item_id"), "category"), expr("item.id = item_id")) \
    .groupBy("id") \
    .agg(expr("collect_set(category)").alias("categories"))
print("[INFO] JSON字段解析完成")
# FIX: cache BEFORE the debug actions below. The original cached after
# count()/show(), so every one of those actions re-executed the whole
# parse + join + aggregate pipeline from scratch.
parsed_df.cache()
# Sanity checks: row count, column names, and a small sample.
print("[DEBUG] parsed_df数据量: ", parsed_df.count())
print("列名:", parsed_df.columns)
parsed_df.show(5)
parsed_df.select("categories").show(5, truncate=False)
# 在MAIN_CATEGORIES定义后立即添加映射函数
def _map_category_to_main(category):
    """Map a fine-grained category to its main-category name.

    Scans MAIN_CATEGORIES (defined below; resolved lazily at call time,
    which is why the forward reference works) and returns the name of the
    first main category whose subcategory set contains *category*, or
    '其他' ("other") when no main category matches.
    """
    for main_cat in MAIN_CATEGORIES:
        if category in main_cat['subcategories']:
            return main_cat['name']
    return '其他'

# Spark UDF wrapper; keeps the public name used by task1_analysis.
# FIX: the original defined a function named category_mapping_udf and then
# rebound the same name to its UDF wrapper, shadowing the plain function
# and making the raw mapper uncallable/untestable.
category_mapping_udf = udf(_map_category_to_main, StringType())
# 任务1：商品关联规则分析
# 在文件顶部定义电子产品类别常量
# Main category -> the set of fine-grained subcategories it covers.
# Dict insertion order is preserved (Python 3.7+), so the derived list
# below keeps exactly this ordering.
_MAIN_CATEGORY_TABLE = {
    '电子产品': {"平板电脑", "智能手机", "智能手表", "摄像机", "游戏机", "相机", "笔记本电脑", "耳机", "音响"},
    '服装': {"上衣", "内衣", "帽子", "外套", "围巾", "裙子", "裤子", "鞋子", "手套"},
    '食品': {"米面", "蛋奶", "水果", "蔬菜", "肉类", "水产", "零食", "饮料", "调味品"},
    '家居': {"家具", "床上用品", "厨具", "卫浴用品"},
    '办公': {"办公用品", "文具"},
    '运动户外': {"健身器材", "户外装备"},
    '玩具': {"玩具", "益智玩具", "模型"},
    '母婴': {"婴儿用品", "儿童课外读物"},
    '汽车用品': {"汽车装饰", "车载电子"},
}
MAIN_CATEGORIES = [
    {'name': main_name, 'subcategories': subcats}
    for main_name, subcats in _MAIN_CATEGORY_TABLE.items()
]
# 修改task1_analysis函数
def task1_analysis():
    """Task 1: association-rule mining over main product categories.

    Builds a per-user basket of distinct main categories, mines frequent
    itemsets and association rules with FPGrowth, writes all itemsets and
    rules to ./output, then writes the subset of rules that involve the
    '电子产品' (electronics) main category, sorted strongest-first.
    """
    from pyspark.ml.fpm import FPGrowth

    # FIX: total must match the sum of pbar.update() calls below
    # (1 + 2 + 1 + 1 + 1 + 1 = 7); the original declared total=6.
    with tqdm(total=7, desc='商品关联分析') as pbar:
        # Per-user basket: explode the category set, map each fine-grained
        # category to its main category, and deduplicate per user.
        order_categories = parsed_df \
            .withColumn("category", explode(col("categories"))) \
            .withColumn("main_category", category_mapping_udf(col("category"))) \
            .groupBy("id") \
            .agg(expr("array_distinct(collect_list(main_category))").alias("items"))
        pbar.update(1)
        print("[INFO] 订单级商品类别提取完成")
        print("[DEBUG] order_categories数据量: ", order_categories.count())
        order_categories.select("items").show(5, truncate=False)
        # Baskets with fewer than two distinct main categories cannot yield
        # association rules, so drop them up front.
        filtered_orders = order_categories.filter(size(col("items")) >= 2)
        print("[DEBUG] 过滤后的数据量: ", filtered_orders.count())
        fp_growth = FPGrowth(
            itemsCol="items",
            minSupport=0.02,
            minConfidence=0.5,
            numPartitions=32  # parallelism hint only; does not affect results
        )
        model = fp_growth.fit(filtered_orders)
        pbar.update(2)
        print("[INFO] FPGrowth模型训练完成")
        # Persist every frequent itemset and every rule.
        all_frequent_itemsets = model.freqItemsets
        print("[INFO] 频繁项集数量: ", all_frequent_itemsets.count())
        all_rules = model.associationRules
        print("[INFO] 关联规则数量: ", all_rules.count())
        all_frequent_itemsets.write.mode("overwrite").json("output/task1_main_freq_itemsets")
        all_rules.write.mode("overwrite").json("output/task1_main_all_rules")
        pbar.update(1)

        # Rules mentioning electronics on either side.
        # FIX: the original RDD lambda evaluated any('电子产品' in (...)),
        # i.e. any() applied to a single bool, which raises
        # "TypeError: 'bool' object is not iterable" on the executors.
        # A DataFrame filter is correct and also avoids the rdd->toDF
        # round trip, which itself fails when the filtered RDD is empty.
        electronic_rules = all_rules.filter(
            expr("array_contains(antecedent, '电子产品') "
                 "OR array_contains(consequent, '电子产品')")
        )
        pbar.update(1)

        # Human-readable report, strongest rules first.
        result_df = electronic_rules.select(
            expr("concat_ws(' -> ', antecedent)").alias("前项"),
            expr("concat_ws(' + ', consequent)").alias("后项"),
            col("support"),
            col("confidence"),
            col("lift")
        ).orderBy(col("support").desc(), col("confidence").desc())
        pbar.update(1)

        # Persist the electronics-related rules.
        result_df.write.mode("overwrite").json("output/task1_maincategory_rules")
        pbar.update(1)
    


if __name__ == "__main__":
    # FIX: the original called spark.stop() in both the except block and the
    # finally block, stopping the session twice on failure (exit(1) raises
    # SystemExit, after which finally still runs). The finally block now owns
    # the single stop, and the exit code is applied afterwards via sys.exit.
    exit_code = 0
    try:
        task1_analysis()
    except Exception as e:
        print(f"[ERROR] 执行失败: {str(e)}")
        exit_code = 1
    finally:
        print("\n[INFO] 正在释放Spark资源...")
        spark.stop()
    if exit_code:
        sys.exit(exit_code)



