from pyspark.sql import SparkSession
from pyspark.sql.functions import from_json, col, expr, explode, size, udf
from pyspark.sql.types import StructType, StructField, StringType, ArrayType, IntegerType
import json
import matplotlib.pyplot as plt
import seaborn as sns
from pyspark import SparkContext
from tqdm import tqdm


# Schema for the purchase_history JSON column (all scalars kept as strings;
# only item ids are typed as integers).
_PURCHASE_FIELDS = [
    ("items", ArrayType(StructType([StructField("id", IntegerType())]))),
    ("payment_method", StringType()),
    ("payment_status", StringType()),
    ("purchase_date", StringType()),
    ("avg_price", StringType()),
    ("categories", StringType()),
]
purchase_schema = StructType(
    [StructField(field_name, field_type) for field_name, field_type in _PURCHASE_FIELDS]
)

# Load the product catalog from disk (parsed once, on the driver).
_CATALOG_PATH = 'data/30G_data_new/product_catalog.json'
with open(_CATALOG_PATH, encoding='utf-8') as catalog_file:
    product_catalog = json.load(catalog_file)
print("开始初始化会话")


# Initialize the Spark session.
# NOTE(review): the original set "spark.memory.overhead", which is not a
# recognized Spark property, so the 8g overhead was silently ignored.  The
# documented key (Spark 2.3+) is "spark.executor.memoryOverhead".
spark = SparkSession.builder \
    .appName("MarketBasketAnalysis") \
    .config("spark.sql.parquet.enableVectorizedReader", "false") \
    .config("spark.executor.memory", "64g") \
    .config("spark.driver.memory", "64g") \
    .config("spark.executor.memoryOverhead", "8g") \
    .config("spark.sql.shuffle.partitions", "200") \
    .config("spark.executor.heartbeatInterval", "60s") \
    .config("spark.network.timeout", "120s") \
    .config("spark.python.worker.reuse", "true") \
    .getOrCreate()

print("[INFO] Spark会话初始化完成")



# Broadcast the product-catalog dict to executors.
# NOTE(review): product_bc is never referenced again in this file — the join
# below uses product_df instead; confirm it is unused before removing.
product_bc = spark.sparkContext.broadcast(product_catalog)
print("[INFO] 商品目录广播完成")
# Load the product catalog as a DataFrame: explode the top-level "products"
# array into one row per product with id / category / price columns.
product_df = spark.read.option("multiline", "true")\
    .json('data/30G_data_new/product_catalog.json')\
    .select(explode(col("products")).alias("product"))\
    .select("product.id", "product.category", "product.price")
print("[INFO] 商品目录加载完成")
# Read the purchase records from Parquet.
df = spark.read.parquet("data/30G_data_new/*.parquet")
print("[INFO] Parquet数据读取完成")
# Parse the purchase_history JSON column, explode the item list, left-join
# each item to its catalog row by id, then re-aggregate back to one row per
# order carrying the distinct category set, the max item price and the item
# count.
parsed_df = df.withColumn("purchase_data", from_json(col("purchase_history"), purchase_schema)) \
    .select(
        col("id"),
        col("purchase_data.payment_method"),
        col("purchase_data.payment_status"),
        col("purchase_data.purchase_date"),
        explode(col("purchase_data.items")).alias("item")
    ) \
    .join(
        product_df.select(
            col("id").alias("product_id"),
            "category",
            "price"
        ),
        expr("item.id = product_id"),
        "left"
    ) \
    .groupBy("id", "payment_method", "payment_status", "purchase_date") \
    .agg(
        expr("collect_set(category)").alias("categories"),
        expr("max(price)").alias("max_item_price"),
        expr("count(product_id)").alias("item_count")
    )
print("[INFO] JSON字段解析完成")
# Sanity check: verify that parsing actually produced rows.
print("[DEBUG] parsed_df数据量: ", parsed_df.count())
parsed_df.select("categories").show(5, truncate=False)
# Cache: parsed_df feeds several downstream actions in task2_analysis.
parsed_df.cache()

# Task 1: product association-rule analysis.
# Main-category constants: each entry maps a coarse category name to the set
# of fine-grained subcategories it covers (used to roll product categories up).
MAIN_CATEGORIES = [
    {'name':'电子产品', 'subcategories':{"平板电脑", "智能手机", "智能手表", "摄像机", "游戏机", "相机", "笔记本电脑", "耳机", "音响"}},
    {'name':'服装', 'subcategories':{"上衣", "内衣", "帽子", "外套", "围巾", "裙子", "裤子", "鞋子", "手套"}},
    {'name':'食品', 'subcategories':{"米面", "蛋奶", "水果", "蔬菜", "肉类", "水产", "零食", "饮料", "调味品"}},
    {'name':'家居', 'subcategories':{"家具", "床上用品", "厨具", "卫浴用品"}},
    {'name':'办公', 'subcategories':{"办公用品", "文具"}},
    {'name':'运动户外', 'subcategories':{"健身器材", "户外装备"}},
    {'name':'玩具', 'subcategories':{"玩具", "益智玩具", "模型"}},
    {'name':'母婴', 'subcategories':{"婴儿用品", "儿童课外读物"}},
    {'name':'汽车用品', 'subcategories':{"汽车装饰", "车载电子"}}
]

# Reverse lookup table (subcategory -> main-category name), built once so
# every UDF invocation is an O(1) dict lookup instead of a linear scan over
# MAIN_CATEGORIES.
_SUBCATEGORY_TO_MAIN = {
    sub: main_cat['name']
    for main_cat in MAIN_CATEGORIES
    for sub in main_cat['subcategories']
}


def _map_to_main_category(category):
    """Return the main category for *category*, or '其他' if unknown."""
    return _SUBCATEGORY_TO_MAIN.get(category, '其他')


# Spark UDF wrapper.  (The original rebound the plain function's own name to
# the UDF, shadowing the pure-Python version; keep them distinct.)
category_mapping_udf = udf(_map_to_main_category, StringType())

# 任务2：支付方式分析
def task2_analysis():
    """Task 2: payment-method analysis.

    Pipeline:
      1. Report preferred payment methods for high-value orders
         (max item price > 5000).
      2. Mine association rules between payment methods and main product
         categories with FP-Growth.
      3. Persist rules whose antecedent contains a payment method and whose
         consequent contains none (payment -> category rules only).
      4. Plot the payment-method distribution.

    Side effects: writes JSON under output/, saves a PNG chart, and prints
    intermediate statistics to stdout.
    """
    from functools import reduce
    from pyspark.sql.functions import col, array_contains
    from pyspark.ml.fpm import FPGrowth

    # 7 steps total (the original mutated pbar.total mid-run but only fired
    # 6 updates, so the bar never completed).
    with tqdm(total=7, desc='支付方式分析') as pbar:
        # --- High-value orders: preferred payment method ---
        high_value = parsed_df.filter(col("max_item_price") > 5000)
        payment_preference = (
            high_value.groupBy("payment_method").count()
            .orderBy(col("count").desc())
        )
        print("\n高价值商品首选支付方式:")
        payment_preference.show(truncate=False)
        pbar.update(1)

        # --- Roll subcategories up to main categories ---
        exploded_df = parsed_df.withColumn("category", explode("categories"))
        mapped_df = exploded_df.withColumn(
            "main_category", category_mapping_udf(col("category")))
        pbar.update(1)

        # Build FP-Growth transactions: one distinct itemset per
        # (order id, main category) containing payment method(s) + category.
        payment_trans = mapped_df.groupBy("id", "main_category").agg(
            expr("array_distinct(flatten(collect_list("
                 "array(payment_method, main_category))))").alias("items"))
        payment_trans.show(5, truncate=False)
        # Count once and reuse — the original triggered this expensive Spark
        # action twice for the same value.
        trans_count = payment_trans.count()
        print(f"payment_trans size: {trans_count}")
        pbar.update(1)

        fp_growth = FPGrowth(itemsCol="items", minSupport=0.01, minConfidence=0.6)
        model = fp_growth.fit(payment_trans)
        pbar.update(1)

        rules = model.associationRules
        print(f"model.associationRules size: {rules.count()}")
        rules.write.mode("overwrite").json("output/task2_rules")
        pbar.update(1)

        print(f"payment_trans size: {trans_count}")
        payment_methods = ["信用卡", "储蓄卡", "微信支付", "现金", "支付宝", "银联", "云闪付"]
        # Antecedent must contain at least one payment method ...
        antecedent_has_payment = reduce(
            lambda acc, cond: acc | cond,
            [array_contains(col("antecedent"), m) for m in payment_methods])
        # ... and the consequent must contain none of them.
        consequent_no_payment = reduce(
            lambda acc, cond: acc & cond,
            [~array_contains(col("consequent"), m) for m in payment_methods])
        filtered_rules = (
            rules.filter(antecedent_has_payment)
            .filter(consequent_no_payment)
            .orderBy(col("confidence").desc())
        )
        if not filtered_rules.isEmpty():
            # Merge to a single output file (suitable for small result sets).
            filtered_rules.coalesce(1).write.mode("overwrite").json(
                "output/filtered_rules_single")
        else:
            print("警告：过滤后的关联规则为空，未生成文件")
        pbar.update(1)

        # --- Visualize payment-method distribution ---
        payment_dist = (
            mapped_df.groupBy("payment_method", "main_category")
            .count().toPandas()
        )
        plt.figure(figsize=(10, 6))
        sns.barplot(x='payment_method', y='count', data=payment_dist)
        plt.savefig('output/payment_dist.png')
        plt.close()  # release the figure — the original leaked it
        pbar.update(1)




if __name__ == "__main__":
    try:
        task2_analysis()
    except Exception as e:
        print(f"[ERROR] 执行失败: {str(e)}")
        # Signal failure; Spark is released exactly once by the finally
        # block below (the original called spark.stop() in both the except
        # and finally paths, and used the site builtin exit()).
        raise SystemExit(1)
    finally:
        print("\n[INFO] 正在释放Spark资源...")
        spark.stop()



