import pandas as pd
import os
from tqdm import tqdm

# --- 1. Define constants and paths ---
INPUT_DIR = r"D:\processed_data"  # directory holding the pre-processed input shards
OUTPUT_DIR = r"D:\cleaned_data"  # directory for the cleaned output shards
os.makedirs(OUTPUT_DIR, exist_ok=True)

# --- 2. Main processing loop ---
# For each shard processed_{i:05d}.parquet: keep only the user id and the
# purchase category (renamed to 'user_id' / 'categories'), drop rows with a
# missing category, and write the result to cleaned_{i:05d}.parquet.
for i in tqdm(range(8)):
    input_path = os.path.join(INPUT_DIR, f"processed_{i:05d}.parquet")
    output_path = os.path.join(OUTPUT_DIR, f"cleaned_{i:05d}.parquet")

    df = pd.read_parquet(input_path)
    initial_count = len(df)

    # Select + rename in one step; rename() returns a new frame, so no copy() needed.
    df_cleaned = df[["id", "purchase_category"]].rename(
        columns={"id": "user_id", "purchase_category": "categories"}
    )

    # Basic cleaning: drop rows whose category is missing.
    df_cleaned = df_cleaned[df_cleaned["categories"].notna()]

    df_cleaned[["user_id", "categories"]].to_parquet(output_path)

    # Guard against ZeroDivisionError when a shard is empty
    # (the original divided by initial_count unconditionally).
    filter_rate = (1 - len(df_cleaned) / initial_count) if initial_count else 0.0
    print(
        f"文件 {i}: 原始记录={initial_count} | "
        f"有效记录={len(df_cleaned)} | "
        f"过滤率={filter_rate:.1%}"
    )

print("\n所有文件处理完成！清洗后的数据保存在:", OUTPUT_DIR)



import pandas as pd
from mlxtend.frequent_patterns import apriori, association_rules
import os
import logging
import json
import glob # 导入 glob 库用于查找文件
import ast # 导入 ast 库用于安全地解析字符串列表

# --- 1. Define constants and paths ---
INPUT_DIR = r"D:\cleaned_data" # input directory (output of the cleaning step above)
# PRODUCT_CATALOG_PATH = r"D:\product_catalog.json\product_catalog.json" # product catalog no longer needed here
OUTPUT_DIR = r"D:\analysis_results_cleaned_data_large_categories" # dedicated output directory
os.makedirs(OUTPUT_DIR, exist_ok=True)

# --- 2. Configure logging ---
# Logs go both to a file inside OUTPUT_DIR and to the console.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s',
    handlers=[
        logging.FileHandler(os.path.join(OUTPUT_DIR, 'analysis_cleaned_large_categories.log')), # per-analysis log file
        logging.StreamHandler()
    ]
)

# --- 3. Large-category mapping rules ---
# Maps each specific category name to its large (top-level) category.
# Assumes the 'categories' column contains these specific category names.
CATEGORY_TO_LARGE_CATEGORY_MAP = {
    '智能手机': '电子产品', '笔记本电脑': '电子产品', '平板电脑': '电子产品',
    '智能手表': '电子产品', '耳机': '电子产品', '音响': '电子产品',
    '相机': '电子产品', '摄像机': '电子产品', '游戏机': '电子产品',

    '上衣': '服装', '裤子': '服装', '裙子': '服装', '内衣': '服装',
    '鞋子': '服装', '服装': '服装', '帽子': '服装', '手套': '服装', '围巾': '服装', '外套': '服装', # '服装' intentionally maps to itself so the umbrella term survives mapping

    '零食': '食品', '饮料': '食品', '调味品': '食品', '米面': '食品',
    '水产': '食品', '肉类': '食品', '蛋奶': '食品', '水果': '食品', '蔬菜': '食品',

    '家具': '家居', '床上用品': '家居', '厨具': '家居', '卫浴用品': '家居',

    '文具': '办公', '办公用品': '办公',

    '健身器材': '运动户外', '户外装备': '运动户外',

    '玩具': '玩具', '模型': '玩具', '益智玩具': '玩具',

    '婴儿用品': '母婴', '儿童课外读物': '母婴',

    '车载电子': '汽车用品', '汽车装饰': '汽车用品',

    # Specific categories absent from this map are dropped during mapping.
    # Add an explicit entry if a catch-all bucket is ever needed, e.g.:
    # '未知类别': '其他'
}

# --- 4. 数据读取与处理 (处理目录) ---
def read_and_process_data_from_directory(directory_path, category_map):
    """Read every .parquet file in a directory and build transaction lists.

    Each input row must provide 'user_id' and 'categories'. The 'categories'
    values are normalized to Python lists (string-serialized lists are parsed
    with ast.literal_eval), mapped to large categories via *category_map*,
    grouped per user, and deduplicated.

    Args:
        directory_path (str): directory containing cleaned .parquet files.
        category_map (dict): specific category -> large category mapping.

    Returns:
        list[list[str]] | None: one deduplicated large-category list per
        user, or None when no usable data could be read or processed.
    """
    all_files = glob.glob(os.path.join(directory_path, "*.parquet"))
    if not all_files:
        logging.error(f"在目录 {directory_path} 中未找到任何 .parquet 文件。")
        return None

    list_df = []
    for file_path in all_files:
        try:
            logging.info(f"读取文件: {file_path}")
            df = pd.read_parquet(file_path)
            list_df.append(df)
        except Exception as e:
            logging.warning(f"读取文件失败 {file_path}: {str(e)}")
            continue

    if not list_df:
        logging.error("未能成功读取任何数据文件。")
        return None

    df_transactions = pd.concat(list_df, ignore_index=True)
    logging.info(f"成功读取并合并 {len(list_df)} 个文件，总记录数: {len(df_transactions)}")

    try:
        # Validate that the required columns are present.
        required_transaction_columns = ['user_id', 'categories']
        if not set(required_transaction_columns).issubset(df_transactions.columns):
            missing = set(required_transaction_columns) - set(df_transactions.columns)
            logging.error(f"合并后的交易数据缺少必要列: {missing}。实际列包括: {df_transactions.columns.tolist()}")
            return None

        # --- Normalize 'categories' so every entry is a Python list ---
        if 'categories' in df_transactions.columns and not df_transactions.empty:
            logging.info(f"检查并处理 'categories' 列的数据类型和示例值...")
            # Log type/value of the first few rows (before normalization).
            for i in range(min(5, len(df_transactions))):
                 logging.info(f"处理前 - 第 {i} 行 'categories' 列的数据类型: {type(df_transactions['categories'].iloc[i])}, 值: {df_transactions['categories'].iloc[i]}")

            def parse_categories_if_string(categories_entry):
                # Strings may hold a serialized list; parse them safely.
                if isinstance(categories_entry, str):
                    try:
                        parsed_list = ast.literal_eval(categories_entry)
                        if isinstance(parsed_list, list):
                            return parsed_list
                        # Parsed but not a list: wrap the scalar in a list.
                        return [parsed_list]
                    except (ValueError, SyntaxError):
                        # Unparseable string: treat it as a single category.
                        logging.warning(f"无法解析字符串格式的类别，将其视为单个类别: {categories_entry}")
                        return [categories_entry]
                elif isinstance(categories_entry, list):
                    return categories_entry
                else:
                    # Unknown type: warn and drop the entry.
                    logging.warning(f"遇到未知类型的 'categories' 条目: {type(categories_entry)}, 值: {categories_entry}")
                    return []

            df_transactions['categories'] = df_transactions['categories'].apply(parse_categories_if_string)
            logging.info("尝试将 'categories' 列转换为列表格式完成。")

            # Log type/value of the first few rows (after normalization).
            logging.info(f"检查处理后 'categories' 列的数据类型和示例值...")
            for i in range(min(5, len(df_transactions))):
                 logging.info(f"处理后 - 第 {i} 行 'categories' 列的数据类型: {type(df_transactions['categories'].iloc[i])}, 值: {df_transactions['categories'].iloc[i]}")

            # Debug aid: log every distinct specific category seen in the data.
            all_specific_categories = [item for sublist in df_transactions['categories'].tolist() for item in sublist]
            unique_specific_categories = sorted(set(all_specific_categories))
            logging.info(f"\n--- 数据中包含的唯一具体类别名称 ({len(unique_specific_categories)} 个): ---")
            # Print in batches to keep individual log lines short.
            batch_size = 20
            for i in range(0, len(unique_specific_categories), batch_size):
                logging.info(unique_specific_categories[i:i+batch_size])
            logging.info("-----------------------------------------------------------------")

        def map_categories_to_large_categories(categories_list, category_map):
            # Map each specific category to its large category, dropping
            # unmapped entries and deduplicating the result.
            if not isinstance(categories_list, list):
                return []
            large_categories = [category_map.get(cat) for cat in categories_list]
            return list(set(cat for cat in large_categories if cat is not None))

        df_transactions['large_categories'] = df_transactions['categories'].apply(
            lambda x: map_categories_to_large_categories(x, category_map)
        )

        # Debug aid: sample the mapped values.
        if 'large_categories' in df_transactions.columns and not df_transactions.empty:
             logging.info(f"检查映射后 'large_categories' 列的示例值...")
             for i in range(min(5, len(df_transactions))):
                  logging.info(f"第 {i} 行映射后 'large_categories' 列的值: {df_transactions['large_categories'].iloc[i]}")

        # Drop records that did not map to any large category.
        df_transactions = df_transactions[df_transactions['large_categories'].apply(lambda x: len(x) > 0)]
        logging.info(f"过滤掉未映射到任何大类的记录后，剩余 {len(df_transactions)} 条记录")

        # --- Build transactions ---
        # One transaction per user_id. NOTE: the original used sum(x, []),
        # which is O(n^2) per group; a flat comprehension is linear.
        transactions = df_transactions.groupby('user_id')['large_categories'].agg(
            lambda lists: [cat for sub in lists for cat in sub]
        ).tolist()

        # Deduplicate large categories inside each transaction (a user may
        # have bought several items from the same large category).
        transactions = [list(set(transaction)) for transaction in transactions]

        # Discard empty transactions.
        transactions = [transaction for transaction in transactions if transaction]

        logging.info(f"构建事务完成，共 {len(transactions)} 个事务")

        return transactions

    except Exception as e:
        logging.error(f"数据处理失败: {str(e)}")
        return None

# --- 5. 关联规则挖掘 ---
def perform_association_rule_mining(transactions, min_support=0.005, min_confidence=0.2):
    """One-hot encode the transactions, mine frequent itemsets with apriori,
    and derive association rules filtered by confidence.

    Returns (frequent_itemsets, rules) as DataFrames; empty frames when
    there is nothing to mine, and (None, None) on failure.
    """
    try:
        from mlxtend.preprocessing import TransactionEncoder

        if not transactions:
            logging.warning("没有有效的事务数据进行关联规则挖掘。")
            return pd.DataFrame(), pd.DataFrame()

        # One-hot encode each transaction into a boolean matrix.
        encoder = TransactionEncoder()
        onehot = encoder.fit(transactions).transform(transactions)
        basket = pd.DataFrame(onehot, columns=encoder.columns_)
        logging.info(f"事务数据编码完成，维度: {basket.shape}")

        # Mine frequent itemsets.
        logging.info(f"开始挖掘频繁项集，min_support={min_support}")
        frequent_itemsets = apriori(
            basket,
            min_support=min_support,
            use_colnames=True,
            low_memory=True,
        )
        logging.info(f"发现频繁项集: {len(frequent_itemsets)}个")

        # No frequent itemsets -> nothing to derive rules from.
        if frequent_itemsets.empty:
            logging.info("未发现频繁项集，无法生成关联规则。")
            return frequent_itemsets, pd.DataFrame()

        # Generate rules filtered on confidence.
        logging.info(f"开始生成关联规则，min_confidence={min_confidence}")
        rules = association_rules(
            frequent_itemsets,
            metric="confidence",
            min_threshold=min_confidence,
        )
        logging.info(f"发现关联规则: {len(rules)}条")
        return frequent_itemsets, rules

    except MemoryError:
        logging.error("内存不足，请尝试增大内存或减小min_support")
        return None, None
    except Exception as e:
        logging.error(f"关联规则挖掘失败: {str(e)}")
        return None, None

# --- 6. 可视化函数 (保留关联规则可视化) ---
def visualize_association_rules(rules, output_dir, title="关联规则"):
    """
    Visualize association rules as a scatter plot and a network graph.

    Saves both figures as PNG files under *output_dir* and prints the top
    rules (sorted by lift) to stdout.

    Args:
        rules (pd.DataFrame): association rules (expects the mlxtend columns
            'antecedents', 'consequents', 'support', 'confidence', 'lift').
        output_dir (str): directory for the generated PNG files.
        title (str): chart title, also used as the output file-name prefix.
    """
    if rules is None or rules.empty:
        logging.info("没有关联规则可以可视化.")
        return

    import matplotlib.pyplot as plt
    import seaborn as sns
    # Configure matplotlib for Chinese text rendering.
    plt.rcParams['font.sans-serif'] = ['SimHei'] # default font with CJK glyphs
    plt.rcParams['axes.unicode_minus'] = False # keep '-' from rendering as a box when saving

    # Scatter plot: support vs confidence, colored by lift.
    plt.figure(figsize=(10, 8))
    sns.scatterplot(x='support', y='confidence', hue='lift', data=rules, alpha=0.7, s=100) # tuned point size and transparency
    plt.xlabel('支持度')
    plt.ylabel('置信度')
    plt.title(title + ' - 支持度 vs 置信度 (提升度着色)')
    plt.grid(True, linestyle='--', alpha=0.6)
    plt_path = os.path.join(output_dir, title + '_support_confidence_lift.png')
    plt.savefig(plt_path)
    logging.info(f"保存散点图到: {plt_path}")
    # plt.show() # not needed when running as a script

    # Network graph of the rules (simplified view).
    import networkx as nx
    # Cap the number of rules so the graph stays readable.
    rules_to_plot = rules.sort_values(by='lift', ascending=False).head(30) # top 30 by lift
    if len(rules) > len(rules_to_plot):
        logging.info(f"\n{title} - 关联规则过多，仅显示提升度最高的 {len(rules_to_plot)} 条用于可视化")

    if len(rules_to_plot) > 0:
        G = nx.DiGraph()
        for index, row in rules_to_plot.iterrows():
            # Join the frozensets into strings so nodes are hashable labels.
            antecedent = ", ".join(map(str, row['antecedents']))
            consequent = ", ".join(map(str, row['consequents']))
            G.add_edge(antecedent, consequent,
                       support=row['support'],
                       confidence=row['confidence'],
                       lift=row['lift'])

        plt.figure(figsize=(14, 12)) # larger canvas for the network graph
        # Larger k and a fixed seed keep the layout spread out and reproducible.
        pos = nx.spring_layout(G, k=0.8, iterations=50, seed=42)
        nx.draw(G, pos, with_labels=True, node_size=4000, node_color='skyblue',
                arrowsize=30, font_size=10, alpha=0.8, edge_color='gray', node_shape='o') # tuned node size, font, alpha, colors

        # Edge labels: support (S), confidence (C) and lift (L) per rule.
        edge_labels = {(u, v): f"S:{G[u][v]['support']:.4f}\nC:{G[u][v]['confidence']:.4f}\nL:{G[u][v]['lift']:.4f}"
                       for u, v in G.edges()}
        nx.draw_networkx_edge_labels(G, pos, edge_labels=edge_labels, font_size=8, label_pos=0.5, bbox=dict(facecolor='white', alpha=0.6, edgecolor='none')) # white box behind labels for readability

        plt.title(title + ' - 关联规则网络图')
        plt_path = os.path.join(output_dir, title + '_network_graph.png')
        plt.savefig(plt_path)
        logging.info(f"保存网络图到: {plt_path}")
        # plt.show()

        # Table view: rules sorted by lift.
        logging.info(f"\n{title} - 关联规则 (按提升度排序):")
        # NOTE(review): this goes to stdout via print, not to the log file — confirm intended.
        print(rules.sort_values(by='lift', ascending=False).head(20))
        if len(rules) > 20:
            logging.info(f"\n显示提升度最高的前 20 条规则，共 {len(rules)} 条。")
    else:
        logging.info(f"\n{title} - 没有关联规则")


# --- 7. 主程序 ---
def main():
    """Entry point: build per-user transactions from the cleaned data
    directory and run large-category association-rule mining on them."""
    banner = '=' * 40
    logging.info(f"\n{banner}")
    logging.info(f"开始清洗数据大类关联规则分析")
    logging.info(f"交易数据目录: {INPUT_DIR}")
    logging.info(f"{banner}")

    # Build transactions from every parquet file in the input directory.
    transactions = read_and_process_data_from_directory(INPUT_DIR, CATEGORY_TO_LARGE_CATEGORY_MAP)

    if not transactions:  # covers both None (failure) and an empty list
        logging.error("事务数据构建失败或为空，程序终止。")
        return

    # --- Association-rule mining with deliberately low thresholds ---
    logging.info("\n开始关联规则分析...")
    frequent_itemsets, rules = perform_association_rule_mining(
        transactions, min_support=0.001, min_confidence=0.1
    )

    if frequent_itemsets is not None and not frequent_itemsets.empty:
        logging.info(f"发现频繁项集: {len(frequent_itemsets)}个")
        # Frequent-itemset visualization could be added here.

    if rules is None or rules.empty:
        logging.info("未发现符合条件的关联规则")
        return

    logging.info(f"发现关联规则: {len(rules)}条")
    visualize_association_rules(
        rules,
        OUTPUT_DIR,
        title="Cleaned_Data_Large_Category_Association_Rules",
    )

if __name__ == "__main__":
    main()
import pandas as pd
from mlxtend.frequent_patterns import apriori, association_rules
import os
import logging
import json
import glob
import ast # Import ast for safe string evaluation
import pyarrow.parquet as pq # Import pyarrow for efficient Parquet reading
import gc # Import garbage collector

# --- 1. Define constants and paths ---
# NOTE(review): this third script re-binds OUTPUT_DIR (and re-calls
# logging.basicConfig below) with new values — confirm the three scripts are
# meant to live in one file.
INPUT_DATA_DIR = r"D:\课件集\data mining\10G_data_new\10G_data_new" # raw data directory
PRODUCT_CATALOG_PATH = r"D:\product_catalog.json\product_catalog.json" # product catalog file
OUTPUT_DIR = r"D:\analysis_results_original_data" # dedicated output directory
os.makedirs(OUTPUT_DIR, exist_ok=True)

# --- 2. Configure logging ---
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s',
    handlers=[
        logging.FileHandler(os.path.join(OUTPUT_DIR, 'analysis_original_data.log')), # log file
        logging.StreamHandler() # also echo logs to the console
    ]
)

# --- 3. Large-category mapping rules ---
# Map specific categories (from the product catalog) to large categories.
CATEGORY_TO_LARGE_CATEGORY_MAP = {
    '智能手机': '电子产品', '笔记本电脑': '电子产品', '平板电脑': '电子产品',
    '智能手表': '电子产品', '耳机': '电子产品', '音响': '电子产品',
    '相机': '电子产品', '摄像机': '摄像机', '游戏机': '电子产品', # NOTE(review): '摄像机' maps to itself here, unlike the earlier map where it maps to '电子产品' — confirm which is intended

    '上衣': '服装', '裤子': '服装', '裙子': '服装', '内衣': '服装',
    '鞋子': '服装', '服装': '服装', '帽子': '服装', '手套': '服装', '围巾': '服装', '外套': '服装', # '服装' intentionally maps to itself

    '零食': '食品', '饮料': '食品', '调味品': '食品', '米面': '食品',
    '水产': '食品', '肉类': '食品', '蛋奶': '食品', '水果': '食品', '蔬菜': '食品',

    '家具': '家居', '床上用品': '家居', '厨具': '家居', '卫浴用品': '家居',

    '文具': '办公', '办公用品': '办公',

    '健身器材': '运动户外', '户外装备': '运动户外',

    '玩具': '玩具', '模型': '玩具', '益智玩具': '玩具',

    '婴儿用品': '母婴', '儿童课外读物': '母婴',

    '车载电子': '汽车用品', '汽车装饰': '汽车用品',

    # Categories absent from this map are dropped during mapping.
    # Add an explicit entry here if an 'Other' bucket is ever needed, e.g.
    # 'Unknown Category': 'Other'
}


# --- 4. 数据读取、处理与映射 ---
def load_and_process_data(data_directory_path, catalog_path, category_map, file_filter=None):
    """
    Read Parquet data files in batches, load the product catalog, flatten
    each user's purchase history into one row per purchased item, and map
    every item to a large category.

    Args:
        data_directory_path (str): Path to the directory containing Parquet files.
        catalog_path (str): Path to the product catalog JSON file.
        category_map (dict): Dictionary mapping specific categories to large categories.
        file_filter (str, optional): If provided, only process files whose name
            contains this string. Defaults to None (process all files).

    Returns:
        pd.DataFrame | None: flattened purchases with columns user_id, item_id,
        purchase_date, payment_status, payment_method, price, specific_category,
        large_category and large_categories_list; None on any fatal error.
    """
    all_files = glob.glob(os.path.join(data_directory_path, "*.parquet"))
    if not all_files:
        logging.error(f"No .parquet files found in directory: {data_directory_path}")
        return None

    # Apply the optional file-name filter.
    if file_filter:
        files_to_process = [f for f in all_files if file_filter in os.path.basename(f)]
        if not files_to_process:
            logging.error(f"No files found matching filter '{file_filter}' in directory: {data_directory_path}")
            return None
        logging.info(f"Processing only files matching filter: '{file_filter}'")
    else:
        files_to_process = all_files
        logging.info(f"Processing all {len(files_to_process)} files in directory.")

    # --- Load Product Catalog ---
    try:
        logging.info(f"Reading product catalog file: {catalog_path}")
        with open(catalog_path, 'r', encoding='utf-8') as f:
            catalog_full_data = json.load(f)

        # The product list is expected under the top-level key 'products'.
        if 'products' not in catalog_full_data or not isinstance(catalog_full_data['products'], list):
            logging.error(f"Product catalog JSON does not contain a list under the key 'products'. Actual top-level keys: {list(catalog_full_data.keys())}")
            return None

        catalog_data_list = catalog_full_data['products']
        if not catalog_data_list:
            logging.error("Product list under 'products' key is empty.")
            return None

        df_catalog = pd.DataFrame(catalog_data_list)
        logging.info(f"Product catalog loaded with {len(df_catalog)} entries from 'products' list.")

        # Each catalog entry must provide 'id' and 'category'.
        required_catalog_columns = ['id', 'category']
        if not set(required_catalog_columns).issubset(df_catalog.columns):
            missing = set(required_catalog_columns) - set(df_catalog.columns)
            logging.error(f"Product catalog DataFrame is missing required columns ('id', 'category'): {missing}. Actual columns in DataFrame: {df_catalog.columns.tolist()}")
            if catalog_data_list and isinstance(catalog_data_list[0], dict):
                logging.info(f"Sample keys from the first product entry: {list(catalog_data_list[0].keys())}")
            return None

        # NOTE(review): this dict is built but never read afterwards (the merge
        # below uses df_catalog directly); kept for the log line / future use.
        item_id_to_category = df_catalog.set_index('id')['category'].to_dict()
        logging.info(f"Item ID to specific category mapping created with {len(item_id_to_category)} entries (using 'id' as item_id).")

    except FileNotFoundError as e:
        logging.error(f"Product catalog file not found: {str(e)}")
        return None
    except json.JSONDecodeError as e:
        logging.error(f"Product catalog JSON file parsing failed: {str(e)}")
        return None
    except Exception as e:
        logging.error(f"Product catalog loading or processing failed: {str(e)}")
        return None

    # --- Process Data Files in Batches ---
    all_flattened_purchases = []

    def parse_purchase_history_entry(entry):
        # Safely normalize a purchase_history cell to a list of purchase dicts.
        if isinstance(entry, str):
            try:
                parsed_entry = ast.literal_eval(entry)
                if isinstance(parsed_entry, list):
                    return parsed_entry
                if isinstance(parsed_entry, dict):
                    # A single purchase dict: wrap it in a list.
                    return [parsed_entry]
                return []
            except (ValueError, SyntaxError):
                # Unparseable string: treat as no purchases.
                return []
        elif isinstance(entry, list):
            return entry
        else:
            # Unknown type: treat as no purchases.
            return []

    BATCH_SIZE = 10000  # rows per Parquet batch; tune to available memory

    for file_path in files_to_process:
        try:
            logging.info(f"Reading and processing file in batches: {file_path}")
            parquet_file = pq.ParquetFile(file_path)

            # Skip files whose schema lacks the essential columns.
            file_columns = parquet_file.schema.names
            required_initial_columns = ['id', 'purchase_history']
            if not set(required_initial_columns).issubset(file_columns):
                missing = set(required_initial_columns) - set(file_columns)
                logging.warning(f"File {file_path} is missing essential columns ('id', 'purchase_history'): {missing}. Skipping this file.")
                continue

            for i, batch in enumerate(parquet_file.iter_batches(batch_size=BATCH_SIZE)):
                df_batch = batch.to_pandas()

                # Flatten the purchase history of every row in this batch.
                batch_flattened_purchases = []
                # Detailed per-row logging is limited to the first few rows.
                log_limit = 5
                logged_count = 0

                for index, row in df_batch.iterrows():
                    user_id = row['id']  # 'id' in the raw data identifies the user
                    purchase_history_data = row['purchase_history']

                    if logged_count < log_limit:
                        logging.info(f"Processing user_id: {user_id}, raw purchase_history: {purchase_history_data}")

                    purchases = parse_purchase_history_entry(purchase_history_data)

                    if logged_count < log_limit:
                        logging.info(f"Parsed purchase_history: {purchases}")

                    if not purchases:
                        if logged_count < log_limit:
                            logging.warning(f"No purchases extracted for user_id: {user_id}")
                        logged_count += 1  # still counts toward the log limit
                        continue

                    # Each purchase is expected to be a dict holding order-level
                    # fields and an 'items' list of item dicts.
                    for purchase in purchases:
                        if not isinstance(purchase, dict):
                            if logged_count < log_limit:
                                logging.warning(f"Purchase entry is not a dictionary for user_id {user_id}: {purchase}")
                            continue

                        items_list = purchase.get('items')

                        if isinstance(items_list, list) and items_list:
                            for item_entry in items_list:
                                if isinstance(item_entry, dict) and 'id' in item_entry:
                                    current_item_id = item_entry.get('id')
                                    if current_item_id is not None:
                                        # Order-level fields shared by all items
                                        # in this purchase; 'avg_price' is used
                                        # as the per-item price.
                                        purchase_date = purchase.get('purchase_date')
                                        payment_status = purchase.get('payment_status')
                                        payment_method = purchase.get('payment_method')
                                        price = purchase.get('avg_price')

                                        if logged_count < log_limit:
                                            logging.info(f"Extracted purchase details for item: item_id={current_item_id}, date={purchase_date}, status={payment_status}, method={payment_method}, price={price}")
                                            logging.info(f"Keys found in purchase dictionary: {list(purchase.keys())}")
                                            if isinstance(item_entry, dict):
                                                logging.info(f"Keys found in item entry dictionary: {list(item_entry.keys())}")

                                        batch_flattened_purchases.append({
                                            'user_id': user_id,
                                            'item_id': current_item_id,
                                            'purchase_date': purchase_date,
                                            'payment_status': payment_status,
                                            'payment_method': payment_method,
                                            'price': price
                                        })
                                    else:
                                        if logged_count < log_limit:
                                            logging.warning(f"item 'id' is None in item entry for user_id {user_id}: {item_entry}. Skipping item.")
                                else:
                                    if logged_count < log_limit:
                                        logging.warning(f"Item entry is not a dictionary or missing 'id' for user_id {user_id}: {item_entry}. Skipping item.")
                        else:
                            if logged_count < log_limit:
                                logging.warning(f"'items' key not found or not a valid list/empty for user_id {user_id}")

                    logged_count += 1

                all_flattened_purchases.extend(batch_flattened_purchases)

                del df_batch  # free batch memory promptly
                gc.collect()

            logging.info(f"Finished processing file {file_path}.")
            del parquet_file
            gc.collect()

        except Exception as e:
            logging.error(f"Error processing file {file_path}: {str(e)}")
            continue  # a failing file must not abort the whole run

    if not all_flattened_purchases:
        logging.error("No valid purchase records extracted from any file.")
        return None

    df_flattened_purchases = pd.DataFrame(all_flattened_purchases)
    logging.info(f"Finished flattening purchase history from all files. Total purchase records: {len(df_flattened_purchases)}")

    # --- Continue Processing on Flattened Data ---
    try:
        # Join each purchase with its catalog category. The join keys have
        # different names ('item_id' vs 'id'), so pandas keeps the catalog's
        # 'id' column as-is and never creates 'id_x'/'id_y' suffixes — the
        # original suffix-cleanup branches were dead code and the redundant
        # column survived. Drop 'id' directly instead.
        df_merged_purchases = pd.merge(
            df_flattened_purchases,
            df_catalog[['id', 'category']],
            left_on='item_id',
            right_on='id',
            how='left'
        )
        df_merged_purchases.rename(columns={'category': 'specific_category'}, inplace=True)
        if 'id' in df_merged_purchases.columns:
            df_merged_purchases.drop(columns=['id'], inplace=True)  # redundant join key from the catalog

        # Drop purchases whose item_id is not in the catalog.
        df_merged_purchases.dropna(subset=['specific_category'], inplace=True)
        logging.info(f"Purchase records remaining after filtering items not in catalog: {len(df_merged_purchases)}")

        # Map specific category to large category.
        df_merged_purchases['large_category'] = df_merged_purchases['specific_category'].map(category_map)

        # Drop purchases whose specific category has no large-category mapping.
        df_processed = df_merged_purchases.dropna(subset=['large_category']).copy()
        logging.info(f"Purchase records remaining after filtering items not mapped to a large category: {len(df_processed)}")

        # Wrap each large category in a one-element list for transaction building.
        df_processed['large_categories_list'] = df_processed['large_category'].apply(lambda x: [x] if pd.notna(x) else [])

        # Parse the purchase date when present; invalid values become NaT.
        if 'purchase_date' in df_processed.columns:
            df_processed['purchase_date'] = pd.to_datetime(df_processed['purchase_date'], errors='coerce')

        return df_processed

    except Exception as e:
        logging.error(f"Data processing after flattening failed: {str(e)}")
        return None

# --- 5. 关联规则挖掘函数 ---
def perform_association_rule_mining(transactions, min_support, min_confidence, title):
    """Mine frequent itemsets and association rules from a transaction list.

    Encodes the transactions with mlxtend's TransactionEncoder, runs Apriori,
    derives confidence-filtered rules, and hands them to
    visualize_association_rules for plotting.

    Args:
        transactions (list[list]): One list of items per transaction.
        min_support (float): Minimum support passed to apriori.
        min_confidence (float): Minimum confidence threshold for rules.
        title (str): Label used in log messages and plot titles/filenames.

    Returns:
        tuple: (frequent_itemsets, rules) as DataFrames. Empty DataFrames are
        returned when there are no transactions or no itemsets; (None, None)
        is returned on any failure (including MemoryError).
    """
    logging.info(f"\n--- Starting Association Rule Mining: {title} ---")
    try:
        from mlxtend.preprocessing import TransactionEncoder

        # Nothing to mine — bail out early with empty results.
        if not transactions:
            logging.warning(f"No valid transaction data available for {title} mining.")
            return pd.DataFrame(), pd.DataFrame()

        # One-hot encode transactions into a boolean item-presence matrix.
        encoder = TransactionEncoder()
        onehot = encoder.fit(transactions).transform(transactions)
        basket = pd.DataFrame(onehot, columns=encoder.columns_)
        logging.info(f"Transaction data encoding complete for {title}. Shape: {basket.shape}")

        logging.info(f"Starting frequent itemset mining for {title} with min_support={min_support}")
        frequent_itemsets = apriori(
            basket,
            min_support=min_support,
            use_colnames=True,
            low_memory=True,
        )
        logging.info(f"Found {len(frequent_itemsets)} frequent itemsets for {title}.")

        # Without frequent itemsets there is nothing to derive rules from.
        if frequent_itemsets.empty:
            logging.info(f"No frequent itemsets found for {title}, cannot generate association rules.")
            return frequent_itemsets, pd.DataFrame()

        logging.info(f"Starting association rule generation for {title} with min_threshold={min_confidence}")
        rules = association_rules(
            frequent_itemsets,
            metric="confidence",
            min_threshold=min_confidence,
        )
        # Strongest rules first: confidence, then lift, then support.
        rules = rules.sort_values(['confidence', 'lift', 'support'], ascending=[False, False, False])
        logging.info(f"Found {len(rules)} association rules for {title}.")

        visualize_association_rules(rules, OUTPUT_DIR, title)
        return frequent_itemsets, rules

    except MemoryError:
        logging.error(f"Memory error during {title} mining. Try increasing available memory or decreasing min_support.")
        return None, None
    except Exception as e:
        logging.error(f"Association rule mining failed for {title}: {str(e)}")
        return None, None

# --- 6. 可视化函数 (关联规则) ---
def visualize_association_rules(rules, output_dir, title="Association Rules"):
    """
    Visualizes association rules.

    Saves a support-vs-confidence scatter plot and, for the top 30 rules by
    lift, a directed network graph as PNG files under *output_dir*. Also
    prints the top 20 rules to stdout.

    Args:
        rules (pd.DataFrame): DataFrame containing association rules
            (mlxtend schema: antecedents, consequents, support, confidence, lift).
        output_dir (str): Output directory to save plots.
        title (str): Title for the plots; spaces are replaced with underscores
            when building output filenames.
    """
    if rules is None or rules.empty:
        logging.info(f"No association rules to visualize for {title}.")
        return

    import matplotlib.pyplot as plt
    import seaborn as sns
    # Configure matplotlib to display Chinese characters
    plt.rcParams['font.sans-serif'] = ['SimHei']  # Specify default font
    plt.rcParams['axes.unicode_minus'] = False  # Fix negative sign display issue

    # Scatter plot: Support vs. Confidence (colored by Lift)
    plt.figure(figsize=(10, 8))
    sns.scatterplot(x='support', y='confidence', hue='lift', data=rules, alpha=0.7, s=100)
    plt.xlabel('支持度')
    plt.ylabel('置信度')
    plt.title(title + ' - 支持度 vs 置信度 (提升度着色)')
    plt.grid(True, linestyle='--', alpha=0.6)
    # BUG FIX: previously built the path from the global OUTPUT_DIR, silently
    # ignoring the output_dir parameter. Honor the parameter instead.
    plt_path = os.path.join(output_dir, title.replace(" ", "_") + '_support_confidence_lift.png')  # Replace spaces for filename
    plt.savefig(plt_path)
    logging.info(f"Saved scatter plot to: {plt_path}")
    plt.close()  # Close plot to free memory

    # Network graph: Visualize association rules (simplified version)
    import networkx as nx
    rules_to_plot = rules.sort_values(by='lift', ascending=False).head(30)
    if len(rules) > len(rules_to_plot):
        logging.info(f"\n{title} - Too many association rules, showing only the top {len(rules_to_plot)} by lift for visualization.")

    if len(rules_to_plot) > 0:
        G = nx.DiGraph()
        for index, row in rules_to_plot.iterrows():
            # antecedents/consequents are frozensets; render them as
            # comma-joined node labels.
            antecedent = ", ".join(map(str, row['antecedents']))
            consequent = ", ".join(map(str, row['consequents']))
            G.add_edge(antecedent, consequent,
                       support=row['support'],
                       confidence=row['confidence'],
                       lift=row['lift'])

        plt.figure(figsize=(14, 12))
        pos = nx.spring_layout(G, k=0.8, iterations=50, seed=42)  # fixed seed -> reproducible layout
        nx.draw(G, pos, with_labels=True, node_size=4000, node_color='skyblue',
                arrowsize=30, font_size=10, alpha=0.8, edge_color='gray', node_shape='o')

        edge_labels = {(u, v): f"S:{G[u][v]['support']:.4f}\nC:{G[u][v]['confidence']:.4f}\nL:{G[u][v]['lift']:.4f}"
                       for u, v in G.edges()}
        nx.draw_networkx_edge_labels(G, pos, edge_labels=edge_labels, font_size=8, label_pos=0.5, bbox=dict(facecolor='white', alpha=0.6, edgecolor='none'))

        plt.title(title + ' - 关联规则网络图')
        # BUG FIX: same as above — use output_dir, not the global OUTPUT_DIR.
        plt_path = os.path.join(output_dir, title.replace(" ", "_") + '_network_graph.png')  # Replace spaces for filename
        plt.savefig(plt_path)
        logging.info(f"Saved network graph to: {plt_path}")
        plt.close()  # Close plot to free memory

        # Table: Display association rules
        logging.info(f"\n{title} - Association Rules (Sorted by Confidence, then Lift):")
        print(rules.head(20))
        if len(rules) > 20:
            logging.info(f"\nShowing top 20 rules. Total rules found: {len(rules)}.")
    else:
        logging.info(f"\n{title} - No association rules found.")


# --- 7. 主程序 ---
def main():
    """Drive the full association-analysis pipeline on the cleaned purchase data.

    Steps:
      1. Load, flatten, and map the raw data (single file 'part-00000.parquet').
      2. Payment-method vs. large-category association rule mining.
      3. Refund-pattern association rule mining.
      4. Payment-method distribution report for high-value items.
      5. Framework sketch for time-series (sequence) pattern mining.

    NOTE(review): this function references module-level names not visible in
    this chunk — INPUT_DATA_DIR, PRODUCT_CATALOG_PATH, load_and_process_data,
    and the gc module. Confirm they are defined/imported earlier in the file
    (the file header shown only defines INPUT_DIR and comments out
    PRODUCT_CATALOG_PATH).
    """
    logging.info(f"\n{'='*40}")
    logging.info(f"开始原始数据关联分析")
    logging.info(f"原始数据目录: {INPUT_DATA_DIR}")
    logging.info(f"商品目录文件: {PRODUCT_CATALOG_PATH}")
    logging.info(f"{'='*40}")

    # Load, process, and map data by extracting purchase history
    # Process only the specified file 'part-00000.parquet'
    df_processed = load_and_process_data(INPUT_DATA_DIR, PRODUCT_CATALOG_PATH, CATEGORY_TO_LARGE_CATEGORY_MAP, file_filter='part-00000.parquet')

    if df_processed is None or df_processed.empty:
        logging.error("Data loading, processing, or mapping failed. Program terminated.")
        return

    # --- Association analysis 1: payment method vs. product category ---
    logging.info(f"\n{'='*40}")
    logging.info("开始支付方式与大类商品关联分析")
    logging.info(f"{'='*40}")

    # --- Added Sampling ---
    # Adjust sampling_fraction based on available memory and dataset size
    # A smaller fraction reduces memory usage but might miss infrequent patterns
    # For single file processing, sampling might not be needed if the file is small enough
    # Let's keep sampling as an option, but you can set it to 1.0 to process all data from the single file
    sampling_fraction_payment = 1.0 # Process all data from the single file initially
    df_payment_sampled = df_processed.sample(frac=sampling_fraction_payment, random_state=42).copy() # Use a fixed random_state for reproducibility
    logging.info(f"Sampled {len(df_payment_sampled)} records ({sampling_fraction_payment*100}%) for Payment Method & Category analysis.")

    # Build transactions: Group by user_id and payment_method on sampled data
    # --- Corrected Filtering Condition ---
    # Keep only rows with a non-null, non-empty payment_method.
    df_payment_filtered = df_payment_sampled[df_payment_sampled['payment_method'].notna() & (df_payment_sampled['payment_method'] != '')].copy()

    if df_payment_filtered.empty:
        logging.info("No valid payment method data found in sampled data for Payment Method & Category analysis.")
    else:
        # NOTE: sum(x, []) flattens the per-row category lists; quadratic in
        # total list length but acceptable at this scale.
        payment_category_transactions_grouped = df_payment_filtered.groupby(['user_id', 'payment_method'])['large_categories_list'].agg(lambda x: sum(x, []))
        # Deduplicate categories within each (user, payment_method) transaction.
        payment_category_transactions = payment_category_transactions_grouped.apply(lambda x: list(set(x))).tolist()
        payment_category_transactions = [t for t in payment_category_transactions if t] # Filter empty transactions

        logging.info(f"Built {len(payment_category_transactions)} transactions from sampled data for Payment Method & Category analysis.")

        # Perform association rule mining for Payment Method & Category
        # Using user-specified thresholds: support >= 0.01, confidence >= 0.6
        # Note: Support is calculated relative to the sampled data size
        perform_association_rule_mining(
            payment_category_transactions,
            min_support=0.01, # Keep original thresholds, but they apply to the sample
            min_confidence=0.6,
            title="Payment Method & Large Category Association Rules (Sampled Data)" # Indicate sampled data
        )
    # --- More robust memory release ---
    # locals() guards handle the early-exit branch where some names were never bound.
    if 'df_payment_sampled' in locals(): del df_payment_sampled
    if 'df_payment_filtered' in locals(): del df_payment_filtered
    if 'payment_category_transactions_grouped' in locals(): del payment_category_transactions_grouped
    if 'payment_category_transactions' in locals(): del payment_category_transactions
    gc.collect()

    # --- Association analysis 2: refund pattern analysis ---
    logging.info(f"\n{'='*40}")
    logging.info("开始退款模式分析")
    logging.info(f"{'='*40}")

    # --- Added Sampling ---
    sampling_fraction_refund = 1.0 # Process all data from the single file initially
    df_refund_sampled = df_processed.sample(frac=sampling_fraction_refund, random_state=42).copy()
    logging.info(f"Sampled {len(df_refund_sampled)} records ({sampling_fraction_refund*100}%) for Refund Patterns analysis.")


    # Filter for refunded items in sampled data
    df_status_filtered = df_refund_sampled[df_refund_sampled['payment_status'].notna() & (df_refund_sampled['payment_status'] != '')].copy()
    # Statuses mean "refunded" and "partially refunded" respectively.
    df_refunded = df_status_filtered[df_status_filtered['payment_status'].isin(['已退款', '部分退款'])].copy() # Filter and create copy

    if df_refunded.empty:
        logging.info("No refunded records found in sampled data. Skipping Refund Patterns analysis.")
    else:
        logging.info(f"Found {len(df_refunded)} refunded records in sampled data.")
        # Build transactions for refunded items: Group by user_id on sampled data
        refund_transactions_grouped = df_refunded.groupby('user_id')['large_categories_list'].agg(lambda x: sum(x, []))
        refund_transactions = refund_transactions_grouped.apply(lambda x: list(set(x))).tolist()
        refund_transactions = [t for t in refund_transactions if t] # Filter empty transactions

        logging.info(f"Built {len(refund_transactions)} transactions from sampled data for Refund Patterns analysis.")

        # Perform association rule mining for Refund Patterns
        # Using user-specified thresholds: support >= 0.005, confidence >= 0.4
        # Note: Support is calculated relative to the sampled data size
        perform_association_rule_mining(
            refund_transactions,
            min_support=0.005, # Keep original thresholds, but they apply to the sample
            min_confidence=0.4,
            title="Refund Patterns Association Rules (Sampled Data)" # Indicate sampled data
        )
    # --- More robust memory release ---
    if 'df_refund_sampled' in locals(): del df_refund_sampled
    if 'df_status_filtered' in locals(): del df_status_filtered
    if 'df_refunded' in locals(): del df_refunded
    if 'refund_transactions_grouped' in locals(): del refund_transactions_grouped
    if 'refund_transactions' in locals(): del refund_transactions
    gc.collect()


    # --- Other analysis frameworks / notes ---
    logging.info(f"\n{'='*40}")
    logging.info(f"关于其他分析的需求:")
    logging.info(f"{'='*40}")

    # 3. Preferred payment methods for high-value items
    logging.info("\n--- 高价值商品的首选支付方式分析 ---")
    # Assuming 'price' column exists and is numeric
    HIGH_VALUE_THRESHOLD = 5000 # Define high value threshold
    # Ensure price is numeric and not None before filtering
    df_price_filtered = df_processed[pd.to_numeric(df_processed['price'], errors='coerce').notna()].copy()
    df_price_filtered['price'] = pd.to_numeric(df_price_filtered['price'], errors='coerce') # Convert to numeric
    df_high_value = df_price_filtered[df_price_filtered['price'] > HIGH_VALUE_THRESHOLD].copy()

    if df_high_value.empty:
        logging.info(f"No high-value items (price > {HIGH_VALUE_THRESHOLD}) found.")
    else:
        logging.info(f"Found {len(df_high_value)} high-value items.")
        # Analyze payment method distribution for high-value items
        # Ensure payment_method is not None or empty string before counting
        df_high_value_payment_filtered = df_high_value[df_high_value['payment_method'].notna() & (df_high_value['payment_method'] != '')].copy()
        if df_high_value_payment_filtered.empty:
             logging.info("No valid payment method data for high-value items.")
        else:
            payment_method_distribution = df_high_value_payment_filtered['payment_method'].value_counts(normalize=True)
            logging.info("\n高价值商品的首选支付方式分布:")
            print(payment_method_distribution)
            # Optional: Visualize this distribution (e.g., bar chart)
    # --- More robust memory release ---
    if 'df_price_filtered' in locals(): del df_price_filtered
    if 'df_high_value' in locals(): del df_high_value
    if 'df_high_value_payment_filtered' in locals(): del df_high_value_payment_filtered
    gc.collect()


    # 4. Time-series pattern mining (framework outline)
    logging.info("\n--- 时间序列模式挖掘 ---")
    logging.info("此分析需要构建用户购买序列，并使用序列模式挖掘算法（如 PrefixSpan）。")
    logging.info("以下是构建序列的框架，但完整的序列模式挖掘算法需要额外实现或库。")
    # Assuming 'purchase_date' column exists and is in datetime format
    # Ensure purchase_date is datetime and not None
    df_date_filtered = df_processed[df_processed['purchase_date'].notna()].copy()
    # purchase_date conversion already done in load_and_process_data

    if df_date_filtered.empty:
         logging.info("No valid purchase dates found for time series analysis.")
    else:
        # Sort by user and date to create sequences
        df_processed_sorted = df_date_filtered.sort_values(by=['user_id', 'purchase_date'])

        # Group by user and collect large categories in order
        # Ensure large_categories_list is not empty for the sequence
        user_sequences_grouped = df_processed_sorted[df_processed_sorted['large_categories_list'].apply(len) > 0].groupby('user_id')['large_categories_list'].apply(lambda x: sum(x, []))
        user_sequences = user_sequences_grouped.tolist()


        logging.info(f"Built {len(user_sequences)} user sequences for time series analysis.")
        logging.info("示例用户购买序列 (前5个):")
        for i in range(min(5, len(user_sequences))):
            logging.info(user_sequences[i])

        # To perform sequence pattern mining (e.g., "A then B"), you would use a library like mlxtend's sequence mining
        # Example using mlxtend's SequentialPatternMiner (requires transforming sequences)
        # NOTE(review): the sketch below references APIs (SequenceEncoder, spminer)
        # that may not exist in current mlxtend — verify against its documentation.
        # from mlxtend.frequent_patterns import apriori, association_rules
        # from mlxtend.preprocessing import TransactionEncoder, SequenceEncoder
        # se = SequenceEncoder()
        # encoded_sequences = se.fit_transform(user_sequences)
        # from mlxtend.fpatterns import spminer # This is an older module, check mlxtend documentation for current sequence mining
        # frequent_sequences = spminer(encoded_sequences, min_support=0.01) # Example, adjust min_support

        logging.info("\n完整的时序模式挖掘（如 'A -> B' 序列）需要专门的序列挖掘算法实现。")
        logging.info("您可以使用 mlxtend 或其他库中的序列挖掘功能。")
    # --- More robust memory release ---
    if 'df_date_filtered' in locals(): del df_date_filtered
    if 'df_processed_sorted' in locals(): del df_processed_sorted
    if 'user_sequences_grouped' in locals(): del user_sequences_grouped
    if 'user_sequences' in locals(): del user_sequences
    gc.collect()

    # Release df_processed after all analyses that use it are done
    if 'df_processed' in locals(): del df_processed
    gc.collect()


# Script entry point: run the analysis pipeline only when executed directly,
# not when this module is imported.
if __name__ == "__main__":
    main()
