import pandas as pd
import time
import matplotlib.pyplot as plt
import seaborn as sns
import networkx as nx
from mlxtend.preprocessing import TransactionEncoder
from mlxtend.frequent_patterns import apriori, association_rules
import pyfpgrowth

# Configure a CJK-capable font so Chinese axis labels and titles render
plt.rcParams["font.family"] = ["SimHei"]
plt.rcParams['axes.unicode_minus'] = False  # keep minus signs readable with CJK fonts

# -------------------- Data loading & preprocessing --------------------
print("开始数据预处理...")
start_preprocess = time.time()

# Load the grocery purchase records (one row per item bought)
df = pd.read_csv('Groceries_dataset.csv')

# Discard rows with no item description
df = df.dropna(subset=['itemDescription'])

# Build the transaction list: each (member, date) pair is one basket
transactions = (
    df.groupby(['Member_number', 'Date'])['itemDescription']
    .apply(list)
    .tolist()
)

end_preprocess = time.time()
print(f"数据预处理完成，耗时: {end_preprocess - start_preprocess:.2f}秒")
print(f"数据集大小: {len(transactions)}条交易，{df['itemDescription'].nunique()}种商品")

# -------------------- Apriori mining --------------------
print("\n开始Apriori算法挖掘...")
apriori_t0 = time.time()

# One-hot encode the transactions into a boolean item matrix
encoder = TransactionEncoder()
onehot_df = pd.DataFrame(
    encoder.fit(transactions).transform(transactions),
    columns=encoder.columns_,
)

# Mine frequent itemsets (min support 0.001, i.e. at least 0.1% of baskets)
frequent_itemsets_apriori = apriori(onehot_df, min_support=0.001, use_colnames=True)

# Derive association rules with confidence >= 0.2
rules_apriori = association_rules(frequent_itemsets_apriori, metric="confidence", min_threshold=0.2)

# Coerce lift to numeric and drop any rows where it is not a valid number
rules_apriori['lift'] = pd.to_numeric(rules_apriori['lift'], errors='coerce')
rules_apriori = rules_apriori.dropna(subset=['lift'])

time_apriori = time.time() - apriori_t0
print(f"Apriori算法完成，耗时: {time_apriori:.2f}秒")
print(f"发现频繁项集: {len(frequent_itemsets_apriori)}个")
print(f"生成关联规则: {len(rules_apriori)}条")

# -------------------- FP-Growth mining --------------------
print("\n开始FP-Growth算法挖掘...")
start_fpgrowth = time.time()

# Absolute support count threshold: 0.1% of the number of transactions
support_threshold = int(len(transactions) * 0.001)
patterns = pyfpgrowth.find_frequent_patterns(transactions, support_threshold)

# Generate association rules with confidence >= 0.2
rules_fpgrowth = pyfpgrowth.generate_association_rules(patterns, 0.2)

# Convert the rule dict to rows with support / confidence / lift
rules_fpgrowth_df = []
for antecedent, (consequent, confidence) in rules_fpgrowth.items():
    # Skip rules whose consequent is not itself a frequent pattern
    if consequent not in patterns:
        continue
    # BUG FIX: pyfpgrowth keys its pattern dict by *sorted* item tuples.
    # The raw concatenation `antecedent + consequent` is generally not
    # sorted, so the previous lookup always missed and reported support 0.
    combined_itemset = tuple(sorted(antecedent + consequent))
    support = patterns.get(combined_itemset, 0) / len(transactions)
    consequent_support = patterns.get(consequent, 0) / len(transactions)
    # lift = confidence / P(consequent); fall back to 0 when P(consequent) is unknown
    lift = confidence / consequent_support if consequent_support > 0 else 0
    rules_fpgrowth_df.append({
        'antecedents': antecedent,
        'consequents': consequent,
        'support': support,
        'confidence': confidence,
        'lift': lift
    })

rules_fpgrowth_df = pd.DataFrame(rules_fpgrowth_df)

# Ensure a lift column exists before filtering on it (an empty rule list
# produces a DataFrame with no columns at all)
if 'lift' in rules_fpgrowth_df.columns:
    rules_fpgrowth_df['lift'] = pd.to_numeric(rules_fpgrowth_df['lift'], errors='coerce')
    rules_fpgrowth_df = rules_fpgrowth_df.dropna(subset=['lift'])
else:
    print("警告：FP-Growth生成的规则中不包含lift列，无法进行可视化")
    rules_fpgrowth_df = pd.DataFrame(columns=['antecedents', 'consequents', 'support', 'confidence', 'lift'])

end_fpgrowth = time.time()
time_fpgrowth = end_fpgrowth - start_fpgrowth
print(f"FP-Growth算法完成，耗时: {time_fpgrowth:.2f}秒")
print(f"发现频繁项集: {len(patterns)}个")
print(f"生成关联规则: {len(rules_fpgrowth_df)}条")

# -------------------- Runtime comparison --------------------
print("\n-------------------- 算法性能对比 --------------------")
print(f"Apriori运行时间: {time_apriori:.2f}秒")
print(f"FP-Growth运行时间: {time_fpgrowth:.2f}秒")

# Only report a speed-up figure when both algorithms yielded usable rules
if len(rules_fpgrowth_df) > 0 and len(rules_apriori) > 0:
    print(f"性能提升: {((time_apriori - time_fpgrowth) / time_apriori) * 100:.2f}%")
else:
    print("无法计算性能提升：一种或两种算法未生成有效规则")

# Bar chart of the two run times
plt.figure(figsize=(10, 6))
algorithms = ['Apriori', 'FP-Growth']
times = [time_apriori, time_fpgrowth]
plt.bar(algorithms, times, color=['#4e79a7', '#f28e2b'])
plt.ylabel('运行时间 (秒)')
plt.title('算法性能对比')
plt.grid(axis='y', linestyle='--', alpha=0.7)

# Annotate each bar with its runtime, just above the bar top
for i, v in enumerate(times):
    plt.text(i, v + 0.1, f'{v:.2f}s', ha='center', fontweight='bold')

plt.tight_layout()
plt.savefig('algorithm_performance.png', dpi=300)
plt.show()

# -------------------- Visual analysis: rule distribution scatter plots --------------------
plt.figure(figsize=(14, 6))

# Apriori scatter plot
if len(rules_apriori) > 0:
    plt.subplot(1, 2, 1)
    # Bin lift into quartiles for the hue/palette mapping.
    # duplicates='drop' prevents the ValueError pd.qcut raises when lift
    # has enough repeated values that the quartile edges are not unique.
    rules_apriori['lift_category'] = pd.qcut(rules_apriori['lift'], 4, duplicates='drop')
    sns.scatterplot(x='support', y='confidence', size='lift',
                    data=rules_apriori, alpha=0.7, palette='viridis',
                    hue='lift_category', sizes=(20, 200))
    plt.title('Apriori算法规则分布')
    plt.xlabel('支持度')
    plt.ylabel('置信度')
    plt.legend(title='提升度', bbox_to_anchor=(1.05, 1), loc='upper left')
else:
    plt.subplot(1, 2, 1)
    plt.text(0.5, 0.5, 'Apriori未生成有效规则', ha='center', va='center', fontsize=12)
    plt.axis('off')

# FP-Growth scatter plot
if len(rules_fpgrowth_df) > 0:
    plt.subplot(1, 2, 2)
    # Same duplicate-edge guard as above
    rules_fpgrowth_df['lift_category'] = pd.qcut(rules_fpgrowth_df['lift'], 4, duplicates='drop')
    sns.scatterplot(x='support', y='confidence', size='lift',
                    data=rules_fpgrowth_df, alpha=0.7, palette='viridis',
                    hue='lift_category', sizes=(20, 200))
    plt.title('FP-Growth算法规则分布')
    plt.xlabel('支持度')
    plt.ylabel('置信度')
    plt.legend(title='提升度', bbox_to_anchor=(1.05, 1), loc='upper left')
else:
    plt.subplot(1, 2, 2)
    plt.text(0.5, 0.5, 'FP-Growth未生成有效规则', ha='center', va='center', fontsize=12)
    plt.axis('off')

plt.tight_layout()
plt.savefig('rule_distribution.png', dpi=300)
plt.show()


# -------------------- 可视化分析: 关联规则网络图 --------------------
def plot_network(rules_df, title, top_n=30, label_lift_threshold=1.2):
    """Draw a directed network graph of association rules.

    Each antecedent/consequent itemset becomes a node; a directed edge
    A -> C is drawn per rule, with edge width scaled by lift and edge
    colour mapped to confidence.

    Parameters
    ----------
    rules_df : pandas.DataFrame
        Rules with 'antecedents', 'consequents', 'support', 'confidence'
        and 'lift' columns; the itemset columns must be iterables of strings.
    title : str
        Figure title.
    top_n : int, optional
        Number of highest-lift rules to draw (default 30).
    label_lift_threshold : float, optional
        Only edges with lift above this value receive a text label
        (default 1.2, matching the original hard-coded cutoff).

    Returns
    -------
    networkx.DiGraph
        The constructed rule graph (possibly empty).
    """
    # constrained_layout sidesteps tight_layout/colorbar sizing conflicts
    fig, ax = plt.subplots(figsize=(14, 10), constrained_layout=True)
    G = nx.DiGraph()

    # Keep only the top-N rules by lift
    top_rules = rules_df.sort_values('lift', ascending=False).head(top_n)

    # Add one node per itemset and one edge per rule
    for _, row in top_rules.iterrows():
        antecedent = ', '.join(row['antecedents'])
        consequent = ', '.join(row['consequents'])

        G.add_node(antecedent, size=1000)
        G.add_node(consequent, size=1000)
        G.add_edge(antecedent, consequent, weight=row['lift'],
                   confidence=row['confidence'], support=row['support'])

    # Guard: with no edges, min()/max() over edge_colors below would raise
    if G.number_of_edges() == 0:
        ax.set_title(title, fontsize=14)
        ax.axis('off')
        return G

    # Force-directed layout
    pos = nx.spring_layout(G, k=0.3, iterations=50)

    # Draw nodes using the 'size' attribute set above
    node_sizes = [G.nodes[node].get('size', 1000) for node in G.nodes]
    nx.draw_networkx_nodes(G, pos, node_size=node_sizes,
                           node_color='#4e79a7', alpha=0.8, ax=ax)

    # Edge width ~ lift, edge colour ~ confidence
    edge_widths = [d['weight'] * 0.5 for _, _, d in G.edges(data=True)]
    edge_colors = [d['confidence'] for _, _, d in G.edges(data=True)]

    # ScalarMappable backing the confidence colour bar
    sm = plt.cm.ScalarMappable(cmap=plt.cm.YlOrRd,
                               norm=plt.Normalize(vmin=min(edge_colors),
                                                  vmax=max(edge_colors)))
    sm.set_array([])  # empty array: only used to drive the colour bar

    nx.draw_networkx_edges(G, pos, width=edge_widths,
                           edge_color=edge_colors, alpha=0.6,
                           edge_cmap=plt.cm.YlOrRd, arrowsize=20, ax=ax)

    nx.draw_networkx_labels(G, pos, font_size=9, font_family='SimHei', ax=ax)

    # Label only high-lift edges to avoid visual clutter
    edge_labels = {(u, v): f"lift: {d['weight']:.2f}"
                   for u, v, d in G.edges(data=True)
                   if d['weight'] > label_lift_threshold}
    nx.draw_networkx_edge_labels(G, pos, edge_labels=edge_labels, font_size=8, ax=ax)

    # shrink keeps the colour bar proportionate to the axes
    cbar = fig.colorbar(sm, ax=ax, shrink=0.6)
    cbar.set_label('置信度')

    ax.set_title(title, fontsize=14)
    ax.axis('off')

    return G


def _network_or_notice(rules, net_title, notice, outfile):
    """Plot the rule network when rules exist, else a placeholder notice."""
    if len(rules) > 0:
        plot_network(rules, net_title)
    else:
        plt.figure(figsize=(10, 6))
        plt.text(0.5, 0.5, notice, ha='center', va='center', fontsize=14)
        plt.axis('off')
    plt.savefig(outfile, dpi=300)
    plt.show()


# Apriori rule network
_network_or_notice(rules_apriori,
                   'Apriori算法关联规则网络图',
                   'Apriori未生成有效规则，无法绘制网络图',
                   'apriori_network.png')

# FP-Growth rule network
_network_or_notice(rules_fpgrowth_df,
                   'FP-Growth算法关联规则网络图',
                   'FP-Growth未生成有效规则，无法绘制网络图',
                   'fpgrowth_network.png')

# -------------------- Frequent-itemset analysis --------------------
# Side-by-side bar charts of itemset length distributions
plt.figure(figsize=(12, 6))


def _plot_length_distribution(position, lengths, bar_title, color, empty_msg):
    """Bar-plot itemset length counts in the given subplot, or a notice when empty."""
    plt.subplot(1, 2, position)
    if lengths is not None:
        lengths.value_counts().sort_index().plot(kind='bar', color=color)
        plt.title(bar_title)
        plt.xlabel('项集长度')
        plt.ylabel('数量')
    else:
        plt.text(0.5, 0.5, empty_msg, ha='center', va='center', fontsize=12)
        plt.axis('off')


_plot_length_distribution(
    1,
    frequent_itemsets_apriori['itemsets'].apply(len) if len(frequent_itemsets_apriori) > 0 else None,
    'Apriori频繁项集长度分布', '#4e79a7', 'Apriori未生成频繁项集')

_plot_length_distribution(
    2,
    pd.Series([len(itemset) for itemset in patterns.keys()]) if len(patterns) > 0 else None,
    'FP-Growth频繁项集长度分布', '#f28e2b', 'FP-Growth未生成频繁项集')

plt.tight_layout()
plt.savefig('itemset_length.png', dpi=300)
plt.show()

# Print the top rules from each algorithm as a sanity check
print("\n-------------------- 关联规则结果示例 --------------------")

_RULE_COLUMNS = ['antecedents', 'consequents', 'support', 'confidence', 'lift']

if len(rules_apriori) > 0:
    print("Apriori算法前5条规则（按提升度排序）:")
    top_apriori = rules_apriori.sort_values('lift', ascending=False).head()
    print(top_apriori[_RULE_COLUMNS])
else:
    print("Apriori未生成有效规则")

if len(rules_fpgrowth_df) > 0:
    print("\nFP-Growth算法前5条规则（按提升度排序）:")
    top_fpgrowth = rules_fpgrowth_df.sort_values('lift', ascending=False).head()
    print(top_fpgrowth[_RULE_COLUMNS])
else:
    print("FP-Growth未生成有效规则")