import pandas as pd
import numpy as np
from tqdm import tqdm
import heapq
from collections import defaultdict
import matplotlib.pyplot as plt
import seaborn as sns

# Configure matplotlib for CJK text rendering (falls back through the font list).
plt.rcParams["font.family"] = ["SimHei", "WenQuanYi Micro Hei", "Heiti TC"]
plt.rcParams["axes.unicode_minus"] = False  # CJK fonts often lack U+2212; use ASCII '-' instead


def load_and_preprocess_data(file_path):
    """Load the transaction spreadsheet at *file_path* and validate its schema.

    Prints basic diagnostics about the loaded data.  Raises ValueError when a
    required column ('customer_id' or 'product_id') is missing, and re-raises
    any read failure after logging it.

    Returns:
        pandas.DataFrame with the raw transaction records.
    """
    try:
        print("开始加载数据...")
        df = pd.read_excel(file_path)
        print(f"数据加载成功，共{len(df)}条交易记录")
        print(f"数据包含以下列：{list(df.columns)}")

        # The downstream analysis is meaningless without these two columns,
        # so fail fast with an explicit message.
        for col in ('customer_id', 'product_id'):
            if col not in df.columns:
                raise ValueError(f"数据中缺少必要的列 '{col}'，分析无法进行")

        return df
    except Exception as e:
        print(f"数据加载失败: {e}")
        raise


def calculate_support(df, condition):
    """Return the support of *condition*: the fraction of rows in *df* that
    satisfy every ``column == value`` pair.

    An empty condition matches all rows (support 1.0 on non-empty data).
    """
    hits = pd.Series(True, index=df.index)
    for column, expected in condition.items():
        hits = hits & df[column].eq(expected)
    return hits.mean()


def calculate_confidence(df, antecedent, consequent):
    """Confidence of the rule antecedent → consequent: P(consequent | antecedent).

    Returns 0 when the antecedent matches no rows (avoids division by zero).
    """
    def _match(conditions):
        # Boolean mask of rows satisfying all column == value constraints.
        hit = pd.Series(True, index=df.index)
        for column, expected in conditions.items():
            hit = hit & df[column].eq(expected)
        return hit

    ant_hits = _match(antecedent)
    denominator = ant_hits.mean()
    if denominator == 0:
        return 0

    return (ant_hits & _match(consequent)).mean() / denominator


def calculate_lift(df, antecedent, consequent):
    """Lift of the rule: its confidence divided by the consequent's base support.

    Lift > 1 means the antecedent makes the consequent more likely than chance.
    Returns 0 when the consequent never occurs (avoids division by zero).
    """
    rule_confidence = calculate_confidence(df, antecedent, consequent)

    cons_hits = pd.Series(True, index=df.index)
    for column, expected in consequent.items():
        cons_hits = cons_hits & df[column].eq(expected)

    base_rate = cons_hits.mean()
    return rule_confidence / base_rate if base_rate != 0 else 0


def heuristic_function(df, current_state, target_consequent):
    """Heuristic cost of a search state: 1 / (support × confidence).

    High-support, high-confidence states get a low cost and are explored
    first.  States that cannot match at all (zero support or confidence)
    get an infinite cost so the search deprioritises them.
    """
    sup = calculate_support(df, current_state)
    conf = calculate_confidence(df, current_state, target_consequent)

    quality = sup * conf
    # Either factor being zero means this state (and the rule it implies)
    # never fires — treat it as unreachable.
    if quality == 0:
        return float('inf')
    return 1 / quality


def a_star_search(df, target_user, target_product, min_support=0.01, min_confidence=0.5):
    """Search for association rules predicting the target pair using A*.

    Explores antecedent condition sets (``column == value`` constraints)
    ordered by f = g + h, where g is the number of conditions and h is
    ``1 / (support * confidence)`` of the candidate rule.  Every expanded
    state meeting both thresholds is collected as a rule.

    Args:
        df: transaction DataFrame with 'customer_id' and 'product_id' columns.
        target_user: customer id of the fixed consequent.
        target_product: product id of the fixed consequent.
        min_support: minimum support a rule must reach to be reported.
        min_confidence: minimum confidence a rule must reach to be reported.

    Returns:
        list of dicts with keys 'antecedent', 'consequent', 'support',
        'confidence' and 'lift'.
    """
    # The consequent is fixed: the specific user/product pair of interest.
    target_consequent = {'customer_id': target_user, 'product_id': target_product}

    open_list = []
    closed_set = set()
    # Monotonic tie-breaker: heapq compares tuples element-wise, and without
    # it two entries with equal f and g would be compared by their state
    # tuples, which can raise TypeError for mixed-type id values.
    push_counter = 0

    initial_state = {}
    initial_cost = 0
    initial_heuristic = heuristic_function(df, initial_state, target_consequent)

    # An infinite heuristic on the empty antecedent means the target pair
    # never occurs in the data — no rule can ever satisfy the thresholds,
    # so skip the search entirely instead of burning the state budget.
    if initial_heuristic != float('inf'):
        heapq.heappush(open_list, (initial_cost + initial_heuristic, initial_cost,
                                   push_counter, tuple(sorted(initial_state.items()))))
        push_counter += 1

    # Candidate antecedent constraints: one per distinct customer and product.
    possible_features = [('customer_id', c) for c in df['customer_id'].unique()]
    possible_features += [('product_id', p) for p in df['product_id'].unique()]

    # The bar tracks expanded states, which is capped at 1000 below (the
    # candidate-feature count used previously is unrelated to that number).
    progress_bar = tqdm(total=1000, desc="搜索最优规则")
    rules_found = []

    while open_list:
        # Pop the open state with the smallest f value.
        current_f, current_cost, _, state_items = heapq.heappop(open_list)
        current_state = dict(state_items)

        state_tuple = tuple(sorted(current_state.items()))
        if state_tuple in closed_set:
            continue
        closed_set.add(state_tuple)

        support = calculate_support(df, current_state)
        confidence = calculate_confidence(df, current_state, target_consequent)

        # Record the state as a rule when it clears both thresholds.
        if support >= min_support and confidence >= min_confidence:
            rules_found.append({
                'antecedent': current_state.copy(),
                'consequent': target_consequent.copy(),
                'support': support,
                'confidence': confidence,
                'lift': calculate_lift(df, current_state, target_consequent),
            })

        # Expand: add one new column constraint per successor.
        for feature, value in possible_features:
            if feature in current_state:
                continue

            new_state = current_state.copy()
            new_state[feature] = value

            new_heuristic = heuristic_function(df, new_state, target_consequent)
            # Prune dead ends: zero support/confidence cannot recover by
            # adding further conditions (masks only narrow), so neither this
            # state nor any superset can ever become a valid rule.
            if new_heuristic == float('inf'):
                continue

            new_cost = current_cost + 1  # each added condition costs 1
            heapq.heappush(open_list, (new_cost + new_heuristic, new_cost,
                                       push_counter, tuple(sorted(new_state.items()))))
            push_counter += 1

        progress_bar.update(1)

        # Safety valves so the search always terminates quickly.
        if len(rules_found) >= 100 or len(closed_set) >= 1000:
            break

    progress_bar.close()
    return rules_found


def analyze_user_product_relations(rules):
    """Rank discovered rules and print the ten strongest.

    Rules are ordered by (confidence, lift) descending.

    Returns:
        list of summary dicts ('rule', 'support', 'confidence', 'lift') for
        the top rules, or None when *rules* is empty.
    """
    if not rules:
        print("没有找到与用户-商品相关的规则")
        return None

    ranked = sorted(rules, key=lambda r: (r['confidence'], r['lift']), reverse=True)

    print(f"\n找到{len(ranked)}条与用户-商品相关的规则")
    print("Top 10 关联规则:")

    summaries = []
    for position, rule in enumerate(ranked[:10], 1):
        conditions = ", ".join(f"{k}={v}" for k, v in rule['antecedent'].items())
        rule_text = f"{conditions} → {rule['consequent']}"

        print(f"{position}. {rule_text}")
        print(f"   支持度: {rule['support']:.4f}, 置信度: {rule['confidence']:.4f}, 提升度: {rule['lift']:.4f}")

        summaries.append({
            'rule': rule_text,
            'support': rule['support'],
            'confidence': rule['confidence'],
            'lift': rule['lift'],
        })

    return summaries


def visualize_factor_impact(top_rules):
    """Draw a horizontal bar chart of rule confidences for the top rules.

    Saves the figure to 'factor_impact.png' and displays it.  Does nothing
    (beyond a message) when *top_rules* is empty.
    """
    if not top_rules:
        print("没有足够的数据进行可视化")
        return

    labels = [entry['rule'] for entry in top_rules]
    scores = [entry['confidence'] for entry in top_rules]

    plt.figure(figsize=(12, 8))
    sns.barplot(x=scores, y=labels)
    plt.title('各因素对用户-商品关联的影响强度（Top 10）')
    plt.xlabel('置信度')
    plt.ylabel('关联规则')
    plt.tight_layout()
    plt.savefig('factor_impact.png')
    plt.show()


def main():
    """Script entry point: load data, mine rules, report and visualise them."""
    # Path to the transaction spreadsheet — replace with the real file.
    file_path = 'A_xxx.xlsx'

    df = load_and_preprocess_data(file_path)

    # Example target pair whose associations we want to explain.
    target_user_id = 1
    target_product_id = 101

    print("开始使用A*算法搜索最优关联规则...")
    rules = a_star_search(df, target_user_id, target_product_id,
                          min_support=0.01, min_confidence=0.3)

    top_rules = analyze_user_product_relations(rules)

    if top_rules:
        visualize_factor_impact(top_rules)


if __name__ == "__main__":
    main()