def calculate_support(data, itemsets):
    """Compute the support of each itemset over the transaction database.

    Args:
        data: list of transactions, each a set-like collection of items.
        itemsets: iterable of candidate itemsets (sets of items).

    Returns:
        dict mapping frozenset(itemset) -> support, where support is the
        fraction of transactions that contain the itemset.  When `data`
        is empty the support is defined as 0.0 (the original code would
        raise ZeroDivisionError here).
    """
    support_dict = {}
    total = len(data)
    for itemset in itemsets:
        # frozenset is hashable, so it can serve as a dict key.
        key = frozenset(itemset)
        count = sum(1 for transaction in data if key.issubset(transaction))
        support_dict[key] = count / total if total else 0.0
    return support_dict


def generate_candidates(prev_itemsets, k):
    """Build candidate k-itemsets from the frequent (k-1)-itemsets.

    Join step: two (k-1)-itemsets are merged when their first k-2 sorted
    items coincide (for k=2 the prefix is empty, so every pair joins).
    Prune step: by the Apriori downward-closure property, a candidate
    whose (k-1)-subsets are not all frequent is discarded.

    Returns a set of frozensets of size k.
    """
    prev = list(prev_itemsets)
    joined = set()
    # --- Join step ---
    for idx, left in enumerate(prev):
        left_sorted = sorted(left)
        for right in prev[idx + 1:]:
            right_sorted = sorted(right)
            if left_sorted[:k - 2] != right_sorted[:k - 2]:
                continue
            merged = frozenset(left) | frozenset(right)
            if len(merged) == k:
                joined.add(merged)
    # --- Prune step ---
    survivors = set()
    for candidate in joined:
        # Each (k-1)-subset is obtained by dropping one element.
        if all(candidate - {item} in prev_itemsets for item in candidate):
            survivors.add(candidate)
    return survivors


def apriori_frequent_itemsets(data, min_support):
    """Mine all frequent itemsets with the level-wise Apriori algorithm.

    Args:
        data: list of transactions (sets of items).
        min_support: minimum support threshold in [0, 1].

    Returns:
        (frequent_itemsets, support_dict) where frequent_itemsets maps
        size k -> set of frequent k-itemsets, and support_dict maps each
        evaluated itemset (frozenset) to its support.
    """
    # Level 1: every distinct item is a singleton candidate.
    universe = set()
    for transaction in data:
        universe.update(transaction)
    singletons = {frozenset([item]) for item in universe}
    support_dict = calculate_support(data, singletons)
    level = {s for s in singletons if support_dict[s] >= min_support}
    frequent_itemsets = {1: level}

    # Levels k >= 2: join, count, filter — stop when a level comes up empty.
    k = 2
    while frequent_itemsets[k - 1]:
        candidates = generate_candidates(frequent_itemsets[k - 1], k)
        if not candidates:
            break
        candidate_support = calculate_support(data, candidates)
        level = {c for c in candidates if candidate_support[c] >= min_support}
        if not level:
            break
        frequent_itemsets[k] = level
        support_dict.update(candidate_support)
        k += 1
    return frequent_itemsets, support_dict


def generate_rules(frequent_itemsets, support_dict, min_confidence):
    """Derive association rules A -> B from the frequent itemsets.

    A rule is kept when confidence(A -> B) = support(A ∪ B) / support(A)
    reaches `min_confidence`.  Only itemsets of size >= 2 are considered,
    since a rule needs a non-empty antecedent and consequent.

    Args:
        frequent_itemsets: dict mapping size k -> set of frequent k-itemsets.
        support_dict: dict mapping frozenset itemsets to their support.
        min_confidence: minimum confidence threshold in [0, 1].

    Returns:
        list of dicts with keys 'antecedents', 'consequents',
        'support', and 'confidence'.
    """
    # Hoisted out of the triply-nested loop where the original code
    # re-executed the import on every antecedent size.
    from itertools import combinations

    rules = []
    for k, itemsets in frequent_itemsets.items():
        if k < 2:  # singletons cannot be split into A and B
            continue
        for itemset in itemsets:
            items = list(itemset)
            # Every non-empty proper subset of the itemset is a candidate
            # antecedent; its complement is the (always non-empty) consequent.
            for size in range(1, len(items)):
                for antecedent in combinations(items, size):
                    antecedent = frozenset(antecedent)
                    consequent = itemset - antecedent
                    # confidence(A -> B) = support(A ∪ B) / support(A)
                    support_ab = support_dict[itemset]
                    confidence = support_ab / support_dict[antecedent]
                    if confidence >= min_confidence:
                        rules.append({
                            'antecedents': set(antecedent),
                            'consequents': set(consequent),
                            'support': support_ab,
                            'confidence': confidence,
                        })
    return rules


# Entry point: run the Apriori algorithm on a small demo dataset.
if __name__ == "__main__":
    # Transaction database (each transaction is a set of items).
    transactions = [
        {'牛奶', '面包', '尿布'},
        {'可乐', '面包', '尿布', '啤酒'},
        {'牛奶', '尿布', '啤酒', '鸡蛋'},
        {'面包', '牛奶', '尿布', '啤酒'},
        {'面包', '牛奶', '尿布', '鸡蛋'}
    ]

    # Mining thresholds.
    support_threshold = 0.6       # minimum support
    confidence_threshold = 0.7    # minimum confidence

    # Mine the frequent itemsets and report them with their supports.
    itemsets_by_size, supports = apriori_frequent_itemsets(
        transactions, support_threshold)
    print("===== 频繁项集 =====")
    for size in sorted(itemsets_by_size):
        for itemset in itemsets_by_size[size]:
            print(f"{set(itemset)}  支持度: {supports[itemset]:.2f}")

    # Derive and report the association rules.
    found_rules = generate_rules(itemsets_by_size, supports,
                                 confidence_threshold)
    print("\n===== 关联规则 =====")
    for rule in found_rules:
        print(f"{rule['antecedents']} → {rule['consequents']}  "
              f"支持度: {rule['support']:.2f}, 置信度: {rule['confidence']:.2f}")