# apriori.py (Corrected Version)
from collections import defaultdict


def load_dataset():
    """
    Return a simulated supermarket market-basket dataset.

    Each inner list is one transaction: the set of item names bought
    together in a single checkout. Transactions vary in length from a
    single item to a dozen, which gives the Apriori run a realistic mix
    of sparse and dense baskets.
    """
    return [
        ['citrus fruit', 'semi-finished bread', 'margarine', 'ready soups'],
        ['tropical fruit', 'yogurt', 'coffee'],
        ['whole milk'],
        ['pip fruit', 'yogurt', 'cream cheese', 'meat spreads'],
        ['other vegetables', 'whole milk', 'condensed milk', 'long life bakery product'],
        ['whole milk', 'butter', 'yogurt', 'rice', 'abrasive cleaner'],
        ['rolls/buns'],
        ['other vegetables', 'UHT-milk', 'rolls/buns', 'bottled beer', 'liquor (appetizer)'],
        ['potted plants'],
        ['whole milk', 'cereals'],
        ['tropical fruit', 'other vegetables', 'white bread', 'bottled water', 'chocolate'],
        ['citrus fruit', 'tropical fruit', 'whole milk', 'butter', 'curd', 'yogurt', 'flour', 'bottled water',
         'dishes'],
        ['beef'],
        ['frankfurter', 'rolls/buns', 'soda'],
        ['chicken', 'tropical fruit'],
        ['butter', 'sugar', 'fruit/vegetable juice', 'newspapers'],
        ['fruit/vegetable juice'],
        ['packaged fruit/vegetables'],
        ['chocolate'],
        ['specialty bar'],
        ['other vegetables'],
        ['butter milk', 'pastry'],
        ['whole milk'],
        ['tropical fruit', 'cream cheese', 'processed cheese', 'detergent', 'newspapers'],
        ['tropical fruit', 'root vegetables', 'other vegetables', 'frozen dessert', 'rolls/buns', 'flour',
         'sweet spreads', 'salty snack', 'waffles', 'candy', 'bathroom cleaner'],
        ['bottled water', 'canned beer'],
        ['yogurt'],
        ['sausage', 'rolls/buns', 'soda', 'chocolate'],
        ['other vegetables'],
        ['brown bread', 'soda', 'fruit/vegetable juice', 'canned beer', 'newspapers', 'shopping bags'],
        ['yogurt', 'beverages', 'bottled water', 'specialty chocolate'],
        ['hamburger meat', 'other vegetables', 'rolls/buns', 'spices', 'bottled water', 'hygiene articles', 'napkins'],
        ['root vegetables', 'other vegetables', 'whole milk', 'beverages', 'sugar'],
        ['pork', 'berries', 'other vegetables', 'whole milk', 'whipped/sour cream', 'artif. sweetener', 'soda',
         'abrasive cleaner'],
        ['beef', 'grapes', 'detergent'],
        ['pastry', 'soda'],
        ['fruit/vegetable juice'],
        ['canned beer'],
        ['root vegetables', 'other vegetables', 'whole milk', 'dessert'],
        ['citrus fruit', 'zwieback', 'newspapers'],
        ['sausage', 'rolls/buns', 'soda', 'canned beer'],
        ['whole milk', 'rolls/buns', 'misc. beverages', 'salt', 'sugar', 'shopping bags'],
        ['butter', 'rolls/buns'],
        ['whole milk', 'sausage', 'long life bakery product', 'newspapers'],
        ['whole milk', 'curd', 'yogurt', 'processed cheese', 'bottled water', 'newspapers'],
        ['root vegetables', 'sweet spreads'],
        ['pastry'],
        ['pip fruit', 'coffee', 'canned beer'],
        ['other vegetables', 'cat food'],
        ['citrus fruit', 'whole milk', 'yogurt', 'sugar', 'specialty chocolate']
    ]


def create_C1(dataset):
    """
    Build the list of candidate 1-itemsets (C1), one frozenset per
    distinct item, sorted alphabetically by item name.

    Note: ``sorted()`` on frozensets directly does NOT produce a
    meaningful order — set ``<`` is proper-subset comparison, a partial
    order under which distinct singletons are incomparable, so the old
    ``sorted(list(C1))`` returned arbitrary (hash-dependent) order.
    Sorting by the contained item makes the order deterministic.
    """
    unique_items = {item for transaction in dataset for item in transaction}
    # min(s) extracts the single item from each 1-element frozenset.
    return sorted((frozenset([item]) for item in unique_items), key=lambda s: min(s))


def scan_D(dataset, Ck, min_support):
    """
    Count each candidate itemset in Ck over the dataset and keep those
    meeting the minimum support threshold.

    Returns a tuple ``(Lk, support_data)`` where ``Lk`` is the list of
    frequent candidates and ``support_data`` maps every counted
    candidate (frequent or not) to its support fraction.
    """
    baskets = [set(transaction) for transaction in dataset]
    counts = defaultdict(int)
    for basket in baskets:
        for candidate in Ck:
            if candidate <= basket:  # subset test via operator form
                counts[candidate] += 1

    total = float(len(baskets))
    support_data = {candidate: hits / total for candidate, hits in counts.items()}
    frequent = [candidate for candidate, support in support_data.items()
                if support >= min_support]
    return frequent, support_data


def apriori_gen(Lk, k):
    """
    Join step of Apriori: build candidate k-itemsets by merging pairs of
    frequent (k-1)-itemsets that share the same first k-2 items (in
    sorted order). The shared-prefix test ensures each candidate is
    generated exactly once.
    """
    # Precompute each itemset's sorted (k-2)-prefix once instead of
    # re-sorting inside the pair loop.
    prefixes = [sorted(itemset)[:k - 2] for itemset in Lk]
    candidates = set()
    for i, first in enumerate(Lk):
        for j in range(i + 1, len(Lk)):
            if prefixes[i] == prefixes[j]:
                candidates.add(first | Lk[j])
    return list(candidates)


def apriori(dataset, min_support=0.1):
    """
    Run the Apriori algorithm.

    Returns ``(L, support_data)`` where ``L[i]`` is the list of frequent
    (i+1)-itemsets (the final entry is an empty list, marking where
    generation stopped) and ``support_data`` maps each counted itemset
    to its support fraction.
    """
    initial_candidates = create_C1(dataset)
    frequent_1, support_data = scan_D(dataset, initial_candidates, min_support)
    L = [frequent_1]
    k = 2
    # Keep joining until no frequent itemsets survive at the current size.
    while L[-1]:
        candidates_k = apriori_gen(L[-1], k)
        frequent_k, support_k = scan_D(dataset, candidates_k, min_support)
        support_data.update(support_k)
        L.append(frequent_k)
        k += 1
    return L, support_data


# --- CORRECTED RULE GENERATION FUNCTIONS ---
def generate_rules(L, support_data, min_confidence=0.6):
    """
    Generate association rules from the frequent itemsets.

    Only itemsets with two or more items can form rules, so iteration
    starts at ``L[1]``. Each rule is a tuple
    ``(antecedent, consequent, confidence)``.

    Fix: the old body re-checked ``if (i > 0)`` inside the loop, but the
    range already starts at 1, so the guard was always true — removed.
    """
    rules = []
    for level in range(1, len(L)):  # L[0] holds 1-itemsets: no rules possible
        for freq_set in L[level]:
            # Start from all single-item consequents; larger consequents
            # are built recursively from the ones that pass.
            single_consequents = [frozenset([item]) for item in freq_set]
            _rules_from_conseq(freq_set, single_consequents, support_data, rules, min_confidence)
    return rules


def _rules_from_conseq(freq_set, H, support_data, rules, min_confidence):
    """
    Recursively generate rules for one frequent itemset.

    freq_set: the frequent itemset (frozenset) the rules are drawn from.
    H: list of candidate consequents, all of the same size m.
    support_data: dict mapping itemset -> support fraction.
    rules: output list; tuples (antecedent, consequent, confidence)
           are appended in place.
    min_confidence: confidence threshold a rule must meet to be kept.
    """
    m = len(H[0])
    # First, evaluate every candidate consequent currently in H.
    pruned_H = []
    for consequent in H:
        antecedent = freq_set - consequent
        # Guard: skip if the antecedent's support is missing or zero to
        # avoid a division error.
        if support_data.get(antecedent, 0) > 0:
            confidence = support_data[freq_set] / support_data[antecedent]
            if confidence >= min_confidence:
                rules.append((antecedent, consequent, confidence))
                pruned_H.append(consequent)  # keep only consequents that passed
    # Recurse to larger consequents only when (a) the itemset is big enough
    # to leave a non-empty antecedent (len > m + 1) and (b) at least two
    # surviving consequents exist for apriori_gen's join step to merge.
    if (len(freq_set) > m + 1) and (len(pruned_H) > 1):
        H_next = apriori_gen(pruned_H, m + 1)
        if H_next:
            _rules_from_conseq(freq_set, H_next, support_data, rules, min_confidence)


# --- END OF CORRECTION ---

# --- Main entry point ---
if __name__ == '__main__':
    transactions = load_dataset()

    # Parameters chosen for this dataset size.
    min_support = 0.08
    min_confidence = 0.5

    print(f"数据集记录数: {len(transactions)}")
    print(f"最小支持度: {min_support}")
    print(f"最小置信度: {min_confidence}\n")

    frequent_levels, support_data = apriori(transactions, min_support)

    print("--- 发现的频繁项集 ---")
    for level, itemsets in enumerate(frequent_levels):
        if not itemsets:
            continue
        print(f"  {level + 1}-项频繁项集 (数量: {len(itemsets)}):")
        # Show at most five itemsets per size to keep output readable.
        for itemset in itemsets[:5]:
            print(f"    {set(itemset)}: 支持度 = {support_data[itemset]:.3f}")

    found_rules = generate_rules(frequent_levels, support_data, min_confidence)

    if found_rules:
        print(f"\n--- 发现的强关联规则 (共 {len(found_rules)} 条) ---")
        # Strongest rules first.
        found_rules.sort(key=lambda rule: rule[2], reverse=True)
        for antecedent, consequent, confidence in found_rules:
            print(f"  规则: {set(antecedent)} -> {set(consequent)}  (置信度: {confidence:.2f})")
    else:
        print("\n--- 未发现满足最小置信度的强关联规则 ---")