from collections import defaultdict, Counter

class Apriori:
    """Mine frequent itemsets and association rules with the Apriori algorithm.

    Transactions are sets of item strings; itemsets are stored as
    ``frozenset`` so they can be used as dict keys.
    """

    def __init__(self, min_support=0.2, min_confidence=0.7):
        """
        Args:
            min_support: minimum fraction of transactions (0..1] an itemset
                must appear in to count as frequent.
            min_confidence: minimum confidence for an emitted rule.
        """
        self.min_support = min_support
        self.min_confidence = min_confidence
        # Maps itemset size k -> {frozenset(items): support}.
        self.frequent_itemsets = {}
        self.transactions = []
        # Initialized here so calculate_support() is safe (returns 0.0)
        # even if load_data() was never called.
        self.total_transactions = 0

    def load_data(self, data_path, encoding='gbk'):
        """Load one comma-separated transaction per line from *data_path*.

        Args:
            data_path: path of the transaction file.
            encoding: file encoding; defaults to 'gbk' for backward
                compatibility with the original data files.

        Raises:
            FileNotFoundError, UnicodeDecodeError: propagated from ``open``.
        """
        # Reset state so a retried load (e.g. after a decode error) does
        # not accumulate transactions left over from the failed attempt.
        self.transactions = []
        with open(data_path, 'r', encoding=encoding) as f:
            for line in f:
                line = line.strip()
                # Skip blank lines: they would otherwise become the bogus
                # transaction {''} and inflate the transaction count.
                if line:
                    self.transactions.append(set(line.split(',')))
        self.total_transactions = len(self.transactions)
        print(f"加载完成，共{self.total_transactions}条事务")

    def calculate_support(self, itemset):
        """Return the fraction of transactions that contain *itemset*."""
        if not self.total_transactions:
            return 0.0  # empty dataset: avoid ZeroDivisionError
        count = sum(1 for transaction in self.transactions
                    if itemset.issubset(transaction))
        return count / self.total_transactions

    def generate_L1(self):
        """Return the frequent 1-itemsets as {frozenset([item]): support}."""
        item_counts = Counter()
        for transaction in self.transactions:
            item_counts.update(transaction)
        return {
            frozenset([item]): count / self.total_transactions
            for item, count in item_counts.items()
            if count / self.total_transactions >= self.min_support
        }

    def join(self, Lk_1, k):
        """Apriori join step: build the k-element candidate set from Lk-1.

        Two frequent (k-1)-itemsets whose sorted items agree on the first
        k-2 positions are unioned into one k-element candidate.
        """
        Ck = set()
        Lk_1_list = list(Lk_1.keys())
        # Sort each itemset once instead of inside the inner loop.
        sorted_itemsets = [sorted(itemset) for itemset in Lk_1_list]
        for i in range(len(Lk_1_list)):
            prefix_i = sorted_itemsets[i][:k - 2]
            for j in range(i + 1, len(Lk_1_list)):
                if prefix_i == sorted_itemsets[j][:k - 2]:
                    Ck.add(Lk_1_list[i] | Lk_1_list[j])
        return Ck

    def prune(self, Ck, Lk_1, k):
        """Drop candidates with any infrequent (k-1)-subset (Apriori property)."""
        return {
            itemset for itemset in Ck
            if all(frozenset(subset) in Lk_1
                   for subset in self.get_subsets(itemset, k - 1))
        }

    def get_subsets(self, itemset, m):
        """Return every m-element subset of *itemset* as a list of lists."""
        itemset_list = list(itemset)
        n = len(itemset_list)
        subsets = []
        # Enumerate non-empty bitmasks; keep those with exactly m bits set.
        for mask in range(1, 1 << n):
            if bin(mask).count('1') == m:
                subsets.append([itemset_list[j] for j in range(n)
                                if (mask >> j) & 1])
        return subsets

    def generate_frequent_itemsets(self):
        """Fill ``self.frequent_itemsets`` level by level until no Lk survives."""
        L1 = self.generate_L1()
        if not L1:
            print("无满足最小支持度的1-频繁项集")
            return
        self.frequent_itemsets[1] = L1
        k = 2
        while True:
            Ck = self.join(self.frequent_itemsets[k - 1], k)
            if not Ck:
                break
            pruned_Ck = self.prune(Ck, self.frequent_itemsets[k - 1], k)
            if not pruned_Ck:
                break
            # Keep only candidates that actually meet min_support.
            Lk = {}
            for itemset in pruned_Ck:
                support = self.calculate_support(itemset)
                if support >= self.min_support:
                    Lk[itemset] = support
            if not Lk:
                break
            self.frequent_itemsets[k] = Lk
            k += 1
        print("频繁项集挖掘完成")

    def generate_association_rules(self):
        """Return rules as (antecedent, consequent, support, confidence) tuples.

        Every non-empty proper subset of each frequent k-itemset (k >= 2)
        is tried as an antecedent; by the Apriori property its support is
        already recorded in ``self.frequent_itemsets``.
        """
        rules = []
        for k in self.frequent_itemsets:
            if k < 2:
                continue  # rules need at least 2 items
            for itemset in self.frequent_itemsets[k]:
                for i in range(1, k):
                    for antecedent in self.get_subsets(itemset, i):
                        antecedent = frozenset(antecedent)
                        consequent = itemset - antecedent
                        if not consequent:
                            continue
                        support_itemset = self.frequent_itemsets[k][itemset]
                        support_antecedent = self.frequent_itemsets[len(antecedent)][antecedent]
                        confidence = support_itemset / support_antecedent
                        if confidence >= self.min_confidence:
                            rules.append((antecedent, consequent,
                                          support_itemset, confidence))
        return rules

if __name__ == "__main__":
    DATA_PATH = "transactions.txt"
    MIN_SUPPORT = 0.2
    MIN_CONFIDENCE = 0.7

    apriori = Apriori(min_support=MIN_SUPPORT, min_confidence=MIN_CONFIDENCE)

    try:
        apriori.load_data(DATA_PATH)
    except FileNotFoundError:
        print(f"错误：未找到文件{DATA_PATH}，请确认文件路径正确")
        # exit() depends on the site module; SystemExit with a non-zero
        # code is the explicit way to abort a script on error.
        raise SystemExit(1)
    except UnicodeDecodeError:
        print("gbk编码解析失败，尝试utf-8编码...")
        # BUG FIX: drop the transactions that were appended before the gbk
        # decode failed mid-file, otherwise the utf-8 retry below would
        # duplicate them and inflate every support value.
        apriori.transactions = []
        # Plain inline retry instead of monkey-patching a bound method.
        with open(DATA_PATH, 'r', encoding='utf-8') as f:
            for line in f:
                apriori.transactions.append(set(line.strip().split(',')))
        apriori.total_transactions = len(apriori.transactions)
        print(f"加载完成，共{apriori.total_transactions}条事务")

    apriori.generate_frequent_itemsets()

    print("\n" + "="*50)
    print("频繁项集结果")
    print("="*50)
    if not apriori.frequent_itemsets:
        print("未挖掘到任何频繁项集")
    else:
        for k, itemsets in apriori.frequent_itemsets.items():
            print(f"\n{k}-频繁项集（共{len(itemsets)}个）：")
            for itemset, support in itemsets.items():
                print(f"项集：{set(itemset)} | 支持度：{support:.4f}")

    rules = apriori.generate_association_rules()
    print("\n" + "="*50)
    print("关联规则结果（前件→后件）")
    print("="*50)
    if not rules:
        print("未挖掘到满足最小置信度的关联规则")
    else:
        for idx, (antecedent, consequent, support, confidence) in enumerate(rules, 1):
            print(f"规则{idx}：{set(antecedent)} → {set(consequent)}")
            print(f"  支持度：{support:.4f} | 置信度：{confidence:.4f}\n")
