from collections import defaultdict, Counter

class Apriori:
    """Apriori frequent-itemset mining.

    Finds all itemsets whose support (fraction of transactions containing
    the itemset) is at least ``min_support``.
    """

    def __init__(self, min_support=0.3):
        self.min_support = min_support  # minimum support as a ratio in [0, 1]
        self.frequent_itemsets = []     # list of levels: [L1, L2, ...]

    def generate_c1(self, dataset):
        """Build the candidate 1-itemsets C1 as sorted frozensets.

        frozensets are hashable, so they can serve as dict keys later.
        """
        distinct_items = sorted({item for transaction in dataset for item in transaction})
        return [frozenset([item]) for item in distinct_items]

    def scan_dataset(self, dataset, candidate_itemsets):
        """Count candidates over the dataset and keep the frequent ones.

        Returns:
            (frequent_items, support_data) where support_data maps every
            candidate that appeared at least once to its support ratio.
        """
        item_count = defaultdict(int)
        for transaction in dataset:
            for candidate in candidate_itemsets:
                if candidate.issubset(transaction):
                    item_count[candidate] += 1
        total_transactions = len(dataset)
        frequent_items = []
        support_data = {}
        for key, count in item_count.items():
            support = count / total_transactions
            if support >= self.min_support:
                # append (order is irrelevant); the original insert(0, ...)
                # made this loop accidentally quadratic
                frequent_items.append(key)
            support_data[key] = support
        return frequent_items, support_data

    def apriori_gen(self, frequent_k_items):
        """Join step: generate candidate (k+1)-itemsets from frequent k-itemsets.

        Two k-itemsets are joined iff their first k-1 elements (in sorted
        order) agree.
        """
        candidate_k1_items = []
        seen = set()  # guard against emitting the same union twice
        k = len(frequent_k_items[0]) if frequent_k_items else 0
        for i in range(len(frequent_k_items)):
            for j in range(i + 1, len(frequent_k_items)):
                # BUG FIX: sort the whole itemset *before* slicing.  The
                # original sliced list(frozenset)[:k-1], but frozenset
                # iteration order is arbitrary, so equal prefixes could
                # compare unequal and valid candidates could be missed.
                prefix_i = sorted(frequent_k_items[i])[:k - 1]
                prefix_j = sorted(frequent_k_items[j])[:k - 1]
                if prefix_i == prefix_j:
                    union = frequent_k_items[i] | frequent_k_items[j]
                    if union not in seen:
                        seen.add(union)
                        candidate_k1_items.append(union)
        return candidate_k1_items

    def fit(self, dataset):
        """Run Apriori over ``dataset`` (an iterable of transactions).

        Returns:
            (frequent_itemsets, support_data) — levels L1, L2, ... (the last
            level is always empty) and the support of every counted candidate.
        """
        c1 = self.generate_c1(dataset)
        dataset = list(map(set, dataset))  # sets make issubset checks cheap
        L1, support_data = self.scan_dataset(dataset, c1)
        self.frequent_itemsets = [L1]
        k = 1
        # keep growing itemsets until no level-k itemset is frequent
        while len(self.frequent_itemsets[k - 1]) > 0:
            Ck = self.apriori_gen(self.frequent_itemsets[k - 1])
            Lk, supK = self.scan_dataset(dataset, Ck)
            support_data.update(supK)
            self.frequent_itemsets.append(Lk)
            k += 1
        return self.frequent_itemsets, support_data

def load_dataset(file_path):
    """Read one whitespace-separated transaction per line; skip blank lines."""
    with open(file_path, 'r', encoding='utf-8') as f:
        return [tokens for tokens in (raw_line.split() for raw_line in f) if tokens]

# Demo entry point
if __name__ == "__main__":
    # Load the transactions
    dataset_path = "datasets/market_basket.txt"
    transactions = load_dataset(dataset_path)

    # Run Apriori with a minimum support ratio of 0.3
    miner = Apriori(min_support=0.3)
    frequent_itemsets, support_data = miner.fit(transactions)

    # Report every non-empty level of frequent itemsets
    print("Apriori算法频繁项集（最小支持度0.3）：")
    for level_index, level in enumerate(frequent_itemsets):
        if not level:
            continue
        print(f"\n{level_index+1}-项频繁集：")
        for itemset in level:
            print(f"项集：{set(itemset)}，支持度：{support_data[itemset]:.2f}")