import os
import random
from collections import defaultdict
from itertools import combinations

# Itemsets are frozensets so they are hashable and usable as dict keys
ItemSet = frozenset

class Apriori:
    """Apriori frequent-itemset mining over a list of transactions.

    Transactions are sets of item names; supports are fractions of the
    total number of transactions.
    """

    def __init__(self, min_support=0.2):
        self.min_support = min_support  # minimum support threshold (fraction in [0, 1])
        self.frequent_itemsets = []     # list of levels; level i holds (itemset, support) pairs of size i+1
        self.transactions = []          # list of sets of item names

    def load_transactions(self, file_path):
        """Load transactions from a file (one transaction per line, items whitespace-separated).

        Blank lines are skipped. Returns self to allow call chaining.
        """
        self.transactions = []
        with open(file_path, 'r') as f:
            for line in f:
                items = line.strip().split()
                if items:
                    self.transactions.append(set(items))
        print(f"加载{len(self.transactions)}条交易数据")
        return self

    def generate_transactions(self, n_trans=1000, n_items=20):
        """Generate simulated transactions, each holding 2-5 random items.

        Returns self to allow call chaining.
        """
        random.seed(42)  # fixed seed so the simulated data is reproducible
        item_pool = [f"商品{i}" for i in range(1, n_items + 1)]
        self.transactions = [
            set(random.sample(item_pool, random.randint(2, 5)))
            for _ in range(n_trans)
        ]
        print(f"生成{len(self.transactions)}条模拟交易数据")
        return self

    def _get_frequent_itemsets(self, candidates):
        """Return [(itemset, support)] for candidates with support >= min_support."""
        counts = defaultdict(int)
        total = len(self.transactions)

        # Count: number of transactions containing each candidate itemset.
        for trans in self.transactions:
            for cand in candidates:
                if cand.issubset(trans):
                    counts[cand] += 1

        # Filter: compute each support exactly once (the original divided twice).
        frequent = []
        for itemset, count in counts.items():
            support = count / total
            if support >= self.min_support:
                frequent.append((itemset, support))
        return frequent

    def _generate_candidates(self, frequent_k, k):
        """Generate (k+1)-item candidates from the k-frequent itemsets.

        frequent_k is a list of (itemset, support) pairs of size k.
        """
        candidates = []
        n = len(frequent_k)

        # Join step: merge two k-itemsets whose first k-1 sorted items agree;
        # the union then has exactly k+1 items.
        for i in range(n):
            items_i = sorted(frequent_k[i][0])
            for j in range(i + 1, n):
                items_j = sorted(frequent_k[j][0])
                if items_i[:k - 1] == items_j[:k - 1]:
                    candidates.append(frequent_k[i][0] | frequent_k[j][0])

        # Prune step: drop any candidate with a non-frequent k-subset.
        # Build the membership set ONCE — the original rebuilt a list for
        # every subset test (O(len(frequent_k)) per membership check) and
        # re-ran `from itertools import combinations` inside the loop.
        frequent_set = {itemset for itemset, _ in frequent_k}
        pruned = []
        for cand in candidates:
            if all(frozenset(sub) in frequent_set for sub in combinations(cand, k)):
                pruned.append(cand)
        return pruned

    def mine(self):
        """Mine all frequent itemsets, print them, and return them by level.

        Returns a list of levels; level i contains (itemset, support) pairs
        of size i+1. Raises ValueError if no transactions are loaded.
        """
        if not self.transactions:
            raise ValueError("请先加载或生成交易数据")

        # 1-frequent itemsets (single items); set comprehension de-dups directly.
        c1 = {frozenset([item]) for trans in self.transactions for item in trans}
        frequent_1 = self._get_frequent_itemsets(list(c1))
        if not frequent_1:
            print("无满足最小支持度的1-频繁项集")
            return []

        self.frequent_itemsets = [frequent_1]
        k = 1

        # Iteratively build higher-order frequent itemsets.
        while True:
            k += 1
            candidates = self._generate_candidates(self.frequent_itemsets[-1], k - 1)
            if not candidates:
                break
            frequent_k = self._get_frequent_itemsets(candidates)
            if not frequent_k:
                # Bug fix: the original appended the empty level before its
                # while-condition fired, leaving a trailing empty list in the
                # result and an empty level in the printout below.
                break
            self.frequent_itemsets.append(frequent_k)

        # Report results.
        print(f"\n共挖掘到{len(self.frequent_itemsets)}阶频繁项集：")
        for i, itemsets in enumerate(self.frequent_itemsets):
            print(f"\n{i+1}-项集（{len(itemsets)}个）：")
            for itemset, support in itemsets:
                print(f"项集：{set(itemset)}，支持度：{support:.4f}")

        return self.frequent_itemsets


# Demo entry point
if __name__ == "__main__":
    # Both generate_transactions() and load_transactions() return self,
    # so the whole pipeline chains: build 1000 simulated transactions over
    # 20 items, then mine itemsets with support >= 0.15.
    Apriori(min_support=0.15).generate_transactions(n_trans=1000, n_items=20).mine()