import os
from collections import defaultdict

import numpy as np
import pandas as pd

class Apriori:
    """Mine frequent itemsets with the classic Apriori algorithm.

    Attributes:
        min_support: minimum fraction of transactions an itemset must appear
            in to be considered frequent (value in [0, 1]).
        frequent_itemsets: after ``fit``, a list of levels where
            ``frequent_itemsets[k-1]`` holds the frequent k-itemsets
            (each itemset is a sorted list of items).
        support_dict: after ``fit``, maps each frequent itemset (as a sorted
            tuple) to its support value.
    """

    def __init__(self, min_support=0.2):
        self.min_support = min_support
        self.frequent_itemsets = []

    def _scan_dataset(self, dataset, candidate_itemsets):
        """Count each candidate's occurrences and keep those meeting min_support.

        Args:
            dataset: iterable of transactions (each an iterable of items).
            candidate_itemsets: list of candidate itemsets (sorted lists).

        Returns:
            (frequent_items, support_dict) where frequent_items is a list of
            itemsets (lists) and support_dict maps itemset tuples to support.
        """
        item_count = defaultdict(int)
        for transaction in dataset:
            # Hoist the set conversion out of the candidate loop: the original
            # rebuilt set(transaction) once per candidate.
            transaction_set = set(transaction)
            for candidate in candidate_itemsets:
                if transaction_set.issuperset(candidate):
                    item_count[tuple(candidate)] += 1

        frequent_items = []
        support_dict = {}
        total_transactions = len(dataset)
        if total_transactions == 0:
            # Guard: no transactions means nothing can be frequent.
            return frequent_items, support_dict
        for item, count in item_count.items():
            support = count / total_transactions
            if support >= self.min_support:
                frequent_items.append(list(item))
                support_dict[item] = support
        return frequent_items, support_dict

    def _create_candidates(self, frequent_itemsets, k):
        """Build candidate k-itemsets from the frequent (k-1)-itemsets.

        Join step: merge two (k-1)-itemsets that share their first k-2 items.
        Prune step: discard any candidate with an infrequent (k-1)-subset
        (Apriori's downward-closure property).
        """
        candidates = []
        n = len(frequent_itemsets)
        # Itemsets are maintained sorted, so their tuple form is canonical;
        # a set gives O(1) prune lookups instead of O(n) list membership.
        frequent_lookup = {tuple(itemset) for itemset in frequent_itemsets}
        for i in range(n):
            for j in range(i + 1, n):
                prefix_i = sorted(frequent_itemsets[i][:k - 2])
                prefix_j = sorted(frequent_itemsets[j][:k - 2])
                if prefix_i == prefix_j:
                    candidate = sorted(set(frequent_itemsets[i]) | set(frequent_itemsets[j]))
                    # Keep the candidate only if every (k-1)-subset is frequent.
                    if all(
                        tuple(candidate[:idx] + candidate[idx + 1:]) in frequent_lookup
                        for idx in range(len(candidate))
                    ):
                        candidates.append(candidate)
        return candidates

    def fit(self, dataset):
        """Mine all frequent itemsets from ``dataset``.

        Args:
            dataset: iterable of transactions (each an iterable of items).

        Returns:
            (frequent_itemsets, support_dict) as described on the class.
        """
        # Reset state so repeated fit() calls do not accumulate stale levels
        # (previously a second call appended onto the old results).
        self.frequent_itemsets = []

        all_items = sorted({item for transaction in dataset for item in transaction})
        candidate_1 = [[item] for item in all_items]
        frequent_1, all_support = self._scan_dataset(dataset, candidate_1)
        self.frequent_itemsets.append(frequent_1)

        k = 2
        while self.frequent_itemsets[k - 2]:
            candidate_k = self._create_candidates(self.frequent_itemsets[k - 2], k)
            if not candidate_k:
                break
            frequent_k, support_k = self._scan_dataset(dataset, candidate_k)
            self.frequent_itemsets.append(frequent_k)
            all_support.update(support_k)
            k += 1

        # Drop the trailing empty level produced when the last scan found nothing.
        self.frequent_itemsets = [level for level in self.frequent_itemsets if level]
        self.support_dict = all_support
        return self.frequent_itemsets, self.support_dict

def load_grocery_dataset():
    """Load grocery transactions from 'grocery.csv', generating the file first
    if it does not exist.

    The synthetic dataset contains 1000 transactions of 1-5 distinct items,
    reproducible via a fixed NumPy seed.

    Returns:
        list[list[str]]: one list of item names per transaction.
    """
    # Bug fix: this function used os.path.exists but `os` was never imported,
    # raising NameError on every call.
    file_path = 'grocery.csv'
    if not os.path.exists(file_path):
        np.random.seed(42)  # reproducible synthetic data
        items = ['牛奶', '面包', '鸡蛋', '可乐', '薯片', '饼干', '水果', '蔬菜', '肉类', '大米']
        dataset = []
        for _ in range(1000):
            transaction_size = np.random.randint(1, 6)  # 1..5 distinct items
            transaction = np.random.choice(items, size=transaction_size, replace=False).tolist()
            dataset.append(transaction)
        pd.DataFrame(dataset).to_csv(file_path, index=False, header=False)
    else:
        # Rows are ragged, so pandas pads short rows with NaN; drop the padding.
        df = pd.read_csv(file_path, header=None)
        dataset = df.apply(lambda x: x.dropna().tolist(), axis=1).tolist()
    return dataset

if __name__ == "__main__":
    # Mine the grocery transactions and report every frequent itemset,
    # grouped by itemset size, together with its support.
    transactions = load_grocery_dataset()
    miner = Apriori(min_support=0.1)
    levels, supports = miner.fit(transactions)
    print("所有频繁项集：")
    for size, level in enumerate(levels, 1):
        print(f"\n{size}-项集：")
        for itemset in level:
            support = supports[tuple(itemset)]
            print(f"{itemset} 支持度：{support:.4f}")