import numpy as np
from collections import defaultdict
import warnings

# Suppress all warnings globally.
# NOTE(review): a blanket "ignore" hides real problems (e.g. numpy parse
# warnings) — consider narrowing to specific categories.
warnings.filterwarnings("ignore")

# Load the dataset from a CSV file.
def load_dataset(filename):
    """Read a comma-separated file and return its rows as a string ndarray.

    Args:
        filename: Path to a CSV file.

    Returns:
        A numpy array of dtype str, one row per line of the file.
    """
    return np.loadtxt(filename, delimiter=',', dtype=str)

# Compute the support of an itemset.
def calculate_support(item, transactions):
    """Return the fraction of transactions that contain every element of *item*.

    Args:
        item: An iterable of items (typically a frozenset).
        transactions: A list of transactions, each an iterable of items.

    Returns:
        A float in [0, 1]; 0.0 when *transactions* is empty (the original
        raised ZeroDivisionError in that case).
    """
    if not transactions:
        return 0.0
    # Hoist the set conversion out of the loop — it is loop-invariant,
    # and issubset accepts any iterable, so transactions need no conversion.
    item_set = set(item)
    count = sum(1 for transaction in transactions if item_set.issubset(transaction))
    return count / len(transactions)

# Apriori algorithm
def apriori(transactions, min_support):
    """Level-wise mining of all frequent itemsets.

    Args:
        transactions: A list of transactions, each an iterable of items.
        min_support: Minimum support threshold in [0, 1].

    Returns:
        A list of frozensets, each with support >= min_support, with no
        duplicates (the original could append the same itemset twice when
        a candidate union reproduced an already-frequent set).
    """
    large_items = []
    seen = set()  # every itemset already evaluated as frequent
    # Level 1: all distinct single items across all transactions.
    current_items = list({frozenset([item])
                          for transaction in transactions
                          for item in transaction})
    print("Initial items:", current_items)

    while current_items:
        current_support = [item for item in current_items
                           if calculate_support(item, transactions) >= min_support]
        # Only record itemsets not seen on a previous level.
        large_items.extend(item for item in current_support if item not in seen)
        seen.update(current_support)
        print("Large items:", current_support)

        if not current_support:
            break

        # Skip candidates already counted — prevents re-evaluating (and
        # re-appending) itemsets the join step can regenerate.
        current_items = [c for c in generate_candidates(current_support)
                         if c not in seen]

    return large_items

# Generate candidate itemsets (Apriori join step).
def generate_candidates(current_items):
    """Join frequent k-itemsets into candidate (k+1)-itemsets.

    The classic Apriori join: two k-itemsets combine only when they share
    k-1 items, so every candidate has exactly k+1 elements. The original
    unioned arbitrary pairs, producing candidates of size up to 2k, which
    skipped levels and could regenerate already-frequent sets.

    Args:
        current_items: Frequent itemsets (frozensets) of the current level.

    Returns:
        A set of frozensets, each one element larger than its operands.
    """
    candidates = set()
    items = list(current_items)
    for i in range(len(items)):
        for j in range(i + 1, len(items)):
            union = items[i] | items[j]
            # Keep only equal-size operands whose union grows by exactly one.
            if len(items[i]) == len(items[j]) and len(union) == len(items[i]) + 1:
                candidates.add(union)
    return candidates

# Entry point.
def main():
    """Load the iris CSV, run Apriori, and print the frequent itemsets."""
    dataset_path = 'iris.data'  # make sure the file path is correct
    raw_rows = load_dataset(dataset_path)
    # Treat each CSV row as one transaction of string-valued items.
    transactions = [[str(value) for value in row] for row in raw_rows]
    support_threshold = 0.1  # minimum support threshold
    frequent = apriori(transactions, support_threshold)
    print("Frequent Itemsets:", frequent)

# Run only when executed as a script, not when imported.
if __name__ == "__main__":
    main()