import itertools
from collections import defaultdict
import pandas as pd


class AprioriAlgorithm:
    """Apriori algorithm for mining frequent itemsets (association rules)."""

    @staticmethod
    def generate_candidates(transactions, k):
        """
        Generate candidate k-itemsets observed in the given transactions.

        :param transactions: list of transactions, each a list of items.
        :param k: size of the itemsets to generate.
        :return: list of candidate itemsets, each a sorted tuple.
        """
        candidate_set = set()
        for transaction in transactions:
            # sorted() gives every itemset a canonical order, so the same
            # itemset seen in different transactions collapses to one tuple.
            candidate_set.update(itertools.combinations(sorted(transaction), k))
        return list(candidate_set)

    @staticmethod
    def calculate_support(transactions, candidates):
        """
        Compute the support (relative frequency) of each candidate itemset.

        :param transactions: list of transactions.
        :param candidates: list of candidate itemsets (tuples).
        :return: dict mapping each candidate to its support in [0, 1].
        :raises ZeroDivisionError: if *transactions* is empty.
        """
        total_transactions = len(transactions)
        # Hoist the per-transaction set conversion out of the candidate loop;
        # the original rebuilt set(transaction) once per candidate.
        transaction_sets = [set(t) for t in transactions]
        support_dict = {}
        for candidate in candidates:
            candidate_set = set(candidate)
            count = sum(1 for ts in transaction_sets if candidate_set.issubset(ts))
            support_dict[candidate] = count / total_transactions
        return support_dict

    def find_frequent_itemsets(self, transactions, min_support):
        """
        Apply the Apriori algorithm to discover all frequent itemsets.

        :param transactions: list of transactions.
        :param min_support: minimum support threshold in [0, 1].
        :return: list of dicts, one per level k (starting at k=1), each
                 mapping a frequent k-itemset (tuple) to its support.
        """
        # Guard: no transactions means no frequent itemsets (the original
        # divided by zero inside calculate_support).
        if not transactions:
            return []

        frequent_itemsets = []
        k = 1

        while True:
            candidates = self.generate_candidates(transactions, k)

            # Apriori property: a k-itemset can only be frequent if every
            # one of its (k-1)-subsets is frequent. Prune candidates using
            # the previous level before counting support.
            if frequent_itemsets:
                previous_level = frequent_itemsets[-1]
                candidates = [
                    c for c in candidates
                    if all(sub in previous_level
                           for sub in itertools.combinations(c, k - 1))
                ]

            # BUG FIX: support must always be measured against the ORIGINAL
            # transactions. The previous version replaced `transactions` with
            # the frequent k-itemsets themselves, so level k+1 candidates were
            # (k+1)-combinations of length-k lists — always empty — and the
            # algorithm silently stopped after level 1.
            support = self.calculate_support(transactions, candidates)
            frequent_k_itemsets = {item: sup for item, sup in support.items()
                                   if sup >= min_support}

            if not frequent_k_itemsets:
                break

            frequent_itemsets.append(frequent_k_itemsets)
            k += 1

        return frequent_itemsets


def preprocess_iris_data():
    """
    Load the Iris data set and convert it into a transaction list.

    Each numeric feature is discretized into 3 uniform-width bins and
    encoded as a string token of the form ``"<column>_<bin>"``; every row
    then becomes one transaction (a list of such tokens) suitable for the
    Apriori algorithm.

    :return: list of transactions.
    """
    from sklearn.datasets import load_iris
    from sklearn.preprocessing import KBinsDiscretizer

    # Load the Iris data set into a DataFrame.
    iris = load_iris()
    frame = pd.DataFrame(data=iris.data, columns=iris.feature_names)
    frame['species'] = iris.target

    # Discretize the numeric feature columns (everything except 'species').
    feature_columns = frame.columns[:-1]
    binner = KBinsDiscretizer(n_bins=3, encode='ordinal', strategy='uniform')
    binned = pd.DataFrame(binner.fit_transform(frame[feature_columns]),
                          columns=feature_columns)

    # Encode each bin index as a "<column>_<bin>" token so itemsets are
    # human-readable strings. Bind the column name as a default argument
    # to avoid late-binding surprises.
    for column in binned.columns:
        binned[column] = binned[column].apply(
            lambda value, col=column: f'{col}_{int(value)}')

    # One transaction per row.
    return [row.tolist() for _, row in binned.iterrows()]


if __name__ == "__main__":
    try:
        # Preprocess the Iris data and run the Apriori algorithm on it.
        transactions = preprocess_iris_data()
        miner = AprioriAlgorithm()
        threshold = 0.2  # minimum support threshold

        levels = miner.find_frequent_itemsets(transactions, threshold)

        # Print each level's frequent itemsets with their support values.
        for idx, itemsets in enumerate(levels, start=1):
            print(f"频繁{idx}-项集:")
            for itemset, support in itemsets.items():
                print(f"  {itemset}: 支持度 = {support:.2f}")

    except Exception as e:
        # Top-level boundary: report any failure instead of crashing.
        print(f"An error occurred: {e}")