import pandas as pd
import requests
from itertools import combinations


def binarize_feature(feature_series):
    """Binarize a numeric Series against its median: 1 if strictly above, else 0.

    Simple median-split discretization; the threshold strategy can be swapped
    out here without touching callers.
    """
    return (feature_series > feature_series.median()).astype(int)


def fetch_and_prepare_data():
    """Download the UCI wine dataset and turn it into transactions.

    Each transaction is the set of feature names whose median-binarized
    value is 1 for that sample; the class label (column 0) is excluded.

    Returns:
        list[set[str]]: one set of "active" feature names per sample.
    """
    url = "https://archive.ics.uci.edu/ml/machine-learning-databases/wine/wine.data"
    columns = ['Class', 'Alcohol', 'Malic acid', 'Ash', 'Alcalinity of ash', 'Magnesium',
               'Total phenols', 'Flavanoids', 'Nonflavanoid phenols', 'Proanthocyanins',
               'Color intensity', 'Hue', 'OD280/OD315 of diluted wines', 'Proline']

    df = pd.read_csv(url, header=None, names=columns)
    # Binarize every feature column, skipping the class label.
    binary = df.iloc[:, 1:].apply(binarize_feature)
    # One transaction per row: the names of the features that are "on".
    return [set(binary.columns[row == 1]) for _, row in binary.iterrows()]


def create_candidate_sets(frequent_itemsets, k):
    """Generate candidate k-itemsets from the frequent (k-1)-itemsets.

    Join step: union each unordered pair of frequent itemsets, keeping
    unions of exactly size k. Prune step (Apriori downward closure): a
    candidate whose (k-1)-subsets are not all frequent cannot itself be
    frequent, so it is dropped before the expensive support count.

    Args:
        frequent_itemsets: iterable of frozensets (the frequent (k-1)-itemsets).
        k: target candidate size.

    Returns:
        list[frozenset]: candidate itemsets of size k.
    """
    previous = list(frequent_itemsets)
    previous_set = set(previous)

    joined = set()
    # Iterate unordered pairs once instead of the original full O(n^2)
    # cross product (which also pointlessly paired each itemset with itself).
    for itemset_i, itemset_j in combinations(previous, 2):
        candidate = itemset_i | itemset_j
        if len(candidate) == k:
            joined.add(candidate)

    # Prune candidates that have an infrequent (k-1)-subset.
    return [c for c in joined
            if all(frozenset(sub) in previous_set for sub in combinations(c, k - 1))]


def prune_candidates(transactions, candidates, min_support_ratio):
    """Count each candidate's occurrences and keep those meeting min support.

    Args:
        transactions: list of sets of items.
        candidates: iterable of frozensets to evaluate.
        min_support_ratio: minimum fraction of transactions a candidate
            must appear in.

    Returns:
        dict[frozenset, float]: surviving candidates mapped to their support.
    """
    total = len(transactions)
    tally = {}
    for candidate in candidates:
        hits = sum(1 for transaction in transactions if candidate.issubset(transaction))
        # Only candidates that occur at least once get an entry,
        # matching the original accumulation behavior.
        if hits:
            tally[candidate] = hits

    return {itemset: hits / total
            for itemset, hits in tally.items()
            if hits / total >= min_support_ratio}


def derive_association_rules(frequent_itemsets, transactions, min_confidence):
    """Derive association rules from the mined frequent itemsets.

    For each frequent itemset, every non-empty proper subset is tried as an
    antecedent. Rules are kept when confidence >= min_confidence and
    lift >= 1 (i.e. the antecedent and consequent are positively correlated).

    Args:
        frequent_itemsets: list of dicts, one per level; level i maps
            frequent (i+1)-itemsets (frozensets) to their support.
        transactions: list of sets, used to compute consequent support.
        min_confidence: minimum confidence threshold.

    Returns:
        list of (antecedent, consequent, support, confidence, lift) tuples.
    """
    rules_list = []
    total = len(transactions)
    for level in frequent_itemsets:
        for itemset in level:
            for size in range(1, len(itemset)):
                for antecedent in combinations(itemset, size):
                    antecedent = frozenset(antecedent)
                    consequent = itemset - antecedent
                    if not consequent:
                        continue

                    support = level[itemset]
                    # BUG FIX: the antecedent's support lives at level
                    # size-1, not level 0. The old code only looked in the
                    # 1-itemset level, so any antecedent of size >= 2 got a
                    # bogus "or 1" denominator and confidence == support.
                    antecedent_support = frequent_itemsets[size - 1].get(antecedent)
                    if not antecedent_support:
                        # Downward closure guarantees frequency; skip defensively.
                        continue

                    confidence = support / antecedent_support
                    if confidence >= min_confidence:
                        consequent_support = sum(
                            1 for t in transactions if consequent.issubset(t)) / total
                        if consequent_support == 0:
                            continue
                        lift = confidence / consequent_support
                        # Keep only positively-correlated rules.
                        if lift >= 1:
                            rules_list.append(
                                (antecedent, consequent, support, confidence, lift))
    return rules_list


def apriori_algorithm(transactions, min_sup, min_conf):
    """Run Apriori: mine frequent itemsets level by level, then derive rules.

    Args:
        transactions: list of sets of items.
        min_sup: minimum support ratio for a frequent itemset.
        min_conf: minimum confidence for an association rule.

    Returns:
        (levels, rules): levels is a list of dicts mapping frozenset -> support
        (index i holds (i+1)-itemsets); rules come from derive_association_rules.
    """
    total = len(transactions)

    # Seed level 1: count every individual item across all transactions.
    singleton_counts = {}
    for transaction in transactions:
        for item in transaction:
            key = frozenset((item,))
            singleton_counts[key] = singleton_counts.get(key, 0) + 1

    levels = [{items: count / total
               for items, count in singleton_counts.items()
               if count / total >= min_sup}]

    # Grow itemsets one item at a time until no candidate survives pruning.
    size = 2
    while True:
        candidates = create_candidate_sets(levels[-1].keys(), size)
        surviving = prune_candidates(transactions, candidates, min_sup)
        if not surviving:
            break
        levels.append(surviving)
        size += 1

    rules = derive_association_rules(levels, transactions, min_conf)
    return levels, rules


def execute_apriori_analysis():
    """Entry point: load the wine transactions, mine patterns, print results."""
    data_transactions = fetch_and_prepare_data()

    # Thresholds for the mining run.
    minimum_support = 0.1  # minimum support ratio
    minimum_confidence = 0.3  # minimum rule confidence

    freq_itemsets, assoc_rules = apriori_algorithm(
        data_transactions, minimum_support, minimum_confidence)

    # Report frequent itemsets above the support threshold.
    print("\n高价值频繁项集:")
    for level in freq_itemsets:
        for itemset, support in level.items():
            if support < 0.1:
                continue
            print(f"频繁项集: {set(itemset)}, 支持度: {support:.2f}")

    # Report the surviving association rules.
    print("\n有意义的关联规则:")
    for rule in assoc_rules:
        antecedent, consequent, support, confidence, lift = rule
        print(f"规则: {set(antecedent)} -> {set(consequent)}, "
              f"支持度: {support:.2f}, 置信度: {confidence:.2f}, 提升度: {lift:.2f}")


if __name__ == "__main__":
    execute_apriori_analysis()