import pandas as pd
import requests
from itertools import combinations

# Discretization helper: binarize a column against its mean (strategy can be
# refined later if needed).
def discretize_feature(feature_series):
    """Return a 0/1 Series: 1 where the value strictly exceeds the column mean."""
    threshold = feature_series.mean()
    # Vectorized comparison; values equal to the mean map to 0, matching `>`.
    return (feature_series > threshold).astype(int)

def load_data():
    """Download the UCI Wine data set and convert it to transactions.

    Each sample row becomes one "transaction": the set of feature-column
    names whose value is strictly above that column's mean (per
    discretize_feature).

    Returns:
        list[set[str]]: one set of above-mean feature names per wine sample.

    NOTE(review): requires network access; assumes the UCI URL is still
    reachable — verify before deploying.
    """
    data_url = "https://archive.ics.uci.edu/ml/machine-learning-databases/wine/wine.data"
    col_names = ['Class', 'Alcohol', 'Malic acid', 'Ash', 'Alcalinity of ash', 'Magnesium',
                 'Total phenols', 'Flavanoids', 'Nonflavanoid phenols', 'Proanthocyanins',
                 'Color intensity', 'Hue', 'OD280/OD315 of diluted wines', 'Proline']
    data = pd.read_csv(data_url, header=None, names=col_names)
    # Drop the class-label column; only feature columns are processed.
    features_data = data.iloc[:, 1:]
    # Discretize every feature column (1 = above that column's mean).
    discretized_data = features_data.apply(discretize_feature)
    transactions = [set(discretized_data.columns[discretized_data.iloc[i] == 1]) for i in range(len(discretized_data))]
    return transactions

def generate_candidates(frequent_itemsets, k):
    """Join frequent (k-1)-itemsets into unique candidate k-itemsets.

    Args:
        frequent_itemsets: dict mapping frozenset (k-1)-itemsets to support counts.
        k: target candidate size.

    Returns:
        list[frozenset]: deduplicated candidate k-itemsets whose every
        (k-1)-subset is itself frequent (Apriori pruning).

    Bug fix: the previous version returned the same union once per generating
    pair (e.g. {A,B,C} from AB|AC, AB|BC and AC|BC), which made the caller's
    per-transaction counting inflate that candidate's support count.
    """
    previous_itemsets = list(frequent_itemsets.keys())
    previous_lookup = set(previous_itemsets)
    candidates = set()
    for i in range(len(previous_itemsets)):
        for j in range(i + 1, len(previous_itemsets)):
            candidate = previous_itemsets[i] | previous_itemsets[j]
            if len(candidate) != k or candidate in candidates:
                continue
            # Apriori property: a k-itemset can only be frequent if every one
            # of its (k-1)-subsets is frequent; prune candidates that fail.
            if all(candidate - frozenset((item,)) in previous_lookup for item in candidate):
                candidates.add(candidate)
    return list(candidates)

def filter_candidates(transactions, candidate_itemsets, min_support, num_transactions):
    """Count support for each candidate itemset and keep the frequent ones.

    Args:
        transactions: iterable of item sets.
        candidate_itemsets: iterable of frozenset candidates (may contain duplicates).
        min_support: minimum support fraction in [0, 1].
        num_transactions: total number of transactions.

    Returns:
        dict[frozenset, int]: candidates whose support (count/num_transactions)
        meets min_support, mapped to their raw occurrence counts.

    Bug fix: candidates are deduplicated before counting — previously each
    duplicate entry in the candidate list incremented the same itemset's count
    again for every matching transaction, inflating its apparent support.
    """
    unique_candidates = set(candidate_itemsets)
    itemset_counts = {}
    for transaction in transactions:
        for candidate in unique_candidates:
            if candidate.issubset(transaction):
                itemset_counts[candidate] = itemset_counts.get(candidate, 0) + 1

    return {itemset: count for itemset, count in itemset_counts.items()
            if count / num_transactions >= min_support}

def generate_rules(frequent_itemsets, transactions, num_transactions, min_confidence):
    """Derive association rules from the mined frequent itemsets.

    Args:
        frequent_itemsets: list of dicts; index d holds (d+1)-itemsets -> counts.
        transactions: list of item sets (used to count consequent occurrences).
        num_transactions: total number of transactions.
        min_confidence: minimum rule confidence in [0, 1].

    Returns:
        list of (antecedent, consequent, support, confidence, lift) tuples for
        rules with confidence >= min_confidence, support >= 0.05 and lift >= 1.

    Bug fix: an antecedent of size s has its support count stored at
    frequent_itemsets[s - 1], not at the level just below the itemset's own
    level; the old lookup raised KeyError for every antecedent more than one
    item smaller than the itemset, silently dropping those rules.
    """
    rules = []
    for level in frequent_itemsets:
        for itemset, support_count in level.items():
            for size in range(1, len(itemset)):
                for antecedent in combinations(itemset, size):
                    antecedent = frozenset(antecedent)
                    consequent = itemset - antecedent
                    if not consequent:
                        continue

                    try:
                        # Level index = antecedent size - 1.
                        antecedent_count = frequent_itemsets[len(antecedent) - 1][antecedent]
                    except (KeyError, IndexError):
                        # Antecedent was not frequent (below threshold); skip the rule.
                        continue

                    support = support_count / num_transactions
                    confidence = support_count / antecedent_count
                    if confidence < min_confidence:
                        continue

                    consequent_count = sum(1 for t in transactions if consequent.issubset(t))
                    lift = confidence / (consequent_count / num_transactions) if consequent_count > 0 else 0
                    # Keep only rules with sufficient support and positive lift.
                    if support >= 0.05 and lift >= 1:
                        rules.append((antecedent, consequent, support, confidence, lift))
    return rules

def apriori(transactions, min_support, min_confidence):
    """Run the Apriori algorithm over the given transactions.

    Args:
        transactions: list of item sets.
        min_support: minimum support fraction for frequent itemsets.
        min_confidence: minimum confidence for association rules.

    Returns:
        (frequent_itemsets, rules): a list of per-level dicts (level d holds
        frequent (d+1)-itemsets mapped to support counts) and the rule list
        produced by generate_rules.
    """
    num_transactions = len(transactions)

    # Count every singleton occurrence to seed the frequent 1-itemsets.
    singleton_counts = {}
    for transaction in transactions:
        for item in transaction:
            key = frozenset([item])
            singleton_counts[key] = singleton_counts.get(key, 0) + 1

    frequent_itemsets = [{
        itemset: count
        for itemset, count in singleton_counts.items()
        if count / num_transactions >= min_support
    }]

    # Grow the itemset size until no candidates survive the support filter.
    size = 2
    while True:
        candidates = generate_candidates(frequent_itemsets[-1], size)
        survivors = filter_candidates(transactions, candidates, min_support, num_transactions)
        if not survivors:
            break
        frequent_itemsets.append(survivors)
        size += 1

    # Derive association rules from the complete set of frequent itemsets.
    rules = generate_rules(frequent_itemsets, transactions, num_transactions, min_confidence)

    return frequent_itemsets, rules

def use_apriori():
    """Load the wine data, mine frequent itemsets and rules, and print the results."""
    transactions = load_data()
    total = len(transactions)

    # Thresholds; adjust to the needs of the analysis.
    min_support = 0.05
    min_confidence = 0.2

    # Run the Apriori algorithm.
    frequent_itemsets, rules = apriori(transactions, min_support, min_confidence)

    # Report frequent itemsets whose support is at least 0.05.
    print("\n有价值的频繁项集:")
    for level in frequent_itemsets:
        for itemset, count in level.items():
            support = count / total
            if support >= 0.05:
                print(f"{set(itemset)}: {support:.2f}")

    # Report the rules (already filtered on support and lift by generate_rules).
    print("\n有价值的关联规则:")
    for antecedent, consequent, support, confidence, lift in rules:
        print(f"{set(antecedent)} -> {set(consequent)} (support: {support:.2f}, confidence: {confidence:.2f}, lift: {lift:.2f})")


# Script entry point: run the full Apriori demo when executed directly.
if __name__ == "__main__":
    use_apriori()