from itertools import combinations
import pandas as pd
import csv

def create_c1(dataset):
    """Build the initial candidate 1-itemsets (C1) from the transaction dataset.

    Args:
        dataset: iterable of transactions, each an iterable of hashable items.

    Returns:
        Sorted list of single-item frozensets (frozenset so they can be used
        as dict keys in the support table).

    Improvement over the original: de-duplication through a set instead of a
    linear `[item] in C1` membership scan, turning O(n^2) into O(n log n).
    """
    unique_items = {item for transaction in dataset for item in transaction}
    # Sorting single items is equivalent to sorting the original one-element
    # lists, so the output order matches the previous implementation.
    return [frozenset([item]) for item in sorted(unique_items)]

def apriori_gen(fk, k):
    """Join step of Apriori: build candidate k-itemsets from frequent (k-1)-itemsets.

    Two (k-1)-itemsets are merged when their sorted first k-2 items agree,
    which limits the number of duplicate candidates produced.

    Args:
        fk: list of frozensets, each of size k-1.
        k: target candidate size.

    Returns:
        List of candidate frozensets of size k.
    """
    candidates = []
    for i, first in enumerate(fk):
        for second in fk[i + 1:]:
            # NOTE: the prefix is taken from the frozenset's arbitrary
            # iteration order before sorting, matching the classic
            # implementation this mirrors.
            prefix_a = sorted(list(first)[:k - 2])
            prefix_b = sorted(list(second)[:k - 2])
            if prefix_a == prefix_b:
                candidates.append(first | second)
    return candidates




def scan_dataset(data_set, ck, min_support):
    """Filter candidate itemsets by minimum support.

    Args:
        data_set: list of transactions, each a set of items.
        ck: list of candidate frozensets.
        min_support: minimum support threshold in [0, 1].

    Returns:
        (frequent_items, support_data) where frequent_items is the list of
        candidates meeting min_support and support_data maps every candidate
        that appeared at least once to its support.

    Improvements: counting uses `dict.get` instead of a two-branch `if`, and
    the original O(n^2) `insert(0, ...)` loop is replaced by append+reverse
    (the final reversal keeps the output order identical to before).
    """
    item_count = {}
    # Count, for each candidate, the number of transactions containing it.
    for transaction in data_set:
        for candidate in ck:
            if candidate.issubset(transaction):
                item_count[candidate] = item_count.get(candidate, 0) + 1

    num_items = float(len(data_set))
    frequent_items = []
    support_data = {}  # candidate -> support (kept even below threshold)
    for item, count in item_count.items():
        support = count / num_items
        if support >= min_support:
            frequent_items.append(item)
        support_data[item] = support
    frequent_items.reverse()  # preserve the original insert(0, ...) ordering

    return frequent_items, support_data




def load_dataset(file_path=r"D:\Lenovo\Desktop\云南大学\空间数据挖掘\实验数据\实验数据5.csv",
                 encoding=None):
    """Load transactions from a CSV file (one transaction per row).

    Args:
        file_path: path to the CSV file; defaults to the original hard-coded
            location (now a raw string so backslashes cannot be swallowed as
            escape sequences).
        encoding: text encoding passed to open(); None keeps the previous
            platform-default behavior.

    Returns:
        List of transactions; each transaction is the list of non-empty,
        stripped cell values of one row. Blank rows are skipped.
    """
    dataset = []
    with open(file_path, 'r', encoding=encoding) as file:
        reader = csv.DictReader(file)
        for row in reader:
            # Guard against None cells: DictReader fills missing trailing
            # columns with None, which would crash .strip().
            items = [cell.strip() for cell in row.values() if cell and cell.strip()]
            if items:
                dataset.append(items)
    return dataset

def write_results_to_file(L, support_data, rules):
    """Write frequent itemsets, supports and association rules to three files.

    Args:
        L: list of levels; L[i] holds the frequent (i+1)-itemsets.
        support_data: dict mapping itemset -> support.
        rules: list of (antecedent, consequent, confidence) tuples.

    Bug fix: the original loop guarded with `if i > 0:`, so the frequent
    1-itemsets (index 0) were never written; every non-empty level is now
    emitted. Files are written as UTF-8 so non-ASCII item names cannot raise
    UnicodeEncodeError on platforms with a different default encoding.
    """
    # Frequent itemsets, one section per level.
    with open('frequent_itemsets.txt', 'w', encoding='utf-8') as f:
        for i, itemset in enumerate(L):
            if itemset:  # the last level produced by apriori() is always empty
                f.write(f'\nFrequent {i+1}-itemsets\n')
                for item in itemset:
                    f.write(f"{list(item)}\n")
    # Support data, descending by support.
    with open('support_data.txt', 'w', encoding='utf-8') as f:
        f.write("Support Data\n")
        sorted_support_data = sorted(support_data.items(), key=lambda x: x[1], reverse=True)
        for itemset, support in sorted_support_data:
            f.write(f"{list(itemset)}: {support}\n")
    # Association rules, descending by confidence.
    with open('association_rules.txt', 'w', encoding='utf-8') as f:
        f.write("Association Rules\n")
        sorted_rules = sorted(rules, key=lambda x: x[2], reverse=True)
        for rule in sorted_rules:
            f.write(f"{list(rule[0])} --> {list(rule[1])}, Confidence: {rule[2]}\n")

def generate_rules(L, support_data, min_confidence):
    """Derive association rules from every frequent itemset of size >= 2.

    Args:
        L: list of levels of frequent itemsets (L[0] = 1-itemsets, ...).
        support_data: dict mapping itemset -> support.
        min_confidence: minimum confidence threshold.

    Returns:
        List of (antecedent, consequent, confidence) tuples.
    """
    rules = []
    for level in range(1, len(L)):
        for freq_set in L[level]:
            single_conseqs = [frozenset([item]) for item in freq_set]
            if level == 1:
                # 2-itemsets: score the single-item consequents directly.
                calc_confidence(freq_set, single_conseqs, support_data, rules, min_confidence)
            else:
                # Larger itemsets: grow consequents recursively.
                rules_from_conseq(freq_set, single_conseqs, support_data, rules, min_confidence)
    return rules

def calc_confidence(freq_set, H, support_data, rules, min_confidence):
    """Score candidate consequents and keep only those meeting min_confidence.

    For each consequent c in H, confidence(freq_set - c => c) is
    support(freq_set) / support(freq_set - c). Passing rules are printed and
    appended to `rules`; the surviving consequents are returned for the
    caller's recursion.
    """
    kept = []
    for conseq in H:
        antecedent = freq_set - conseq
        conf = support_data[freq_set] / support_data[antecedent]
        if conf < min_confidence:
            continue
        print(antecedent, '-->', conseq, 'conf:', conf)
        rules.append((antecedent, conseq, conf))
        kept.append(conseq)
    return kept

def rules_from_conseq(freq_set, H, support_data, rules, min_confidence):
    """Recursively generate and score rules with growing consequents.

    Args:
        freq_set: the frequent itemset to split into rules.
        H: list of candidate consequent frozensets, all of the same size m.
        support_data, rules, min_confidence: as in calc_confidence.

    Bug fix: the original called apriori_gen(H, m + 1) immediately, so the
    size-m consequents in H (in particular the single-item consequents of
    itemsets with >= 3 elements) were never scored — rules such as
    {a, b} --> {c} could never be produced. The current consequents are now
    evaluated first, then the survivors are merged and recursed on, matching
    the ap-genrules procedure from Agrawal & Srikant.
    """
    m = len(H[0])
    if len(freq_set) > m:
        # Score the current consequents; keep only those above threshold.
        survivors = calc_confidence(freq_set, H, support_data, rules, min_confidence)
        # At least two survivors are needed to merge into (m+1)-consequents,
        # and the antecedent must stay non-empty.
        if len(survivors) > 1 and len(freq_set) > (m + 1):
            rules_from_conseq(freq_set, apriori_gen(survivors, m + 1),
                              support_data, rules, min_confidence)

def apriori(data_set, min_support, min_confidence):
    """Run the full Apriori pipeline: mine frequent itemsets, derive rules, save results.

    Args:
        data_set: list of transactions (iterables of items).
        min_support: minimum support threshold.
        min_confidence: minimum confidence threshold for rules.

    Returns:
        (L, support_data): the levels of frequent itemsets and the support table.
        Results are also written to disk via write_results_to_file.
    """
    transactions = list(map(set, data_set))
    candidates = create_c1(data_set)
    first_level, support_data = scan_dataset(transactions, candidates, min_support)
    L = [first_level]
    k = 2
    # Keep growing itemsets until a level comes back empty.
    while L[-1]:
        Ck = apriori_gen(L[-1], k)
        Lk, level_support = scan_dataset(transactions, Ck, min_support)
        support_data.update(level_support)
        L.append(Lk)
        k += 1

    rules = generate_rules(L, support_data, min_confidence)
    write_results_to_file(L, support_data, rules)
    return L, support_data

# Main program: load the CSV dataset, mine frequent itemsets and rules with
# min_support=0.2 / min_confidence=0.7, then print and persist the results.
dataset = load_dataset()
L, support_data = apriori(dataset,min_support=0.2,min_confidence=0.7)

# Console summary (files are written inside apriori()).
print("频繁项集:")
print(L)
print("\n支持度:")
print(support_data)
print('已将结果保存到文件')
