#2024/12/28
# file name: fptree.py

# 使用FP-tree实现频繁模式和关联规则挖掘
import itertools
import random
from time import time

import pandas as pd
from collections import defaultdict


# 构建树的节点
# A single node of the FP-tree.
class Node:
    """FP-tree node holding an item value, a count, a parent link and a child map."""

    def __init__(self, value, parent, count=0):
        """Create a node for `value` attached under `parent` with an initial count."""
        self.children = {}      # item value -> child Node
        self.parent = parent    # None only for the root
        self.value = value
        self.count = count

    def addChild(self, child):
        """Merge a {value: Node} mapping into this node's children."""
        self.children.update(child)


# 构建FP-tree
# FP-tree based mining of frequent patterns and association rules.
class FP_tree:
    """Mine frequent patterns and 'antecedent--consequent' rules via FP-growth.

    Typical usage: ``FP_tree(data, support, confidence).find_rule()``.
    ``data`` maps transaction ids to lists of items.
    """

    def __init__(self, data, support, confidence):
        self.data = data                 # {transaction_id: [item, ...]}
        self.first_list = []             # header table: (item, count) list, later {item: [nodes]}
        self.value_list = []             # frequent items, descending support order
        self.support = support           # minimum support (fraction of transactions)
        self.confidence = confidence     # minimum rule confidence
        self.confidences = defaultdict(float)  # rule string -> confidence
        self.tree = None                 # FP-tree root (set by build_tree)
        self.frequent_patterns = []      # mined patterns; becomes a DataFrame in find_rule
        self.rule = set()                # {(rule_string, confidence)}

    def first_scan(self):
        """Build the header table and normalize the transactions.

        Counts item frequencies, drops items below minimum support, and
        reorders every transaction by descending frequency so shared
        prefixes compress well in the FP-tree.
        """
        counts = {}
        for transaction in self.data.values():
            for item in transaction:
                counts[item] = counts.get(item, 0) + 1
        threshold = self.support * len(self.data)
        self.first_list = sorted(counts.items(), key=lambda kv: kv[1], reverse=True)
        # BUGFIX: the original reverse scan started at index len-1 and stopped
        # at index 1, so element 0 was never checked and, when every item was
        # infrequent, nothing was trimmed at all.  Filter directly instead.
        self.first_list = [kv for kv in self.first_list if kv[1] >= threshold]

        # Re-sort each transaction by support rank and drop infrequent items.
        frequent_items = [kv[0] for kv in self.first_list]
        rank = {item: idx for idx, item in enumerate(frequent_items)}  # O(1) rank lookup instead of list.index
        self.data = {
            idx: sorted((item for item in transaction if item in rank), key=rank.get)
            for idx, transaction in enumerate(self.data.values())
        }

        # Frequent 1-itemsets go straight into the result list as [item, count].
        self.frequent_patterns.extend([list(kv) for kv in self.first_list])

        # Turn the header table into {item: [tree nodes]}.
        self.value_list = frequent_items
        self.first_list = {item: [] for item in frequent_items}

    def build_tree(self):
        """Insert every normalized transaction into the FP-tree.

        Shared prefixes share nodes; each newly created node is linked into
        the header table so ``find`` can reach all occurrences of an item.

        :return: None (the root is stored in ``self.tree``)
        """
        root = Node('root', None)
        for transaction in self.data.values():
            parent = root
            for item in transaction:
                child = parent.children.get(item)
                if child is None:
                    child = Node(item, parent, 1)
                    parent.addChild({item: child})
                    self.first_list[item].append(child)  # header-table link
                else:
                    child.count += 1
                parent = child
        self.tree = root

    def find(self):
        """Mine frequent patterns, processing items from least to most frequent."""
        for item in reversed(self.value_list):
            cond_counts = {}
            # Accumulate the conditional pattern base: walk each occurrence of
            # `item` up to the root, weighting ancestors by the node's count.
            for node in self.first_list[item]:
                count = node.count
                current = node
                while current is not None:
                    cond_counts[current.value] = cond_counts.get(current.value, 0) + count
                    current = current.parent
            del cond_counts['root']  # the artificial root is not an item
            self.cal(cond_counts, True)

    def cal(self, Dict: dict, delete=False, length=1):
        """Recursively enumerate frequent patterns from a conditional-pattern dict.

        ``Dict`` maps item -> count along the target item's prefix paths; its
        FIRST key is the target item itself (insertion order is relied upon).

        :param Dict: conditional pattern counts, target item first
        :param delete: prune items below minimum support before mining
        :param length: combination size for this recursion level (starts at 1)
        """
        if delete:
            threshold = self.support * len(self.data)
            # Collect keys first: never delete from a dict while iterating it.
            for item in [k for k, v in Dict.items() if v < threshold]:
                del Dict[item]
        if length == 1:
            # Rules are generated once per conditional pattern base.
            self.rules(Dict)
        if length <= len(Dict):
            keys = list(Dict.keys())
            base = [keys[0], Dict[keys[0]]]  # [target item, its count]
            rest = keys[1:]
            for combo in itertools.combinations(rest, length):
                pattern = base.copy()
                for item in combo:
                    pattern.insert(-1, item)
                    if Dict[item] < pattern[-1]:
                        pattern[-1] = Dict[item]  # pattern support = min of member counts
                pattern[0:-1] = pattern[-2::-1]  # reverse items back into support order
                self.frequent_patterns.append(pattern)
            self.cal(Dict, length=length + 1)

    def find_rule(self):
        """Run the full pipeline and return frequent patterns as a DataFrame.

        :return: DataFrame indexed by 'item--item--...' with a 'support' column
        """
        self.first_scan()
        self.build_tree()
        self.find()
        # Keep only patterns of >= 2 items (the last element is the count).
        self.frequent_patterns = [p for p in self.frequent_patterns if len(p) - 1 >= 2]

        def _canonical(pattern):
            # Sort the items alphabetically; the trailing count stays last.
            return sorted(pattern[:-1]) + [pattern[-1]]

        self.frequent_patterns = [_canonical(p) for p in self.frequent_patterns]
        self.frequent_patterns.sort(key=lambda p: (len(p), p[-1]), reverse=True)
        # 'item1--item2--...' -> relative support
        self.frequent_patterns = {
            '--'.join(p[:-1]): p[-1] / len(self.data) for p in self.frequent_patterns
        }
        self.frequent_patterns = pd.DataFrame({'support': self.frequent_patterns})
        return self.frequent_patterns

    def rules(self, Dict: dict):
        """Generate association rules from one conditional pattern dict.

        Emits every antecedent/consequent split whose confidence meets the
        threshold, recording it in ``self.rule`` and ``self.confidences``.

        :param Dict: item -> count along the target item's prefix paths
        """
        items = list(Dict.keys())
        if len(items) < 2:
            return
        for size in range(1, len(items)):
            for antecedent in itertools.combinations(items, size):
                consequent = [item for item in items if item not in antecedent]
                # combinations(size >= 1) tuples are never empty, so no guard needed.
                support_antecedent = min(Dict[item] for item in antecedent)
                if support_antecedent == 0:
                    continue
                # NOTE(review): the denominator is the FIRST antecedent item's
                # count, an approximation of true antecedent support — kept as-is.
                confidence = support_antecedent / Dict[antecedent[0]]
                if confidence >= self.confidence:
                    rule = f"{'--'.join(map(str, antecedent))}--{'--'.join(map(str, consequent))}"
                    self.rule.add((rule, confidence))
                    self.confidences[rule] = confidence

    def __str__(self):
        """Return every mined frequent pattern as a printable string."""
        # BUGFIX: corrected the 'confidenct' typo in the printed label.
        print(f"support: {self.support}, confidence: {self.confidence}")
        return "所有的频繁模式：\n" + str(self.frequent_patterns)

def loadDataSet():
    """Return a small hard-coded transaction database for experimentation."""
    transactions = [
        ['a', 'c', 'e'],
        ['b', 'd'],
        ['b', 'c'],
        ['a', 'b', 'c', 'd'],
        ['a', 'b'],
        ['b', 'c'],
        ['a', 'b'],
        ['a', 'b', 'c', 'e'],
        ['a', 'b', 'c'],
        ['a', 'c', 'e'],
    ]
    return transactions
    
def transformDataSet(data):
    """Convert a list of transactions into the {index: transaction} mapping
    expected by ``FP_tree``.

    :param data: list of item lists
    :return: dict keyed by list position (0-based)
    """
    # dict(enumerate(...)) replaces the manual range(len(...)) comprehension;
    # dead commented-out DataFrame conversion removed.
    return dict(enumerate(data))

if __name__ == '__main__':
    # Sample transaction database (keys are transaction ids).
    data = {
                1: ['牛奶', '鸡蛋', '面包', '薯片'],
                2: ['鸡蛋', '爆米花', '薯片', '啤酒'],
                3: ['牛奶', '面包', '啤酒'],
                4: ['牛奶', '鸡蛋', '面包', '爆米花', '薯片', '啤酒'],
                5: ['鸡蛋', '面包', '薯片'],
                6: ['鸡蛋', '面包', '啤酒'],
                7: ['牛奶', '面包', '薯片'],
                8: ['牛奶', '鸡蛋', '面包', '黄油', '薯片'],
                9: ['牛奶', '鸡蛋', '黄油', '薯片'],
                10: ['鸡蛋', '薯片']
            }
    arr = ['牛奶', '面包', '鸡蛋', '馒头', '包子', '饼干']
    # Thresholds (could also be read interactively via input()).
    support = 0.5
    confidence = 0.75
    data2 = data
    # Alternative data sources kept for experimentation:
    # data2 = {i: [random.choice(arr) for j in range(10)] for i in range(100000)}
    # data2 = transformDataSet(loadDataSet())
    begin = time()
    miner = FP_tree(data2, support, confidence)
    miner.find_rule()
    print(miner)
    print("总花费时间为%.3f秒" % (time() - begin))

