# -*- coding: utf-8 -*-
import pandas as pd
import os
from tqdm import tqdm
from joblib import Parallel, delayed
import multiprocessing
import time


def load_data(path, max_rows=2000000):  # load the data set from the given path
    """Load a transaction data set from a CSV file.

    Parameters
    ----------
    path : str
        Path to a CSV file with at least ``order_id`` and ``product_id`` columns.
    max_rows : int, optional
        Cap on the number of rows read (default 2,000,000) to bound memory use.

    Returns
    -------
    list[list]
        One inner list per ``order_id`` holding that order's product_ids
        (a 2-D "transactions" structure).
    """
    orders = pd.read_csv(path, encoding='unicode_escape')
    orders = orders.head(max_rows)
    # One transaction per order: all product_ids bought together in that order.
    return [items.product_id.tolist()
            for _, items in tqdm(orders.groupby("order_id"), desc="read_data", ncols=80)]


def save_rule(rule, path):  # 保存结果到txt文件
    with open(path, "w") as f:
        f.write("index  confidence" + "   rules\n")
        index = 1
        for item in rule:
            s = " {:<4d}  {:.3f}        {}=>{}\n".format(index, item[2], str(list(item[0])), str(list(item[1])))
            index += 1
            f.write(s)
        f.close()
    print("result saved path is:{}".format(path))


class Node:
    """A single node of an FP-tree.

    Attributes
    ----------
    name : hashable
        Item this node represents.
    count : int
        Accumulated occurrence count along this path.
    nodeLink : Node or None
        Next node in the tree carrying the same name (header-table chain).
    parent : Node or None
        Parent node; None for the tree root.
    children : dict
        Maps a child's item name to the child Node.
    """

    def __init__(self, node_name, count, parentNode):
        self.name = node_name
        self.parent = parentNode  # parent pointer, used to walk prefix paths
        self.count = count
        self.children = {}        # {child item name: child Node}
        self.nodeLink = None      # threads together all nodes with this name


class Fp_growth():
    """FP-growth frequent-itemset and association-rule miner.

    Builds an FP-tree over compressed transactions, recursively mines
    conditional FP-trees to find all frequent itemsets, then derives
    association rules that meet a minimum confidence threshold.
    """

    def data_compress(self, data_set):
        """Compress the transaction list into {frozenset(items): count} so
        identical transactions are processed once during tree building."""
        data_dic = {}
        for trans in data_set:
            fset = frozenset(trans)
            # setdefault: returns the value for the key; if the key is absent,
            # it is inserted first with the given default (0 here)
            data_dic.setdefault(fset, 0)
            data_dic[fset] += 1
        return data_dic

    def update_header(self, node, targetNode):  # append targetNode to the chain of same-named nodes starting at `node`
        while node.nodeLink != None:
            node = node.nodeLink
        node.nodeLink = targetNode

    def update_fptree(self, items, count, node, headerTable):  # insert one sorted transaction into the FP-tree
        if items[0] in node.children:
            # the first item already exists as a child: just accumulate its count
            node.children[items[0]].count += count
        else:
            # create a new branch for this item
            node.children[items[0]] = Node(items[0], count, node)
            # link the new node onto the header table's chain for this item
            if headerTable[items[0]][1] == None:
                headerTable[items[0]][1] = node.children[items[0]]
            else:
                self.update_header(headerTable[items[0]][1], node.children[items[0]])
            # then recurse on the remaining items
        if len(items) > 1:
            self.update_fptree(items[1:], count, node.children[items[0]], headerTable)

    def create_fptree(self, data_dic, min_support, flag=False):  # main tree-building routine
        '''
        Build an FP-tree from data_dic ({frozenset(transaction): count}).

        Returns (root, headerTable), or (None, None) when no item reaches
        min_support. headerTable has the shape
        {"nodename": [count, node]}; following node.nodeLink from the stored
        node visits every tree node with that name. `flag` turns on a tqdm
        progress bar for the second pass.
        '''

        headerTable = {}
        # first pass over the data: count each item's total support
        for trans in data_dic:
            for item in trans:
                headerTable[item] = headerTable.get(item, 0) + data_dic[trans]
        # drop the items below the minimum support
        lessThanMinsup = list(filter(lambda k: headerTable[k] < min_support, headerTable.keys()))
        for k in lessThanMinsup:
            del (headerTable[k])

        freqItemSet = set(headerTable.keys())  # frequent 1-items that meet min_support
        if len(freqItemSet) == 0:
            return None, None
        for k in headerTable:
            headerTable[k] = [headerTable[k], None]  # element: [count, node]
        tree_header = Node('head node', 1, None)  # tree root
        if flag:
            ite = tqdm(data_dic, desc="create_tree", ncols=80)
        else:
            ite = data_dic
        for t in ite:  # second pass: build the tree
            localD = {}
            for item in t:
                if item in freqItemSet:  # keep only this transaction's items that meet min_support
                    localD[item] = headerTable[item][0]  # element : count
            if len(localD) > 0:
                # sort this transaction's items by global frequency, descending
                order_item = [v[0] for v in sorted(localD.items(), key=lambda x: x[1], reverse=True)]
                # insert the filtered, sorted transaction into the tree
                self.update_fptree(order_item, data_dic[t], tree_header, headerTable)
        return tree_header, headerTable

    def find_path(self, node, nodepath):
        '''
        Recursively append the names of node's ancestors to nodepath,
        producing the prefix path from node up to the root.
        '''
        if node.parent != None:
            nodepath.append(node.parent.name)
            self.find_path(node.parent, nodepath)

    def find_cond_pattern_base(self, node_name, headerTable):
        '''
        Collect every conditional pattern base (prefix path with its count)
        for node_name, as {frozenset(path): count}.
        '''
        treeNode = headerTable[node_name][1]  # first node in this name's chain
        cond_pat_base = {}  # all conditional pattern bases
        while treeNode != None:
            nodepath = []
            self.find_path(treeNode, nodepath)
            if len(nodepath) > 1:
                # nodepath[-1] is the 'head node' root; drop it from the base
                cond_pat_base[frozenset(nodepath[:-1])] = treeNode.count
            treeNode = treeNode.nodeLink
        return cond_pat_base

    def create_cond_fptree(self, headerTable, min_support, temp, freq_items, support_data):
        """Recursively mine conditional FP-trees, accumulating frequent
        itemsets into freq_items and their support counts into support_data."""
        # the starting frequent itemsets are the entries of headerTable
        freqs = [v[0] for v in sorted(headerTable.items(), key=lambda p: p[1][0])]  # items ordered by total frequency
        for freq in freqs:  # for each frequent 1-item
            freq_set = temp.copy()
            freq_set.add(freq)
            freq_items.add(frozenset(freq_set))
            if frozenset(freq_set) not in support_data:  # record or accumulate this itemset's support
                support_data[frozenset(freq_set)] = headerTable[freq][0]
            else:
                support_data[frozenset(freq_set)] += headerTable[freq][0]

            cond_pat_base = self.find_cond_pattern_base(freq, headerTable)  # all conditional pattern bases for freq
            # build the conditional FP-tree over those bases
            cond_tree, cur_headtable = self.create_fptree(cond_pat_base, min_support)
            if cur_headtable != None:
                self.create_cond_fptree(cur_headtable, min_support, freq_set, freq_items, support_data)  # recurse into the conditional tree

    def generate_L(self, data_set, min_support):
        """Return (L, support_data): L[k] is the set of frequent (k+1)-itemsets
        (as frozensets); support_data maps each itemset to its support count."""
        data_dic = self.data_compress(data_set)
        freqItemSet = set()
        support_data = {}
        tree_header, headerTable = self.create_fptree(data_dic, min_support, flag=True)  # FP-tree over the whole data set
        # mine each frequent item's conditional tree, collecting itemsets and counts
        self.create_cond_fptree(headerTable, min_support, set(), freqItemSet, support_data)

        max_l = 0
        for i in freqItemSet:  # find the size of the largest frequent itemset
            if len(i) > max_l:
                max_l = len(i)  # bucket the itemsets by size into L below
        L = [set() for _ in range(max_l)]
        for i in freqItemSet:
            L[len(i) - 1].add(i)
        for i in range(len(L)):
            print("frequent item {}:{}".format(i + 1, len(L[i])))
        return L, support_data

    def generate_R(self, data_set, min_support, min_conf):
        """Mine association rules (antecedent, consequent, confidence) with
        confidence >= min_conf, sorted by confidence descending."""
        L, support_data = self.generate_L(data_set, min_support)
        sub_set_list = [i for item in L for i in item]

        def generate(i):
            # build all rules whose full itemset has size i + 1
            rule_list = []
            for freq_set in tqdm(L[i], desc="Generate_Rules", ncols=80):
                for sub_set in sub_set_list:
                    # sub_set: milk, egg
                    # freq_set: milk, egg, book
                    # freq_set - sub_set: book; if book in support_data, book is frequent
                    # conf = support(milk, egg, book)/support(book)
                    # yields rule: book => milk, egg with that confidence
                    if sub_set.issubset(freq_set) and freq_set - sub_set in support_data:
                        conf = support_data[freq_set] / support_data[freq_set - sub_set]
                        big_rule = (freq_set - sub_set, sub_set, conf)
                        if conf >= min_conf and big_rule not in rule_list:
                            rule_list.append(big_rule)
            return rule_list

        # one parallel job per itemset size
        num_cores = multiprocessing.cpu_count()
        rule = Parallel(n_jobs=num_cores)(delayed(generate)(i) for i in range(len(L)))
        rule = [i for item in rule for i in item]
        rule = sorted(rule, key=lambda x: (x[2]), reverse=True)
        return rule


if __name__ == "__main__":

    start = time.time()
    filename = "transaction_data.csv"
    current_path = os.getcwd()
    # Build paths portably and create the output directory idempotently
    # (os.mkdir raises if "log" already appeared between the check and the call).
    log_dir = os.path.join(current_path, "log")
    os.makedirs(log_dir, exist_ok=True)
    path = os.path.join(current_path, "dataset", filename)
    save_path = os.path.join(log_dir, "rules_test1.txt")

    min_conf = 0.5     # minimum rule confidence
    min_support = 10   # minimum absolute support count

    data_set = load_data(path)
    fp = Fp_growth()
    rule = fp.generate_R(data_set, min_support, min_conf)
    end = time.time()
    print(str(end - start) + 's')
    save_rule(rule, save_path)
