import pandas as pd
import numpy as np
import time


class Apriri:
    """Apriori frequent-itemset mining and association-rule generation.

    Workflow: call ``fit(dataset, k=2)`` to mine all frequent itemsets and
    their supports, then ``generateRules(L, supportData, minConf)`` to derive
    association rules above a confidence threshold.

    NOTE(review): the class name (and ``apriorigGen`` below) keep their
    original misspellings so existing callers are not broken.
    """

    def __init__(self, minsupport):
        # minsupport: minimum support threshold, as a fraction of the number
        # of transactions, for an itemset to count as frequent.
        self.dataset = None
        self.minsupport = minsupport

    def createC1(self, dataset):
        """Build the candidate 1-itemsets C1.

        Returns a sorted list of single-element frozensets, one per distinct
        item seen in ``dataset`` (a list of transactions, each an iterable
        of items).
        """
        # Collect distinct items with a set — O(n) instead of the original
        # O(n^2) "[item] in list" membership scan.
        items = set()
        for transaction in dataset:
            items.update(transaction)
        return [frozenset([item]) for item in sorted(items)]

    def ScanD(self, D, Ck):
        """Count candidates ``Ck`` over transactions ``D``.

        D  : list of transaction sets.
        Ck : iterable of candidate frozensets.
        Returns ``(retList, supportData)`` where ``retList`` holds the
        candidates whose support >= ``self.minsupport`` and ``supportData``
        maps every candidate that occurred at least once to its support.
        """
        ssCnt = {}
        for tid in D:
            for scan in Ck:
                if scan.issubset(tid):
                    # Single-lookup counting instead of `in` + index.
                    ssCnt[scan] = ssCnt.get(scan, 0) + 1
        numItems = float(len(D))
        retList = []
        supportData = {}
        # Compute the support of every counted candidate.
        for key, cnt in ssCnt.items():
            support = cnt / numItems
            if support >= self.minsupport:
                retList.insert(0, key)  # prepend, preserving original order
            supportData[key] = support
        return retList, supportData

    def apriorigGen(self, Lk, k):
        """Generate candidate k-itemsets from the frequent (k-1)-itemsets ``Lk``.

        Two itemsets are merged when their sorted (k-2)-item prefixes agree,
        which produces each k-item union exactly once.
        """
        retList = []
        lenLk = len(Lk)
        for i in range(lenLk):
            for j in range(i + 1, lenLk):
                L1 = sorted(list(Lk[i])[:k - 2])
                L2 = sorted(list(Lk[j])[:k - 2])
                if L1 == L2:
                    retList.append(Lk[i] | Lk[j])
        return retList

    def fit(self, dataset, k):
        """Mine all frequent itemsets of ``dataset``.

        dataset : list of transactions (iterables of items).
        k       : starting itemset size for candidate generation; the
                  standard Apriori invocation passes ``k=2``.
        Returns ``(L, supportData)``: ``L[i]`` is the list of frequent
        (i+1)-itemsets (the last entry is empty, marking termination) and
        ``supportData`` maps each counted itemset to its support.
        """
        self.dataset = dataset
        C1 = self.createC1(self.dataset)
        D = list(map(set, dataset))
        L1, supportData = self.ScanD(D, C1)
        L = [L1]
        # Grow itemsets level by level until no frequent k-itemset remains.
        while len(L[k - 2]) > 0:
            Ck = self.apriorigGen(L[k - 2], k)
            Lk, supK = self.ScanD(D, Ck)
            L.append(Lk)
            supportData.update(supK)
            k += 1
        return L, supportData

    def calConf(self, freqset, H, supportdata, brl, minconf):
        """Evaluate candidate consequents ``H`` for rules from ``freqset``.

        Appends each rule ``(antecedent, consequent, confidence)`` with
        confidence >= ``minconf`` to ``brl`` (mutated in place) and returns
        the list of consequents that passed the threshold.
        """
        prunedh = []
        for conseq in H:
            # confidence(A -> B) = support(A ∪ B) / support(A)
            conf = supportdata[freqset] / supportdata[freqset - conseq]
            if conf >= minconf:
                print(freqset-conseq,'---',conseq,'conf:',conf)
                brl.append((freqset - conseq, conseq, conf))
                prunedh.append(conseq)
        return prunedh

    def rulesFromConseq(self, freqSet, H, supportdata, br1, minconf):
        """Recursively grow rule consequents for an itemset of size > 2.

        Merges the current consequents ``H`` into larger ones and keeps
        recursing while enough confident consequents survive pruning.
        """
        m = len(H[0])
        if len(freqSet) > (m + 1):
            # Build (m+1)-item consequents, keep the confident ones, recurse.
            hmpl = self.apriorigGen(H, m + 1)
            hmpl = self.calConf(freqSet, hmpl, supportdata, br1, minconf)
            if len(hmpl) > 1:
                self.rulesFromConseq(freqSet, hmpl, supportdata, br1, minconf)

    def generateRules(self, L, supportData, minConf):
        """Derive association rules from the mined frequent itemsets.

        L           : level list returned by ``fit``.
        supportData : support map returned by ``fit``.
        minConf     : minimum confidence threshold.
        Returns a list of ``(antecedent, consequent, confidence)`` tuples.
        """
        bigRuleList = []
        # Rules only come from itemsets of size >= 2 (L[1] onwards).
        for i in range(1, len(L)):
            for freqSet in L[i]:
                Hl = [frozenset([item]) for item in freqSet]
                if i > 1:
                    # Itemsets of size > 2: grow consequents recursively.
                    self.rulesFromConseq(freqSet, Hl, supportData, bigRuleList, minConf)
                else:
                    # Pairs: test single-item consequents directly.
                    self.calConf(freqSet, Hl, supportData, bigRuleList, minConf)
        return bigRuleList


if __name__ == '__main__':
    # Preprocess the transaction table: skip the header row, use the first
    # column as the index; a cell equal to 'T' means the item (column)
    # appears in that transaction (row).
    data = pd.read_excel('data.xls', skiprows=[0], index_col=0)
    goods_name = pd.Series(data.columns)
    record = []
    # BUGFIX: DataFrame.as_matrix()/Series.as_matrix() were removed in
    # pandas 1.0 — use to_numpy() / Series.tolist() instead.
    for row in data.to_numpy():
        record.append(goods_name[row == 'T'].tolist())
    starttime = time.time()
    model = Apriri(minsupport=0.25)
    L, supportData = model.fit(dataset=record, k=2)
    endtime = time.time()
    # Only the frequent-pattern mining step is timed.
    print("程序运行的时间是{}s".format(endtime - starttime))
    model.generateRules(L, supportData, minConf=0.7)
