import math
import random
import sys
import pandas as pd
from pandas import DataFrame
import DataMiningtools as DMTS
import KNearestNeighbor as KNN
import NaiveBayes as NB
import DecisionTree as DT

class AdaBoost (): #One-vs-rest AdaBoost ensemble over three base learners (KNN / NaiveBayes / DecisionTree)
    # Shared helper used for attribute statistics (AttributeMax) and sampling.
    DMT = DMTS.DataMiningTools()

    def __init__(self,train,categorization,k,group_num) -> None:
        """Build one balanced binary training set per class (one-vs-rest),
        train the three base learners on each, and compute AdaBoost weights
        for the resulting 3*group_num learners per training set.

        train          -- training DataFrame
        categorization -- column index of the class label
        k              -- neighbour count forwarded to the KNN learner
        group_num      -- number of boosting rounds per training set
        """
        self.train_data = train.iloc[:,:]  # defensive copy of the training data
        self.train_list = []  # i-th element: binary training set for class i ([c_num-1])
        self.train_size = self.train_data.shape[0]
        self.a_num_arr = self.DMT.AttributeMax(self.train_data)  # distinct-value count per attribute
        self.test_list = []  # per set i, algorithm j, per-sample result ([c_num-1][3][samples])
        self.categorization = categorization
        self.c_num = self.a_num_arr[self.categorization]  # number of classes
        self.k = k
        self.group_num = group_num
        self.learner_weight = []  # per set: weights of the 3*group_num learners ([c_num-1][3*group_num])
        self.learner_list = []  # per set: the three trained learners ([c_num-1][3])

        # Pre-allocate all per-training-set containers.
        # (Renamed loop variables: the original inner `k` shadowed the
        # constructor parameter `k`.)
        for set_id in range (0,self.c_num-1):
            self.train_list.append(None)
            self.test_list.append([])
            self.learner_weight.append([])
            self.learner_list.append([])

            for algo_id in range (0,3):
                self.test_list[set_id].append([])
                self.learner_list[set_id].append(None)
                for _ in range (0,self.group_num):
                    self.learner_weight[set_id].append(0)

        self.__ModelTrain()


    def __ArithmeticTest (self,method,train_id,sub_train,sub_train_size):
        """Train the learner named by ``method`` on ``sub_train`` and record,
        per training sample, 1 for a correct prediction and -1 for a wrong one
        into ``self.test_list[train_id]``."""
        if method == 'KNN':
            self.learner_list[train_id][0] = KNN.KNearestNeighbor(sub_train,self.k,self.categorization)
            for i in range (0,sub_train_size):
                sample = sub_train.iloc[i:i+1,:]

                #r = self.learner_list[train_id][0].KNN_Model(sample)
                r = 0 # KNN deliberately disabled: its prediction is fixed to class 0

                if r == sample.iloc[0,self.categorization]:
                    self.test_list[train_id][0][i] = 1
                else:
                    self.test_list[train_id][0][i] = -1

        elif method == 'NaiveBayes':
            self.learner_list[train_id][1] = NB.NaiveBayes(sub_train,self.categorization)
            for i in range (0,sub_train_size):
                sample = sub_train.iloc[i:i+1,:]

                r = self.learner_list[train_id][1].NaiveBayes_Model(sample)

                if r == sample.iloc[0,self.categorization]:
                    self.test_list[train_id][1][i] = 1
                else:
                    self.test_list[train_id][1][i] = -1
                print ('NB',train_id,self.test_list[train_id][1][i])

        elif method == 'DecisionTree':
            self.learner_list[train_id][2] = DT.DecisionTree(sub_train,self.categorization)

            for i in range (0,sub_train_size):
                sample = sub_train.iloc[i:i+1,:]

                r = self.learner_list[train_id][2].DecisionTree_Model(sample)

                if r == sample.iloc[0,self.categorization]:
                    self.test_list[train_id][2][i] = 1
                else:
                    self.test_list[train_id][2][i] = -1

                print ('DT',train_id,self.test_list[train_id][2][i])


    def __SingleModelTrain (self,train_id,sub_train_size):
        """Build and evaluate all three base models for one training subset."""
        sub_train = self.train_list[train_id]

        # Initialise the per-algorithm result lists with one slot per sample.
        for algo_id in range (0,3):
            for _ in range (0,sub_train_size):
                self.test_list[train_id][algo_id].append(0)

        self.__ArithmeticTest('KNN',train_id,sub_train,sub_train_size)
        self.__ArithmeticTest('NaiveBayes',train_id,sub_train,sub_train_size)
        self.__ArithmeticTest('DecisionTree',train_id,sub_train,sub_train_size)


    def __Weight (self,last_weight,error_list,sub_train_size,train_id,learner_id):
        """AdaBoost re-weighting step.

        error_list[i] is 1 when sample i was classified correctly, -1 when it
        was wrong. Stores the learner's vote weight in ``self.learner_weight``
        and returns the normalised sample weights for the next round.
        """
        error_rate = 0
        # BUGFIX: copy instead of aliasing last_weight, so the caller's list
        # is never mutated in place.
        new_weight = list(last_weight)
        z = 0  # normalisation constant

        # Weighted error rate: sum of weights of the misclassified samples.
        for i in range (0,len(error_list)):
            if error_list[i] == -1:
                error_rate += last_weight[i]*1

        # Clamp the error rate away from 0 and 1 so log()/division stay finite.
        if error_rate == 0:
            error_rate += 0.00001
        if error_rate == 1:
            error_rate -= 0.00001

        a = 1/2*math.log((1-error_rate)/error_rate)  # learner vote weight (alpha)
        self.learner_weight[train_id][learner_id] = a

        # Re-weight: boost misclassified samples, shrink correct ones.
        for i in range (0,sub_train_size):
            factor = last_weight[i]*(math.e**(-a*error_list[i]))
            z += factor
            new_weight[i] = factor

        for i in range (0,sub_train_size):
            new_weight[i] /= z

        return new_weight


    def __ModelTrain (self):
        """Construct the full AdaBoost model: one boosted ensemble per class."""
        piece = self.c_num-1  # number of one-vs-rest training sets
        weight = []  # weight[i][j]: weight of sample j in training subset i

        for i in range (0,piece):
            self.__CreateTrain(i)  # build the binary training subset for class i
            sub_train_size = self.train_list[i].shape[0]
            weight.append([])

            # Uniform initial sample weights.
            for _ in range (0,sub_train_size):
                weight[i].append(1/sub_train_size)

            self.__SingleModelTrain(i,sub_train_size)

            # group_num boosting rounds over the three (fixed) learners.
            for j in range (0,self.group_num):
                for algo_id in range (0,3):
                    weight[i] = \
                    self.__Weight(weight[i],self.test_list[i][algo_id],self.train_list[i].shape[0],i,j*3+algo_id)


    def Judgement (self,vote_list,train_id):
        """Weighted majority vote of the strong learner for one training set.

        Returns 1 when the weighted vote is positive, otherwise -1.
        """
        vote = 0

        for i in range (0,len(vote_list)):
            vote += vote_list[i]*self.learner_weight[train_id][i]

        if vote > 0:
            return 1
        else:
            return -1


    def AdaBoost_Model (self,sample):
        """Classify ``sample``: run the one-vs-rest binary classifiers in
        order and return the index of the first class whose strong learner
        votes 1 (or the last class index if none does)."""
        piece = self.c_num-1
        vote = -1
        result = 0

        for i in range (0,piece):
            votes = []  # weighted votes collected from all 3*group_num learners

            for j in range (0,self.group_num):
                # BUGFIX: locals renamed so they no longer shadow the imported
                # KNN / NB / DT module names.
                knn_model = self.learner_list[i][0]
                nb_model = self.learner_list[i][1]
                dt_model = self.learner_list[i][2]

                #rk = knn_model.KNN_Model(sample)
                rk = 0 # KNN deliberately disabled
                rn = nb_model.NaiveBayes_Model(sample)
                rd = dt_model.DecisionTree_Model(sample)
                print (dt_model.node)

                # Map class-0 predictions to -1 so votes are in {-1, 1}.
                # if rk == 0:
                #     rk = -1
                if rn == 0:
                    rn = -1
                if rd == 0:
                    rd = -1

                votes.append(rk)
                votes.append(rn)
                votes.append(rd)
                print (i+1,'次')
                print (rk,rn,rd)

            vote = self.Judgement(votes,i)

            if vote == 1: # class determined
                break
            result += 1 # otherwise fall through to the next binary classifier

        return result


    def __CreateTrain (self,c_value):
        """Build a balanced binary training set for class ``c_value``:
        samples of that class are relabelled 1, all others 0, with equal
        counts of both (the larger side is down-sampled at a random offset).
        The result is stored in ``self.train_list[c_value]``."""
        # .copy() avoids pandas SettingWithCopy hazards when relabelling below.
        c_data = self.train_data[self.train_data.iloc[:,self.categorization] == c_value].copy()
        c_data_size = c_data.shape[0]
        # samples whose class equals c_value

        nc_data = self.train_data[self.train_data.iloc[:,self.categorization] != c_value].copy()
        nc_data_size = nc_data.shape[0]
        # samples whose class differs from c_value

        c_sub_size = min(c_data_size,nc_data_size) # size of the smaller side

        if c_sub_size == c_data_size:
            start = random.randint(0,nc_data_size-c_sub_size)
            nc_data = self.DMT.Sampling(nc_data,start,c_sub_size)[0] # down-sample negatives to match
        else:
            start = random.randint(0,c_data_size-c_sub_size)
            c_data = self.DMT.Sampling(c_data,start,c_sub_size)[0] # down-sample positives to match

        # Relabel: target class -> 1, everything else -> 0.
        for i in range (0,c_sub_size):
            nc_data.iloc[i,self.categorization] = 0
            c_data.iloc[i,self.categorization] = 1

        sub_train = pd.concat([c_data,nc_data]) # balanced binary training set

        sub_train_arr = self.DMT.AttributeMax(sub_train) # distinct-value counts in the subset

        padding = pd.DataFrame()

        # If sub-sampling dropped some attribute value, append ONE synthetic
        # row holding every attribute's maximum value so AttributeMax-based
        # consumers still see the full value range. (The original rebuilt the
        # identical row once per mismatching attribute and shadowed both the
        # builtin `dict` and the outer loop variable `i`.)
        if any(sub_train_arr[i] != self.a_num_arr[i] for i in range (0,len(self.a_num_arr))):
            row = {}
            for col_id in range (0,len(self.train_data.columns)):
                row[self.train_data.iloc[:,col_id].name] = 0

            padding = pd.DataFrame ([row])

            for j in range (0,self.train_data.shape[1]):
                if j != self.categorization:
                    padding.iloc[0,j] = self.a_num_arr[j]-1
                else:
                    padding.iloc[0,j] = 1

        sub_train = pd.concat([sub_train,padding])

        col = sub_train.shape[1]
        sub_train = sub_train.reset_index()
        sub_train = sub_train.iloc[:,1:col+1] # drop the old index column
        self.train_list[c_value] = sub_train