import sys

sys.path.append('..')
import iris_data
import numpy as np
from sklearn.model_selection import train_test_split

class AdaBoosting:
    """
    AdaBoost classifier built from one-dimensional decision stumps.

    Each boosting round fits the best threshold stump (one feature, one
    split point, one direction) against the current sample weights D,
    then re-weights the samples so the next stump focuses on the
    mistakes of the previous ones.
    """

    def __init__(self, X, y, max_iter=100):
        """
        Parameters
        ----------
        X : ndarray of shape (N, P)
            Training samples.
        y : ndarray
            Training labels in {1, -1}.
        max_iter : int
            Maximum number of boosting rounds (one stump per round).
        """
        self.X = X
        self.y = y
        # for each iter we create a classifier tree (stump)
        self.max_iter = max_iter

        self.N, self.P = X.shape
        # sample-weight vector, initialised uniform; reshaped to match y
        # so elementwise comparisons/broadcasts line up
        self.D = np.array([1 / self.N] * self.N).reshape(self.y.shape)
        # alpha[m]: voting weight of the m-th stump
        self.alpha = []
        # classifier set (NOTE: attribute name 'foreast' is a typo for
        # 'forest', kept as-is for backward compatibility with callers)
        self.foreast = []

    def classifier(self):
        """
        Fit the best decision stump under the current weights D.

        Scans every feature dimension, a grid of percentile-based split
        points, and both inequality directions, keeping the stump with
        the smallest weighted error.

        Returns
        -------
        error : float
            Weighted misclassification error of the best stump.
        tree : dict
            Best stump with keys 'split' (threshold), 'pos_dir'
            ('r' means predict +1 when x > split, 'l' means predict +1
            when x <= split) and 'dim' (feature index).
        y_pre : ndarray
            Predictions of the best stump on the training set.
        """
        error = float('inf')
        tree = {}
        for dim in range(self.P):
            X = self.X[:, dim]
            # candidate thresholds from the 10th..91st percentiles,
            # nudged by 0.01 so they fall strictly between data points
            steps = [round(np.percentile(X, i), 2) + 0.01 for i in range(10, 100, 9)]
            for s in steps:
                for direction in ['r', 'l']:
                    if direction == 'r':
                        tmp_y = np.where(X > s, 1, -1)
                    else:
                        tmp_y = np.where(X > s, -1, 1)
                    # weighted error: sum of D over misclassified samples
                    tmp_y = tmp_y.reshape(self.y.shape)
                    tmp_e = self.D[tmp_y != self.y].sum()
                    # keep the stump with the smaller error
                    if tmp_e < error:
                        y_pre = tmp_y
                        error = tmp_e
                        tree['split'] = s
                        tree['pos_dir'] = direction
                        tree['dim'] = dim

        return error, tree, y_pre

    def boosting_tree(self):
        """
        Run the boosting loop: fit stumps and update sample weights.

        Stops after max_iter rounds, or early once a stump classifies
        the training set perfectly (error == 0).
        """
        # floor on the error so a perfect stump gets a large but FINITE
        # alpha instead of log(1/0) = inf, which would poison predict()
        # and make the D update divide by zero
        eps = 1e-10
        step = 0
        while step < self.max_iter:
            step += 1
            error, tree, y_pre = self.classifier()
            # alpha: as error grows, the stump's vote shrinks
            am = 1 / 2 * np.log((1 - error) / max(error, eps))
            self.alpha.append(am)
            self.foreast.append(tree)
            if error == 0:
                # perfect stump: no weight update possible (Z would be
                # degenerate) and no further rounds needed
                break
            # re-weight: shrink correctly-classified samples, grow the
            # misclassified ones, then normalise by Z
            exponent = np.where(y_pre == self.y, np.exp(-am), np.exp(am))
            Z = (self.D * exponent).sum()
            self.D = self.D * exponent / Z

    def predict(self, X):
        """
        Predict the label of a single sample.

        Parameters
        ----------
        X : ndarray of shape (P,)
            One sample.

        Returns
        -------
        float
            np.sign of the alpha-weighted stump votes (+1 or -1).
        """
        res = 0
        # iterate (weight, stump) pairs
        for a, t in zip(self.alpha, self.foreast):
            # get direction, split point, dimension
            pos_dir = t['pos_dir']
            split = t['split']
            dim = t['dim']
            # single-stump vote; '<=' (not '<') mirrors the training
            # rule np.where(x > s, -1, 1), which labels x == s as +1
            if pos_dir == 'r' and X[dim] > split:
                pre = 1
            elif pos_dir == 'l' and X[dim] <= split:
                pre = 1
            else:
                pre = -1

            res += a * pre
        # weighted majority vote
        return np.sign(res)
    
if __name__ == '__main__':
    X, y = iris_data.get_data()
    # labels arrive as {1, 0}; the y * G(x) formulation needs {1, -1}
    y = np.where(y == 0, -1, y)
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.3, random_state=20)
    model = AdaBoosting(X_train, y_train)
    model.boosting_tree()

    # evaluate on the held-out split
    pre = []
    error = 0
    for sample, truth in zip(X_test, y_test):
        prediction = model.predict(sample)
        pre.append(prediction)
        if prediction != truth:
            error += 1

    # number of misclassified test samples
    print (error)