"""
线性回归算法，基于单层神经网络
"""
import numpy as np
import random
from singlelayerneuralnetwork import SingleLayerNeuralNetwork

class LinearRegression(SingleLayerNeuralNetwork):
    """
    Linear regression implemented as a single-layer neural network.

    Usage:
        model = LinearRegression(iteration=10000, randomstate=0)
        model.fit(Xtrain, Ytrain, learningrate=0.0005, fastmode=False,
                  stochastic=False, minerror=1)
        model.predict(Xtest)
    """

    def __sign(self, inputdata):
        '''
        Sign (threshold) function used to binarize a regression output.
        inputdata: scalar input value
        @return:   1 if positive, -1 if negative, 0 if exactly zero
        '''
        if inputdata > 0:
            return 1
        elif inputdata < 0:
            return -1
        else:
            return 0
    
    def fit(self, Xtrain, Ytrain, learningrate=0.0005, fastmode=False, stochastic=False, minerror=0.1, checkloss=False):
        '''
        Fit the regression plane to the training samples.
        Xtrain:       training data, N * d matrix, row i is sample i
        Ytrain:       training labels, N * 1 matrix, row i is label i
        learningrate: gradient-descent step size
        fastmode:     speeds up training, but raises the risk of overfitting
        stochastic:   use per-sample (stochastic) updates instead of batch updates
        minerror:     smallest tolerated error; iteration stops once reached
        checkloss:    print the loss value after every update
        @return:      (number of iterations actually run,
                       normalized loss norm on the training set).
                      NOTE: the second value is a residual/loss measure,
                      not a classification accuracy.
        '''
        N, D = Xtrain.shape  # number of samples N and dimensionality D

        # Prepend a bias column of ones: Xtrain_ is N x (D+1)
        Xtrain_ = np.append(np.ones((N, 1), dtype=Xtrain.dtype), Xtrain, 1)

        # Seed the RNG so the initial regression plane is reproducible
        random.seed(self.randomstate)
        start_index = int(random.random() * N)

        # Initialize the regression plane W from the difference of two
        # neighbouring samples (index -1 wraps to the last row when
        # start_index == 0), then compute the initial loss value.
        W = Xtrain_[start_index, :] - Xtrain_[start_index - 1, :]
        W = W.reshape(D + 1, 1)
        if not stochastic:
            # Batch gradient of the squared error: X^T (XW - Y)
            loss = np.matmul(np.transpose(Xtrain_), (np.matmul(Xtrain_, W) - Ytrain))
        else:
            # Per-sample gradient evaluated on the first sample
            loss = (np.matmul(Xtrain_[0,:].reshape(1, D+1), W)[0,0] - Ytrain[0,0]) * Xtrain_[0,:].reshape(D+1, 1)

        # Working copies updated during iteration
        new_W = W.copy()
        new_loss = loss.copy()
        temple_loss = loss.copy()  # loss the fast mode iterates from

        # Guarantee `cycle` is bound even when self.iteration == 0, so the
        # bookkeeping below cannot raise NameError (actual_iteration -> 0).
        cycle = -1

        # Start iterating
        if not stochastic:
            for cycle in range(self.iteration):
                # Report the loss obtained with the candidate new_W
                if checkloss:
                    print(np.linalg.norm(new_loss) / N)

                # Accept new_W only when it decreases the loss
                if np.linalg.norm(new_loss) < np.linalg.norm(loss):
                    W = new_W.copy()
                    loss = new_loss.copy()
                elif fastmode:
                    # Fast mode: restart the descent from the best W,
                    # but keep stepping along the accepted loss direction
                    new_W = W.copy()
                    temple_loss = loss.copy()

                # Loss is within tolerance -- stop iterating
                if np.linalg.norm(loss) / N <= minerror:
                    break

                # Gradient-descent update of new_W
                if not fastmode:
                    new_W = new_W - learningrate * new_loss
                else:
                    new_W = new_W - learningrate * temple_loss

                # Loss produced by the updated new_W
                new_loss = np.matmul(np.transpose(Xtrain_), (np.matmul(Xtrain_, new_W) - Ytrain))
        else:
            for cycle in range(self.iteration):
                # Count of samples regressed within tolerance
                correct = 0
                
                # Scan the data set for the first sample outside tolerance
                for i in range(N):
                    # Take sample i and evaluate its loss
                    x = Xtrain_[i,:].reshape(1, D+1)
                    y = Ytrain[i,0]
                    loss = ((np.matmul(x, W)[0,0] - y) * x).reshape(D+1, 1)

                    if np.linalg.norm(loss) > minerror:
                        mis_x = x.copy()
                        mis_y = y
                        break
                    else:
                        correct += 1
                        continue
                
                # All samples are within tolerance -- stop iterating
                if correct == N:
                    break
                else:
                    new_W = W.copy()
                    new_loss = loss.copy()

                # Report the loss obtained with the candidate new_W
                if checkloss:
                    print(np.linalg.norm(new_loss))

                # Update W until the offending sample fits within tolerance
                while True:
                    # Accept new_W only when it decreases the loss
                    if np.linalg.norm(new_loss) < np.linalg.norm(loss):
                        W = new_W.copy()
                        loss = new_loss.copy()
                    elif fastmode:
                        new_W = W.copy()
                        temple_loss = new_loss.copy()

                    # The offending sample is now regressed within tolerance
                    if np.linalg.norm(new_loss) <= minerror:
                        break
                    
                    # Gradient-descent update of new_W and its loss
                    if not fastmode:
                        new_W = new_W - learningrate * new_loss
                    else:
                        new_W = new_W - learningrate * temple_loss

                    new_loss = ((np.matmul(mis_x, new_W)[0,0] - mis_y) * mis_x).reshape(D+1, 1)

        # Record the results
        self.W = W[1:,:].reshape(D)   # weight vector, bias excluded
        self.w_0 = W[0,0]             # bias term
        self.dimen = D
        self.Xtrain = Xtrain
        self.Ytrain = Ytrain
        self.actual_iteration = cycle + 1
        self.train_score = np.linalg.norm(loss) / N  # normalized loss, not accuracy
        
        return (self.actual_iteration, self.train_score)


    def predict(self, Xtest, dichotomy=False):
        '''
        Predict labels for the test samples.
        Xtest:     test data, N * d matrix, row i is sample i
        dichotomy: if True, binarize each output with the sign function
        @return:   predictions, N * 1 matrix, row i is label i
        '''
        # Precondition check delegated to the base class
        # (presumably validates that fit() has been called -- confirm there)
        super().predict(Xtest)

        # Dimensionality D and number N of the prediction samples
        N, D = Xtest.shape
        W = np.transpose(self.W.reshape(1, D))
        w_0 = self.w_0

        # Run the prediction sample by sample
        y_predict = np.zeros((N, 1))
        for i in range(N):
            x = Xtest[i, :].reshape(1, D)
            y = np.matmul(x, W)[0,0] + w_0
            if dichotomy:
                y = self.__sign(y)
            y_predict[i, 0] = y

        # Keep the test data and its predictions on the instance
        self.Xtest = Xtest
        self.Ypred = y_predict

        return self.Ypred


if __name__ == "__main__":
    # Toy two-dimensional, two-class data set for a quick smoke test.
    X_train = np.array([
        [1, 2], [5, 7], [1, 1], [8, 5], [4, 1], [3, 1], [4, 9], [3, 10],
        [3, 2], [2, 1], [8, 8], [7, 9], [2, 0], [0, 0], [6, 11], [2, 9],
        [4, 2], [3, 9], [6, 0], [6, 9],
    ])
    Y_train = np.array([
        [1], [-1], [1], [-1], [1], [1], [-1], [-1], [1], [1], [-1], [-1],
        [1], [1], [-1], [-1], [1], [-1], [1], [-1],
    ])
    X_test = np.array([[0.5, 0.5], [4, 8], [3, 10], [5, 6]])

    # Train with default settings, then produce binarized predictions.
    regressor = LinearRegression()
    n_iter, train_score = regressor.fit(X_train, Y_train)
    predictions = regressor.predict(X_test, True)
    bias, weights = regressor.getW()

    # Report the fitted model and its predictions.
    report = ("Iteration = {0}\n"
              "Score = {1}\n"
              "W = {2}\n"
              "w_0 = {3}").format(n_iter, train_score, weights, bias)
    print(report)
    print("y_pred = \n" + str(predictions))
    regressor.drawTwoDimenPic(testdata=True)