# Logistic regression algorithm
# Trained with batch gradient descent or stochastic gradient descent

import numpy as np
import matplotlib.pyplot as plt


class LogisticRegression:
    """Logistic regression trained by batch gradient descent or SGD.

    The weight vector ``w`` includes the intercept as its LAST component;
    the fitting methods append a constant-1 column to the design matrix,
    so callers pass raw (m, n) feature matrices.
    """

    def __init__(self):
        # Model weights incl. bias as last entry; set by GD_Fit / SGD_Fit.
        self.w = None
        # Kept for interface compatibility; the bias actually lives in w.
        self.b = None

    # Logistic (sigmoid) function
    def Sigmoid(self, x):
        """Element-wise logistic function.

        :param x: scalar or ndarray input
        :return: 1 / (1 + exp(-x)), same shape as x, values in (0, 1)
        """
        return 1.0 / (1.0 + np.exp(-x))

    # Prediction
    def predict(self, x, theta):
        """Predict 0/1 labels for samples ``x`` with the trained weights.

        Bug fix: the original ignored the trained model and scored samples
        against a hard-coded weight vector (debugging leftover), and printed
        the raw probabilities to stdout.

        :param x: (m, n) sample matrix; a bias column is appended
                  automatically when x has one column fewer than ``w``
        :param theta: decision threshold on the predicted probability
        :return: labels: (m,) float array of 0.0 / 1.0 labels
        """
        if self.w is None:
            raise ValueError("model is not fitted; call GD_Fit or SGD_Fit first")
        x = np.asarray(x, dtype=float)
        # Append the constant-1 bias column when the caller passed raw features.
        if x.ndim == 2 and x.shape[1] == self.w.shape[0] - 1:
            x = np.c_[x, np.ones(x.shape[0])]
        probs = self.Sigmoid(np.dot(x, self.w))
        # Positive class when the probability exceeds the threshold.
        return (probs > theta).astype(float)

    # Loss of the logistic regression model (whole batch)
    def calculate_loss(self, x_train, y_train):
        """Mean negative log-likelihood over the training set.

        :param x_train: (m, n+1) design matrix (bias column included)
        :param y_train: (m,) array of 0/1 targets
        :return: loss: scalar mean loss
        """
        a = np.dot(x_train, self.w)
        # np.logaddexp(0, a) == log(1 + exp(a)) without overflow for large a.
        return float(np.mean(-y_train * a + np.logaddexp(0.0, a)))

    # Loss for a single sample (SGD)
    def calculate_SGD_loss(self, x_train, y_train):
        """Negative log-likelihood of a single sample.

        Bug fix: the original divided by ``x_train.shape[0]``, which for a
        single sample is the number of FEATURES, not the number of samples.

        :param x_train: (n+1,) single sample (bias entry included)
        :param y_train: scalar 0/1 target
        :return: loss: scalar loss of this sample
        """
        a = np.dot(x_train, self.w)
        return float(-y_train * a + np.logaddexp(0.0, a))

    # Gradient of the batch loss w.r.t. w
    def calculate_grad(self, x_train, y_train):
        """Gradient of the mean loss with respect to ``w``.

        Bug fix: the original accumulated ``sum(term[:, j])`` with a stale
        inner-loop index ``j``, so every gradient component received the
        value of the last column. Also vectorized (was an O(m*n) loop).

        :param x_train: (m, n+1) design matrix
        :param y_train: (m,) targets
        :return: grad: (n+1,) gradient vector
        """
        m = x_train.shape[0]
        residual = self.Sigmoid(np.dot(x_train, self.w)) - y_train
        return np.dot(x_train.T, residual) / m

    # Gradient of the single-sample loss w.r.t. w (SGD)
    def calculate_SGD_grad(self, x_train, y_train):
        """Gradient of one sample's loss with respect to ``w``.

        Bug fix: the original collapsed the per-feature terms with
        ``sum(term)/m`` and returned a scalar, so every weight received
        an identical update.

        :param x_train: (n+1,) single sample
        :param y_train: scalar target
        :return: grad: (n+1,) gradient vector
        """
        residual = self.Sigmoid(np.dot(x_train, self.w)) - y_train
        return residual * np.asarray(x_train, dtype=float)

    # Fit by batch gradient descent
    def GD_Fit(self, x_train, y_train, learn_rate, max_iter):
        """Learn the model with full-batch gradient descent.

        :param x_train: (m, n) training samples (no bias column)
        :param y_train: (m,) 0/1 targets
        :param learn_rate: step size
        :param max_iter: number of full-batch updates
        :return: loss_list: loss value recorded before each update
        """
        m, n = np.shape(x_train)
        # Fold the intercept into w by appending a constant-1 column.
        X = np.c_[x_train, np.ones(m)]
        self.w = np.zeros(n + 1)
        loss_list = []
        for _ in range(max_iter):
            # Record the pre-update loss (matches original ordering).
            loss_list.append(self.calculate_loss(X, y_train))
            self.w -= learn_rate * self.calculate_grad(X, y_train)
        return loss_list

    # Fit by stochastic gradient descent
    def SGD_Fit(self, x_train, y_train, learn_rate, max_iter):
        """Learn the model with SGD (one sample per update).

        :param x_train: (m, n) training samples (no bias column)
        :param y_train: (m,) 0/1 targets
        :param learn_rate: step size
        :param max_iter: number of passes over the data
        :return: loss_list: per-sample losses (m * max_iter entries)
        """
        m, n = np.shape(x_train)
        # Fold the intercept into w by appending a constant-1 column.
        X = np.c_[x_train, np.ones(m)]
        self.w = np.zeros(n + 1)
        loss_list = []
        for _ in range(max_iter):
            for j in range(m):
                loss_list.append(self.calculate_SGD_loss(X[j], y_train[j]))
                self.w -= learn_rate * self.calculate_SGD_grad(X[j], y_train[j])
        return loss_list

    # Visualize training: loss versus iteration count
    def training_visualize(self, loss_list0):
        """Plot the loss curve against the iteration count.

        :param loss_list0: list of loss values, one per iteration
        """
        plt.plot(loss_list0, color='dodgerblue', label='GD Loss')
        plt.xlabel('iterations')
        plt.ylabel('loss')
        plt.legend()  # fix: labels were set but legend was never shown
        plt.show()