"""
LogisticRegression逻辑回归
"""
import numpy as np
import matplotlib.pyplot as plt


def load_data(file_):
    """
    Load a comma-separated dataset file.

    :param file_: path to the dataset file (rows = samples, last column = label)
    :return: feature matrix x_ (float, shape (n, m-1)) and label column
             vector y_ (int, shape (n, 1))
    """
    # np.loadtxt accepts a path directly; ndmin=2 keeps a single-row file
    # two-dimensional instead of collapsing it to a 1-D array.
    data_ = np.loadtxt(file_, dtype=float, comments='#', delimiter=',', ndmin=2)
    # Every column except the last is a feature.
    x_ = data_[:, :-1]
    # The last column is the label; slice with -1: to keep it as an (n, 1)
    # column vector, then convert to int.
    y_ = data_[:, -1:].astype(int)
    return x_, y_


def sigmoid(t):
    """
    Forward-pass squashing function: S(t) = 1 / (1 + e^{-t}).

    :param t: scalar or ndarray, typically x·theta
    :return: value(s) mapped into the open interval (0, 1)
    """
    exp_neg_t = np.exp(-t)
    return 1.0 / (1.0 + exp_neg_t)


def J(theta_, x_, y_):
    """
    Cross-entropy loss for logistic regression.

    :param theta_: current parameter vector theta
    :param x_: feature matrix of the dataset
    :param y_: label column vector of the dataset
    :return: -(1/n) * sum( y_i*ln(p_i) + (1-y_i)*ln(1-p_i) )
    """
    p = sigmoid(x_.dot(theta_))
    log_likelihood = y_ * np.log(p) + (1 - y_) * np.log(1 - p)
    return -log_likelihood.sum() / len(y_)


def dJ(theta_, x_, y_):
    """
    Vectorized gradient of the loss J with respect to theta.

    :param theta_: current parameter vector theta
    :param x_: feature matrix of the dataset
    :param y_: label column vector of the dataset
    :return: gradient vector, (1/n) * X^T (sigmoid(X theta) - y)
    """
    residual = sigmoid(x_.dot(theta_)) - y_
    return x_.T.dot(residual) / len(y_)


def gradient_function(x_, y_, _theta, _alpha, _echo):
    """
    Learn theta by batch gradient descent.

    :param x_: training feature matrix
    :param y_: training label column vector
    :param _theta: initial theta
    :param _alpha: learning rate
    :param _echo: number of training epochs
    :return: learned theta and the per-epoch loss history (shape (_echo, 1));
             entry 0 is the loss of the initial theta
    """
    theta_ = _theta
    # np.mat / np.matrix is deprecated in NumPy; a plain ndarray records
    # the loss history just as well and still plots with plt.plot.
    Jlog_ = np.zeros((_echo, 1))
    Jlog_[0] = J(theta_, x_, y_)
    for i in range(1, _echo):
        # One descent step at the current theta, then log the new loss.
        theta_ = theta_ - _alpha * dJ(theta_, x_, y_)
        Jlog_[i] = J(theta_, x_, y_)
    return theta_, Jlog_


def predict(_x, _theta):
    """
    Run the forward pass with the learned parameters and threshold the
    probabilities into hard class labels.

    :param _x: feature matrix to predict on
    :param _theta: learned parameter vector
    :return: int array of 0/1 predictions (1 where sigmoid(x·theta) >= 0.5)
    """
    probabilities = sigmoid(_x.dot(_theta))
    return (probabilities >= 0.5).astype(int)


def genConfusionMatrix(pre, label, n):
    """
    Build an n-by-n confusion matrix.

    Rows index the actual label, columns index the prediction, so for
    binary classification the result is [[tn, fp], [fn, tp]].

    :param pre: predicted classes (int array)
    :param label: actual classes (int array)
    :param n: number of classes
    :return: confusion matrix of counts, shape (n, n)
    """
    # Encode each (actual, predicted) pair as a single index label*n + pre,
    # count occurrences, then fold the flat counts back into a square matrix.
    flat_index = label * n + pre
    counts = np.bincount(flat_index, minlength=n * n)
    return counts.reshape(n, n)


def model_assessment(tn_, fp_, fn_, tp_):
    """
    Print accuracy, error rate, precision, recall and F1 score from
    binary confusion-matrix counts.

    :param tn_: true negatives
    :param fp_: false positives
    :param fn_: false negatives
    :param tp_: true positives
    """
    count = tn_ + fp_ + fn_ + tp_
    # Accuracy: fraction of correct predictions of either class.
    # Bug fix: was (tp_ + fn_) / count, which counts false negatives as
    # correct; accuracy is (tp_ + tn_) / count.
    T = (tp_ + tn_) / count
    print("精度:", '%.5f' % T, "错误率:", '%.5f' % (1 - T))
    # Precision P and recall R
    P = tp_ / (tp_ + fp_)
    R = tp_ / (tp_ + fn_)
    print("查准率:", '%.5f' % P, "查全率:", '%.5f' % R)
    # F1 = 2TP / (2TP + FP + FN); note count + tp_ - tn_ == 2TP + FP + FN
    F1 = (2 * tp_) / (count + tp_ - tn_)
    print("F1指数:", '%.5f' % F1)


# Raw strings keep the backslash literal: a plain "\L" is an invalid escape
# sequence and raises a SyntaxWarning on Python >= 3.12. The string values
# are byte-identical to the originals.
file_train = r".\LogisticRegression_train.csv"
file_test = r".\LogisticRegression_test.csv"

# Learning rate alpha and number of training epochs echo
alpha = 0.1
echo = 500

# Load the training set and the test set
x, y = load_data(file_train)
X, Y = load_data(file_test)

# Initialize theta as an all-ones column vector, one entry per feature
theta = np.ones([len(x[0]), 1])
# Learn theta with gradient descent
Theta, Jlog = gradient_function(x, y, theta, alpha, echo)
# Predict on the test set
Y_pre = predict(X, Theta)
# Y and Y_pre are (n, 1) column vectors; flatten them to 1-D for bincount
Y = Y.T.flatten()
Y_pre = Y_pre.T.flatten()

mat = genConfusionMatrix(Y_pre, Y, 2)
print("混淆矩阵：")
print(mat)
model_assessment(mat[0, 0], mat[0, 1], mat[1, 0], mat[1, 1])

# Plot the loss curve over the training epochs
plt.plot(Jlog, label="0.1")
plt.legend()
plt.show()



