import numpy as np
import matplotlib.pyplot as plt

plt.figure(figsize=[12, 8])
# Subplot grid bookkeeping: spr rows x spc columns, spn = current subplot index.
spr = 2
spc = 2
spn = 0

# Dataset: beverage-factory quality-control samples, split into a training
# set (d1.txt) and a test set (d2.txt).  Each row holds three inspection
# metrics -- X1 (water content), X2 (sweetness), X3 (bacteria index) --
# and a label Y (1 = qualified, 0 = unqualified), e.g.:
#
# X1    X2    X3    Y
# 2.11  4.55  8.14  0
# 3.13  4.13  4.33  1
#
# Goal: fit a logistic-regression model on the training set and use it to
# predict the test set.
#
# Read both datasets.
# NOTE(review): the sample above looks tab-separated, but delimiter=',' is
# used here -- confirm against the actual d1.txt / d2.txt files.
data_train = np.loadtxt('d1.txt', delimiter=',')
data_test = np.loadtxt('d2.txt', delimiter=',')
data = data_train
# Standardize the training features (z-score, per column).
x = data[:, :-1]
m, n = x.shape
mu = x.mean(axis=0)  # per-feature mean (axis=0: reduce over samples)
sigma = x.std(axis=0)  # per-feature std (axis=0: reduce over samples)
x -= mu
x /= sigma
y = data[:, -1]
XX = np.c_[np.ones([m, 1]), x]  # prepend a bias column of ones

# Standardize the test features with the TRAINING mean/std.
# FIX: the original re-estimated mu/sigma from the test set itself, which
# scales the two splits inconsistently and leaks test statistics into the
# preprocessing; a model fitted on training-scaled data must see test data
# scaled by the same statistics.
x_test = data_test[:, :-1]
m_test = len(x_test)
x_test -= mu
x_test /= sigma
y_test = data_test[:, -1]
XX_test = np.c_[np.ones([m_test, 1]), x_test]


# Implement the sigmoid function and plot it (sigmoid follows below).
def model(XX, theta):
    """Linear part of the hypothesis: the raw score XX @ theta."""
    return np.dot(XX, theta)


def sigmoid(z):
    """Logistic function 1 / (1 + e^-z), numerically safe for extreme z.

    FIX: np.exp(-z) overflows (RuntimeWarning) for large negative z.
    Clipping z to +/-500 keeps the exponential in float64 range; within
    the clip range the result is bit-identical to the naive formula, and
    outside it the true value rounds to 0.0 / 1.0 anyway.
    """
    return 1 / (1 + np.exp(-np.clip(z, -500, 500)))


# Plot the sigmoid curve over [-7, 7] in the next subplot.
spn += 1
plt.subplot(spr, spc, spn)
zs = np.linspace(-7, 7, 1000)
plt.plot(zs, sigmoid(zs), label='sigmoid')
plt.grid()
plt.legend()


# Regularized logistic-regression cost (cross-entropy + L2 penalty).
def cost_func(h, y, theta=None, lam=0):
    """Return the (optionally L2-regularized) cross-entropy cost.

    h     : predicted probabilities, shape (m,)
    y     : 0/1 labels, shape (m,)
    theta : parameter vector including the bias theta[0]; only needed
            when lam != 0
    lam   : L2 regularization strength

    FIX vs. original: the penalty now uses the standard lam/(2m) factor
    and excludes the bias term theta[0], which makes it consistent with
    the lam/m * theta update applied in gradient descent.  h is also
    clipped away from exactly 0/1 so np.log never sees log(0).
    """
    m = len(h)
    eps = 1e-15  # guard against log(0) when a prediction saturates
    h = np.clip(h, eps, 1 - eps)
    j = -1 / m * np.sum(y * np.log(h) + (1 - y) * np.log(1 - h))
    r = 0
    if lam != 0:
        r = lam / (2 * m) * np.sum(theta[1:] ** 2)
    return j + r


# Gradient-descent trainer; prints the cost periodically during iteration.
def gradient(XX, y, lam=0, alpha=0.001, iter0=15000):
    """Fit logistic-regression parameters by batch gradient descent.

    XX    : design matrix with a leading bias column, shape (m, n)
    y     : 0/1 labels, shape (m,)
    lam   : L2 regularization strength (the bias term is not penalized)
    alpha : learning rate
    iter0 : number of iterations

    Returns (theta, h, j_his): fitted parameters, the final training
    probabilities, and the per-iteration cost history.

    FIXES vs. original: the regularization update no longer penalizes the
    bias theta[0] (standard practice), and the progress-print interval is
    floored at 1 so iter0 < 10 no longer raises ZeroDivisionError.
    """
    m, n = XX.shape
    theta = np.zeros(n)
    j_his = np.zeros(iter0)
    group = max(1, iter0 // 10)  # aim for ~10 progress lines

    for i in range(iter0):
        z = model(XX, theta)
        h = sigmoid(z)
        j = cost_func(h, y, theta, lam)
        if 0 == i % group:
            print(f'#{i}, j = {j}')  # cost at this iteration
        j_his[i] = j
        dt = 1 / m * XX.T.dot(h - y)
        if lam != 0:
            reg = lam / m * theta
            reg[0] = 0  # do not regularize the bias term
            dt += reg
        theta -= alpha * dt
    return theta, h, j_his


# Train the regression model by gradient descent, then use it on the test set.
lam = 3
alpha = 0.001
iter0 = 15000
theta, h, j_his = gradient(XX, y, lam=lam, alpha=alpha, iter0=iter0)
print(f'Theta = {theta}')
# Inspect the cost history to check that the descent converged.
spn += 1
plt.subplot(spr, spc, spn)
plt.plot(j_his, label='cost function')
plt.grid()
plt.legend()
# Score the test set with the fitted parameters.
z_test = model(XX_test, theta)
h_test = sigmoid(z_test)


def score(h, y):
    """Accuracy: fraction of samples whose 0.5-thresholded prediction equals y."""
    pred = h > 0.5
    return np.mean(pred == y)


# Report accuracy on both splits (labels are Chinese: train / test accuracy).
train_acc = score(h, y)
test_acc = score(h_test, y_test)
print(f'训练准确率 = {train_acc}')
print(f'测试准确率 = {test_acc}')


def border(x, theta):
    """Decision-boundary X3 value for a given (standardized) X2 value x.

    Solves theta[0] + theta[2]*X2 + theta[3]*X3 = 0 for X3.  The X1 term
    theta[1] is omitted -- presumably because the features are standardized,
    so the boundary is drawn at X1's mean of 0; confirm if X1 matters.
    """
    num = theta[0] + theta[2] * x
    return -num / theta[3]


# Scatter the training data in the (X2, X3) plane with the decision border.
spn += 1
plt.subplot(spr, spc, spn)
plt.title('training data X2 and X3')
x2_train = x[:, 1]
x3_train = x[:, 2]
plt.scatter(x2_train[y == 1], x3_train[y == 1], s=1, label='positive')
# FIX: legend label was misspelled 'negitive'
plt.scatter(x2_train[y == 0], x3_train[y == 0], s=1, label='negative')
# Draw the boundary as a line across the observed X2 range.
plt_line_x = np.array([x2_train.min(), x2_train.max()])
plt_line_y = border(plt_line_x, theta)
plt.plot(plt_line_x, plt_line_y, label='border')
plt.xlabel('X2')
plt.ylabel('X3')
plt.xlim([-1.5, 1.5])
plt.ylim([-3, 3])
plt.grid()
plt.legend()

# Scatter the testing data in the (X2, X3) plane with the decision border.
spn += 1
plt.subplot(spr, spc, spn)
plt.title('testing data X2 and X3')
x2_test = x_test[:, 1]
x3_test = x_test[:, 2]
plt.scatter(x2_test[y_test == 1], x3_test[y_test == 1], s=1, label='positive')
# FIX: legend label was misspelled 'negitive'
plt.scatter(x2_test[y_test == 0], x3_test[y_test == 0], s=1, label='negative')
# Draw the boundary as a line across the observed X2 range.
plt_line_x = np.array([x2_test.min(), x2_test.max()])
plt_line_y = border(plt_line_x, theta)
plt.plot(plt_line_x, plt_line_y, label='border')
plt.xlabel('X2')
plt.ylabel('X3')
plt.xlim([-1.5, 1.5])
plt.ylim([-3, 3])
plt.grid()
plt.legend()

plt.show()
