import numpy as np
import matplotlib.pyplot as plt

# 已知有一个菠萝分类样本 data.txt 文件，x1 项为菠萝直径，x2 项为菠萝长度，y 为菠萝好坏。通过逻辑回归对菠萝样本进行分类。
# 通过Python实现逻辑回归模型，并用此模型预测测试集数据
#
# 完成数据集的读取
# load
# load the sample file: columns = diameter, length, label (0/1)
data = np.loadtxt('data.txt', delimiter=',')
x, y = data[:, :-1], data[:, -1]
m = len(y)

# scale features to zero mean and unit variance
mu = x.mean(axis=0)
sigma = x.std(axis=0)
x = (x - mu) / sigma

# splice: prepend an all-ones intercept column to form the design matrix
XX = np.c_[np.ones(m), x]

# shuffle every array with the same fixed permutation (reproducible)
np.random.seed(1)
perm = np.random.permutation(m)
x, y, XX = x[perm], y[perm], XX[perm]

# split 70% train / 30% test
m_train = int(0.7 * m)
m_test = m - m_train
x_train, x_test = x[:m_train], x[m_train:]
y_train, y_test = y[:m_train], y[m_train:]
XX_train, XX_test = XX[:m_train], XX[m_train:]


# 实现线性假设函数与 Sigmoid 函数，并画出 Sigmoid 函数
def model(XX, theta):
    """Linear hypothesis: design matrix ``XX`` times parameter vector ``theta``."""
    return np.dot(XX, theta)


def sigmoid(z):
    """Numerically stable logistic function 1 / (1 + exp(-z)).

    The naive form ``1 / (1 + np.exp(-z))`` overflows (RuntimeWarning)
    when ``z`` is a large negative number; ``exp(-logaddexp(0, -z))``
    evaluates the same quantity without ever forming exp of a large
    positive argument. Works element-wise on scalars and arrays.
    """
    return np.exp(-np.logaddexp(0, -z))


# Plot the sigmoid curve in the first panel of a 1x3 figure.
plt.figure(figsize=[16, 5])
spr, spc, spn = 1, 3, 1  # subplot rows, columns, current panel number
plt.subplot(spr, spc, spn)
plt_x = np.linspace(-10, 10, 1001)
plt.plot(plt_x, sigmoid(plt_x), 'r-', label='sigmoid')
plt.grid()
plt.legend()


# 实现逻辑回归的代价函数，实现正则化逻辑回归
def cost_func(h, y, lam, theta):
    """Regularized logistic-regression cost (cross-entropy + L2 penalty).

    Parameters
    ----------
    h : predicted probabilities, shape (m,)
    y : 0/1 labels, shape (m,)
    lam : L2 regularization strength
    theta : parameter vector; theta[0] is the intercept

    Returns
    -------
    float : mean cross-entropy plus the regularization term.

    The intercept is excluded from the penalty.  Predictions are clipped
    away from exactly 0/1 so that ``log`` never yields -inf/nan when the
    model saturates.
    """
    m = len(h)
    theta_cp = theta.copy()
    theta_cp[0] = 0  # do not regularize the intercept
    reg = lam / (2 * m) * np.sum(theta_cp ** 2)
    eps = 1e-15  # keeps log() finite for h == 0 or h == 1
    h = np.clip(h, eps, 1 - eps)
    return reg - np.sum(y * np.log(h) + (1 - y) * np.log(1 - h)) / m


# 实现梯度下降函数，要求输出迭代过程中的代价函数值
def grad(XX, y, lam=0, alpha=0.01, iter0=1500):
    """Batch gradient descent for regularized logistic regression.

    Parameters
    ----------
    XX : design matrix with intercept column, shape (m, n)
    y : 0/1 labels, shape (m,)
    lam : L2 regularization strength (intercept not regularized)
    alpha : learning rate
    iter0 : number of iterations

    Returns
    -------
    theta : learned parameters, shape (n,)
    j_his : cost-function value at each iteration, shape (iter0,)
    h : predicted probabilities on XX using the FINAL theta

    Prints the cost roughly every iter0/10 iterations plus the last one.
    """
    m, n = XX.shape
    # max(1, ...) keeps the reporting interval valid when iter0 < 10
    # (iter0 // 10 would be 0 and 'i % group' would raise ZeroDivisionError)
    group = max(1, iter0 // 10)
    theta = np.zeros(n)
    j_his = np.zeros(iter0)
    for i in range(iter0):
        h = sigmoid(model(XX, theta))
        j = cost_func(h, y, lam, theta)
        j_his[i] = j
        if 0 == i % group:
            print(f'#{i + 1} cost function value = {j}')
        theta_cp = theta.copy()
        theta_cp[0] = 0  # intercept excluded from the regularization gradient
        r = lam / m * theta_cp
        dt = r + 1 / m * XX.T.dot(h - y)
        theta -= alpha * dt
    # guard: with iter0 == 0 the loop never ran and i/j do not exist
    if iter0 and 0 != i % group:
        print(f'#{i + 1} cost function value = {j}')
    # recompute predictions with the final theta; the in-loop h lags one
    # update behind and would be inconsistent with the returned theta
    h = sigmoid(model(XX, theta))
    return theta, j_his, h


def score(h, y):
    """Classification accuracy: fraction of probabilities ``h`` whose
    0.5-thresholded prediction matches the 0/1 labels ``y``."""
    predictions = h > 0.5
    return np.mean(predictions == y)


# 通过梯度下降计算回归模型，用所得模型对测试集的数据进行预测，并计算准确率
# Fit the model by gradient descent for two regularization strengths,
# report train/test accuracy, then plot both cost histories.
alpha = 0.01
iter0 = 1500
histories = {}
for lam in (0, 3):
    print('-' * 32)
    print(f'Lambda = {lam}')
    theta, j_his, h_train = grad(XX_train, y_train, lam, alpha, iter0)
    histories[lam] = j_his
    print(f'Theta = {theta}')
    print(f'Training score = {score(h_train, y_train)}')
    h_test = sigmoid(model(XX_test, theta))
    print(f'Testing score = {score(h_test, y_test)}')
spn += 1
plt.subplot(spr, spc, spn)
plt.plot(histories[0], label='cost function, lambda = 0')
plt.plot(histories[3], label='cost function, lambda = 3')
plt.grid()
plt.legend()

# Scatter the 0/1 classes in the (standardized) feature plane.
spn += 1
plt.subplot(spr, spc, spn)
is_pos = y == 1
is_neg = ~is_pos
plt.scatter(x[is_pos, 0], x[is_pos, 1], s=1, c='r', label='positive')
plt.scatter(x[is_neg, 0], x[is_neg, 1], s=1, c='b', label='negative')
plt.grid()
plt.legend()

# finally show all drawings
plt.show()
