import numpy as np
import matplotlib.pyplot as plt

# Dataset: X1 holds feature 1, X2 holds feature 2; in y, 1 marks the first
# class and 0 marks the second class.
X1 = [0.711, 0.664, 0.334, 0.128, 0.236, 0.453, 0.461, 0.465, 0.646, 0.223, 0.234, 0.354, 0.679, 0.533, 0.340, 0.544, 0.129]
X2 = [0.434, 0.673, 0.464, 0.884, 0.123, 0.234, 0.678, 0.862, 0.197, 0.257, 0.953, 0.135, 0.647, 0.274, 0.396, 0.471, 0.764]
y = [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0]

# 1. Build the (m, 2) feature matrix by stacking the two feature lists
#    column-wise, and turn the labels into an ndarray.
m = len(X1)
x = np.column_stack((X1, X2))
y = np.array(y)

# 2. Min-max feature scaling: rescale each column to [0, 1].
# BUG FIX: the original computed `xmin = x.mean(axis=0)`. Pairing the column
# MEAN with the column max and dividing by (xmax - xmin) is not min-max
# normalization (scaled values fall outside [0, 1]); the variable name `xmin`
# and the step description both indicate the column minimum was intended.
xmin = x.min(axis=0)
xmax = x.max(axis=0)
x -= xmin
x /= (xmax - xmin)

# 3. Shuffle the samples using a fixed random seed so every run produces
#    the same order.
np.random.seed(666)
perm = np.random.permutation(m)
x, y = x[perm], y[perm]

# 4.	 简答题：shuffle的作用（2分）
# Seed的作用（3分）
# shuffle的作用：打乱样本顺序，避免按原始排列切分训练集/测试集时类别分布有偏
# (shuffle randomizes sample order so the train/test split is not biased by the original ordering)
# seed的作用：固定随机序列，保证每次运行洗牌结果一致，便于复现实验和观察模型
# (the seed fixes the random sequence so the shuffle — and thus the experiment — is reproducible)


# 5. The logistic (sigmoid) function: maps any real z into (0, 1).
def sigmoid(z):
    """Return 1 / (1 + e^{-z}), applied element-wise for array input."""
    denom = 1.0 + np.exp(-z)
    return 1.0 / denom


# 6. Plot the sigmoid curve in the first of three side-by-side subplots.
plt.figure(figsize=[12, 5])
# Subplot bookkeeping shared by the later plots: spr rows, spc columns,
# spn is the current subplot index.
spr, spc, spn = 1, 3, 0
spn += 1
plt.subplot(spr, spc, spn)
zs = np.linspace(-10, 10, 1001)
plt.plot(zs, sigmoid(zs), label='sigmoid')
plt.grid()
plt.legend()


# 7. The linear part of the model: a matrix-vector product.
def model(XX, theta):
    """Return XX @ theta — one linear score per row of XX."""
    return XX @ theta


# 8. Cross-entropy cost for logistic regression with an L2 penalty.
def cost_func(h, y, lam, theta):
    """Return the regularized cross-entropy cost.

    h     : predicted probabilities in (0, 1), shape (m,)
    y     : 0/1 labels, shape (m,)
    lam   : L2 regularization strength (lambda)
    theta : parameter vector; theta[0] is the bias/intercept

    FIX: the L2 term now excludes theta[0] — standard practice is not to
    regularize the intercept (the original penalized the full theta vector).
    """
    m = len(h)
    r = lam / 2 / m * np.sum(theta[1:] ** 2)
    return r - 1 / m * np.sum(y * np.log(h) + (1 - y) * np.log(1 - h))


# 9. Batch gradient descent for L2-regularized logistic regression.
def grad(XX, y, lam=0, alpha=0.01, iter0=5000):
    """Train theta by gradient descent and return (theta, cost history, h).

    XX    : (m, n) design matrix whose first column is all ones (bias)
    y     : 0/1 labels, shape (m,)
    lam   : L2 regularization strength
    alpha : learning rate
    iter0 : number of iterations

    FIXES vs original:
    - the bias theta[0] is no longer regularized (r[0] forced to 0),
      matching standard practice and the cost function's penalty;
    - `group` is clamped to >= 1 so iter0 < 20 no longer raises
      ZeroDivisionError at `i % group`;
    - the trailing progress print is skipped when iter0 == 0 (previously
      `i` was unbound there).
    """
    m, n = XX.shape
    # Print progress roughly 20 times over the run.
    group = max(1, iter0 // 20)
    theta = np.zeros(n)
    j_his = np.zeros(iter0)
    h = None
    for i in range(iter0):
        h = sigmoid(model(XX, theta))
        j = cost_func(h, y, lam, theta)
        j_his[i] = j
        if 0 == i % group:
            print(f'#{i + 1} cost func v = {j}')
        # L2 gradient contribution; note this is a vector, not a np.sum.
        # The intercept is excluded from regularization.
        r = lam / m * theta
        r[0] = 0.0
        dt = 1 / m * XX.T.dot(h - y) + r
        theta -= alpha * dt
    if iter0 > 0 and 0 != i % group:
        print(f'#{i + 1} cost func v = {j}')
    return theta, j_his, h


# 10. Train twice: once with lambda = 0 (no regularization) and once with
#     lambda = 3, so the two fits can be compared below.
XX = np.hstack([np.ones((m, 1)), x])  # prepend the bias column of ones
alpha = 0.01
iter0 = 5000
theta0, j_his0, h0 = grad(XX, y, 0, alpha, iter0)
theta3, j_his3, h3 = grad(XX, y, 3, alpha, iter0)
print(f'theta0 = {theta0}')
print(f'theta3 = {theta3}')

# 11. Second subplot: cost-function curves for the two lambda values.
spn += 1
plt.subplot(spr, spc, spn)
for history, lbl in ((j_his0, 'lambda = 0'), (j_his3, 'lambda = 3')):
    plt.plot(history, label=lbl)
plt.grid()
plt.legend()


# 11. Accuracy: threshold the probabilities at 0.5 and compare with labels.
def score(h, y):
    """Return the fraction of samples where (h > 0.5) matches y."""
    predictions = h > 0.5
    return np.mean(predictions == y)


# 12. Hold out the last 20% of the (shuffled) samples and report accuracy
#     on them using the lambda = 0 model.
# NOTE(review): theta0 was trained in step 10 on ALL m samples, so these
# "test" rows were also seen during training — this measures training fit,
# not generalization. The split should happen before calling grad(); kept
# as-is here to preserve the script's structure.
m_test = int(m * 0.2)
m_train = m - m_test
XX_train, XX_test = np.split(XX, [m_train])
y_train, y_test = np.split(y, [m_train])
h_test = sigmoid(model(XX_test, theta0))
print(f'Testing score = {score(h_test, y_test)}')

# 13. Scatter plot of the samples plus the two decision-boundary lines.
# Endpoints of the x1-range over which the boundary lines are drawn.
plt_x = np.array([np.min(x[:, 0]), np.max(x[:, 0])])


def get_line(x1, theta):
    """Decision boundary: solve theta0 + theta1*x1 + theta2*x2 = 0 for x2."""
    return -(theta[0] + theta[1] * x1) / theta[2]


# Third subplot: positive/negative samples with both decision boundaries.
plt_y0 = get_line(plt_x, theta0)
plt_y3 = get_line(plt_x, theta3)
spn += 1
plt.subplot(spr, spc, spn)
pos_idx = y == 1
neg_idx = ~pos_idx
plt.scatter(x[pos_idx, 0], x[pos_idx, 1], c='y', label='positive')
plt.scatter(x[neg_idx, 0], x[neg_idx, 1], c='b', label='negative')
plt.plot(plt_x, plt_y0, 'r-', label='border lam=0')
plt.plot(plt_x, plt_y3, 'g--', label='border lam=3')
plt.grid()
plt.legend()

plt.show()
