import numpy as np
import matplotlib.pyplot as plt

# 17 samples: two real-valued features (X1, X2) and a binary 0/1 label (Y).
# NOTE(review): values look like the classic "watermelon 3.0a" toy dataset
# (density / sugar content) — confirm with the assignment handout.
X1 = [0.697,0.774,0.634,0.608,0.556,0.403,0.481,0.437,0.666,0.243,0.245,0.343,0.639,0.657,0.360,0.593,0.719]
X2 = [0.460,0.376,0.264,0.318,0.215,0.237,0.149,0.211,0.091,0.267,0.057,0.099,0.161,0.198,0.370,0.042,0.103]
# First 8 samples are the positive class, the remaining 9 are negative.
Y = [1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0]

# 2. Problem (50 points):
# Three columns of data are given above: two attributes X1, X2, and one
# label Y. Build a neural network and complete the tasks required below
# with adequate performance.
#
# (1) Load the three columns of data into the program   (5)
# Convert the raw lists into NumPy structures for vectorized math.
X1 = np.array(X1)
X2 = np.array(X2)
m = X1.size                       # number of samples
y = np.array(Y).reshape(-1, 1)    # label column vector, shape (m, 1)
x = np.column_stack((X1, X2))     # feature matrix, shape (m, 2)

# (2) Preprocess: standardize each feature to zero mean / unit variance,
# then shuffle the rows.   (5)
mu, sigma = x.mean(axis=0), x.std(axis=0)
x = (x - mu) / sigma
np.random.seed(1)                 # fixed seed -> reproducible shuffle
a = np.random.permutation(m)      # shuffled row order
x, y = x[a], y[a]
XX = np.c_[np.ones(m), x]         # design matrix with a bias column of ones

# (3) Split the shuffled data: first 70% for training, last 30% for testing.   (5)
m_train = int(0.7 * m)
x_train, x_test = x[:m_train], x[m_train:]
y_train, y_test = y[:m_train], y[m_train:]
XX_train, XX_test = XX[:m_train], XX[m_train:]


# (4) Implement the sigmoid function    (5)
def model(XX, theta):
    """Linear layer: return the matrix product of XX and theta."""
    return np.matmul(XX, theta)


def sigmoid(z):
    """Logistic function 1 / (1 + e^(-z)), applied element-wise."""
    return np.reciprocal(1.0 + np.exp(-z))


# (5) Implement forward propagation    (5)
def FP(XX, theta1, theta2):
    """Forward pass through the 2-layer network.

    Returns (a2, a3): the hidden-layer activations and the output-layer
    activations (predicted probabilities).
    """
    hidden = sigmoid(model(XX, theta1))     # a2: hidden layer
    output = sigmoid(model(hidden, theta2)) # a3: network output
    return hidden, output


# (6) Implement the cost (loss) function   (5)
def cost_func(h, y):
    """Binary cross-entropy cost averaged over the samples in y.

    h : predicted probabilities in (0, 1), shape (n, 1).
    y : 0/1 labels, shape (n, 1).

    Bug fix: the original divided by the *global* sample count m (17) even
    when called on a subset such as the 11-sample training split, so the
    reported cost was mis-scaled. Gradients were unaffected (BP computes
    its own sample count), but the printed/plotted cost values were wrong.
    """
    n = y.shape[0]  # average over the actual number of samples passed in
    return -np.sum(y * np.log(h) + (1 - y) * np.log(1 - h)) / n


# (7) Implement back propagation   (5 points)
def BP(XX, y, theta1, theta2, a2, a3, alpha):
    """One gradient-descent step computed via backpropagation.

    a2, a3 are the activations from the matching forward pass. The weight
    matrices theta1/theta2 are updated IN PLACE (via -=) and also returned.
    """
    n_samples = XX.shape[0]
    delta3 = a3 - y                                   # output-layer error
    delta2 = delta3.dot(theta2.T) * (a2 * (1 - a2))   # hidden-layer error

    # Average the gradients over the batch, then take one step downhill.
    theta2 -= alpha * (1 / n_samples * a2.T.dot(delta3))
    theta1 -= alpha * (1 / n_samples * XX.T.dot(delta2))
    return theta1, theta2


# (8) Implement gradient descent (number of hidden units is up to you);
# learning rate alpha=0.1, 10000 iterations   (5)
def grad(XX, y, alpha=0.1, iter0=10000):
    """Train the 1-hidden-layer network (4 hidden units) by batch gradient descent.

    XX : design matrix including the bias column, shape (m, n).
    y  : 0/1 labels, shape (m, 1).
    Returns (theta1, theta2, cost history per iteration, final training outputs).

    Bug fix: the original set group = iter0 // 20, which is 0 for iter0 < 20
    and crashed with ZeroDivisionError at `i % group`; clamp to at least 1.
    """
    n = XX.shape[1]
    group = max(1, iter0 // 20)        # log the cost roughly 20 times per run
    theta1 = np.random.randn(n, 4)     # input -> hidden weights
    theta2 = np.random.randn(4, 1)     # hidden -> output weights
    j_his = np.zeros(iter0)
    for i in range(iter0):
        a2, a3 = FP(XX, theta1, theta2)
        j = cost_func(a3, y)
        j_his[i] = j
        if i % group == 0:
            print(f'#{i + 1} cost function value = {j}')
        theta1, theta2 = BP(XX, y, theta1, theta2, a2, a3, alpha)
    # Report the final cost if the loop above did not just print it.
    if i % group != 0:
        print(f'#{i + 1} cost function value = {j}')
    return theta1, theta2, j_his, a3


def score(h, y):
    """Accuracy: fraction of h-values that match y after thresholding at 0.5.

    NOTE kept from original: could be generalized to multi-class via
    one-hot labels and argmax instead of a scalar threshold.
    """
    predictions = h > 0.5
    return np.mean(predictions == y)


# Train on the 70% split; h_train holds the network's outputs for the
# training set from the last forward pass.
theta1, theta2, j_his, h_train = grad(XX_train, y_train)

# (9) Print the learned weight coefficients.   (5)
print(f'Theta1 = {theta1}, theta2 = {theta2}')
print(f'Training score = {score(h_train, y_train)}')
# Evaluate generalization on the held-out 30% split.
_, h_test = FP(XX_test, theta1, theta2)
print(f'Testing score = {score(h_test, y_test)}')

# (10) Plot the cost curve over the training iterations.   (5)
plt.plot(j_his, label='cost function value')
plt.xlabel('Iterations')
plt.grid()
plt.legend()
plt.show()
