import numpy as np
import matplotlib.pyplot as plt
from sklearn.preprocessing import OneHotEncoder


def model(x, theta):
    """Linear layer: return the matrix product of inputs and weights."""
    return x @ theta


def sigmoid(z):
    """Elementwise logistic sigmoid 1 / (1 + e^-z).

    The exponent is clipped to +/-709 (just under log(float64 max)) so that
    np.exp never overflows to inf with a RuntimeWarning for very negative z.
    Within the clipped range the result is bit-identical to the naive form;
    outside it the true value is below ~1e-308 (effectively 0 or 1) anyway.
    """
    z = np.clip(z, -709, 709)  # exp(709) ~ 8.2e307 < float64 max
    return 1 / (1 + np.exp(-z))


def cost_function(h, y):
    """Mean cross-entropy between predictions h and one-hot targets y.

    Both arguments are (m, 10) arrays; the sum over all entries is divided
    by the number of samples m (note the leading minus sign).
    """
    m = len(h)
    log_likelihood = y * np.log(h) + (1 - y) * np.log(1 - h)
    return -np.sum(log_likelihood) / m


def FP(x, theta1, theta2, theta3):
    """Forward propagation through the three weight layers.

    Returns the sigmoid activations of each layer: the two hidden
    activations (a2, a3) and the output activation (a4).
    """
    a2 = sigmoid(model(x, theta1))       # input -> hidden 1
    a3 = sigmoid(model(a2, theta2))      # hidden 1 -> hidden 2
    a4 = sigmoid(model(a3, theta3))      # hidden 2 -> output
    return a2, a3, a4


def BP(x, y, theta1, theta2, theta3, a2, a3, a4, alpha):
    """One backpropagation step: compute layer deltas, average the
    gradients over the batch, and apply a gradient-descent update.

    The theta arrays are updated in place (via -=) and also returned.
    """
    m = len(x)
    # error terms, propagated backwards through the sigmoid derivative a*(1-a)
    delta4 = a4 - y
    delta3 = delta4.dot(theta3.T) * (a3 * (1 - a3))
    delta2 = delta3.dot(theta2.T) * (a2 * (1 - a2))
    # batch-averaged gradients (note the 1/m factor)
    grad3 = 1 / m * a3.T.dot(delta4)
    grad2 = 1 / m * a2.T.dot(delta3)
    grad1 = 1 / m * x.T.dot(delta2)
    # in-place gradient-descent step
    theta1 -= alpha * grad1
    theta2 -= alpha * grad2
    theta3 -= alpha * grad3
    return theta1, theta2, theta3


def grad(x, y, alpha=0.01, iter0=2000):
    """Train the 3-layer network (n -> 100 -> 50 -> 10) with batch gradient descent.

    Parameters
    ----------
    x : (m, n) design matrix (caller is expected to include the bias column).
    y : (m, 10) one-hot label matrix.
    alpha : learning rate.
    iter0 : number of gradient-descent iterations.

    Returns (theta1, theta2, theta3, a4, j_his): the trained weights, the
    output activations of the LAST forward pass (computed before the final
    weight update), and the per-iteration cost history.
    """
    m, n = x.shape
    # Progress is printed every `group` iterations.  max(1, ...) guards
    # against iter0 < 20, where iter0 // 20 == 0 made `i % group` raise
    # ZeroDivisionError in the original code.
    group = max(1, iter0 // 20)
    # random init breaks symmetry between units
    theta1 = np.random.randn(n, 100)
    theta2 = np.random.randn(100, 50)
    theta3 = np.random.randn(50, 10)
    j_his = np.zeros(iter0)
    a4 = None  # stays None only if iter0 == 0
    for i in range(iter0):
        a2, a3, a4 = FP(x, theta1, theta2, theta3)
        j = cost_function(a4, y)
        j_his[i] = j
        if 0 == i % group:
            print(f'#{i+1} cost func v = {j}')
        theta1, theta2, theta3 = BP(x, y, theta1, theta2, theta3, a2, a3, a4, alpha)
    # print the final cost if the loop above did not already
    if iter0 > 0 and 0 != i % group:
        print(f'#{i + 1} cost func v = {j}')
    return theta1, theta2, theta3, a4, j_his


def score(h, y):
    """Classification accuracy: fraction of rows where the argmax of the
    predictions h matches the argmax of the one-hot labels y."""
    predicted = h.argmax(axis=1)
    actual = y.argmax(axis=1)
    return (predicted == actual).mean()


if '__main__' == __name__:
    # End-to-end pipeline: load -> scale -> shuffle -> one-hot -> split
    # -> train -> evaluate -> display a sample image.
    np.random.seed(66)  # fixed seed so weight init and shuffling are reproducible

    # load data: one flattened image per row (reshaped to 20x20 below)
    x = np.loadtxt(r'../../../../large_data/hand_writing/imgX.txt', delimiter=',')
    print(x.shape)
    m = len(x)  # number of samples
    y = np.loadtxt(r'../../../../large_data/hand_writing/labely.txt', delimiter=',')
    y[y == 10] = 0  # the label file uses 10 for digit zero; remap 10 => 0

    # scale data with the GLOBAL mean/std rather than per-feature: many
    # pixel columns are entirely zero, so per-column std would divide by zero
    mu = x.mean()
    sigma = x.std()
    x -= mu
    x /= sigma

    # shuffle data (one permutation applied to both features and labels)
    rnd_idx = np.random.permutation(m)
    x = x[rnd_idx]
    y = y[rnd_idx]

    # splice data: prepend a bias column of ones for the input layer
    XX = np.c_[np.ones(m), x]

    # onehot: build the (m, 10) indicator matrix by hand
    y_onehot = np.zeros([m, 10])
    for i in range(m):
        y_onehot[i, int(y[i])] = 1
    # b = OneHotEncoder(categories='auto')
    # y_onehot = b.fit_transform(y.reshape(-1, 1)).toarray()
    print(y_onehot)

    # split: 70% train / 30% test (safe because the data was shuffled above)
    m_train = int(0.7 * m)
    m_test = m - m_train
    x_train, x_test = np.split(x, [m_train])
    XX_train, XX_test = np.split(XX, [m_train])
    y_train, y_test = np.split(y, [m_train])
    y_onehot_train, y_onehot_test = np.split(y_onehot, [m_train])

    # train, then plot the cost history to check convergence
    alpha = 0.2   # learning rate
    iter0 = 500   # gradient-descent iterations
    theta1, theta2, theta3, h_train, j_his = grad(XX_train, y_onehot_train, alpha, iter0)
    plt.plot(j_his, label='cost func')
    print(f'Training score = {score(h_train, y_onehot_train)}')
    plt.grid()
    plt.legend()
    plt.show()

    # test: forward-propagate the held-out set through the trained weights
    _, _, h_test = FP(XX_test, theta1, theta2, theta3)
    print(f'Testing score = {score(h_test, y_onehot_test)}')

    # drawing: show the first training image
    # NOTE(review): the .T suggests images are stored column-major — confirm
    a = x_train[0].reshape(20, 20).T
    print(y_train[0], h_train[0])
    plt.imshow(a)
    plt.show()
