import numpy as np
import matplotlib.pyplot as plt
import os
import pickle
from sklearn.metrics import confusion_matrix, classification_report, accuracy_score, precision_score, recall_score, f1_score


def model(XX, theta):
    """Linear layer: return the matrix product of inputs `XX` and weights `theta`."""
    return XX @ theta


def sigmoid(z):
    """Numerically stable logistic function 1 / (1 + exp(-z)).

    The naive form np.exp(-z) overflows (with a RuntimeWarning) for large
    negative z.  Splitting on the sign of z guarantees np.exp is only ever
    evaluated at a non-positive argument, so it can never overflow; the two
    branches are algebraically identical.
    """
    z = np.asarray(z, dtype=float)
    out = np.empty_like(z)
    pos = z >= 0
    out[pos] = 1.0 / (1.0 + np.exp(-z[pos]))
    ez = np.exp(z[~pos])  # z < 0 here, so exp(z) is in (0, 1)
    out[~pos] = ez / (1.0 + ez)
    return out


def cost_func(h, y, eps=1e-12):
    """Mean binary cross-entropy over all entries of `h` vs. targets `y`.

    Parameters
    ----------
    h : (m, k) array of predicted probabilities in [0, 1].
    y : (m, k) array of one-hot targets.
    eps : clipping bound; h is clipped to [eps, 1 - eps] so that a saturated
          activation (exactly 0 or 1) cannot produce log(0) = -inf / nan.

    Returns the scalar loss averaged over the m rows.
    """
    m = len(h)
    h = np.clip(h, eps, 1.0 - eps)
    return -1 / m * np.sum(y * np.log(h) + (1 - y) * np.log(1 - h))


def FP(XX, theta1, theta2):
    """Forward pass through the two-layer network.

    Returns the hidden-layer activations and the output-layer activations
    (both after the sigmoid nonlinearity).
    """
    hidden = sigmoid(model(XX, theta1))
    output = sigmoid(model(hidden, theta2))
    return hidden, output


def BP(XX, y, theta1, theta2, a2, a3, alpha):
    """One backpropagation / gradient-descent step.

    Given the forward-pass activations `a2` (hidden) and `a3` (output),
    computes the layer errors, then updates both weight matrices IN PLACE
    by one step of size `alpha` and returns them.
    """
    m = XX.shape[0]
    # Output-layer error (derivative of cross-entropy w.r.t. pre-activation).
    delta3 = a3 - y
    # Hidden-layer error, scaled by the sigmoid derivative a2 * (1 - a2).
    delta2 = delta3.dot(theta2.T) * (a2 * (1 - a2))
    theta2 -= alpha * (1 / m * a2.T.dot(delta3))
    theta1 -= alpha * (1 / m * XX.T.dot(delta2))
    return theta1, theta2


def grad(XX, y, alpha=0.1, iter0=2000, hidden=350):
    """Train the two-layer network with batch gradient descent.

    Parameters
    ----------
    XX : (m, n) design matrix (bias column already appended by the caller).
    y : (m, 3) one-hot label matrix.
    alpha : learning rate.
    iter0 : number of gradient-descent iterations.
    hidden : hidden-layer width (default 350, matching the original network).

    Returns
    -------
    (theta1, theta2, j_his, a3) : trained weights, per-iteration cost
    history, and the final forward-pass output (None if iter0 == 0).
    """
    m, n = XX.shape
    # Report progress ~20 times over the run; max(1, ...) prevents a
    # ZeroDivisionError at `i % group` when iter0 < 20.
    group = max(1, iter0 // 20)
    theta1 = np.random.randn(n, hidden)
    theta2 = np.random.randn(hidden, 3)
    j_his = np.zeros(iter0)
    a3 = None
    j = None
    i = -1  # keeps the post-loop report well-defined when iter0 == 0
    for i in range(iter0):
        a2, a3 = FP(XX, theta1, theta2)
        j = cost_func(a3, y)
        j_his[i] = j
        if i % group == 0:
            print(f'#{i + 1} cost func value = {j}')
        theta1, theta2 = BP(XX, y, theta1, theta2, a2, a3, alpha)
    # The last iteration may not have fallen on a reporting step; always
    # print the final cost so the run ends with an up-to-date number.
    if iter0 > 0 and i % group != 0:
        print(f'#{i + 1} cost func value = {j}')
    return theta1, theta2, j_his, a3


def score(h, y):
    """Classification accuracy: fraction of rows where the argmax of the
    predictions `h` equals the argmax of the one-hot labels `y`."""
    predicted = np.argmax(h, axis=1)
    actual = np.argmax(y, axis=1)
    return (predicted == actual).mean()


if __name__ == '__main__':
    # ---- load --------------------------------------------------------
    # Feature matrix and 1-based class labels from external data files.
    data = np.loadtxt(r'../../../../large_data/2020Nov/PEPX.txt', delimiter=',')
    print(data.shape)
    m = len(data)
    y = np.loadtxt(r'../../../../large_data/2020Nov/PEPL.txt', delimiter=',')
    y.resize(m, 1)
    y -= 1  # shift labels 1..3 -> 0..2 so they can index one-hot columns

    # ---- one-hot encode ---------------------------------------------
    y_onehot = np.zeros([m, 3])
    for k, v in enumerate(y):
        y_onehot[k][int(v)] = 1
    y = y_onehot

    # ---- shuffle (fixed seed for reproducibility) -------------------
    np.random.seed(1)
    perm = np.random.permutation(m)
    data = data[perm]
    y = y[perm]

    # ---- scale (global mean/std over the whole matrix) --------------
    mu = data.mean()
    sigma = data.std()
    data -= mu
    data /= sigma

    # ---- splice: prepend a bias column of ones ----------------------
    XX = np.c_[np.ones(m), data]

    # ---- split 75% train / 25% test (rows already shuffled) ---------
    m_train = int(0.75 * m)
    y_train, y_test = np.split(y, [m_train])
    XX_train, XX_test = np.split(XX, [m_train])

    # ---- train, or reload cached weights keyed by hyper-parameters --
    alpha = 0.1
    iter0 = 200
    path = str(alpha) + '_' + str(iter0) + 'ann_v1.0.tmp.dat'
    if os.path.exists(path):
        print('Loading...')
        # SECURITY NOTE: pickle.load is only safe because this file is
        # produced by this very script -- never unpickle untrusted data.
        with open(path, 'rb') as f:
            theta1, theta2 = pickle.load(f)
        _, h_train = FP(XX_train, theta1, theta2)
        print(f'Training score = {score(h_train, y_train)}')
    else:
        print('Training...')
        theta1, theta2, j_his, h_train = grad(XX_train, y_train, alpha, iter0)
        print(f'Training score = {score(h_train, y_train)}')
        with open(path, 'wb') as f:
            pickle.dump([theta1, theta2], f)
        plt.plot(j_his, label='cost function')
        plt.grid()
        plt.legend()
        plt.show()

    # ---- evaluate on the held-out test split ------------------------
    _, h_test = FP(XX_test, theta1, theta2)
    print(f'Testing score = {score(h_test, y_test)}')

    yy = y_test.argmax(axis=1)
    hh = h_test.argmax(axis=1)
    print('confusion matrix:', confusion_matrix(yy, hh))
    print('classification rpt:', classification_report(yy, hh))
    print('accuracy:', accuracy_score(yy, hh))
    print('precision:', precision_score(yy, hh, average='micro'))
    print('recall:', recall_score(yy, hh, average='micro'))
    print('f1:', f1_score(yy, hh, average='micro'))
