import numpy as np
import sys
import mlp_impl_sigmoid as mlps

# Fixed seed so weight initialization is reproducible across runs.
np.random.seed(1)
# Tiny constant used below to guard divisions and log arguments against zero.
eps = 1e-20


def linear_forward(inputs):
    """Identity activation: return an independent copy of the inputs."""
    return np.copy(inputs)


def linear_backward(output):
    """Derivative of the identity activation: a float32 array of ones."""
    return np.ones(np.shape(output), dtype=np.float32)


def sigmoid_forward(inputs):
    """Element-wise logistic sigmoid 1 / (1 + exp(-x)).

    Fixes: the original added `eps` (1e-20) inside the exponent, where it
    is numerically inert, and let np.exp overflow (RuntimeWarning) for
    large-magnitude negative inputs.  Clipping the exponent to [-500, 500]
    prevents the overflow; results are unchanged at float precision since
    exp(+/-500) already saturates the sigmoid to 0/1.
    """
    return 1. / (1. + np.exp(-np.clip(inputs, -500., 500.)))


def sigmoid_backward(output):
    """Sigmoid derivative expressed through its output: s * (1 - s)."""
    complement = 1. - output
    return output * complement


def relu_forward(inputs):
    """ReLU: pass positive values through, zero out everything else."""
    result = inputs.copy()
    np.putmask(result, inputs <= 0., 0.)
    return result


def relu_backward(output):
    """ReLU derivative from the forward output: 1 where positive, else 0."""
    return (output > 0.).astype(np.float32)


def leaky_forward(inputs):
    """Leaky ReLU: identity for positive values, slope 0.1 otherwise.

    Bug fix: the original assigned the FULL `0.1 * inputs` array to a
    boolean-masked subset (`outputs[inputs <= 0.] = 0.1 * inputs`), which
    raises a shape-mismatch ValueError whenever any element is positive.
    """
    return np.where(inputs > 0., inputs, 0.1 * inputs)


def leaky_backward(output):
    """Gradient of leaky ReLU w.r.t. its input, given the forward OUTPUT.

    Positive outputs can only come from positive inputs (slope 1);
    zero/negative outputs came through the 0.1 branch.
    Bug fix: np.full's arguments were swapped — the signature is
    np.full(shape, fill_value), not np.full(fill_value, shape).
    """
    delta = np.full(output.shape, 0.1, dtype=np.float32)
    delta[output > 0.] = 1.0
    return delta


# Registry mapping activation name -> (forward_fn, backward_fn) pair;
# looked up by MLPsbLayer via its `act` constructor argument.
acts = dict(
    linear=(linear_forward, linear_backward),
    sigmoid=(sigmoid_forward, sigmoid_backward),
    relu=(relu_forward, relu_backward),
    leaky=(leaky_forward, leaky_backward),
)


def softmax_foward(inputs):
    """Numerically stable softmax over the last axis.

    Fix: subtract the per-row max before exponentiating so np.exp cannot
    overflow for large logits (the original produced inf/inf = NaN).
    After the shift the max element maps to exp(0) == 1, so the
    denominator is always >= 1 and needs no epsilon.
    (Function name keeps the original 'foward' typo: callers use it.)
    """
    shifted = inputs - inputs.max(axis=-1, keepdims=True)
    exp = np.exp(shifted)
    return exp / exp.sum(axis=-1, keepdims=True)


class MLPsbLayer(object):
    """One fully-connected layer: Z = X @ W + b followed by an activation.

    The forward pass caches its inputs/outputs so `backword` can compute
    gradients without re-running it.
    """

    def __init__(self, n_neurons, n_inputs, act):
        self.n_neurons = n_neurons
        self.n_inputs = n_inputs
        # Weights ~ N(0, 1) in float32; bias starts at zero.
        self.coef = np.random.randn(n_inputs, n_neurons).astype(np.float32)
        self.bias = np.zeros([1, n_neurons], dtype=np.float32)
        # (forward_fn, backward_fn) pair from the module-level `acts` registry.
        self.act = acts[act]

    def trainable_vars(self):
        """Number of trainable scalars in this layer (weights + biases)."""
        return self.coef.size + self.bias.size

    def __call__(self, inputs, *args, **kwargs):
        """Forward pass; caches X, W and the activation output A.

        Fix: dropped the original's unused `m, n = inputs.shape` unpack,
        which did nothing and crashed on non-2-D input.
        """
        self.inputs = inputs
        self.X = inputs
        self.W = self.coef
        Z = np.dot(self.X, self.W) + self.bias
        A = self.act[0](Z)
        self.outputs = A
        return A

    def backword(self, delta):
        """Backward pass: given dJ/dA, cache parameter gradients and
        return dJ/dX for the previous layer.

        (Method name keeps the original 'backword' typo so existing
        callers work; `backward` below is a corrected alias.)
        """
        A = self.outputs
        m = len(self.X)  # batch size for gradient averaging
        dz = delta * self.act[1](A)  # chain rule through the activation
        self.delta_bias = dz.sum(axis=0) / m
        self.delta_coef = np.dot(self.X.T, dz) / m
        self.delta = np.dot(dz, self.W.T)  # gradient w.r.t. layer input
        return self.delta

    # Correctly spelled, backward-compatible alias.
    backward = backword

class MLPsb(object):
    """Multi-layer perceptron built from MLPsbLayer objects.

    If the final layer has exactly one neuron the model runs in binary
    mode (sigmoid output + binary cross-entropy); otherwise outputs go
    through softmax and are scored with categorical cross-entropy.
    """

    def __init__(self, input_size, layers_spec_list, lr):
        self.lr = lr
        self.n_layers = len(layers_spec_list)
        n_inputs = input_size
        self.layers = []
        n_all_vars = 0
        self.bin = False
        for i, (n_neurons, act) in enumerate(layers_spec_list):
            # A single output neuron on the last layer => binary mode.
            if i == self.n_layers - 1 and 1 == n_neurons:
                self.bin = True
            layer = MLPsbLayer(n_neurons, n_inputs, act)
            n_vars = layer.trainable_vars()
            n_all_vars += n_vars
            print(f'{n_inputs} X {n_neurons} {n_vars}')
            self.layers.append(layer)
            n_inputs = n_neurons
        print(f'All vars: {n_all_vars}')

    def save_weights(self, path):
        """Flatten every layer's bias then weights (in order) into one .npy file.

        (Local renamed from `vars`, which shadowed the builtin.)
        """
        flat = np.array([], dtype=np.float32)
        for layer in self.layers:
            flat = np.append(flat, layer.bias)
            flat = np.append(flat, layer.coef)
        np.save(path, flat, allow_pickle=False)

    def load_weights(self, path):
        """Inverse of save_weights: slice the flat vector back into layers."""
        flat = np.load(path)
        print('vars', flat.shape)
        pos = 0
        for layer in self.layers:
            # Bias first, then weights — must mirror save_weights' order.
            pos_end = pos + layer.n_neurons
            layer.bias = flat[pos:pos_end].reshape(1, layer.n_neurons)
            pos = pos_end
            pos_end = pos + layer.n_neurons * layer.n_inputs
            layer.coef = flat[pos:pos_end].reshape(layer.n_inputs, layer.n_neurons)
            pos = pos_end
        print('pos', pos)

    def __call__(self, inputs, *args, **kwargs):
        """Forward pass through all layers (+ softmax in multi-class mode)."""
        outputs = inputs
        for layer in self.layers:
            outputs = layer(outputs)
        if not self.bin:
            outputs = softmax_foward(outputs)
        self.outputs = outputs
        return self.outputs

    def cost(self, A, Y):
        """Mean cross-entropy of predictions A against labels Y.

        Fix: `eps` added inside the logs so a saturated prediction
        (A == 0 or A == 1) no longer yields -inf / NaN costs.
        """
        m = len(Y)
        if self.bin:
            j = np.dot(Y.T, np.log(A + eps)) + np.dot((1.0 - Y.T), np.log(1.0 - A + eps))
            j = j[0][0] / -m
        else:
            j = (Y * np.log(A + eps)).sum(axis=-1).sum()
            j = j / -m
        return j

    def metrics(self, Y, A, thresh=0.5):
        """Binary mode: [accuracy, recall, precision, f1] at `thresh`.
        Multi-class mode: [[accuracy]] from argmax agreement.

        Fix: `np.int` was removed in NumPy 1.24 — use the builtin int.
        """
        m = len(Y)
        if self.bin:
            Y = Y.astype(np.float32)
            ones = np.ones_like(Y, dtype=int)
            TP = ones[(A > thresh) & (Y > thresh)].sum()
            TN = ones[(A < thresh) & (Y < thresh)].sum()
            FP = ones[(A > thresh) & (Y < thresh)].sum()
            FN = ones[(A < thresh) & (Y > thresh)].sum()
            acc = (TP + TN) / m
            recall = TP / (TP + FN + 1e-10)
            precision = TP / (TP + FP + 1e-10)
            f1 = (2 * recall * precision) / (recall + precision + 1e-10)
            return np.array([acc, recall, precision, f1], dtype=np.float32)
        else:
            return np.array([[(Y.argmax(axis=-1) == A.argmax(axis=-1)).astype(int).sum() / m]], dtype=np.float32)

    def backward(self, Y):
        """Backpropagate the cost gradient through all layers, last-to-first."""
        A = self.outputs
        if self.bin:
            # dJ/dA for BCE; the denominator cancels against sigmoid' in
            # the output layer. eps guards division by zero at saturation.
            delta = (A - Y) / (A * (1.0 - A) + eps)
        else:
            # Combined softmax + cross-entropy gradient.
            delta = A - Y
        for layer in reversed(self.layers):
            delta = layer.backword(delta)

    def step(self):
        """Vanilla gradient-descent update using the cached gradients."""
        for layer in self.layers:
            layer.bias -= self.lr * layer.delta_bias
            layer.coef -= self.lr * layer.delta_coef


if '__main__' == __name__:
    # Project-local dataset (binary MNIST split) and helpers; `sep` (the
    # section-header printer used below) comes from the star import.
    from python_ai.category.data.mnist_bin_dataset import x_train, x_test, x_val, y_train, y_test, y_val, shape_
    from python_ai.common.xcommon import *
    import matplotlib.pyplot as plt
    import os
    import io
    from PIL import Image

    # Hyper params
    np.random.seed(1)
    VER = 'v1.0'  # Bump VER to re-save a model with the same N_EPOCH without modifying the code
    N_BATCH_SIZE = 256
    LR = 1e-1  # learning rate (should be higher for all-sigmoid stacks than all-relu ones)
    N_EPOCH = 301  # >650 recommended for a good model; ~300 for a normal P-R curve
    def get_saved_filename():
        # Weight-file stem; N_EPOCH is part of the name so different run
        # lengths save to different files.
        return f'MPL_mnist_5_bin_{N_EPOCH}'
    BASE_DIR, FILE_NAME = os.path.split(__file__)
    SAVE_DIR = os.path.join(BASE_DIR, '_save', FILE_NAME, VER)
    os.makedirs(SAVE_DIR, exist_ok=True)
    SAVE_PATH_PREFIX = os.path.join(SAVE_DIR, get_saved_filename())
    SAVE_PATH = f'{SAVE_PATH_PREFIX}.npy'  # np.save appends '.npy' itself, hence the prefix/path split
    CURVE_SAVE_PATH = f'{SAVE_PATH_PREFIX}.png'

    # Build model: all-sigmoid stack; the single output neuron puts MLPsb
    # into binary (BCE) mode.
    sep('Model')
    model = MLPsb(x_test.shape[1], [(128, 'sigmoid'), (64, 'sigmoid'), (32, 'sigmoid'), (1, 'sigmoid')], LR)

    # Train, or load previously saved weights if they exist
    is_train = False  # NOTE(review): set but never read afterwards
    if os.path.exists(SAVE_PATH):
        sep('Load weight')
        print(f'Loading from {SAVE_PATH} ...')
        model.load_weights(SAVE_PATH)
        print('Loaded.')
    else:
        sep('Train')
        is_train = True

        cost_his, metrics_his = [], []
        cost_his_val, metrics_his_val = [], []
        for i in range(N_EPOCH):
            # NOTE(review): the 'train' phase runs on x_test/y_test while
            # x_train/y_train are imported but unused — confirm this is
            # intentional and not a copy-paste slip.
            cost_avg, metrics_avg, *_ = mlps.run_on_data(model, 'train', x_test, y_test, N_BATCH_SIZE, True, i)
            cost_his.append(cost_avg)
            metrics_his.append(list(metrics_avg))
            cost_avg, metrics_avg, *_ = mlps.run_on_data(model, 'val', x_test, y_test, N_BATCH_SIZE, False, i)
            cost_his_val.append(cost_avg)
            metrics_his_val.append(list(metrics_avg))
        metrics_his = np.array(metrics_his)
        metrics_his_val = np.array(metrics_his_val)

        print(f'Saving to {SAVE_PATH} ...')
        # save_weights receives the prefix; np.save adds the '.npy' suffix,
        # producing SAVE_PATH.
        model.save_weights(SAVE_PATH_PREFIX)
        print('Saved.')

        # save training curve (cost + metrics side by side)
        plt.figure(figsize=[12, 6])
        spr = 1  # subplot rows
        spc = 2  # subplot cols
        spn = 0  # running subplot index

        spn += 1
        plt.subplot(spr, spc, spn)
        plt.title('Cost')
        plt.plot(cost_his, label='train')
        plt.plot(cost_his_val, label='val')
        plt.legend()
        plt.grid()

        spn += 1
        plt.subplot(spr, spc, spn)
        plt.title('Metrics')
        # Column order matches MLPsb.metrics: acc, recall, precision, f1.
        plt.plot(metrics_his[:, 0], label='acc_train')
        plt.plot(metrics_his_val[:, 0], label='acc_val')
        plt.plot(metrics_his[:, 1], label='recall_train')
        plt.plot(metrics_his_val[:, 1], label='recall_val')
        plt.plot(metrics_his[:, 2], label='precision_train')
        plt.plot(metrics_his_val[:, 2], label='precision_val')
        plt.plot(metrics_his[:, 3], label='f1_train')
        plt.plot(metrics_his_val[:, 3], label='f1_val')
        plt.legend()
        plt.grid()

        plt.savefig(CURVE_SAVE_PATH)
        plt.close()

    # do test
    sep('Test')
    cost_avg, metrics_avg, pred = mlps.run_on_data(model, 'test', x_test, y_test, N_BATCH_SIZE, False, 0)

    # Draw a 4x4 demo grid of digits ("true=>predicted(score)") into an
    # in-memory PNG buffer.
    plt.figure(figsize=[6, 6])
    spr = 4
    spc = 4
    spn = 0
    m = spr * spc
    for i in range(m):
        spn += 1
        plt.subplot(spr, spc, spn)
        y = y_test[i, 0]
        # assumes x_test rows are flattened images of shape `shape_` — TODO confirm
        img = x_test[i].reshape(*shape_)
        h = pred[i, 0]
        pr = int(h>0.5)
        plt.axis('off')
        # red title marks a misclassification
        plt.title(f'{y}=>{pr}({h:.2f})', color='black' if pr == y else 'red')
        plt.imshow(img)
    buf = io.BytesIO()
    buf.seek(0)  # no-op on a fresh buffer; PIL's Image.open rewinds by itself
    plt.savefig(buf, format='png')
    img = Image.open(buf)
    plt.close()

    # Calculation for P-R curve: sweep the decision threshold and record
    # the metrics at each point.
    metrics_all = []
    thresh_list = np.arange(start=0.2, stop=0.8, step=0.05)
    for thresh in thresh_list:
        metrics = model.metrics(y_test, pred, thresh).tolist()
        metrics_all.append(metrics)
    metrics_all = np.float32(metrics_all)
    print('thresh', 'recall', 'precision', 'f1')
    print(np.c_[thresh_list.reshape(-1, 1), metrics_all[:, [1, 2, 3]]])
    # Pick the threshold with the highest F1 score (column 3).
    optimal_idx = np.argsort(metrics_all[:, 3])[-1]
    optimal_thresh = thresh_list[optimal_idx]
    print('Optimal threshold =', optimal_thresh)

    # Calculate AP as the area under the P-R steps: sum of
    # precision_i * d_recall_i; (recall=0, precision=1) is appended to
    # close the curve at the high-threshold end.
    recall = np.append(metrics_all[:, 1], 0.0)
    precision = np.append(metrics_all[:, 2], 1.0)
    print('recall', recall)
    print('precision', precision)
    d_recall = recall[:-1] - recall[1:]
    print('d_recall', d_recall)
    AP = precision[:-1].reshape(1, -1).dot(d_recall.reshape(-1, 1))[0, 0]
    print('AP', AP)

    # show testing demo and P-R curve side by side
    plt.figure(figsize=[12, 6])
    spr = 1
    spc = 2
    spn = 0
    spn += 1
    plt.subplot(spr, spc, spn)
    plt.axis('off')
    plt.imshow(img)
    buf.close()
    spn += 1
    plt.subplot(spr, spc, spn)
    plt.title('P - R curve')
    plt.plot(metrics_all[:, 1], metrics_all[:, 2])
    print('Please check and close the plotting window to continue ...')
    plt.show()

    # Curve of cost and metrics in training (saved earlier this run or by
    # a previous one)
    if os.path.exists(CURVE_SAVE_PATH):
        plt.figure(figsize=[12, 6])
        plt.axis('off')
        img = plt.imread(CURVE_SAVE_PATH)
        plt.imshow(img)
        plt.show()
