import numpy as np


# base Module
class Module(object):
    """Minimal trainable-model base class.

    Holds the (lazily built) ``weight``/``bias`` parameters plus the
    bookkeeping that ``backprop`` consumes: the latest loss, dLoss/dy_pred
    (``loss_diff``) and the cached dy_pred/dparam partials.  Subclasses
    implement ``forward``, ``loss`` and ``diff_parameters``.
    """

    def __init__(self, lr=0.01):
        self.lr = lr                        # learning rate used by backprop
        self.loss_pro = 0.                  # previous loss value
        self.loss_now = 0.                  # latest loss value
        self.y_pred = 0.
        self.y_true = 0.
        self.weight = None                  # built on first __call__
        self.bias = None
        self.train = True
        self.loss_diff = np.zeros((1, 1))   # dLoss/dy_pred, set by loss()
        self._diff_weight = 1.              # dy_pred/dweight, set by diff_parameters()
        self._diff_bias = np.zeros((1, 1))  # dy_pred/dbias
        self.name = self.__class__.__name__

    def __call__(self, *args, **kwargs):
        """Build parameters from the input shapes, run ``forward`` (when the
        subclass defines one), cache the parameter partials, and return the
        prediction."""
        inputs_shape = [arg.shape for arg in args]
        # BUG FIX: iterating a dict yields its keys, so the original
        # `for _, arg in kwargs:` tried to unpack each key *string* and
        # raised ValueError for any keyword input.  Iterate the values.
        inputs_shape.extend(arg.shape for arg in kwargs.values())

        if len(inputs_shape) == 1:
            # Single input: pass its shape tuple directly.
            self.build(inputs_shape[0])
        else:
            # Zero or multiple inputs: pass the list through unchanged.
            self.build(inputs_shape)

        if hasattr(self, 'forward'):
            forward = getattr(self, 'forward')
            self.y_pred = forward(*args, **kwargs)
            self.diff_parameters(*args, **kwargs)
        return self.y_pred

    def loss(self, y_pred, y_true, **kwargs):
        """Loss hook; subclasses override.  Base returns 0."""
        return 0.

    def build(self, inputs_shape):
        """Lazily create weight/bias from an input shape (features after
        the leading batch axis)."""
        if len(inputs_shape) == 0:
            pass
        else:
            if self.weight is None:  # zero init (original note: random init performed worse)
                self.weight = np.zeros(*inputs_shape[1:])[:, np.newaxis]
            if self.bias is None:  # zero init (original note: random init performed worse)
                self.bias = np.zeros((1, 1))

    def diff_parameters(self, *args, **kwargs) -> None:
        """Cache dy_pred/dweight and dy_pred/dbias; subclasses override."""
        pass

    def backprop(self, g_diff=None):
        """One gradient-descent step.

        g_diff: optional (weight_grad, bias_grad) pair from an external
        optimizer; when absent the gradients are assembled from the cached
        partials via the chain rule.
        """
        weight_diff = g_diff[0] if g_diff else np.matmul(self._diff_weight.T, self.loss_diff)
        bias_diff = g_diff[1] if g_diff else self._diff_bias * self.loss_diff
        self.weight -= self.lr * weight_diff
        self.bias -= self.lr * bias_diff
        return True

    def set_loss(self, loss):
        """Record a new loss value, shifting the old one into loss_pro."""
        if self.loss_pro != self.loss_now:
            self.loss_pro = self.loss_now
        self.loss_now = loss


# Logistic
class Logistic(Module):
    """Logistic-regression head.

    Note: ``forward`` computes 1 / (1 + exp(x·w + b)), i.e. the sigmoid of
    the *negated* affine output (matching the module-level ``Sigmod``).
    """

    def __init__(self, lr=0.01):
        super(Logistic, self).__init__(lr)

    def forward(self, x):
        z = np.matmul(x, self.weight) + self.bias
        return 1. / (1. + np.exp(z))

    def loss(self, y_pred, y_true, delta=1e-16):
        """Binary cross-entropy with predictions clipped to (delta, 1-delta);
        also caches dLoss/dy_pred in ``loss_diff``."""
        self.loss_pro = self.loss_now
        y_pred = np.clip(y_pred, delta, 1. - delta)
        cross_entropy = y_true * np.log(y_pred) + (1 - y_true) * np.log(1 - y_pred)
        self.loss_now = -cross_entropy
        self.loss_diff = (1 - y_true) / (1 - y_pred) - y_true / y_pred
        return self.loss_now

    def diff_parameters(self, x):
        """Cache dy_pred/dparams for backprop (scale factor kept as-is)."""
        sig_grad = self.y_pred * (1 - self.y_pred)
        self._diff_weight = -2. * np.matmul(x.T, sig_grad)
        self._diff_bias = -2. * sig_grad


class Linear(Module):
    """Least-squares linear-regression head."""

    def __init__(self, lr=0.01):
        super(Linear, self).__init__(lr)

    def forward(self, x):
        return np.matmul(x, self.weight) + self.bias

    def build(self, inputs_shape):
        """Random-normal initialisation (unlike the zero init in Module)."""
        if self.weight is None:
            self.weight = np.random.randn(*inputs_shape[1:])[:, np.newaxis]
        if self.bias is None:
            self.bias = np.random.randn(1, 1)

    def loss(self, y_pred, y_true, **kwargs):
        """Sum-of-squares loss; caches dLoss/dy_pred in ``loss_diff``."""
        self.loss_pro = self.loss_now
        residual = y_pred - y_true
        self.loss_now = np.sum(residual ** 2)
        self.loss_diff = 2 * residual
        return self.loss_now

    def diff_parameters(self, x):
        # For an affine map, dy/dW is the input itself and dy/db is 1.
        self._diff_weight = x
        self._diff_bias = 1


def SGD(x, y, model, batch_size=1, epochs=10, threshold=0.001):
    """Plain per-sample stochastic gradient descent.

    x: (num_samples, num_features) inputs; y: matching targets.
    Every `batch_size` samples the running mean loss is checked: training
    stops early when it falls below `threshold` or becomes NaN.
    Returns the trained model.
    """
    # BUG FIX: the original read x.shape[-1] (the feature count) for both
    # the batch clamp and the loop bound, so it only visited the first
    # `num_features` samples.  Iterate over rows instead.
    num_samples = x.shape[0]
    if num_samples < batch_size:
        batch_size = num_samples
    bool_break = False
    for _ in range(epochs):
        loss_mean = 0.
        for j in range(num_samples):
            y_pred = model(x[j:j + 1, ...])
            y_true = y[j, ...]
            sgd_loss = model.loss(y_pred, y_true)
            if (j + 1) % batch_size == 0:
                # BUG FIX: `loss_mean == np.NAN` is always False (NaN never
                # compares equal, and np.NAN is gone in NumPy 2.0).
                if np.abs(loss_mean) < threshold or np.isnan(loss_mean):
                    bool_break = True
                    break
                loss_mean = 0.

            loss_mean = (loss_mean * j + sgd_loss) / (j + 1)
            # BUG FIX: the model's update method is `backprop`; `backward`
            # does not exist on Module and raised AttributeError.
            model.backprop()

        if bool_break:
            break
    return model


class Adam(object):
    """Adam optimizer driving a `Module`-style model.

    Keeps first- (v) and second-moment (s) estimates for the weight and
    bias gradients and feeds bias-corrected update directions to
    ``model.backprop`` (which applies the model's own learning rate).
    """

    def __init__(self, lr=0.01, beta1=0.9, beta2=0.999, delta=1e-6):
        self.lr = lr          # NOTE(review): backprop applies model.lr, not this lr
        self.beta1 = beta1    # first-moment decay
        self.beta2 = beta2    # second-moment decay
        self.delta = delta    # numerical stabiliser in the denominator
        self.v = None         # [v_weight, v_bias]
        self.s = None         # [s_weight, s_bias]

    def __call__(self, x, y, model: "Module", batch_size=1, epochs=10, threshold=0.01):
        """Train `model` on (x, y); same loop contract as SGD()."""
        inputs_shape = x.shape
        self.v, self.s = self._init_parameters(inputs_shape)
        # BUG FIX: the loop bound used x.shape[-1] (features), so only the
        # first `num_features` samples were ever visited.
        num_samples = inputs_shape[0]
        if num_samples < batch_size:
            batch_size = num_samples
        bool_break = False
        for i in range(epochs):
            loss_mean = 0.
            for j in range(num_samples):
                y_pred = model(x[j:j + 1, ...])
                y_true = y[j, ...]
                sgd_loss = model.loss(y_pred, y_true)
                # BUG FIX: `j % batch_size == 0` fired at j == 0 while
                # loss_mean was still 0, aborting every epoch immediately.
                if (j + 1) % batch_size == 0:
                    # BUG FIX: `loss_mean == np.NAN` is always False.
                    if np.abs(loss_mean) < threshold or np.isnan(loss_mean):
                        bool_break = True
                        break
                    loss_mean = 0.

                loss_mean = (loss_mean * j + sgd_loss) / (j + 1)
                # BUG FIX: raw dLoss/dy_pred was fed to backprop as if it
                # were the parameter gradients.  Assemble them the same way
                # the default branch of Module.backprop does.
                grad_w = np.matmul(model._diff_weight.T, model.loss_diff)
                grad_b = model._diff_bias * model.loss_diff
                # BUG FIX: epochs are 0-based; the timestep must start at 1
                # or the bias correction divides by zero.
                g_diff = self._make_update((grad_w, grad_b), i + 1)
                model.backprop(g_diff)

            if bool_break:
                break
        return model

    def _make_update(self, grads, hyp_t):
        """Return the Adam step direction for (weight_grad, bias_grad)."""
        updates = []
        for k, grad in enumerate(grads):
            self.v[k] = self.beta1 * self.v[k] + (1 - self.beta1) * grad
            # BUG FIX: the second moment must decay with beta2 (the
            # original decayed with beta1 but bias-corrected with beta2).
            self.s[k] = self.beta2 * self.s[k] + (1 - self.beta2) * grad ** 2
            v_hat = self.v[k] / (1 - self.beta1 ** hyp_t)
            s_hat = self.s[k] / (1 - self.beta2 ** hyp_t)
            # BUG FIX: Adam divides by sqrt(s_hat), not s_hat ** 2.
            updates.append(v_hat / (np.sqrt(s_hat) + self.delta))
        return tuple(updates)

    @staticmethod
    def _init_parameters(inputs_shape):
        """Zero moment estimates shaped like (weight, bias).

        Returned as lists so per-parameter in-place updates are possible
        (the original tuples could not be multiplied element-wise anyway).
        """
        v = [np.zeros((inputs_shape[1], 1)), np.zeros(1)]
        s = [np.zeros((inputs_shape[1], 1)), np.zeros(1)]
        return v, s


def Sigmod(x, w, b):
    """Sigmoid of the *negated* affine map: 1 / (1 + exp(x·w + b))."""
    z = np.matmul(x, w) + b
    return 1. / (1. + np.exp(z))


def Linear_(x, w, b):
    """Affine map x·w + b, used to generate ground-truth targets."""
    return b + np.matmul(x, w)


def test_Opt_Logistic(x, w, b, optimizer):
    """Fit a Logistic model on noisily-thresholded sigmoid targets and
    print the learned parameters plus the misclassification rate."""
    y_logistic = Sigmod(x, w, b)
    # BUG FIX: the noise must be a column vector.  A 1-D randn(len(...))
    # array broadcast (N, 1) + (N,) into an (N, N) matrix of labels
    # (test_Opt_Linear already did this correctly).
    rand_y = np.random.randn(len(y_logistic), 1)
    y_logistic = (y_logistic + 0.01 * rand_y / np.max(np.abs(rand_y))) > 0.5

    model = Logistic(lr=0.0005)
    if optimizer == 'sgd':
        sgd_model = SGD(x, y_logistic, model, batch_size=256,
                        epochs=10000, threshold=.5)
    else:
        adam = Adam(lr=0.0005)
        sgd_model = adam(x, y_logistic, model, batch_size=256,
                         epochs=10000, threshold=.5)
    pred = Sigmod(x, sgd_model.weight, sgd_model.bias)

    print(sgd_model.weight)
    print(sgd_model.bias)
    # Misclassification rate against the (noisy) labels.
    print(np.sum(np.abs(np.float32(pred > 0.5) - y_logistic)) * 1. / len(y_logistic))


def test_Opt_Linear(x, w, b, optimizer):
    """Fit a Linear model on noisy affine targets and print the learned
    weight and bias."""
    y_linear = Linear_(x, w, b)
    noise = np.random.randn(len(y_linear), 1)
    y_linear += 0.01 * noise / np.max(np.abs(noise))

    model = Linear(lr=0.1)
    if optimizer == 'sgd':
        sgd_model = SGD(x, y_linear, model, batch_size=256,
                        epochs=10000, threshold=.5)
    else:
        sgd_model = Adam(0.01)(x, y_linear, model, batch_size=256,
                               epochs=10000, threshold=.5)

    print(sgd_model.weight)
    print(sgd_model.bias)

def test_SGD(model='logistic', optimizer='adam'):
    """Build a random dataset and dispatch to the chosen model's driver."""
    w = np.array([[1.8], [2.5], [3.1], [2.3]])
    b = 0.1
    # Data: 1024 random samples with as many features as w has rows.
    x = np.random.randn(1024, len(w))
    if model == 'linear':
        test_Opt_Linear(x, w, b, optimizer)
    elif model == 'logistic':
        test_Opt_Logistic(x, w, b, optimizer)


if __name__ == '__main__':
    # Demo entry point: linear regression trained with plain SGD.
    test_SGD('linear', optimizer='sgd')
