import numpy as np

from comman.layers import VariableLayer, BatchNormalization, Dropout


class Sequential:
    """A minimal sequential (layer-stack) neural-network model.

    Layers are applied in order. Each layer exposes ``forward``/``backward``;
    Dropout and BatchNormalization additionally take a training flag on
    ``forward`` and are switched to inference mode inside :meth:`predict`.
    """

    def __init__(self, layer_arr):
        # Ordered list of layer objects making up the network.
        self.layer_arr = layer_arr

        # Set later by compile(): loss layer and parameter optimizer.
        self.loss_ = None
        self.optimizer = None

    def compile(self, loss, optimizer):
        """Attach the loss layer and the optimizer used by :meth:`fit`."""
        self.loss_ = loss
        self.optimizer = optimizer

    def predict(self, x):
        """Run a forward pass in inference mode and return the raw output.

        :param x: input batch (numpy array); copied so the caller's data
            is never mutated by in-place layer operations.
        """
        y = x.copy()
        for layer in self.layer_arr:
            # Dropout / BatchNormalization behave differently at inference
            # time, so pass the train flag as False to them.
            if isinstance(layer, (Dropout, BatchNormalization)):
                y = layer.forward(y, False)
            else:
                y = layer.forward(y)
        return y

    # x: input data, t: supervised (target) data
    def loss(self, x, t):
        """Return the loss of the network output on x against targets t."""
        y = self.predict(x)
        return self.loss_.forward(y, t)

    def evaluate(self, x, t):
        """Return classification accuracy of predictions on (x, t).

        Accepts targets either as label indices (1-D) or one-hot encoded.
        """
        y = self.predict(x)
        y = np.argmax(y, axis=-1)
        if t.ndim != 1:
            t = np.argmax(t, axis=-1)

        accuracy = np.mean(y == t)
        return accuracy

    def gradient(self, x, t):
        """Run forward and backward passes, populating per-layer gradients.

        Side effect: each layer caches its own gradient during backward();
        the optimizer reads those caches in :meth:`fit`.

        :return: the gradient propagated all the way back to the input.
        """
        # Forward pass (training mode: layers use their default flags).
        for layer in self.layer_arr:
            x = layer.forward(x)
        self.loss_.forward(x, t)

        # Backward pass in reverse layer order. reversed() iterates without
        # mutating self.layer_arr (the original reversed the list in place
        # and reversed it back, which corrupts the model if an exception
        # fires in between).
        dout = self.loss_.backward(1)
        for layer in reversed(self.layer_arr):
            dout = layer.backward(dout)

        return dout

    def fit(self, x_train, y_train, **kwargs):
        """Train with randomly sampled mini-batches.

        :param x_train: training inputs, first axis is the sample axis.
        :param y_train: training targets aligned with ``x_train``.

        Keyword args:
            epochs (int): passes over the data (default 1).
            batch_size (int): mini-batch size (default 100).
            verbose (bool): print metrics after every batch (default False).
            validation_data (tuple): optional ``(x_val, t_val)`` used for
                loss/accuracy reporting.
        """
        verbose = kwargs.get("verbose", False)
        epochs = kwargs.get("epochs", 1)
        batch_size = kwargs.get("batch_size", 100)
        validation_data = kwargs.get("validation_data")

        train_size = x_train.shape[0]

        for epoch in range(epochs):
            for _ in range(train_size // batch_size):
                # Sample a random mini-batch (with replacement).
                batch_mask = np.random.choice(train_size, batch_size)
                x_batch = x_train[batch_mask]
                t_batch = y_train[batch_mask]

                # Compute gradients (cached inside each layer), then let
                # the optimizer update every trainable layer's variables.
                self.gradient(x_batch, t_batch)
                for layer in self.layer_arr:
                    if isinstance(layer, VariableLayer):
                        layer.update_variable(self.optimizer)

                if validation_data is not None and verbose:
                    print(f"loss={self.loss(*validation_data)} ,accuracy:{self.evaluate(*validation_data)}")

            # Bug fix: only compute validation metrics when validation data
            # was supplied — the original evaluated them unconditionally and
            # crashed with TypeError when validation_data was None.
            if validation_data is not None:
                loss_val = self.loss(*validation_data)
                evaluate_val = self.evaluate(*validation_data)
                print(f"===== epoch={epoch} ,loss={loss_val} ,accuracy:{evaluate_val}")
