import random
import numpy as np
from .utils import R


class Layer:
    """Base class for one layer of a sequential network.

    Layers form a doubly-linked chain built by Connect(): activations flow
    forward through ``output_data`` and gradients flow backward through
    ``output_delta``. The first layer is fed via ``input_data`` and the last
    layer is seeded with the loss derivative via ``input_delta``.
    """

    def __init__(self) -> None:
        self.params = []  # trainable parameters, empty for stateless layers
        self.previous: "Layer" = None  # upstream neighbor (closer to the input)
        self.next: "Layer" = None  # downstream neighbor (closer to the output)
        self.input_data = None  # raw input, used only by the first layer
        self.output_data = None  # activations produced by Forward()
        self.input_delta = None  # loss gradient, used only by the last layer
        self.output_delta = None  # gradient produced by Backward()

    def Connect(self, layer: "Layer"):
        """Attach this layer directly after `layer` in the chain."""
        self.previous = layer
        layer.next = self

    def Forward(self):
        """Compute ``output_data`` from the forward input. Override me."""
        raise NotImplementedError

    def GetForwardInput(self):
        """Return the upstream output, or the raw input for the first layer."""
        if self.previous is not None:
            return self.previous.output_data
        else:
            return self.input_data

    def Backward(self):
        """Compute ``output_delta`` from the backward input. Override me."""
        raise NotImplementedError

    def GetBackwardInput(self):
        """Return the downstream delta, or the loss delta for the last layer."""
        if self.next is not None:
            return self.next.output_delta
        else:
            # Bug fix: the trainer seeds the LAST layer's gradient through
            # input_delta (see ForwardBackward). Returning output_delta here
            # yielded None and broke backpropagation at the output layer.
            return self.input_delta

    def ClearDeltas(self):
        """Reset accumulated gradients; no-op for parameter-free layers."""
        pass

    def UpdateParams(self, learning_rate):
        """Apply one gradient step; no-op for parameter-free layers."""
        pass

    def Describe(self):
        """Print a human-readable summary of this layer. Override me."""
        raise NotImplementedError


class ActivationLayer(Layer):
    """Element-wise sigmoid activation; output shape equals input shape."""

    def __init__(self, input_dim) -> None:
        super().__init__()
        # An activation layer never changes dimensionality.
        self.input_dim = input_dim
        self.output_dim = input_dim

    def Forward(self):
        # Apply the sigmoid nonlinearity element-wise to the incoming data.
        self.output_data = R.Sigmoid(self.GetForwardInput())

    def Backward(self):
        # Chain rule: scale the incoming gradient by the sigmoid derivative
        # evaluated at this layer's forward input.
        incoming = self.GetBackwardInput()
        self.output_delta = incoming * R.SigmoidPrime(self.GetForwardInput())

    def Describe(self):
        print("|-- " + self.__class__.__name__)
        print(f"  |-- dimensions: ({self.input_dim},{self.output_dim})")


class DenseLayer(Layer):
    """Fully connected (affine) layer: output = W @ input + b."""

    def __init__(self, input_dim, output_dim) -> None:
        super().__init__()
        self.input_dim = input_dim
        self.output_dim = output_dim

        # Standard-normal random initialization of weights and biases.
        self.weight = np.random.randn(output_dim, input_dim)
        self.bias = np.random.randn(output_dim, 1)
        self.params = [self.weight, self.bias]

        # Gradient accumulators, zeroed between mini-batches.
        self.delta_w = np.zeros(self.weight.shape)
        self.delta_b = np.zeros(self.bias.shape)

    def Forward(self):
        # Affine transform of the incoming activations.
        x = self.GetForwardInput()
        self.output_data = self.weight.dot(x) + self.bias

    def Backward(self):
        x = self.GetForwardInput()
        grad = self.GetBackwardInput()
        # Accumulate per-sample parameter gradients...
        self.delta_b += grad
        self.delta_w += grad.dot(x.T)
        # ...and propagate the error signal to the upstream layer.
        self.output_delta = self.weight.T.dot(grad)

    def UpdateParams(self, learning_rate):
        # Plain gradient-descent step using the accumulated gradients.
        self.weight -= learning_rate * self.delta_w
        self.bias -= learning_rate * self.delta_b

    def ClearDeltas(self):
        # Fresh accumulators for the next mini-batch.
        self.delta_w = np.zeros_like(self.weight)
        self.delta_b = np.zeros_like(self.bias)

    def Describe(self):
        print("|--- " + self.__class__.__name__)
        print("  |-- dimensions: ({},{})".format(self.input_dim, self.output_dim))


class MSE:
    """Half sum-of-squared-errors loss and its derivative."""

    def Loss(self, predictions, labels):
        """Return 0.5 * sum((predictions - labels)^2) as a scalar.

        Uses np.sum so both column vectors of shape (n, 1) and flat 1-D
        arrays are accepted; the previous builtin-sum plus ``[0]`` indexing
        raised an IndexError on 1-D input while returning the same value
        for column vectors.
        """
        diff = predictions - labels
        return 0.5 * np.sum(diff * diff)

    def LossDerivative(self, predictions, labels):
        """Gradient of the loss with respect to the predictions."""
        return predictions - labels


class SquentialNetwork:
    """A simple feed-forward network trained with mini-batch SGD."""

    def __init__(self, loss=None) -> None:
        print("Init network...")
        self.layers: list[Layer] = []
        # Default to mean-squared-error when no loss object is supplied.
        self.loss = MSE() if loss is None else loss

    def Add(self, layer: Layer):
        """Append a layer and wire it to the current last layer."""
        self.layers.append(layer)
        layer.Describe()
        if len(self.layers) > 1:
            self.layers[-1].Connect(self.layers[-2])

    def Train(
        self, training_data, epochs, mini_batch_size, learning_rate, test_data=None
    ):
        """Run `epochs` passes of shuffled mini-batch SGD over the data."""
        total = len(training_data)
        for epoch in range(epochs):
            # Shuffle in place, then slice into consecutive mini-batches.
            random.shuffle(training_data)
            batches = (
                training_data[start : start + mini_batch_size]
                for start in range(0, total, mini_batch_size)
            )
            for batch in batches:
                self.TrainBatch(batch, learning_rate)
            if test_data:
                n_test = len(test_data)
                print(f"Epoch {epoch}: {self.Evaluate(test_data)} / {n_test}")
            else:
                print(f"Epoch {epoch} complete.")

    def TrainBatch(self, mini_batch, learning_rate):
        """Accumulate gradients over one mini-batch, then apply them."""
        self.ForwardBackward(mini_batch)
        self.Update(mini_batch, learning_rate)

    def Update(self, mini_batch, learning_rate):
        """Apply the batch-averaged gradient step, then reset accumulators."""
        step = learning_rate / len(mini_batch)
        for layer in self.layers:
            layer.UpdateParams(step)
        for layer in self.layers:
            layer.ClearDeltas()

    def ForwardBackward(self, mini_batch):
        """One forward and one backward pass per (x, y) sample in the batch."""
        for x, y in mini_batch:
            self.layers[0].input_data = x
            for layer in self.layers:
                layer.Forward()
            # Seed the backward pass with the loss derivative at the output.
            final = self.layers[-1]
            final.input_delta = self.loss.LossDerivative(final.output_data, y)
            for layer in reversed(self.layers):
                layer.Backward()

    def SingleForward(self, x):
        """Push a single input through the network and return its output."""
        self.layers[0].input_data = x
        for layer in self.layers:
            layer.Forward()
        return self.layers[-1].output_data

    def Evaluate(self, test_data):
        """Count samples whose argmax prediction matches the argmax label."""
        correct = 0
        for x, y in test_data:
            if np.argmax(self.SingleForward(x)) == np.argmax(y):
                correct += 1
        return correct

