import time
import random
import numpy as np
from typing import List, Union, Tuple


class Network(object):
    """A fully-connected feedforward neural network trained with
    mini-batch stochastic gradient descent.

    ``sizes`` gives the number of neurons in each layer, e.g.
    ``Network([784, 30, 10])``.  Weights and biases are drawn from a
    standard normal distribution; the input layer has neither.
    """

    def __init__(self, sizes: List[int]) -> None:
        self.cnt_layers: int = len(sizes)
        self.sizes: List[int] = sizes
        # biases[k]:  shape (sizes[k+1], 1)
        # weights[k]: shape (sizes[k+1], sizes[k])
        # so that np.dot(weights[k], a) + biases[k] maps the activations
        # of layer k to the pre-activations of layer k+1.
        self.biases = [np.random.randn(y, 1) for y in sizes[1:]]
        self.weights = [
            np.random.randn(sizes[i + 1], sizes[i]) for i in range(self.cnt_layers - 1)
        ]

    def feedforward(self, input: np.ndarray) -> np.ndarray:
        """Return the network's output for the column vector ``input``."""
        res: np.ndarray = input
        for b, w in zip(self.biases, self.weights):
            res = sigmoid(np.dot(w, res) + b)
        return res

    def SGD(
        self, training_data, epochs, batch_size, learning_rate=0.05, test_data=None
    ):
        """Train the network with mini-batch stochastic gradient descent.

        ``training_data`` is a list of ``(x, y)`` pairs; note it is
        shuffled in place every epoch.  If ``test_data`` is given, the
        network is evaluated against it after each epoch (slow, but
        useful for tracking progress).
        """
        len_training = len(training_data)
        for i in range(epochs):
            train_begin_time = time.time()
            random.shuffle(training_data)
            batches = [
                training_data[j : j + batch_size]
                for j in range(0, len_training, batch_size)
            ]
            for batch in batches:
                self.update_batch(batch, learning_rate)
            train_end_time = time.time()

            # Report epochs 1-based so the final line reads "Epoch N/N"
            # instead of "Epoch N-1/N".
            print(
                "Epoch {0}/{1}: training completed in {2:.2f} seconds".format(
                    i + 1, epochs, train_end_time - train_begin_time
                )
            )

            if test_data:
                eval_begin_time = time.time()
                res = self.evaluate(test_data)
                eval_end_time = time.time()
                print(
                    "\t evaluation result: {0} / {1}, took {2:.2f} seconds".format(
                        res, len(test_data), eval_end_time - eval_begin_time
                    )
                )

    def update_batch(self, batch, learning_rate: float):
        """Apply one gradient-descent step computed by backpropagation
        over a single mini-batch of ``(x, y)`` pairs."""
        nabla_w = [np.zeros(w.shape) for w in self.weights]
        nabla_b = [np.zeros(b.shape) for b in self.biases]
        for x, y in batch:
            delta_nabla_w, delta_nabla_b = self.backpropagation(x, y)
            nabla_w = [nw + dnw for nw, dnw in zip(nabla_w, delta_nabla_w)]
            nabla_b = [nb + dnb for nb, dnb in zip(nabla_b, delta_nabla_b)]
        # Average the accumulated gradient over the batch and step
        # against it (hoisted out of the comprehensions).
        step = learning_rate / len(batch)
        self.weights = [w - step * nw for w, nw in zip(self.weights, nabla_w)]
        self.biases = [b - step * nb for b, nb in zip(self.biases, nabla_b)]

    def backpropagation(self, x, y):
        """Return a tuple ``(nabla_w, nabla_b)`` representing the
        gradient of the cost function C_x, layer by layer."""
        nabla_w = [np.zeros(w.shape) for w in self.weights]
        nabla_b = [np.zeros(b.shape) for b in self.biases]

        # Forward pass: record every pre-activation z and activation.
        activation = x
        activations = [x]
        zs = []  # x is an activation, not a pre-activation; the backward
        # pass only ever indexes zs from the end, so nothing shifts.
        for w, b in zip(self.weights, self.biases):
            z = np.dot(w, activation) + b
            zs.append(z)
            activation = sigmoid(z)
            activations.append(activation)

        # Backward pass: output-layer error first ...
        delta = self.cost_derivative(activations[-1], y) * sigmoid_derivative(zs[-1])
        nabla_w[-1] = np.dot(delta, activations[-2].transpose())
        nabla_b[-1] = delta
        # ... then propagate the error back through the hidden layers.
        for l in range(2, self.cnt_layers):
            z = zs[-l]
            delta = np.dot(
                self.weights[-l + 1].transpose(), delta
            ) * sigmoid_derivative(z)
            nabla_w[-l] = np.dot(delta, activations[-l - 1].transpose())
            nabla_b[-l] = delta

        return nabla_w, nabla_b

    def evaluate(self, test_data) -> int:
        """Return the number of test inputs whose top-scoring output
        neuron matches the label ``y``."""
        eval_results = [(np.argmax(self.feedforward(x)), y) for x, y in test_data]
        return sum(int(x == y) for (x, y) in eval_results)

    def cost_derivative(self, output_activations, y):
        """Partial derivatives dC/da of the quadratic cost
        0.5 * ||a - y||^2 with respect to the output activations."""
        return output_activations - y

    def cost_derivative(self, output_activations, y):
        """derivative of th cost function"""
        return output_activations - y


def sigmoid(v: Union[float, np.ndarray]):
    """Logistic function 1 / (1 + e^(-v)), applied elementwise."""
    denom = 1.0 + np.exp(-v)
    return 1.0 / denom


def sigmoid_derivative(v: Union[float, np.ndarray]):
    """Derivative of the sigmoid: s(v) * (1 - s(v)).

    Evaluates ``sigmoid(v)`` once instead of twice — this runs in the
    inner loop of backpropagation, so the duplicate exp() mattered.
    """
    s = sigmoid(v)
    return s * (1 - s)
