import time
from collections import OrderedDict
import numpy as np

class HeUniform:
    """He (Kaiming) uniform initializer: draws from U(-b, b), b = sqrt(6 / fan_in)."""

    def __call__(self, weight_shape):
        # fan_in is taken from the first axis of the requested weight shape.
        fan_in = weight_shape[0]
        bound = np.sqrt(6.0 / fan_in)
        return np.random.uniform(low=-bound, high=bound, size=weight_shape)

class SGD:
    """Plain stochastic gradient descent: theta <- theta - lr * grad."""

    def __init__(self, lr):
        # Fixed learning rate; no momentum or scheduling.
        self.lr = lr

    def __call__(self, params, params_grad):
        """Return the updated parameters; inputs are not mutated."""
        step = self.lr * params_grad
        return params - step


class Sigmoid:
    """Logistic sigmoid activation with numerically stable forward and gradient."""

    def __call__(self, z):
        return self.forward(z)

    def forward(self, z):
        # sigmoid(z) = 0.5 * (1 + tanh(z / 2)) — mathematically identical to
        # 1 / (1 + exp(-z)) but tanh saturates instead of exp overflowing,
        # so large-magnitude z produces clean 0.0 / 1.0 with no warnings.
        return 0.5 * (1.0 + np.tanh(0.5 * z))

    def grad(self, x):
        # d/dx sigmoid(x) = s * (1 - s). The previous exp(-x)/((1+exp(-x))**2)
        # form returned inf/inf = nan once exp(-x) overflowed (x below ~ -710).
        s = self.forward(x)
        return s * (1.0 - s)


def softmax(x):
    """Row-wise softmax over the last axis.

    Shifts each row by its maximum before exponentiating: the result is
    mathematically unchanged, but np.exp no longer overflows to inf/nan
    for large logits.
    """
    shifted = x - np.max(x, axis=-1, keepdims=True)
    e_x = np.exp(shifted)
    return e_x / e_x.sum(axis=-1, keepdims=True)


def minibatch(X, batch_size, shuffle=True):
    """Split the rows of X into index batches.

    Returns ``(generator, n_batches)`` where the generator yields index
    arrays of length ``batch_size`` (the final batch may be shorter).
    When ``shuffle`` is true the row order is randomized once up front.
    """
    n_samples = X.shape[0]
    n_batches = int(np.ceil(n_samples / batch_size))
    order = np.arange(n_samples)
    if shuffle:
        np.random.shuffle(order)

    def _batches():
        for start in range(0, n_samples, batch_size):
            yield order[start:start + batch_size]

    return _batches(), n_batches


class FullyConnected:
    """A dense (affine) layer with an optional sigmoid activation.

    The input width is inferred lazily from the first batch passed to
    ``forward``; weights are He-uniform initialized at that point.
    Gradients accumulate across ``backward`` calls until flushed.
    """

    def __init__(self, n_out, acti_fn=None):
        self.X = None  # input cached by forward() for the backward pass
        self.Z = None  # pre-activation cached by forward() (avoids recompute)
        self.params = {}
        self.gradients = {}
        self.optimizer = SGD(lr=0.01)

        self.n_in = None  # inferred from the first input batch
        self.n_out = n_out
        # Any truthy value selects sigmoid; falsy means a purely linear layer.
        self.acti_fn = Sigmoid() if acti_fn else None
        self.init_weight = HeUniform()
        self.is_initialized = False

    def _init_params(self):
        """Allocate W (n_in, n_out), b (1, n_out) and zeroed gradient buffers."""
        b = np.zeros((1, self.n_out))
        W = self.init_weight((self.n_in, self.n_out))
        self.gradients = {'W': np.zeros_like(W), 'b': np.zeros_like(b)}
        self.params = {'W': W, 'b': b}
        self.is_initialized = True

    def forward(self, X):
        """Return acti_fn(X @ W + b); caches X and the pre-activation Z."""
        if not self.is_initialized:
            self.n_in = X.shape[1]
            self._init_params()
        W = self.params['W']
        b = self.params['b']
        Z = X @ W + b
        a = self.acti_fn.forward(Z) if self.acti_fn else Z
        # Cache for backward(); the original recomputed Z there redundantly.
        self.X = X
        self.Z = Z
        return a

    def backward(self, dLda):
        """Accumulate dW/db gradients and return dL/dX for the previous layer."""
        dx, dw, db = self._bwd(dLda)
        self.gradients['W'] += dw
        self.gradients['b'] += db
        return dx

    def _bwd(self, dLda):
        """Backprop through the activation and affine transform using cached X/Z."""
        W = self.params['W']
        # Reuse the Z cached by forward(); recompute only if it was flushed.
        Z = self.Z if self.Z is not None else self.X @ W + self.params['b']
        dz = dLda * self.acti_fn.grad(Z) if self.acti_fn else dLda
        dx = dz @ W.T
        dw = self.X.T @ dz
        db = np.sum(dz, axis=0)
        return dx, dw, db

    def flush_gradients(self):
        """Drop cached activations and zero all accumulated gradients."""
        self.X = None
        self.Z = None
        for k, v in self.gradients.items():
            self.gradients[k] = np.zeros_like(v)

    def update(self):
        """Apply one optimizer step to every parameter using its gradient."""
        for k, v in self.gradients.items():
            if k in self.params:
                self.params[k] = self.optimizer(self.params[k], v)

class CrossEntropy:
    """Cross-entropy loss over one-hot targets.

    ``grad`` returns the combined softmax + cross-entropy gradient, so it
    assumes ``y_pred`` is the softmax output of the logits being trained.
    """

    def __call__(self, y_pred, y_true):
        return self.loss(y_pred, y_true)

    def loss(self, y_pred, y_true):
        """Summed (not averaged) cross-entropy; eps guards against log(0)."""
        eps = np.finfo(float).eps
        log_probs = np.log(y_pred + eps)
        return -(y_true * log_probs).sum()

    def grad(self, y_pred, y_true):
        """Gradient of softmax + cross-entropy w.r.t. the pre-softmax logits."""
        return y_pred - y_true


class DFN(object):
    """A two-layer feed-forward network: FC1 (sigmoid) -> FC2 (linear logits).

    The final layer emits raw logits; ``fit`` applies softmax externally and
    uses the combined softmax/cross-entropy gradient (y_pred - y_true).
    """

    def __init__(self, hidden_dims_1=None, hidden_dims_2=None, loss=None):
        """hidden_dims_1/2 are the layer widths; loss defaults to CrossEntropy()."""
        self.hidden_dim1 = hidden_dims_1
        self.hidden_dim2 = hidden_dims_2
        # Build the default lazily: evaluating CrossEntropy() in the signature
        # would share one instance across every DFN and tie the class
        # definition to module evaluation order.
        self.loss = CrossEntropy() if loss is None else loss
        self.is_initialized = False

    def _set_params(self):
        """Instantiate the two layers; input widths are inferred on first forward."""
        self.layers = OrderedDict()
        self.layers['FC1'] = FullyConnected(
            n_out=self.hidden_dim1,
            acti_fn='sigmoid'
        )
        self.layers['FC2'] = FullyConnected(
            n_out=self.hidden_dim2
        )
        self.is_initialized = True

    def forward(self, X):
        """Propagate X through every layer in order; returns the logits."""
        out = X
        for k, v in self.layers.items():
            out = v.forward(out)
        return out

    def backward(self, grad):
        """Backpropagate the loss gradient through the layers in reverse order."""
        out = grad
        for k, v in reversed(list(self.layers.items())):
            out = v.backward(out)

    def update(self):
        """Apply one optimizer step per layer, then clear all gradient buffers."""
        for k, v in reversed(list(self.layers.items())):
            v.update()
        self.flush_gradients()

    def flush_gradients(self):
        """Reset cached activations and accumulated gradients in every layer."""
        for k, v in self.layers.items():
            v.flush_gradients()

    def fit(self, X_train, Y_train, n_epochs=20, batch_size=64):
        """Train with mini-batch SGD; Y_train is expected one-hot encoded."""
        if not self.is_initialized:
            self._set_params()

        pre_loss = np.inf
        for i in range(n_epochs):
            start_time = time.time()
            epoch_loss = 0.0
            batch_generator, n_batches = minibatch(X_train, batch_size)
            for j, batch_idx in enumerate(batch_generator):
                X_batch, Y_batch = X_train[batch_idx], Y_train[batch_idx]
                out = self.forward(X_batch)
                y_pred = softmax(out)
                batch_loss = self.loss(y_pred, Y_batch)
                # Combined softmax + cross-entropy gradient w.r.t. the logits.
                grad = self.loss.grad(y_pred, Y_batch)
                self.backward(grad)
                self.update()
                epoch_loss += batch_loss
            # Per-sample average; loss() sums over the batch.
            epoch_loss /= X_train.shape[0]
            print(
                f'[Epoch: {i}] Avg. loss: {epoch_loss} Delta: {pre_loss - epoch_loss} ({(time.time() - start_time) / 60.0}m/epoch)')
            pre_loss = epoch_loss

    def evaluate(self, X_test, Y_test, batch_size=128):
        """Return classification accuracy against one-hot labels Y_test."""
        batch_generator, n_batches = minibatch(X_test, batch_size)
        acc = 0.0
        for j, batch_idx in enumerate(batch_generator):
            X_batch, Y_batch = X_test[batch_idx], Y_test[batch_idx]
            out = self.forward(X_batch)
            y_pred = np.argmax(out, axis=1)
            Y_batch = np.argmax(Y_batch, axis=1)
            acc += np.sum(y_pred == Y_batch)
        return acc / X_test.shape[0]
