# -*- Coding: UTF-8 -*-
# ConvNet.py
# @author: SongLiangCao
# @email: 2023733547@qq.com
# @description: none
# @created: 2021-12-16T16:28:17.290Z+08:00
# @last-modified: 2022-01-10T22:33:56.590Z+08:00
#

import numpy as np
from typing import OrderedDict
from random import randint
from layers import BatchNormalization, Relu, Convolution, SoftmaxWithLoss, Affine, Pooling


class ConvNet:
    '''A small convolutional network for image classification.

    Architecture (as wired in ``__init__``):
        Conv1 - ReLU - Pool(2x2) -> Conv2 - ReLU - Pool(2x2)
        -> Affine - ReLU - BatchNorm -> Affine - ReLU - BatchNorm
        -> Affine -> SoftmaxWithLoss
    '''
    def __init__(self, input_dim=(1, 28, 28),
                 conv_param=None,
                 hidden_size=100, output_size=10, weight_init_std=0.01) -> None:
        '''Initialize weights and assemble the layer pipeline.

        Args:
            input_dim: (channels, height, width) of one input sample.
                Only index 1 is used for the size arithmetic, so inputs
                are assumed square.
            conv_param: dict with keys 'filter_num', 'filter_size',
                'pad', 'stride', shared by both convolution layers.
                Defaults to {'filter_num': 60, 'filter_size': 5,
                'pad': 0, 'stride': 1}.
            hidden_size: width of the two hidden affine layers.
            output_size: number of output classes.
            weight_init_std: currently unused (He-style scaling is
                applied instead); kept for interface compatibility.
        '''
        # Build the default dict per call instead of using a mutable
        # default argument, which would be shared across all calls.
        if conv_param is None:
            conv_param = {'filter_num': 60, 'filter_size': 5, 'pad': 0, 'stride': 1}
        filter_num = conv_param['filter_num']
        filter_size = conv_param['filter_size']
        filter_pad = conv_param['pad']
        filter_stride = conv_param['stride']
        input_size = input_dim[1]
        conv_output_size = (input_size - filter_size + 2*filter_pad) / filter_stride + 1
        conv2_input_dim = filter_num
        conv2_input_size = int(conv_output_size / 2)  # after 2x2 stride-2 pooling
        conv2_output_size = (conv2_input_size - filter_size + 2*filter_pad) / filter_stride + 1
        # Flattened feature count fed into the first affine layer.
        pool2_output_size = int(filter_num * (conv2_output_size / 2) * (conv2_output_size / 2))

        # Weight parameter initialization (He-style scaling).
        self.params = {}
        self.params['W1'] = np.random.randn(filter_num, input_dim[0], filter_size, filter_size) / np.sqrt(filter_num / 2)
        self.params['b1'] = np.zeros(filter_num)

        self.params['W2'] = np.random.randn(filter_num, conv2_input_dim, filter_size, filter_size) / np.sqrt(filter_num / 2)
        self.params['b2'] = np.zeros(filter_num)

        # Was hard-coded as 16 * filter_num, which is only correct for
        # 28x28 inputs with filter_size 5; use the computed flattened
        # size so other input sizes work too (identical for the default).
        self.params['W3'] = np.random.randn(pool2_output_size, hidden_size) / np.sqrt(hidden_size / 2)
        self.params['b3'] = np.zeros(hidden_size)

        self.params['W4'] = np.random.randn(hidden_size, hidden_size) / np.sqrt(hidden_size)
        self.params['b4'] = np.zeros(hidden_size)

        self.params['W5'] = np.random.randn(hidden_size, output_size) / np.sqrt(hidden_size)
        self.params['b5'] = np.zeros(output_size)

        # Batch-norm scale/shift for the two hidden affine layers.
        self.params['gamma3'] = np.ones(hidden_size)
        self.params['beta3'] = np.zeros(hidden_size)
        self.params['gamma4'] = np.ones(hidden_size)
        self.params['beta4'] = np.zeros(hidden_size)

        # Generate the necessary layers, in forward order.
        self.layers = OrderedDict()
        self.layers['Conv1'] = Convolution(self.params['W1'],
                                           self.params['b1'],
                                           conv_param['stride'],
                                           conv_param['pad'])
        self.layers['Relu1'] = Relu()
        self.layers['pool1'] = Pooling(pool_h=2, pool_w=2, stride=2)
        self.layers['Conv2'] = Convolution(self.params['W2'],
                                           self.params['b2'],
                                           conv_param['stride'],
                                           conv_param['pad'])
        self.layers['Relu2'] = Relu()
        self.layers['pool2'] = Pooling(pool_h=2, pool_w=2, stride=2)

        self.layers['Affine1'] = Affine(self.params['W3'], self.params['b3'])
        self.layers['Relu3'] = Relu()
        self.layers['BatchNorm3'] = BatchNormalization(self.params['gamma3'], self.params['beta3'])
        self.layers['Affine2'] = Affine(self.params['W4'], self.params['b4'])
        self.layers['Relu4'] = Relu()
        self.layers['BatchNorm4'] = BatchNormalization(self.params['gamma4'], self.params['beta4'])
        self.layers['Affine3'] = Affine(self.params['W5'], self.params['b5'])
        self.last_layer = SoftmaxWithLoss()

        # Running record of every loss computed by loss()/gradient().
        self.loss_value = []

    def predict(self, x):
        '''Forward pass through all layers (no softmax); returns scores.'''
        for layer in self.layers.values():
            x = layer.forward(x)
        return x

    def loss(self, x, t):
        '''Cross-entropy loss of predict(x) against labels t.'''
        y = self.predict(x)
        return self.last_layer.forward(y, t)

    def gradient(self, x, t):
        '''Backpropagate one batch and return gradients for all params.

        Also appends the batch loss to ``self.loss_value``.
        '''
        # forward
        temp_loss = self.loss(x, t)
        self.loss_value.append(temp_loss)
        # backward, walking the layers in reverse order
        dout = self.last_layer.backward(1)
        for layer in reversed(list(self.layers.values())):
            dout = layer.backward(dout)

        # Collect the per-layer gradients under the same keys as params.
        grads = {}
        grads['W1'] = self.layers['Conv1'].dW
        grads['b1'] = self.layers['Conv1'].db
        grads['W2'] = self.layers['Conv2'].dW
        grads['b2'] = self.layers['Conv2'].db
        grads['W3'] = self.layers['Affine1'].dW
        grads['b3'] = self.layers['Affine1'].db
        grads['W4'] = self.layers['Affine2'].dW
        grads['b4'] = self.layers['Affine2'].db
        grads['W5'] = self.layers['Affine3'].dW
        grads['b5'] = self.layers['Affine3'].db
        grads['gamma3'] = self.layers['BatchNorm3'].dgamma
        grads['beta3'] = self.layers['BatchNorm3'].dbeta
        grads['gamma4'] = self.layers['BatchNorm4'].dgamma
        grads['beta4'] = self.layers['BatchNorm4'].dbeta

        return grads

    def accuracy(self, x_sample, t_sample, batch_size=100):
        '''Classification accuracy over x_sample, evaluated in batches.

        t_sample may be one-hot (argmax is taken) or label indices.
        The trailing partial batch is included; the original version
        dropped it while still dividing by the full sample count, which
        under-reported accuracy when the count was not a multiple of
        batch_size (results are identical when it is a multiple).
        '''
        if t_sample.ndim != 1:
            t_sample = np.argmax(t_sample, axis=1)
        total = x_sample.shape[0]
        acc = 0.0
        for start in range(0, total, batch_size):
            tx = x_sample[start:start + batch_size]
            tt = t_sample[start:start + batch_size]
            y = np.argmax(self.predict(tx), axis=1)
            acc += np.sum(y == tt)

        return acc / total


class AdaGrad:
    '''AdaGrad optimizer: per-parameter learning-rate decay.

    Each parameter's step is scaled by the inverse square root of the
    running sum of its squared gradients, so frequently-updated
    parameters take progressively smaller steps.
    '''
    def __init__(self, lr=0.01):
        self.lr = lr    # base learning rate
        self.h = None   # running sum of squared gradients, allocated lazily

    def update(self, params, grads):
        '''Apply one AdaGrad step to ``params`` in place.'''
        if self.h is None:
            # First call: one zero accumulator per parameter.
            self.h = {name: np.zeros_like(value) for name, value in params.items()}

        for name in params.keys():
            g = grads[name]
            self.h[name] += g * g
            # The 1e-7 term guards against dividing by a zero accumulator.
            params[name] -= self.lr * g / (np.sqrt(self.h[name]) + 1e-7)
            # 1e-7 is to prevent 0 from appearing in self.h[key], and divide 0 by 0


class Train:
    """train the ConvNet
    """
    def __init__(self, network, x_train, t_train, x_test, t_test, epochs=15, batch_size=200, evaluate_sample_num_per_epoch=1000) -> None:
        self.network = network
        self.train_data = x_train
        self.train_label = t_train
        self.test_data = x_test
        self.test_label = t_test
        # self.optimizer = optimizer
        # self.optimizer_param = optimizer_param
        self.epochs = epochs
        self.batch_size = batch_size
        self.evaluate_sample_num_per_epoch = evaluate_sample_num_per_epoch

        # optimizer
        self.optimizer = AdaGrad()
        self.train_size = x_train.shape[0]
        self.iter_per_epoch = max(self.train_size / batch_size, 1)
        self.max_iter = int(epochs * self.iter_per_epoch)
        self.current_iter = 0
        self.current_epoch = 0

        # record
        self.train_loss_list = []
        self.train_acc_list = []
        self.test_acc_list = []

    def train_step(self):
        batch_mask = np.random.choice(self.train_size, self.batch_size)
        x_batch = self.train_data[batch_mask]
        t_batch = self.train_label[batch_mask]

        grads = self.network.gradient(x_batch, t_batch)
        self.optimizer.update(self.network.params, grads)

        loss = self.network.loss(x_batch, t_batch)
        self.train_loss_list.append(loss)
        print(str(self.current_epoch), end=" ")
        print("train loss{}: ".format(self.current_iter), str(loss))

        if self.current_iter % self.iter_per_epoch == 0:
            self.current_epoch += 1
            print(str(self.current_epoch))

            x_train_sample_begin = randint(0, (self.train_data.shape[0] - self.evaluate_sample_num_per_epoch - 1))
            x_test_sample_begin = randint(0, (self.test_data.shape[0] - self.evaluate_sample_num_per_epoch - 1))
            x_train_sample = self.train_data[x_train_sample_begin:x_train_sample_begin+self.evaluate_sample_num_per_epoch]
            t_train_sample = self.train_label[x_train_sample_begin:x_train_sample_begin+self.evaluate_sample_num_per_epoch]
            x_test_sample = self.test_data[x_test_sample_begin:x_test_sample_begin+self.evaluate_sample_num_per_epoch]
            t_test_sample = self.test_label[x_test_sample_begin:x_test_sample_begin+self.evaluate_sample_num_per_epoch]

            train_acc = self.network.accuracy(x_train_sample, t_train_sample)
            test_acc = self.network.accuracy(x_test_sample, t_test_sample)
            self.train_acc_list.append(train_acc)
            self.test_acc_list.append(test_acc)
            self.train_loss_list = self.network.loss_value
        self.current_iter += 1

    def train(self):
        for i in range(self.max_iter):
            self.train_step()

        test_acc = self.network.accuracy(self.test_data, self.test_label)
        print("=============== Final Test Accuracy ===============")
        print("test acc:" + str(test_acc))
