"""
@author: chenzhenhua
@project: jf_fashion
@file: mnist.py
@time: 2021/8/3 0003 15:53
@desc:
"""

import unittest

import numpy as np
from sklearn.metrics import accuracy_score
from tensorflow.python.keras.models import load_model
from tensorflow.python.keras.optimizers import SGD

from jf_fashion.keras.mnist import read_mnist, preprocessing
from jf_fashion.keras.model import create_model, create_lenet
from jf_fashion.keras.pruning import get_wb, forward, new_weight


class TestCase(unittest.TestCase):
    """End-to-end MNIST tests: dense training, a manual forward pass over
    extracted weights, pruning with/without correlation, and LeNet training.

    Every test reads ``data/mnist.npz`` (see :meth:`setUp`). The forward
    and pruning tests load ``data/model.h5``, which ``test_mnist_train``
    must have produced first, so run the training test before un-skipping
    the others.
    """

    def setUp(self) -> None:
        """Load the raw MNIST splits and their preprocessed counterparts."""
        (x1, y1), (x2, y2) = read_mnist('data/mnist.npz')
        x_train, y_train, x_test, y_test = preprocessing(x1, y1, x2, y2)
        # Raw splits: y1/y2 hold the integer class labels that the accuracy
        # checks below compare against argmax predictions.
        self.x1 = x1
        self.x2 = x2
        self.y1 = y1
        self.y2 = y2
        # Preprocessed tensors fed to the network during fit/forward.
        self.x_train = x_train
        self.y_train = y_train
        self.x_test = x_test
        self.y_test = y_test

    def _forward_accuracies(self, weights, biases):
        """Run the manual forward pass on both splits and score it.

        Shared by the baseline and both pruning tests, which previously
        duplicated this code.

        Returns:
            Tuple ``(train_accuracy, test_accuracy)`` of ``forward``
            predictions scored against the raw labels ``self.y1``/``self.y2``.
        """
        out_train = forward(self.x_train, weights, biases)
        out_test = forward(self.x_test, weights, biases)
        train_acc = accuracy_score(self.y1, np.argmax(out_train, axis=1))
        test_acc = accuracy_score(self.y2, np.argmax(out_test, axis=1))
        return train_acc, test_acc

    # @unittest.skip("")
    def test_mnist_train(self):
        """Train the dense 784 -> 10 model and save it for the other tests."""
        # Create the model: 784 input neurons, 10 output neurons (784-10).
        model = create_model(784)

        # NOTE(review): `lr` is the legacy Keras argument name (newer
        # releases spell it `learning_rate`); kept as-is for compatibility
        # with the pinned tensorflow.python.keras version.
        sgd = SGD(lr=0.2)

        # Optimizer, loss function, and the metric tracked during training.
        model.compile(optimizer=sgd, loss='mse', metrics=['accuracy'])

        # Train and persist the model for the forward/pruning tests.
        model.fit(self.x_train, self.y_train, batch_size=32, epochs=10)
        model.save('data/model.h5')

    @unittest.skip("")
    def test_forward_train(self):
        """Baseline: manual forward pass with the unpruned weights."""
        model = load_model('data/model.h5')
        # Weights/biases come from the saved model, so a single extraction
        # serves both splits — the pruning tests below already rely on this
        # (their second get_wb call was commented out). TODO confirm get_wb
        # output is independent of the probe data argument.
        layer_data, weights, biases = get_wb(model, self.x_train)
        train_acc, test_acc = self._forward_accuracies(weights, biases)

        print('不剪枝train：', train_acc)
        print('不剪枝test：', test_acc)

    @unittest.skip("")
    def test_pruning_nocorr(self):
        """Prune WITHOUT the correlation criterion, then score the result."""
        model = load_model('data/model.h5')
        layer_data, weights, biases = get_wb(model, self.x_train)

        # Default pruning (no correlation flag).
        weights, biases = new_weight(weights, biases, layer_data)

        train_acc, test_acc = self._forward_accuracies(weights, biases)

        print('不加相关系数剪枝后train：', train_acc)
        print('不加相关系数剪枝后test：', test_acc)

    @unittest.skip("")
    def test_pruning_corr(self):
        """Prune WITH the correlation criterion enabled, then score it."""
        model = load_model('data/model.h5')
        layer_data, weights, biases = get_wb(model, self.x_train)

        # Fourth positional argument enables the correlation criterion.
        weights, biases = new_weight(weights, biases, layer_data, True)

        train_acc, test_acc = self._forward_accuracies(weights, biases)

        print('加上相关系数剪枝后train：', train_acc)
        print('加上相关系数剪枝后test：', test_acc)

    @unittest.skip("")
    def test_lenet_train(self):
        """Train a LeNet model and save it to data/lenet.h5.

        ``mode=2`` selects an alternative preprocessing — presumably the
        CNN-shaped (image-tensor) variant rather than flat vectors; verify
        against ``preprocessing``.
        """
        x_train, y_train, x_test, y_test = preprocessing(self.x1, self.y1, self.x2, self.y2, mode=2)
        model = create_lenet()

        model.compile(loss='categorical_crossentropy', optimizer='sgd', metrics=['accuracy'])
        model.fit(x_train, y_train, epochs=10, batch_size=128)
        model.save('data/lenet.h5')


# Allow running this test module directly (e.g. `python mnist.py`);
# discovers and runs every test method on TestCase.
if __name__ == '__main__':
    unittest.main()
