"""
1.	利用神经网络(至少含有一个隐藏层)的底层代码，实现mnist数据集的二分类，其中，标签5为一类，其余标签为另一类，并进行分类问题的模型评估，
按下面要求完成相应操作（50分）
"""
# ①	导入必要的工具包，并读入mnist数据集
import numpy as np
import tensorflow as tf
from tensorflow import keras
from sklearn.model_selection import train_test_split
import sys
import matplotlib.pyplot as plt
import seaborn as sns

np.random.seed(1)
tf.random.set_seed(1)
(_, _), (x, y_) = keras.datasets.mnist.load_data()

x = x.reshape(-1, 28*28)
x = np.float32(x) / 255.
y_bool = y_ == 5
y = np.int64(y_bool).reshape(-1, 1)
print('x', x.shape)
print('y', y.shape)
x_train, x_test, y_train, y_test = train_test_split(x, y, train_size=0.7, random_state=1, shuffle=True)
print('x_train', x_train.shape)
print('y_train', y_train.shape)
print('x_test', x_test.shape)
print('y_test', y_test.shape)

# ②	定义训练批次、周期、图像尺寸、学习率等超参数
BATCH_SIZE = 128
N_EPOCH = 200
ALPHA = 1e-3

# ③ A class that defines the model and contains: the cost function,
#    a training function and a test function.
class BinAnnClfImpl(object):
    """Binary ANN classifier implemented from scratch with NumPy.

    Architecture: n_features -> 200 -> 100 -> 1 with sigmoid activations
    everywhere, trained by mini-batch gradient descent on the mean
    binary cross-entropy loss.
    """

    def __init__(self, n_features, alpha, batch_size, n_epoch):
        """Initialize hyperparameters and parameters.

        :param n_features: number of input features per sample
        :param alpha: learning rate
        :param batch_size: mini-batch size
        :param n_epoch: number of full passes over the training data
        """
        L1, L2 = 200, 100  # hidden layer widths
        self.alpha = alpha
        self.batch_size = batch_size
        self.n_epoch = n_epoch
        # BUG FIX: plain N(0, 1) weights saturate the sigmoids for
        # 784-dimensional inputs (|z| >> 1 -> log(0) in the loss, stalled
        # training). Scale the std by 1/sqrt(fan_in) (Xavier-style) so the
        # pre-activations stay in a sane range.
        self.w1 = np.random.normal(0., 1., (n_features, L1)) / np.sqrt(n_features)
        self.b1 = np.zeros((1, L1), dtype=np.float32)
        self.w2 = np.random.normal(0., 1., (L1, L2)) / np.sqrt(L1)
        self.b2 = np.zeros((1, L2), dtype=np.float32)
        self.w3 = np.random.normal(0., 1., (L2, 1)) / np.sqrt(L2)
        self.b3 = np.zeros((1, 1), dtype=np.float32)

    def sigmoid(self, x):
        """Element-wise logistic function 1 / (1 + e^-x)."""
        return 1. / (1. + np.exp(-x))

    def forward(self, x):
        """Forward pass; caches z1..z3 / a1..a3 on self for backprop.

        :param x: (m, n_features) input batch
        :return: (m, 1) predicted probabilities (also stored as self.a3)
        """
        self.z1 = np.dot(x, self.w1) + self.b1
        self.a1 = self.sigmoid(self.z1)
        self.z2 = np.dot(self.a1, self.w2) + self.b2
        self.a2 = self.sigmoid(self.z2)
        self.z3 = np.dot(self.a2, self.w3) + self.b3
        self.a3 = self.sigmoid(self.z3)
        return self.a3

    def loss(self, a, y):
        """Mean binary cross-entropy over the batch.

        Clips `a` away from exactly 0/1 so np.log never returns -inf.
        """
        eps = 1e-12
        a = np.clip(a, eps, 1. - eps)
        return (-1. / len(a)) * (y * np.log(a) + (1 - y) * np.log(1 - a)).sum()

    # ④ Training function, including backpropagation and parameter updates
    def train(self, x, y, n_print):
        """Run mini-batch gradient descent for self.n_epoch epochs.

        :param x: (m, n_features) training inputs
        :param y: (m, 1) binary labels
        :param n_print: print loss/accuracy every n_print batches
        :return: (loss_history, accuracy_history) with one entry per batch
        """
        print('Training ...')
        m = len(x)
        n_batch = int(np.ceil(m / self.batch_size))  # last batch may be smaller
        n_cnt = 0
        loss_his, acc_his = [], []
        for epoch in range(self.n_epoch):
            for i in range(n_batch):
                bx = x[i*self.batch_size:(i+1)*self.batch_size]
                by = y[i*self.batch_size:(i+1)*self.batch_size]
                self._train(bx, by)
                # self.a3 was refreshed by the forward pass inside _train.
                loss = self.loss(self.a3, by)
                acc = self.acc(self.a3, by)
                loss_his.append(loss)
                acc_his.append(acc)
                n_cnt += 1
                # Also print on the very last batch regardless of n_print.
                if n_cnt % n_print == 0 or n_cnt == self.n_epoch * n_batch:
                    print(f'epoch#{epoch + 1}: batch#{i + 1}: loss = {loss}, acc = {acc}')
        print('Trained.')
        return loss_his, acc_his

    def _train(self, bx, by):
        """One gradient-descent step on a single mini-batch."""
        m = len(bx)
        self.forward(bx)

        # Backpropagation: dz3 is exact for sigmoid + cross-entropy;
        # hidden layers use the sigmoid derivative a * (1 - a).
        dz3 = self.a3 - by
        da2 = np.dot(dz3, self.w3.T)
        dz2 = da2 * self.a2 * (1 - self.a2)
        da1 = np.dot(dz2, self.w2.T)
        dz1 = da1 * self.a1 * (1 - self.a1)

        dw3 = (1 / m) * np.dot(self.a2.T, dz3)
        # BUG FIX: the bias gradient must be averaged over the batch axis
        # ONLY (shape (1, L)); the original dz.sum()/m collapsed it to one
        # scalar, forcing every bias unit in a layer to get the same update.
        db3 = dz3.sum(axis=0, keepdims=True) / m
        dw2 = (1 / m) * np.dot(self.a1.T, dz2)
        db2 = dz2.sum(axis=0, keepdims=True) / m
        dw1 = (1 / m) * np.dot(bx.T, dz1)
        db1 = dz1.sum(axis=0, keepdims=True) / m

        self.w3 -= self.alpha * dw3
        self.b3 -= self.alpha * db3
        self.w2 -= self.alpha * dw2
        self.b2 -= self.alpha * db2
        self.w1 -= self.alpha * dw1
        self.b1 -= self.alpha * db1

    def test(self, x, y):
        """Evaluate on (x, y) batch-by-batch.

        :return: (average batch loss, average batch accuracy,
                  (m, 1) float32 array of predicted probabilities)
        """
        print('Testing ...')
        m = len(x)
        n_batch = int(np.ceil(m / self.batch_size))
        avg_loss, avg_acc = 0., 0.
        pred = []
        for i in range(n_batch):
            bx = x[i * self.batch_size:(i + 1) * self.batch_size]
            by = y[i * self.batch_size:(i + 1) * self.batch_size]
            loss, acc = self._test(bx, by)
            avg_loss += loss
            avg_acc += acc
            pred.extend(self.a3)  # a3 holds this batch's probabilities
        avg_loss /= n_batch
        avg_acc /= n_batch
        pred = np.float32(pred)
        print('Tested.')
        return avg_loss, avg_acc, pred

    def _test(self, bx, by):
        """Loss and accuracy for one batch (forward pass only)."""
        self.forward(bx)
        loss = self.loss(self.a3, by)
        acc = self.acc(self.a3, by)
        return loss, acc

    # ⑤ Model accuracy: fraction of samples where thresholded prediction
    #    (p > 0.5) matches the binary label.
    def acc(self, a, y):
        eq = np.float32((a > 0.5) == (y > 0.5))
        acc = eq.sum() / len(a)
        return acc


# ⑥ Instantiate the classifier and train the neural network
clf = BinAnnClfImpl(28*28, ALPHA, BATCH_SIZE, N_EPOCH)

# ⑦ During training, print the cost every 500 iterations
loss_his, acc_his = clf.train(x_train, y_train, 500)

# ⑧ Plot the learning curves of the trained network
spr = 2  # subplot grid rows
spc = 2  # subplot grid cols
spn = 0  # current subplot index
plt.figure(figsize=[7, 7])
spn += 1
plt.subplot(spr, spc, spn)
plt.title('Loss')
plt.grid()
plt.plot(loss_his)
spn += 1
plt.subplot(spr, spc, spn)
plt.title('Accuracy')
plt.grid()
plt.plot(acc_his)

# ⑨ After training, evaluate the model on the test set and draw a pie
#    chart of correctly vs. incorrectly classified samples.
#    (The original comment mentioned "cats and dogs" — copy-paste slip;
#    this is the MNIST is-it-a-5 task.)
avg_loss, avg_acc, pred = clf.test(x_test, y_test)
print(f'Test: loss = {avg_loss}, acc = {avg_acc}')
spn += 1
plt.subplot(spr, spc, spn)
plt.title('Accuracy')
# BUG FIX: the first (correct) slice was unlabeled (''), leaving the pie
# chart ambiguous; label both slices explicitly.
plt.pie([avg_acc, 1 - avg_acc], explode=[0, 0.1], labels=['correct', 'wrong'], autopct='%0.2f%%')

# ⑩ Compute the TN/TP/FN/FP confusion matrix from the test set with
#    low-level code and display it as a heat map.
#    `pred` holds probabilities, so "> 0.5" thresholds the prediction;
#    y_test is 0/1, so the same comparisons select the true class.
TN = np.int64((y_test < 0.5) & (pred < 0.5)).sum()
TP = np.int64((y_test > 0.5) & (pred > 0.5)).sum()
FN = np.int64((y_test > 0.5) & (pred < 0.5)).sum()
FP = np.int64((y_test < 0.5) & (pred > 0.5)).sum()
mat = [[TN, FP], [FN, TP]]  # rows: actual (neg, pos); cols: predicted (neg, pos)
spn += 1
plt.subplot(spr, spc, spn)
plt.title('Confusion matrix')
sns.heatmap(mat, annot=True)

# 11 Compute and print precision P and recall R from the formulas
P = TP / (TP + FP)
R = TP / (TP + FN)
print(f'Precision = {P}, Recall = {R}')

# 12 Compute and print accuracy and the F1 score from the formulas
acc = (TN + TP) / (TN + TP + FP + FN)
F1 = 2 * P * R / (P + R)
print(f'Accuracy = {acc}, F1 = {F1}')

# Finally, render all figures
plt.show()
