# Single-variable, multi-sample linear regression
import numpy as np
import matplotlib.pyplot as plt
from pathlib import Path
from matplotlib.colors import LogNorm


from HelperClass.DataReader_1_0 import *
from HelperClass.HyperParameters_1_0 import *
from HelperClass.TrainingHistory_1_0 import *

class NeuralNet_1_0(object):
    """Single-variable linear regression model (z = x*w + b) trained with
    mini-batch gradient descent.

    :param params: hyper-parameter object exposing eta (learning rate),
        max_epoch, batch_size and eps (early-stop loss threshold)
    """

    def __init__(self, params):
        self.params = params  # hyper-parameters of the network
        self.w = 0  # weight, initialized to 0; becomes a (1, 1) array after the first update
        self.b = 0  # bias, initialized to 0; same shape evolution as w

    def __forwardBatch(self, batch_x):
        """
        Forward pass: compute the model's predictions.
        :param batch_x: one batch of inputs, shape (batch_size, 1)
        :return: predictions for the batch, shape (batch_size, 1); the last
            batch of an epoch may be smaller than batch_size
        """
        batch_z = np.dot(batch_x, self.w) + self.b  # z = x*w + b
        return batch_z

    def __backwardBatch(self, batch_x, batch_y, batch_z):
        """
        Compute the gradients dw and db used to update the weight and bias.
        :param batch_x: batch inputs, (batch_size, 1)
        :param batch_y: batch ground-truth values, (batch_size, 1)
        :param batch_z: batch predictions, (batch_size, 1)
        :return: gradients (dw, db), each of shape (1, 1)
        """
        batch_size = batch_x.shape[0]
        dz = batch_z - batch_y  # prediction error, (batch_size, 1)
        db = dz.sum(axis=0, keepdims=True) / batch_size  # average of dz over the batch, (1, 1)
        dw = np.dot(batch_x.T, dz) / batch_size  # x^T . dz averaged over the batch, (1, 1)
        return dw, db

    def __update(self, dw, db):
        """
        Gradient-descent step on the weight and bias.
        :param dw: gradient of the weight w
        :param db: gradient of the bias b
        :return: None
        """
        self.w = self.w - self.params.eta * dw  # w = w - eta*dw
        self.b = self.b - self.params.eta * db  # b = b - eta*db

    def inference(self, batch_x):
        """
        Return the model predictions for a batch (forward pass only).
        :param batch_x: batch inputs
        :return: batch predictions
        """
        return self.__forwardBatch(batch_x)

    def train(self, dataReader):
        """
        Train for at most max_epoch epochs of max_iteration mini-batches each,
        logging the loss every other iteration and stopping early once it
        drops below eps; finally plots the loss history and a loss contour.
        :param dataReader: data source providing Shuffle / GetBatchTrainSamples /
            GetWholeTrainSamples and num_train
        :return: None
        """
        loss_history = TrainingHistory_1_0()  # records loss / weight / bias over training
        # batch_size == -1 (the default) means full-batch training
        if self.params.batch_size == -1:
            self.params.batch_size = dataReader.num_train
        max_iteration = dataReader.num_train // self.params.batch_size  # iterations per epoch
        print(f"max_iteration={max_iteration}, num_train={dataReader.num_train}, batch_size={self.params.batch_size}")
        # BUGFIX: start at +inf so the early-stop checks below cannot raise
        # NameError when the inner loop body never runs (max_iteration == 0).
        loss = float("inf")
        for epoch in range(self.params.max_epoch):
            print(f"epoch={epoch}")
            dataReader.Shuffle()  # reshuffle the training set each epoch
            for iteration in range(max_iteration):
                batch_x, batch_y = dataReader.GetBatchTrainSamples(self.params.batch_size, iteration)
                batch_z = self.__forwardBatch(batch_x)  # batch predictions
                dw, db = self.__backwardBatch(batch_x, batch_y, batch_z)  # gradients of w and b
                self.__update(dw, db)  # apply the gradient-descent step
                if iteration % 2 == 0:  # log only every other iteration
                    loss = self.__checkLoss(dataReader)  # mean-squared error over the whole training set
                    print(f"epoch={epoch}, iteration={iteration}, loss={loss}")
                    # record global iteration index, loss, weight and bias
                    loss_history.AddLossHistory(epoch*max_iteration+iteration, loss, self.w[0][0], self.b[0][0])
                    # stop as soon as the loss falls below the target threshold
                    if loss < self.params.eps:
                        break
            if loss < self.params.eps:
                break
        # plot the loss history (title shows learning rate and batch size)
        loss_history.ShowLossHistory(self.params)
        print(self.w, self.b)
        # visualize the loss surface and the (w, b) trajectory of the run
        self.loss_contour(dataReader, loss_history, self.params.batch_size, epoch*max_iteration+iteration, subplot=121, show=False)

    def __checkLoss(self, dataReader):
        """
        Compute the mean-squared-error loss over the whole training set.
        :param dataReader: data source; all training samples take part
        :return: scalar loss value
        """
        x, y = dataReader.GetWholeTrainSamples()  # all training samples
        batch_size = x.shape[0]  # use the full sample count as the batch size
        batch_z = self.__forwardBatch(x)  # model predictions
        Loss = (batch_z - y) ** 2
        loss = Loss.sum() / batch_size  # mean squared error (without the conventional /2 factor)
        return loss

    def loss_contour(self, dataReader, loss_history, batch_size, iteration, subplot=111, show=True):
        """
        Draw a contour map of the loss surface around the final (w, b),
        together with the (w, b) trajectory taken during training.
        :param dataReader: data source for the full training set
        :param loss_history: recorded loss / weight / bias history
        :param batch_size: batch size (shown in the plot title)
        :param iteration: total iteration count (shown in the plot title)
        :param subplot: matplotlib subplot specifier to draw into
        :param show: when True, display the figure immediately
        :return: None
        """
        last_loss, result_w, result_b = loss_history.GetLast()  # final loss, weight and bias
        len1, len2 = 50, 50
        w = np.linspace(result_w-1, result_w+1, len1)  # 50 evenly spaced w values around the result
        b = np.linspace(result_b-1, result_b+1, len2)
        W, B = np.meshgrid(w, b)  # coordinate grids, each of shape (len2, len1)
        len_ = len1 * len2
        x, y = dataReader.GetWholeTrainSamples()
        size = x.shape[0]
        # Broadcasting: (size, 1) x (1, len_) + (1, len_) -> (size, len_);
        # each column of z is x*w+b for one of the len_ (w, b) combinations.
        z = np.dot(x, W.ravel().reshape(1, len_)) + B.ravel().reshape(1, len_)
        print(f"W={W}, B={B}")
        print(f"W.shape={W.shape}, B.shape={B.shape}, x.shape={x.shape}, y.shape={y.shape}, z.shape={z.shape}")
        Loss1 = (z - y) ** 2
        Loss2 = Loss1.sum(axis=0, keepdims=True) / size  # MSE per (w, b) combination, (1, len_)
        # BUGFIX: meshgrid output is (len2, len1), so the reshape must match;
        # identical here because len1 == len2, but wrong for unequal sizes.
        Loss3 = Loss2.reshape(len2, len1)
        print(f"Loss1.shape={Loss1.shape}, Loss2.shape={Loss2.shape}, Loss3.shape={Loss3.shape}")
        plt.figure(figsize=(10, 6))
        plt.subplot(subplot)
        # BUGFIX: plt.jet() returns None (it only mutates the global default
        # colormap), so the original effectively passed cmap=None; name the
        # colormap explicitly instead.
        plt.contour(W, B, Loss3, levels=np.logspace(-5, 5, 100), norm=LogNorm(), cmap="jet")
        w_history = loss_history.w_history
        b_history = loss_history.b_history
        plt.plot(w_history, b_history)
        plt.xlabel("w")
        plt.ylabel("b")
        title = f"batch_size={batch_size}, iteration={iteration}, eta={self.params.eta}, w={result_w:.3f}, b={result_b:.3f}"
        plt.title(title)
        plt.axis((result_w-1, result_w+1, result_b-1, result_b+1, ))
        if show is True:
            plt.show()



