# -*- coding:utf-8 -*-#
# @Time:2023/6/4 13:13
# @Author:Adong
# @Software:PyCharm

"""
污染样本快速检测自编码器，
重要文件,慎重修改！
重要文件,慎重修改！
重要文件,慎重修改！
重要文件,慎重修改！
重要文件,慎重修改！
"""
import os

import numpy as np
import torch
import matplotlib.pyplot as plt
from torchvision import transforms
from torch import optim
from tqdm import tqdm
import dataset
from torch.utils import data
from PIL import Image
from net import AE


def rebuild_show(imgs, re_imgs):
    """Display an original image and its reconstruction side by side.

    Both arguments are flattened 64*64 tensors (any device, may require grad);
    they are reshaped to 64x64, detached and moved to CPU before plotting.
    """
    def as_array(tensor):
        # Detach from the autograd graph, move to CPU and restore the 64x64 grid.
        return tensor.view(64, 64).cpu().detach().numpy()

    figure = plt.figure()
    figure.add_subplot(121).imshow(as_array(imgs))      # left: original
    figure.add_subplot(122).imshow(as_array(re_imgs))   # right: reconstruction
    plt.show()


class Trainer:
    """Train, evaluate and run the contamination-detection autoencoder.

    Wires together the AE model, reconstruction losses, the Adam optimizer
    and the train/test/verification dataloaders, and exposes three entry
    points: train(), test() (one epoch each) and start_training() /
    start_rebuild() as the high-level drivers.

    IMPORTANT FILE - modify with care!
    """

    def __init__(self, l1_size, l2_size, l3_size, l4_size, l5_size):
        """Build the model, losses, optimizer and dataloaders.

        l1_size..l5_size are the layer widths of the AE network; l1_size is
        also the flattened input size (e.g. 64*64 for 64x64 images).
        """
        self.cur_epoch = None  # current epoch index, set by start_training()
        self.l1_size = l1_size
        self.l2_size = l2_size
        self.l3_size = l3_size
        self.l4_size = l4_size
        self.l5_size = l5_size
        # Pick the training device.
        self.device = "cuda" if torch.cuda.is_available() else 'cpu'
        print("training by " + self.device + "!")
        # Load the model onto the chosen device.
        self.model = AE(l1_size, l2_size, l3_size, l4_size, l5_size).to(self.device)
        # Per-pixel reconstruction losses, summed over the batch.
        # BCE and MSE are interchangeable here; MSE is the one actually used.
        self.loss_BCE = torch.nn.BCELoss(reduction='sum')
        self.loss_MSE = torch.nn.MSELoss(reduction='sum')
        # Optimizer.
        self.optimizer = optim.Adam(self.model.parameters(), lr=1e-3)
        # Datasets for training / testing / verification.
        self.train_dataset = dataset.MyData_V2('./data/no_noise_img/train')
        self.test_dataset = dataset.MyData_V2('./data/no_noise_img/test')
        self.veri_dataset = dataset.MyData_V2('./data/no_noise_img/veri')
        # Batch size shared by all three dataloaders.
        self.batch_size = 8
        self.train_dataloader = data.DataLoader(dataset=self.train_dataset, batch_size=self.batch_size, shuffle=True)
        self.test_dataloader = data.DataLoader(dataset=self.test_dataset, batch_size=self.batch_size, shuffle=True)
        self.veri_dataloader = data.DataLoader(dataset=self.veri_dataset, batch_size=self.batch_size, shuffle=True)

    def train(self, if_show=False):
        """Run one training epoch; return the mean per-batch loss.

        When if_show is True, plot the reconstruction of sample 0 of the
        last batch after the epoch finishes.
        """
        model = self.model
        device = self.device
        optimizer = self.optimizer
        input_size = self.l1_size

        model.train()  # training mode: parameters will be updated
        cumulative_loss = 0.0  # running sum of batch losses
        nbatch = 0  # number of batches processed
        average_loss = 0.0  # FIX: defined even if the dataloader is empty

        t = tqdm(self.train_dataloader, desc=f'[train]epoch:{self.cur_epoch}')
        for source, target in t:  # source = input image; target unused here
            batch_size = source.shape[0]
            # Flatten each image to a vector of input_size.
            source = source.to(device).view(batch_size, input_size)
            re_imgs = model(source)  # reconstruct
            loss = self.loss_MSE(re_imgs, source)  # reconstruction loss
            loss.backward()  # backpropagate
            optimizer.step()  # update parameters
            optimizer.zero_grad()  # clear gradients for the next batch
            cumulative_loss += loss.item()
            nbatch += 1
            average_loss = cumulative_loss / nbatch
            t.set_postfix({'Current batch loss': loss.item(), 'train average loss': average_loss})

        if if_show and nbatch:
            # Show the reconstruction of sample 0 of the last batch.
            rebuild_show(imgs=source[0], re_imgs=re_imgs[0])
        return average_loss  # mean per-batch loss of this epoch

    def test(self):
        """Evaluate one epoch on the test set; return the mean per-batch loss."""
        model = self.model
        device = self.device
        input_size = self.l1_size

        model.eval()  # evaluation mode
        cumulative_loss = 0.0  # running sum of batch losses
        nbatch = 0  # number of batches processed
        average_loss = 0.0  # FIX: defined even if the dataloader is empty

        e = tqdm(self.test_dataloader, desc=f'[eval]epoch:{self.cur_epoch}')
        with torch.no_grad():  # FIX: no autograd graph needed for evaluation
            for source, target in e:
                batch_size = source.shape[0]
                imgs = source.to(device).view(batch_size, input_size)  # flatten
                re_imgs = model(imgs)  # reconstruct
                loss = self.loss_MSE(re_imgs, imgs)
                cumulative_loss += loss.item()
                nbatch += 1
                average_loss = cumulative_loss / nbatch
                e.set_postfix({'Current batch loss': loss.item(), 'test average loss': average_loss})
        return average_loss  # mean per-batch loss of this epoch

    def start_training(self, epochs, model_name):
        """Train for `epochs` epochs, checkpointing the best test loss.

        Saves the model weights to ./save_model/<model_name> whenever the
        test loss reaches a new minimum, then plots and saves the loss curves
        to ./result_img/<model_name stem>.png.
        """
        model_savepath = r'./save_model/' + model_name  # checkpoint path
        loss_history = {'train': [], 'eval': []}  # per-epoch loss records
        for epoch in range(epochs):
            self.cur_epoch = epoch  # record current epoch for progress bars
            train_average_loss = self.train()  # one training epoch
            loss_history['train'].append(train_average_loss)
            test_average_loss = self.test()  # one evaluation epoch
            loss_history['eval'].append(test_average_loss)

            # Checkpoint whenever this epoch's test loss is the best so far
            # (the list already contains the current value, so <= min works).
            if test_average_loss <= min(loss_history['eval']):
                print("model with lowest test-average-loss has been saved!")
                torch.save(self.model.state_dict(), model_savepath)
        # Plot the loss curves over epochs.
        plt.plot(range(epochs), loss_history['train'], label='train average loss')
        plt.plot(range(epochs), loss_history['eval'], color='red', label='test average loss')
        plt.legend(loc='upper right')
        plt.xlabel("epoch")
        plt.ylabel("average loss")
        plt.savefig(r'./result_img/' + model_name.split('/')[-1].split('.')[0] + '.png')
        plt.show()

    def start_rebuild(self, mode, model_weight,
                      data_dir=r'E:\@MY_code\Voice_Recognize\data\yes_noise_img_veri'):
        """Reconstruct images with a trained model.

        mode='veri': run the verification dataloader, plotting every
        original/reconstruction pair and the running loss.
        mode='use': reconstruct every image in `data_dir`, print per-image
        MSE and plot the loss distribution (mean +/- 1 sigma lines).

        model_weight is the path of a saved state_dict; data_dir is only
        used in 'use' mode (default kept for backward compatibility).
        """
        if mode == 'veri':
            model = self.model
            model.load_state_dict(torch.load(model_weight))  # trained weights
            model.eval()  # FIX: switch to evaluation mode for inference
            device = self.device
            input_size = self.l1_size

            cumulative_loss = 0.0  # running sum of batch losses
            nbatch = 0  # number of batches processed
            t = tqdm(self.veri_dataloader, desc=f'[verification]')
            with torch.no_grad():  # FIX: inference only, no autograd graph
                for source, target in t:
                    batch_size = source.shape[0]
                    source = source.to(device).view(batch_size, input_size)  # flatten
                    re_imgs = model(source)  # reconstruct
                    for i in range(len(re_imgs)):
                        rebuild_show(imgs=source[i], re_imgs=re_imgs[i])
                    loss = self.loss_MSE(re_imgs, source)
                    cumulative_loss += loss.item()
                    nbatch += 1
                    average_loss = cumulative_loss / nbatch
                    t.set_postfix({'Current batch loss': loss.item(), 'test average loss': average_loss})
        elif mode == 'use':
            model = self.model
            model.load_state_dict(torch.load(model_weight))  # trained weights
            model.eval()  # FIX: switch to evaluation mode for inference
            device = self.device
            input_size = self.l1_size

            # Reconstruct every image in data_dir and collect per-image losses.
            losslist = []
            with torch.no_grad():  # FIX: inference only, no autograd graph
                for fname in os.listdir(data_dir):
                    filepath = os.path.join(data_dir, fname)  # FIX: proper path join
                    img = Image.open(filepath)
                    # Flatten the image to a (1, input_size) batch.
                    img = transforms.ToTensor()(img).to(device).view(-1, input_size)
                    re_img = self.model(img)  # reconstruct
                    loss = self.loss_MSE(re_img, img).item()
                    losslist.append(loss)
                    print("loss MSE = ", loss)
            # Scatter the per-image losses with mean and +/- 1 sigma lines.
            plt.figure()
            plt.scatter(range(len(losslist)), losslist)
            mean = np.mean(losslist)
            mid = np.median(losslist)
            sigma = np.std(losslist)
            plt.axhline(mean)
            plt.axhline(mean + 1 * sigma)
            plt.axhline(mean - 1 * sigma)
            plt.show()



if __name__ == '__main__':
    # --- Verification / inference run ---
    zqd = Trainer(l1_size=64 * 64, l2_size=256, l3_size=32, l4_size=256, l5_size=64 * 64)
    zqd.start_rebuild(mode='use', model_weight=r'./save_model/AutoEncoder_Vdetect.pth')
    # --- Training run (uncomment to retrain) ---
    # zqd = Trainer(l1_size=64 * 64, l2_size=256, l3_size=32, l4_size=256, l5_size=64*64)
    # zqd.start_training(epochs=100,model_name='AutoEncoder_Vdetect.pth')
