# -*- coding:utf-8 -*-#
# @Time:2023/6/4 13:13
# @Author:Adong
# @Software:PyCharm

'''
降噪自编码器demo
'''

import torch
import matplotlib.pyplot as plt
from torchvision import datasets, transforms
from torch import nn, optim
from torch.nn import functional as F
from tqdm import tqdm
import os
import dataset
from torch.utils import data
from PIL import Image


class Encoder(torch.nn.Module):
    '''
    Encoder network: a two-layer MLP mapping the (flattened) input
    to a latent code, with a ReLU after the first layer.
    '''

    def __init__(self, input_size, hidden_size, latent_size):
        '''
        :param input_size: size of the input layer
        :param hidden_size: size of the hidden layer
        :param latent_size: size of the latent (output) layer
        '''
        super(Encoder, self).__init__()
        self.linear1 = nn.Linear(input_size, hidden_size)
        self.linear2 = nn.Linear(hidden_size, latent_size)

    def forward(self, x):
        # input -> hidden (ReLU) -> latent (no activation)
        hidden = torch.relu(self.linear1(x))
        return self.linear2(hidden)


class Decoder(torch.nn.Module):
    '''
    Decoder network: a two-layer MLP mapping a latent code back to the
    output size, ReLU after the first layer and sigmoid on the output
    (so reconstructed pixels lie in [0, 1]).
    '''

    def __init__(self, latent_size, hidden_size, output_size):
        '''
        :param latent_size: size of the latent (input) layer
        :param hidden_size: size of the hidden layer
        :param output_size: size of the output layer
        '''
        super(Decoder, self).__init__()
        self.linear1 = nn.Linear(latent_size, hidden_size)
        self.linear2 = nn.Linear(hidden_size, output_size)

    def forward(self, x):
        # latent -> hidden (ReLU) -> output (sigmoid)
        hidden = torch.relu(self.linear1(x))
        return torch.sigmoid(self.linear2(hidden))


class AE(torch.nn.Module):
    '''
    Autoencoder: composes an Encoder and a Decoder.
    Data flows through the encoder first, then the decoder, producing
    a reconstruction of the input.
    '''

    def __init__(self, input_size, output_size, latent_size, hidden_size):
        '''
        :param input_size: size of the encoder's input layer
        :param output_size: size of the decoder's output layer
        :param latent_size: size of the latent code (encoder output)
        :param hidden_size: hidden-layer size shared by encoder and decoder
        '''
        super(AE, self).__init__()
        self.encoder = Encoder(input_size, hidden_size, latent_size)
        self.decoder = Decoder(latent_size, hidden_size, output_size)

    def forward(self, x):
        # Encode to the latent code, then decode back to a reconstruction.
        return self.decoder(self.encoder(x))

class Trainer:
    '''
    Training / evaluation driver for the denoising autoencoder.

    Builds the AE model, loads the datasets (project-local
    ``dataset.MyData_V2`` over ./data/wav_to_gray — assumes 64x64
    grayscale images, see ``rebuild_show``), and provides training,
    testing and reconstruction helpers.
    '''

    def __init__(self, input_size, hidden_size, latent_size, output_size, model_name=None, model_weight=None):
        '''
        :param input_size: size of the encoder input layer (flattened image, e.g. 64*64)
        :param hidden_size: hidden-layer size shared by encoder and decoder
        :param latent_size: size of the latent code (encoder output)
        :param output_size: size of the decoder output layer
        :param model_name: path used to save the best model weights (required for start_training)
        :param model_weight: optional path of pre-trained weights to load
        '''
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.latent_size = latent_size
        self.output_size = output_size
        # Pick the training device.
        self.device = "cuda" if torch.cuda.is_available() else 'cpu'
        print("training by " + self.device + "!")
        # Build the autoencoder.
        self.model = AE(input_size, output_size, latent_size, hidden_size).to(self.device)
        # Loss functions: per-pixel reconstruction error between original
        # and reconstructed images.  BCE is the one actually used; MSE is
        # kept as a drop-in alternative.
        self.loss_BCE = torch.nn.BCELoss(reduction='sum')
        self.loss_MSE = torch.nn.MSELoss(reduction='sum')
        # Optimizer.
        self.optimizer = optim.Adam(self.model.parameters(), lr=1e-3)
        # Load the datasets (one MyData_V2 per class folder, concatenated).
        self.train_dataset = dataset.MyData_V2('./data/wav_to_gray/train/normal') + dataset.MyData_V2('./data/wav_to_gray/train/lowSNR_normal') +\
                             dataset.MyData_V2('./data/wav_to_gray/train/pianci_no') + dataset.MyData_V2('./data/wav_to_gray/train/pianci_yes') + dataset.MyData_V2('./data/wav_to_gray/train/songdong')
        self.test_dataset = dataset.MyData_V2('./data/wav_to_gray/test/normal') +dataset.MyData_V2('./data/wav_to_gray/test/lowSNR_normal')+dataset.MyData_V2('./data/wav_to_gray/test/pianci_yes')\
                            +dataset.MyData_V2('./data/wav_to_gray/test/pianci_no')+dataset.MyData_V2('./data/wav_to_gray/test/songdong')
        # Batch size.
        self.batch_size = 5
        # DataLoaders over the concatenated datasets.
        self.train_dataloader = data.DataLoader(dataset=self.train_dataset, batch_size=self.batch_size, shuffle=True)
        self.test_dataloader = data.DataLoader(dataset=self.test_dataset, batch_size=self.batch_size, shuffle=True)

        # BUGFIX: always define the attribute — the original left
        # self.model_name unset when model_name was None, so
        # start_training() later died with AttributeError.
        self.model_name = model_name
        # If model_weight is given, load the pre-trained weights.
        if model_weight is not None:
            self.model.load_state_dict(torch.load(model_weight))

    def train(self, if_show=False):
        '''
        Run one training epoch over the train dataloader.

        :param if_show: when True, display the reconstruction of the first
                        image of the last batch after the epoch.
        :return: (average loss per sample, model)
        '''
        model = self.model
        device = self.device
        optimizer = self.optimizer
        input_size = self.input_size
        train_dataloader = self.train_dataloader

        model.train()           # training mode: parameters will be updated
        cumulative_loss = 0     # running sum of the loss over samples seen this epoch
        nsample = 0             # number of samples seen this epoch

        # NOTE: self.cur_epoch is set by start_training(); calling train()
        # directly without it raises AttributeError (unchanged behavior).
        t = tqdm(train_dataloader, desc=f'[train]epoch:{self.cur_epoch}')
        for imgs, target in t:  # imgs: clean images; target: labels (unused here)
            batch_size = imgs.shape[0]
            # Gaussian noise (mean 0, std 1) shaped like imgs, clipped to
            # [0, 1] before being added to the clean image.
            noisy = torch.clip(torch.randn_like(imgs), 0, 1)
            noisy_img = imgs + noisy

            # Flatten to (batch, input_size) and move to the device.
            imgs = imgs.to(device).view(-1, input_size)
            noisy_img = noisy_img.to(device).view(-1, input_size)
            # Reconstruct from the noisy input.
            re_imgs = model(noisy_img)
            # Reconstruction loss against the clean image (loss_MSE would also work).
            loss = self.loss_BCE(re_imgs, imgs)

            # Reset gradients, backpropagate, step the optimizer.
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            # Update the running average shown on the progress bar.
            cumulative_loss += loss.item()
            nsample += batch_size
            t.set_postfix({'train average loss': cumulative_loss / nsample})

        if if_show:
            # Show clean / noisy / reconstructed versions of one sample.
            self.rebuild_show(imgs=imgs[0], noisy_imgs=noisy_img[0], re_imgs=re_imgs[0])
        return cumulative_loss / nsample, model  # average loss per sample

    def test(self):
        '''
        Evaluate the model on the test dataloader.

        :return: average loss per sample
        '''
        model = self.model
        device = self.device
        input_size = self.input_size
        test_dataloader = self.test_dataloader

        model.eval()
        cumulative_loss = 0     # running sum of the loss over tested samples
        nsample = 0             # number of samples tested
        e = tqdm(test_dataloader, desc=f'[eval]epoch:{self.cur_epoch}')
        # IMPROVEMENT: no gradients are needed during evaluation; disabling
        # autograd avoids building throwaway graphs.
        with torch.no_grad():
            for imgs, target in e:
                batch_size = imgs.shape[0]
                # Same noise model as in training.
                noisy = torch.clip(torch.randn_like(imgs), 0, 1)
                noisy_img = imgs + noisy
                # Flatten and move to the device.
                imgs = imgs.to(device).view(batch_size, input_size)
                noisy_img = noisy_img.to(device).view(-1, input_size)
                # Reconstruct and score.
                re_imgs = model(noisy_img)
                loss = self.loss_BCE(re_imgs, imgs)
                cumulative_loss += loss.item()
                nsample += batch_size
                e.set_postfix({'test average loss': cumulative_loss / nsample})
        return cumulative_loss / nsample

    def start_training(self, epochs):
        '''
        Train for `epochs` epochs, saving the weights that achieve the
        lowest test-average loss, then plot the loss curves.

        :param epochs: number of epochs to train
        '''
        modelname = self.model_name
        # Track per-epoch train and eval losses.
        loss_history = {'train': [], 'eval': []}
        for epoch in range(epochs):
            self.cur_epoch = epoch
            train_average_loss, self.model = self.train()
            loss_history['train'].append(train_average_loss)

            test_average_loss = self.test()
            loss_history['eval'].append(test_average_loss)

            # Save whenever this epoch ties or beats the best eval loss so far.
            if test_average_loss <= min(loss_history['eval']):
                print("model with lowest test-average-loss has been saved!")
                torch.save(self.model.state_dict(), modelname)
        # Plot the per-epoch loss curves and save them next to the model.
        plt.plot(range(epochs), loss_history['train'], label='train average loss')
        plt.plot(range(epochs), loss_history['eval'], color='red', label='test average loss')
        plt.legend(loc='upper right')
        plt.xlabel("epoch")
        plt.ylabel("average loss")
        plt.savefig(r'./save_model/' + modelname.split('/')[-1].split('.')[0] + '.png')
        plt.show()

    def start_rebuild(self, img=None, batch=None, mode='test'):
        '''
        Denoise an image with the (trained) model.

        :param img: input tensor to denoise (only used when mode == 'use')
        :param batch: batch size used to reshape the output (mode == 'use')
        :param mode: 'test' loads a fixed sample from disk and displays it
                     beside its reconstruction; 'use' denoises `img` and
                     returns it reshaped to (batch, -1, 64, 64).
        '''
        if mode == 'test':
            # Load the demo image and flatten it for the model.
            rebuild_file_path = r'./data/wav_to_gray/veri/lowSNR_normal/1.png'
            img = Image.open(rebuild_file_path)
            img = transforms.ToTensor()(img).to(self.device).view(-1, self.input_size)
            # Denoise and display.
            re_img = self.model(img)
            self.rebuild_show(imgs=img, re_imgs=re_img)
        elif mode == 'use':
            img = img.view(-1, self.input_size)
            re_img = self.model(img)
            return re_img.view(batch, -1, 64, 64)

    def rebuild_show(self, imgs, re_imgs, noisy_imgs=None):
        '''
        Display the original image, (optionally) its noisy version, and the
        reconstruction side by side as 64x64 grayscale images.
        '''
        # BUGFIX: the original tested `if noisy_imgs:`, which raises
        # "Boolean value of Tensor with more than one element is ambiguous"
        # for any multi-pixel tensor (as passed from train(if_show=True)).
        if noisy_imgs is not None:
            concat = torch.cat((imgs.view(64, 64), noisy_imgs.view(64, 64), re_imgs.view(64, 64)), 1)
        else:
            concat = torch.cat((imgs.view(64, 64), re_imgs.view(64, 64)), 1)
        plt.matshow(concat.cpu().detach().numpy(), cmap=plt.cm.gray)
        plt.show()


if __name__ == '__main__':
    # Inference check (loads previously trained weights):
    # zqd = Trainer(input_size=64*64,hidden_size=256,latent_size=32,output_size=64*64,model_weight='./save_model/AutoEncoder_V5.pth')
    # zqd.start_rebuild()

    # Training run.  TODO: change model_name when training a different model.
    trainer = Trainer(input_size=64*64, hidden_size=256, latent_size=32,
                      output_size=64*64, model_name='./save_model/AutoEncoder_V6.pth')
    trainer.start_training(100)