# -*- coding:utf-8 -*-#
# @Time:2023/6/4 13:13
# @Author:Adong
# @Software:PyCharm

'''
Autoencoder (AE) demo: encoder/decoder MLPs trained to reconstruct images.
'''

import torch
import matplotlib.pyplot as plt
from torchvision import datasets, transforms
from torch import nn, optim
from torch.nn import functional as F
from tqdm import tqdm
import os
import dataset
from torch.utils import data



class Encoder(torch.nn.Module):
    """Two-layer MLP encoder: input -> hidden (ReLU) -> latent."""

    def __init__(self, input_size, hidden_size, latent_size):
        super().__init__()
        self.linear1 = nn.Linear(input_size, hidden_size)
        self.linear2 = nn.Linear(hidden_size, latent_size)

    def forward(self, x):
        # x: (bs, input_size) -> (bs, hidden_size)
        hidden = self.linear1(x).relu()
        # (bs, hidden_size) -> (bs, latent_size); no activation on the code layer
        return self.linear2(hidden)


class Decoder(torch.nn.Module):
    """Two-layer MLP decoder: latent -> hidden (ReLU) -> output squashed to (0, 1)."""

    def __init__(self, latent_size, hidden_size, output_size):
        super().__init__()
        self.linear1 = nn.Linear(latent_size, hidden_size)
        self.linear2 = nn.Linear(hidden_size, output_size)

    def forward(self, x):
        # x: (bs, latent_size) -> (bs, hidden_size)
        hidden = self.linear1(x).relu()
        # (bs, hidden_size) -> (bs, output_size), sigmoid keeps pixels in (0, 1)
        return torch.sigmoid(self.linear2(hidden))


class AE(torch.nn.Module):
    """Autoencoder: the Encoder compresses the input, the Decoder reconstructs it."""

    def __init__(self, input_size, output_size, latent_size, hidden_size):
        super().__init__()
        self.encoder = Encoder(input_size, hidden_size, latent_size)
        self.decoder = Decoder(latent_size, hidden_size, output_size)

    def forward(self, x):
        # x: (bs, input_size) -> code (bs, latent_size) -> reconstruction (bs, output_size)
        return self.decoder(self.encoder(x))

class Trainer:
    """Builds an AE model plus its data pipeline and runs training / evaluation.

    Parameters
    ----------
    input_size : int
        Flattened size of one input image (e.g. 64*64).
    hidden_size : int
        Width of the encoder/decoder hidden layer.
    latent_size : int
        Dimension of the compressed representation.
    output_size : int
        Flattened size of the reconstruction (normally equal to input_size).
    """

    def __init__(self, input_size, hidden_size, latent_size, output_size):
        # Model dimensions.
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.latent_size = latent_size
        self.output_size = output_size

        # Training hyper-parameters. Declared up front so they actually configure
        # the objects below (the original declared them afterwards and never used
        # them; values here match what was really in effect).
        self.batch_size = 5        # samples per step
        self.learning_rate = 1e-3  # Adam step size

        # Pick the training device.
        self.device = "cuda" if torch.cuda.is_available() else 'cpu'
        print("training by " + self.device + "!")
        # Build the autoencoder.
        self.model = AE(input_size, output_size, latent_size, hidden_size).to(self.device)
        # Loss functions: per-pixel binary cross-entropy, or MSE as an alternative.
        self.loss_BCE = torch.nn.BCELoss(reduction='sum')
        self.loss_MSE = torch.nn.MSELoss(reduction='sum')
        # Optimizer.
        self.optimizer = optim.Adam(self.model.parameters(), lr=self.learning_rate)
        # Project datasets (see dataset.makeDataset).
        self.train_dataset = dataset.makeDataset('train')
        self.test_dataset = dataset.makeDataset('test')
        # DataLoaders.
        self.train_dataloader = data.DataLoader(dataset=self.train_dataset,
                                                batch_size=self.batch_size, shuffle=True)
        self.test_dataloader = data.DataLoader(dataset=self.test_dataset,
                                               batch_size=self.batch_size, shuffle=True)

    def start_training(self, epochs):
        """Train for `epochs` epochs, evaluating and checkpointing after each one."""
        device = self.device
        loss_fn = self.loss_MSE  # MSE reconstruction error (loss_BCE is a drop-in alternative)
        model = self.model
        input_size = self.input_size
        optimizer = self.optimizer
        modelname = './save_model/AutoEncoder_V1.pth'
        # torch.save does not create missing directories — make sure it exists.
        os.makedirs(os.path.dirname(modelname), exist_ok=True)
        # Side length used for visualization; assumes square images — TODO confirm.
        side = int(input_size ** 0.5)

        loss_history = {'train': [], 'eval': []}
        for epoch in range(epochs):
            # ---- training pass ----
            model.train()
            train_loss = 0
            train_nsample = 0
            t = tqdm(self.train_dataloader, desc=f'[train]epoch:{epoch}')
            for imgs, lbls in t:  # imgs: (bs, H, W)
                bs = imgs.shape[0]
                imgs = imgs.to(device).view(bs, input_size)  # flatten to (bs, input_size)
                re_imgs = model(imgs)
                loss = loss_fn(re_imgs, imgs)
                # Reset grads, backprop, step.
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()
                # Running mean loss in the progress bar.
                train_loss += loss.item()
                train_nsample += bs
                t.set_postfix({'loss': train_loss / train_nsample})
            loss_history['train'].append(train_loss / train_nsample)

            # ---- evaluation pass (no gradients needed) ----
            model.eval()
            test_loss = 0
            test_nsample = 0
            e = tqdm(self.test_dataloader, desc=f'[eval]epoch:{epoch}')
            with torch.no_grad():
                for imgs, label in e:
                    bs = imgs.shape[0]
                    imgs = imgs.to(device).view(bs, input_size)
                    re_imgs = model(imgs)
                    loss = loss_fn(re_imgs, imgs)
                    test_loss += loss.item()
                    test_nsample += bs
                    e.set_postfix({'loss': test_loss / test_nsample})
            loss_history['eval'].append(test_loss / test_nsample)

            # Show the last eval sample next to its reconstruction.
            concat = torch.cat((imgs[0].view(side, side), re_imgs[0].view(side, side)), 1)
            plt.matshow(concat.cpu().detach().numpy())
            plt.show()

            # Loss curves so far.
            plt.plot(range(epoch + 1), loss_history['train'])
            plt.plot(range(epoch + 1), loss_history['eval'])
            plt.show()
            # Checkpoint after every epoch.
            torch.save(model.state_dict(), modelname)

    def start_verifing(self, modelname):
        """Load a checkpoint and show how the encoder compresses one test sample."""
        model = AE(self.input_size, self.output_size, self.latent_size, self.hidden_size).to(self.device)
        # map_location lets a GPU-trained checkpoint load on a CPU-only machine.
        model.load_state_dict(torch.load(modelname, map_location=self.device))
        model.eval()
        # Sample from the project test set: the original pulled a 28x28 MNIST digit,
        # whose 784 pixels do not match an input_size of 64*64 and crashed the
        # first linear layer (and was never moved to the model's device).
        raw = self.test_dataset[0][0].view(1, -1).to(self.device)
        with torch.no_grad():
            feat = model.encoder(raw)
        # Show the compression: input shape -> latent shape.
        print(raw.shape, '->', feat.shape)


if __name__ == '__main__':
    # 64x64 images (flattened to 4096), 256-unit hidden layer, 32-dim latent code.
    # The ~120-line commented-out legacy script that used to sit here as a no-op
    # string literal has been removed; Trainer supersedes it.
    trainer = Trainer(64 * 64, 256, 32, 64 * 64)
    trainer.start_training(20)