import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import random
import os

import torch
import torchvision
from torchvision import transforms
from torch.utils.data import DataLoader,random_split
from torch import nn
import torch.nn.functional as F
import torch.optim as optim
from sklearn.manifold import TSNE

PATH_DATASETS = "E:/study_code/torch_study/aigc/data"  # dataset root directory
BATCH_SIZE = 256  # mini-batch size
# Train on GPU when available, otherwise CPU.
# (The original had a second, stray copy of this conditional expression on
# its own line — a no-op statement — which has been removed.)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Download MNIST; transforms are attached to the datasets later.
train_ds = torchvision.datasets.MNIST(PATH_DATASETS, train=True, download=True)
test_ds = torchvision.datasets.MNIST(PATH_DATASETS, train=False, download=True)

# fig, axs = plt.subplots(4, 5, figsize=(8,8))
# for ax in axs.flatten():
#     # 随机抽样
#     img, label = random.choice(train_ds)
#     ax.imshow(np.array(img), cmap='gist_gray')
#     ax.set_title('Label: %d' % label)
#     ax.set_xticks([])
#     ax.set_yticks([])
# plt.tight_layout()
# plt.show()

# Convert the PIL images to [0, 1] float tensors.
train_ds.transform = transforms.ToTensor()
test_ds.transform = transforms.ToTensor()

# Hold out 20% of the training data for validation.
m = len(train_ds)  # total number of training samples
n_val = int(m * 0.2)
# FIX: derive both split sizes from a single rounding so they always sum
# to m (the original [int(m - m*0.2), int(m*0.2)] can lose a sample and
# make random_split raise for lengths where m*0.2 is fractional).
train_data, val_data = random_split(train_ds, [m - n_val, n_val])

# FIX: shuffle the TRAINING loader — the original shuffled only the test
# loader, which is backwards: shuffling matters for SGD, not evaluation.
# (Also uses the DataLoader name already imported at the top of the file.)
train_loader = DataLoader(train_data, batch_size=BATCH_SIZE, shuffle=True)
valid_loader = DataLoader(val_data, batch_size=BATCH_SIZE)
test_loader = DataLoader(test_ds, batch_size=BATCH_SIZE, shuffle=True)


class Encoder(nn.Module):
    """Convolutional encoder: maps a 1x28x28 image to a latent vector.

    Args:
        encoded_space_dim: dimensionality of the latent representation.
        fc2_input_dim: accepted for interface compatibility; not used here.
    """

    def __init__(self, encoded_space_dim, fc2_input_dim):
        super().__init__()

        # Three strided convolutions: 28x28 -> 14x14 -> 7x7 -> 3x3.
        conv_stack = [
            nn.Conv2d(1, 8, 3, stride=2, padding=1),
            nn.ReLU(True),
            nn.Conv2d(8, 16, 3, stride=2, padding=1),
            nn.BatchNorm2d(16),
            nn.ReLU(True),
            nn.Conv2d(16, 32, 3, stride=2, padding=0),
            nn.ReLU(True),
        ]
        self.encoder_cnn = nn.Sequential(*conv_stack)

        # Collapse the (32, 3, 3) feature map into a flat vector per sample.
        self.flatten = nn.Flatten(start_dim=1)

        # Project the flattened features down to the latent dimension.
        linear_stack = [
            nn.Linear(3 * 3 * 32, 128),
            nn.ReLU(True),
            nn.Linear(128, encoded_space_dim),
        ]
        self.encoder_lin = nn.Sequential(*linear_stack)

    def forward(self, x):
        """Encode a batch of images into latent vectors."""
        return self.encoder_lin(self.flatten(self.encoder_cnn(x)))


class Decoder(nn.Module):
    """Mirror of Encoder: expands a latent vector back into a 1x28x28 image.

    Args:
        encoded_space_dim: dimensionality of the latent input.
        fc2_input_dim: accepted for interface compatibility; not used here.
    """

    def __init__(self, encoded_space_dim, fc2_input_dim):
        super().__init__()

        # Latent vector -> flat feature vector of size 32*3*3.
        linear_stack = [
            nn.Linear(encoded_space_dim, 128),
            nn.ReLU(True),
            nn.Linear(128, 3 * 3 * 32),
            nn.ReLU(True),
        ]
        self.decoder_lin = nn.Sequential(*linear_stack)

        # Reshape the flat vector back into a (32, 3, 3) feature map.
        self.unflatten = nn.Unflatten(dim=1, unflattened_size=(32, 3, 3))

        # Transposed convolutions: 3x3 -> 7x7 -> 14x14 -> 28x28.
        upsample_stack = [
            nn.ConvTranspose2d(32, 16, 3, stride=2, output_padding=0),
            nn.BatchNorm2d(16),
            nn.ReLU(True),
            nn.ConvTranspose2d(16, 8, 3, stride=2, padding=1, output_padding=1),
            nn.BatchNorm2d(8),
            nn.ReLU(True),
            nn.ConvTranspose2d(8, 1, 3, stride=2, padding=1, output_padding=1),
        ]
        self.decoder_conv = nn.Sequential(*upsample_stack)

    def forward(self, x):
        """Decode latent vectors; pixels are squashed to (0, 1) by sigmoid."""
        features = self.unflatten(self.decoder_lin(x))
        return torch.sigmoid(self.decoder_conv(features))


# Fix the RNG seed so weight initialization (and hence runs) is reproducible.
torch.manual_seed(0)

# Latent-space size: encoder output width == decoder input width.
d = 4
encoder = Encoder(encoded_space_dim=d,fc2_input_dim=128).to(device)
decoder = Decoder(encoded_space_dim=d,fc2_input_dim=128).to(device)
# Pixel-wise mean-squared-error reconstruction loss.
loss_fn = torch.nn.MSELoss()
lr= 0.001 # Learning rate

# One Adam optimizer drives both networks' parameters jointly.
params_to_optimize = [
    {'params': encoder.parameters()},
    {'params': decoder.parameters()}
]

# NOTE(review): this rebinds the name `optim`, shadowing the
# `torch.optim` module imported at the top of the file; consider
# renaming to `optimizer` (callers below would need updating too).
optim = torch.optim.Adam(params_to_optimize, lr=lr)

def add_noise(inputs, noise_factor=0.3):
    """Corrupt *inputs* with scaled Gaussian noise, clamped back to [0, 1].

    Args:
        inputs: tensor of pixel values (expected in [0, 1]).
        noise_factor: standard-deviation scale of the additive noise.

    Returns:
        A new tensor of the same shape with values in [0, 1].
    """
    corrupted = inputs + noise_factor * torch.randn_like(inputs)
    # clamp (alias of the original's clip) keeps pixels in the valid range
    return torch.clamp(corrupted, 0.0, 1.0)

def train_epoch_den(encoder, decoder, device, dataloader,
                    loss_fn, optimizer, noise_factor=0.3):
    """Run one training epoch of the denoising autoencoder.

    Each batch is corrupted with Gaussian noise, passed through
    encoder -> decoder, and the reconstruction loss is taken against the
    CLEAN images.

    Args:
        encoder, decoder: the two halves of the autoencoder.
        device: torch device to run on.
        dataloader: yields (images, labels); labels are ignored.
        loss_fn: reconstruction criterion (MSE here).
        optimizer: optimizer covering both networks' parameters.
        noise_factor: noise scale forwarded to add_noise.

    Returns:
        Mean training loss over all batches (numpy float).
    """
    # Switch both networks to training mode (enables BatchNorm updates).
    encoder.train()
    decoder.train()
    train_loss = []
    for image_batch, _ in dataloader:
        # Corrupt the input, then move both versions to the device.
        image_noisy = add_noise(image_batch, noise_factor).to(device)
        image_batch = image_batch.to(device)
        # Encode the noisy image and reconstruct it.
        decoded_data = decoder(encoder(image_noisy))
        # BUG FIX: compare the reconstruction with the CLEAN image, not the
        # noisy one. The original computed loss_fn(decoded, image_noisy),
        # which trains the network to reproduce its own noisy input —
        # defeating the point of a denoising autoencoder — and disagrees
        # with test_epoch_den, which scores against the clean images.
        loss = loss_fn(decoded_data, image_batch)
        # Backpropagate and update both networks.
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        train_loss.append(loss.detach().cpu().numpy())

    return np.mean(train_loss)

# Configure matplotlib to render CJK (Chinese) glyphs in plot titles.
from matplotlib.font_manager import FontProperties  # NOTE(review): appears unused below
plt.rcParams['font.sans-serif'] = ['Microsoft JhengHei'] # Microsoft JhengHei typeface
plt.rcParams['axes.unicode_minus'] = False  # render '-' correctly with a CJK font

def test_epoch_den(encoder, decoder, device, dataloader,
                   loss_fn, noise_factor=0.3):
    """Evaluate the denoising autoencoder on one dataloader.

    Inputs are corrupted with noise before encoding; the loss is measured
    between the reconstructions and the original clean images, over the
    whole dataset at once.

    Returns:
        The scalar validation loss (detached tensor).
    """
    # Evaluation mode: freezes BatchNorm running statistics.
    encoder.eval()
    decoder.eval()
    with torch.no_grad():
        reconstructions = []
        clean_targets = []
        for clean_batch, _ in dataloader:
            # Corrupt the batch and run it through the autoencoder.
            noisy_batch = add_noise(clean_batch, noise_factor).to(device)
            restored = decoder(encoder(noisy_batch))
            # Collect per-batch results on the CPU.
            reconstructions.append(restored.cpu())
            clean_targets.append(clean_batch.cpu())
        # One loss over the concatenated dataset (clean images as target).
        val_loss = loss_fn(torch.cat(reconstructions), torch.cat(clean_targets))
    return val_loss.data

def plot_ae_outputs_den(epoch, encoder, decoder, n=5, noise_factor=0.3):
    """Plot reconstructions for the first *n* test images.

    On epoch 0 three rows are shown (original / noisy / reconstruction);
    on later epochs only the reconstruction row is drawn.
    """
    plt.figure(figsize=(10, 4.5))
    # eval() is idempotent, so setting it once up front matches the
    # original per-iteration calls.
    encoder.eval()
    decoder.eval()
    for col in range(n):
        # Created unconditionally, mirroring the original control flow.
        axis = plt.subplot(3, n, col + 1)
        clean = test_ds[col][0].unsqueeze(0)
        noisy = add_noise(clean, noise_factor).to(device)

        with torch.no_grad():
            restored = decoder(encoder(noisy))

        if epoch == 0:
            # Row 1: the original image.
            plt.imshow(clean.cpu().squeeze().numpy(), cmap='gist_gray')
            axis.get_xaxis().set_visible(False)
            axis.get_yaxis().set_visible(False)
            if col == n // 2:
                axis.set_title('原图')
            # Row 2: the noisy input.
            axis = plt.subplot(3, n, col + 1 + n)
            plt.imshow(noisy.cpu().squeeze().numpy(), cmap='gist_gray')
            axis.get_xaxis().set_visible(False)
            axis.get_yaxis().set_visible(False)
            if col == n // 2:
                axis.set_title('加杂讯')
            # Row 3: the reconstruction.
            axis = plt.subplot(3, n, col + 1 + 2 * n)
        else:
            axis = plt.subplot(1, n, col + 1)
        plt.imshow(restored.cpu().squeeze().numpy(), cmap='gist_gray')
        axis.get_xaxis().set_visible(False)
        axis.get_yaxis().set_visible(False)
        if epoch == 0 and col == n // 2:
            axis.set_title('重建图像')
    plt.subplots_adjust(left=0.1, bottom=0.1, right=0.7, top=0.9,
                        wspace=0.3, hspace=0.3)
    plt.show()


noise_factor = 0.3
num_epochs = 30
# Per-epoch loss history for later inspection/plotting.
history_da = {'train_loss': [], 'val_loss': []}

for epoch in range(num_epochs):
    # Train one epoch on noise-corrupted inputs.
    train_loss = train_epoch_den(
        encoder=encoder,
        decoder=decoder,
        device=device,
        dataloader=train_loader,
        loss_fn=loss_fn,
        optimizer=optim, noise_factor=noise_factor)
    # Score on the held-out validation split.
    val_loss = test_epoch_den(
        encoder=encoder,
        decoder=decoder,
        device=device,
        dataloader=valid_loader,
        loss_fn=loss_fn, noise_factor=noise_factor)
    history_da['train_loss'].append(train_loss)
    history_da['val_loss'].append(val_loss)
    # BUG FIX: the original string literals lacked the f-prefix, so the
    # braces were printed verbatim ("{epoch + 1}/...") instead of values.
    print(f'EPOCH {epoch + 1}/{num_epochs} \t 训练损失：{train_loss:.3f}'
          f' \t 验证损失： {val_loss:.3f}')
    # Visualize reconstructions for a few test images.
    plot_ae_outputs_den(epoch, encoder, decoder, noise_factor=noise_factor)