import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.metrics import classification_report, accuracy_score
import torch
from torch import nn
import torch.utils.data as Data
import torch.nn.functional as F
import torch.optim as optim
from torchvision import transforms
from torchvision.datasets import CIFAR10
from torchvision.utils import make_grid
from torchinfo import summary
from tqdm import tqdm
from ae_net import AE_FC, AE_CNN
import torchvision
import torchvision.datasets as dset
import math
from util import get_project_root

# Project root directory; used as the base for result/ and save_models/ paths.
Root_path = get_project_root()
# Select GPU when available, otherwise fall back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

def train_model(net, dataset_name, trainset, optimizer, loss_fn, back_size, batch_size):
    """Train an autoencoder and periodically save reconstructions and weights.

    Args:
        net: autoencoder module whose forward(x) returns (latent, reconstruction)
            and which exposes a ``model_code`` attribute used in the checkpoint
            file name.
        dataset_name: sub-directory name used under result/ and save_models/.
        trainset: dataset yielding (image, label) pairs.
        optimizer: optimizer already bound to ``net``'s parameters.
        loss_fn: reconstruction loss module, applied as ``loss_fn(output, input)``.
        back_size: unused; kept for backward compatibility with existing callers.
        batch_size: mini-batch size for the DataLoader.

    Side effects: writes images under ``<root>/result/<dataset_name>/`` and a
    checkpoint under ``<root>/save_models/<dataset_name>/`` every 10 epochs.
    """
    # Batches are drawn in shuffled order each epoch.
    trainloader = torch.utils.data.DataLoader(
        trainset, batch_size=batch_size, shuffle=True,
        num_workers=0)

    # Keep one fixed batch so reconstructions are comparable across epochs.
    fixed_x, _ = next(iter(trainloader))

    pic_path = Root_path + "/result/" + dataset_name
    os.makedirs(pic_path, exist_ok=True)
    nrow = int(math.sqrt(batch_size))
    torchvision.utils.save_image(fixed_x.data.cpu(), pic_path + '/real_images.png', nrow=nrow, padding=2,)

    save_path = Root_path + "/save_models/{}/{}.pth".format(dataset_name, net.model_code)
    if os.path.exists(save_path):
        print("loading:", save_path)
        # map_location keeps checkpoints loadable when the saving and loading
        # machines differ (e.g. GPU-saved weights on a CPU-only box).
        net.load_state_dict(torch.load(save_path, map_location=device))
    else:
        os.makedirs(Root_path + "/save_models/{}".format(dataset_name), exist_ok=True)

    net = net.to(device)
    loss_fn = loss_fn.to(device)
    # mode='min': multiply the LR by `factor` once the loss has stopped
    # decreasing for `patience` epochs.
    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', patience=10,
                                                           factor=0.1)
    num_epochs = 100

    for epoch in range(num_epochs):
        running_loss = 0
        lr = optimizer.state_dict()['param_groups'][0]['lr']
        net.train()
        with tqdm(trainloader, desc="training in Epoch {}/{}".format(epoch, num_epochs),) as tq:
            for batchidx, (x, labels) in enumerate(tq):
                x = x.to(device)
                # Call the module (not net.forward) so registered hooks run.
                z, outputs = net(x)
                loss = loss_fn(outputs, x)

                optimizer.zero_grad()
                loss.backward()
                optimizer.step()
                lr = optimizer.state_dict()['param_groups'][0]['lr']

                running_loss += loss.item()

                # Running mean of the accumulated loss over samples seen so far
                # this epoch (last batch may be smaller than batch_size).
                epoch_loss = running_loss / (batchidx * batch_size + x.size(0))
                tq.set_postfix(Lr=lr, Loss=epoch_loss, )

            scheduler.step(running_loss)
            if (epoch + 1) % 10 == 0:
                # Reconstruct the fixed batch for visual progress; no autograd
                # graph is needed for this forward pass.
                with torch.no_grad():
                    _, reconst_images = net(fixed_x.to(device))

                torchvision.utils.save_image(reconst_images.data.cpu(),
                                             pic_path + '/reconst_images_{:04d}.png'.format(epoch + 1),
                                             nrow=nrow, padding=2,)

                torch.save(net.state_dict(), save_path)


def train_vae_model(net, dataset_name, trainset, optimizer, loss_fn, back_size, batch_size):
    """Train a variational autoencoder and periodically save reconstructions.

    Args:
        net: VAE module whose forward(x) returns ((mu, log_var), reconstruction)
            and which exposes a ``model_code`` attribute used in the checkpoint
            file name.
        dataset_name: sub-directory name used under result/ and save_models/.
        trainset: dataset yielding (image, label) pairs.
        optimizer: optimizer already bound to ``net``'s parameters.
        loss_fn: reconstruction loss module, applied as ``loss_fn(output, input)``.
        back_size: unused; kept for backward compatibility with existing callers.
        batch_size: mini-batch size for the DataLoader.

    The objective is reconstruction loss plus a 0.5-weighted KL divergence of
    the approximate posterior N(mu, exp(log_var)) from the standard normal.
    Side effects: writes images under ``<root>/result/<dataset_name>/`` and a
    checkpoint under ``<root>/save_models/<dataset_name>/`` every 10 epochs.
    """
    # Batches are drawn in shuffled order each epoch.
    trainloader = torch.utils.data.DataLoader(
        trainset, batch_size=batch_size, shuffle=True,
        num_workers=0)

    # Keep one fixed batch so reconstructions are comparable across epochs.
    fixed_x, _ = next(iter(trainloader))

    pic_path = Root_path + "/result/" + dataset_name
    os.makedirs(pic_path, exist_ok=True)
    nrow = int(math.sqrt(batch_size))
    torchvision.utils.save_image(fixed_x.data.cpu(), pic_path + '/real_images.png', nrow=nrow, padding=2,)

    save_path = Root_path + "/save_models/{}/{}.pth".format(dataset_name, net.model_code)
    if os.path.exists(save_path):
        print("loading:", save_path)
        # map_location keeps checkpoints loadable when the saving and loading
        # machines differ (e.g. GPU-saved weights on a CPU-only box).
        net.load_state_dict(torch.load(save_path, map_location=device))
    else:
        os.makedirs(Root_path + "/save_models/{}".format(dataset_name), exist_ok=True)

    net = net.to(device)
    loss_fn = loss_fn.to(device)
    # mode='min': multiply the LR by `factor` once the loss has stopped
    # decreasing for `patience` epochs.
    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', patience=100,
                                                           factor=0.1)
    num_epochs = 1000

    for epoch in range(num_epochs):
        running_loss = 0
        lr = optimizer.state_dict()['param_groups'][0]['lr']
        net.train()
        with tqdm(trainloader, desc="training in Epoch {}/{}".format(epoch, num_epochs),) as tq:
            for batchidx, (x, labels) in enumerate(tq):
                x = x.to(device)
                # Call the module (not net.forward) so registered hooks run.
                (mu, log_var), outputs = net(x)

                # Reconstruction term.
                reconst_loss = loss_fn(outputs, x)
                # KL(N(mu, exp(log_var)) || N(0, I)), summed over the batch.
                kl_div = -0.5 * torch.sum(1 + log_var - mu.pow(2) - log_var.exp())
                # Total objective: reconstruction + 0.5-weighted KL term.
                loss = reconst_loss + 0.5 * kl_div

                optimizer.zero_grad()
                loss.backward()
                optimizer.step()
                lr = optimizer.state_dict()['param_groups'][0]['lr']

                running_loss += loss.item()

                # Running mean of the accumulated loss over samples seen so far
                # this epoch (last batch may be smaller than batch_size).
                epoch_loss = running_loss / (batchidx * batch_size + x.size(0))
                tq.set_postfix(Lr=lr, Loss=epoch_loss, Reconst_Loss=reconst_loss.item() / x.size(0),
                               KL_Div=kl_div.item() / x.size(0),)

            scheduler.step(running_loss)
            if (epoch + 1) % 10 == 0:
                # Reconstruct the fixed batch for visual progress; no autograd
                # graph is needed for this forward pass.
                with torch.no_grad():
                    _, reconst_images = net(fixed_x.to(device))

                torchvision.utils.save_image(reconst_images.data.cpu(),
                                             pic_path + '/reconst_images_{:04d}.png'.format(epoch + 1),
                                             nrow=nrow, padding=2,)

                torch.save(net.state_dict(), save_path)

