import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.metrics import classification_report, accuracy_score
import torch
from torch import nn
import torch.utils.data as Data
import torch.nn.functional as F
import torch.optim as optim
from torchvision import transforms
from torchvision.datasets import CIFAR10
from torchvision.utils import make_grid
from torchinfo import summary
from tqdm import tqdm
from ae_net import AE_FC, AE_CNN, VAE_CNN
import torchvision
import torchvision.datasets as dset
import math
from util import get_project_root


# Project root used to build data / result / checkpoint paths below.
Root_path = get_project_root()
# Select GPU when available, otherwise fall back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

def eval_model(net, dataset_name, testset, batch_size):
    """Evaluate a pretrained (variational) autoencoder and save image grids.

    Loads the checkpoint for ``net`` from ``save_models/<dataset_name>/``,
    computes the average per-sample summed-MSE reconstruction loss over
    ``testset``, and writes a grid of real and reconstructed images under
    ``result/<dataset_name>/``.

    Args:
        net: model exposing a ``model_code`` attribute; its forward pass is
            assumed to return ``(latent, reconstruction)`` -- confirm for
            models with a different return signature.
        dataset_name: subdirectory name for results and saved checkpoints.
        testset: torch Dataset yielding ``(image, label)`` pairs.
        batch_size: evaluation batch size (also sizes the saved image grid).

    Raises:
        FileNotFoundError: if no checkpoint exists for this net/dataset.
    """
    testloader = torch.utils.data.DataLoader(
        testset, batch_size=batch_size, shuffle=True,
        num_workers=0)  # shuffled so the fixed debug batch varies per run

    # Keep one fixed batch so the "real" and "reconstructed" grids show the
    # same images.
    fixed_x, _ = next(iter(testloader))

    pic_path = Root_path + "/result/" + dataset_name
    os.makedirs(pic_path, exist_ok=True)
    nrow = int(math.sqrt(batch_size))
    torchvision.utils.save_image(fixed_x.data.cpu(), pic_path + '/real_images.png', nrow=nrow, padding=2,)

    save_path = Root_path + "/save_models/{}/{}.pth".format(dataset_name, net.model_code)
    if not os.path.exists(save_path):
        # Raise instead of `assert`: asserts are stripped under `python -O`.
        raise FileNotFoundError("model checkpoint not found: " + save_path)
    print("loading:", save_path)
    # map_location lets a GPU-trained checkpoint load on a CPU-only host.
    net.load_state_dict(torch.load(save_path, map_location=device))

    net = net.to(device)
    net.eval()

    # Sum-reduced MSE so dividing by the sample count yields per-sample loss.
    loss_fn = nn.MSELoss(reduction='sum').to(device)

    running_loss = 0.0
    epoch_loss = float('nan')  # defined even if the loader yields no batches
    # no_grad() replaces the manual requires_grad=False loop: it skips graph
    # construction for activations too, saving far more memory during eval.
    with torch.no_grad():
        with tqdm(testloader, desc="testing:",) as tq:
            for batchidx, (x, labels) in enumerate(tq):
                x = x.to(device)
                z, outputs = net(x)
                loss = loss_fn(outputs, x)

                running_loss += loss.item()

                # Running per-sample average; x.size(0) handles a short last batch.
                epoch_loss = running_loss / (batchidx * batch_size + x.size(0))
                tq.set_postfix(Loss=epoch_loss, )

        print(f'{dataset_name} epoch loss {epoch_loss}')

        _, reconst_images = net(fixed_x.to(device))

    torchvision.utils.save_image(reconst_images.data.cpu(),
                                 pic_path + '/reconst_images_eval.png',
                                 nrow=nrow, padding=2,)


def eval_cnn():
    """Evaluate the convolutional autoencoder on pathology images and CIFAR-10."""
    image_size = 64
    net = AE_CNN()
    summary(net, (1, 3, image_size, image_size))

    # --- pathology dataset: 256px images are resized; smaller sizes crop ---
    dataset_name = "pathlo"
    batch_size = 16
    if image_size == 256:
        trans = transforms.Compose((transforms.Resize(image_size), transforms.ToTensor()))
        data_root = Root_path + "/data/pathol{}".format(image_size)
    else:
        trans = transforms.Compose((transforms.RandomCrop(image_size), transforms.ToTensor()))
        data_root = Root_path + "/data/pathol{}".format(256)
    testset = dset.ImageFolder(root=data_root, transform=trans)
    eval_model(net, dataset_name, testset, batch_size)

    # --- CIFAR-10 (training split, assumed pre-downloaded) ---
    dataset_name = "cifar10"
    batch_size = 256
    trans = transforms.Compose((transforms.Resize(32), transforms.ToTensor()))
    testset = CIFAR10(
        root=r'D:/Data',
        train=True,
        download=False,
        transform=trans)
    eval_model(net, dataset_name, testset, batch_size)

def eval_FC():
    """Evaluate the fully connected autoencoder on pathology images and CIFAR-10."""
    image_size = 256
    net = AE_FC(image_size, 64)
    summary(net, (1, 3, image_size, image_size))

    # Pathology set: at 256px the images are resized, otherwise random-cropped
    # out of the 256px folder.
    batch_size = 16
    if image_size == 256:
        pathol_trans = transforms.Compose(
            (transforms.Resize(image_size), transforms.ToTensor()))
        testset = dset.ImageFolder(
            root=f"{Root_path}/data/pathol{image_size}", transform=pathol_trans)
    else:
        pathol_trans = transforms.Compose(
            (transforms.RandomCrop(image_size), transforms.ToTensor()))
        testset = dset.ImageFolder(
            root=f"{Root_path}/data/pathol256", transform=pathol_trans)
    eval_model(net, "pathlo", testset, batch_size)

    # CIFAR-10 training split, resized to 32px (assumed already downloaded).
    image_size = 32
    batch_size = 256
    cifar_trans = transforms.Compose(
        (transforms.Resize(image_size), transforms.ToTensor()))
    testset = CIFAR10(root=r'D:/Data', train=True, download=False,
                      transform=cifar_trans)
    eval_model(net, "cifar10", testset, batch_size)

def eval_VAE_CNN():
    """Evaluate the convolutional VAE on pathology images and CIFAR-10."""
    image_size = 256
    net = VAE_CNN(image_size,)
    summary(net, (1, 3, image_size, image_size))

    # Pathology data: full-size images are resized; any other target size is
    # random-cropped from the 256px folder.
    if image_size == 256:
        trans = transforms.Compose(
            (transforms.Resize(image_size), transforms.ToTensor()))
        folder = Root_path + "/data/pathol" + str(image_size)
    else:
        trans = transforms.Compose(
            (transforms.RandomCrop(image_size), transforms.ToTensor()))
        folder = Root_path + "/data/pathol" + str(256)
    eval_model(net, "pathlo", dset.ImageFolder(root=folder, transform=trans),
               batch_size=16)

    # CIFAR-10 training split, resized to 32px (assumed already downloaded).
    image_size = 32
    trans = transforms.Compose(
        (transforms.Resize(image_size), transforms.ToTensor()))
    cifar_set = CIFAR10(
        root=r'D:/Data',
        train=True,
        download=False,
        transform=trans)
    eval_model(net, "cifar10", cifar_set, batch_size=256)

if __name__ == '__main__':
    # Alternate entry points -- uncomment to evaluate the other models:
    # eval_cnn()
    # eval_FC()
    eval_VAE_CNN()
