#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    : 2025/8/19
# @USER    : Shengji He
# @File    : eval.py
# @Software: PyCharm
# @Version  : Python-
# @TASK:

import torch
import matplotlib.pyplot as plt

from dataset import get_data_loader
from util import set_random_seed
from network import VQGAN, Discriminator, Transformer


def show_mnist(images):
    """Display a row of grayscale MNIST images and block until closed.

    Each element of *images* must be reshapeable to 28x28 via ``.view``
    (i.e. a CPU tensor with 784 elements).
    """
    if len(images) == 0:  # plt.subplots(1, 0) would raise
        return
    # squeeze=False keeps the axes in a 2-D array even for a single image,
    # so the loop below works for len(images) == 1 as well.
    _, axes = plt.subplots(1, len(images), figsize=(12, 12), squeeze=False)
    for ax, img in zip(axes.flat, images):
        ax.imshow(img.view(28, 28), cmap='gray')
        ax.get_xaxis().set_visible(False)
        ax.get_yaxis().set_visible(False)
    plt.show(block=True)


def eval():
    """Reconstruct one validation batch with a trained VQGAN and display the
    first ten reconstructions, then the corresponding originals.

    NOTE(review): the name shadows the builtin ``eval``; kept unchanged so
    existing callers (see ``__main__``) still work.
    """
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    seed = 42

    train_dir = r'D:\heshengji\tools\dataset'
    pt_folder, pt_file4vqgan = './', 'vqgan.pt'
    batch_size = 128

    set_random_seed(seed)
    torch.backends.cudnn.deterministic = True

    # Only the validation split is needed for reconstruction.
    _, valid_loader = get_data_loader(train_dir, batch_size)

    vqgan = VQGAN().to(device)
    # NOTE(review): weights_only=False unpickles arbitrary objects — fine for
    # a self-produced checkpoint, unsafe for untrusted files.
    vqgan.load_state_dict(torch.load(f'{pt_folder}//{pt_file4vqgan}', map_location=device, weights_only=False))
    vqgan.eval()

    # One batch is enough; no_grad avoids building the autograd graph.
    pic, _ = next(iter(valid_loader))
    pic = pic.to(device)
    with torch.no_grad():
        decoded_pic, _, _ = vqgan(pic)

    show_mnist(decoded_pic.cpu()[:10])  # reconstructions
    show_mnist(pic.cpu()[:10])          # originals (.cpu() — matplotlib cannot render CUDA tensors)


def eval_transformer():
    """Sample images with a trained VQGAN + Transformer, conditioned on one
    validation image, and display them.
    """
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    seed = 42

    train_dir = r'D:\heshengji\tools\dataset'
    pt_folder, pt_file4vqgan, pt_file4tfm = './', 'vqgan.pt', 'tfm.pt'
    batch_size = 128

    set_random_seed(seed)
    torch.backends.cudnn.deterministic = True

    # Only the validation split is used here.
    _, valid_loader = get_data_loader(train_dir, batch_size)

    vqgan = VQGAN().to(device)
    vqgan.load_state_dict(torch.load(f'{pt_folder}//{pt_file4vqgan}', map_location=device, weights_only=False))
    vqgan.eval()

    tfm = Transformer(vqgan).to(device)
    tfm.load_state_dict(torch.load(f'{pt_folder}//{pt_file4tfm}', map_location=device, weights_only=False))
    # NOTE(review): tfm.eval() was deliberately commented out in the original —
    # the transformer samples in train mode; confirm whether that is intended.
    # tfm.eval()

    # Take a single conditioning image from the first batch; no_grad skips the
    # autograd graph during sampling.
    pic, _ = next(iter(valid_loader))
    pic = pic[:1].to(device)
    with torch.no_grad():
        _, sampled_imgs = tfm.log_images(pic)

    show_mnist([img.detach().cpu().squeeze() for img in sampled_imgs])


if __name__ == '__main__':
    # Runs the Transformer sampling check by default; swap the comments to run
    # the VQGAN reconstruction check instead.
    eval_transformer()
    # eval()
    print('done')
