import os
import sys
import re
from python_ai.common.xcommon import *
from model import *
from data import *
from my_arguments import *
import matplotlib.pyplot as plt
import pickle

if '__main__' == __name__:
    # Evaluate every saved epoch checkpoint on the validation set and
    # record/plot the average validation loss per epoch.
    weights_dir = './_save/on_mid_before'
    # weights_dir = './_save'
    files = os.listdir(weights_dir)

    # Collect (epoch_no, filename) for every checkpoint named epochN.pkl.
    regexp = re.compile(r'^epoch(\d+)\.pkl$')
    epoch_info = []
    for file in files:
        matcher = regexp.match(file)
        if matcher is None:
            continue
        epoch_info.append([int(matcher[1]), file])

    # Sort by epoch number so evaluation order matches epoch order.
    epoch_info = sorted(epoch_info, key=lambda x: x[0])
    # BUGFIX: derive epoch numbers from the SORTED list. Previously
    # epoch_no_arr kept os.listdir() order while val_loss_arr followed
    # sorted order, so the pickled pairs and the plot matched epochs
    # with the wrong losses.
    epoch_no_arr = [info[0] for info in epoch_info]
    print(epoch_info)

    args = Args()
    args.set_train_args()  # parse command-line arguments
    opts = args.get_opts()
    print(opts)
    random_seed = opts.random_seed
    val_dataset = MyDataset(opts.dataset_dir, seed=random_seed, mode="val", train_val_ratio=0.9)
    val_loader = DataLoader(val_dataset, batch_size=opts.batch_size, shuffle=False, num_workers=opts.num_workers)

    val_loss_arr = []
    # BUGFIX: the inner batch loop used to shadow this loop's index `i`;
    # use a distinct name so the two loops cannot interfere.
    for idx, (epoch_no, file) in enumerate(epoch_info):
        print(idx, [epoch_no, file])
        model_path = os.path.join(weights_dir, file)
        # NOTE(review): torch.load unpickles a full model object — only
        # load checkpoints from trusted sources.
        model = torch.load(model_path)
        if opts.use_GPU:
            model.to(opts.GPU_id)
        model.eval()

        loss_sum = 0.0
        num_batches = 0
        with torch.no_grad():  # reduces GPU-memory usage during validation
            for imgs, labels in val_loader:
                # Reshape flat labels to (B, GL_NUMGRID, GL_NUMGRID, 30)
                # then to channels-first (B, 30, S, S).
                labels = labels.view(-1, GL_NUMGRID, GL_NUMGRID, 30)
                labels = labels.permute(0, 3, 1, 2)
                if opts.use_GPU:
                    imgs = imgs.to(opts.GPU_id)
                    labels = labels.to(opts.GPU_id)
                preds = model(imgs)  # forward pass; presumably calculate_loss reads predictions cached on the model — TODO confirm
                loss_sum += model.calculate_loss(labels).detach().cpu().item()
                num_batches += 1
        # BUGFIX: `loss_avg /= i + 1` divided by the inner loop index, which
        # silently fell back to the OUTER loop index (wrong average) when the
        # loader yielded no batches. Count batches explicitly instead.
        loss_avg = loss_sum / num_batches if num_batches else float('nan')
        val_loss_arr.append(loss_avg)
        print(f'{epoch_no}: {loss_avg}')

    # BUGFIX: zip objects cannot be pickled in Python 3 (TypeError);
    # materialize the pairs into a list before dumping.
    epoch_and_val_loss_arr = list(zip(epoch_no_arr, val_loss_arr))
    save_name = rand_name_on_now() + '.val_loss.pickle'
    save_path = os.path.join(weights_dir, save_name)
    with open(save_path, 'wb') as f:
        pickle.dump(epoch_and_val_loss_arr, f)

    plt.figure(figsize=[6, 6])
    plt.title('Val loss in epochs')
    plt.plot(epoch_no_arr, val_loss_arr)
    plt.show()
