import torch
from torchvision import utils as vutils
import torch.nn.functional as F
from util.dataset import MVTecDRAEMTestDataset
from torch.utils.data import DataLoader
import numpy as np
from sklearn.metrics import roc_auc_score, average_precision_score
from module.Unet import DiscriminativeSubNetwork
import os
from module import models_mae as models_mae


def save_image_tensor(input_tensor: torch.Tensor, filename):
    """Write a single-image batch tensor to disk as an image file.

    :param input_tensor: tensor of shape (1, C, H, W) holding one image
    :param filename: destination path for the saved image
    """
    assert (len(input_tensor.shape) == 4 and input_tensor.shape[0] == 1)
    # Work on a detached CPU copy so the caller's tensor and autograd graph
    # are left untouched.
    snapshot = input_tensor.clone().detach().to(torch.device('cpu'))
    # De-normalization is intentionally skipped here:
    # snapshot = unnormalize(snapshot)
    vutils.save_image(snapshot, filename)


def write_results_to_file(run_name, image_auc, pixel_auc, image_ap, pixel_ap):
    """Append four metric rows (plus a separator) to ./result/results.txt.

    Each row is comma-separated: label, run name, every per-object score
    rounded to 3 decimals, then the mean of the scores.

    :param run_name: identifier written into every row
    :param image_auc: per-object image-level AUROC scores
    :param pixel_auc: per-object pixel-level AUROC scores
    :param image_ap: per-object image-level AP scores
    :param pixel_ap: per-object pixel-level AP scores
    """
    def _row(label, values):
        # One output row; mirrors the historical format exactly.
        cells = [label, run_name]
        cells += [str(np.round(v, 3)) for v in values]
        cells.append(str(np.round(np.mean(values), 3)))
        return ",".join(cells) + "\n"

    fin_str = (
        _row("img_auc", image_auc)
        + _row("pixel_auc", pixel_auc)
        + _row("img_ap", image_ap)
        + _row("pixel_ap", pixel_ap)
        + "--------------------------\n"
    )

    with open("./result/results.txt", 'a+') as file:
        file.write(fin_str)


def make_mask(mask, model):
    """Expand a per-patch mask (N, L) to image space, channel-last (N, H, W, C).

    Every patch flag is tiled over that patch's p*p*3 pixel slots and then
    rearranged through the model's ``unpatchify``. By the model's convention
    1 marks a removed patch and 0 a kept one.
    """
    pixels_per_patch = model.patch_embed.patch_size[0] ** 2 * 3
    tiled = mask.detach().unsqueeze(-1).repeat(1, 1, pixels_per_patch)  # (N, L, p*p*3)
    image_mask = model.unpatchify(tiled)  # (N, C, H, W); 1 is removing, 0 is keeping
    return torch.einsum('nchw->nhwc', image_mask).detach()


# def test(obj_names, mvtec_path, checkpoint_path, base_model_name):
#     obj_ap_pixel_list = []
#     obj_auroc_pixel_list = []
#     obj_ap_image_list = []
#     obj_auroc_image_list = []
#     for obj_name in obj_names:
#         img_dim = 224
#         run_name = base_model_name + "_" + obj_name + '_'
#
#         model = getattr(models_mae, 'mae_vit_large_patch16')()
#         model.load_state_dict(torch.load(os.path.join(checkpoint_path, run_name + ".pckl"), map_location='cuda:0'))
#         model.cuda()
#         model.eval()
#
#         model_seg = DiscriminativeSubNetwork(in_channels=18, out_channels=2)
#         model_seg.load_state_dict(
#             torch.load(os.path.join(checkpoint_path, run_name + "_seg.pckl"), map_location='cuda:0'))
#         model_seg.cuda()
#         model_seg.eval()
#         print('model loaded.')
#
#         dataset = MVTecDRAEMTestDataset(mvtec_path + obj_name + "/test/", resize_shape=[img_dim, img_dim])
#         dataloader = DataLoader(dataset, batch_size=1,
#                                 shuffle=False, num_workers=2)
#
#         total_pixel_scores = np.zeros((img_dim * img_dim * len(dataset)))
#         total_gt_pixel_scores = np.zeros((img_dim * img_dim * len(dataset)))
#         mask_cnt = 0
#
#         anomaly_score_gt = []
#         anomaly_score_prediction = []
#
#         print(len(dataloader))
#
#         for i_batch, sample_batched in enumerate(dataloader):
#             gray_batch = sample_batched["image"].cuda()
#             is_normal = sample_batched["has_anomaly"].detach().numpy()[0, 0]
#             anomaly_score_gt.append(is_normal)
#             true_mask = sample_batched["mask"]
#             true_mask_cv = true_mask.detach().numpy()[0, :, :, :].transpose((1, 2, 0))
#             x = torch.einsum('nchw->nhwc', gray_batch)
#
#             # 掩码策略
#             noise = torch.rand(1, 196, device='cuda')
#             _, gray_rec, mask = model(gray_batch, 0.8, noise)
#             print(make_mask(mask, model).shape)
#             gray_rec = model.unpatchify(gray_rec)
#             joined_in = torch.cat((gray_rec, gray_batch), dim=1)
#             concat = torch.cat((gray_rec, gray_batch), dim=3)
#             concat_mask = x * (1 - make_mask(mask, model))
#
#             m = int(196 * 0.8)
#             maskk = (noise <= noise.sort(1)[0][:, m].reshape(-1, 1)).to(dtype=torch.int32)
#             noise = torch.mul(maskk, noise)
#             _, gray_rec, mask = model(gray_batch, 0.8, noise)
#             gray_rec = model.unpatchify(gray_rec)
#             joined_in = torch.cat((gray_rec, joined_in), dim=1)
#             concat = torch.cat((gray_rec, concat), dim=3)
#             concat_mask = torch.cat((x * (1 - make_mask(mask, model)), concat_mask), dim=2)
#
#             maskk = (noise <= noise.sort(1)[0][:, m].reshape(-1, 1)).to(dtype=torch.int32)
#             noise = torch.mul(maskk, noise)
#             _, gray_rec, mask = model(gray_batch, 0.8, noise)
#             gray_rec = model.unpatchify(gray_rec)
#             joined_in = torch.cat((gray_rec, joined_in), dim=1)
#             concat = torch.cat((gray_rec, concat), dim=3)
#             concat_mask = torch.cat((x * (1 - make_mask(mask, model)), concat_mask), dim=2)
#
#             maskk = (noise <= noise.sort(1)[0][:, m].reshape(-1, 1)).to(dtype=torch.int32)
#             noise = torch.mul(maskk, noise)
#             _, gray_rec, mask = model(gray_batch, 0.8, noise)
#             gray_rec = model.unpatchify(gray_rec)
#             joined_in = torch.cat((gray_rec, joined_in), dim=1)
#             concat = torch.cat((gray_rec, concat), dim=3)
#             concat_mask = torch.cat((x * (1 - make_mask(mask, model)), concat_mask), dim=2)
#
#             maskk = (noise <= noise.sort(1)[0][:, m].reshape(-1, 1)).to(dtype=torch.int32)
#             noise = torch.mul(maskk, noise)
#             _, gray_rec, mask = model(gray_batch, 0.8, noise)
#             gray_rec = model.unpatchify(gray_rec)
#             joined_in = torch.cat((gray_rec, joined_in), dim=1)
#             concat = torch.cat((gray_rec, concat), dim=3)
#             concat_mask = torch.cat((x * (1 - make_mask(mask, model)), concat_mask), dim=2)
#
#             out_mask = model_seg(joined_in)
#             out_mask_sm = torch.softmax(out_mask, dim=1)
#             save_image_tensor(concat, './result/result' + str(i_batch) + '.jpg')
#
#             out_mask_cv = out_mask_sm[0, 1, :, :].detach().cpu().numpy()
#             out_mask_averaged = torch.nn.functional.avg_pool2d(out_mask_sm[:, 1:, :, :], 21, stride=1,
#                                                                padding=21 // 2).cpu().detach().numpy()
#             image_score = np.max(out_mask_averaged)
#             anomaly_score_prediction.append(image_score)
#
#             flat_true_mask = true_mask_cv.flatten()
#             flat_out_mask = out_mask_cv.flatten()
#
#             true_mask = true_mask.cuda()
#             out_mmm = out_mask_sm[:, 1:, :, :]
#             out_true_mask = true_mask
#             concat1 = torch.cat((out_mmm, out_true_mask), dim=3)
#             save_image_tensor(concat1, './result/result_mask' + str(i_batch) + '.jpg')
#             print(concat.shape)
#             print(concat_mask.shape)
#             save_image_tensor(torch.einsum('nhwc->nchw', concat_mask), './result/in_mask' + str(i_batch) + '.jpg')
#             print(str(i_batch) + '.jpg has done...')
#
#             total_pixel_scores[mask_cnt * img_dim * img_dim:(mask_cnt + 1) * img_dim * img_dim] = flat_out_mask
#             total_gt_pixel_scores[mask_cnt * img_dim * img_dim:(mask_cnt + 1) * img_dim * img_dim] = flat_true_mask
#             mask_cnt += 1
#
#         anomaly_score_prediction = np.array(anomaly_score_prediction)
#         anomaly_score_gt = np.array(anomaly_score_gt)
#         auroc = roc_auc_score(anomaly_score_gt, anomaly_score_prediction)
#         ap = average_precision_score(anomaly_score_gt, anomaly_score_prediction)
#
#         total_gt_pixel_scores = total_gt_pixel_scores.astype(np.uint8)
#         total_gt_pixel_scores = total_gt_pixel_scores[:img_dim * img_dim * mask_cnt]
#         total_pixel_scores = total_pixel_scores[:img_dim * img_dim * mask_cnt]
#         auroc_pixel = roc_auc_score(total_gt_pixel_scores, total_pixel_scores)
#         ap_pixel = average_precision_score(total_gt_pixel_scores, total_pixel_scores)
#         obj_ap_pixel_list.append(ap_pixel)
#         obj_auroc_pixel_list.append(auroc_pixel)
#         obj_auroc_image_list.append(auroc)
#         obj_ap_image_list.append(ap)
#         print(obj_name)
#         print("AUC Image:  " + str(auroc))
#         print("AP Image:  " + str(ap))
#         print("AUC Pixel:  " + str(auroc_pixel))
#         print("AP Pixel:  " + str(ap_pixel))
#         print("==============================")
#
#     print(run_name)
#     print("AUC Image mean:  " + str(np.mean(obj_auroc_image_list)))
#     print("AP Image mean:  " + str(np.mean(obj_ap_image_list)))
#     print("AUC Pixel mean:  " + str(np.mean(obj_auroc_pixel_list)))
#     print("AP Pixel mean:  " + str(np.mean(obj_ap_pixel_list)))
#
#     write_results_to_file(run_name, obj_auroc_image_list, obj_auroc_pixel_list, obj_ap_image_list, obj_ap_pixel_list)


def test(obj_names, mvtec_path, checkpoint_path, base_model_name):
    """Evaluate the MAE + segmentation pipeline with random masks per image.

    For each object category this loads the trained MAE reconstruction model
    and the discriminative segmentation sub-network, reconstructs every test
    image under five successively re-thresholded random patch masks, feeds
    the stacked reconstructions to the segmentation network, and reports
    image-level and pixel-level AUROC/AP.

    :param obj_names: list of MVTec category names to evaluate
    :param mvtec_path: root directory of the MVTec dataset
    :param checkpoint_path: directory containing "<run_name>.pckl" checkpoints
    :param base_model_name: checkpoint filename prefix
    """
    obj_ap_pixel_list = []
    obj_auroc_pixel_list = []
    obj_ap_image_list = []
    obj_auroc_image_list = []
    for obj_name in obj_names:
        img_dim = 224
        run_name = base_model_name + "_" + obj_name + '_'

        model = getattr(models_mae, 'mae_vit_large_patch16')()
        model.load_state_dict(torch.load(os.path.join(checkpoint_path, run_name + ".pckl"), map_location='cuda:0'))
        model.cuda()
        model.eval()

        # 5 reconstructions x 3 channels + 3 original channels = 18 inputs.
        model_seg = DiscriminativeSubNetwork(in_channels=18, out_channels=2)
        model_seg.load_state_dict(
            torch.load(os.path.join(checkpoint_path, run_name + "_seg.pckl"), map_location='cuda:0'))
        model_seg.cuda()
        model_seg.eval()
        print('model loaded.')

        dataset = MVTecDRAEMTestDataset(mvtec_path + obj_name + "/test/", resize_shape=[img_dim, img_dim])
        dataloader = DataLoader(dataset, batch_size=1,
                                shuffle=False, num_workers=2)

        total_pixel_scores = np.zeros((img_dim * img_dim * len(dataset)))
        total_gt_pixel_scores = np.zeros((img_dim * img_dim * len(dataset)))
        mask_cnt = 0

        anomaly_score_gt = []
        anomaly_score_prediction = []

        print(len(dataloader))

        for i_batch, sample_batched in enumerate(dataloader):
            gray_batch = sample_batched["image"].cuda()
            is_normal = sample_batched["has_anomaly"].detach().numpy()[0, 0]
            anomaly_score_gt.append(is_normal)
            true_mask = sample_batched["mask"]
            true_mask_cv = true_mask.detach().numpy()[0, :, :, :].transpose((1, 2, 0))
            x = torch.einsum('nchw->nhwc', gray_batch)

            # Masking strategy: first pass with fresh random per-patch noise.
            noise = torch.rand(1, 196, device='cuda')
            _, gray_rec, mask = model(gray_batch, 0.8, noise)
            gray_rec = model.unpatchify(gray_rec)
            joined_in = torch.cat((gray_rec, gray_batch), dim=1)
            concat = torch.cat((gray_rec, gray_batch), dim=3)
            concat_mask = x * (1 - make_mask(mask, model))

            # Four further passes. Before each one, the m lowest noise values
            # are pushed up to 1 so a different subset of patches is kept
            # (NOTE(review): assumes low noise == kept patch in the MAE
            # masking scheme — confirm against models_mae).
            m = int(196 * 0.2)
            for _ in range(4):
                maskk = (noise >= noise.sort(1)[0][:, m].reshape(-1, 1)).to(dtype=torch.int32)
                noise = torch.mul(maskk, noise) + abs(maskk - 1)
                _, gray_rec, mask = model(gray_batch, 0.8, noise)
                gray_rec = model.unpatchify(gray_rec)
                joined_in = torch.cat((gray_rec, joined_in), dim=1)
                concat = torch.cat((gray_rec, concat), dim=3)
                concat_mask = torch.cat((x * (1 - make_mask(mask, model)), concat_mask), dim=2)

            out_mask = model_seg(joined_in)
            out_mask_sm = torch.softmax(out_mask, dim=1)
            save_image_tensor(concat, './result/result' + str(i_batch) + '.jpg')

            # Image-level score: max of the smoothed anomaly-channel map.
            out_mask_cv = out_mask_sm[0, 1, :, :].detach().cpu().numpy()
            out_mask_averaged = torch.nn.functional.avg_pool2d(out_mask_sm[:, 1:, :, :], 21, stride=1,
                                                               padding=21 // 2).cpu().detach().numpy()
            image_score = np.max(out_mask_averaged)
            anomaly_score_prediction.append(image_score)

            flat_true_mask = true_mask_cv.flatten()
            flat_out_mask = out_mask_cv.flatten()

            true_mask = true_mask.cuda()
            out_mmm = out_mask_sm[:, 1:, :, :]
            out_true_mask = true_mask
            concat1 = torch.cat((out_mmm, out_true_mask), dim=3)
            save_image_tensor(concat1, './result/result_mask' + str(i_batch) + '.jpg')
            save_image_tensor(torch.einsum('nhwc->nchw', concat_mask), './result/in_mask' + str(i_batch) + '.jpg')
            print(str(i_batch) + '.jpg has done...')

            total_pixel_scores[mask_cnt * img_dim * img_dim:(mask_cnt + 1) * img_dim * img_dim] = flat_out_mask
            total_gt_pixel_scores[mask_cnt * img_dim * img_dim:(mask_cnt + 1) * img_dim * img_dim] = flat_true_mask
            mask_cnt += 1

        anomaly_score_prediction = np.array(anomaly_score_prediction)
        anomaly_score_gt = np.array(anomaly_score_gt)
        auroc = roc_auc_score(anomaly_score_gt, anomaly_score_prediction)
        ap = average_precision_score(anomaly_score_gt, anomaly_score_prediction)

        total_gt_pixel_scores = total_gt_pixel_scores.astype(np.uint8)
        total_gt_pixel_scores = total_gt_pixel_scores[:img_dim * img_dim * mask_cnt]
        total_pixel_scores = total_pixel_scores[:img_dim * img_dim * mask_cnt]
        auroc_pixel = roc_auc_score(total_gt_pixel_scores, total_pixel_scores)
        ap_pixel = average_precision_score(total_gt_pixel_scores, total_pixel_scores)
        obj_ap_pixel_list.append(ap_pixel)
        obj_auroc_pixel_list.append(auroc_pixel)
        obj_auroc_image_list.append(auroc)
        obj_ap_image_list.append(ap)
        print(obj_name)
        print("AUC Image:  " + str(auroc))
        print("AP Image:  " + str(ap))
        print("AUC Pixel:  " + str(auroc_pixel))
        print("AP Pixel:  " + str(ap_pixel))
        print("==============================")

    print(run_name)
    print("AUC Image mean:  " + str(np.mean(obj_auroc_image_list)))
    print("AP Image mean:  " + str(np.mean(obj_ap_image_list)))
    print("AUC Pixel mean:  " + str(np.mean(obj_auroc_pixel_list)))
    print("AP Pixel mean:  " + str(np.mean(obj_ap_pixel_list)))

    write_results_to_file(run_name, obj_auroc_image_list, obj_auroc_pixel_list, obj_ap_image_list, obj_ap_pixel_list)


def test_fixed_mask(obj_names, mvtec_path, checkpoint_path, base_model_name, mask_ratio=0.8):
    """Evaluate the MAE + segmentation pipeline with the fixed training noise.

    Like ``test`` but instead of fresh random noise per image it loads the
    noise tensor saved at training time ("origin_noise.pt") and derives four
    further noise tensors from it, so every image sees the same five masks.

    :param obj_names: list of MVTec category names to evaluate
    :param mvtec_path: root directory of the MVTec dataset
    :param checkpoint_path: directory containing "<run_name>.pckl" checkpoints
    :param base_model_name: checkpoint filename prefix
    :param mask_ratio: training mask ratio used to derive the keep threshold.
        (Previously read from a global ``args.mask_ratio`` that the CLI never
        defined, which crashed this code path; now an explicit parameter.)
    """
    # Load the fixed noise used at training time.
    origin_noise = torch.load("origin_noise.pt")
    m = int(196 * (1 - mask_ratio))  # index of the keep/remove threshold value

    # Derive four more noise tensors by repeatedly zeroing every value below
    # the current threshold.
    noises = [origin_noise]
    noise = origin_noise
    for _ in range(4):
        maskk = (noise >= noise.sort(1)[0][:, m].reshape(-1, 1)).to(dtype=torch.int32)
        noise = torch.mul(maskk, noise)
        noises.append(noise)

    obj_ap_pixel_list = []
    obj_auroc_pixel_list = []
    obj_ap_image_list = []
    obj_auroc_image_list = []
    for obj_name in obj_names:
        img_dim = 224
        run_name = base_model_name + "_" + obj_name + '_'

        model = getattr(models_mae, 'mae_vit_large_patch16')()
        model.load_state_dict(torch.load(os.path.join(checkpoint_path, run_name + ".pckl"), map_location='cuda:0'))
        model.cuda()
        model.eval()

        # 5 reconstructions x 3 channels + 3 original channels = 18 inputs.
        model_seg = DiscriminativeSubNetwork(in_channels=18, out_channels=2)
        model_seg.load_state_dict(
            torch.load(os.path.join(checkpoint_path, run_name + "_seg.pckl"), map_location='cuda:0'))
        model_seg.cuda()
        model_seg.eval()
        print('model loaded.')
        dataset = MVTecDRAEMTestDataset(mvtec_path + obj_name + "/test/", resize_shape=[img_dim, img_dim])
        dataloader = DataLoader(dataset, batch_size=1, shuffle=False, num_workers=2)

        total_pixel_scores = np.zeros((img_dim * img_dim * len(dataset)))
        total_gt_pixel_scores = np.zeros((img_dim * img_dim * len(dataset)))
        mask_cnt = 0

        anomaly_score_gt = []
        anomaly_score_prediction = []

        print(len(dataloader))

        for i_batch, sample_batched in enumerate(dataloader):
            gray_batch = sample_batched["image"].cuda()
            is_normal = sample_batched["has_anomaly"].detach().numpy()[0, 0]
            anomaly_score_gt.append(is_normal)
            true_mask = sample_batched["mask"]
            true_mask_cv = true_mask.detach().numpy()[0, :, :, :].transpose((1, 2, 0))

            # First reconstruction pass seeds the stacked tensors.
            _, gray_rec, mask = model(gray_batch, 0.8, noises[0])
            gray_rec = model.unpatchify(gray_rec)
            joined_in = torch.cat((gray_rec, gray_batch), dim=1)
            concat = torch.cat((gray_rec, gray_batch), dim=3)

            # Remaining passes reuse the precomputed fixed noise tensors.
            for fixed_noise in noises[1:]:
                _, gray_rec, mask = model(gray_batch, 0.8, fixed_noise)
                gray_rec = model.unpatchify(gray_rec)
                joined_in = torch.cat((gray_rec, joined_in), dim=1)
                concat = torch.cat((gray_rec, concat), dim=3)

            out_mask = model_seg(joined_in)
            out_mask_sm = torch.softmax(out_mask, dim=1)
            save_image_tensor(concat, './result/result' + str(i_batch) + '.jpg')

            # Image-level score: max of the smoothed anomaly-channel map.
            out_mask_cv = out_mask_sm[0, 1, :, :].detach().cpu().numpy()
            out_mask_averaged = torch.nn.functional.avg_pool2d(out_mask_sm[:, 1:, :, :], 21, stride=1,
                                                               padding=21 // 2).cpu().detach().numpy()
            image_score = np.max(out_mask_averaged)
            anomaly_score_prediction.append(image_score)

            flat_true_mask = true_mask_cv.flatten()
            flat_out_mask = out_mask_cv.flatten()

            true_mask = true_mask.cuda()
            out_mmm = out_mask_sm[:, 1:, :, :]
            out_true_mask = true_mask
            concat1 = torch.cat((out_mmm, out_true_mask), dim=3)
            save_image_tensor(concat1, './result/result_mask' + str(i_batch) + '.jpg')
            print(str(i_batch) + '.jpg has done...')

            total_pixel_scores[mask_cnt * img_dim * img_dim:(mask_cnt + 1) * img_dim * img_dim] = flat_out_mask
            total_gt_pixel_scores[mask_cnt * img_dim * img_dim:(mask_cnt + 1) * img_dim * img_dim] = flat_true_mask
            mask_cnt += 1

        anomaly_score_prediction = np.array(anomaly_score_prediction)
        anomaly_score_gt = np.array(anomaly_score_gt)
        auroc = roc_auc_score(anomaly_score_gt, anomaly_score_prediction)
        ap = average_precision_score(anomaly_score_gt, anomaly_score_prediction)

        total_gt_pixel_scores = total_gt_pixel_scores.astype(np.uint8)
        total_gt_pixel_scores = total_gt_pixel_scores[:img_dim * img_dim * mask_cnt]
        total_pixel_scores = total_pixel_scores[:img_dim * img_dim * mask_cnt]
        auroc_pixel = roc_auc_score(total_gt_pixel_scores, total_pixel_scores)
        ap_pixel = average_precision_score(total_gt_pixel_scores, total_pixel_scores)
        obj_ap_pixel_list.append(ap_pixel)
        obj_auroc_pixel_list.append(auroc_pixel)
        obj_auroc_image_list.append(auroc)
        obj_ap_image_list.append(ap)
        print(obj_name)
        print("AUC Image:  " + str(auroc))
        print("AP Image:  " + str(ap))
        print("AUC Pixel:  " + str(auroc_pixel))
        print("AP Pixel:  " + str(ap_pixel))
        print("==============================")

    print(run_name)
    print("AUC Image mean:  " + str(np.mean(obj_auroc_image_list)))
    print("AP Image mean:  " + str(np.mean(obj_ap_image_list)))
    print("AUC Pixel mean:  " + str(np.mean(obj_auroc_pixel_list)))
    print("AP Pixel mean:  " + str(np.mean(obj_ap_pixel_list)))
    write_results_to_file(run_name, obj_auroc_image_list, obj_auroc_pixel_list, obj_ap_image_list, obj_ap_pixel_list)


if __name__ == "__main__":
    import datetime
    print(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") + ' run ' + __file__.split('/')[-1])
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('--data_set', action='store', type=str,
                        default='/kaggle/input/mvtecad-mvtec-anomaly-detection/mvtec_anomaly_detection/')
    parser.add_argument('--base_model_name', action='store', type=str, default="DRAEM_test_0.0001_200_bs8")
    parser.add_argument('--obj_name', action='store', type=str, default='transistor')
    parser.add_argument('--checkpoint_path', action='store', type=str, default='./checkpoints/')
    parser.add_argument('--random_mask', action='store', type=bool, default=True)
    args = parser.parse_args()

    with torch.cuda.device(0):
        with torch.no_grad():
            if args.random_mask:
                test([args.obj_name], args.data_set, args.checkpoint_path, args.base_model_name)
            else:
                test_fixed_mask([args.obj_name], args.data_set, args.checkpoint_path, args.base_model_name)

