import torch
from torchvision import utils as vutils
from util.dataset import MVTecDRAEMTrainDataset
from module import models_mae as models_mae
from torch.utils.data import DataLoader
from torch import optim
from module.Unet import DiscriminativeSubNetwork
from util.loss import FocalLoss, SSIM
import os
import time


def save_image_tensor(input_tensor: torch.Tensor, filename):
    """Save a single-image tensor to an image file.

    :param input_tensor: tensor to save; must be 4-D with batch size 1
                         (shape ``[1, C, H, W]``)
    :param filename: destination file path
    :raises ValueError: if the tensor is not a 4-D, batch-size-1 tensor
    """
    # Validate explicitly instead of `assert` (asserts vanish under `python -O`).
    if input_tensor.dim() != 4 or input_tensor.shape[0] != 1:
        raise ValueError(
            "expected a 4-D tensor with batch size 1, got shape "
            f"{tuple(input_tensor.shape)}")
    # Detach from the graph and copy so the caller's tensor is untouched,
    # then move to CPU for saving.
    input_tensor = input_tensor.detach().clone().cpu()
    vutils.save_image(input_tensor, filename)


def prepare_model(chkpt_dir, arch='mae_vit_large_patch16'):
    """Build an MAE model and load pretrained weights from a checkpoint.

    :param chkpt_dir: path to the checkpoint (.pth) file
    :param arch: name of a model constructor in ``module.models_mae``
    :return: the model with checkpoint weights loaded (on CPU; callers
             are expected to move it to GPU themselves)
    """
    # Build the architecture by name.
    model = getattr(models_mae, arch)()
    # Load onto CPU: load_state_dict copies into the CPU-built model anyway,
    # and mapping straight to CUDA would waste GPU memory and fail on
    # CPU-only hosts. Callers .cuda() the model afterwards.
    checkpoint = torch.load(chkpt_dir, map_location='cpu')
    # strict=False: the visualization checkpoint may lack some decoder keys.
    msg = model.load_state_dict(checkpoint['model'], strict=False)
    print(msg)
    return model


def get_lr(optimizer):
    """Return the learning rate of the optimizer's first parameter group,
    or None if the optimizer has no parameter groups."""
    groups = optimizer.param_groups
    return groups[0]['lr'] if groups else None


def weights_init(m):
    """DCGAN-style initializer for use with ``module.apply``.

    Conv* layers get weights ~ N(0, 0.02); BatchNorm* layers get
    weights ~ N(1, 0.02) and zero bias. All other layer types are
    left untouched.
    """
    layer_type = type(m).__name__
    if 'Conv' in layer_type:
        m.weight.data.normal_(0.0, 0.02)
    elif 'BatchNorm' in layer_type:
        m.weight.data.normal_(1.0, 0.02)
        m.bias.data.fill_(0)


def train_on_device(obj_names, args):
    """Train the MAE reconstruction model plus the discriminative
    segmentation sub-network on each object category, with a fresh
    random patch-mask noise sampled per batch.

    For every batch the MAE runs 5 passes: the first with the raw noise
    and each subsequent pass with the noise progressively thresholded so
    a different subset of patches is masked. The 5 reconstructions plus
    the augmented input (5*3 + 3 = 18 channels) feed the segmentation
    net. Checkpoints are written to ``args.checkpoint_path``.

    :param obj_names: list of MVTec category names to train on
    :param args: parsed CLI arguments (lr, epochs, bs, data paths,
                 mask_ratio, show_loss, ...)
    """
    if not os.path.exists(args.checkpoint_path):
        os.makedirs(args.checkpoint_path)

    for obj_name in obj_names:
        run_name = 'DRAEM_test_' + str(args.lr) + '_' + str(args.epochs) + '_bs' + str(args.bs) + "_" + obj_name + '_'

        if args.mae_model is None:
            # Download the official MAE visualization checkpoint if none supplied.
            os.system("wget -nc https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_large_ganloss.pth")
            model = prepare_model('mae_visualize_vit_large_ganloss.pth', 'mae_vit_large_patch16')
        else:
            model = torch.load(args.mae_model)
        model.cuda()

        # 18 input channels: 5 reconstructions + the augmented image, 3 channels each.
        model_seg = DiscriminativeSubNetwork(in_channels=18, out_channels=2)
        model_seg.cuda()
        model_seg.apply(weights_init)

        optimizer = torch.optim.Adam([
            {"params": model.parameters(), "lr": args.lr},
            {"params": model_seg.parameters(), "lr": args.lr}])
        # BUG FIX: MultiStepLR milestones must be integers. Float milestones
        # (args.epochs * 0.6) never equal the integer epoch counter, so the
        # learning rate would never actually decay.
        scheduler = optim.lr_scheduler.MultiStepLR(
            optimizer,
            [int(args.epochs * 0.6), int(args.epochs * 0.8)],
            gamma=0.2, last_epoch=-1)
        loss_l2 = torch.nn.modules.loss.MSELoss()
        loss_ssim = SSIM()
        loss_focal = FocalLoss()

        dataset = MVTecDRAEMTrainDataset(args.data_path + obj_name + "/train/good/", args.anomaly_source_path,
                                         resize_shape=[224, 224])
        dataloader = DataLoader(dataset, batch_size=args.bs,
                                shuffle=True, num_workers=2)

        n_iter = 0
        total_l2_loss = 0
        total_ssim_loss = 0
        total_segment_loss = 0

        # Number of patches kept visible out of the 14x14 = 196 patch grid.
        m = int(196 * (1 - args.mask_ratio))

        start_time = time.time()
        for epoch in range(args.epochs):
            if epoch % 100 == 99:
                print("Epoch: " + str(epoch + 1) + '   time cost:' + str(time.time() - start_time))
            for i_batch, sample_batched in enumerate(dataloader):
                gray_batch = sample_batched["image"].cuda()
                aug_gray_batch = sample_batched["augmented_image"].cuda()
                anomaly_mask = sample_batched["anomaly_mask"].cuda()

                # Per-patch noise controls which patches the MAE masks.
                noise = torch.rand(anomaly_mask.shape[0], 196, device='cuda')

                joined_in = aug_gray_batch
                l2_loss = 0.0
                ssim_loss = 0.0
                # 5 reconstruction passes (previously 5 copy-pasted stanzas).
                # After the first pass, zero out every noise value below the
                # m-th largest so a different patch subset is masked each time.
                for pass_idx in range(5):
                    if pass_idx > 0:
                        keep = (noise >= noise.sort(1)[0][:, m].reshape(-1, 1)).to(dtype=torch.int32)
                        noise = torch.mul(keep, noise)
                    _, gray_rec, mask = model(aug_gray_batch, args.mask_ratio, noise)
                    gray_rec = model.unpatchify(gray_rec)
                    # Newest reconstruction is stacked in front of the previous channels.
                    joined_in = torch.cat((gray_rec, joined_in), dim=1)
                    l2_loss = l2_loss + loss_l2(gray_rec, gray_batch)
                    ssim_loss = ssim_loss + loss_ssim(gray_rec, gray_batch)

                out_mask = model_seg(joined_in)
                out_mask_sm = torch.softmax(out_mask, dim=1)
                segment_loss = loss_focal(out_mask_sm, anomaly_mask)

                loss = l2_loss + ssim_loss + segment_loss
                total_ssim_loss += ssim_loss.item()
                total_segment_loss += segment_loss.item()
                total_l2_loss += l2_loss.item()

                optimizer.zero_grad()
                loss.backward()
                optimizer.step()

                if n_iter == args.show_loss:
                    # l2/ssim accumulate 5 passes per iteration, hence the 5x divisor.
                    print('l2: ', round(total_l2_loss / (5 * args.show_loss), 6),
                          ' ssim: ', round(total_ssim_loss / (5 * args.show_loss), 6),
                          ' seg loss: ', round(total_segment_loss / args.show_loss, 6),
                          ' time cost: ', int(time.time() - start_time))
                    total_ssim_loss = 0
                    total_segment_loss = 0
                    total_l2_loss = 0
                    n_iter = 0
                n_iter += 1
            scheduler.step()

        torch.save(model.state_dict(), os.path.join(args.checkpoint_path, run_name + ".pckl"))
        torch.save(model_seg.state_dict(), os.path.join(args.checkpoint_path, run_name + "_seg.pckl"))
        print('model saved...')
    return


def train_on_device_fixed_mask(obj_names, args):
    """Same training loop as ``train_on_device`` but with one fixed random
    noise (and its 4 progressively thresholded variants) shared by every
    batch and every epoch, so the MAE always masks the same patch subsets.

    The base noise is saved to ``origin_noise.pt`` so inference can
    reproduce the exact masks.

    :param obj_names: list of MVTec category names to train on
    :param args: parsed CLI arguments (lr, epochs, bs, data paths,
                 mask_ratio, show_loss, ...)
    """
    # Number of patches kept visible out of the 14x14 = 196 patch grid.
    m = int(196 * (1 - args.mask_ratio))

    # One fixed noise for the whole run; persist it for reproducibility.
    origin_noise = torch.rand(1, 196, device='cuda')
    torch.save(origin_noise, "origin_noise.pt")

    # Pre-compute the 5 noise variants used per batch: the raw noise plus
    # 4 rounds of zeroing everything below the m-th largest value
    # (previously 4 copy-pasted stanzas producing noise1..noise4).
    noises = [origin_noise]
    temp_noise = origin_noise
    for _ in range(4):
        keep = (temp_noise >= temp_noise.sort(1)[0][:, m].reshape(-1, 1)).to(dtype=torch.int32)
        temp_noise = torch.mul(keep, temp_noise)
        noises.append(temp_noise)

    if not os.path.exists(args.checkpoint_path):
        os.makedirs(args.checkpoint_path)

    # train
    start_time = time.time()
    for obj_name in obj_names:
        run_name = 'DRAEM_test_' + str(args.lr) + '_' + str(args.epochs) + '_bs' + str(args.bs) + "_" + obj_name + '_'

        if args.mae_model is None:
            # Download the official MAE visualization checkpoint if none supplied.
            os.system("wget -nc https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_large_ganloss.pth")
            model = prepare_model('mae_visualize_vit_large_ganloss.pth', 'mae_vit_large_patch16')
        else:
            model = torch.load(args.mae_model)
        model.cuda()

        # 18 input channels: 5 reconstructions + the augmented image, 3 channels each.
        model_seg = DiscriminativeSubNetwork(in_channels=18, out_channels=2)
        model_seg.cuda()
        model_seg.apply(weights_init)

        optimizer = torch.optim.Adam([
            {"params": model.parameters(), "lr": args.lr},
            {"params": model_seg.parameters(), "lr": args.lr}])

        # BUG FIX: MultiStepLR milestones must be integers. Float milestones
        # never equal the integer epoch counter, so the LR would never decay.
        scheduler = optim.lr_scheduler.MultiStepLR(
            optimizer,
            [int(args.epochs * 0.6), int(args.epochs * 0.8)],
            gamma=0.2, last_epoch=-1)
        loss_l2 = torch.nn.modules.loss.MSELoss()
        loss_ssim = SSIM()
        loss_focal = FocalLoss()

        dataset = MVTecDRAEMTrainDataset(args.data_path + obj_name + "/train/good/", args.anomaly_source_path,
                                         resize_shape=[224, 224])
        dataloader = DataLoader(dataset, batch_size=args.bs,
                                shuffle=True, num_workers=2)

        n_iter = 0
        total_l2_loss = 0
        total_ssim_loss = 0
        total_segment_loss = 0

        for epoch in range(args.epochs):
            if epoch % 100 == 99:
                print("Epoch: " + str(epoch + 1) + '   time cost:' + str(time.time() - start_time))
            for i_batch, sample_batched in enumerate(dataloader):
                gray_batch = sample_batched["image"].cuda()
                aug_gray_batch = sample_batched["augmented_image"].cuda()
                anomaly_mask = sample_batched["anomaly_mask"].cuda()

                # BUG FIX: this broadcast helper must live on the same device
                # as the noise tensors; a CPU tensor multiplied with CUDA noise
                # raises a device-mismatch RuntimeError on the first batch.
                increase_dim = torch.tensor([[1]] * anomaly_mask.shape[0], device='cuda')

                joined_in = aug_gray_batch
                l2_loss = 0.0
                ssim_loss = 0.0
                # 5 reconstruction passes, one per pre-computed noise variant.
                for fixed_noise in noises:
                    # Broadcast the (1, 196) noise to (batch, 196).
                    noise = torch.mul(increase_dim, fixed_noise)
                    _, gray_rec, mask = model(aug_gray_batch, args.mask_ratio, noise)
                    gray_rec = model.unpatchify(gray_rec)
                    # Newest reconstruction is stacked in front of the previous channels.
                    joined_in = torch.cat((gray_rec, joined_in), dim=1)
                    l2_loss = l2_loss + loss_l2(gray_rec, gray_batch)
                    ssim_loss = ssim_loss + loss_ssim(gray_rec, gray_batch)

                out_mask = model_seg(joined_in)
                out_mask_sm = torch.softmax(out_mask, dim=1)
                segment_loss = loss_focal(out_mask_sm, anomaly_mask)

                loss = l2_loss + ssim_loss + segment_loss
                total_ssim_loss += ssim_loss.item()
                total_segment_loss += segment_loss.item()
                total_l2_loss += l2_loss.item()

                optimizer.zero_grad()

                loss.backward()
                optimizer.step()

                if n_iter == args.show_loss:
                    # l2/ssim accumulate 5 passes per iteration, hence the 5x divisor.
                    print('l2: ', round(total_l2_loss / (5 * args.show_loss), 6),
                          ' ssim: ', round(total_ssim_loss / (5 * args.show_loss), 6),
                          ' seg loss: ', round(total_segment_loss / args.show_loss, 6),
                          ' time cost: ', int(time.time() - start_time))
                    total_ssim_loss = 0
                    total_segment_loss = 0
                    total_l2_loss = 0
                    n_iter = 0
                n_iter += 1
            scheduler.step()

        torch.save(model.state_dict(), os.path.join(args.checkpoint_path, run_name + ".pckl"))
        torch.save(model_seg.state_dict(), os.path.join(args.checkpoint_path, run_name + "_seg.pckl"))
        print('model saved...')
    return


if __name__ == "__main__":
    import argparse
    import datetime

    def _str2bool(value):
        """Parse a CLI boolean string.

        BUG FIX: argparse's ``type=bool`` treats ANY non-empty string
        (including the literal 'False') as True, so the fixed-mask branch
        was unreachable from the command line.
        """
        return str(value).strip().lower() in ('1', 'true', 't', 'yes', 'y')

    print(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") + ' run ' + __file__.split('/')[-1])
    parser = argparse.ArgumentParser()
    parser.add_argument('--obj_name', action='store', type=str, required=True)
    parser.add_argument('--bs', action='store', type=int, default=8)
    parser.add_argument('--lr', action='store', type=float, default=1e-4)
    parser.add_argument('--epochs', action='store', type=int, default=500)
    parser.add_argument('--gpu_id', action='store', type=int, default=0)
    parser.add_argument('--data_path', action='store', type=str, required=True)
    parser.add_argument('--anomaly_source_path', action='store', type=str, required=True)
    parser.add_argument('--checkpoint_path', action='store', type=str, default='./checkpoints/')
    parser.add_argument('--mae_model', action='store', type=str, default=None)
    parser.add_argument('--mask_ratio', action='store', type=float, default=0.8)
    parser.add_argument('--show_loss', action='store', type=int, default=100)
    parser.add_argument('--random_mask', action='store', type=_str2bool, default=True)
    args = parser.parse_args()

    if args.obj_name == 'all':
        # All 15 MVTec AD categories.
        obj_list = ['capsule',
                    'bottle',
                    'carpet',
                    'leather',
                    'pill',
                    'transistor',
                    'tile',
                    'cable',
                    'zipper',
                    'toothbrush',
                    'metal_nut',
                    'hazelnut',
                    'screw',
                    'grid',
                    'wood'
                    ]
        picked_classes = obj_list
    else:
        picked_classes = [args.obj_name]

    with torch.cuda.device(args.gpu_id):
        print(args.obj_name, ' start training.')
        if args.random_mask:
            train_on_device(picked_classes, args)
        else:
            train_on_device_fixed_mask(picked_classes, args)
