from anomaly.utils.kdad_utils import get_config
from anomaly.datasets.mvtec import load_data
from pathlib import Path
from torch.autograd import Variable
import pickle
from anomaly.tools.kdad_test import detection_test,localization_test
from anomaly.losses import DirectionOnlyLoss, MseDirectionLoss
from argparse import ArgumentParser
from anomaly.models import get_networks
import torch
from tqdm import tqdm

# Command-line interface: a single --config flag pointing at the YAML file.
parser = ArgumentParser()
parser.add_argument(
    '--config',
    type=str,
    default='configs/config_kdad.yaml',
    help="training configuration",
)

# Prefer the GPU when one is available, otherwise fall back to CPU.
device = 'cuda' if torch.cuda.is_available() else 'cpu'


def train(config):
    """Train the student network to mimic the teacher's features (KDAD).

    A pretrained teacher ("vgg") produces target feature maps; the student
    ("model") is optimized on normal-class data to reproduce them.  Every 10
    epochs the localization/detection ROC-AUC is evaluated, and every 50
    epochs the student weights, optimizer state, and AUC history are saved.

    Args:
        config: dict-like configuration loaded from YAML.  Keys read here:
            direction_loss_only, normal_class, learning_rate, num_epochs,
            lamda, continue_train, last_checkpoint, weights_dir.
    """
    direction_loss_only = config["direction_loss_only"]
    normal_class = config["normal_class"]
    learning_rate = float(config['learning_rate'])
    num_epochs = config["num_epochs"]
    lamda = config['lamda']
    continue_train = config['continue_train']
    last_checkpoint = config['last_checkpoint']

    checkpoint_path = config["weights_dir"]

    # Make sure the checkpoint directory exists before we try to save into it.
    Path(checkpoint_path).mkdir(parents=True, exist_ok=True)

    train_dataloader, test_dataloader = load_data(config)  # single normal class

    # "vgg" is the teacher network, "model" is the trainable student.
    if continue_train:
        vgg, model = get_networks(config, load_checkpoint=True)
    else:
        vgg, model = get_networks(config)

    # Criterion: direction-only loss, or MSE combined with direction loss
    # weighted by lamda.
    if direction_loss_only:
        criterion = DirectionOnlyLoss()
    else:
        criterion = MseDirectionLoss(lamda)

    optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
    if continue_train:
        optimizer.load_state_dict(
            torch.load('{}/Opt_{}_epoch_{}.pth'.format(checkpoint_path, normal_class, last_checkpoint)))

    losses = []
    roc_aucs = []
    if continue_train:
        with open('{}/Auc_{}_epoch_{}.pickle'.format(checkpoint_path, normal_class, last_checkpoint), 'rb') as f:
            roc_aucs = pickle.load(f)

    for epoch in range(num_epochs + 1):
        model.train()
        epoch_loss = 0
        for data in train_dataloader:
            X = data[0]
            # Grayscale inputs are tiled to 3 channels to match the teacher.
            if X.shape[1] == 1:
                X = X.repeat(1, 3, 1, 1)
            # BUG FIX: move to the module-level `device` instead of the old
            # Variable(X).cuda(), which crashed on CPU-only hosts and used the
            # deprecated torch.autograd.Variable wrapper.
            X = X.to(device)

            # Call the modules directly so forward hooks fire (idiomatic
            # replacement for model.forward(X)).
            output_pred = model(X)
            output_real = vgg(X)

            total_loss = criterion(output_pred, output_real)

            # Track per-batch loss for the epoch summary.
            epoch_loss += total_loss.item()
            losses.append(total_loss.item())

            optimizer.zero_grad()
            total_loss.backward()
            optimizer.step()

        print('epoch [{}/{}], loss:{:.4f}'.format(epoch + 1, num_epochs, epoch_loss))

        # Evaluate localization/detection ROC-AUC every 10 epochs.
        if epoch % 10 == 0:
            roc_auc_loc = localization_test(model=model, vgg=vgg, test_dataloader=test_dataloader,
                                            config=config)
            roc_auc_det = detection_test(model=model, vgg=vgg, test_dataloader=test_dataloader,
                                         config=config)
            # Keep the history so it can be checkpointed and resumed.
            roc_aucs.append({'epoch': epoch, 'loc': roc_auc_loc, 'det': roc_auc_det})
            print("RocAUC_loc after {} epoch:".format(epoch), roc_auc_loc)
            print("RocAUC_det after {} epoch:".format(epoch), roc_auc_det)

        if epoch % 50 == 0:
            torch.save(model.state_dict(),
                       '{}/Cloner_{}_epoch_{}.pth'.format(checkpoint_path, normal_class, epoch))
            torch.save(optimizer.state_dict(),
                       '{}/Opt_{}_epoch_{}.pth'.format(checkpoint_path, normal_class, epoch))
            # BUG FIX: persist the AUC history.  Resuming (continue_train)
            # reads this pickle above, but the code that wrote it was commented
            # out, so every resume failed with FileNotFoundError.
            with open('{}/Auc_{}_epoch_{}.pickle'.format(checkpoint_path, normal_class, epoch),
                      'wb') as f:
                pickle.dump(roc_aucs, f)


def main():
    """Parse CLI arguments, load the YAML configuration, and launch training."""
    cli_args = parser.parse_args()
    cfg = get_config(cli_args.config)
    train(cfg)


# Script entry point: only run training when executed directly, not on import.
if __name__ == '__main__':
    main()
