import time
import numpy as np
import torch
from torch.cuda.amp import autocast as autocast, GradScaler
from models.simCNN_contrastive import *
from models.ContrasiveLoss_SoftLabel import *
from evaluation import openset_eval_contrastive_logits
from utils import load_ImageNet200, load_ImageNet200_contrastive, get_smooth_labels
from mixup import *
from dataset import TFF, NPS
from models.Graph_Representation import tff_graph_representation, nps_graph_representation
import argparse
from torch_geometric.loader import DataLoader


# os.environ["CUDA_VISIBLE_DEVICES"] = "1"

def get_args():
    """Build and parse the command-line arguments for OSR training/evaluation.

    Returns:
        argparse.Namespace: parsed arguments. Defaults are tuned for the
        NPS dataset experiment driven by this script.
    """
    parser = argparse.ArgumentParser(description='PyTorch OSR Example')
    parser.add_argument('--batch_size', type=int, default=64, help='input batch size for training (default: 64)')
    parser.add_argument('--num_classes', type=int, default=10, help='number of classes')
    parser.add_argument('--epochs', type=int, default=20, help='number of epochs to train (default: 20)')
    parser.add_argument('--lr', type=float, default=0.001, help='learning rate (default: 1e-3)')
    parser.add_argument('--wd', type=float, default=0.00, help='weight decay')
    parser.add_argument('--momentum', type=float, default=0.01, help='momentum (default: 0.01)')
    parser.add_argument('--decreasing_lr', default='60,100,150', help='decreasing strategy')
    parser.add_argument('--lr_decay', type=float, default=0.1, help='decreasing strategy')
    parser.add_argument('--seed', type=int, default=117, help='random seed (default: 117)')
    parser.add_argument('--seed_sampler', type=str, default='777 1234 2731 3925 5432',
                        help='random seed for dataset sampler')
    parser.add_argument('--log_interval', type=int, default=20,
                        help='how many batches to wait before logging training status')
    parser.add_argument('--val_interval', type=int, default=5, help='how many epochs to wait before another val')
    parser.add_argument('--test_interval', type=int, default=5, help='how many epochs to wait before another test')
    # NOTE(review): kept the historical "--lamda" spelling and int type so
    # existing launch scripts and checkpoints stay compatible.
    parser.add_argument('--lamda', type=int, default=100, help='lamda in loss function')
    parser.add_argument('--beta_z', type=int, default=1, help='beta of the kl in loss function')
    parser.add_argument('--beta_anneal', type=int, default=0, help='the anneal epoch of beta')
    parser.add_argument('--threshold', type=float, default=0.5, help='threshold of gaussian model')
    parser.add_argument('--debug', action="store_true", default=False, help='If debug mode')

    # train
    parser.add_argument('--dataset', type=str, default="NPS", help='The dataset going to use')
    parser.add_argument('--eval', action="store_true", default=False, help='directly eval?')
    parser.add_argument('--baseline', action="store_true", default=False, help='If is the baseline?')  # False
    parser.add_argument('--use_model', action="store_true", default=False, help='If use model to get the train feature')
    parser.add_argument('--encode_z', type=int, default=None, help='If encode z and dim of z')  # None
    parser.add_argument("--contrastive_loss", action="store_true", default=False, help="Use contrastive loss")  # False
    parser.add_argument("--temperature", type=float, default=1.0, help="Temperature for contrastive loss")  # 1.0
    parser.add_argument("--contra_lambda", type=float, default=1.0, help="Scaling factor of contrastive loss")
    parser.add_argument("--save_epoch", type=int, default=None, help="save model in this epoch")
    parser.add_argument("--exp", type=int, default=0, help="which experiment")
    parser.add_argument("--unseen_num", type=int, default=13, help="unseen class num in CIFAR100")

    # test
    parser.add_argument('--cf', action="store_true", default=False, help='use counterfactual generation')
    parser.add_argument('--cf_threshold', action="store_true", default=False,
                        help='use counterfactual threshold in revise_cf')
    parser.add_argument('--yh', action="store_true", default=False, help='use yh rather than feature_y_mean')
    parser.add_argument('--use_model_gau', action="store_true", default=False, help='use feature by model in gau')

    args = parser.parse_args()
    return args


def control_seed(args):
    """Seed every RNG used by the script for reproducible runs.

    Seeds NumPy and PyTorch (CPU and, when available, all CUDA devices)
    with ``args.seed`` and forces deterministic cuDNN kernels.

    Side effect: records CUDA availability on ``args.cuda``.
    """
    seed = args.seed
    np.random.seed(seed)
    torch.manual_seed(seed)
    args.cuda = torch.cuda.is_available()
    if args.cuda:
        torch.cuda.manual_seed_all(seed)
    torch.backends.cudnn.deterministic = True


if __name__ == '__main__':
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    args = get_args()
    control_seed(args)

    # Closed-set class ids used for supervised-contrastive pre-training.
    classid_training = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]

    # Hyper-parameters of the contrastive pre-training stage.
    lr = 0.001
    num_contrastive_epochs = 10
    temperature = 0.1
    label_smoothing_coeff = 0.2
    model_folder_path = './saved_models/'

    load_dataset = NPS.NPS_Dataset()
    args.num_classes = 12
    ggr = nps_graph_representation()

    args.run_idx = 0
    # Only the first sampler seed from the space-separated list is used here.
    seed_sampler = int(args.seed_sampler.split(' ')[0])

    # val/test splits are produced by the sampler but only training is done
    # in this script; evaluation happens elsewhere.
    train_dataset, val_dataset, test_dataset = load_dataset.sampler(seed_sampler, args)

    train_loader = DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True, num_workers=0,
                              drop_last=True)

    feature_encoder = simCNN_contrastive(classid_list=classid_training, head='linear')
    feature_encoder.to(device)
    # base_temperature == temperature keeps the SupCon loss unscaled.
    criterion = SupConLoss(temperature=temperature, base_temperature=temperature)
    criterion.to(device)

    optimizer = torch.optim.Adam(feature_encoder.parameters(), lr=lr)
    scheduler = torch.optim.lr_scheduler.CosineAnnealingWarmRestarts(optimizer, T_0=10, T_mult=2, eta_min=lr * 1e-3)

    # Mixed-precision gradient scaling for the autocast forward passes below.
    scaler = GradScaler()

    print("==> Training Class: ", classid_training)

    for epoch in range(1, num_contrastive_epochs + 1):
        feature_encoder.train()
        time1 = time.time()
        total_loss = 0.0
        for i, (data, labels) in enumerate(train_loader):
            data = ggr.get_rep(data)
            # Two identical "views" of each sample; SupCon expects paired views.
            data = [data, data]
            targets = get_smooth_labels(labels, classid_training, label_smoothing_coeff)
            # Only the mixed samples/labels are used; per-pair labels and the
            # mixing coefficient are discarded.
            data_mixup, targets_mixup, _, _, _ = mixup_data_contrastive(data, targets, alpha=1,
                                                                        use_cuda=False)
            data = torch.cat([data[0], data[1]], dim=0)
            data = data.to(device)
            targets = targets.to(device)

            data_mixup = torch.cat([data_mixup[0], data_mixup[1]], dim=0)
            data_mixup = data_mixup.to(device)
            targets_mixup = targets_mixup.to(device)

            bsz = targets.shape[0]

            optimizer.zero_grad()
            with autocast():
                # Reshape the concatenated views back to (bsz, n_views, dim)
                # as required by SupConLoss.
                logits = feature_encoder(data)
                logits1, logits2 = torch.split(logits, [bsz, bsz], dim=0)
                logits = torch.cat([logits1.unsqueeze(1), logits2.unsqueeze(1)], dim=1)

                logits_mixup = feature_encoder(data_mixup)
                logits3, logits4 = torch.split(logits_mixup, [bsz, bsz], dim=0)
                logits_mixup = torch.cat([logits3.unsqueeze(1), logits4.unsqueeze(1)], dim=1)

                # Clean and mixup samples contribute jointly to one loss.
                logits_combine = torch.cat([logits, logits_mixup], dim=0)
                targets_combine = torch.cat([targets, targets_mixup], dim=0)
                loss = criterion(logits_combine, targets_combine)

            scaler.scale(loss).backward()
            scaler.step(optimizer)
            scaler.update()
            # .item() detaches from the autograd graph; accumulating the raw
            # tensor would keep every batch's graph alive and leak GPU memory.
            total_loss += loss.item()

        print('epoch {}: contrastive_loss = {:.3f},  '.format(epoch, total_loss))
        time2 = time.time()
        scheduler.step()
        print('time for this epoch: {:.3f} minutes'.format((time2 - time1) / 60.0))

    # Saves the full module object (not just state_dict), matching how the
    # downstream evaluation code loads it.
    torch.save(feature_encoder, model_folder_path + 'nps_encoder.pt')
