import time
import numpy as np
import torch
from torch.cuda.amp import autocast as autocast, GradScaler
from models.simCNN_contrastive import *
from models.ContrasiveLoss_SoftLabel import *
from evaluation import openset_eval_contrastive_logits
from utils import load_ImageNet200, load_ImageNet200_contrastive, get_smooth_labels
from mixup import *
from dataset import TFF, NPS
from models.Graph_Representation import tff_graph_representation, nps_graph_representation
import argparse
from torch_geometric.loader import DataLoader


# os.environ["CUDA_VISIBLE_DEVICES"] = "1"

def get_args():
    """Build and parse the command-line arguments for OSR training/eval.

    Returns:
        argparse.Namespace: parsed arguments. All flag names and default
        values are unchanged; only inaccurate help texts were corrected
        (epochs/momentum/seed defaults, 'baseline' typo).
    """
    parser = argparse.ArgumentParser(description='PyTorch OSR Example')
    parser.add_argument('--batch_size', type=int, default=64, help='input batch size for training (default: 64)')
    parser.add_argument('--num_classes', type=int, default=10, help='number of classes')
    parser.add_argument('--epochs', type=int, default=20, help='number of epochs to train (default: 20)')
    parser.add_argument('--lr', type=float, default=0.001, help='learning rate (default: 1e-3)')
    parser.add_argument('--wd', type=float, default=0.00, help='weight decay')
    parser.add_argument('--momentum', type=float, default=0.01, help='momentum (default: 0.01)')
    parser.add_argument('--decreasing_lr', default='60,100,150', help='decreasing strategy')
    parser.add_argument('--lr_decay', type=float, default=0.1, help='decreasing strategy')
    parser.add_argument('--seed', type=int, default=117, help='random seed (default: 117)')
    parser.add_argument('--seed_sampler', type=str, default='777 1234 2731 3925 5432',
                        help='random seed for dataset sampler')
    parser.add_argument('--log_interval', type=int, default=20,
                        help='how many batches to wait before logging training status')
    parser.add_argument('--val_interval', type=int, default=5, help='how many epochs to wait before another val')
    parser.add_argument('--test_interval', type=int, default=5, help='how many epochs to wait before another test')
    parser.add_argument('--lamda', type=int, default=100, help='lamda in loss function')
    parser.add_argument('--beta_z', type=int, default=1, help='beta of the kl in loss function')
    parser.add_argument('--beta_anneal', type=int, default=0, help='the anneal epoch of beta')
    parser.add_argument('--threshold', type=float, default=0.5, help='threshold of gaussian model')
    parser.add_argument('--debug', action="store_true", default=False, help='If debug mode')

    # train
    parser.add_argument('--dataset', type=str, default="NPS", help='The dataset going to use')
    parser.add_argument('--eval', action="store_true", default=False, help='directly eval?')
    parser.add_argument('--baseline', action="store_true", default=False, help='If is the baseline?')  # False
    parser.add_argument('--use_model', action="store_true", default=False, help='If use model to get the train feature')
    parser.add_argument('--encode_z', type=int, default=None, help='If encode z and dim of z')  # None
    parser.add_argument("--contrastive_loss", action="store_true", default=False, help="Use contrastive loss")  # False
    parser.add_argument("--temperature", type=float, default=1.0, help="Temperature for contrastive loss")  # 1.0
    parser.add_argument("--contra_lambda", type=float, default=1.0, help="Scaling factor of contrastive loss")
    parser.add_argument("--save_epoch", type=int, default=None, help="save model in this epoch")
    parser.add_argument("--exp", type=int, default=0, help="which experiment")
    parser.add_argument("--unseen_num", type=int, default=13, help="unseen class num in CIFAR100")

    # test
    parser.add_argument('--cf', action="store_true", default=False, help='use counterfactual generation')
    parser.add_argument('--cf_threshold', action="store_true", default=False,
                        help='use counterfactual threshold in revise_cf')
    parser.add_argument('--yh', action="store_true", default=False, help='use yh rather than feature_y_mean')
    parser.add_argument('--use_model_gau', action="store_true", default=False, help='use feature by model in gau')

    args = parser.parse_args()
    return args


def control_seed(args):
    """Seed every RNG used by the run so results are reproducible.

    Seeds torch (CPU and, when available, all CUDA devices) and numpy with
    ``args.seed``, records CUDA availability on ``args.cuda``, and forces
    deterministic cuDNN kernel selection.
    """
    seed = args.seed
    args.cuda = torch.cuda.is_available()
    np.random.seed(seed)
    torch.manual_seed(seed)
    if args.cuda:
        torch.cuda.manual_seed_all(seed)
    torch.backends.cudnn.deterministic = True


if __name__ == '__main__':
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    args = get_args()
    control_seed(args)

    # Closed-set ("seen") class ids the classifier head is trained on.
    classid_training = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]

    lr = 0.00007
    num_classifier_epochs = 20
    percentile = 5  # percentile of correct-class logits used as the per-class open-set threshold
    label_smoothing_coeff = 0.1
    feature_dim = 128
    model_folder_path = './saved_models/'

    # NOTE(review): torch.load of a whole pickled module requires the encoder's
    # class definition to be importable at load time (provided by the
    # models.simCNN_contrastive star import above) — confirm when refactoring.
    feature_encoder = torch.load(model_folder_path + 'nps_encoder.pt')
    feature_encoder.to(device)

    load_dataset = NPS.NPS_Dataset()
    args.num_classes = 12
    ggr = nps_graph_representation()

    args.run_idx = 0
    # Only the first seed of the space-separated list is used for the sampler.
    seed_sampler = int(args.seed_sampler.split(' ')[0])

    train_dataset, val_dataset, test_dataset = load_dataset.sampler(seed_sampler, args)

    train_loader = DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True, num_workers=0,
                              drop_last=True)
    val_loader = DataLoader(val_dataset, batch_size=args.batch_size, shuffle=False, num_workers=0,
                            drop_last=True)

    scaler = GradScaler()

    # The feature encoder stays frozen; only the MLP classifier head is trained.
    feature_encoder.eval()
    classifier = MLPClassifier(classid_list=classid_training, feature_dim=feature_dim)
    classifier.to(device)
    optimizer_classifier = torch.optim.Adam(classifier.parameters(), lr=lr, betas=(0.9, 0.999))
    scheduler_classifier = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer_classifier, T_max=num_classifier_epochs)

    print("==> Training Class: ", classid_training)

    for classifier_epoch in range(num_classifier_epochs):
        classifier.train()
        time3 = time.time()
        running_correct = 0
        train_seen = 0  # samples actually consumed (drop_last=True discards the tail batch)
        total_classification_loss = 0.0
        for data_classifier, labels_classifier in train_loader:
            data_classifier = ggr.get_rep(data_classifier).to(device)
            labels_classifier = labels_classifier.to(device)
            targets = get_smooth_labels(labels_classifier, classid_training, smoothing_coeff=label_smoothing_coeff)
            optimizer_classifier.zero_grad()
            with autocast():
                # Encoder is frozen — no gradients through the feature extractor.
                with torch.no_grad():
                    features = feature_encoder.get_feature(data_classifier)
                logits = classifier(features)
                _, pred = torch.max(logits, 1)
                classification_loss = classifier.get_loss(logits, targets)

            scaler.scale(classification_loss).backward()
            scaler.step(optimizer_classifier)
            scaler.update()
            # BUG FIX: accumulate a Python float, not the loss tensor, so each
            # batch's autograd graph is not kept alive for the whole epoch.
            total_classification_loss += classification_loss.item()
            running_correct += torch.sum(pred == labels_classifier).item()
            train_seen += labels_classifier.size(0)
        scheduler_classifier.step()

        classifier.eval()
        val_correct = 0
        val_seen = 0
        # BUG FIX: wrap the whole validation pass in no_grad — the original
        # built autograd graphs for the classifier forward during validation.
        with torch.no_grad():
            for data_classifier, labels_classifier in val_loader:
                data_classifier = ggr.get_rep(data_classifier).to(device)
                labels_classifier = labels_classifier.to(device)
                with autocast():
                    features = feature_encoder.get_feature(data_classifier)
                    logits = classifier(features)
                    _, pred = torch.max(logits, 1)
                val_correct += torch.sum(pred == labels_classifier).item()
                val_seen += labels_classifier.size(0)

        # BUG FIX: divide by the number of samples actually evaluated; with
        # drop_last=True, len(loader.dataset) overstates the denominator.
        print("Train Accuracy is:{:.4f}%, Val Accuracy is:{:.4f}%".format(
            100 * running_correct / max(train_seen, 1), 100 * val_correct / max(val_seen, 1)))
        print('classifier_epoch {}: classification_loss = {:.3f}'.format(classifier_epoch, total_classification_loss))
        time4 = time.time()
        print('time for this epoch: {:.3f} minutes'.format((time4 - time3) / 60.0))

    # ---- Estimate per-class open-set thresholds from validation logits ----
    classifier.eval()
    # was: hard-coded range(12); keep it tied to the configured class count
    classwise_logits = [[] for _ in range(args.num_classes)]

    for data_classifier, labels_classifier in val_loader:
        data_classifier = ggr.get_rep(data_classifier).to(device)
        labels_classifier = labels_classifier.to(device)
        with torch.no_grad():
            features = feature_encoder.get_feature(data_classifier)
            logits = classifier(features)
            _, pred_val = torch.max(logits, 1)

        # Collect the correct-class logit of every correctly classified sample.
        for j, label in enumerate(labels_classifier):
            if pred_val[j] == label.item():
                classwise_logits[label.item()].append(logits[j, label.item()].item())

    classwise_thresholds = []
    for val in classwise_logits:
        if not val:
            # No correct prediction for this class on the val set; fall back to 0.
            classwise_thresholds.append(0)
        else:
            classwise_thresholds.append(np.percentile(val, percentile))

    classifier.classwise_thresholds = classwise_thresholds
    print(classifier.classwise_thresholds)

    torch.save(classifier, model_folder_path + 'nps_classifier.pt')
