import time
import numpy as np
import torch
from torch.cuda.amp import autocast as autocast, GradScaler
from models.simCNN_contrastive import *
from models.ContrasiveLoss_SoftLabel import *
from evaluation import openset_eval_contrastive_logits
from utils import load_ImageNet200, load_ImageNet200_contrastive, get_smooth_labels
from mixup import *
from dataset import TFF, NPS
from models.Graph_Representation import tff_graph_representation, nps_graph_representation
import argparse
from torch_geometric.loader import DataLoader



# os.environ["CUDA_VISIBLE_DEVICES"] = "1"

def get_args(argv=None):
    """Build and parse the command-line arguments for OSR training/testing.

    Args:
        argv: Optional list of argument strings. Defaults to ``None``,
            in which case ``sys.argv[1:]`` is used (same behavior as before);
            passing a list makes the parser usable programmatically/in tests.

    Returns:
        argparse.Namespace with all training and evaluation options.
    """
    parser = argparse.ArgumentParser(description='PyTorch OSR Example')
    parser.add_argument('--batch_size', type=int, default=64, help='input batch size for training (default: 64)')
    parser.add_argument('--num_classes', type=int, default=10, help='number of classes')
    # NOTE: help text fixed to match the actual default (was "(default: 50)")
    parser.add_argument('--epochs', type=int, default=20, help='number of epochs to train (default: 20)')
    parser.add_argument('--lr', type=float, default=0.001, help='learning rate (default: 1e-3)')
    parser.add_argument('--wd', type=float, default=0.00, help='weight decay')
    # NOTE: help text fixed to match the actual default (was "(default: 1e-3)")
    parser.add_argument('--momentum', type=float, default=0.01, help='momentum (default: 0.01)')
    parser.add_argument('--decreasing_lr', default='60,100,150', help='decreasing strategy')
    parser.add_argument('--lr_decay', type=float, default=0.1, help='decreasing strategy')
    parser.add_argument('--seed', type=int, default=117, help='random seed (default: 1)')
    parser.add_argument('--seed_sampler', type=str, default='777 1234 2731 3925 5432',
                        help='random seed for dataset sampler')
    parser.add_argument('--log_interval', type=int, default=20,
                        help='how many batches to wait before logging training status')
    parser.add_argument('--val_interval', type=int, default=5, help='how many epochs to wait before another val')
    parser.add_argument('--test_interval', type=int, default=5, help='how many epochs to wait before another test')
    parser.add_argument('--lamda', type=int, default=100, help='lamda in loss function')
    parser.add_argument('--beta_z', type=int, default=1, help='beta of the kl in loss function')
    parser.add_argument('--beta_anneal', type=int, default=0, help='the anneal epoch of beta')
    parser.add_argument('--threshold', type=float, default=0.5, help='threshold of gaussian model')
    parser.add_argument('--debug', action="store_true", default=False, help='If debug mode')

    # train
    parser.add_argument('--dataset', type=str, default="NPS", help='The dataset going to use')
    parser.add_argument('--eval', action="store_true", default=False, help='directly eval?')
    parser.add_argument('--baseline', action="store_true", default=False, help='If is the baseline?')  # False
    parser.add_argument('--use_model', action="store_true", default=False, help='If use model to get the train feature')
    parser.add_argument('--encode_z', type=int, default=None, help='If encode z and dim of z')  # None
    parser.add_argument("--contrastive_loss", action="store_true", default=False, help="Use contrastive loss")  # False
    parser.add_argument("--temperature", type=float, default=1.0, help="Temperature for contrastive loss")  # 1.0
    parser.add_argument("--contra_lambda", type=float, default=1.0, help="Scaling factor of contrastive loss")
    parser.add_argument("--save_epoch", type=int, default=None, help="save model in this epoch")
    parser.add_argument("--exp", type=int, default=0, help="which experiment")
    parser.add_argument("--unseen_num", type=int, default=13, help="unseen class num in CIFAR100")

    # test
    parser.add_argument('--cf', action="store_true", default=False, help='use counterfactual generation')
    parser.add_argument('--cf_threshold', action="store_true", default=False,
                        help='use counterfactual threshold in revise_cf')
    parser.add_argument('--yh', action="store_true", default=False, help='use yh rather than feature_y_mean')
    parser.add_argument('--use_model_gau', action="store_true", default=False, help='use feature by model in gau')

    args = parser.parse_args(argv)
    return args


def control_seed(args):
    """Seed every RNG used in this script from ``args.seed``.

    Records CUDA availability on ``args.cuda`` as a side effect, seeds
    torch (CPU and, when available, all CUDA devices) and numpy, and
    forces deterministic cuDNN kernels for reproducibility.
    """
    use_cuda = torch.cuda.is_available()
    args.cuda = use_cuda

    torch.manual_seed(args.seed)
    if use_cuda:
        torch.cuda.manual_seed_all(args.seed)
    np.random.seed(args.seed)

    # deterministic cuDNN kernels trade speed for run-to-run stability
    torch.backends.cudnn.deterministic = True


def _predict_open_set(encoder, classifier, loader, ggr, device, out_path, unknown_label=20):
    """Classify every batch in *loader* and append predictions to *out_path*.

    A sample whose max logit is at or below the classifier's per-class
    threshold is relabeled as *unknown_label* (open-set rejection).
    One whitespace-separated line of integer labels is appended per batch.
    """
    for data, _labels in loader:
        data = ggr.get_rep(data).to(device)
        with torch.no_grad():
            features = encoder.get_feature(data)
            logits = classifier(features)
            max_logit, preds = torch.max(logits.data, 1)

        # clone() so the rejection rewrite does not alias-mutate `preds`
        results = preds.clone()
        for j in range(preds.shape[0]):
            if max_logit[j] <= classifier.classwise_thresholds[preds[j]]:
                results[j] = unknown_label
        results = results.cpu().detach().numpy()

        with open(out_path, 'ab') as fh:
            np.savetxt(fh, results, fmt='%d', delimiter=' ', newline='\r')
            fh.write(b'\n')


if __name__ == '__main__':
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    args = get_args()
    control_seed(args)

    # map_location lets checkpoints saved on GPU load on CPU-only machines
    model_folder_path = './saved_models/'
    encoder = torch.load(model_folder_path + 'nps_encoder.pt', map_location=device)
    classifier = torch.load(model_folder_path + 'nps_classifier.pt', map_location=device)

    load_dataset = NPS.NPS_Dataset()
    args.num_classes = 12
    in_channel = 1
    ggr = nps_graph_representation()

    args.run_idx = 0
    # use the first sampler seed from the space-separated list
    seed_sampler = int(args.seed_sampler.split(' ')[0])

    train_dataset, val_dataset, test_dataset = load_dataset.sampler(seed_sampler, args)

    # val split = known classes, test split = unknown classes
    test_loader_known = DataLoader(val_dataset, batch_size=args.batch_size, shuffle=False, num_workers=0, drop_last=True)
    test_loader_unknown = DataLoader(test_dataset, batch_size=args.batch_size, shuffle=False, num_workers=0, drop_last=True)

    result_path = r'./results/test_pre_conosr.txt'
    open(result_path, 'w').close()  # truncate any previous results

    encoder.eval()
    classifier.eval()
    _predict_open_set(encoder, classifier, test_loader_known, ggr, device, result_path)
    _predict_open_set(encoder, classifier, test_loader_unknown, ggr, device, result_path)
    # _, _, _, AUROC = openset_eval_contrastive_logits(encoder, classifier, test_loader_known, test_loader_unknown)

    # print("==> Known Class: ", classid_known)
    # print("==> Unknown Class: ", classid_unknown)
    # print('unknown detection AUC = {:.3f}%'.format(AUROC * 100))
