import sys
sys.path.append("../../")
import gc
import fitlog
from torch.optim.lr_scheduler import LambdaLR
from transformers import AdamW
from transformers import DistilBertConfig
from transformers import DistilBertTokenizer
from transformers import BertConfig
from transformers import BertTokenizer
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, precision_recall_fscore_support
from datareader import *
from metrics import *
from model import VanillaBert
from modeling_bert import BertForSequenceClassification, AugBertForSequenceClassification
from modeling_distilbert import DistilBertForSequenceClassification
import pickle
from tqdm import trange, tqdm


def pred_Logits(model:VanillaBert, data, idxs=None, batch_size=20):
    """Run the model over `data` in mini-batches and return stacked outputs.

    Args:
        model: wrapper exposing `predict(batch)`.
        data: dataset supporting `len()`, indexing and `collate_raw_batch`.
        idxs: optional subset of example indices; defaults to all of `data`.
        batch_size: number of examples per forward pass.

    Returns:
        A single tensor of per-example predictions, concatenated along dim 0.
    """
    if idxs is None:
        idxs = list(range(len(data)))
    chunks = []
    with torch.no_grad():  # inference only — no autograd bookkeeping
        for start in trange(0, len(idxs), batch_size):
            selected = idxs[start:start + batch_size]
            raw_batch = data.collate_raw_batch([data[j] for j in selected])
            chunks.append(model.predict(raw_batch))
    return torch.cat(chunks)

def prediction(model:VanillaBert, data, idxs=None, batch_size=20):
    """Return (predicted class index, its score) for each example.

    The score is whatever `model.predict` emits for the top class
    (logit or probability, depending on the model).
    """
    scores = pred_Logits(model, data, idxs, batch_size)
    # Ascending sort: the last column holds the top score and its class index.
    sorted_vals, sorted_cls = scores.sort(dim=1)
    top_cls = sorted_cls[:, -1]
    top_val = sorted_vals[:, -1]
    return top_cls, top_val

def acc_P_R_F1(y_true, y_pred):
    """Score predictions: accuracy, micro-F1 and per-class (P, R, F1, support).

    `y_pred` is a torch tensor (possibly on GPU); `y_true` is anything
    sklearn accepts as labels.
    """
    y_pred_cpu = y_pred.cpu()  # move off the GPU once, reuse for all metrics
    acc = accuracy_score(y_true, y_pred_cpu)
    micro_f1 = f1_score(y_true, y_pred_cpu, average="micro")
    per_class = precision_recall_fscore_support(y_true, y_pred_cpu)
    return acc, micro_f1, per_class

def Perf(model:VanillaBert, data, label, idxs=None, batch_size=20):
    """Predict on `data` (or the `idxs` subset) and score against `label`."""
    predicted, _scores = prediction(model, data, idxs=idxs, batch_size=batch_size)
    gold = label if idxs is None else label[idxs]
    return acc_P_R_F1(gold, predicted)

def WeakLabeling(model:VanillaBert, data, logits=None, batch_size=20):
    """Write the model's current predictions into `data` as weak labels.

    Args:
        model: model used for prediction (via pred_Logits); must expose `.device`.
        data: dataset supporting `len()`, indexing and `setLabel(labels, idxs)`.
        logits: optional per-example confidence tensor; entries equal to the
            -1.0 sentinel are skipped (their labels are left untouched).
            Mutated IN PLACE when provided.
        batch_size: forwarded to pred_Logits.

    Returns:
        (entrophy, predicted_class, confidence); `entrophy` is zero for
        examples skipped via the sentinel.
    """
    if logits is not None:
        # Re-label only the examples NOT marked with the -1.0 sentinel.
        c_idxs = torch.arange(len(data))[logits.__ne__(-1.0)].tolist()
    else:
        c_idxs = torch.arange(len(data)).tolist()
    pred_tensor = pred_Logits(model, data, idxs=c_idxs, batch_size=batch_size)
    # Ascending sort along classes: last column = top confidence / top class.
    confs, preds = pred_tensor.sort(dim=1)
    weak_label = preds[:, -1].cpu().numpy()
    # Side effect: overwrite the dataset's labels for the selected indices.
    data.setLabel(weak_label, c_idxs)
    # Prediction entropy -sum(p * log p): |log p| == -log p for p in (0, 1].
    # NOTE(review): assumes model.predict returns probabilities — confirm.
    entrophy = torch.zeros([len(data)], device=model.device)
    entrophy[c_idxs] = (confs.log().abs() * confs).sum(dim=1)
    if logits is not None:
        # Overwrite confidences in place for the re-labelled examples only;
        # sentinel entries keep their -1.0 marker.
        logits[logits.__ne__(-1.0)] = confs[:, -1]
        return entrophy, preds[:, -1], logits
    else:
        return entrophy, preds[:, -1], confs[:, -1]

def DistillationLoss(Lambda):
    """Build a soft-label cross-entropy loss for self-distillation.

    The returned closure blends the one-hot pseudo label with the teacher's
    per-sample confidence: soft = Lambda * (label * conf) + (1 - Lambda) * label,
    then computes -sum(soft * log(pred)) averaged over the batch.

    Args:
        Lambda: weight in [0, 1] given to the confidence-scaled (soft) label.

    Returns:
        loss_func(label, confidence, preds) -> scalar tensor, where
        `label` is one-hot [batch, n_classes], `confidence` is [batch],
        and `preds` are predicted probabilities [batch, n_classes].
    """
    def loss_func(label, confidence, preds):
        # Shift away from exact zeros so log() below stays finite.
        preds = (preds - 1e-8).abs()
        # Broadcast the per-sample confidence over the class dimension and
        # mask by the one-hot label (generalizes the previous hard-coded
        # 3-class torch.stack to any number of classes).
        confidence = label * confidence.unsqueeze(1)
        soft_label = (Lambda)*confidence + (1-Lambda)*label
        loss_arr = -1*soft_label*preds.log()
        loss = loss_arr.sum(dim=1).mean()
        return loss
    return loss_func

def ModelTrain(model:VanillaBert, weak_set:NLIDataset, weak_set_label:torch.Tensor, logits:torch.Tensor,
               max_epoch:int, batch_size:int, train_indices:List, loss_func, evaluator=None):
    """Fine-tune `model` on pseudo-labelled examples of `weak_set`.

    Args:
        model: wrapper exposing `.predict(batch)`, `.device`, `.named_parameters()`.
        weak_set: dataset whose current (pseudo) labels are trained on.
        weak_set_label: gold labels — used only to report pseudo-label quality.
        logits: per-example confidence, forwarded to `loss_func`.
        max_epoch: maximum passes over `train_indices`.
        batch_size: mini-batch size.
        train_indices: indices of `weak_set` to train on.
        loss_func: callable(one_hot_labels, confidences, preds) -> scalar loss.
        evaluator: optional callable(model) -> (acc, prec, rec, f1), run after
            each epoch that does not trigger the early stop.
    """
    labels, pseaudo_labels = weak_set_label.to(model.device), weak_set.labelTensor()
    # Report how well the pseudo labels agree with the gold labels.
    print("trainSet perf:", acc_P_R_F1(labels[train_indices].cpu(),
                            pseaudo_labels[train_indices].cpu()))
    lr = 5e-5
    # Layer-wise fine-tuning: encoder layer k gets lr * 0.8**(12 - k), so later
    # layers train faster; embeddings get the smallest rate (lr * 0.8**13).
    optimizer_grouped_parameters = [
        {'params': p,
         'lr': lr * pow(0.8, 12 - int(n.split("layer.")[1].split(".", 1)[0])) if "layer." in n \
             else (lr * pow(0.8, 13) if "embedding" in n else lr) \
         } for n, p in model.named_parameters()
    ]
    model_optim = torch.optim.Adam(optimizer_grouped_parameters)
    # One-hot encode class ids {0, 1, 2} arithmetically:
    # col0 = 1 iff label==0, col1 = 1 iff label==1, col2 = 1 iff label==2.
    pseaudo_labels = torch.stack([(-1 * pseaudo_labels).clamp(-1) + 1,
                                    1 - (pseaudo_labels - 1).abs(),
                                    (pseaudo_labels - 1).clamp(0)]).T.to(model.device)
    for epoch in range(max_epoch):
        start = 0
        sum_loss = 0.
        # random.sample over the full length shuffles without mutating input.
        train_indices = random.sample(train_indices, len(train_indices))
        for t_idx in range(0, len(train_indices), batch_size):
            batch_idxs = train_indices[t_idx:t_idx + batch_size]
            batch = weak_set.collate_raw_batch([weak_set[batch_idx]
                                                    for batch_idx in batch_idxs])
            preds = model.predict(batch)
            loss = loss_func(pseaudo_labels[batch_idxs], logits[batch_idxs], preds)
            model.zero_grad()
            model_optim.zero_grad()
            loss.backward()
            model_optim.step()
            torch.cuda.empty_cache()
            print('####Model Update (%3d | %3d) ####, loss = %6.8f' % (
                start, len(train_indices), loss.data.mean()
            ))
            sum_loss += loss.data
            start += batch_size
        # Average over the actual number of batches (ceil division). The old
        # "len // batch_size + 1" over-counted by one whenever
        # len(train_indices) was an exact multiple of batch_size, deflating
        # mean_loss and triggering the early stop too eagerly.
        num_batches = max(1, (len(train_indices) + batch_size - 1) // batch_size)
        mean_loss = sum_loss / num_batches
        print("mean loss:", mean_loss)
        if mean_loss < 0.2:  # early stop once training loss is low enough
            break
        elif evaluator is not None:
            val_acc, prec, rec, f1 = evaluator(model)
            print(
                '##### %6d | %6d, val_acc/val_prec/val_rec/val_f1 = %6.8f/%6.7f/%6.7f/%6.7f' % (
                    epoch, max_epoch,
                    val_acc, prec, rec, f1)
            )

def SelfTrain(tr_model, weak_set, pseudo_logits, entrophy, weak_set_label,
              threshold, tr_model_suffix, train_iter):
    """One self-training round: train on confident pseudo-labels, log metrics.

    Selects examples whose |confidence| clears `threshold`, runs one epoch of
    distillation training on them, evaluates the model on the full set, and
    logs selection/validation metrics to fitlog under `tr_model_suffix`.
    """
    pseudo_logits = pseudo_logits.abs()
    # abs() maps the -1.0 "hard example" sentinel to 1.0, so hand-picked hard
    # examples always clear the threshold and are always trained on.
    flags = pseudo_logits.__ge__(threshold)
    valid_idxs = torch.arange(0, len(weak_set), 1)[flags].tolist()
    # Lambda=0.7: weight of the confidence-scaled soft label in the loss.
    loss_fn = DistillationLoss(0.7)
    ModelTrain(tr_model, weak_set, weak_set_label, pseudo_logits,
                1, 32, valid_idxs, loss_fn, evaluator = None)
    rst_model1 = Perf(tr_model, weak_set, weak_set_label)
    acc_v, microF1_v, (p_v, r_v, f1_v, _) = rst_model1
    print(f"{train_iter} : Post-MetaTrain Performance of model1:", rst_model1)
    # Quality of the selected pseudo labels against the gold labels.
    pseaudo_labels = weak_set.labelTensor()
    acc_s, microF1_s, (p_s, r_s, f1_s, _) = acc_P_R_F1(weak_set_label[valid_idxs],
                                            pseaudo_labels[valid_idxs])
    # NOTE(review): validation metrics index the last class (p_v[-1]) while
    # selection metrics index class 1 (p_s[1]) — confirm this asymmetry is
    # intentional.
    fitlog.add_metric({f"{tr_model_suffix}":
                           {"valid_acc": acc_v,
                            "valid_prec": p_v[-1],
                            "valid_recall": r_v[-1],
                            "valid_f1": microF1_v,
                            "selected_num": len(valid_idxs) * 1.0 / len(weak_set),
                            "selected_acc": acc_s, "selected_prec": p_s[1],
                            "selected_recall": r_s[1], "selected_f1": microF1_s,
                            "init_entrophy": entrophy.mean(),
                            "selected_entrophy": entrophy[valid_idxs].mean()
                            }}, step=train_iter)


if __name__ == "__main__":
    # Experiment configuration is pickled by an upstream driver script.
    with open("../../args.pkl", 'rb') as fr:
        args = pickle.load(fr)
    # BUGFIX: str.rstrip(".py") strips ANY trailing '.', 'p' or 'y' characters
    # (e.g. "copy.py" -> "co"); splitext removes exactly the extension.
    args.model_dir = os.path.splitext(str(__file__))[0]
    # Set all the seeds for reproducibility
    seed = args.seed
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    # See if CUDA available
    device = torch.device("cpu")
    if args.n_gpu > 0 and torch.cuda.is_available():
        print("Training on GPU")
        device = torch.device("cuda:0")

    # model configuration (this block was previously duplicated verbatim;
    # kept once)
    batch_size = args.batch_size
    lr = args.lr
    weight_decay = args.weight_decay
    n_epochs = args.n_epochs
    args.full_bert = True
    args.bertPath = "../../../bert_en/"
    print("====>", args.full_bert)
    bert_model = 'bert-base-uncased' if args.full_bert else 'distilbert-base-uncased'
    if args.full_bert:
        bert_config = BertConfig.from_pretrained(bert_model, num_labels=2) if args.bertPath is None else \
            BertConfig.from_pretrained(args.bertPath, num_labels=2)
        tokenizer = BertTokenizer.from_pretrained(bert_model) if args.bertPath is None else \
            BertTokenizer.from_pretrained(args.bertPath)
    else:
        bert_config = DistilBertConfig.from_pretrained(bert_model, num_labels=2) if args.distillBertPath is None else \
            DistilBertConfig.from_pretrained(args.distillBertPath, num_labels=2)
        tokenizer = DistilBertTokenizer.from_pretrained(bert_model) if args.distillBertPath is None else \
            DistilBertTokenizer.from_pretrained(args.distillBertPath)

    # Start every run with a fresh (empty) log directory.
    log_dir = args.model_dir
    if not os.path.exists(log_dir):
        os.system("mkdir %s"%log_dir)
    else:
        os.system("rm -rf %s"%log_dir)
        os.system("mkdir %s"%log_dir)
    fitlog.set_log_dir(log_dir)
    fitlog.add_hyper({
        "epochs": n_epochs,
        "learning_rate": lr,
        "warmup": args.warmup_steps,
        "weight_decay": weight_decay,
        "batch_size": batch_size,
        "train_split_percentage": args.train_pct,
        "bert_model": bert_model,
        "seed": seed,
        "pretrained_model": args.pretrained_model,
        "tags": ",".join(args.tags)
    }, name=args.run_name)

    # NLI has 3 classes; override the 2-label config loaded above.
    bert_config.num_labels = 3
    bert_config.hidden_act = "relu"
    # Create the model
    bert = AugBertForSequenceClassification.from_pretrained(
            bert_model, config=bert_config).to(device) if args.bertPath is None \
            else AugBertForSequenceClassification.from_pretrained(
            args.bertPath, config=bert_config).to(device)

    model = VanillaBert(bert).to(device)
    model1_path = "../../saved/modelSNLI_1.pth"
    model2_path = "../../saved/modelSNLI_2.pth"

    # Target-domain evaluation data: matched for domains 1-5, mismatched above.
    domain_ID = 2
    few_shot_cnt = 100
    new_domain_name = list(NLI_domain_map.keys())[domain_ID - 1]
    if domain_ID > 5:
        test_set = NLIDataset(f"../../../multinli_1.0/multinli_1.0_dev_mismatched.jsonl", tokenizer=tokenizer)
    else:
        test_set = NLIDataset(f"../../../multinli_1.0/multinli_1.0_dev_matched.jsonl", tokenizer=tokenizer)
    test_set.domainSelect(domain_ID)
    new_domain_label = test_set.labelTensor()

    state_dicts = torch.load(model2_path)
    model.load_state_dict(state_dicts)
    print("Performance:", Perf(model, test_set, new_domain_label))
    ###======select hard samples===========###
    # NOTE(review): ascending argsort picks the LOWEST-entropy (most
    # confident) examples as "hard" — confirm the sort direction is intended.
    entrophy, preds, logits = WeakLabeling(model, test_set)
    hard_idxs = entrophy.argsort()[:few_shot_cnt].cpu().tolist()
    # -1.0 is the sentinel telling WeakLabeling/SelfTrain to keep these labels.
    logits[hard_idxs] = -1.0
    test_set.setLabel(preds.cpu().numpy()[hard_idxs], hard_idxs)
    ###==================================###
    threshold = 0.7
    # Alternate self-training between two model snapshots, re-labelling the
    # data after each update.
    for iterate in range(100):
        if iterate != 0:
            state_dicts = torch.load(f"./{log_dir}/model1_{new_domain_name}.pth")
            model.load_state_dict(state_dicts)
        SelfTrain(model, test_set, logits, entrophy, new_domain_label,
                  threshold, f"model1_{new_domain_name}", iterate + 1)
        torch.save(model.state_dict(), f"./{log_dir}/model1_{new_domain_name}.pth")
        entrophy, preds, logits = WeakLabeling(model, test_set, logits)
        if iterate == 0:
            state_dicts = torch.load(model2_path)
            model.load_state_dict(state_dicts)
        else:
            state_dicts = torch.load(f"./{log_dir}/model2_{new_domain_name}.pth")
            model.load_state_dict(state_dicts)
        SelfTrain(model, test_set, logits, entrophy, new_domain_label,
                  threshold, f"model2_{new_domain_name}", iterate + 1)
        torch.save(model.state_dict(), f"./{log_dir}/model2_{new_domain_name}.pth")
        if iterate != 99:
            entrophy, preds, logits = WeakLabeling(model, test_set, logits)
