import sys
sys.path.append("../../")
from datareader import NLI_domain_map, NLIDataset
from sklearn.metrics import accuracy_score
from model import VanillaBert
from MetaSelfTrainFramewoks import MetaSelfTrainV8
import pickle, numpy as np
import torch
import torch.nn.functional as F
from NLI_Utils import reconfig_args, obtain_CRST_set, obtain_model

class MetaSelfTrainV10P(MetaSelfTrainV8):
    """Meta self-training variant that soft-updates pseudo labels.

    On each annotation round the stored (soft) labels are interpolated with
    the model's fresh predictions using a fixed coefficient ``beta``:
    ``(1 - beta) * old_label + beta * prediction``, then renormalized.
    """

    def __init__(self, beta, seed, alpha, topk, class_num, log_dir, suffix, weight_eta=0.1, lr4model=2e-2,
                 coeff4expandset=1.0, max_few_shot_size=20, Inner_BatchSize=5, meta_step=5, extra_step=5):
        """
        Args:
            beta: interpolation weight given to the model's new predictions
                when mixing them into the existing soft labels.
            All remaining arguments are forwarded unchanged to
            ``MetaSelfTrainV8.__init__``.
        """
        super(MetaSelfTrainV10P, self).__init__(seed, alpha, topk, class_num, log_dir, suffix, weight_eta, lr4model,
                 coeff4expandset, max_few_shot_size, Inner_BatchSize, meta_step, extra_step)
        self.beta = beta

    def annotate(self, model: VanillaBert, data: NLIDataset, pseaudo_idxs=None, batch_size=20):
        """Refresh the soft labels of every example NOT in ``pseaudo_idxs``.

        Stores the raw predictions in ``data.logits`` (created lazily) and
        writes the renormalized mixture back through ``data.setLabel``.

        Args:
            model: model providing predictions via ``self.dataset_logits``.
            data: dataset whose labels are updated in place.
            pseaudo_idxs: indices to leave untouched (already pseudo-labeled).
            batch_size: batch size used when computing the dataset logits.
        """
        # BUGFIX: mutable default argument [] replaced with None-sentinel.
        if pseaudo_idxs is None:
            pseaudo_idxs = []
        c_idxs = list(set(range(len(data))) - set(pseaudo_idxs))
        with torch.no_grad():
            pred_tensor = self.dataset_logits(model, data, idxs=c_idxs, batch_size=batch_size)
            # Lazily allocate a per-example logits buffer on first use.
            if not hasattr(data, "logits"):
                data.logits = torch.zeros([len(data), self.class_num], device=pred_tensor.device)
            data.logits[c_idxs] = pred_tensor
        # Convex combination of current labels and fresh predictions.
        pseudo_label = (1 - self.beta)*(data.labelTensor()[c_idxs]) + (self.beta * pred_tensor).cpu()
        # Renormalize each row so it remains a valid probability distribution.
        pseudo_label = (pseudo_label/(pseudo_label.sum(dim=1).unsqueeze(-1))).numpy()
        data.setLabel(pseudo_label, c_idxs)

    def lossAndAcc(self, model, batch, label_weight=None, reduction=None):
        """Compute NLL loss and accuracy for one batch.

        Supports hard labels (dim 1, class indices) and soft labels
        (dim 2, per-class distributions; accuracy then uses ``argmax``).

        Args:
            model: model exposing ``predict(batch)`` returning probabilities.
            batch: batch tuple whose second-to-last element is the labels.
            label_weight: optional per-class weights.
            reduction: reduction mode for hard labels; ``None`` means "mean".

        Returns:
            (loss, acc) tuple.

        Raises:
            ValueError: if the label tensor has an unsupported dimensionality.
        """
        preds = model.predict(batch)
        # Nudge probabilities away from exact 0/1 so log() cannot produce nan.
        epsilon = torch.ones_like(preds) * 1e-8
        preds = (preds - epsilon).abs()
        labels = batch[-2]
        if labels.dim() == 1:
            # BUGFIX: F.nll_loss requires a string reduction; the previous code
            # forwarded the default None, which raises at runtime.
            loss = F.nll_loss(preds.log(), labels.to(preds.device), weight=label_weight,
                              reduction=reduction if reduction is not None else "mean")
        elif labels.dim() == 2:
            if label_weight is None:
                loss = (-1.0 *
                        (preds.log()) *
                        labels.to(preds.device)
                        ).sum(dim=1).mean()
            else:
                # NOTE(review): weighted branch sums (not means) over the batch,
                # mirroring the original behavior — confirm this is intended.
                w_labels = (labels.to(preds.device)) * label_weight
                loss = (-1.0 *
                        (preds.log()) *
                         w_labels
                        ).sum(dim=1).sum()
            labels = labels.argmax(dim=1)
        else:
            # BUGFIX: a bare `raise` with no active exception is itself a
            # RuntimeError; raise an informative error instead.
            raise ValueError(f"Unsupported label tensor dim: {labels.dim()}")
        acc = accuracy_score(labels.cpu().numpy(), preds.argmax(dim=1).cpu().numpy())
        return loss, acc


if __name__ == "__main__":
    # Load the shared experiment configuration serialized by the training driver.
    with open("../../args.pkl", 'rb') as fr:
        argConfig = pickle.load(fr)
    # BUGFIX: rstrip(".py") strips ANY trailing '.', 'p', 'y' characters
    # (e.g. "happy.py" -> "ha"); slice off the literal suffix instead.
    script_path = str(__file__)
    argConfig.model_dir = script_path[:-len(".py")] if script_path.endswith(".py") else script_path
    # Set all the seeds
    seed = argConfig.seed
    reconfig_args(argConfig)

    # See if CUDA available
    device = torch.device("cpu") if not torch.cuda.is_available() else torch.device("cuda:0")
    model1_path = "../../saved/modelSNLI_1.pth"

    domainID = 2
    fewShotCnt = 100
    NLIDomainList = list(NLI_domain_map.keys())
    newDomainName = NLIDomainList[domainID-1]

    model1, tokenizer = obtain_model(args=argConfig, model_device=device)
    labeledSource, validTarget, unlabeledTarget, labeledTarget = obtain_CRST_set(newDomainName,
                                                                                   tokenizer_M=tokenizer,
                                                                                   lt_count=0)
    # ROBUSTNESS: map_location keeps a GPU-trained checkpoint loadable on
    # CPU-only hosts (bare torch.load would fail there).
    model1.load_state_dict(torch.load(model1_path, map_location=device))
    trainer = MetaSelfTrainV10P(0.25, seed, alpha=0.1, topk=0.2, class_num=3, log_dir=argConfig.model_dir,
                                 suffix="{}_FS{}".format(newDomainName, fewShotCnt),
                                 weight_eta=1.5, lr4model=5e-5, coeff4expandset=1.0, max_few_shot_size=20,
                                 Inner_BatchSize=32, meta_step=20, extra_step=5)
    # Shuffle the validation indices once; the two halves are then alternated
    # between training passes below.
    v_idxs = validTarget.read_indexs.copy()
    np.random.shuffle(v_idxs)
    # Keep the gold labels of the unlabeled-target set as class ids (presumably
    # for evaluation inside Training — TODO confirm), then zero out the
    # dataset's labels so self-training starts from scratch.
    ULabel = unlabeledTarget.labelTensor()
    ULabel = ULabel.argmax(dim=1) if ULabel.dim() == 2 else ULabel.clone()
    unlabeledTarget.setLabel(
        torch.zeros_like(unlabeledTarget.labelTensor()).cpu().numpy(),
        list(range(len(unlabeledTarget)))
    )
    for _ in range(40):
        # First pass validates on the first half of the shuffled indices ...
        validTarget.read_indexs = v_idxs[:len(v_idxs)//2]
        trainer.Training(model1, unlabeledTarget, validTarget, ULabel,
                         labeledSource, labeledTarget, max_epoch=1, max_valid_every=30,
                         model_file=f"{argConfig.model_dir}/MetaSelfTrain_{newDomainName}.pkl")
        print("= = = = alternate model = = = => ")
        # ... second pass validates on the other half.
        validTarget.read_indexs = v_idxs[len(v_idxs) // 2:]
        trainer.Training(model1, unlabeledTarget, validTarget, ULabel,
                         labeledSource, labeledTarget, max_epoch=1, max_valid_every=30,
                         model_file=f"{argConfig.model_dir}/MetaSelfTrain_{newDomainName}.pkl")