import sys
sys.path.append("../../")
from sentiDataReader import SentiDatasetV2, Senti_domain_map
from metrics import precision_recall_fscore_support
from model import VanillaBert, accuracy_score
import torch, pickle, copy, fitlog, random, os
from tqdm import trange
import torch.nn.functional as F, numpy as np
from Senti_Utils import obtain_model, obtain_domain_set, reconfig_args

class GradientReversal(torch.autograd.Function):
    """
    Gradient-reversal layer (GRL).

    Forward is the identity; backward multiplies the incoming gradient by
    ``-lambd`` so that everything upstream is trained to *maximize* the loss
    applied downstream (standard DANN-style adversarial trick).

    ``lambd`` is a class attribute so callers can tune the reversal strength
    globally; kept this way for backward compatibility with existing code.
    """
    lambd = 1.0

    @staticmethod
    def forward(ctx, x):
        # Return a fresh view instead of the input tensor itself, as
        # recommended for identity-like autograd Functions (returning the
        # input unchanged aliases input and output in the autograd graph).
        return x.view_as(x)

    @staticmethod
    def backward(ctx, grad_output):
        # Reverse (and scale) the gradient flowing back to the encoder.
        return GradientReversal.lambd * grad_output.neg()

def Generator1(labeledSource, unlabeledTarget, labeledTarget, batchSize):
    """Batch sampler for the semi-supervised setting (labeled target available).

    Splits each classification batch between labeled source (bs1 items) and
    labeled target (bs2 items), and builds a second mixed batch for the
    domain-adaptation loss.  Returns ``(maxIters, generator)`` where the
    generator yields ``(batch1, batch2)`` pairs: batch1 for the CE loss,
    batch2 for the DA loss.

    NOTE(review): assumes every dataset is large enough that iter1/iter2/iter3
    are all >= 1; otherwise ``i % iterX`` below raises ZeroDivisionError --
    confirm upstream dataset sizes.
    """
    halfBS = batchSize // 2
    # Labeled-target share of the CE batch, capped by the dataset size.
    bs2 = halfBS if halfBS < len(labeledTarget) else len(labeledTarget)
    bs1 = batchSize - bs2
    # Unlabeled-target share of the DA batch (roughly a third of a batch).
    bs3 = batchSize // 3 if batchSize < 3 * len(unlabeledTarget) else len(unlabeledTarget)
    # Number of full batches one pass over each dataset can supply.
    iter1, iter2, iter3 = len(labeledSource) // bs1, \
                          len(labeledTarget) // bs2, \
                          len(unlabeledTarget) // bs3
    maxIters = max([iter1, iter2, iter3])
    def generator():
        idxsS, idxsLT, idxsUT = [], [], []
        for i in range(maxIters + 1):
            # Reshuffle each index list once its epoch is exhausted; the `* 2`
            # duplication lets a slice that wraps past the end of one epoch
            # still return a full batch.
            if i % iter1 == 0:
                idxsS = random.sample(range(len(labeledSource)), len(labeledSource)) * 2
            if i % iter2 == 0:
                idxsLT = random.sample(range(len(labeledTarget)), len(labeledTarget)) * 2
            if i % iter3 == 0:
                idxsUT = random.sample(range(len(unlabeledTarget)), len(unlabeledTarget)) * 2
            # i * bs1 can exceed len(labeledSource), so start from the remainder
            # (wrap-around indexing into the duplicated index list).
            start_LS, start_LT, start_UT = (i * bs1) % len(labeledSource), \
                                           (i * bs2) % len(labeledTarget), \
                                           (i * bs3) % len(unlabeledTarget)
            end_LS, end_LT, end_UT = start_LS + bs1, start_LT + bs2, start_UT + bs3

            items1 = [labeledSource[jj] for jj in idxsS[start_LS:end_LS]]
            items2 = [labeledTarget[jj] for jj in idxsLT[start_LT:end_LT]]
            items3 = [unlabeledTarget[jj] for jj in idxsUT[start_UT:end_UT]]
            batch1 = labeledTarget.collate_raw_batch(items1 + items2)
            # NOTE(review): batchSize - bs1 - bs3 == bs2 - bs3 can be negative
            # when the labeled-target set is tiny (bs2 < bs3); random.sample
            # would then raise ValueError -- confirm upstream dataset sizes.
            batch2 = unlabeledTarget.collate_raw_batch(
                random.sample(items2, batchSize - bs1 - bs3) + items1 + items3
            )
            yield batch1, batch2
    return maxIters, generator

def Generator2(labeledSource, unlabeledTarget, batchSize):
    """Batch sampler for the purely unsupervised setting (no labeled target).

    Returns ``(maxIters, generator)`` where the generator yields
    ``(batch1, batch2)`` pairs: batch1 (labeled source only) feeds the CE
    loss, batch2 (mixed source + unlabeled target) feeds the DA loss.

    NOTE(review): assumes both datasets are large enough that iter1 and iter2
    are >= 1; otherwise ``i % iterX`` below raises ZeroDivisionError.
    """
    # Unlabeled-target share of the DA batch, capped by the dataset size.
    bs2 = batchSize // 2 if batchSize < 2 * len(unlabeledTarget) else len(unlabeledTarget)
    # Number of full batches one pass over each dataset can supply.
    iter1, iter2 = len(labeledSource) // batchSize, \
                   len(unlabeledTarget) // bs2
    maxIters = max([iter1, iter2])
    def generator():
        idxsS, idxsUT = [], []
        for i in range(maxIters + 1):
            # Reshuffle each index list once its epoch is exhausted; the `* 2`
            # duplication lets a slice that wraps past the end still return a
            # full batch.
            if i % iter1 == 0:
                idxsS = random.sample(range(len(labeledSource)), len(labeledSource)) * 2
            if i % iter2 == 0:
                idxsUT = random.sample(range(len(unlabeledTarget)), len(unlabeledTarget)) * 2
            # i * batchSize can exceed len(labeledSource), so start from the
            # remainder (wrap-around indexing into the duplicated index list).
            start_LS, start_UT = (i * batchSize) % len(labeledSource), \
                                    (i * bs2) % len(unlabeledTarget)
            end_LS, end_UT = start_LS + batchSize, start_UT + bs2

            items1 = [labeledSource[jj] for jj in idxsS[start_LS:end_LS]]
            items2 = [unlabeledTarget[jj] for jj in idxsUT[start_UT:end_UT]]
            batch1 = labeledSource.collate_raw_batch(items1)
            # Pad the DA batch with source items so it is a full batchSize.
            batch2 = unlabeledTarget.collate_raw_batch(
                random.sample(items1, batchSize - bs2) + items2
            )
            yield batch1, batch2 # batch1 for CE Loss, batch2 for DA Loss
    return maxIters, generator

def DataIter(labeledSource, unlabeledTarget, labeledTarget=None, batchSize=32):
    """Dispatch to the appropriate batch generator.

    Returns ``(maxIters, generator_fn)`` from Generator1 when a labeled
    target set is supplied (semi-supervised case), otherwise from
    Generator2 (purely unsupervised adaptation).
    """
    if labeledTarget is None:
        return Generator2(labeledSource, unlabeledTarget, batchSize)
    assert len(labeledTarget) > 0
    return Generator1(labeledSource, unlabeledTarget, labeledTarget, batchSize)

class BiATUtils:
    """Shared helpers for BiAT training: RNG seeding, batching, encoding
    batches into pooled BERT vectors, and evaluation metrics."""

    def __init__(self, random_seed):
        # The seed is only stored here; call initTrainingEnv() to apply it.
        self.seed = random_seed

    def initTrainingEnv(self):
        """Seed python/numpy/torch RNGs and force deterministic cuDNN."""
        random.seed(self.seed)
        np.random.seed(self.seed)
        torch.manual_seed(self.seed)
        torch.cuda.manual_seed_all(self.seed)
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False

    def acc_P_R_F1(self, y_true, y_pred):
        """Return (accuracy, (precision, recall, f1, support)) for y_pred."""
        return accuracy_score(y_true, y_pred.cpu()), \
                    precision_recall_fscore_support(y_true, y_pred.cpu())

    def dataIter(self, pseudo_set, labeled_target=None, batch_size=32):
        """Yield shuffled batches drawn from pseudo_set (+ labeled_target).

        Indices < p_len address pseudo_set; the rest address labeled_target.
        Datasets may restrict themselves via a `valid_indexs` attribute.
        The `* 2` duplication lets the final slice wrap past the end, so the
        last batch may contain repeated items.
        """
        p_idxs = list(range(len(pseudo_set))) if not hasattr(pseudo_set, 'valid_indexs') else pseudo_set.valid_indexs
        p_len = len(p_idxs)
        if labeled_target is None:
            l_len = 0
            l_idxs = []
        else:
            l_idxs = list(range(len(labeled_target))) if not hasattr(labeled_target, 'valid_indexs') \
                                                        else labeled_target.valid_indexs
            l_len = len(l_idxs)
        data_size = p_len + l_len
        idxs = random.sample(range(data_size), data_size)*2
        for start_i in range(0, data_size, batch_size):
            batch_idxs = idxs[(start_i):(start_i+batch_size)]
            items = [pseudo_set[p_idxs[idx]] if idx < p_len else \
                        labeled_target[l_idxs[idx-p_len]] for idx in batch_idxs]
            yield pseudo_set.collate_raw_batch(items)

    def Batch2Vecs(self, model: VanillaBert, batch):
        """Encode a batch through the model's BERT and return pooler_output.

        Expects batch[0:3] = (input_ids, attention_masks, segment_ids);
        moves them onto the model's device when needed.
        """
        if batch[0].device != model.device:  # data is on a different device
            input_ids, masks, seg_ids = batch[0].to(model.device), \
                                        batch[1].to(model.device), \
                                        batch[2].to(model.device)
        else:
            input_ids, masks, seg_ids = batch[0], batch[1], batch[2]
        encoder_dict = model.bert.bert.forward(
            input_ids=input_ids,
            attention_mask=masks,
            token_type_ids=seg_ids
        )
        return encoder_dict.pooler_output

    def AugBatch2Vecs(self, model:VanillaBert, batch):
        """Encode a batch with a randomly chosen embedding augmentation.

        Picks one of: gaussian noise, gaussian blur, adversarial, random
        mask, random replace (uniformly, 20% each).
        """
        rand = random.random()
        if rand < 0.2:
            model.bert.bert.embeddings.aug_type = "gaussian"
        elif rand < 0.4:
            model.bert.bert.embeddings.aug_type = "g_blur"
        elif rand < 0.6:
            # For the adversarial augmentation, first backprop a clean loss;
            # presumably the "adver" mode reads the resulting gradients --
            # TODO confirm against the embeddings implementation.
            model.bert.bert.embeddings.aug_type = None
            loss, acc = model.lossAndAcc(batch)
            loss.backward()
            model.bert.bert.embeddings.aug_type = "adver"
        elif rand < 0.8:
            model.bert.bert.embeddings.aug_type = "rMask"
        else:
            model.bert.bert.embeddings.aug_type = "rReplace"
        return self.Batch2Vecs(model, batch)

    def lossAndAcc(self, model:VanillaBert, batch, temperature=1.0):
        """Return (NLL loss, accuracy) for a labeled batch.

        Labels are taken from batch[-2]; one-hot labels are converted with
        argmax.
        """
        pooledOutput = self.Batch2Vecs(model, batch)
        logits = model.bert.classifier(pooledOutput)
        preds = F.softmax(logits / temperature, dim=1)
        epsilon = torch.ones_like(preds) * 1e-8
        preds = (preds - epsilon).abs()  # to avoid the prediction [1.0, 0.0], which leads to the 'nan' value in log operation
        labels = batch[-2].to(preds.device)
        labels = labels.argmax(dim=1) if labels.dim() == 2 else labels
        loss = F.nll_loss(preds.log(), labels)
        acc = accuracy_score(labels.cpu().numpy(), preds.argmax(dim=1).cpu().numpy())
        return loss, acc

    def dataset_logits(self, model:VanillaBert, data, idxs=None, batch_size=40):
        """Run the model over `data` (optionally only `idxs`) and return the
        concatenated prediction tensor, one row per example."""
        preds = []
        if idxs is None:
            idxs = list(range(len(data)))
        for i in trange(0, len(idxs), batch_size):
            batch_idxs = idxs[i:min(len(idxs), i + batch_size)]
            batch = data.collate_raw_batch([data[idx] for idx in batch_idxs])
            pred = model.predict(batch, temperature=1.0)
            preds.append(pred)
        pred_tensor = torch.cat(preds)
        return pred_tensor

    def dataset_inference(self, model:VanillaBert, data, idxs=None, batch_size=20):
        """Return (predicted class, confidence) per example via top-1 sort."""
        pred_tensor = self.dataset_logits(model, data, idxs, batch_size)
        vals, idxs = pred_tensor.sort(dim=1)
        return idxs[:, -1], vals[:, -1]

    def perf(self, model:VanillaBert, data, label, idxs=None, batch_size=20):
        """Evaluate on `data` and return (acc, (P, R, F1, support), loss)."""
        with torch.no_grad():
            predTensor = self.dataset_logits(model, data, idxs, batch_size)
            _, yPred = predTensor.sort(dim=1)
        yTrue = label[idxs] if idxs is not None else label
        loss = F.nll_loss(predTensor.log(), yTrue.to(predTensor.device))
        return self.acc_P_R_F1(yTrue, yPred[:, -1]) + (loss,)

class BiATTrainer(BiATUtils):
    """Bidirectional Adversarial Training (BiAT) trainer.

    Each training step combines several objectives on a shared BERT encoder
    (HNet) plus an auxiliary classifier head (TNetClassifier):

      * CE   - supervised cross-entropy on labeled source + labeled target
      * AT   - adversarial-embedding version of the CE loss (weight lambda1)
      * AAT  - adversarial loss driven by the auxiliary head (weight lambda2)
      * EVaT - virtual-adversarial + entropy loss on unlabeled target (lambda3)
      * T    - auxiliary-head loss on labeled target (unweighted)
      * MME  - minimax-entropy loss on unlabeled target (lambda4, negated,
               i.e. the entropy term is maximized through GradientReversal)
    """

    def __init__(self, random_seed, log_dir, suffix, model_file, class_num, lambda1, lambda2, lambda3, lambda4,
                 temperature=1.0, learning_rate=5e-3, batch_size=32):
        super(BiATTrainer, self).__init__(random_seed=random_seed)
        # os.makedirs is portable and creates intermediate directories; the
        # original `os.system("mkdir ...")` was shell-dependent and silently
        # ignored failures.
        os.makedirs(log_dir, exist_ok=True)
        fitlog.set_log_dir("{}/".format(log_dir), new_log=True)
        self.log_dir = log_dir
        self.suffix = suffix                # tag appended to all fitlog metric names
        self.model_file = model_file        # path where the best checkpoint is saved
        self.best_valid_acc = 0.0
        self.min_valid_loss = 1e8
        self.class_num = class_num
        self.temperature = temperature      # softmax temperature shared by all heads
        self.learning_rate = learning_rate
        self.batch_size = batch_size
        self.valid_step = 0                 # fitlog step counter for validation metrics
        self.lambda1, self.lambda2, self.lambda3, self.lambda4 = lambda1, lambda2, lambda3, lambda4

    def obtainOptim(self, HNet, TNetClassifier):
        """Build an Adam optimizer with layer-wise learning-rate decay.

        Encoder layer k gets lr * 0.8**(12 - k) (deeper layers learn faster),
        embeddings get lr * 0.8**13, and everything else -- including the
        auxiliary classifier -- uses the base learning rate.
        """
        optimizerGroupedParameters = [
                                         {'params': p,
                                          # layer-wise fine-tuning: decay lr for lower layers
                                          'lr': self.learning_rate * pow(0.8, 12 - int(
                                              n.split("layer.")[1].split(".", 1)[0])) if "layer." in n \
                                              else (self.learning_rate * pow(0.8, 13) if "embedding" in n else self.learning_rate)
                                          } for n, p in HNet.named_parameters()
                                     ] + \
                                     [{
                                         'params': TNetClassifier.parameters(),
                                         'lr': self.learning_rate
                                     }]
        optim = torch.optim.Adam(optimizerGroupedParameters)
        return optim

    def Entrophy(self, trModel:VanillaBert, batch):
        """Prediction entropy on `batch`, with gradients reversed.

        (Method name misspelling is kept for backward compatibility.)
        Mirrors MMELoss but pools via trModel.PoolerVecs + dropout instead of
        Batch2Vecs.
        """
        if batch[0].device != trModel.device:  # move the batch onto the model's device
            inputIds, masks, segIds = batch[0].to(trModel.device), \
                                      batch[1].to(trModel.device), \
                                      batch[2].to(trModel.device)
        else:
            inputIds, masks, segIds = batch[0], batch[1], batch[2]
        pooledOutput = trModel.PoolerVecs(inputIds, token_ids=segIds, attention_mask=masks)
        pooledOutput = trModel.bert.dropout(pooledOutput)
        pooledOutput = GradientReversal.apply(pooledOutput)
        normedPoolOut = pooledOutput / (pooledOutput.norm())
        logits = trModel.bert.classifier(normedPoolOut)
        preds = F.softmax(logits / self.temperature, dim=1)
        epsilon = torch.ones_like(preds) * (1e-8)
        preds = (preds - epsilon).abs()  # keep probabilities strictly positive for log()
        loss = -1 * (preds * (preds.log())).sum()
        return loss

    def vecs2logits(self, vecs, label, classifier):
        """Classify pooled vectors and return (NLL loss, accuracy)."""
        logits = classifier(vecs)
        preds = F.softmax(logits / self.temperature, dim=1)
        epsilon = torch.ones_like(preds) * (1e-8)
        preds = (preds - epsilon).abs()  # avoid log(0) -> nan for saturated predictions
        loss = F.nll_loss(preds.log(), label.to(preds.device))
        acc = accuracy_score(label.cpu().numpy(), preds.argmax(dim=1).cpu().numpy())
        return loss, acc

    def CELoss(self, HNet:VanillaBert, batch, label=None):
        """Supervised cross-entropy loss on a labeled batch.

        Labels default to batch[-2] unless overridden via `label`.
        """
        pooledOutput = self.Batch2Vecs(HNet, batch)
        loss, acc = self.vecs2logits(pooledOutput,
                                     batch[-2] if label is None else label,
                                     HNet.bert.classifier)
        return loss, acc

    def ATLoss(self, HNet:VanillaBert, batch, label=None):
        """CE loss computed with adversarial embedding perturbation enabled.

        Assumes gradients from a preceding backward pass are available for
        the "adver" augmentation -- TODO confirm against the embeddings
        implementation.  Resets aug_type to None afterwards.
        """
        HNet.bert.bert.embeddings.aug_type = "adver"
        pooledOutput = self.Batch2Vecs(HNet, batch)
        adverLoss, adverAcc = self.vecs2logits(pooledOutput,
                                               batch[-2] if label is None else label,
                                               HNet.bert.classifier)
        HNet.bert.bert.embeddings.aug_type = None
        return adverLoss, adverAcc

    def AATLoss(self, HNet:VanillaBert, TNetClassifier, batch):
        """Auxiliary adversarial loss.

        First backprops the auxiliary head's loss (to populate gradients for
        the adversarial perturbation), then evaluates the adversarial CE loss
        on the main head.
        """
        pooledOutput = self.Batch2Vecs(HNet, batch)
        loss, acc = self.vecs2logits(pooledOutput, batch[-2], TNetClassifier)
        HNet.zero_grad()
        loss.backward()
        aatLoss, aatAcc = self.ATLoss(HNet, batch)
        return aatLoss, aatAcc

    def MMELoss(self, HNet:VanillaBert, batch):
        """Minimax-entropy loss: prediction entropy with reversed gradients
        on L2-normalized pooled vectors (cf. Entrophy above)."""
        pooledOutput = self.Batch2Vecs(HNet, batch)
        pooledOutput = GradientReversal.apply(pooledOutput)
        normedPoolOut = pooledOutput / (pooledOutput.norm())
        logits = HNet.bert.classifier(normedPoolOut)
        preds = F.softmax(logits / self.temperature, dim=1)
        epsilon = torch.ones_like(preds) * (1e-8)
        preds = (preds - epsilon).abs()  # keep probabilities strictly positive for log()
        loss = -1 * (preds * (preds.log())).sum()
        return loss

    def EVatLoss(self, HNet:VanillaBert, batch):
        """Virtual-adversarial + entropy loss on an unlabeled batch.

        Uses the model's own argmax predictions as pseudo labels, backprops
        the clean loss to seed the adversarial perturbation, then penalizes
        both pseudo-label NLL and entropy under the perturbed embeddings.
        """
        with torch.no_grad():
            preds = HNet.predict(batch)
            pseudoLabel = preds.argmax(dim=1)  # renamed from pseudoLable (typo)
        loss, _ = self.CELoss(HNet, batch, pseudoLabel)
        HNet.zero_grad()
        loss.backward()

        HNet.bert.bert.embeddings.aug_type = "adver"
        pooledOutput = self.Batch2Vecs(HNet, batch)

        logits = HNet.bert.classifier(pooledOutput)
        preds = F.softmax(logits / self.temperature, dim=1)
        epsilon = torch.ones_like(preds) * (1e-8)
        preds = (preds - epsilon).abs()  # keep probabilities strictly positive for log()
        vat_loss = F.nll_loss(preds.log(), pseudoLabel)
        entrophy = -1 * (preds * (preds.log())).sum()
        HNet.bert.bert.embeddings.aug_type = None
        return vat_loss + entrophy

    def TLoss(self, HNet, TNetClassifier, batch):
        """Loss/accuracy of the auxiliary classifier on a labeled batch."""
        pooledOutput = self.Batch2Vecs(HNet, batch)
        loss, acc = self.vecs2logits(pooledOutput, batch[-2], TNetClassifier)
        return loss, acc

    def dataSampler(self, labeledSource, labeledTarget, unlabeledTarget):
        """Return a sampler yielding the four batches one training step needs:
        (batchCE, batchAAT, batchEVat, batchTNet)."""
        half_size = self.batch_size//2
        # Labeled-target share of the CE batch, capped by the dataset size.
        bs2 = len(labeledTarget) if len(labeledTarget) < half_size else half_size
        bs1 = self.batch_size - bs2
        def sample_fn(max_iter_step):
            for _ in range(max_iter_step):
                items1 = [labeledSource[idx] for idx in random.sample(range(len(labeledSource)),
                                                                      min(len(labeledSource), self.batch_size))]
                items2 = [labeledTarget[idx] for idx in random.sample(range(len(labeledTarget)),
                                                                      min(len(labeledTarget), self.batch_size))]
                items3 = [unlabeledTarget[idx] for idx in random.sample(range(len(unlabeledTarget)), self.batch_size)]
                batchCE = labeledSource.collate_raw_batch(items1[:bs1] + items2[:bs2])
                batchEVat = unlabeledTarget.collate_raw_batch(items3)
                batchTNet = labeledTarget.collate_raw_batch(items2)
                batchAAT = labeledSource.collate_raw_batch(items1)
                yield batchCE, batchAAT, batchEVat, batchTNet
        return sample_fn

    def PreTrainTNet(self, HNet, TNetClassifier, labeledTarget : SentiDatasetV2, batchSize=32):
        """Pre-train the auxiliary classifier on the labeled target set.

        Early stop: once the rolling window of the last 20 losses has a
        standard deviation < 0.05 and a mean < 0.2 (at most maxSteps steps).
        """
        optim = self.obtainOptim(HNet, TNetClassifier)
        lossList = []
        maxSteps = 200
        for step in range(maxSteps):
            batch = labeledTarget.collate_raw_batch(
                [labeledTarget[idx] for idx in random.sample(range(len(labeledTarget)),
                                                             min(len(labeledTarget), batchSize))]
            )
            TLoss, TAcc = self.TLoss(HNet, TNetClassifier, batch)
            optim.zero_grad()
            TLoss.backward()
            optim.step()
            # BUGFIX: the displayed total now matches the loop bound (the
            # original printed a hard-coded 1000 while looping 200 times).
            print('####Pre Train Domain Classifier (%3d | %3d) ####, loss = %6.8f, Acc = %6.8f' % (
                step, maxSteps, TLoss.data.item(), TAcc
            ))
            lossList.append(TLoss.data.item())
            if len(lossList) > 20:
                lossList.pop(0)
                if np.std(lossList) < 0.05 and np.mean(lossList) < 0.2:
                    return

    def Training(self, HNet:VanillaBert, labeledSource : SentiDatasetV2, labeledTarget : SentiDatasetV2,
                    unlabeledTarget : SentiDatasetV2, validSet : SentiDatasetV2, testSet:SentiDatasetV2,
                        testLabel, maxStep=10000, validEvery=20):
        """Main BiAT training loop.

        Each step applies the CE+AT, AAT, EVaT, T and MME objectives in turn
        (each with its own optimizer step); validates every `validEvery`
        steps and checkpoints on the best validation accuracy.
        """
        assert labeledTarget is not None
        print("labeled Source/labeled Target/unlabeled Target: {}/{}/{}".format(len(labeledSource),
                                                                                len(labeledTarget),
                                                                                len(unlabeledTarget)))
        # The auxiliary head starts as a copy of the main classifier.
        TNetClassifier = copy.deepcopy(HNet.bert.classifier)
        optim = self.obtainOptim(HNet, TNetClassifier)

        validLabel = validSet.labelTensor()
        sampler = self.dataSampler(labeledSource, labeledTarget, unlabeledTarget)
        for step, (batchCE, batchAAT, batchEVat, batchTNet) in enumerate(sampler(maxStep)):
            # --- CE + AT segment (gradients of both accumulate before step) ---
            loss, acc = self.CELoss(HNet, batchCE)
            HNet.zero_grad()
            loss.backward()
            ATLoss, ATAcc = self.ATLoss(HNet, batchCE)
            (self.lambda1*ATLoss).backward()
            optim.step()

            # --- AAT segment ---
            AATLoss, AATAcc = self.AATLoss(HNet, TNetClassifier, batchAAT)
            optim.zero_grad()
            (self.lambda2*AATLoss).backward()
            optim.step()

            # --- EVaT segment (unlabeled target) ---
            EVaTLoss = self.EVatLoss(HNet, batchEVat)
            optim.zero_grad()
            (self.lambda3*EVaTLoss).backward()
            optim.step()

            # --- Auxiliary head segment (labeled target) ---
            TLoss, TAcc = self.TLoss(HNet,TNetClassifier, batchTNet)
            optim.zero_grad()
            TLoss.backward()
            optim.step()

            # --- MME segment: negated weight, i.e. entropy is maximized ---
            MMELoss = self.MMELoss(HNet, batchEVat)
            optim.zero_grad()
            (-1*self.lambda4*MMELoss).backward()
            optim.step()

            print('#Model Update (%3d | %3d) #, loss_CE/acc_CE = %6.8f/%6.8f  |  loss_AT/acc_AT = %6.8f/%6.8f | \
                        loss_AAT/acc_AAT = %6.8f/%6.8f  |  loss_EVaT = %6.8f  | loss_T/acc_T = %6.8f/%6.8f  | loss_MME = %6.8f ' % (
                            step, maxStep, loss.data.item(), acc, ATLoss.data.item(), ATAcc, AATLoss.data.item(), AATAcc,
                                EVaTLoss.data.item(), TLoss.data.item(), TAcc,MMELoss.data.item()
            ))
            if (step + 1) % validEvery == 0:
                rst = self.perf(HNet, validSet, validLabel)
                acc_v, (p_v, r_v, f1_v, _), loss_v = rst
                print("valid perf:", rst)
                # .item() so the logged loss is a plain float, not a tensor.
                output_items = [("valid_acc", acc_v)] + \
                               [("valid_loss", loss_v.item())] + \
                               [('valid_prec_{}'.format(i), p_v[i]) for i in range(self.class_num)] + \
                               [('valid_recall_{}'.format(i), r_v[i]) for i in range(self.class_num)] + \
                               [('valid_f1_{}'.format(i), f1_v[i]) for i in range(self.class_num)]
                fitlog.add_metric({f"ValidPerf_{self.suffix}": dict(output_items)}, step=self.valid_step)
                self.valid_step += 1
                # BUGFIX: checkpoint on the *validation* accuracy acc_v;
                # the original compared the last training-batch accuracy
                # `acc`, so checkpoints did not track validation performance.
                if acc_v > self.best_valid_acc:
                    self.logPerf(HNet, testSet, testLabel, self.suffix, step=1)
                    torch.save(HNet.state_dict(), self.model_file)
                    self.best_valid_acc = acc_v
                else:
                    self.logPerf(HNet, testSet, testLabel, self.suffix, step=0)

    def logPerf(self, model, test_set, test_label, test_suffix, step=0):
        """Evaluate on the test set and push the metrics to fitlog.

        step > 0 marks the evaluation of a new best model.
        """
        rst_model = self.perf(model, test_set, test_label)
        if step > 0:
            print("BestPerf : ", rst_model)
        else:
            print("TestPerf : ", rst_model)
        acc_v, (p_v, r_v, f1_v, _), loss_v = rst_model
        # .item() so the logged loss is a plain float, not a tensor.
        output_items = [("test_acc", acc_v)] + \
                       [("test_loss", loss_v.item())] + \
                       [('test_prec_{}'.format(i), p_v[i]) for i in range(self.class_num)] + \
                       [('test_recall_{}'.format(i), r_v[i]) for i in range(self.class_num)] + \
                       [('test_f1_{}'.format(i), f1_v[i]) for i in range(self.class_num)]
        fitlog.add_best_metric({f"BestPerf_{test_suffix}": dict(output_items)})

if __name__ == "__main__":
    # Load the shared experiment configuration serialized by the setup script.
    with open("../../args.pkl", 'rb') as fr:
        argConfig = pickle.load(fr)
    # Use this file's path without its extension as the model/log directory.
    # BUGFIX: the original used str.rstrip(".py"), which strips any trailing
    # '.', 'p' or 'y' *characters* (e.g. "copy.py" -> "co"), not the suffix.
    argConfig.model_dir = os.path.splitext(str(__file__))[0]
    # Set all the seeds
    seed = argConfig.seed
    reconfig_args(argConfig)

    # Pick GPU 0 when CUDA is available, otherwise fall back to CPU.
    device = torch.device("cpu") if not torch.cuda.is_available() else torch.device("cuda:0")

    domainID = 2
    fewShotCnt = 100
    SentiDomainList = list(Senti_domain_map.keys())
    newDomainName = SentiDomainList[domainID-1]

    model1, tokenizer = obtain_model(args=argConfig, model_device=device)
    # NOTE(review): fewShotCnt=100 appears only in the file/suffix names
    # below, while the datasets are built with lt_count=5 -- confirm this
    # mismatch is intentional.
    labeledSource, validTarget, testTarget, labeledTarget, unlabeledTarget = obtain_domain_set(newDomainName,
                                                                                   tokenizer_M=tokenizer,
                                                                                   lt_count=5)

    trainer = BiATTrainer(random_seed=seed, log_dir=argConfig.model_dir, suffix=f"{newDomainName}_FS{fewShotCnt}",
                         model_file=f"{argConfig.model_dir}/BiAT_{newDomainName}_FS{fewShotCnt}.pth",
                         class_num=2, lambda1=1.0, lambda2=0.5, lambda3=0.05, lambda4=0.01, temperature=0.05,
                          learning_rate=2e-5, batch_size=32)
    trainer.Training(model1, labeledSource, labeledTarget, unlabeledTarget, validTarget,testTarget,
                        testTarget.labelTensor().clone(), maxStep=100000, validEvery=30)