import sys
sys.path.append("../../")
from datareader import NLI_domain_map, text_to_batch_transformer
from MetaSelfTrainFramewoks import MetaSelfTrainV11, VanillaBert, NLIDatasetV2
import pickle, math, fitlog, random, numpy as np
from model import GradientReversal, accuracy_score
import torch, torch.nn as nn, torch.nn.functional as F
from NLI_Utils import reconfig_args, CRST_Dataset, SplitDataFile
from NLI_Utils import AnyStr, List, PreTrainedTokenizer, collate_SNLI_batch_with_device
from NLI_Utils import BertTokenizer, BertConfig, BertForSequenceClassification, \
                        DistilBertConfig, DistilBertTokenizer, DistilBertForSequenceClassification
import os
from prettytable import PrettyTable
import numpy as np

def arrString2floatList(arr_str:str):
    """Parse the numeric body of a numpy ``array([...])`` repr into floats.

    format: array([0.78870674, 0.748     , 0.72161172])
    """
    inner = arr_str.split("[")[1].split("]")[0]
    return [float(piece) for piece in inner.split(",")]

def ExtractArray(line_str:str):
    """Parse one ``BestPerf`` log line into its metric components.

    Expected shape of the line:
    BestPerf :  (0.7523035230352304, (array([0.78870674, 0.748     , 0.72161172]), array([0.72287145, 0.83482143, 0.68641115]), array([0.7543554 , 0.78902954, 0.70357143]), array([599, 672, 574])), tensor(1.2319, device='cuda:0'))

    Returns (accuracy, loss, precision, recall, f1, support) where the last
    four are lists of floats, one entry per class.
    """
    payload = line_str.strip('\n').split("(", 1)[1]
    acc_str, payload = payload.split(',', 1)
    # Everything before the final "),'" belongs to the (p, r, f1, support)
    # tuple; the tail holds the loss tensor repr.
    metrics_str, tensor_str = payload.rsplit('),', 1)
    loss_str = tensor_str.split("tensor(")[1].split(",")[0]
    prec_s, rec_s, f1_s, support_s = metrics_str[1:].split("),")
    return (float(acc_str), float(loss_str),
            arrString2floatList(prec_s), arrString2floatList(rec_s),
            arrString2floatList(f1_s), arrString2floatList(support_s))

def ReadLogFile(filename):
    """Collect the per-class F1 list from every qualifying ``BestPerf`` line
    of a training log file."""
    f1_list = []
    with open(filename, 'r') as fr:
        for line in fr:
            # Only fully-formed result lines carry both markers.
            if ("BestPerf :" not in line) or ("array" not in line):
                continue
            f1_list.append(ExtractArray(line)[4])
    return f1_list


def exhibit(f1_dic):
    """Print summary tables of the three per-class F1 columns in ``f1_dic``.

    ``f1_dic`` maps a run/domain key to a list of ``[f1_c0, f1_c1, f1_c2]``
    rows (one row per checkpoint). For each class column, one table is
    printed whose column per key holds [min, mean, max, mean of the second-
    and third-to-last checkpoints].
    """
    for key in f1_dic:
        print(f"{key} : ", np.array(f1_dic[key]).shape)
    dic_keys = sorted(f1_dic.keys())
    # The original repeated this stanza three times verbatim (table_0/1/2);
    # a single loop over the class column index is equivalent.
    for col in range(3):
        table = PrettyTable()
        for key in dic_keys:
            arr = np.array(f1_dic[key])[:, col]
            table.add_column(key, [round(arr.min(), 3), round(arr.mean(), 3),
                                   round(arr.max(), 3), round(arr[-3:-1].mean(), 3)])
        print(f"table_{col}:")
        print(table)


def Generator1(dataset_1, dataset_2, dataset_3, batchSize):
    """Yield mixed batches from three datasets plus the global indices drawn.

    Each batch holds ``batchSize//2`` items from dataset_2, a small sample
    (at most 5) from dataset_3, and the remainder from dataset_1. The second
    yielded value maps every drawn local index into the concatenated index
    space [dataset_1 | dataset_2 | dataset_3]. Iteration stops once one full
    epoch of ``dataset_2.valid_indexs`` has been consumed.
    """
    print("Generator1")
    # Lazily initialise each dataset's sampling pool.
    if not hasattr(dataset_1, 'valid_indexs'):
        dataset_1.valid_indexs = list(range(len(dataset_1)))
    if not hasattr(dataset_2, 'valid_indexs'):
        dataset_2.valid_indexs = list(range(len(dataset_2)))
    if not hasattr(dataset_3, 'valid_indexs'):
        dataset_3.valid_indexs = list(range(len(dataset_3)))
    # Offsets into the concatenated index space.
    DT2_base, DT3_base = len(dataset_1), len(dataset_1) + len(dataset_2)
    # Shuffled pools, doubled so a slice may run past one epoch boundary.
    idxsDT2 = random.sample(dataset_2.valid_indexs, len(dataset_2.valid_indexs)) * 2
    idxsDT1 = random.sample(dataset_1.valid_indexs, len(dataset_1.valid_indexs)) * 2
    bs_DT3 = min(5, len(dataset_3.valid_indexs))

    start_DT2, start_DT1, end_DT2, end_DT1 = 0, 0, 0, 0
    while end_DT2 < len(dataset_2.valid_indexs):
        start_DT2, start_DT1 = end_DT2 % len(dataset_2.valid_indexs), end_DT1 % len(dataset_1.valid_indexs)
        # Re-shuffle a pool whenever its cursor has wrapped near the start.
        if start_DT1 <= batchSize // 2:
            idxsDT1 = random.sample(dataset_1.valid_indexs, len(dataset_1.valid_indexs)) * 2
        if start_DT2 <= batchSize // 2:
            idxsDT2 = random.sample(dataset_2.valid_indexs, len(dataset_2.valid_indexs)) * 2
        end_DT2 = start_DT2 + batchSize // 2
        # dataset_1 fills the remaining half minus the dataset_3 quota;
        # clamp at zero so a tiny batchSize cannot yield a negative bound
        # (which previously produced a near-full [0:-1] slice).
        end_DT1 = start_DT1 + max(batchSize // 2 - bs_DT3, 0)
        items1 = [dataset_2[jj] for jj in idxsDT2[start_DT2:end_DT2]]
        items2 = [dataset_1[jj] for jj in idxsDT1[start_DT1:end_DT1]]
        # BUG FIX: sample indices from dataset_3.valid_indexs (as the other
        # two pools do). ``random.sample(dataset_3, ...)`` returned dataset
        # ITEMS, which were then wrongly reused as indices below.
        idxs_DT3 = random.sample(dataset_3.valid_indexs, bs_DT3)
        items3 = [dataset_3[jj] for jj in idxs_DT3]
        yield dataset_2.collate_raw_batch(items1 + items2 + items3), \
              idxsDT1[start_DT1:end_DT1] + [idx + DT2_base for idx in idxsDT2[start_DT2:end_DT2]] + \
              [idx + DT3_base for idx in idxs_DT3]

def Generator2(dataset_1, dataset_2, batchSize):
    """Yield batches drawn half from dataset_2 and half from dataset_1, plus
    the concatenated-space indices of the drawn items (dataset_1 indices are
    kept as-is, dataset_2 indices are offset by len(dataset_1)). One full
    epoch of ``dataset_2.valid_indexs`` ends the iteration."""
    print("Generator2")
    if not hasattr(dataset_1, 'valid_indexs'):
        dataset_1.valid_indexs = list(range(len(dataset_1)))
    if not hasattr(dataset_2, 'valid_indexs'):
        dataset_2.valid_indexs = list(range(len(dataset_2)))
    offset = len(dataset_1)
    half = batchSize // 2
    # Shuffled pools, doubled so slices can cross an epoch boundary.
    pool2 = random.sample(dataset_2.valid_indexs, len(dataset_2.valid_indexs)) * 2
    pool1 = random.sample(dataset_1.valid_indexs, len(dataset_1.valid_indexs)) * 2

    lo2, lo1, hi2, hi1 = 0, 0, 0, 0
    while hi2 < len(dataset_2.valid_indexs):
        lo2, lo1 = hi2 % len(dataset_2.valid_indexs), hi1 % len(dataset_1.valid_indexs)
        # Re-shuffle a pool whenever its cursor has wrapped near the start.
        if lo1 <= half:
            pool1 = random.sample(dataset_1.valid_indexs, len(dataset_1.valid_indexs)) * 2
        if lo2 <= half:
            pool2 = random.sample(dataset_2.valid_indexs, len(dataset_2.valid_indexs)) * 2

        hi2, hi1 = lo2 + half, lo1 + half
        part2 = [dataset_2[jj] for jj in pool2[lo2:hi2]]
        part1 = [dataset_1[jj] for jj in pool1[lo1:hi1]]
        yield dataset_2.collate_raw_batch(part2 + part1), \
              pool1[lo1:hi1] + [idx + offset for idx in pool2[lo2:hi2]]

def Generator3(dataset_1, batchSize):
    """Yield (collated batch, index slice) pairs covering one shuffled epoch
    of dataset_1's valid indices."""
    print("Generator3")
    if not hasattr(dataset_1, 'valid_indexs'):
        dataset_1.valid_indexs = list(range(len(dataset_1)))
    total = len(dataset_1.valid_indexs)
    # Doubled shuffle so the final slice may overrun the epoch boundary.
    shuffled = random.sample(dataset_1.valid_indexs, total) * 2
    for lo in range(0, total, batchSize):
        chunk = shuffled[lo:lo + batchSize]
        yield dataset_1.collate_raw_batch([dataset_1[jj] for jj in chunk]), chunk

def DataIter(dataset_1, dataset_2=None, dataset_3=None, batch_size=32):
    """Dispatch to the generator matching how many datasets were supplied.

    Three datasets -> Generator1 (mixed three-way batches); two ->
    Generator2 (half/half batches); one -> Generator3 (plain epoch).
    Returns a generator of (batch, index_list) pairs.
    """
    if dataset_3 is not None:
        # BUG FIX: this guard previously read the script-level global
        # ``labeledTarget`` instead of the dataset actually passed in,
        # breaking any caller outside the __main__ script.
        assert len(dataset_3) > 0
        return Generator1(dataset_1, dataset_2, dataset_3, batch_size)
    elif dataset_2 is not None:
        return Generator2(dataset_1, dataset_2, batch_size)
    else:
        return Generator3(dataset_1, batch_size)


class V12NLI(CRST_Dataset):
    """NLI dataset variant used by MetaSelfTrainV12.

    Extends CRST_Dataset but performs its own load_data(): builds one-hot
    label matrices, per-example confidence/entropy buffers and a binary
    source/target domain vector, and injects random mask tokens as text
    augmentation when examples are read via get_transformed_text().
    """

    def __init__(
            self,
            dataset_path: AnyStr = None,
            domains: List = None,
            tokenizer: PreTrainedTokenizer = None,
            domain_ids: List = None,
            max_data_size: int = -1,
            load_data=True
    ):
        # Ask the parent NOT to load, then load here so this subclass's
        # load_data() override is the one that runs.
        super(V12NLI, self).__init__(dataset_path, domains, tokenizer, domain_ids, max_data_size, load_data=False)
        if load_data:
            self.load_data()

    def load_data(self):
        """Read the dataset file and materialise premise/hypothesis arrays,
        a one-hot label matrix, confidence/entropy buffers, the collate
        function and the binary domain vector."""
        dataset = self.obtainDataset(self.dataset_path, data_size=self.max_data_size)
        if self.domain_ids is not None and self.domains is not None:
            for i in range(len(self.domain_ids)):
                # NOTE(review): chained indexing assigns onto a pandas slice
                # copy, so this write almost certainly never reaches
                # ``dataset`` (SettingWithCopy) -- confirm the intent and
                # consider ``dataset.loc[mask, col] = ...`` instead.
                dataset[dataset['domain'] == NLI_domain_map[self.domains[i]]][2] = self.domain_ids[i]
        self.premise = dataset['sent1'].values
        self.hypothesis = dataset['sent2'].values
        label = torch.tensor(dataset['label'].values, dtype=torch.int32)
        # Expand integer labels into a (num_examples, num_classes) one-hot
        # float matrix, kept as a CPU numpy array for cheap row updates.
        data_y = []
        for y_idx in range(label.max().data.item() + 1):
            data_y.append(
                label.__eq__(y_idx).float()
            )
        self._label = torch.stack(data_y).T.cpu().numpy()
        self._confidence = torch.ones(len(self._label))
        self._entrophy = torch.zeros(len(self._label))
        self.domain = dataset['domain'].values
        self.read_indexs = np.arange(len(self.domain))
        device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
        self.collate_raw_batch = collate_SNLI_batch_with_device(device)
        # Collapse all domain ids to a binary source(0) / non-source(1) flag.
        self.domain = self.domain.__ne__(0).astype(np.int64)

    def text_transform(self, sent):
        # Identity transform: tokenisation happens in get_transformed_text.
        return sent

    def get_transformed_text(self, item):
        """Tokenise one (premise, hypothesis) pair and inject random mask
        tokens as augmentation; returns (input_ids, mask, seg_ids, one-hot
        label row, domain flag, item)."""
        idx = self.read_indexs[item]
        input_ids, mask, seg_ids = text_to_batch_transformer([self.premise[idx]],
                                                             self.tokenizer,
                                                             text_pair=[self.hypothesis[idx]])
        ipt_len = len(input_ids[0])
        # Roughly one inserted mask token per 10 input tokens, capped at 5.
        insert_cnt = min(math.ceil(ipt_len/10.0), 5)
        for _ in range(insert_cnt):
            rand_idx = random.randint(1, len(input_ids[0])-1)
            input_ids[0].insert(rand_idx, self.tokenizer.mask_token_id)
            mask[0].insert(rand_idx, 1)
            seg_ids[0].insert(rand_idx, seg_ids[0][rand_idx])
        return input_ids, mask, seg_ids, self._label[idx], \
                   self.domain[idx], item

class MetaSelfTrainV12(MetaSelfTrainV11):
    """Meta self-training framework, version 12.

    Extends MetaSelfTrainV11 with: soft pseudo-label interpolation controlled
    by ``beta`` (annotate), entropy-based selection of a confident "expand"
    meta set (dataSelection / expandBatchAugmentation), PGD-style adversarial
    embedding augmentation driven by a domain discriminator, and a per-batch
    instance-weight search (MetaStep) whose result weights the real optimiser
    update (OptimStep).
    """

    def __init__(self, beta, seed, alpha, topk, class_num, log_dir, suffix, weight_eta=0.1, lr4model=2e-2,
                 coeff4expandset=1.0, max_few_shot_size=20, Inner_BatchSize=5, meta_step=5, extra_step=5):
        super(MetaSelfTrainV12, self).__init__(seed, alpha, topk, class_num, log_dir, suffix, weight_eta, lr4model,
                 coeff4expandset, max_few_shot_size, Inner_BatchSize, meta_step, extra_step)
        # Interpolation weight between stored soft labels and fresh model
        # predictions in annotate().
        self.beta = beta
        # -1.0 is a sentinel meaning "best meta loss not initialised yet".
        self.minMetaLoss = -1.0
        # Rolling window of recent meta-validation accuracies (capped at ~20).
        self.meta_acc_list = []

    def annotate(self, model: VanillaBert, data:NLIDatasetV2, pseaudo_idxs=[], batch_size=20):
        """Refresh soft pseudo labels for every example NOT in pseaudo_idxs:
        mix current labels with model predictions ((1-beta)*old + beta*new)
        and renormalise each row to sum to one.

        NOTE(review): ``pseaudo_idxs=[]`` is a mutable default argument;
        harmless while never mutated, but worth replacing with None.
        """
        c_idxs = list(set(range(len(data))) - set(pseaudo_idxs))
        with torch.no_grad():
            pred_tensor = self.dataset_logits(model, data, idxs=c_idxs, batch_size=batch_size)
        p_tensor = (1 - self.beta) * data.labelTensor(device=pred_tensor.device)[c_idxs] + \
                                self.beta * pred_tensor
        pseudo_label = (p_tensor/(p_tensor.sum(dim=1).unsqueeze(-1))).cpu().numpy() # label is the accumulation of logits
        data.setLabel(pseudo_label, c_idxs)

    def entrophy_select(self, unlabeled_target:NLIDatasetV2, topk=100):
        """Return the indices of the ``topk`` lowest-entropy (i.e. most
        confident) examples according to the stored soft labels; argsort is
        ascending, so the front of the ordering is lowest entropy."""
        logits = unlabeled_target.labelTensor(device=torch.device("cuda"))
        entrophy = (-1 * logits * (logits.log())).sum(dim=1)
        topK_idxs = entrophy.argsort()[ 0 : topk ].cpu().tolist()
        return topK_idxs
 
    def dataSelection(self, unlabeled_target:NLIDatasetV2, topk=0.5):
        """Move the most confident ``topk`` fraction of unlabeled_target into
        its ``meta_idxs`` pool; previously selected meta indices are first
        returned to the ``valid_indexs`` sampling pool."""
        if not hasattr(unlabeled_target, "valid_indexs"):
            unlabeled_target.valid_indexs = list(range(len(unlabeled_target)))

        if hasattr(unlabeled_target, "meta_idxs"):
            if (unlabeled_target.meta_idxs is not None):
                unlabeled_target.valid_indexs.extend(unlabeled_target.meta_idxs)
                unlabeled_target.meta_idxs = None

        topk_idxs = self.entrophy_select(unlabeled_target, topk=int(len(unlabeled_target)*topk))
        unlabeled_target.meta_idxs = topk_idxs
        # Keep the two pools disjoint.
        unlabeled_target.valid_indexs = list(
            set(unlabeled_target.valid_indexs) - set(topk_idxs)
        )
        return unlabeled_target

    def Batch2Vecs(self, model: VanillaBert, batch):
        """Run the BERT encoder on (input_ids, mask, seg_ids, ...) and return
        the pooled representation (``pooler_output``)."""
        if batch[0].device != model.device:  # data is on a different device
            input_ids, masks, seg_ids = batch[0].to(model.device), \
                                        batch[1].to(model.device), \
                                        batch[2].to(model.device)
        else:
            input_ids, masks, seg_ids = batch[0], batch[1], batch[2]
        encoder_dict = model.bert.bert.forward(
            input_ids=input_ids,
            attention_mask=masks,
            token_type_ids=seg_ids
        )
        return encoder_dict.pooler_output

    def discriminatorLoss(self, trModel:VanillaBert, discriminator, batch,
                                    temperature=1.0, gradient_reversal=True):
        """NLL domain-classification loss (and accuracy) on norm-scaled pooled
        encodings; with gradient_reversal the encoder is trained adversarially
        against the discriminator. batch[-1] holds the domain labels."""
        pooledOutput = self.Batch2Vecs(trModel, batch)
        if gradient_reversal:
            pooledOutput = GradientReversal.apply(pooledOutput)
        normedPoolOut = pooledOutput/(pooledOutput.norm())
        logits = discriminator(normedPoolOut)
        preds = F.softmax(logits/temperature, dim=1)
        # Nudge probabilities away from exact zero before taking the log.
        epsilon = torch.ones_like(preds) * (1e-8)
        preds = (preds - epsilon).abs()
        loss = F.nll_loss(preds.log(), batch[-1].to(preds.device))
        acc = accuracy_score(batch[-1].cpu().numpy(), preds.argmax(dim=1).cpu().numpy())
        return loss, acc

    def PreTrainDomainClassifier(self, trModel:VanillaBert, discriminator:nn.Module,
                                    labeled_source : NLIDatasetV2, labeled_target : NLIDatasetV2,
                                    unlabeled_target : NLIDatasetV2, maxEpoch):
        """Warm up the domain discriminator until the mean of the last 20
        losses drops below 0.2, then snapshot it to ``bestDCLS.pkl``."""
        # the rule for early stop: when the variance of the recent 50 training loss is smaller than 0.05,
        # the training process will be stopped
        optim = torch.optim.Adam([{
             'params': discriminator.parameters(),
             'lr': 2e-3
        }])
        lossList = []
        max_iters = max([(2*len(labeled_source))//self.batch_size, (2*len(unlabeled_target))//self.batch_size])
        for epoch in range(maxEpoch):
            step = 0
            for batch, _ in DataIter(unlabeled_target, dataset_2=labeled_source,
                                           dataset_3=labeled_target, batch_size=self.batch_size):
                DLoss, DAcc = self.discriminatorLoss(trModel, discriminator, batch)
                optim.zero_grad()
                DLoss.backward()
                optim.step()
                torch.cuda.empty_cache()
                print('####Pre Train Domain Classifier (%3d | %3d) %3d | %3d ####, loss = %6.8f, Acc = %6.8f' % (
                    step, max_iters, epoch, maxEpoch, DLoss.data.item(), DAcc
                ))
                lossList.append(DLoss.data.item())
                if len(lossList) > 20:
                    lossList.pop(0)
                    # Early stop on a low rolling-mean loss.
                    if np.mean(lossList) < 0.2:
                        torch.save(discriminator.state_dict(), "bestDCLS.pkl")
                        return
                step += 1

    def PreTrainNLIClassifier(self, trModel:VanillaBert, labeled_source : NLIDatasetV2, labeled_target : NLIDatasetV2,
                                    unlabeled_target : NLIDatasetV2, maxEpoch):
        """Warm up the NLI classifier on labeled_source only until the mean
        of the last 20 losses drops below 0.5, then snapshot to initNLI.pkl."""
        # the rule for early stop: when the variance of the recent 50 training loss is smaller than 0.05,
        # the training process will be stopped
        optim = self.obtainOptim(trModel, self.lr4model)
        lossList = []
        max_iters = max([(2*len(labeled_source))//self.batch_size, (2*len(unlabeled_target))//self.batch_size])
        for epoch in range(maxEpoch):
            step = 0
            for batch, _ in DataIter(labeled_source, dataset_2=None,
                                           dataset_3=None, batch_size=self.batch_size):
                DLoss, DAcc = trModel.lossAndAcc(batch)
                optim.zero_grad()
                DLoss.backward()
                optim.step()
                torch.cuda.empty_cache()
                # NOTE(review): np.mean(lossList) is nan (with a runtime
                # warning) on the very first step, while lossList is empty.
                print('####Pre Train NLI Classifier (%3d | %3d) %3d | %3d ####, loss = %6.8f, Acc = %6.8f, mean_loss = %6.8f' % (
                    step, max_iters, epoch, maxEpoch, DLoss.data.item(), DAcc, np.mean(lossList).item()
                ))
                lossList.append(DLoss.data.item())
                if len(lossList) > 20:
                    lossList.pop(0)
                    if np.mean(lossList) < 0.5:
                        torch.save(trModel.state_dict(), "./initNLI.pkl")
                        return
                step += 1

    def expandBatchAugmentation(self, unlabeled_target:V12NLI):
        """Materialise the expand meta set: the selected ``meta_idxs``
        examples, re-tokenised with random mask insertion, grouped into
        collated batches of at most ``max_few_shot_size`` items each."""
        return [
            unlabeled_target.collate_raw_batch(
            [unlabeled_target.get_transformed_text(unlabeled_target.meta_idxs[kk])
            # [unlabeled_target[unlabeled_target.meta_idxs[kk]]
             for kk in range(start_k, min(len(unlabeled_target.meta_idxs), start_k + self.max_few_shot_size), 1)]
            ) for start_k in range(0, len(unlabeled_target.meta_idxs)-1, self.max_few_shot_size)
        ]

    def Batch2ExpandData(self, f_batch=None, e_batch_list=None):
        """Cache the few-shot batch and/or the expand-set batches, refreshing
        the (uniform, size-scaled) class weights used for the expand loss."""
        # extract out the expand data
        if f_batch is not None:
            self.few_shot_data = f_batch
        if e_batch_list is not None:
            print("- - - - - update expand data - - ->")
            e_label = torch.cat([batch[-2] for batch in e_batch_list])
            # Same weight 1/|expand set| for every class.
            self.e_label_weight = torch.tensor(
                [1.0 / len(e_label) for _ in range(self.class_num)],
                device=self.device
            )
            self.expand_data_list = e_batch_list

    def updateGradDicts(self, model, batch, label_weight, coeff=1.0, GradDicts=None, Aug=False):
        """Backprop ``coeff * loss`` on one batch and fold the parameter
        gradients into GradDicts (created on first call).

        Returns (loss_value, accuracy, GradDicts).
        """
        # sum the loss with the averaged weights or the biased weights
        # will allow computing the mean risk on any size of few shot dataset
        if Aug:
            self.clsAdverAugmentation(model, batch, PGD_step=5)
        loss, acc = self.lossAndAcc(model, batch, label_weight=label_weight, reduction="sum")
        model.zero_grad()
        (coeff*loss).backward()
        if GradDicts is None:
            GradDicts = {n: p.grad.clone() for n, p in model.named_parameters()}
        else:
            for n, p in model.named_parameters():
                GradDicts[n] += p.grad.data
        # Switch the embedding-level augmentation flag back off.
        model.bert.bert.embeddings.aug_type = None
        torch.cuda.empty_cache()
        return loss.data.item(), acc, GradDicts

    def meanGradOnValSet(self, model, few_shot_data=None, few_shot_data_list=None):
        """Accumulate gradients of the meta-validation loss over the few-shot
        data (and, when active, the expand set); returns (grad_dicts, loss,
        acc) and records the accuracy in the rolling ``meta_acc_list``."""
        assert few_shot_data is not None or few_shot_data_list is not None
        grad_dicts = None
        if few_shot_data_list is None:
            loss, acc, grad_dicts = self.updateGradDicts(model, few_shot_data, self.f_label_weight,
                                                            1.0, grad_dicts, Aug=True)
        else:
            print("-------> few shot data list ------>")
            f_loss_list, f_acc_list = [], []
            for i, few_data in enumerate(few_shot_data_list):
                f_loss, f_acc, grad_dicts = self.updateGradDicts(model, few_data, self.f_label_weight,
                                                                    1.0, grad_dicts, Aug=True)
                f_loss_list.append(f_loss)
                f_acc_list.append(f_acc)
            loss, acc = np.sum(f_loss_list), np.mean(f_acc_list)

        if len(self.meta_acc_list) > 20:
            self.meta_acc_list.pop(0)
        self.meta_acc_list.append(acc)

        if (self.expand_data_list is not None) and (self.valid_counter > 1):
            if len(self.expand_data_list) > 0:
                assert hasattr(self, 'e_label_weight')
                print("-------> expand data list ------>")
                e_loss_list, e_acc_list = [], []
                for i, e_data in enumerate(self.expand_data_list):
                    # NOTE(review): ``coefff4expandset`` (three f's) is not
                    # set in this class; it must come from the parent --
                    # verify the spelling matches the parent's attribute.
                    e_loss, e_acc, grad_dicts = self.updateGradDicts(model, e_data, self.e_label_weight,
                                                                     self.coefff4expandset*np.log(self.valid_counter+1),
                                                                     grad_dicts)
                    e_loss_list.append(e_loss)
                    e_acc_list.append(e_acc)
                exp_loss, exp_acc = np.sum(e_loss_list), np.mean(e_acc_list)
                print(f"##Perf on Meta Expand Set##  : exp_loss/exp_acc = {exp_loss}/{exp_acc}")
                return grad_dicts, \
                       loss + self.coefff4expandset*exp_loss, \
                       acc
        # NOTE(review): when the expand branch above does not return, the
        # accumulated grad_dicts is overwritten from the model's current
        # .grad tensors here -- confirm this is intentional.
        grad_dicts = {n: p.grad.clone() for n, p in model.named_parameters()}
        return grad_dicts, loss, acc

    def domainAdverAugmentation(self, model, batch, PGD_step=3):
        """Accumulate PGD_step gradient passes of the (non-reversed) domain
        discriminator loss; ``aug_type = "adver"`` is presumably consumed by
        a patched embedding layer to perturb inputs -- confirm upstream."""
        assert hasattr(self, "discriminator")
        model.zero_grad()
        model.bert.bert.embeddings.aug_type = "adver"
        for _ in range(PGD_step):
            loss, acc = self.discriminatorLoss(model, self.discriminator, batch, gradient_reversal=False)
            loss.backward()

    def clsAdverAugmentation(self, model, batch, PGD_step=3):
        """Same PGD-style gradient accumulation as domainAdverAugmentation,
        but against the NLI classification loss."""
        assert hasattr(self, "discriminator")
        model.zero_grad()
        model.bert.bert.embeddings.aug_type = "adver"
        for _ in range(PGD_step):
            loss, acc = model.lossAndAcc(batch)
            loss.backward()

    def MetaStep(self, model:VanillaBert, optim:torch.optim, batch,
                    weight:torch.Tensor, few_data=None, few_data_list=None):
        """Search better per-instance weights for ``batch``.

        Runs up to ``meta_step`` virtual updates: each step evaluates the
        weighted update on the meta-validation sets (MetaValidation), keeps
        the best weights seen so far, restores the model and moves the
        weights along the normalised negative gradient of the meta loss.

        Returns (steps_taken, best_weights) -- best_weights may be None, or
        the unchanged input weights when meta_step <= 5 and no improvement
        was found.
        """
        if (few_data is None) and (few_data_list is None):
            assert hasattr(self, "few_shot_data")
            assert hasattr(self, "few_shot_data_list")
            few_data, few_data_list = self.few_shot_data, self.few_shot_data_list

        self.epsilon = 1e-5
        # Snapshot parameters so every virtual update can be rolled back.
        initStateDicts = model.state_dict()
        initStateDicts = {key: initStateDicts[key].clone() for key in initStateDicts}
        weight_best = None
        minMetaLoss = self.minMetaLoss
        optim_step = self.meta_step
        for step in range(self.meta_step):
            u = weight#.sigmoid()
            fewLoss, fewAcc = self.MetaValidation(model, optim, batch, u, few_data, few_data_list)
            if (step == 0) and (minMetaLoss==-1):
                minMetaLoss = fewLoss

            if fewLoss < minMetaLoss:
                minMetaLoss = fewLoss
                # If the PreMetaLoss is not updated, then the weight_best might be updated after the few loss is worser
                weight_best = weight.clone()
                if optim_step == self.meta_step:
                    optim_step = step

            print(f"##Perf on Meta Val Set## {step} | {self.meta_step} :  \
                        loss/acc = {fewLoss}/{fewAcc}, PreFewLoss={self.minMetaLoss}")
            if step == 0:
                fitlog.add_metric({"meta_valid_loss" : fewLoss}, step=self.counter)
                fitlog.add_metric({"meta_valid_acc" : fewAcc}, step=self.counter)
                self.counter += 1

            # Stop after ``extra_step`` steps past the first improvement, or
            # at the step budget.
            if (step == optim_step + self.extra_step) or ((step+1)==self.meta_step):
                break

            model.load_state_dict(initStateDicts)
            u_grads = self.ComputeGrads4Weights(model, batch, self.few_shot_data, self.few_shot_data_list)
            w_grads = u_grads#*u*(1-u)
            # Normalised descent direction on the instance weights.
            weightGrads = -1*w_grads / (w_grads.norm(2) + 1e-8)
            # weightGrads = -1* w_grads#.sign()
            update = self.weight_eta * weightGrads
            weight = weight - update#*(weight_mask.to(update.device))

        # Evaluate the final weights once more outside the loop.
        u = weight#.sigmoid()
        fewLoss, fewAcc = self.MetaValidation(model, optim, batch, u, few_data, few_data_list)
        if fewLoss < minMetaLoss:
            minMetaLoss = fewLoss
            weight_best = weight.clone()

        if (self.minMetaLoss == -1) or (minMetaLoss < self.minMetaLoss):
            self.minMetaLoss = minMetaLoss
        model.load_state_dict(initStateDicts)
        if (self.meta_step <= 5) and (weight_best is None):
            return step+1, weight
        return step+1, weight_best

    def OptimStep(self, model, model_optim, batch, weight):
        """Apply one real optimiser step on ``batch``, weighting per-instance
        losses by the meta-learned weights clamped to non-negative values."""
        self.domainAdverAugmentation(model, batch, PGD_step=3)
        loss = self.LossList(model, batch)
        # sumLoss = ((weight.sigmoid()) * loss).sum()
        new_weight = weight * (weight.__gt__(0).float())
        sumLoss = (new_weight * loss).sum()
        model_optim.zero_grad()
        sumLoss.backward()
        model_optim.step()
        model.bert.bert.embeddings.aug_type = None

    def Training(self, model:VanillaBert, unlabeled_target:NLIDatasetV2, valid_set:NLIDatasetV2, UT_Label,
                 labeled_source:NLIDatasetV2=None, labeled_target:NLIDatasetV2=None,
                 max_epoch=100, max_valid_every=100, few_update_every=10,
                 model_file="./tmp.pkl"):
        """Main self-training loop: annotate pseudo labels, pick the confident
        expand set, then alternate MetaStep weight search and weighted
        OptimStep updates over labeled_source batches, re-annotating every
        ``max_valid_every`` steps. Exits once the meta loss or rolling meta
        accuracy crosses its threshold (or the step cap is hit)."""
        self.initTrainingEnv()
        meta_optim = torch.optim.SGD(model.parameters(), lr=2*self.lr4model)
        model_optim = self.obtainOptim(model, self.lr4model)
        if not hasattr(self, "raw_weights"):
            # One weight per example in the concatenated index space
            # [unlabeled_target | labeled_source | labeled_target]; labeled
            # target examples start with a strong positive weight.
            weights = [0.0]*len(unlabeled_target) + \
                        ([] if labeled_source is None else [0.0]*len(labeled_source)) + \
                            ([] if labeled_target is None else [10.0]*len(labeled_target))
            self.raw_weights = torch.tensor(weights, device=self.device)
            print("raw_weights : ", len(self.raw_weights))

        step = 0
        self.annotate(model, unlabeled_target)
        # Keep roughly 200 most confident examples as the expand meta set.
        self.dataSelection(unlabeled_target, topk=200.0/len(unlabeled_target))
        self.few_shot_data, self.few_shot_data_list = self.FewShotDataList(valid_set)
        e_batch_list = self.expandBatchAugmentation(unlabeled_target)
        print("e_batch_list length : ", len(e_batch_list))
        self.Batch2ExpandData(None, e_batch_list)
        for epoch in range(max_epoch):
            for batch, indices in DataIter(dataset_1=labeled_source, dataset_2=None,
                                                 dataset_3=None, batch_size=self.batch_size):
                weights = self.raw_weights[indices].clone()
                meta_step_count, new_weights = self.MetaStep(model, meta_optim, batch, weights)
                if new_weights is not None:
                    self.raw_weights[indices] = new_weights
                    self.OptimStep(model, model_optim, batch, new_weights)
                else:
                    # No better weights found: fall back to a plain update.
                    self.domainAdverAugmentation(model, batch)
                    loss, acc = model.lossAndAcc(batch)
                    # NOTE(review): no model_optim.zero_grad() before this
                    # backward -- gradients accumulated by the augmentation
                    # above flow into this step; confirm that is intended.
                    loss.backward()
                    model_optim.step()
                    model.bert.bert.embeddings.aug_type = None
                    self.minMetaLoss = -1

                step += 1
                mean_acc = np.mean(self.meta_acc_list)
                print(f"step = {step} | mean meta acc: {mean_acc}")
                if (step) % max_valid_every == 0:
                    # Periodic evaluation + pseudo-label refresh, then restart
                    # the epoch with the new expand set.
                    self.valid(model, unlabeled_target, UT_Label, self.suffix, step)
                    self.annotate(model, unlabeled_target)
                    self.dataSelection(unlabeled_target, topk=200.0 / len(unlabeled_target))
                    e_batch_list = self.expandBatchAugmentation(unlabeled_target)
                    print("====>\n #### Self Training Alternate #### e_batch_list length : ", len(e_batch_list))
                    self.Batch2ExpandData(None, e_batch_list)
                    break

                # Convergence / budget exit conditions.
                if (math.fabs(self.minMetaLoss)< 0.2 ) or \
                            (step > (max_valid_every*max_valid_every)) or \
                                (np.mean(self.meta_acc_list)>0.80):
                    unlabeled_target.valid_indexs.extend(unlabeled_target.meta_idxs)
                    unlabeled_target.meta_idxs = None
                    self.minMetaLoss = -1.0
                    return

def obtain_domain_set(new_domain_name, tokenizer_M, few_shot_cnt=100, lt_count=0):
    """Load the SNLI source set plus the validation/test (and optional
    labeled-target) splits for one MultiNLI target domain.

    Returns (SNLI_set, val_set, test_set, labeled_target); labeled_target is
    None unless lt_count > 0, in which case the first lt_count examples of
    the domain file are split off as a labeled target set.
    """
    SNLI_set = V12NLI("../../../snli_1.0/snli_1.0_train.jsonl", tokenizer=tokenizer_M)
    testFile = f"../../../multinli_1.0/Domain_{new_domain_name}.jsonl"
    if lt_count == 0:
        labeled_target = None
        test_set = V12NLI(testFile, tokenizer=tokenizer_M)
    else:
        filenames = SplitDataFile(testFile, accumulation=[lt_count, -1])
        labeled_target = V12NLI(filenames[0], tokenizer=tokenizer_M)
        test_set = V12NLI(filenames[1], tokenizer=tokenizer_M)
    val_set = V12NLI(f"../../../multinli_1.0/fewshot_Domain_{new_domain_name}.jsonl",
                     tokenizer=tokenizer_M, max_data_size=few_shot_cnt)
    return SNLI_set, val_set, test_set, labeled_target

def obtain_model(args, model_device):
    """Build the (full or distilled) BERT NLI classifier, its tokenizer and a
    two-way domain discriminator, all placed on ``model_device``.

    A custom checkpoint path (args.bertPath / args.distillBertPath) overrides
    the default hub model name when provided.
    """
    if args.full_bert:
        pretrained = 'bert-base-uncased' if args.bertPath is None else args.bertPath
        bert_config = BertConfig.from_pretrained(pretrained, num_labels=3)
        tokenizer_M = BertTokenizer.from_pretrained(pretrained)
    else:
        pretrained = 'distilbert-base-uncased' if args.distillBertPath is None else args.distillBertPath
        bert_config = DistilBertConfig.from_pretrained(pretrained, num_labels=3)
        tokenizer_M = DistilBertTokenizer.from_pretrained(pretrained)
    bert_config.num_labels = 3
    bert_config.hidden_act = "relu"
    # Create the model
    if args.full_bert:
        bert = BertForSequenceClassification.from_pretrained(pretrained, config=bert_config).to(model_device)
    else:
        bert = DistilBertForSequenceClassification.from_pretrained(pretrained, config=bert_config).to(model_device)
    model = VanillaBert(bert).to(model_device)

    # two category: source domain, target domain
    discriminator = nn.Sequential(nn.Linear(bert_config.hidden_size, bert_config.hidden_size * 2),
                                  nn.ReLU(),
                                  nn.Linear(bert_config.hidden_size * 2, 2)).to(model_device)
    return model, tokenizer_M, discriminator


def valid(self, model, test_set, test_label, test_suffix, step=0):
    """Evaluate ``model`` on ``test_set``, push per-class metrics to fitlog
    under ``test_suffix`` and bump the trainer's validation counter.

    Returns the raw (acc, (p, r, f1, support)) tuple from self.perf.
    """
    rst_model = self.perf(model, test_set, test_label)
    acc_v, (p_v, r_v, f1_v, _) = rst_model
    print("self train loop {}| step = {} : ".format(self.self_train_loop_cnt, self.valid_counter), rst_model)
    # Same key order as before: acc, then all precisions, recalls, f1s.
    metrics = {"valid_acc": acc_v}
    for i in range(self.class_num):
        metrics['valid_prec_{}'.format(i)] = p_v[i]
    for i in range(self.class_num):
        metrics['valid_recall_{}'.format(i)] = r_v[i]
    for i in range(self.class_num):
        metrics['valid_f1_{}'.format(i)] = f1_v[i]
    fitlog.add_metric({f"{test_suffix}": metrics}, step=self.valid_counter)
    self.valid_counter += 1
    return rst_model

if __name__ == "__main__":
    # Restore the experiment arguments serialised by the launcher script.
    with open("../../args.pkl", 'rb') as fr:
        argConfig = pickle.load(fr)
    # BUG FIX: rstrip(".py") strips any trailing '.', 'p' or 'y' CHARACTERS
    # (e.g. "copy.py" -> "co"); remove the exact ".py" suffix instead.
    script_path = str(__file__)
    argConfig.model_dir = script_path[:-len(".py")] if script_path.endswith(".py") else script_path
    # Set all the seeds
    seed = argConfig.seed
    reconfig_args(argConfig)

    # See if CUDA available
    device = torch.device("cpu") if not torch.cuda.is_available() else torch.device("cuda:0")
    model1_path = "../../saved/modelSNLI_1.pth"

    domain_ID = 2
    fewShotCnt = 100
    NLIDomainList = list(NLI_domain_map.keys())
    newDomainName = NLIDomainList[domain_ID-1]

    # Load the SNLI-pretrained NLI classifier used for zero-shot evaluation.
    model1, tokenizer, _ = obtain_model(args=argConfig, model_device=device)
    model1.load_state_dict(torch.load(model1_path))
    trainer = MetaSelfTrainV12(0.2, seed, alpha=0.1, topk=0.2, class_num=3, log_dir=argConfig.model_dir,
                                 suffix="{}_FS{}".format(newDomainName, fewShotCnt),
                                 weight_eta=1.0, lr4model=5e-5, coeff4expandset=0.2, max_few_shot_size=50,
                                 Inner_BatchSize=32, meta_step=5, extra_step=0)

    # Evaluate the pretrained model on every MultiNLI target domain and
    # collect accuracy plus per-class precision / recall / F1 tables.
    table_f = PrettyTable()
    table_a = PrettyTable()
    table_p = PrettyTable()
    table_r = PrettyTable()
    for domainID in range(1, 11):
        newDomainName = NLIDomainList[domainID - 1]
        print("newDomainName: ", newDomainName)
        labeledSource, validTarget, unlabeledTarget, labeledTarget = obtain_domain_set(newDomainName,
                                                                                       tokenizer_M=tokenizer,
                                                                                       lt_count=0)
        # Gold labels for the target split (argmax of one-hot label rows).
        ULabel = unlabeledTarget.labelTensor()
        ULabel = ULabel.argmax(dim=1) if ULabel.dim() == 2 else ULabel.clone()
        rst_model = valid(trainer, model1, unlabeledTarget, ULabel, trainer.suffix, 0)
        acc_v, (p_v, r_v, f1_v, _) = rst_model
        # Each column: per-class values followed by the class mean.
        table_a.add_column(newDomainName, [round(acc_v, 3)])
        table_p.add_column(newDomainName, [round(val, 3) for val in p_v.tolist()] + [round(p_v.mean(), 3)])
        table_r.add_column(newDomainName, [round(val, 3) for val in r_v.tolist()] + [round(r_v.mean(), 3)])
        table_f.add_column(newDomainName, [round(val, 3) for val in f1_v.tolist()] + [round(f1_v.mean(), 3)])
    print("--------------------- accuracy table -----------------------")
    print(table_a)

    print("--------------------- precision table -----------------------")
    print(table_p)

    print("--------------------- recall table -----------------------")
    print(table_r)

    print("--------------------- f1 table -----------------------")
    print(table_f)