import sys
sys.path.append("../../")
sys.path.append("../")
from Dataloader.dataloader_utils import Lemma_Factory
from Dataloader.weiboloader import BiGCNWeiboSet
from SentModel.Sent2Vec import TFIDFBasedVec_CN
from PropModel.GraphPropagation import BiGCN
from RumdetecFramework.GraphRumorDect import BiGCNRumorDetec
from MetaSelfTrainFramewoks_Weibo import MSTDataset, GradientReversal
from RumdetecFramework.BaseRumorFramework import RumorDetection
from RumdetecFramework.InstanceReweighting import InstanceReweightingV4
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics import accuracy_score,precision_recall_fscore_support
import pickle, os, random, fitlog, pandas as pd, numpy as np
import torch, torch.nn as nn, torch.nn.functional as F
from tqdm import trange

def Generator1(dataset_1, dataset_2, dataset_3, batchSize):
    """Jointly iterate three datasets, yielding mixed collated batches.

    Each batch contains ``batchSize`` items: half from ``dataset_2``, a
    small fixed chunk (up to 5) from ``dataset_3``, and the remainder from
    ``dataset_1``.  Alongside the batch, the global indices of the sampled
    items are yielded, where ``dataset_2`` indices are offset by
    ``len(dataset_1)`` and ``dataset_3`` indices by
    ``len(dataset_1) + len(dataset_2)``.  One full pass over
    ``dataset_2.valid_indexs`` ends the iteration.
    """
    print("Generator1")
    # ensure every dataset exposes the subset of indices it may draw from
    for ds in (dataset_1, dataset_2, dataset_3):
        if not hasattr(ds, 'valid_indexs'):
            ds.valid_indexs = list(range(len(ds)))
    DT2_base, DT3_base = len(dataset_1), len(dataset_1) + len(dataset_2)
    # doubled shuffles let a half-batch slice safely run past the end
    idxsDT2 = random.sample(dataset_2.valid_indexs, len(dataset_2.valid_indexs)) * 2
    idxsDT1 = random.sample(dataset_1.valid_indexs, len(dataset_1.valid_indexs)) * 2
    bs_DT3 = min(5, len(dataset_3.valid_indexs))

    start_DT2, start_DT1, end_DT2, end_DT1 = 0, 0, 0, 0
    while end_DT2 < len(dataset_2.valid_indexs):
        start_DT2 = end_DT2 % len(dataset_2.valid_indexs)
        start_DT1 = end_DT1 % len(dataset_1.valid_indexs)
        # reshuffle whenever a cursor wrapped close to the beginning
        if start_DT1 <= batchSize // 2:
            idxsDT1 = random.sample(dataset_1.valid_indexs, len(dataset_1.valid_indexs)) * 2
        if start_DT2 <= batchSize // 2:
            idxsDT2 = random.sample(dataset_2.valid_indexs, len(dataset_2.valid_indexs)) * 2
        end_DT2 = start_DT2 + batchSize // 2
        end_DT1 = start_DT1 + batchSize // 2 - bs_DT3
        items1 = [dataset_1[jj] for jj in idxsDT1[start_DT1:end_DT1]]
        items2 = [dataset_2[jj] for jj in idxsDT2[start_DT2:end_DT2]]
        # BUG FIX: sample *indices* from valid_indexs (as for the other two
        # datasets), not raw items from the dataset itself — the values are
        # used as indices below and as offsets in the yielded index list
        idxs_DT3 = random.sample(dataset_3.valid_indexs, bs_DT3)
        items3 = [dataset_3[jj] for jj in idxs_DT3]
        yield (dataset_2.collate_raw_batch(items1 + items2 + items3),
               idxsDT1[start_DT1:end_DT1]
               + [idx + DT2_base for idx in idxsDT2[start_DT2:end_DT2]]
               + [idx + DT3_base for idx in idxs_DT3])

def Generator2(dataset_1, dataset_2, batchSize):
    """Jointly iterate two datasets, yielding mixed collated batches.

    Each batch contains ``batchSize`` items, half from each dataset.
    Yields ``(collated_batch, global_indices)`` where ``dataset_2`` indices
    are offset by ``len(dataset_1)``.  One full pass over
    ``dataset_2.valid_indexs`` ends the iteration.
    """
    print("Generator2")
    # ensure both datasets expose the subset of indices they may draw from
    for ds in (dataset_1, dataset_2):
        if not hasattr(ds, 'valid_indexs'):
            ds.valid_indexs = list(range(len(ds)))
    DT2_base = len(dataset_1)
    # doubled shuffles let a half-batch slice safely run past the end
    idxsDT2 = random.sample(dataset_2.valid_indexs, len(dataset_2.valid_indexs)) * 2
    idxsDT1 = random.sample(dataset_1.valid_indexs, len(dataset_1.valid_indexs)) * 2

    start_DT2, start_DT1, end_DT2, end_DT1 = 0, 0, 0, 0
    while end_DT2 < len(dataset_2.valid_indexs):
        start_DT2 = end_DT2 % len(dataset_2.valid_indexs)
        start_DT1 = end_DT1 % len(dataset_1.valid_indexs)
        # reshuffle whenever a cursor wrapped close to the beginning
        if start_DT1 <= batchSize // 2:
            idxsDT1 = random.sample(dataset_1.valid_indexs, len(dataset_1.valid_indexs)) * 2
        if start_DT2 <= batchSize // 2:
            idxsDT2 = random.sample(dataset_2.valid_indexs, len(dataset_2.valid_indexs)) * 2

        end_DT2, end_DT1 = start_DT2 + batchSize // 2, (start_DT1 + batchSize // 2)
        items1 = [dataset_1[jj] for jj in idxsDT1[start_DT1:end_DT1]]
        items2 = [dataset_2[jj] for jj in idxsDT2[start_DT2:end_DT2]]
        # FIX: removed a stray trailing comma + backslash continuation that
        # dangled onto the following blank line; yield a plain 2-tuple
        yield (dataset_2.collate_raw_batch(items1 + items2),
               idxsDT1[start_DT1:end_DT1] + [idx + DT2_base for idx in idxsDT2[start_DT2:end_DT2]])

def Generator3(dataset_1, batchSize):
    """Yield ``(collated_batch, indices)`` over one shuffled pass of
    ``dataset_1.valid_indexs`` in chunks of ``batchSize``."""
    print("Generator3")
    if not hasattr(dataset_1, 'valid_indexs'):
        dataset_1.valid_indexs = list(range(len(dataset_1)))
    n_valid = len(dataset_1.valid_indexs)
    # doubled shuffle so the last slice may run past n_valid without truncating
    shuffled = random.sample(dataset_1.valid_indexs, n_valid) * 2
    for lo in range(0, n_valid, batchSize):
        chunk = shuffled[lo:lo + batchSize]
        collated = dataset_1.collate_raw_batch([dataset_1[j] for j in chunk])
        yield collated, chunk

def DataIter(dataset_1, dataset_2=None, dataset_3=None, batch_size=32):
    """Dispatch to the generator matching how many datasets were supplied:
    three -> Generator1, two -> Generator2, one -> Generator3."""
    if dataset_3 is None:
        if dataset_2 is None:
            return Generator3(dataset_1, batch_size)
        return Generator2(dataset_1, dataset_2, batch_size)
    assert len(dataset_3) > 0
    return Generator1(dataset_1, dataset_2, dataset_3, batch_size)

def pred_Logits(model:RumorDetection, data, idxs=None, batch_size=20):
    """Run `model` over `data` (optionally restricted to `idxs`) in
    mini-batches, without gradients, and return the stacked outputs."""
    if idxs is None:
        idxs = list(range(len(data)))
    chunks = []
    with torch.no_grad():
        for lo in trange(0, len(idxs), batch_size):
            hi = min(len(idxs), lo + batch_size)
            batch = data.collate_raw_batch([data[k] for k in idxs[lo:hi]])
            chunks.append(model.forward(batch))
    return torch.cat(chunks)

def prediction(model:RumorDetection, data, idxs=None, batch_size=20):
    """Return (predicted class index, its score) for every evaluated item."""
    logits = pred_Logits(model, data, idxs, batch_size)
    sorted_vals, sorted_idxs = logits.sort(dim=1)
    # last column of an ascending sort == argmax / max
    return sorted_idxs[:, -1], sorted_vals[:, -1]

def acc_P_R_F1(y_true, y_pred):
    """Accuracy plus per-class (precision, recall, F1, support) tuples."""
    y_pred_cpu = y_pred.cpu()
    accuracy = accuracy_score(y_true, y_pred_cpu)
    prf_support = precision_recall_fscore_support(y_true, y_pred_cpu)
    return accuracy, prf_support

def Perf(model:RumorDetection, data, label, idxs=None, batch_size=20):
    """Evaluate `model` on `data` against `label` (optionally on a subset)."""
    y_pred, _ = prediction(model, data, idxs=idxs, batch_size=batch_size)
    if idxs is None:
        y_true = label
    else:
        y_true = label[idxs]
    return acc_P_R_F1(y_true, y_pred)

class WindTrainer(InstanceReweightingV4):
    """Instance-reweighting trainer driven by a small meta-validation set.

    Every training example carries a scalar weight; ``sigmoid(weight)`` is
    its effective loss coefficient.  ``MetaStep`` refines these weights by
    probing gradients against the few-shot validation data, then
    ``OptimStep`` trains the model with the refreshed weights.  Metrics are
    logged via fitlog.
    """

    def __init__(self, class_num, log_dir, suffix, weight_eta=0.1, lr4model=2e-2,
                 coeff4expandset=1.0, max_few_shot_size=20, Inner_BatchSize=5, meta_step=5):
        super(WindTrainer, self).__init__(class_num, lr4model, coeff4expandset, max_few_shot_size,
                                                      Inner_BatchSize)
        self.log_dir = log_dir
        fitlog.set_log_dir(log_dir, new_log=True)
        self.suffix = suffix
        # step size for the instance-weight updates in MetaStep
        self.weight_eta = weight_eta
        self.best_valid_acc = 0.0
        # number of inner weight-update iterations per MetaStep call
        self.meta_step = meta_step

    def MetaStep(self, model:RumorDetection, optim:torch.optim, batch,
                    weight:torch.Tensor, weight_mask):
        """Refine the instance weights of `batch` via meta-gradients.

        For each of ``self.meta_step`` iterations: take one optimizer step
        with the current sigmoid weights, measure loss/accuracy/gradients on
        the few-shot validation data, roll the model back to its snapshot,
        then move ``weight`` against the normalised weight gradient.
        Entries where ``weight_mask`` is 0 are left untouched.  Returns the
        updated weight tensor; the model is left restored to its snapshot
        (load_state_dict runs before each weight update, including the last).
        """
        assert hasattr(self, "few_shot_data")
        assert hasattr(self, "few_shot_data_list")
        # snapshot the parameters so every inner iteration starts identically
        initStateDicts = model.state_dict()
        initStateDicts = {key: initStateDicts[key].clone() for key in initStateDicts}
        for step in range(self.meta_step):
            # effective per-instance loss coefficients
            u = weight.sigmoid()
            model.zero_grad()
            loss = self.LossList(model, batch)
            sumLoss = (u * loss).sum()
            sumLoss.backward()
            optim.step()
            # loss/accuracy and gradients on the few-shot meta-validation set,
            # evaluated with the just-updated model
            self.val_grad_dicts, fewLoss, fewAcc = self.meanGradOnValSet(model,
                                                                         few_shot_data=self.few_shot_data,
                                                                         few_shot_data_list=self.few_shot_data_list)
            print(f"##Perf on Meta Val Set## {step} | {self.meta_step} :  loss/acc = {fewLoss}/{fewAcc}")
            # roll the model back before estimating the weight gradients
            model.load_state_dict(initStateDicts)
            u_grads = self.ComputeGrads4Weights(model, batch, self.few_shot_data, self.few_shot_data_list)
            # chain rule through u = sigmoid(weight): du/dw = u * (1 - u)
            w_grads = u_grads*u*(1-u)
            # descend along the L2-normalised gradient direction
            weightGrads = -1 * (w_grads / (w_grads.norm(2)+1e-10))
            print("uGrads:", u_grads)
            print("wGrads:", w_grads)
            print("weightGrads:", weightGrads)
            update = self.weight_eta * weightGrads
            # weight_mask == 0 freezes an entry (dataIter marks in-domain items with 0)
            weight = weight - update*(weight_mask.to(update.device))
        return weight

    def OptimStep(self, model, model_optim, batch, weight):
        """One ordinary training step using sigmoid(weight) as per-instance
        loss coefficients.

        NOTE(review): no zero_grad here — gradients accumulate onto whatever
        is already stored on the model; confirm this is intended.
        """
        loss = self.LossList(model, batch)
        sumLoss = ((weight.sigmoid()) * loss).sum()
        sumLoss.backward()
        model_optim.step()

    def dataIter(self, OOD_Set, InD_Set=None, batch_size=32):
        """Yield mixed batches drawn from the OOD and optional in-domain sets.

        Yields ``(collated_batch, global_indices, weight_mask)`` where the
        mask is 1.0 for OOD items and 0.0 for in-domain items; in-domain
        indices are offset by ``len(OOD_Set)``.
        """
        p_idxs = list(range(len(OOD_Set)))
        p_len = len(p_idxs)
        if InD_Set is None:
            l_len = 0
            l_idxs = []
        else:
            l_idxs = list(range(len(InD_Set)))
            l_len = len(l_idxs)
        data_size = p_len + l_len
        # doubled so the final slice may run past data_size without truncating
        idxs = random.sample(range(data_size), data_size)*2
        for start_i in range(0, data_size, batch_size):
            batch_idxs = idxs[(start_i):(start_i+batch_size)]
            items = [OOD_Set[p_idxs[idx]] if idx < p_len else \
                        InD_Set[l_idxs[idx-p_len]] for idx in batch_idxs]
            yield OOD_Set.collate_raw_batch(items), batch_idxs, \
                    torch.tensor([1. if idx < p_len else 0. for idx in batch_idxs])

    def Training(self, model:RumorDetection, train_set:BiGCNWeiboSet, valid_set:BiGCNWeiboSet,
                 test_set:BiGCNWeiboSet, indomain_set:BiGCNWeiboSet=None, max_epoch=100, max_valid_every=100,
                 model_file="./tmp.pkl"):
        """Full loop: alternate MetaStep and OptimStep over mixed batches,
        validate every `max_valid_every` steps, save to `model_file` at the
        end.  SGD drives the throwaway meta updates; Adam the real ones.
        """
        meta_optim = torch.optim.SGD([
            {'params': model.parameters(), 'lr': self.lr4model}
        ])
        model_optim = torch.optim.Adam([
            {'params': model.parameters(), 'lr': self.lr4model}
        ])
        self.few_shot_data, self.few_shot_data_list = self.FewShotDataList(valid_set)
        # OOD examples start at weight 0 (sigmoid -> 0.5); in-domain examples
        # start at 10 (sigmoid ~= 1) and stay frozen via dataIter's mask
        weights = [0.0]*len(train_set) + \
                    ([] if indomain_set is None else [10.0]*len(indomain_set))
        self.train_set_weights = torch.tensor(weights, device=self.device)
        test_label = torch.tensor(test_set.data_y).argmax(dim=1)
        step = 0
        for epoch in range(max_epoch):
            for batch, indices, weight_mask in self.dataIter(train_set, indomain_set, self.batch_size):
                weights = self.train_set_weights[indices]
                new_weights = self.MetaStep(model, meta_optim, batch, weights, weight_mask)
                self.train_set_weights[indices] = new_weights
                self.OptimStep(model, model_optim, batch, new_weights)
                if (step+1) % max_valid_every == 0:
                    self.valid(model, test_set, test_label, self.suffix, step)
                step += 1
        model.save_model(model_file)

    def valid(self, model, test_set, test_label, test_suffix, step=0):
        """Evaluate on `test_set` and log accuracy plus per-class P/R/F1 to
        fitlog (both the per-step metric and the running best)."""
        rst_model = Perf(model, test_set, test_label)
        acc_v, (p_v, r_v, f1_v, _) = rst_model
        print("BestPerf : ", rst_model)
        output_items = [("valid_acc", acc_v)] + \
                       [('valid_prec_{}'.format(i), p_v[i]) for i in range(self.class_num)] + \
                       [('valid_recall_{}'.format(i), r_v[i]) for i in range(self.class_num)] + \
                       [('valid_f1_{}'.format(i), f1_v[i]) for i in range(self.class_num)]
        fitlog.add_metric({f"{test_suffix}": dict(output_items)}, step=step)
        fitlog.add_best_metric({f"FinalPerf_{self.suffix}": dict(output_items)})

class WindTrainer_V1(WindTrainer):
    """WindTrainer variant using relu(weight) as the per-instance loss
    coefficient, with sign(u) as its surrogate derivative."""

    def __init__(self, class_num, log_dir, suffix, weight_eta=0.1, lr4model=2e-2,
                 coeff4expandset=1.0, max_few_shot_size=20, Inner_BatchSize=5, meta_step=5):
        super(WindTrainer_V1, self).__init__(class_num, log_dir, suffix, weight_eta, lr4model,
                                             coeff4expandset, max_few_shot_size, Inner_BatchSize, meta_step)
        # rolling window of recent meta-validation accuracies
        self.meta_acc_list = []

    def MetaStep(self, model:RumorDetection, optim:torch.optim, batch,
                    weight:torch.Tensor, weight_mask):
        """Meta weight update with u = relu(weight).

        NOTE(review): u_grads is pre-negated here and weightGrads is NOT
        re-negated, so the net update direction matches the sigmoid version
        in the base class.  Also relu(0) == 0 and sign(0) == 0, so weights
        sitting exactly at 0 (the initial value in Training) receive zero
        gradient — confirm weights are seeded non-zero when this variant is
        used.
        """
        assert hasattr(self, "few_shot_data")
        assert hasattr(self, "few_shot_data_list")
        # snapshot the parameters so each inner iteration starts identically
        initStateDicts = model.state_dict()
        initStateDicts = {key: initStateDicts[key].clone() for key in initStateDicts}
        for step in range(self.meta_step):
            u = weight.relu()
            model.zero_grad()
            loss = self.LossList(model, batch)
            sumLoss = (u * loss).sum()
            sumLoss.backward()
            optim.step()
            self.val_grad_dicts, fewLoss, fewAcc = self.meanGradOnValSet(model,
                                                                         few_shot_data=self.few_shot_data,
                                                                         few_shot_data_list=self.few_shot_data_list)
            print(f"##Perf on Meta Val Set## {step} | {self.meta_step} :  loss/acc = {fewLoss}/{fewAcc}")
            # restore the snapshot before estimating weight gradients
            model.load_state_dict(initStateDicts)
            u_grads = -1 * self.ComputeGrads4Weights(model, batch, self.few_shot_data, self.few_shot_data_list)
            # surrogate derivative of relu: sign(u)
            w_grads = u_grads*(u.sign())
            weightGrads = (w_grads / (w_grads.norm(2) + 1e-10))
            print("u:", u)
            print("uGrads:", u_grads)
            print("wGrads:", w_grads)
            print("weightGrads:", weightGrads)
            update = self.weight_eta * weightGrads
            # weight_mask == 0 freezes an entry
            weight = weight - update*(weight_mask.to(update.device))
        return weight

    def OptimStep(self, model, model_optim, batch, weight):
        """Ordinary training step weighted by relu(weight)."""
        loss = self.LossList(model, batch)
        sumLoss = ((weight.relu()) * loss).sum()
        sumLoss.backward()
        model_optim.step()

class WindTrainer_V2(WindTrainer):
    """WindTrainer variant adding soft-label losses, a domain discriminator
    (with gradient reversal) and adversarial input augmentation for the
    meta-validation gradients."""

    def __init__(self, class_num, log_dir, suffix, weight_eta=0.1, lr4model=2e-2,
                 coeff4expandset=1.0, max_few_shot_size=20, Inner_BatchSize=5, meta_step=5):
        super(WindTrainer_V2, self).__init__(class_num, log_dir, suffix, weight_eta, lr4model,
                                             coeff4expandset, max_few_shot_size, Inner_BatchSize, meta_step)
        # rolling window (max ~20) of recent meta-validation accuracies
        self.meta_acc_list = []

    def LossList(self, model, batch):
        """Per-instance cross-entropy against the (possibly soft) labels in
        ``batch[-2]``; returns one loss value per example."""
        preds = model.predict(batch)
        # ------------------------------------------------------------#
        # to prevent extremely confident prediction, i.e., [1.0, 0.0],
        # as it can lead to a 'nan' value in log operation
        epsilon = torch.ones_like(preds) * 1e-8
        preds = (preds - epsilon).abs()
        labels = batch[-2]
        loss = (-1.0 *
                (preds.log()) *
                (labels.to(preds.device))
                ).sum(dim=1)
        return loss

    def discriminatorLoss(self, trModel:RumorDetection, discriminator, batch,
                                    temperature=1.0, gradient_reversal=True):
        """Domain-classification NLL loss/accuracy on pooled batch vectors.

        With gradient_reversal=True the encoder receives flipped gradients
        (adversarial domain adaptation).  NOTE(review): ``.norm()`` here is
        the norm of the whole matrix, not per-row; the same-named method in
        MetaSelfTrainV14 normalises per row (``norm(dim=1)``) — confirm
        which is intended.
        """
        pooledOutput = trModel.Batch2Vecs(batch)
        if gradient_reversal:
            pooledOutput = GradientReversal.apply(pooledOutput)
        normedPoolOut = pooledOutput/(pooledOutput.norm())
        logits = discriminator(normedPoolOut)
        preds = F.softmax(logits/temperature, dim=1)
        # shift probabilities off exact 0/1 so log() stays finite
        epsilon = torch.ones_like(preds) * (1e-8)
        preds = (preds - epsilon).abs()
        loss = F.nll_loss(preds.log(), batch[-1].to(preds.device))
        acc = accuracy_score(batch[-1].cpu().numpy(), preds.argmax(dim=1).cpu().numpy())
        return loss, acc

    def PreTrainDomainClassifier(self, trModel:RumorDetection, discriminator:nn.Module,
                                    labeled_source : MSTDataset, labeled_target : MSTDataset,
                                    unlabeled_target : MSTDataset, maxEpoch):
        """Pre-train the domain discriminator on mixed-domain batches,
        saving to 'bestDCLS.pkl' and returning once converged."""
        # the rule for early stop: when the variance of the recent 50 training loss is smaller than 0.05,
        # the training process will be stopped
        # NOTE(review): the implemented rule is mean(last 20 losses) < 0.2,
        # not the variance rule described above — the comment looks stale.
        optim = torch.optim.Adam([{
             'params': discriminator.parameters(),
             'lr': 2e-3
        }])
        lossList = []
        max_iters = max([(2*len(labeled_source))//self.batch_size, (2*len(unlabeled_target))//self.batch_size])
        for epoch in range(maxEpoch):
            step = 0
            for batch, _ in DataIter(unlabeled_target, dataset_2=labeled_source,
                                           dataset_3=labeled_target, batch_size=self.batch_size):
                DLoss, DAcc = self.discriminatorLoss(trModel, discriminator, batch)
                optim.zero_grad()
                DLoss.backward()
                optim.step()
                torch.cuda.empty_cache()
                print('####Pre Train Domain Classifier (%3d | %3d) %3d | %3d ####, loss = %6.8f, Acc = %6.8f' % (
                    step, max_iters, epoch, maxEpoch, DLoss.data.item(), DAcc
                ))
                lossList.append(DLoss.data.item())
                if len(lossList) > 20:
                    lossList.pop(0)
                    if np.mean(lossList) < 0.2:
                        torch.save(discriminator.state_dict(), "bestDCLS.pkl")
                        return
                step += 1

    def updateGradDicts(self, model, batch, label_weight, coeff=1.0, GradDicts=None, Aug=False):
        """Backprop ``coeff * loss`` for `batch` and fold the resulting
        parameter gradients into `GradDicts` (created on first use).

        Returns ``(loss_value, accuracy, GradDicts)``.
        """
        # sum the loss with the averaged weights or the biased weights
        # will allow computing the mean risk on any size of few shot dataset
        if Aug:
            # perturb inputs adversarially (PGD) before computing the loss
            self.clsAdverAugmentation(model, batch, PGD_step=5)
        loss, acc = self.lossAndAcc(model, batch, label_weight=label_weight, reduction="sum")
        model.zero_grad()
        (coeff*loss).backward()
        if GradDicts is None:
            GradDicts = {n: p.grad.clone() for n, p in model.named_parameters()}
        else:
            for n, p in model.named_parameters():
                GradDicts[n] += p.grad.data
        # switch the sentence encoder's augmentation mode back off
        model.sent2vec.set_aug_type(None)
        torch.cuda.empty_cache()
        return loss.data.item(), acc, GradDicts

    def meanGradOnValSet(self, model, few_shot_data=None, few_shot_data_list=None):
        """Return (grad_dicts, loss, accuracy) for the few-shot validation
        data, given either one collated batch or a list of them.

        NOTE(review): the accumulated `grad_dicts` built via updateGradDicts
        is overwritten just before returning with the model's current
        ``.grad`` values — i.e. only the last backward pass — confirm
        whether the accumulation was meant to be returned instead.
        """
        assert few_shot_data is not None or few_shot_data_list is not None
        grad_dicts = None
        if few_shot_data_list is None:
            loss, acc, grad_dicts = self.updateGradDicts(model, few_shot_data, self.f_label_weight,
                                                            1.0, grad_dicts, Aug=True)
        else:
            print("-------> few shot data list ------>")
            f_loss_list, f_acc_list = [], []
            for i, few_data in enumerate(few_shot_data_list):
                f_loss, f_acc, grad_dicts = self.updateGradDicts(model, few_data, self.f_label_weight,
                                                                    1.0, grad_dicts, Aug=True)
                f_loss_list.append(f_loss)
                f_acc_list.append(f_acc)
            loss, acc = np.sum(f_loss_list), np.mean(f_acc_list)

        # keep a bounded history of recent meta-validation accuracies
        if len(self.meta_acc_list) > 20:
            self.meta_acc_list.pop(0)
        self.meta_acc_list.append(acc)
        grad_dicts = {n: p.grad.clone() for n, p in model.named_parameters()}
        return grad_dicts, loss, acc

    def domainAdverAugmentation(self, model, batch, PGD_step=3):
        """Accumulate `PGD_step` adversarial gradients from the domain
        discriminator loss (no reversal) with the encoder in 'adver' mode."""
        assert hasattr(self, "discriminator")
        model.zero_grad()
        model.sent2vec.set_aug_type("adver")
        for _ in range(PGD_step):
            loss, acc = self.discriminatorLoss(model, self.discriminator, batch, gradient_reversal=False)
            loss.backward()

    def clsAdverAugmentation(self, model:RumorDetection, batch, PGD_step=3):
        """Accumulate `PGD_step` adversarial gradients from the rumor
        detection loss with the encoder in 'adver' mode."""
        assert hasattr(self, "discriminator")
        model.zero_grad()
        model.sent2vec.set_aug_type("adver")
        for _ in range(PGD_step):
            loss, acc = model.RDMLoss(batch)
            loss.backward()

    def MetaStep(self, model:RumorDetection, optim:torch.optim, batch,
                    weight:torch.Tensor, weight_mask):
        """Meta weight update with u = relu(weight) and sign(u) as its
        surrogate derivative; validation gradients are computed after
        adversarial augmentation (see meanGradOnValSet/updateGradDicts)."""
        assert hasattr(self, "few_shot_data")
        assert hasattr(self, "few_shot_data_list")
        # snapshot the parameters so each inner iteration starts identically
        initStateDicts = model.state_dict()
        initStateDicts = {key: initStateDicts[key].clone() for key in initStateDicts}
        for step in range(self.meta_step):
            u = weight.relu()
            model.zero_grad()
            loss = self.LossList(model, batch)
            sumLoss = (u * loss).sum()
            sumLoss.backward()
            optim.step()
            self.val_grad_dicts, fewLoss, fewAcc = self.meanGradOnValSet(model,
                                                                         few_shot_data=self.few_shot_data,
                                                                         few_shot_data_list=self.few_shot_data_list)
            print(f"##Perf on Meta Val Set## {step} | {self.meta_step} :  loss/acc = {fewLoss}/{fewAcc}")
            # restore the snapshot before estimating weight gradients
            model.load_state_dict(initStateDicts)
            u_grads = self.ComputeGrads4Weights(model, batch, self.few_shot_data, self.few_shot_data_list)
            # surrogate derivative of relu: sign(u)
            w_grads = u_grads*(u.sign())
            weightGrads = -1 * (w_grads / (w_grads.norm(2)+1e-10))
            print("uGrads:", u_grads)
            print("wGrads:", w_grads)
            print("weightGrads:", weightGrads)
            update = self.weight_eta * weightGrads
            # weight_mask == 0 freezes an entry
            weight = weight - update*(weight_mask.to(update.device))
        return weight

class WindTrainer_V3(WindTrainer):
    """WindTrainer variant that rebuilds the few-shot validation data with
    class-balanced label weights and size-capped collated batches."""

    def __init__(self, class_num, log_dir, suffix, weight_eta=0.1, lr4model=2e-2,
                 coeff4expandset=1.0, max_few_shot_size=20, Inner_BatchSize=5, meta_step=5):
        super(WindTrainer_V3, self).__init__(class_num, log_dir, suffix, weight_eta, lr4model,
                                             coeff4expandset, max_few_shot_size, Inner_BatchSize, meta_step)
        # rolling window of recent meta-validation accuracies
        self.meta_acc_list = []

    def FewShotDataList(self, few_shot_set):
        """Return ``(few_shot_data, few_shot_data_list)``: a single collated
        batch when the set fits within ``max_few_shot_size`` (list is None),
        otherwise a list of capped batches (single batch is None).  Also
        stores inverse-frequency class weights in ``self.f_label_weight``.
        """
        labels = few_shot_set.labelTensor()
        if labels.dim() == 2:
            labels = labels.argmax(dim=1)
        per_class = [labels.__eq__(c).sum().data.item() for c in range(self.class_num)]
        assert sum(per_class) == len(few_shot_set)
        # inverse-frequency weights so every class contributes equally
        self.f_label_weight = torch.tensor(
            [1.0 / (self.class_num * c_cnt) for c_cnt in per_class], device=self.device)

        total = len(few_shot_set)
        if total <= self.max_few_shot_size:
            # small enough for one collated batch
            single = few_shot_set.collate_raw_batch([few_shot_set[i] for i in range(total)])
            return single, None
        batches = []
        for lo in range(0, total, self.max_few_shot_size):
            hi = min(lo + self.max_few_shot_size, total)
            batches.append(few_shot_set.collate_raw_batch(
                [few_shot_set[j] for j in range(lo, hi)]))
        return None, batches

class WindTrainer_V4A(WindTrainer):
    """Meta self-training trainer: pre-trains on labelled source data, then
    alternates pseudo-labelling (`annotate`), entropy-based example
    selection (`dataSelection`) and weighted re-training (`ModelTrain`).
    `beta` blends fresh model predictions into the stored labels."""

    def __init__(self, beta, class_num, log_dir, suffix, weight_eta=0.1, lr4model=2e-2,
                 coeff4expandset=1.0, max_few_shot_size=20, Inner_BatchSize=5, meta_step=5):
        super(WindTrainer_V4A, self).__init__(class_num, log_dir, suffix, weight_eta, lr4model,
                                             coeff4expandset, max_few_shot_size, Inner_BatchSize, meta_step)
        # rolling window of recent meta-validation accuracies
        self.meta_acc_list = []
        # interpolation factor between stored labels and fresh predictions
        self.beta = beta

    def LossList(self, model, batch):
        """Per-instance cross-entropy against the (possibly soft) labels in
        ``batch[-2]``; returns one loss value per example."""
        preds = model.predict(batch)
        # ------------------------------------------------------------#
        # to prevent extremely confident prediction, i.e., [1.0, 0.0],
        # as it can lead to a 'nan' value in log operation
        epsilon = torch.ones_like(preds) * 1e-8
        preds = (preds - epsilon).abs()
        labels = batch[-2]
        loss = (-1.0 *
                (preds.log()) *
                (labels.to(preds.device))
                ).sum(dim=1)
        return loss

    def PreTrainNLIClassifier(self, trModel:RumorDetection, labeled_source : MSTDataset, lr4model=-1,
                                    maxEpoch=5):
        """Supervised pre-training of the rumor model on labelled source
        data; stops early (saving './WeiboInit.pkl') once converged."""
        # the rule for early stop: when the variance of the recent 50 training loss is smaller than 0.05,
        # the training process will be stopped
        # NOTE(review): the implemented rule is mean(last 20 losses) < 0.2,
        # not the variance rule described above — the comment looks stale.
        optim = torch.optim.Adam([
            {'params': trModel.parameters(), 'lr': self.lr4model if lr4model < 0 else lr4model}
        ])
        lossList = []
        max_iters = len(labeled_source)//self.batch_size
        for epoch in range(maxEpoch):
            step = 0
            for batch, _ in DataIter(labeled_source, dataset_2=None,
                                           dataset_3=None, batch_size=self.batch_size):
                DLoss, DAcc = trModel.RDMLoss(batch)
                optim.zero_grad()
                DLoss.backward()
                optim.step()
                torch.cuda.empty_cache()
                # NOTE(review): np.mean(lossList) warns and yields nan on the
                # very first iteration, while lossList is still empty
                print('####Pre Train NLI Classifier (%3d | %3d) %3d | %3d ####, loss = %6.8f, Acc = %6.8f, mean_loss = %6.8f' % (
                    step, max_iters, epoch, maxEpoch, DLoss.data.item(), DAcc, np.mean(lossList).item()
                ))
                lossList.append(DLoss.data.item())
                if len(lossList) > 20:
                    lossList.pop(0)
                    if np.mean(lossList) < 0.2:
                        torch.save(trModel.state_dict(), "./WeiboInit.pkl")
                        return
                step += 1

    def MetaStepV4A(self, model:RumorDetection, optim:torch.optim, batch,
                    weight:torch.Tensor):
        """Like WindTrainer.MetaStep, but without a weight mask: every
        (sigmoid-activated) instance weight is updated."""
        assert hasattr(self, "few_shot_data")
        assert hasattr(self, "few_shot_data_list")
        # snapshot the parameters so each inner iteration starts identically
        initStateDicts = model.state_dict()
        initStateDicts = {key: initStateDicts[key].clone() for key in initStateDicts}
        for step in range(self.meta_step):
            u = weight.sigmoid()
            model.zero_grad()
            loss = self.LossList(model, batch)
            sumLoss = (u * loss).sum()
            sumLoss.backward()
            optim.step()
            self.val_grad_dicts, fewLoss, fewAcc = self.meanGradOnValSet(model,
                                                                         few_shot_data=self.few_shot_data,
                                                                         few_shot_data_list=self.few_shot_data_list)
            print(f"##Perf on Meta Val Set## {step} | {self.meta_step} :  loss/acc = {fewLoss}/{fewAcc}")
            # restore the snapshot before estimating weight gradients
            model.load_state_dict(initStateDicts)
            u_grads = self.ComputeGrads4Weights(model, batch, self.few_shot_data, self.few_shot_data_list)
            # chain rule through u = sigmoid(weight): du/dw = u * (1 - u)
            w_grads = u_grads*u*(1-u)
            weightGrads = -1 * (w_grads / (w_grads.norm(2)+1e-10))
            print("uGrads:", u_grads)
            print("wGrads:", w_grads)
            print("weightGrads:", weightGrads)
            update = self.weight_eta * weightGrads
            weight = weight - update
        return weight

    def ModelTrain(self, model:RumorDetection, train_set:BiGCNWeiboSet, valid_set:BiGCNWeiboSet,
                    test_set:BiGCNWeiboSet, test_label:torch.Tensor, indomain_set:BiGCNWeiboSet=None,
                        max_epoch=100, max_valid_every=100, model_file="./tmp.pkl"):
        """Weighted training loop (MetaStepV4A + OptimStep) with periodic
        validation; saves the model to `model_file` at the end."""
        meta_optim = torch.optim.SGD([
            {'params': model.parameters(), 'lr': self.lr4model}
        ])
        model_optim = torch.optim.Adam([
            {'params': model.parameters(), 'lr': self.lr4model}
        ])
        self.few_shot_data, self.few_shot_data_list = self.FewShotDataList(valid_set)
        # unlike WindTrainer.Training, in-domain weights also start at 0 here
        weights = [0.0]*len(train_set) + \
                    ([] if indomain_set is None else [0.0]*len(indomain_set))
        self.train_set_weights = torch.tensor(weights, device=self.device)
        step = 0
        for epoch in range(max_epoch):
            for batch, indices in DataIter(train_set, indomain_set, None, self.batch_size):
                weights = self.train_set_weights[indices]
                new_weights = self.MetaStepV4A(model, meta_optim, batch, weights)
                self.train_set_weights[indices] = new_weights
                self.OptimStep(model, model_optim, batch, new_weights)
                if (step+1) % max_valid_every == 0:
                    self.valid(model, test_set, test_label, self.suffix, step)
                step += 1
        model.save_model(model_file)

    def annotate(self, model: RumorDetection, data, pseaudo_idxs=[], batch_size=20):
        """Refresh pseudo-labels for every item NOT listed in `pseaudo_idxs`:
        new label = row-normalised blend (1-beta)*stored + beta*model logits.

        NOTE(review): the mutable default ``pseaudo_idxs=[]`` is shared
        across calls; harmless here because it is only read, never mutated.
        """
        c_idxs = list(set(range(len(data))) - set(pseaudo_idxs))
        with torch.no_grad():
            pred_tensor = pred_Logits(model, data, idxs=c_idxs, batch_size=batch_size)
        p_tensor = (1 - self.beta) * data.labelTensor(device=pred_tensor.device)[c_idxs] + \
                            self.beta * pred_tensor
        pseudo_label = (p_tensor/(p_tensor.sum(dim=1).unsqueeze(-1))).cpu().numpy() # label is the accumulation of logits
        data.setLabel(pseudo_label, c_idxs)

    def dataSelection(self, pseudo_set):
        """Keep the middle 50% of `pseudo_set` by label entropy — dropping
        the most- and least-confident quarters — via ``valid_indexs``."""
        logits = pseudo_set.labelTensor(device=torch.device("cuda"))
        assert logits.dim() == 2
        # NOTE(review): entries equal to 0 make log() -inf and the product
        # nan, which propagates into the sort — confirm labels are smoothed
        entrophy = (-1 * logits * (logits.log())).sum(dim=1)
        sort_idxs = entrophy.argsort().cpu().tolist()
        start, end = len(sort_idxs)//4, (len(sort_idxs)*3)//4
        pseudo_set.valid_indexs = sort_idxs[start:end]

    def MetaSelfTrain(self, model, labeled_source, labeled_target, unlabeled_target, UT_label, valid_set,
                  max_iterate=100):
        """Outer self-training loop: annotate -> select -> one epoch of
        weighted training, repeated up to `max_iterate` times.

        NOTE(review): `labeled_target` is accepted but unused in this body.
        """
        for iterate in range(max_iterate):
            self.annotate(model, unlabeled_target)
            self.dataSelection(unlabeled_target)
            self.ModelTrain(model, labeled_source, valid_set, unlabeled_target, UT_label, unlabeled_target,
                            max_epoch=1, max_valid_every=30)

class MetaSelfTrainV14(WindTrainer_V4A):
    """Meta self-training trainer (V14).

    Extends WindTrainer_V4A with three ingredients:
      * a domain discriminator trained adversarially (via gradient reversal)
        to align source/target representations,
      * an "expand set" of the `expand_size` highest-entropy source samples,
        used as additional meta-validation data,
      * meta-learned per-sample loss weights (MetaStepV4A).
    """

    def __init__(self, expand_size, beta, class_num, log_dir, suffix, weight_eta=0.1, lr4model=2e-2,
                 coeff4expandset=1.0, max_few_shot_size=20, Inner_BatchSize=5, meta_step=5):
        super(MetaSelfTrainV14, self).__init__(beta, class_num, log_dir, suffix, weight_eta, lr4model,
                                             coeff4expandset, max_few_shot_size, Inner_BatchSize, meta_step)
        # How many high-entropy source samples to move into the expand set.
        self.expand_size = expand_size

    def discriminatorLoss(self, trModel:RumorDetection, discriminator, batch, temperature=1.0, reversal=True):
        """Domain-classification loss/accuracy on a batch's pooled features.

        Pools the batch through the detector, optionally routes it through a
        gradient-reversal layer (so generator updates maximize discriminator
        confusion), L2-normalizes the features and scores them with
        `discriminator`.  `batch[-1]` is taken as the domain label.

        Returns:
            (loss, acc): NLL loss tensor and scalar sklearn accuracy.
        """
        pooledOutput = trModel.Batch2Vecs(batch)
        if reversal:
            pooledOutput = GradientReversal.apply(pooledOutput)
        normedPoolOut = pooledOutput / (pooledOutput.norm(dim=1).unsqueeze(-1))
        logits = discriminator(normedPoolOut)
        preds = F.softmax(logits / temperature, dim=1)
        # Nudge probabilities off exact 0/1 so the log() below cannot yield nan.
        epsilon = torch.ones_like(preds) * (1e-8)
        preds = (preds - epsilon).abs()
        loss = F.nll_loss(preds.log(), batch[-1].to(preds.device))
        acc = accuracy_score(batch[-1].cpu().numpy(), preds.argmax(dim=1).cpu().numpy())
        return loss, acc

    def lossAndAcc(self, model:RumorDetection, batch, label_weight=None, reduction=None):
        """Classification NLL loss and accuracy for one batch.

        `batch[-2]` holds the labels; one-hot (2-D) label tensors are
        argmax-ed to class indices first.

        NOTE(review): `label_weight` and `reduction` are accepted but unused
        by this implementation — confirm against the base-class contract
        before relying on them.
        """
        preds = model.forward(batch)
        epsilon = torch.ones_like(preds) * 1e-8
        preds = (preds - epsilon).abs() # to avoid the prediction [1.0, 0.0], which leads to the 'nan' value in log operation
        labels = batch[-2].to(preds.device).argmax(dim=1) if batch[-2].dim() == 2 else batch[-2].to(preds.device)
        loss = F.nll_loss(preds.log(), labels)
        acc_t = ((preds.argmax(dim=1) - labels).__eq__(0).float().sum()) / len(labels)
        acc = acc_t.data.item()
        return loss, acc

    def PreTrainDomainClassifier(self, trModel:RumorDetection, discriminator:nn.Module,
                                    labeledSource : BiGCNWeiboSet, labeledTarget : BiGCNWeiboSet,
                                    unlabeledTarget : BiGCNWeiboSet, maxEpoch, learning_rate=-1,
                                    cached_file="./tmp.pkl"):
        """Pre-train the domain discriminator; the detector is not optimized.

        Early stop: once the last 20 losses have std < 0.05 and mean < 0.1,
        the discriminator weights are cached to `cached_file` and training
        ends.  (The original header comment said "variance of the recent 50
        losses"; the code windows the last 20 and uses the std — the code is
        kept authoritative here.)

        NOTE(review): `learning_rate` is accepted but unused (the Adam lr is
        hard-coded to 2e-3) — confirm whether it should be honored.
        """
        optim = torch.optim.Adam([{
             'params': discriminator.parameters(),
             'lr': 2e-3
        }])
        lossList = []
        max_iter = 2*max([len(labeledSource), len(unlabeledTarget)])//self.batch_size
        for epoch in range(maxEpoch):
            step = 0
            for batch, _ in DataIter(labeledSource, unlabeledTarget, labeledTarget, self.batch_size):
                DLoss, DAcc = self.discriminatorLoss(trModel, discriminator, batch)
                optim.zero_grad()
                DLoss.backward()
                optim.step()
                torch.cuda.empty_cache()
                print('####Pre Train Domain Classifier (%3d | %3d) %3d | %3d ####, loss = %6.8f, Acc = %6.8f' % (
                    step, max_iter, epoch, maxEpoch, DLoss.data.item(), DAcc
                ))
                step += 1  # fix: the progress counter was never incremented
                lossList.append(DLoss.data.item())
                if len(lossList) > 20:
                    lossList.pop(0)
                    if np.std(lossList) < 0.05 and np.mean(lossList) < 0.1:
                        # fix: save the discriminator, not the detector — the
                        # caller loads this file with
                        # `domainDiscriminator.load_state_dict(...)`, so saving
                        # trModel.state_dict() here produced a mismatched dict.
                        torch.save(discriminator.state_dict(), cached_file)
                        return

    def AdversarialTrain(self, model, domain_classifier, optim_step=10, gStep=1, dStep=10):
        """Adversarial alignment on sampled expand / few-shot batches.

        For `optim_step` rounds: draw one expand batch and one few-shot batch,
        run `dStep` discriminator updates (losses from both batches, the
        few-shot one scaled by the set-size ratio), then `gStep` generator
        updates through the gradient-reversal path.
        """
        D_optim = torch.optim.Adam([{
             'params': domain_classifier.parameters(),
             'lr': 2e-3
        }])
        G_optim = torch.optim.Adam([{
             'params': model.parameters(),
             'lr': 5e-5
        }])
        few_data_list = [self.few_shot_data] if self.few_shot_data_list is None else self.few_shot_data_list
        for step in range(optim_step):
            ebatch = random.sample(self.expand_data_list, 1)[0]
            fbatch = random.sample(few_data_list, 1)[0]
            for d_idx in range(dStep):
                D_optim.zero_grad()
                ELoss, EAcc = self.discriminatorLoss(model, domain_classifier, ebatch)
                ELoss.backward()
                FLoss, FAcc = self.discriminatorLoss(model, domain_classifier, fbatch)
                # Scale the few-shot loss so both sets contribute proportionally.
                ((len(few_data_list)*1.0/len(self.expand_data_list))*FLoss).backward()
                D_optim.step()
                print('#### D Step (%3d , %3d) %3d | %3d ####, loss = %6.8f, Acc = %6.8f' % (
                    d_idx, dStep, step, optim_step,
                    ELoss.data.item()+((len(few_data_list)*1.0/len(self.expand_data_list))*FLoss.data.item()),
                    (EAcc+FAcc)/2
                ))

            for g_idx in range(gStep):
                G_optim.zero_grad()
                ELoss, EAcc = self.discriminatorLoss(model, domain_classifier, ebatch)
                ELoss.backward()
                FLoss, FAcc = self.discriminatorLoss(model, domain_classifier, fbatch)
                ((len(few_data_list)*1.0/len(self.expand_data_list))*FLoss).backward()
                print('#### G Step (%3d , %3d) %3d | %3d ####, loss = %6.8f, Acc = %6.8f' % (
                    g_idx, gStep, step, optim_step,
                    ELoss.data.item()+((len(few_data_list)*1.0/len(self.expand_data_list))*FLoss.data.item()),
                    (EAcc+FAcc)/2
                ))
                G_optim.step()
                torch.cuda.empty_cache()

    def ConstructExpandData(self, model, source_set):
        """Select the `expand_size` highest-entropy samples as the expand set.

        Side effects: sets `self.e_label_weight` and `self.expand_data_list`,
        and removes the selected indices from `source_set.valid_indexs`.
        """
        with torch.no_grad():
            logits = pred_Logits(model, source_set)
        # Prediction entropy per sample; high entropy == most uncertain.
        entropy = (-1 * (logits * (logits.log()))).sum(dim=1)
        expand_idxs = entropy.topk(self.expand_size)[1].cpu().tolist()

        # NOTE(review): this builds a class_num-length weight vector whose
        # entries are 1/len(expand_idxs) — presumably intentional scaling, but
        # 1/self.class_num would be the uniform choice; confirm.
        self.e_label_weight = torch.tensor([1.0 / len(expand_idxs) for _ in range(self.class_num)], device=self.device)
        # extract out the expand data
        print("- - - - - update expand data - - ->")
        self.expand_data_list = [source_set.collate_raw_batch(
            [source_set[idx] for idx in expand_idxs[i:min(i + self.max_few_shot_size, len(expand_idxs))]]
        ) for i in range(0, len(expand_idxs), self.max_few_shot_size)]

        # Drop the extracted samples from the trainable portion of source_set.
        source_set.valid_indexs = [idx for idx in range(len(source_set)) if not (idx in expand_idxs)]

    def meanGradOnValSet(self, model, few_shot_data=None, few_shot_data_list=None):
        """Accumulate gradients of the meta-validation loss into `model`.

        Backpropagates the few-shot loss (single batch or list of batches) and,
        when an expand set exists, the expand-set loss scaled by the expand
        coefficient.  Gradients accumulate across batches because the model is
        only zeroed once at the top.

        Returns:
            (grad_dicts, loss, acc): cloned per-parameter gradients, the total
            scalar loss, and the (few-shot) accuracy.
        """
        model.zero_grad()
        assert few_shot_data is not None or few_shot_data_list is not None
        if few_shot_data_list is None:
            # fix: the original dropped the `model` argument here, passing the
            # batch as `model` (cf. the correct call in the list branch below).
            loss, acc = self.lossAndAcc(model, few_shot_data, label_weight=self.f_label_weight, reduction="sum")
            loss.backward()
            loss = loss.data.item()
        else:
            print("-------> few shot data list ------>")
            f_loss_list, f_acc_list = [], []
            for i, few_data in enumerate(few_shot_data_list):
                # sum the loss with the averaged weights or the biased weights
                # will allow computing the mean risk on any size of few shot dataset
                f_loss, f_acc = self.lossAndAcc(model, few_data, label_weight=self.f_label_weight, reduction="sum")
                f_loss.backward()
                f_loss_list.append(f_loss.data.item())
                f_acc_list.append(f_acc)
                torch.cuda.empty_cache()
            loss, acc = np.sum(f_loss_list), np.mean(f_acc_list)

        if self.expand_data_list is not None:
            if len(self.expand_data_list) > 0:
                assert hasattr(self, 'e_label_weight')
                print("-------> expand data list ------>")
                e_loss_list, e_acc_list = [], []
                for i, e_data in enumerate(self.expand_data_list):
                    e_loss, e_acc = self.lossAndAcc(model, e_data, label_weight=self.e_label_weight, reduction="sum")
                    # NOTE(review): `coefff4expandset` (triple f) is spelled
                    # this way consistently; it must be set by the base class —
                    # confirm the attribute name against WindTrainer_V4A.
                    (e_loss * self.coefff4expandset).backward()
                    e_loss_list.append(e_loss.data.item())
                    e_acc_list.append(e_acc)
                    torch.cuda.empty_cache()
                exp_loss, exp_acc = np.sum(e_loss_list), np.mean(e_acc_list)
                print(f"##Perf on Meta Expand Set##  : exp_loss/exp_acc = {exp_loss}/{exp_acc}")
                grad_dicts = {n: p.grad.clone() for n, p in model.named_parameters()}
                return grad_dicts, \
                       loss + self.coefff4expandset * exp_loss, \
                       acc
        grad_dicts = {n: p.grad.clone() for n, p in model.named_parameters()}
        return grad_dicts, loss, acc

    def MetaStepV4A(self, model:RumorDetection, optim:torch.optim, batch,
                    weight:torch.Tensor):
        """Meta-update the per-sample weights for one training batch.

        For each of `self.meta_step` trials: take a virtual optimizer step
        with the sigmoid-weighted batch loss, measure gradients/performance on
        the meta-validation set, restore the model, then descend the weights
        along the normalized meta-gradient (chain rule through the sigmoid).

        Returns:
            The updated (pre-sigmoid) weight tensor.
        """
        assert hasattr(self, "few_shot_data")
        assert hasattr(self, "few_shot_data_list")
        initStateDicts = model.state_dict()
        # Clone so the virtual update below cannot mutate the snapshot in place.
        initStateDicts = {key: initStateDicts[key].clone() for key in initStateDicts}
        for step in range(self.meta_step):
            u = weight.sigmoid()
            model.zero_grad()
            loss = self.LossList(model, batch)
            sumLoss = (u * loss).sum()
            sumLoss.backward()
            optim.step()
            self.val_grad_dicts, fewLoss, fewAcc = self.meanGradOnValSet(model,
                                                                         few_shot_data=self.few_shot_data,
                                                                         few_shot_data_list=self.few_shot_data_list)
            print(f"##Perf on Meta Val Set## {step} | {self.meta_step} :  loss/acc = {fewLoss}/{fewAcc}")
            if step == 0:
                self.few_loss_list.append(fewLoss)
            # Roll the model back; the step above was only a probe.
            model.load_state_dict(initStateDicts)
            u_grads = self.ComputeGrads4Weights(model, batch, self.few_shot_data, self.few_shot_data_list)
            w_grads = u_grads*u*(1-u)
            weightGrads = -1 * (w_grads / (w_grads.norm(2)+1e-10))
            print("uGrads:", u_grads)
            print("wGrads:", w_grads)
            print("weightGrads:", weightGrads)
            update = self.weight_eta * weightGrads
            weight = weight - update
        return weight

    def ModelTrain(self, model:RumorDetection, train_set:MSTDataset, valid_set:MSTDataset,
                    test_set:MSTDataset, test_label:torch.Tensor, indomain_set:MSTDataset=None,
                        max_epoch=100, max_valid_every=100, model_file="./tmp.pkl"):
        """Weighted training loop with meta-learned per-sample weights.

        Each batch first refines its weights via `MetaStepV4A`, then takes a
        real optimizer step via `OptimStep`.  Training stops early once the
        last 20 recorded meta-validation losses average below 0.2.

        NOTE(review): `model_file` is accepted but the model is never saved
        here (unlike the V4A variant) — confirm whether a save was intended.
        """
        meta_optim = torch.optim.SGD([
            {'params': model.parameters(), 'lr': self.lr4model}
        ])
        model_optim = torch.optim.Adam([
            {'params': model.parameters(), 'lr': self.lr4model}
        ])
        weights = [0.0]*len(train_set) + \
                    ([] if indomain_set is None else [0.0]*len(indomain_set))
        self.train_set_weights = torch.tensor(weights, device=self.device)
        self.few_loss_list = []
        step = 0
        for epoch in range(max_epoch):
            for batch, indices in DataIter(train_set, indomain_set, None, self.batch_size):
                weights = self.train_set_weights[indices]
                new_weights = self.MetaStepV4A(model, meta_optim, batch, weights)
                self.train_set_weights[indices] = new_weights
                self.OptimStep(model, model_optim, batch, new_weights)
                if (step+1) % max_valid_every == 0:
                    self.valid(model, test_set, test_label, self.suffix, step)
                if len(self.few_loss_list) == 20:
                    mean_few_loss = np.mean(self.few_loss_list)
                    if mean_few_loss < 0.2:
                        return
                    else:
                        self.few_loss_list.pop(0)
                step += 1

    def TrainingV14(self, model, domain_classifier, labeled_source, labeled_target, unlabeled_target,
                         valid_set, test_label, max_iterate=100):
        """Outer V14 loop: annotate, select, expand, align, then train."""
        self.few_shot_data, self.few_shot_data_list = self.FewShotDataList(valid_set)
        for iterate in range(max_iterate):
            self.annotate(model, unlabeled_target)
            self.dataSelection(unlabeled_target)
            self.ConstructExpandData(model, labeled_source)
            self.AdversarialTrain(model, domain_classifier)
            self.ModelTrain(model, labeled_source, valid_set, unlabeled_target, test_label,
                                max_epoch=1, max_valid_every=30)

class MetaSelfTrainV15(MetaSelfTrainV14):
    """V15: like V14, but adversarial alignment sweeps full source/target
    iterators (`AdversarialTrainV2`) instead of sampled expand/few-shot
    batches."""

    def __init__(self, expand_size, beta, class_num, log_dir, suffix, weight_eta=0.1, lr4model=2e-2,
                 coeff4expandset=1.0, max_few_shot_size=20, Inner_BatchSize=5, meta_step=5):
        super(MetaSelfTrainV15, self).__init__(expand_size, beta, class_num, log_dir, suffix, weight_eta, lr4model,
                                             coeff4expandset, max_few_shot_size, Inner_BatchSize, meta_step)

    def AdversarialTrainV2(self, model, domain_classifier, labeled_source, unlabeled_target, gStep=1, dStep=10):
        """One pass of adversarial source/target alignment over a DataIter.

        For every joint batch: `dStep` discriminator updates followed by
        `gStep` generator (detector) updates through gradient reversal.
        """
        D_optim = torch.optim.Adam([{
             'params': domain_classifier.parameters(),
             'lr': 2e-3
        }])
        G_optim = torch.optim.Adam([{
             'params': model.parameters(),
             'lr': 5e-5
        }])
        max_iter = 2*max([len(labeled_source), len(unlabeled_target)])//self.batch_size
        step = 0
        for batch, _ in DataIter(labeled_source, unlabeled_target, None, self.batch_size):
            step += 1
            for d_idx in range(dStep):
                D_optim.zero_grad()
                ELoss, EAcc = self.discriminatorLoss(model, domain_classifier, batch)
                ELoss.backward()
                D_optim.step()
                print('#### D Step (%3d , %3d) %3d | %3d ####, loss = %6.8f, Acc = %6.8f' % (
                    d_idx, dStep, step, max_iter,
                    ELoss.data.item(),
                    EAcc
                ))

            for g_idx in range(gStep):
                G_optim.zero_grad()
                ELoss, EAcc = self.discriminatorLoss(model, domain_classifier, batch)
                ELoss.backward()
                print('#### G Step (%3d , %3d) %3d | %3d ####, loss = %6.8f, Acc = %6.8f' % (
                    g_idx, gStep, step, max_iter,
                    ELoss.data.item(),
                    EAcc
                ))
                G_optim.step()
                torch.cuda.empty_cache()

    def TrainingV15(self, model, domain_classifier, labeled_source, labeled_target, unlabeled_target,
                         valid_set, test_label, max_iterate=100):
        """Outer V15 loop: annotate, select, expand (from source), align, train."""
        self.few_shot_data, self.few_shot_data_list = self.FewShotDataList(valid_set)
        for iterate in range(max_iterate):
            self.annotate(model, unlabeled_target)
            self.dataSelection(unlabeled_target)
            self.ConstructExpandData(model, labeled_source)
            # fix: the inherited AdversarialTrain signature is
            # (model, dc, optim_step=10, gStep=1, dStep=10); passing datasets
            # positionally fed a dataset into range().  AdversarialTrainV2 is
            # the method that matches these arguments.
            self.AdversarialTrainV2(model, domain_classifier, labeled_source, unlabeled_target)
            self.ModelTrain(model, labeled_source, valid_set, unlabeled_target, test_label,
                                max_epoch=1, max_valid_every=30)

    def TrainingV15_1(self, model, domain_classifier, labeled_source, labeled_target, unlabeled_target,
                         valid_set, test_label, max_iterate=100):
        """Variant of TrainingV15 that builds the expand set from the
        (pseudo-labeled) unlabeled target data instead of the source set."""
        self.few_shot_data, self.few_shot_data_list = self.FewShotDataList(valid_set)
        for iterate in range(max_iterate):
            self.annotate(model, unlabeled_target)
            self.dataSelection(unlabeled_target)
            self.ConstructExpandData(model, unlabeled_target)
            # fix: same signature mismatch as in TrainingV15 — use
            # AdversarialTrainV2, which accepts the two datasets.
            self.AdversarialTrainV2(model, domain_classifier, labeled_source, unlabeled_target)
            self.ModelTrain(model, labeled_source, valid_set, unlabeled_target, test_label,
                                max_epoch=1, max_valid_every=30)

def obtainOnlineSet(old_prefix, dev_prefix, te_prefix, lt_cnt=0):
    """Load (or rebuild and cache) the online Weibo dataset splits.

    Attempts a fast load from the cached prefixes; if that fails (signalled by
    an AssertionError from the loader), rebuilds every split from the raw
    Weibo dump and caches it for next time.

    Returns:
        (few_shot_set, old_domain_set, new_domain_set, labeled_target) —
        `labeled_target` is None when `lt_cnt` == 0; otherwise `lt_cnt`
        samples are split off the new-domain set as the labeled target.
    """
    few_shot = MSTDataset()
    old_domain = MSTDataset()
    new_domain = MSTDataset()
    try:
        old_domain.load_data_fast(data_prefix=old_prefix)
        few_shot.load_data_fast(data_prefix=dev_prefix)
        new_domain.load_data_fast(data_prefix=te_prefix)
    except AssertionError:
        # Cache miss: rebuild all three splits from the raw data and cache them.
        weibo_dir, df_file = "../../data/Weibo", "../../data/Weibo_IDs.csv"
        df = pd.read_csv(df_file)
        topic = np.ones([len(df)], dtype=np.int64)
        topic[:2500] = 0  # first 2500 rows form the "old" (source) domain
        df['topic'] = topic
        old_domain.load_data(weibo_dir, weibo_df=df[:2500])
        old_domain.Caches_Data(old_prefix)
        dev_te_df = df[2500:].sample(frac=1.0)  # shuffle before dev/test split
        few_shot.load_data(weibo_dir, weibo_df=dev_te_df[:100])
        few_shot.Caches_Data(dev_prefix)
        new_domain.load_data(weibo_dir, weibo_df=dev_te_df[100:])
        new_domain.Caches_Data(te_prefix)
    if lt_cnt <= 0:
        return few_shot, old_domain, new_domain, None
    first_part, second_part = new_domain.split([lt_cnt * 1.0 / len(new_domain), 1.0])
    return few_shot, old_domain, second_part, first_part

def obtain_model(tfidf_vec):
    """Build the BiGCN rumor detector and its domain discriminator.

    Args:
        tfidf_vec: fitted TF-IDF vectorizer backing the sentence encoder.

    Returns:
        (model, discriminator): a BiGCN-based detector and a 2-layer MLP
        domain classifier placed on the GPU.
    """
    sent_encoder = TFIDFBasedVec_CN(
        tfidf_vec, 20, embedding_size=300,
        w2v_file="../../word2vec_CN_WeiboBi.pkl",
        emb_update=True, grad_preserve=True,
    )
    propagation = BiGCN(300, 256)
    label_head = nn.Linear(1024, 2)
    discriminator = nn.Sequential(
        nn.Linear(1024, 512),
        nn.ReLU(),
        nn.Linear(512, 2),
    ).cuda()
    detector = BiGCNRumorDetec(sent_encoder, propagation, label_head,
                               batch_size=20, grad_accum_cnt=1)
    return detector, discriminator

if __name__ == "__main__":
    import shutil  # local import: the file-level import block is elsewhere

    logDir = "WindTest"
    # Recreate a clean log directory.  shutil.rmtree/os.makedirs replace the
    # previous `os.system("rm -rf ...")` / `os.system("mkdir ...")` calls:
    # same effect, but portable and not shell-injection-prone.
    if os.path.exists(logDir):
        shutil.rmtree(logDir)
    os.makedirs(logDir)

    fewShotCnt = 100
    validTarget, labeledSource, unlabeledTarget, labeledTarget = obtainOnlineSet(
                     f"../../data/WeiboOnline_FS{fewShotCnt}_tr",
                     f"../../data/WeiboOnline_FS{fewShotCnt}_dev",
                     f"../../data/WeiboOnline_FS{fewShotCnt}_te",
                     lt_cnt=0
    )
    # Hard labels for evaluating on the unlabeled target set.
    ULabel = unlabeledTarget.labelTensor()
    ULabel = ULabel.argmax(dim=1) if ULabel.dim() == 2 else ULabel.clone()

    TfIdf_twitter_file = "../../saved/TfIdf_WEIBO.pkl"
    if os.path.exists(TfIdf_twitter_file):
        with open(TfIdf_twitter_file, "rb") as fr:
            tv = pickle.load(fr)
    else:
        lemma = Lemma_Factory()
        # fix: labeledTarget is None when lt_cnt == 0, which previously
        # crashed this comprehension; skip missing splits.
        corpus = [" ".join(lemma(txt))
                  for data in [validTarget, labeledSource, unlabeledTarget, labeledTarget]
                  if data is not None
                  for ID in data.data_ID for txt in data.data[ID]['text']]
        tv = TfidfVectorizer(use_idf=True, smooth_idf=True, norm=None)
        _ = tv.fit_transform(corpus)
        with open(TfIdf_twitter_file, "wb") as fw:
            pickle.dump(tv, fw, protocol=pickle.HIGHEST_PROTOCOL)
    newDomainName = "OnlineTest"

    model1, domainDiscriminator = obtain_model(tv)
    print(f"old/new/few = {len(labeledSource)}/{len(unlabeledTarget)}/{len(validTarget)}")
    trainer = MetaSelfTrainV14(expand_size=100, beta=0.2, class_num=2, log_dir=logDir, suffix=f"{newDomainName}_FS{fewShotCnt}",
                          weight_eta=0.1, lr4model=5e-4, max_few_shot_size=20, Inner_BatchSize=20, meta_step=5)
    # Warm-start the detector from a cached checkpoint if available.
    if os.path.exists("./WeiboInit.pkl"):
        model1.load_state_dict(
            torch.load('./WeiboInit.pkl')
        )
    else:
        trainer.PreTrainNLIClassifier(model1, labeledSource, maxEpoch=5)

    # Same for the domain discriminator.
    if os.path.exists(f"./DomainClassifier_{newDomainName}.pkl"):
        domainDiscriminator.load_state_dict(
            torch.load(f'./DomainClassifier_{newDomainName}.pkl')
        )
    else:
        trainer.PreTrainDomainClassifier(model1, domainDiscriminator, labeledSource, labeledTarget, unlabeledTarget,
                                            maxEpoch=5, cached_file=f'./DomainClassifier_{newDomainName}.pkl')

    # Baseline evaluation before meta self-training, then the full V14 loop.
    trainer.valid(model1, unlabeledTarget, ULabel, trainer.suffix, 0)
    trainer.TrainingV14(model1, domainDiscriminator, labeledSource, labeledTarget, unlabeledTarget,
                        validTarget, ULabel, max_iterate=100)